repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
mariomosca/damnvid
ui/dUI.py
12
2218
# -*- coding: utf-8 -*- from dCore import * from dConstants import * from dConfig import * from dLog import * from dWx import wx def DamnFadeIn(frame): if not frame.CanSetTransparent() or not DV.prefs.get('splashscreen') == 'True': frame.Show() return frame.SetTransparent(0) frame.Show() frame.fadeTimer = wx.Timer(frame) frame.fadeCurrent = 0 frame.fadeDelta = 1 frame.fadeInterval = 4 frame.fadeLagTolerance = 2 frame.fadeObjective = 255 frame.fadeTime = time.time() * 1000 frame.fadeDestroy = False frame.Bind(wx.EVT_TIMER, DamnCurry(DamnFadeCycle, frame), frame.fadeTimer) frame.fadeTimer.Start(frame.fadeInterval) def DamnFadeOut(frame, destroy=True): if not frame.CanSetTransparent() or not DV.prefs.get('splashscreen') == 'True': frame.Hide() if destroy: frame.Destroy() return frame.fadeTimer = wx.Timer(frame) frame.fadeCurrent = 255 frame.fadeDelta = -1 frame.fadeInterval = 4 frame.fadeLagTolerance = 2 frame.fadeObjective = 0 frame.fadeTime = time.time() * 1000 frame.fadeDestroy = destroy frame.Bind(wx.EVT_TIMER, DamnCurry(DamnFadeCycle, frame), frame.fadeTimer) frame.fadeTimer.Start(frame.fadeInterval) def DamnFadeCycle(frame, event=None): try: frame.SetTransparent(frame.fadeCurrent) except: pass newTime = time.time() * 1000 if newTime - frame.fadeTime > frame.fadeInterval * frame.fadeLagTolerance: frame.fadeDelta *= 2 # Increase fade delta to make up for machine slowness. 
frame.fadeLagTolerance *= 2 frame.fadeTime = newTime frame.fadeCurrent += frame.fadeDelta if (frame.fadeDelta > 0 and frame.fadeCurrent >= frame.fadeObjective) or (frame.fadeDelta < 0 and frame.fadeCurrent <= frame.fadeObjective): try: frame.SetTransparent(frame.fadeObjective) except: pass frame.fadeTimer.Stop() if frame.fadeDestroy: frame.Destroy() class DamnFrame(wx.Frame): def fadeIn(self): DamnFadeIn(self) def fadeOut(self, destroy=True): DamnFadeOut(self, destroy) # Now load UI stuff from dEvent import * from dWidgets import * from dPrefEditor import * from dDoneDialog import * from dAddURLDialog import * from dAboutDialog import * from dReportBug import * from dBrowser import * from dVideoHistory import * from dMainFrame import *
gpl-3.0
luotao1/Paddle
python/paddle/fluid/tests/unittests/test_conv2d_op.py
1
52199
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import numpy as np import paddle import paddle.fluid.core as core import paddle.fluid as fluid from op_test import OpTest from paddle.fluid import Program, program_guard def conv2d_forward_naive(input, filter, group, conv_param, padding_algorithm='EXPLICIT', data_format='NCHW'): if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]: raise ValueError("Unknown Attr(padding_algorithm): '%s'. " "It can only be 'SAME' or 'VALID'." % str(padding_algorithm)) if data_format not in ["NCHW", "NHWC"]: raise ValueError("Unknown Attr(data_format): '%s' ." "It can only be 'NCHW' or 'NHWC'." 
% str(data_format)) channel_last = (data_format == "NHWC") if channel_last: input = np.transpose(input, [0, 3, 1, 2]) in_n, in_c, in_h, in_w = input.shape f_n, f_c, f_h, f_w = filter.shape out_n = in_n out_c = f_n assert f_c * group == in_c assert np.mod(out_c, group) == 0 sub_out_c = out_c // group sub_f_n = f_n // group stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[ 'dilation'] # update pad and dilation def _get_padding_with_SAME(input_shape, pool_size, pool_stride): padding = [] for input_size, filter_size, stride_size in zip(input_shape, pool_size, pool_stride): out_size = int((input_size + stride_size - 1) / stride_size) pad_sum = np.max(( (out_size - 1) * stride_size + filter_size - input_size, 0)) pad_0 = int(pad_sum / 2) pad_1 = int(pad_sum - pad_0) padding.append(pad_0) padding.append(pad_1) return padding ksize = filter.shape[2:4] if padding_algorithm == "VALID": pad = [0, 0, 0, 0] elif padding_algorithm == "SAME": dilation = [1, 1] input_data_shape = input.shape[2:4] pad = _get_padding_with_SAME(input_data_shape, ksize, stride) pad_h_0, pad_h_1 = pad[0], pad[0] pad_w_0, pad_w_1 = pad[1], pad[1] if len(pad) == 4: pad_h_0, pad_h_1 = pad[0], pad[1] pad_w_0, pad_w_1 = pad[2], pad[3] out_h = 1 + (in_h + pad_h_0 + pad_h_1 - (dilation[0] * (f_h - 1) + 1)) // stride[0] out_w = 1 + (in_w + pad_w_0 + pad_w_1 - (dilation[1] * (f_w - 1) + 1)) // stride[1] out = np.zeros((out_n, out_c, out_h, out_w)) d_bolck_h = (dilation[0] * (f_h - 1) + 1) d_bolck_w = (dilation[1] * (f_w - 1) + 1) input_pad = np.pad(input, ((0, 0), (0, 0), (pad_h_0, pad_h_1), (pad_w_0, pad_w_1)), mode='constant', constant_values=0) filter_dilation = np.zeros((f_n, f_c, d_bolck_h, d_bolck_w)) filter_dilation[:, :, 0:d_bolck_h:dilation[0], 0:d_bolck_w:dilation[ 1]] = filter for i in range(out_h): for j in range(out_w): for g in range(group): input_pad_masked = \ input_pad[:, g * f_c:(g + 1) * f_c, i * stride[0]:i * stride[0] + d_bolck_h, j * stride[1]:j * stride[1] + 
d_bolck_w] f_sub = filter_dilation[g * sub_f_n:(g + 1) * sub_f_n, :, :, :] # sub_f_n == sub_out_c for k in range(sub_out_c): # Multiplication of Corresponding Elements, then sum all out[:, g * sub_out_c + k, i, j] = \ np.sum(input_pad_masked * f_sub[k, :, :, :], axis=(1, 2, 3)) if channel_last: out = np.transpose(out, [0, 2, 3, 1]) return out, in_n, out_h, out_w, out_c def create_test_cudnn_class(parent): @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestCUDNNCase(parent): def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float32 if core.is_compiled_with_rocm( ) else np.float64 cls_name = "{0}_{1}".format(parent.__name__, "CUDNN") TestCUDNNCase.__name__ = cls_name globals()[cls_name] = TestCUDNNCase def create_test_cudnn_fp16_class(parent, grad_check=True): @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestConv2DCUDNNFp16(parent): def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=2e-2) def test_check_grad_no_filter(self): place = core.CUDAPlace(0) if core.is_float16_supported(place) and grad_check: self.check_grad_with_place( place, ['Input'], 'Output', no_grad_set=set(['Filter'])) def test_check_grad_no_input(self): place = core.CUDAPlace(0) if core.is_float16_supported(place) and grad_check: self.check_grad_with_place( place, ['Filter'], 'Output', no_grad_set=set(['Input'])) cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16") TestConv2DCUDNNFp16.__name__ = cls_name globals()[cls_name] = TestConv2DCUDNNFp16 def create_test_channel_last_class(parent): class TestChannelLastCase(parent): def init_data_format(self): self.data_format = "NHWC" def init_test_case_2(self): N, C, H, W = self.input_size self.input_size = [N, H, W, C] cls_name = "{0}_{1}".format(parent.__name__, 
"ChannelLast") TestChannelLastCase.__name__ = cls_name globals()[cls_name] = TestChannelLastCase def create_test_cudnn_channel_last_class(parent): @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestCudnnChannelLastCase(parent): def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float32 if core.is_compiled_with_rocm( ) else np.float64 def init_data_format(self): self.data_format = "NHWC" def init_test_case_2(self): N, C, H, W = self.input_size self.input_size = [N, H, W, C] cls_name = "{0}_{1}".format(parent.__name__, "CudnnChannelLast") TestCudnnChannelLastCase.__name__ = cls_name globals()[cls_name] = TestCudnnChannelLastCase def create_test_cudnn_channel_last_fp16_class(parent, grad_check=True): @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestCudnnChannelLastFp16(parent): def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float16 def test_check_output(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, atol=2e-2) def test_check_grad_no_filter(self): place = core.CUDAPlace(0) if core.is_float16_supported(place) and grad_check: self.check_grad_with_place( place, ['Input'], 'Output', no_grad_set=set(['Filter'])) def test_check_grad_no_input(self): place = core.CUDAPlace(0) if core.is_float16_supported(place) and grad_check: self.check_grad_with_place( place, ['Filter'], 'Output', no_grad_set=set(['Input'])) def init_data_format(self): self.data_format = "NHWC" def init_test_case_2(self): N, C, H, W = self.input_size self.input_size = [N, H, W, C] cls_name = "{0}_{1}".format(parent.__name__, "CudnnChannelLastFp16") TestCudnnChannelLastFp16.__name__ = cls_name globals()[cls_name] = TestCudnnChannelLastFp16 def create_test_padding_SAME_class(parent): class TestPaddingSMAECase(parent): def init_paddings(self): self.pad = [0, 0] self.padding_algorithm = "SAME" 
cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp") TestPaddingSMAECase.__name__ = cls_name globals()[cls_name] = TestPaddingSMAECase def create_test_padding_VALID_class(parent): class TestPaddingVALIDCase(parent): def init_paddings(self): self.pad = [1, 1] self.padding_algorithm = "VALID" cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp") TestPaddingVALIDCase.__name__ = cls_name globals()[cls_name] = TestPaddingVALIDCase def create_test_cudnn_padding_SAME_class(parent): @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestCUDNNPaddingSMAECase(parent): def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float32 if core.is_compiled_with_rocm( ) else np.float64 def init_paddings(self): self.pad = [1, 1] self.padding_algorithm = "SAME" cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingSAMEOp") TestCUDNNPaddingSMAECase.__name__ = cls_name globals()[cls_name] = TestCUDNNPaddingSMAECase def create_test_cudnn_padding_VALID_class(parent): @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestCUDNNPaddingVALIDCase(parent): def init_kernel_type(self): self.use_cudnn = True self.dtype = np.float32 if core.is_compiled_with_rocm( ) else np.float64 def init_paddings(self): self.pad = [1, 1] self.padding_algorithm = "VALID" cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingVALIDOp") TestCUDNNPaddingVALIDCase.__name__ = cls_name globals()[cls_name] = TestCUDNNPaddingVALIDCase class TestConv2DOp(OpTest): def setUp(self): self.op_type = "conv2d" self.use_cudnn = False self.exhaustive_search = False self.use_cuda = False self.use_mkldnn = False self.fuse_relu_before_depthwise_conv = False self.data_format = "AnyLayout" self.dtype = np.float64 self.init_kernel_type() self.init_group() self.init_dilation() self.init_test_case() conv2d_param = { 'stride': self.stride, 'pad': self.pad, 'dilation': self.dilations } input = 
np.random.random(self.input_size).astype(self.dtype) if not self.has_cuda(): self.fuse_relu_before_depthwise_conv = False if self.fuse_relu_before_depthwise_conv: input = input - 0.5 input -= (input < 0) * 0.1 input += (input >= 0) * 0.1 input2 = np.maximum(input, 0.0) else: input2 = input filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype) output, _, _, _, _ = conv2d_forward_naive(input2, filter, self.groups, conv2d_param) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) } self.attrs = { 'strides': self.stride, 'paddings': self.pad, 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, 'use_mkldnn': self.use_mkldnn, 'data_format': self.data_format, 'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv, 'exhaustive_search': self.exhaustive_search } self.outputs = {'Output': output} def has_cuda(self): return core.is_compiled_with_cuda() and (self.use_cudnn or self.use_cuda) def test_check_output(self): place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode self.check_output_with_place( place, atol=1e-5, check_dygraph=(self.use_mkldnn == False)) def test_check_grad(self): if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and self.no_need_check_grad == True): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode self.check_grad_with_place( place, {'Input', 'Filter'}, 'Output', max_relative_error=0.02, check_dygraph=(self.use_mkldnn == False)) def test_check_grad_no_filter(self): if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and self.no_need_check_grad == True): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode self.check_grad_with_place( place, 
['Input'], 'Output', max_relative_error=0.02, no_grad_set=set(['Filter']), check_dygraph=(self.use_mkldnn == False)) def test_check_grad_no_input(self): if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and self.no_need_check_grad == True): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode self.check_grad_with_place( place, ['Filter'], 'Output', no_grad_set=set(['Input']), check_dygraph=(self.use_mkldnn == False)) def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [6, f_c, 3, 3] def init_test_case_2(self): pass def init_dilation(self): self.dilations = [1, 1] def init_group(self): self.groups = 1 def init_kernel_type(self): pass class TestWithPad(TestConv2DOp): def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [6, f_c, 3, 3] class TestWithStride(TestConv2DOp): def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] self.input_size = [2, 3, 6, 6] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [6, f_c, 3, 3] class TestWithGroup(TestConv2DOp): def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW self.group = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [18, f_c, 3, 3] class TestWith1x1(TestConv2DOp): def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [120, f_c, 1, 1] def init_group(self): 
self.groups = 3 class TestWithDepthWise3x3(TestConv2DOp): def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] self.input_size = [3, 4, 10, 10] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [12, f_c, 3, 3] def init_dilation(self): self.dilations = [2, 2] def init_group(self): self.groups = 4 class TestWithDepthWise5x5(TestConv2DOp): def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] self.input_size = [2, 4, 10, 10] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [8, f_c, 5, 5] def init_group(self): self.groups = 4 class TestWithDepthWise7x7(TestConv2DOp): def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] self.input_size = [2, 8, 10, 10] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [16, f_c, 7, 7] def init_group(self): self.groups = 8 class TestWithDilation(TestConv2DOp): def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] self.input_size = [2, 3, 10, 10] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [12, f_c, 3, 3] def init_dilation(self): self.dilations = [2, 2] def init_group(self): self.groups = 3 class TestWithInput1x1Filter1x1(TestConv2DOp): def init_test_case(self): self.pad = [0, 0] self.stride = [1, 1] self.input_size = [100, 3, 1, 1] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [120, f_c, 1, 1] def init_group(self): self.groups = 3 #----------------Conv2DCUDNN---------------- create_test_cudnn_class(TestConv2DOp) create_test_cudnn_class(TestWithPad) create_test_cudnn_class(TestWithStride) create_test_cudnn_class(TestWithGroup) create_test_cudnn_class(TestWith1x1) create_test_cudnn_class(TestWithInput1x1Filter1x1) #----------------Conv2DCUDNN 
fp16---------------- create_test_cudnn_fp16_class(TestConv2DOp, grad_check=False) create_test_cudnn_fp16_class(TestWithPad, grad_check=False) create_test_cudnn_fp16_class(TestWithStride, grad_check=False) create_test_cudnn_fp16_class(TestWithGroup, grad_check=False) create_test_cudnn_fp16_class(TestWith1x1, grad_check=False) create_test_cudnn_fp16_class(TestWithInput1x1Filter1x1, grad_check=False) #----------------TestDepthwiseConv ----- class TestDepthwiseConv(TestConv2DOp): def init_test_case(self): self.use_cuda = True self.pad = [1, 1] self.stride = [2, 2] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [12, f_c, 3, 3] self.op_type = "depthwise_conv2d" class TestDepthwiseConv2(TestConv2DOp): def init_test_case(self): self.use_cuda = True self.pad = [1, 1] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [12, f_c, 3, 3] self.op_type = "depthwise_conv2d" class TestDepthwiseConv3(TestConv2DOp): def init_test_case(self): self.use_cuda = True self.pad = [1, 1] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 3, 3] self.op_type = "depthwise_conv2d" class TestDepthwiseConvWithDilation(TestConv2DOp): def init_test_case(self): self.use_cuda = True self.pad = [1, 1] self.stride = [2, 2] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 self.dilations = [2, 2] assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 3, 3] self.op_type = "depthwise_conv2d" class TestDepthwiseConvWithDilation2(TestConv2DOp): def init_test_case(self): self.use_cuda = True self.pad = [1, 1] self.stride = [1, 1] self.input_size = [2, 
3, 5, 5] # NCHW self.groups = 3 self.dilations = [2, 2] assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 3, 3] self.op_type = "depthwise_conv2d" class TestDepthwiseConvandFuse(TestConv2DOp): def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True self.pad = [1, 1] self.stride = [2, 2] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [12, f_c, 3, 3] self.op_type = "depthwise_conv2d" class TestDepthwiseConv2andFuse(TestConv2DOp): def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True self.pad = [1, 1] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [12, f_c, 3, 3] self.op_type = "depthwise_conv2d" class TestDepthwiseConv3andFuse(TestConv2DOp): def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True self.pad = [1, 1] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 3, 3] self.op_type = "depthwise_conv2d" class TestDepthwiseConvWithDilationandFuse(TestConv2DOp): def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True self.pad = [1, 1] self.stride = [2, 2] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 self.dilations = [2, 2] assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 3, 3] self.op_type = "depthwise_conv2d" class TestDepthwiseConvWithDilation2andFuse(TestConv2DOp): def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True self.pad = [1, 1] self.stride 
= [1, 1] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 self.dilations = [2, 2] assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 3, 3] self.op_type = "depthwise_conv2d" class TestCUDNNExhaustiveSearch(TestConv2DOp): def init_kernel_type(self): self.use_cudnn = True self.exhaustive_search = True self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 class TestConv2DOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): def test_Variable(): # the input of conv2d must be Variable. x1 = fluid.create_lod_tensor( np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace()) fluid.layers.conv2d(x1, 1, 1) self.assertRaises(TypeError, test_Variable) def test_dtype(): # the input dtype of conv2d must be float16 or float32 or float64 # float16 only can be set on GPU place x2 = fluid.layers.data( name='x2', shape=[3, 4, 5, 6], dtype="int32") fluid.layers.conv2d(x2, 1, 1) self.assertRaises(TypeError, test_dtype) # Please Don't remove the following code. # Currently, CI use cudnn V5.0 which not support dilation conv. 
# class TestCUDNNWithDilation(TestWithDilation): # def init_op_type(self): # self.op_type = "conv_cudnn" # ---- test asymmetric padding ---- class TestConv2DOp_v2(OpTest): def setUp(self): self.op_type = "conv2d" self.use_cudnn = False self.exhaustive_search = False self.use_cuda = False self.use_mkldnn = False self.fuse_relu_before_depthwise_conv = False self.dtype = np.float64 self.init_kernel_type() self.init_group() self.init_dilation() self.init_data_format() self.init_test_case() self.init_paddings() self.init_test_case_2() conv2d_param = { 'stride': self.stride, 'pad': self.pad, 'dilation': self.dilations } input = np.random.random(self.input_size).astype(self.dtype) if not self.has_cuda(): self.fuse_relu_before_depthwise_conv = False if self.fuse_relu_before_depthwise_conv: input = input - 0.5 input -= (input < 0) * 0.1 input += (input >= 0) * 0.1 input2 = np.maximum(input, 0.0) else: input2 = input filter = np.random.uniform(-1, 1, self.filter_size).astype(self.dtype) output, _, _, _, _ = conv2d_forward_naive( input2, filter, self.groups, conv2d_param, self.padding_algorithm, self.data_format) output = output.astype(self.dtype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input), 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) } self.attrs = { 'strides': self.stride, 'paddings': self.pad, 'padding_algorithm': self.padding_algorithm, 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, 'use_mkldnn': self.use_mkldnn, 'data_format': self.data_format, 'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv, 'exhaustive_search': self.exhaustive_search } self.outputs = {'Output': output} def has_cuda(self): return core.is_compiled_with_cuda() and (self.use_cudnn or self.use_cuda) def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() self.check_output_with_place( place, atol=1e-5, check_dygraph=(self.use_mkldnn == 
False)) def test_check_grad(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() self.check_grad_with_place( place, {'Input', 'Filter'}, 'Output', max_relative_error=0.02, check_dygraph=(self.use_mkldnn == False)) def test_check_grad_no_filter(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() self.check_grad_with_place( place, ['Input'], 'Output', max_relative_error=0.02, no_grad_set=set(['Filter']), check_dygraph=(self.use_mkldnn == False)) def test_check_grad_no_input(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode if self.dtype == np.float16: return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() self.check_grad_with_place( place, ['Filter'], 'Output', no_grad_set=set(['Input']), check_dygraph=(self.use_mkldnn == False)) def init_test_case(self): self.pad = [0, 0] self.stride = [1, 2] self.input_size = [2, 3, 5, 5] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [6, f_c, 4, 3] def init_dilation(self): self.dilations = [1, 1] def init_group(self): self.groups = 1 def init_kernel_type(self): pass def init_paddings(self): self.pad = [0, 0] self.padding_algorithm = "EXPLICIT" def init_data_format(self): self.data_format = "NCHW" def init_test_case_2(self): pass class TestConv2DOp_AsyPadding(TestConv2DOp_v2): def init_paddings(self): self.pad = [0, 0, 1, 2] self.padding_algorithm = "EXPLICIT" class TestWithPad_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [6, f_c, 3, 3] def init_paddings(self): self.pad = [2, 1, 3, 2] self.padding_algorithm = "EXPLICIT" class 
TestWithStride_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.stride = [2, 2] self.input_size = [2, 3, 6, 6] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [6, f_c, 3, 3] def init_paddings(self): self.pad = [2, 1, 3, 2] self.padding_algorithm = "EXPLICIT" class TestWithGroup_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.pad = [0, 0] self.stride = [1, 2] self.input_size = [2, 3, 5, 5] # NCHW self.group = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 4, 3] class TestWith1x1_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [120, f_c, 1, 1] def init_group(self): self.groups = 3 def init_paddings(self): self.pad = [2, 2, 4, 0] self.padding_algorithm = "EXPLICIT" class TestWithDepthWise3x3_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.stride = [1, 1] self.input_size = [3, 4, 10, 10] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [16, f_c, 3, 3] def init_dilation(self): self.dilations = [2, 2] def init_group(self): self.groups = 4 def init_paddings(self): self.pad = [1, 3, 2, 1] self.padding_algorithm = "EXPLICIT" class TestWithDepthWise5x5_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 4, 10, 10] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [8, f_c, 5, 5] def init_group(self): self.groups = 4 def init_paddings(self): self.pad = [0, 1, 1, 0] self.padding_algorithm = "EXPLICIT" class TestWithDepthWise7x7_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.stride = [2, 2] self.input_size = [2, 8, 10, 10] # NCHW 
assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [16, f_c, 7, 7] def init_group(self): self.groups = 8 def init_paddings(self): self.pad = [1, 3, 4, 1] self.padding_algorithm = "EXPLICIT" class TestWithDilation_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.stride = [1, 1] self.input_size = [2, 3, 10, 10] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 3, 3] def init_dilation(self): self.dilations = [2, 2] def init_group(self): self.groups = 3 def init_paddings(self): self.pad = [0, 1, 3, 0] self.padding_algorithm = "EXPLICIT" class TestWithInput1x1Filter1x1_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.stride = [1, 1] self.input_size = [40, 3, 1, 1] # NCHW assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [120, f_c, 1, 1] def init_group(self): self.groups = 3 def init_paddings(self): self.pad = [0, 3, 4, 0] self.padding_algorithm = "EXPLICIT" create_test_cudnn_class(TestConv2DOp_AsyPadding) create_test_cudnn_class(TestWithPad_AsyPadding) create_test_cudnn_class(TestWithStride_AsyPadding) create_test_cudnn_class(TestWithGroup_AsyPadding) create_test_cudnn_class(TestWith1x1_AsyPadding) create_test_cudnn_class(TestWithInput1x1Filter1x1_AsyPadding) class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.use_cuda = True self.stride = [2, 2] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [12, f_c, 3, 3] self.op_type = "depthwise_conv2d" def init_paddings(self): self.pad = [1, 1, 0, 1] self.padding_algorithm = "EXPLICIT" class TestDepthwiseConv2_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.use_cuda = True self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 assert 
np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [12, f_c, 3, 3] self.op_type = "depthwise_conv2d" def init_paddings(self): self.pad = [0, 1, 0, 2] self.padding_algorithm = "EXPLICIT" class TestDepthwiseConv3_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.use_cuda = True self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 3, 3] self.op_type = "depthwise_conv2d" def init_paddings(self): self.pad = [1, 1, 0, 0] self.padding_algorithm = "EXPLICIT" class TestDepthwiseConvWithDilation_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.use_cuda = True self.pad = [1, 1] self.stride = [2, 2] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 self.dilations = [2, 2] assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 3, 3] self.op_type = "depthwise_conv2d" def init_paddings(self): self.pad = [1, 1, 2, 1] self.padding_algorithm = "EXPLICIT" class TestDepthwiseConvWithDilation2_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.use_cuda = True self.pad = [1, 1] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 self.dilations = [2, 2] assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 3, 3] self.op_type = "depthwise_conv2d" def init_paddings(self): self.pad = [0, 1, 1, 0] self.padding_algorithm = "EXPLICIT" class TestDepthwiseConvandFuse_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True self.pad = [1, 1] self.stride = [2, 2] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [12, f_c, 3, 3] self.op_type = 
"depthwise_conv2d" def init_paddings(self): self.pad = [2, 1, 2, 3] self.padding_algorithm = "EXPLICIT" class TestDepthwiseConv2andFuse_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True self.pad = [1, 1] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [12, f_c, 3, 3] self.op_type = "depthwise_conv2d" def init_paddings(self): self.pad = [1, 1, 1, 2] self.padding_algorithm = "EXPLICIT" class TestDepthwiseConv3andFuse_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True self.pad = [1, 1] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 3, 3] self.op_type = "depthwise_conv2d" def init_paddings(self): self.pad = [1, 2, 0, 2] self.padding_algorithm = "EXPLICIT" class TestDepthwiseConvWithDilationandFuse_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True self.pad = [1, 1] self.stride = [2, 2] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 self.dilations = [2, 2] assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [24, f_c, 3, 3] self.op_type = "depthwise_conv2d" def init_paddings(self): self.pad = [2, 1, 1, 0] self.padding_algorithm = "EXPLICIT" class TestDepthwiseConvWithDilation2andFuse_AsyPadding(TestConv2DOp_v2): def init_test_case(self): self.fuse_relu_before_depthwise_conv = True self.use_cuda = True self.pad = [1, 1] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW self.groups = 3 self.dilations = [2, 2] assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups 
self.filter_size = [24, f_c, 3, 3] self.op_type = "depthwise_conv2d" def init_paddings(self): self.pad = [1, 3, 1, 3] self.padding_algorithm = "EXPLICIT" #---------- test SAME VALID ----------- create_test_padding_SAME_class(TestConv2DOp_AsyPadding) create_test_padding_SAME_class(TestWithPad_AsyPadding) create_test_padding_SAME_class(TestWithStride_AsyPadding) create_test_padding_SAME_class(TestWithGroup_AsyPadding) create_test_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_padding_VALID_class(TestConv2DOp_AsyPadding) create_test_padding_VALID_class(TestWithPad_AsyPadding) create_test_padding_VALID_class(TestWithStride_AsyPadding) create_test_padding_VALID_class(TestWithGroup_AsyPadding) create_test_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_cudnn_padding_SAME_class(TestConv2DOp_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithPad_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithStride_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithGroup_AsyPadding) create_test_cudnn_padding_SAME_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_cudnn_padding_VALID_class(TestConv2DOp_AsyPadding) create_test_cudnn_padding_VALID_class(TestWithPad_AsyPadding) create_test_cudnn_padding_VALID_class(TestWithStride_AsyPadding) create_test_cudnn_padding_VALID_class(TestWithGroup_AsyPadding) create_test_cudnn_padding_VALID_class(TestWithInput1x1Filter1x1_AsyPadding) # depthwise conv2d create_test_padding_SAME_class(TestDepthwiseConv_AsyPadding) create_test_padding_SAME_class(TestDepthwiseConvWithDilation_AsyPadding) create_test_padding_SAME_class(TestDepthwiseConvandFuse_AsyPadding) create_test_padding_SAME_class(TestDepthwiseConvWithDilationandFuse_AsyPadding) create_test_padding_VALID_class(TestDepthwiseConv_AsyPadding) create_test_padding_VALID_class(TestDepthwiseConvWithDilation_AsyPadding) create_test_padding_VALID_class(TestDepthwiseConvandFuse_AsyPadding) 
create_test_padding_VALID_class(TestDepthwiseConvWithDilationandFuse_AsyPadding) # ------------ test channel last --------- create_test_channel_last_class(TestConv2DOp_AsyPadding) create_test_channel_last_class(TestWithPad_AsyPadding) create_test_channel_last_class(TestWithGroup_AsyPadding) create_test_channel_last_class(TestWith1x1_AsyPadding) create_test_channel_last_class(TestWithInput1x1Filter1x1_AsyPadding) create_test_channel_last_class(TestDepthwiseConv_AsyPadding) create_test_channel_last_class(TestDepthwiseConvWithDilation2_AsyPadding) create_test_channel_last_class(TestDepthwiseConvandFuse_AsyPadding) create_test_channel_last_class(TestDepthwiseConvWithDilationandFuse_AsyPadding) create_test_cudnn_channel_last_class(TestConv2DOp_AsyPadding) create_test_cudnn_channel_last_class(TestWithPad_AsyPadding) create_test_cudnn_channel_last_class(TestWithStride_AsyPadding) create_test_cudnn_channel_last_class(TestWithGroup_AsyPadding) create_test_cudnn_channel_last_class(TestWithDilation_AsyPadding) # ------------ depthwise conv2d in MIOPEN --------- if core.is_compiled_with_rocm(): create_test_cudnn_padding_SAME_class(TestDepthwiseConv_AsyPadding) create_test_cudnn_padding_SAME_class( TestDepthwiseConvWithDilation_AsyPadding) create_test_padding_VALID_class(TestDepthwiseConv_AsyPadding) create_test_padding_VALID_class(TestDepthwiseConvWithDilation_AsyPadding) create_test_cudnn_channel_last_class(TestDepthwiseConv_AsyPadding) create_test_cudnn_channel_last_class( TestDepthwiseConvWithDilation2_AsyPadding) create_test_cudnn_channel_last_fp16_class( TestConv2DOp_AsyPadding, grad_check=False) create_test_cudnn_channel_last_fp16_class( TestWithPad_AsyPadding, grad_check=False) create_test_cudnn_channel_last_fp16_class( TestWithStride_AsyPadding, grad_check=False) create_test_cudnn_channel_last_fp16_class( TestWithGroup_AsyPadding, grad_check=False) create_test_cudnn_channel_last_fp16_class( TestWithDilation_AsyPadding, grad_check=False) # --------- test python API 
--------------- class TestConv2DAPI(unittest.TestCase): def test_api(self): input_NHWC = fluid.layers.data( name="input_NHWC", shape=[2, 5, 5, 3], append_batch_size=False, dtype="float32") input_NCHW = fluid.layers.data( name="input_NCHW", shape=[2, 3, 5, 5], append_batch_size=False, dtype="float32") fluid.layers.conv2d( input=input_NHWC, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding=0, dilation=[1, 1], groups=1, data_format="NCHW") fluid.layers.conv2d( input=input_NCHW, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding=[1, 2, 1, 0], dilation=[1, 1], groups=1, data_format="NCHW") fluid.layers.conv2d( input=input_NCHW, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding=[[0, 0], [0, 0], [1, 1], [1, 1]], dilation=[1, 1], groups=1, data_format="NCHW") fluid.layers.conv2d( input=input_NHWC, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding=[[0, 0], [1, 1], [1, 1], [0, 0]], dilation=[1, 1], groups=1, data_format="NHWC") fluid.layers.conv2d( input=input_NCHW, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding="SAME", dilation=[1, 1], groups=1, data_format="NCHW") fluid.layers.conv2d( input=input_NCHW, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding="VALID", dilation=[1, 1], groups=1, data_format="NCHW") def test_depthwise_conv2d(self): x_var = paddle.uniform((2, 8, 8, 4), dtype='float32', min=-1., max=1.) 
conv = paddle.nn.Conv2D( in_channels=4, out_channels=4, kernel_size=(3, 3), groups=4, data_format='NHWC') y_var = conv(x_var) class TestConv2DAPI_Error(unittest.TestCase): def test_api(self): input = fluid.layers.data( name="input", shape=[2, 5, 5, 5], append_batch_size=False, dtype="float32") # ValueError: cudnn def run_1(): fluid.layers.conv2d( input=input, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding=0, dilation=[1, 1], groups=1, use_cudnn=[0], data_format="NCHW") self.assertRaises(ValueError, run_1) # ValueError: data_format def run_2(): fluid.layers.conv2d( input=input, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding=0, dilation=[1, 1], groups=1, use_cudnn=False, data_format="NCHWC") self.assertRaises(ValueError, run_2) # ValueError: padding def run_3(): fluid.layers.conv2d( input=input, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding="SAMEE", dilation=[1, 1], groups=1, use_cudnn=False, data_format="NCHW") self.assertRaises(ValueError, run_3) def run_4(): fluid.layers.conv2d( input=input, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding=[[0, 1], [0, 1], [0, 1], [0, 1]], dilation=[1, 1], groups=1, use_cudnn=False, data_format="NCHW") self.assertRaises(ValueError, run_4) def run_5(): fluid.layers.conv2d( input=input, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding=[[0, 1], [0, 1], [0, 1], [0, 1]], dilation=[1, 1], groups=1, use_cudnn=False, data_format="NHWC") self.assertRaises(ValueError, run_5) # ValueError: channel dimmention x = fluid.layers.data( name="x", shape=[2, 5, 5, -1], append_batch_size=False, dtype="float32") def run_6(): fluid.layers.conv2d( input=x, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding=0, dilation=[1, 1], groups=1, use_cudnn=False, data_format="NHWC") self.assertRaises(ValueError, run_6) # ValueError: groups def run_7(): fluid.layers.conv2d( input=input, num_filters=3, filter_size=[3, 3], stride=[1, 1], padding=0, dilation=[1, 1], groups=3, use_cudnn=False, 
data_format="NHWC") self.assertRaises(ValueError, run_7) # --------- test environment variable ------ @unittest.skipIf( not (core.is_compiled_with_cuda() or core.is_compiled_with_rocm()), "core is not compiled with CUDA or ROCM") class TestConv2DEnviron(unittest.TestCase): def run1(self, place): with fluid.program_guard(fluid.Program(), fluid.Program()): inputs = fluid.layers.data( shape=[2, 3, 5, 5], append_batch_size=False, name="inputs", dtype="float32") result = fluid.layers.conv2d( input=inputs, num_filters=4, filter_size=[3, 3], stride=[1, 1], padding=0, dilation=[1, 1], groups=1, data_format="NCHW") exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) fetches = exe.run(fluid.default_main_program(), feed={"inputs": self.input_np}, fetch_list=[result]) def run2(self, place): with fluid.dygraph.guard(place): inputs = fluid.dygraph.to_variable(self.input_np) conv = paddle.nn.Conv2D( in_channels=3, out_channels=4, kernel_size=(3, 3), data_format="NCHW") result = conv(inputs) def run3(self, place): with fluid.dygraph.guard(place): inputs = fluid.dygraph.to_variable(self.input_np) conv = paddle.fluid.dygraph.nn.Conv2D( num_channels=3, num_filters=4, filter_size=(3, 3), ) result = conv(inputs) def run_all(self, place): self.run1(place) self.run2(place) self.run3(place) def test_environ(self): self.input_np = np.random.random([2, 3, 5, 5]).astype("float32") for place in [paddle.CPUPlace(), paddle.CUDAPlace(0)]: fluid.set_flags({'FLAGS_conv2d_disable_cudnn': False}) self.run_all(place) fluid.set_flags({'FLAGS_conv2d_disable_cudnn': True}) self.run_all(place) if __name__ == '__main__': unittest.main()
apache-2.0
hkawasaki/kawasaki-aio8-0
cms/djangoapps/contentstore/views/tests/test_transcripts.py
10
26441
"""Tests for items views.""" import os import json import tempfile from uuid import uuid4 import copy import textwrap from pymongo import MongoClient from django.core.urlresolvers import reverse from django.test.utils import override_settings from django.conf import settings from xmodule.video_module import transcripts_utils from contentstore.tests.utils import CourseTestCase from cache_toolbox.core import del_cached_content from xmodule.modulestore.django import modulestore from xmodule.contentstore.django import contentstore, _CONTENTSTORE from xmodule.contentstore.content import StaticContent from xmodule.exceptions import NotFoundError from xmodule.modulestore.django import loc_mapper from xmodule.modulestore.locator import BlockUsageLocator from contentstore.tests.modulestore_config import TEST_MODULESTORE TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE) TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex @override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE, MODULESTORE=TEST_MODULESTORE) class Basetranscripts(CourseTestCase): """Base test class for transcripts tests.""" org = 'MITx' number = '999' def clear_subs_content(self): """Remove, if transcripts content exists.""" for youtube_id in self.get_youtube_ids().values(): filename = 'subs_{0}.srt.sjson'.format(youtube_id) content_location = StaticContent.compute_location( self.org, self.number, filename) try: content = contentstore().find(content_location) contentstore().delete(content.get_id()) except NotFoundError: pass def setUp(self): """Create initial data.""" super(Basetranscripts, self).setUp() self.unicode_locator = unicode(loc_mapper().translate_location( self.course.location.course_id, self.course.location, False, True )) # Add video module data = { 'parent_locator': self.unicode_locator, 'category': 'video', 'type': 'video' } resp = self.client.ajax_post('/xblock', data) self.item_locator, self.item_location = self._get_locator(resp) 
self.assertEqual(resp.status_code, 200) self.item = modulestore().get_item(self.item_location) # hI10vDNYz4M - valid Youtube ID with transcripts. # JMD_ifUUfsU, AKqURZnYqpk, DYpADpL7jAY - valid Youtube IDs without transcripts. self.item.data = '<video youtube="0.75:JMD_ifUUfsU,1.0:hI10vDNYz4M,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY" />' modulestore().update_item(self.item, self.user.id) self.item = modulestore().get_item(self.item_location) # Remove all transcripts for current module. self.clear_subs_content() def _get_locator(self, resp): """ Returns the locator and old-style location (as a string) from the response returned by a create operation. """ locator = json.loads(resp.content).get('locator') return locator, loc_mapper().translate_locator_to_location(BlockUsageLocator(locator)).url() def get_youtube_ids(self): """Return youtube speeds and ids.""" item = modulestore().get_item(self.item_location) return { 0.75: item.youtube_id_0_75, 1: item.youtube_id_1_0, 1.25: item.youtube_id_1_25, 1.5: item.youtube_id_1_5 } def tearDown(self): MongoClient().drop_database(TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db']) _CONTENTSTORE.clear() class TestUploadtranscripts(Basetranscripts): """Tests for '/transcripts/upload' url.""" def setUp(self): """Create initial data.""" super(TestUploadtranscripts, self).setUp() self.good_srt_file = tempfile.NamedTemporaryFile(suffix='.srt') self.good_srt_file.write(textwrap.dedent(""" 1 00:00:10,500 --> 00:00:13,000 Elephant's Dream 2 00:00:15,000 --> 00:00:18,000 At the left we can see... """)) self.good_srt_file.seek(0) self.bad_data_srt_file = tempfile.NamedTemporaryFile(suffix='.srt') self.bad_data_srt_file.write('Some BAD data') self.bad_data_srt_file.seek(0) self.bad_name_srt_file = tempfile.NamedTemporaryFile(suffix='.BAD') self.bad_name_srt_file.write(textwrap.dedent(""" 1 00:00:10,500 --> 00:00:13,000 Elephant's Dream 2 00:00:15,000 --> 00:00:18,000 At the left we can see... 
""")) self.bad_name_srt_file.seek(0) def test_success_video_module_source_subs_uploading(self): self.item.data = textwrap.dedent(""" <video youtube=""> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/> </video> """) modulestore().update_item(self.item, self.user.id) link = reverse('upload_transcripts') filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0] resp = self.client.post(link, { 'locator': self.item_locator, 'transcript-file': self.good_srt_file, 'video_list': json.dumps([{ 'type': 'html5', 'video': filename, 'mode': 'mp4', }]) }) self.assertEqual(resp.status_code, 200) self.assertEqual(json.loads(resp.content).get('status'), 'Success') item = modulestore().get_item(self.item_location) self.assertEqual(item.sub, filename) content_location = StaticContent.compute_location( self.org, self.number, 'subs_{0}.srt.sjson'.format(filename)) self.assertTrue(contentstore().find(content_location)) def test_fail_data_without_id(self): link = reverse('upload_transcripts') resp = self.client.post(link, {'transcript-file': self.good_srt_file}) self.assertEqual(resp.status_code, 400) self.assertEqual(json.loads(resp.content).get('status'), 'POST data without "locator" form data.') def test_fail_data_without_file(self): link = reverse('upload_transcripts') resp = self.client.post(link, {'locator': self.item_locator}) self.assertEqual(resp.status_code, 400) self.assertEqual(json.loads(resp.content).get('status'), 'POST data without "file" form data.') def test_fail_data_with_bad_locator(self): # Test for raising `InvalidLocationError` exception. 
link = reverse('upload_transcripts') filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0] resp = self.client.post(link, { 'locator': 'BAD_LOCATOR', 'transcript-file': self.good_srt_file, 'video_list': json.dumps([{ 'type': 'html5', 'video': filename, 'mode': 'mp4', }]) }) self.assertEqual(resp.status_code, 400) self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.") # Test for raising `ItemNotFoundError` exception. link = reverse('upload_transcripts') filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0] resp = self.client.post(link, { 'locator': '{0}_{1}'.format(self.item_locator, 'BAD_LOCATOR'), 'transcript-file': self.good_srt_file, 'video_list': json.dumps([{ 'type': 'html5', 'video': filename, 'mode': 'mp4', }]) }) self.assertEqual(resp.status_code, 400) self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.") def test_fail_for_non_video_module(self): # non_video module: setup data = { 'parent_locator': self.unicode_locator, 'category': 'non_video', 'type': 'non_video' } resp = self.client.ajax_post('/xblock', data) item_locator, item_location = self._get_locator(resp) item = modulestore().get_item(item_location) item.data = '<non_video youtube="0.75:JMD_ifUUfsU,1.0:hI10vDNYz4M" />' modulestore().update_item(item, self.user.id) # non_video module: testing link = reverse('upload_transcripts') filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0] resp = self.client.post(link, { 'locator': item_locator, 'transcript-file': self.good_srt_file, 'video_list': json.dumps([{ 'type': 'html5', 'video': filename, 'mode': 'mp4', }]) }) self.assertEqual(resp.status_code, 400) self.assertEqual(json.loads(resp.content).get('status'), 'Transcripts are supported only for "video" modules.') def test_fail_bad_xml(self): self.item.data = '<<<video youtube="0.75:JMD_ifUUfsU,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY" />' modulestore().update_item(self.item, 
self.user.id) link = reverse('upload_transcripts') filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0] resp = self.client.post(link, { 'locator': self.item_locator, 'transcript-file': self.good_srt_file, 'video_list': json.dumps([{ 'type': 'html5', 'video': filename, 'mode': 'mp4', }]) }) self.assertEqual(resp.status_code, 400) # incorrect xml produces incorrect item category error self.assertEqual(json.loads(resp.content).get('status'), 'Transcripts are supported only for "video" modules.') def test_fail_bad_data_srt_file(self): link = reverse('upload_transcripts') filename = os.path.splitext(os.path.basename(self.bad_data_srt_file.name))[0] resp = self.client.post(link, { 'locator': self.item_locator, 'transcript-file': self.bad_data_srt_file, 'video_list': json.dumps([{ 'type': 'html5', 'video': filename, 'mode': 'mp4', }]) }) self.assertEqual(resp.status_code, 400) self.assertEqual(json.loads(resp.content).get('status'), 'Something wrong with SubRip transcripts file during parsing.') def test_fail_bad_name_srt_file(self): link = reverse('upload_transcripts') filename = os.path.splitext(os.path.basename(self.bad_name_srt_file.name))[0] resp = self.client.post(link, { 'locator': self.item_locator, 'transcript-file': self.bad_name_srt_file, 'video_list': json.dumps([{ 'type': 'html5', 'video': filename, 'mode': 'mp4', }]) }) self.assertEqual(resp.status_code, 400) self.assertEqual(json.loads(resp.content).get('status'), 'We support only SubRip (*.srt) transcripts format.') def test_undefined_file_extension(self): srt_file = tempfile.NamedTemporaryFile(suffix='') srt_file.write(textwrap.dedent(""" 1 00:00:10,500 --> 00:00:13,000 Elephant's Dream 2 00:00:15,000 --> 00:00:18,000 At the left we can see... 
""")) srt_file.seek(0) link = reverse('upload_transcripts') filename = os.path.splitext(os.path.basename(srt_file.name))[0] resp = self.client.post(link, { 'locator': self.item_locator, 'transcript-file': srt_file, 'video_list': json.dumps([{ 'type': 'html5', 'video': filename, 'mode': 'mp4', }]) }) self.assertEqual(resp.status_code, 400) self.assertEqual(json.loads(resp.content).get('status'), 'Undefined file extension.') def tearDown(self): super(TestUploadtranscripts, self).tearDown() self.good_srt_file.close() self.bad_data_srt_file.close() self.bad_name_srt_file.close() class TestDownloadtranscripts(Basetranscripts): """Tests for '/transcripts/download' url.""" def save_subs_to_store(self, subs, subs_id): """Save transcripts into `StaticContent`.""" filedata = json.dumps(subs, indent=2) mime_type = 'application/json' filename = 'subs_{0}.srt.sjson'.format(subs_id) content_location = StaticContent.compute_location( self.org, self.number, filename) content = StaticContent(content_location, filename, mime_type, filedata) contentstore().save(content) del_cached_content(content_location) return content_location def test_success_download_youtube(self): self.item.data = '<video youtube="1:JMD_ifUUfsU" />' modulestore().update_item(self.item, self.user.id) subs = { 'start': [100, 200, 240], 'end': [200, 240, 380], 'text': [ 'subs #1', 'subs #2', 'subs #3' ] } self.save_subs_to_store(subs, 'JMD_ifUUfsU') link = reverse('download_transcripts') resp = self.client.get(link, {'locator': self.item_locator, 'subs_id': "JMD_ifUUfsU"}) self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content, """0\n00:00:00,100 --> 00:00:00,200\nsubs #1\n\n1\n00:00:00,200 --> 00:00:00,240\nsubs #2\n\n2\n00:00:00,240 --> 00:00:00,380\nsubs #3\n\n""") def test_success_download_nonyoutube(self): subs_id = str(uuid4()) self.item.data = textwrap.dedent(""" <video youtube="" sub="{}"> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/> <source 
src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/> </video> """.format(subs_id)) modulestore().update_item(self.item, self.user.id) subs = { 'start': [100, 200, 240], 'end': [200, 240, 380], 'text': [ 'subs #1', 'subs #2', 'subs #3' ] } self.save_subs_to_store(subs, subs_id) link = reverse('download_transcripts') resp = self.client.get(link, {'locator': self.item_locator, 'subs_id': subs_id}) self.assertEqual(resp.status_code, 200) self.assertEqual( resp.content, '0\n00:00:00,100 --> 00:00:00,200\nsubs #1\n\n1\n00:00:00,200 --> ' '00:00:00,240\nsubs #2\n\n2\n00:00:00,240 --> 00:00:00,380\nsubs #3\n\n' ) transcripts_utils.remove_subs_from_store(subs_id, self.item) def test_fail_data_without_file(self): link = reverse('download_transcripts') resp = self.client.get(link, {'locator': ''}) self.assertEqual(resp.status_code, 404) resp = self.client.get(link, {}) self.assertEqual(resp.status_code, 404) def test_fail_data_with_bad_locator(self): # Test for raising `InvalidLocationError` exception. link = reverse('download_transcripts') resp = self.client.get(link, {'locator': 'BAD_LOCATOR'}) self.assertEqual(resp.status_code, 404) # Test for raising `ItemNotFoundError` exception. 
link = reverse('download_transcripts') resp = self.client.get(link, {'locator': '{0}_{1}'.format(self.item_locator, 'BAD_LOCATOR')}) self.assertEqual(resp.status_code, 404) def test_fail_for_non_video_module(self): # Video module: setup data = { 'parent_locator': self.unicode_locator, 'category': 'videoalpha', 'type': 'videoalpha' } resp = self.client.ajax_post('/xblock', data) item_locator, item_location = self._get_locator(resp) subs_id = str(uuid4()) item = modulestore().get_item(item_location) item.data = textwrap.dedent(""" <videoalpha youtube="" sub="{}"> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/> </videoalpha> """.format(subs_id)) modulestore().update_item(item, self.user.id) subs = { 'start': [100, 200, 240], 'end': [200, 240, 380], 'text': [ 'subs #1', 'subs #2', 'subs #3' ] } self.save_subs_to_store(subs, subs_id) link = reverse('download_transcripts') resp = self.client.get(link, {'locator': item_locator}) self.assertEqual(resp.status_code, 404) def test_fail_nonyoutube_subs_dont_exist(self): self.item.data = textwrap.dedent(""" <video youtube="" sub="UNDEFINED"> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/> </video> """) modulestore().update_item(self.item, self.user.id) link = reverse('download_transcripts') resp = self.client.get(link, {'locator': self.item_locator}) self.assertEqual(resp.status_code, 404) def test_empty_youtube_attr_and_sub_attr(self): self.item.data = textwrap.dedent(""" <video youtube=""> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/> <source 
src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/> </video> """) modulestore().update_item(self.item, self.user.id) link = reverse('download_transcripts') resp = self.client.get(link, {'locator': self.item_locator}) self.assertEqual(resp.status_code, 404) def test_fail_bad_sjson_subs(self): subs_id = str(uuid4()) self.item.data = textwrap.dedent(""" <video youtube="" sub="{}"> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/> </video> """.format(subs_id)) modulestore().update_item(self.item, self.user.id) subs = { 'start': [100, 200, 240], 'end': [200, 240, 380], 'text': [ 'subs #1' ] } self.save_subs_to_store(subs, 'JMD_ifUUfsU') link = reverse('download_transcripts') resp = self.client.get(link, {'locator': self.item_locator}) self.assertEqual(resp.status_code, 404) class TestChecktranscripts(Basetranscripts): """Tests for '/transcripts/check' url.""" def save_subs_to_store(self, subs, subs_id): """Save transcripts into `StaticContent`.""" filedata = json.dumps(subs, indent=2) mime_type = 'application/json' filename = 'subs_{0}.srt.sjson'.format(subs_id) content_location = StaticContent.compute_location( self.org, self.number, filename) content = StaticContent(content_location, filename, mime_type, filedata) contentstore().save(content) del_cached_content(content_location) return content_location def test_success_download_nonyoutube(self): subs_id = str(uuid4()) self.item.data = textwrap.dedent(""" <video youtube="" sub="{}"> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/> </video> """.format(subs_id)) modulestore().update_item(self.item, self.user.id) subs = { 'start': [100, 200, 240], 'end': [200, 240, 
380], 'text': [ 'subs #1', 'subs #2', 'subs #3' ] } self.save_subs_to_store(subs, subs_id) data = { 'locator': self.item_locator, 'videos': [{ 'type': 'html5', 'video': subs_id, 'mode': 'mp4', }] } link = reverse('check_transcripts') resp = self.client.get(link, {'data': json.dumps(data)}) self.assertEqual(resp.status_code, 200) self.assertDictEqual( json.loads(resp.content), { u'status': u'Success', u'subs': unicode(subs_id), u'youtube_local': False, u'is_youtube_mode': False, u'youtube_server': False, u'command': u'found', u'current_item_subs': unicode(subs_id), u'youtube_diff': True, u'html5_local': [unicode(subs_id)], u'html5_equal': False, } ) transcripts_utils.remove_subs_from_store(subs_id, self.item) def test_check_youtube(self): self.item.data = '<video youtube="1:JMD_ifUUfsU" />' modulestore().update_item(self.item, self.user.id) subs = { 'start': [100, 200, 240], 'end': [200, 240, 380], 'text': [ 'subs #1', 'subs #2', 'subs #3' ] } self.save_subs_to_store(subs, 'JMD_ifUUfsU') link = reverse('check_transcripts') data = { 'locator': self.item_locator, 'videos': [{ 'type': 'youtube', 'video': 'JMD_ifUUfsU', 'mode': 'youtube', }] } resp = self.client.get(link, {'data': json.dumps(data)}) self.assertEqual(resp.status_code, 200) self.assertDictEqual( json.loads(resp.content), { u'status': u'Success', u'subs': u'JMD_ifUUfsU', u'youtube_local': True, u'is_youtube_mode': True, u'youtube_server': False, u'command': u'found', u'current_item_subs': None, u'youtube_diff': True, u'html5_local': [], u'html5_equal': False, } ) def test_fail_data_without_id(self): link = reverse('check_transcripts') data = { 'locator': '', 'videos': [{ 'type': '', 'video': '', 'mode': '', }] } resp = self.client.get(link, {'data': json.dumps(data)}) self.assertEqual(resp.status_code, 400) self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.") def test_fail_data_with_bad_locator(self): # Test for raising `InvalidLocationError` exception. 
link = reverse('check_transcripts') data = { 'locator': '', 'videos': [{ 'type': '', 'video': '', 'mode': '', }] } resp = self.client.get(link, {'data': json.dumps(data)}) self.assertEqual(resp.status_code, 400) self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.") # Test for raising `ItemNotFoundError` exception. data = { 'locator': '{0}_{1}'.format(self.item_locator, 'BAD_LOCATOR'), 'videos': [{ 'type': '', 'video': '', 'mode': '', }] } resp = self.client.get(link, {'data': json.dumps(data)}) self.assertEqual(resp.status_code, 400) self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.") def test_fail_for_non_video_module(self): # Not video module: setup data = { 'parent_locator': self.unicode_locator, 'category': 'not_video', 'type': 'not_video' } resp = self.client.ajax_post('/xblock', data) item_locator, item_location = self._get_locator(resp) subs_id = str(uuid4()) item = modulestore().get_item(item_location) item.data = textwrap.dedent(""" <not_video youtube="" sub="{}"> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/> <source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/> </videoalpha> """.format(subs_id)) modulestore().update_item(item, self.user.id) subs = { 'start': [100, 200, 240], 'end': [200, 240, 380], 'text': [ 'subs #1', 'subs #2', 'subs #3' ] } self.save_subs_to_store(subs, subs_id) data = { 'locator': item_locator, 'videos': [{ 'type': '', 'video': '', 'mode': '', }] } link = reverse('check_transcripts') resp = self.client.get(link, {'data': json.dumps(data)}) self.assertEqual(resp.status_code, 400) self.assertEqual(json.loads(resp.content).get('status'), 'Transcripts are supported only for "video" modules.')
agpl-3.0
RafaelTorrealba/odoo
addons/stock/wizard/__init__.py
323
1149
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import stock_move import stock_return_picking import stock_change_product_qty import make_procurement_product import orderpoint_procurement import stock_transfer_details
agpl-3.0
bdelliott/wordgame
web/django/core/cache/backends/dummy.py
234
1218
"Dummy cache backend"

from django.core.cache.backends.base import BaseCache


class DummyCache(BaseCache):
    """A cache that never stores anything.

    Writes are silently discarded and reads always miss, but keys are
    still built and validated exactly like a real backend, so code that
    uses the cache API keeps working with caching effectively disabled.
    """

    def __init__(self, host, *args, **kwargs):
        # ``host`` is part of the shared backend signature but is unused here.
        BaseCache.__init__(self, *args, **kwargs)

    def _check_key(self, key, version):
        # Build the versioned key and run the standard validation checks,
        # exactly as a real backend would before touching storage.
        self.validate_key(self.make_key(key, version=version))

    def add(self, key, value, timeout=None, version=None):
        self._check_key(key, version)
        return True  # claim success; nothing is actually stored

    def get(self, key, default=None, version=None):
        self._check_key(key, version)
        return default  # every read is a miss

    def set(self, key, value, timeout=None, version=None):
        self._check_key(key, version)

    def delete(self, key, version=None):
        self._check_key(key, version)

    def get_many(self, keys, version=None):
        return {}  # no key is ever present

    def has_key(self, key, version=None):
        self._check_key(key, version)
        return False

    def set_many(self, data, version=None):
        pass

    def delete_many(self, keys, version=None):
        pass

    def clear(self):
        pass


# For backwards compatibility
class CacheClass(DummyCache):
    pass
mit
gangadhar-kadam/latestchurcherp
erpnext/patches/v4_0/global_defaults_to_system_settings.py
119
1369
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import frappe from collections import Counter from frappe.core.doctype.user.user import STANDARD_USERS def execute(): frappe.reload_doc("core", "doctype", "system_settings") system_settings = frappe.get_doc("System Settings") # set values from global_defauls global_defaults = frappe.db.get_value("Global Defaults", None, ["time_zone", "date_format", "number_format", "float_precision", "session_expiry"], as_dict=True) if global_defaults: for key, val in global_defaults.items(): if not system_settings.get(key): system_settings.set(key, val) # language if not system_settings.get("language"): # find most common language lang = frappe.db.sql_list("""select language from `tabUser` where ifnull(language, '')!='' and language not like "Loading%%" and name not in ({standard_users})""".format( standard_users=", ".join(["%s"]*len(STANDARD_USERS))), tuple(STANDARD_USERS)) lang = Counter(lang).most_common(1) lang = (len(lang) > 0) and lang[0][0] or "english" system_settings.language = lang system_settings.flags.ignore_mandatory = True system_settings.save() global_defaults = frappe.get_doc("Global Defaults") global_defaults.flags.ignore_mandatory = True global_defaults.save()
agpl-3.0
jazkarta/edx-platform
lms/djangoapps/shoppingcart/tests/test_views.py
6
94449
""" Tests for Shopping Cart views """ from collections import OrderedDict import pytz from urlparse import urlparse from decimal import Decimal import json from django.http import HttpRequest from django.conf import settings from django.test import TestCase from django.test.utils import override_settings from django.core.urlresolvers import reverse from django.contrib.admin.sites import AdminSite from django.contrib.auth.models import Group, User from django.contrib.messages.storage.fallback import FallbackStorage from django.core import mail from django.core.cache import cache from pytz import UTC from freezegun import freeze_time from datetime import datetime, timedelta from mock import patch, Mock import ddt from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from student.roles import CourseSalesAdminRole from util.date_utils import get_default_time_display from util.testing import UrlResetMixin from shoppingcart.views import _can_download_report, _get_date_from_str from shoppingcart.models import ( Order, CertificateItem, PaidCourseRegistration, CourseRegCodeItem, Coupon, CourseRegistrationCode, RegistrationCodeRedemption, DonationConfiguration, CouponRedemption) from student.tests.factories import UserFactory, AdminFactory, CourseModeFactory from courseware.tests.factories import InstructorFactory from student.models import CourseEnrollment from course_modes.models import CourseMode from edxmako.shortcuts import render_to_response from embargo.test_utils import restrict_course from shoppingcart.processors import render_purchase_form_html from shoppingcart.admin import SoftDeleteCouponAdmin from shoppingcart.views import initialize_report from shoppingcart.tests.payment_fake import PaymentFakeView from shoppingcart.processors.CyberSource2 import sign def mock_render_purchase_form_html(*args, **kwargs): return render_purchase_form_html(*args, **kwargs) form_mock = 
Mock(side_effect=mock_render_purchase_form_html) def mock_render_to_response(*args, **kwargs): return render_to_response(*args, **kwargs) render_mock = Mock(side_effect=mock_render_to_response) postpay_mock = Mock() @patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True}) @ddt.ddt class ShoppingCartViewsTests(ModuleStoreTestCase): def setUp(self): super(ShoppingCartViewsTests, self).setUp() patcher = patch('student.models.tracker') self.mock_tracker = patcher.start() self.user = UserFactory.create() self.user.set_password('password') self.user.save() self.instructor = AdminFactory.create() self.cost = 40 self.coupon_code = 'abcde' self.reg_code = 'qwerty' self.percentage_discount = 10 self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course') self.course_key = self.course.id self.course_mode = CourseMode(course_id=self.course_key, mode_slug="honor", mode_display_name="honor cert", min_price=self.cost) self.course_mode.save() # Saving another testing course mode self.testing_cost = 20 self.testing_course = CourseFactory.create(org='edX', number='888', display_name='Testing Super Course') self.testing_course_mode = CourseMode(course_id=self.testing_course.id, mode_slug="honor", mode_display_name="testing honor cert", min_price=self.testing_cost) self.testing_course_mode.save() verified_course = CourseFactory.create(org='org', number='test', display_name='Test Course') self.verified_course_key = verified_course.id self.cart = Order.get_cart_for_user(self.user) self.addCleanup(patcher.stop) self.now = datetime.now(pytz.UTC) self.yesterday = self.now - timedelta(days=1) self.tomorrow = self.now + timedelta(days=1) def get_discount(self, cost): """ This method simple return the discounted amount """ val = Decimal("{0:.2f}".format(Decimal(self.percentage_discount / 100.00) * cost)) return cost - val def add_coupon(self, course_key, is_active, code): """ add dummy coupon into models """ coupon = 
Coupon(code=code, description='testing code', course_id=course_key, percentage_discount=self.percentage_discount, created_by=self.user, is_active=is_active) coupon.save() def add_reg_code(self, course_key, mode_slug='honor', is_valid=True): """ add dummy registration code into models """ course_reg_code = CourseRegistrationCode( code=self.reg_code, course_id=course_key, created_by=self.user, mode_slug=mode_slug, is_valid=is_valid ) course_reg_code.save() def _add_course_mode(self, min_price=50, mode_slug='honor', expiration_date=None): """ Adds a course mode to the test course. """ mode = CourseModeFactory.create() mode.course_id = self.course.id mode.min_price = min_price mode.mode_slug = mode_slug mode.expiration_date = expiration_date mode.save() return mode def add_course_to_user_cart(self, course_key): """ adding course to user cart """ self.login_user() reg_item = PaidCourseRegistration.add_to_order(self.cart, course_key) return reg_item def login_user(self): self.client.login(username=self.user.username, password="password") def test_add_course_to_cart_anon(self): resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])) self.assertEqual(resp.status_code, 403) @patch('shoppingcart.views.render_to_response', render_mock) def test_billing_details(self): billing_url = reverse('billing_details') self.login_user() # page not found error because order_type is not business resp = self.client.get(billing_url) self.assertEqual(resp.status_code, 404) #chagne the order_type to business self.cart.order_type = 'business' self.cart.save() resp = self.client.get(billing_url) self.assertEqual(resp.status_code, 200) ((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name self.assertEqual(template, 'shoppingcart/billing_details.html') # check for the default currency in the context self.assertEqual(context['currency'], 'usd') self.assertEqual(context['currency_symbol'], '$') data = 
{'company_name': 'Test Company', 'company_contact_name': 'JohnDoe', 'company_contact_email': 'john@est.com', 'recipient_name': 'Mocker', 'recipient_email': 'mock@germ.com', 'company_address_line_1': 'DC Street # 1', 'company_address_line_2': '', 'company_city': 'DC', 'company_state': 'NY', 'company_zip': '22003', 'company_country': 'US', 'customer_reference_number': 'PO#23'} resp = self.client.post(billing_url, data) self.assertEqual(resp.status_code, 200) @patch('shoppingcart.views.render_to_response', render_mock) @override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs']) def test_billing_details_with_override_currency_settings(self): billing_url = reverse('billing_details') self.login_user() #chagne the order_type to business self.cart.order_type = 'business' self.cart.save() resp = self.client.get(billing_url) self.assertEqual(resp.status_code, 200) ((template, context), __) = render_mock.call_args # pylint: disable=redefined-outer-name self.assertEqual(template, 'shoppingcart/billing_details.html') # check for the override currency settings in the context self.assertEqual(context['currency'], 'PKR') self.assertEqual(context['currency_symbol'], 'Rs') def test_same_coupon_code_applied_on_multiple_items_in_the_cart(self): """ test to check that that the same coupon code applied on multiple items in the cart. 
""" self.login_user() # add first course to user cart resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])) self.assertEqual(resp.status_code, 200) # add and apply the coupon code to course in the cart self.add_coupon(self.course_key, True, self.coupon_code) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) # now add the same coupon code to the second course(testing_course) self.add_coupon(self.testing_course.id, True, self.coupon_code) #now add the second course to cart, the coupon code should be # applied when adding the second course to the cart resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.testing_course.id.to_deprecated_string()])) self.assertEqual(resp.status_code, 200) #now check the user cart and see that the discount has been applied on both the courses resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[])) self.assertEqual(resp.status_code, 200) #first course price is 40$ and the second course price is 20$ # after 10% discount on both the courses the total price will be 18+36 = 54 self.assertIn('54.00', resp.content) def test_add_course_to_cart_already_in_cart(self): PaidCourseRegistration.add_to_order(self.cart, self.course_key) self.login_user() resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])) self.assertEqual(resp.status_code, 400) self.assertIn('The course {0} is already in your cart.'.format(self.course_key.to_deprecated_string()), resp.content) def test_course_discount_invalid_coupon(self): self.add_coupon(self.course_key, True, self.coupon_code) self.add_course_to_user_cart(self.course_key) non_existing_code = "non_existing_code" resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': non_existing_code}) self.assertEqual(resp.status_code, 404) 
self.assertIn("Discount does not exist against code '{0}'.".format(non_existing_code), resp.content) def test_valid_qty_greater_then_one_and_purchase_type_should_business(self): qty = 2 item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty}) self.assertEqual(resp.status_code, 200) data = json.loads(resp.content) self.assertEqual(data['total_cost'], item.unit_cost * qty) cart = Order.get_cart_for_user(self.user) self.assertEqual(cart.order_type, 'business') def test_in_valid_qty_case(self): # invalid quantity, Quantity must be between 1 and 1000. qty = 0 item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty}) self.assertEqual(resp.status_code, 400) self.assertIn("Quantity must be between 1 and 1000.", resp.content) # invalid quantity, Quantity must be an integer. qty = 'abcde' resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty}) self.assertEqual(resp.status_code, 400) self.assertIn("Quantity must be an integer.", resp.content) # invalid quantity, Quantity is not present in request resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id}) self.assertEqual(resp.status_code, 400) self.assertIn("Quantity must be between 1 and 1000.", resp.content) def test_valid_qty_but_item_not_found(self): qty = 2 item_id = '-1' self.login_user() resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item_id, 'qty': qty}) self.assertEqual(resp.status_code, 404) self.assertEqual('Order item does not exist.', resp.content) # now testing the case if item id not found in request, resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'qty': qty}) self.assertEqual(resp.status_code, 400) self.assertEqual('Order item not found in request.', resp.content) def 
test_purchase_type_should_be_personal_when_qty_is_one(self): qty = 1 item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty}) self.assertEqual(resp.status_code, 200) data = json.loads(resp.content) self.assertEqual(data['total_cost'], item.unit_cost * 1) cart = Order.get_cart_for_user(self.user) self.assertEqual(cart.order_type, 'personal') def test_purchase_type_on_removing_item_and_cart_has_item_with_qty_one(self): qty = 5 self.add_course_to_user_cart(self.course_key) item2 = self.add_course_to_user_cart(self.testing_course.id) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item2.id, 'qty': qty}) self.assertEqual(resp.status_code, 200) cart = Order.get_cart_for_user(self.user) cart_items = cart.orderitem_set.all() test_flag = False for cartitem in cart_items: if cartitem.qty == 5: test_flag = True resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': cartitem.id}) self.assertEqual(resp.status_code, 200) self.assertTrue(test_flag) cart = Order.get_cart_for_user(self.user) self.assertEqual(cart.order_type, 'personal') def test_billing_details_btn_in_cart_when_qty_is_greater_than_one(self): qty = 5 item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty}) self.assertEqual(resp.status_code, 200) resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[])) self.assertIn("Billing Details", resp.content) def test_purchase_type_should_be_personal_when_remove_all_items_from_cart(self): item1 = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item1.id, 'qty': 2}) self.assertEqual(resp.status_code, 200) item2 = self.add_course_to_user_cart(self.testing_course.id) resp = 
self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item2.id, 'qty': 5}) self.assertEqual(resp.status_code, 200) cart = Order.get_cart_for_user(self.user) cart_items = cart.orderitem_set.all() test_flag = False for cartitem in cart_items: test_flag = True resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': cartitem.id}) self.assertEqual(resp.status_code, 200) self.assertTrue(test_flag) cart = Order.get_cart_for_user(self.user) self.assertEqual(cart.order_type, 'personal') def test_use_valid_coupon_code_and_qty_is_greater_than_one(self): qty = 5 item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty}) self.assertEqual(resp.status_code, 200) data = json.loads(resp.content) self.assertEqual(data['total_cost'], item.unit_cost * qty) # use coupon code self.add_coupon(self.course_key, True, self.coupon_code) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) item = self.cart.orderitem_set.all().select_subclasses()[0] self.assertEquals(item.unit_cost * qty, 180) def test_course_discount_invalid_reg_code(self): self.add_reg_code(self.course_key) self.add_course_to_user_cart(self.course_key) non_existing_code = "non_existing_code" resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': non_existing_code}) self.assertEqual(resp.status_code, 404) self.assertIn("Discount does not exist against code '{0}'.".format(non_existing_code), resp.content) def test_course_discount_inactive_coupon(self): self.add_coupon(self.course_key, False, self.coupon_code) self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 404) self.assertIn("Discount does not exist against code '{0}'.".format(self.coupon_code), resp.content) def 
test_course_does_not_exist_in_cart_against_valid_coupon(self): course_key = self.course_key.to_deprecated_string() + 'testing' self.add_coupon(course_key, True, self.coupon_code) self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 404) self.assertIn("Discount does not exist against code '{0}'.".format(self.coupon_code), resp.content) def test_inactive_registration_code_returns_error(self): """ test to redeem inactive registration code and it returns an error. """ course_key = self.course_key.to_deprecated_string() self.add_reg_code(course_key, is_valid=False) self.add_course_to_user_cart(self.course_key) # now apply the inactive registration code # it will raise an exception resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code}) self.assertEqual(resp.status_code, 400) self.assertIn( "This enrollment code ({enrollment_code}) is no longer valid.".format( enrollment_code=self.reg_code), resp.content) def test_course_does_not_exist_in_cart_against_valid_reg_code(self): course_key = self.course_key.to_deprecated_string() + 'testing' self.add_reg_code(course_key) self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code}) self.assertEqual(resp.status_code, 404) self.assertIn("Code '{0}' is not valid for any course in the shopping cart.".format(self.reg_code), resp.content) def test_cart_item_qty_greater_than_1_against_valid_reg_code(self): course_key = self.course_key.to_deprecated_string() self.add_reg_code(course_key) item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': 4}) self.assertEqual(resp.status_code, 200) # now update the cart item quantity and then apply the registration code # it will raise an exception resp = 
self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code}) self.assertEqual(resp.status_code, 404) self.assertIn("Cart item quantity should not be greater than 1 when applying activation code", resp.content) @ddt.data(True, False) def test_reg_code_uses_associated_mode(self, expired_mode): """Tests the use of reg codes on verified courses, expired or active. """ course_key = self.course_key.to_deprecated_string() expiration_date = self.yesterday if expired_mode else self.tomorrow self._add_course_mode(mode_slug='verified', expiration_date=expiration_date) self.add_reg_code(course_key, mode_slug='verified') self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('register_code_redemption', args=[self.reg_code]), HTTP_HOST='localhost') self.assertEqual(resp.status_code, 200) self.assertIn(self.course.display_name, resp.content) @ddt.data(True, False) def test_reg_code_uses_unknown_mode(self, expired_mode): """Tests the use of reg codes on verified courses, expired or active. 
""" course_key = self.course_key.to_deprecated_string() expiration_date = self.yesterday if expired_mode else self.tomorrow self._add_course_mode(mode_slug='verified', expiration_date=expiration_date) self.add_reg_code(course_key, mode_slug='bananas') self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('register_code_redemption', args=[self.reg_code]), HTTP_HOST='localhost') self.assertEqual(resp.status_code, 200) self.assertIn(self.course.display_name, resp.content) self.assertIn("error processing your redeem code", resp.content) def test_course_discount_for_valid_active_coupon_code(self): self.add_coupon(self.course_key, True, self.coupon_code) self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) # unit price should be updated for that course item = self.cart.orderitem_set.all().select_subclasses()[0] self.assertEquals(item.unit_cost, self.get_discount(self.cost)) # after getting 10 percent discount self.assertEqual(self.cart.total_cost, self.get_discount(self.cost)) # now using the same coupon code against the same order. # Only one coupon redemption should be allowed per order. resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 400) self.assertIn("Only one coupon redemption is allowed against an order", resp.content) def test_course_discount_against_two_distinct_coupon_codes(self): self.add_coupon(self.course_key, True, self.coupon_code) self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) # unit price should be updated for that course item = self.cart.orderitem_set.all().select_subclasses()[0] self.assertEquals(item.unit_cost, self.get_discount(self.cost)) # now using another valid active coupon code. 
# Only one coupon redemption should be allowed per order. self.add_coupon(self.course_key, True, 'abxyz') resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'abxyz'}) self.assertEqual(resp.status_code, 400) self.assertIn("Only one coupon redemption is allowed against an order", resp.content) def test_same_coupons_code_on_multiple_courses(self): # add two same coupon codes on two different courses self.add_coupon(self.course_key, True, self.coupon_code) self.add_coupon(self.testing_course.id, True, self.coupon_code) self.add_course_to_user_cart(self.course_key) self.add_course_to_user_cart(self.testing_course.id) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) # unit price should be updated for that course item = self.cart.orderitem_set.all().select_subclasses()[0] self.assertEquals(item.unit_cost, self.get_discount(self.cost)) item = self.cart.orderitem_set.all().select_subclasses()[1] self.assertEquals(item.unit_cost, self.get_discount(self.testing_cost)) def test_soft_delete_coupon(self): self.add_coupon(self.course_key, True, self.coupon_code) coupon = Coupon(code='TestCode', description='testing', course_id=self.course_key, percentage_discount=12, created_by=self.user, is_active=True) coupon.save() self.assertEquals(coupon.__unicode__(), '[Coupon] code: TestCode course: MITx/999/Robot_Super_Course') admin = User.objects.create_user('Mark', 'admin+courses@edx.org', 'foo') admin.is_staff = True get_coupon = Coupon.objects.get(id=1) request = HttpRequest() request.user = admin setattr(request, 'session', 'session') messages = FallbackStorage(request) setattr(request, '_messages', messages) coupon_admin = SoftDeleteCouponAdmin(Coupon, AdminSite()) test_query_set = coupon_admin.queryset(request) test_actions = coupon_admin.get_actions(request) self.assertIn('really_delete_selected', test_actions['really_delete_selected']) self.assertEqual(get_coupon.is_active, 
True) coupon_admin.really_delete_selected(request, test_query_set) for coupon in test_query_set: self.assertEqual(coupon.is_active, False) coupon_admin.delete_model(request, get_coupon) self.assertEqual(get_coupon.is_active, False) coupon = Coupon(code='TestCode123', description='testing123', course_id=self.course_key, percentage_discount=22, created_by=self.user, is_active=True) coupon.save() test_query_set = coupon_admin.queryset(request) coupon_admin.really_delete_selected(request, test_query_set) for coupon in test_query_set: self.assertEqual(coupon.is_active, False) def test_course_free_discount_for_valid_active_reg_code(self): self.add_reg_code(self.course_key) self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code}) self.assertEqual(resp.status_code, 200) redeem_url = reverse('register_code_redemption', args=[self.reg_code]) response = self.client.get(redeem_url) self.assertEquals(response.status_code, 200) # check button text self.assertTrue('Activate Course Enrollment' in response.content) #now activate the user by enrolling him/her to the course response = self.client.post(redeem_url) self.assertEquals(response.status_code, 200) # now testing registration code already used scenario, reusing the same code # the item has been removed when using the registration code for the first time resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code}) self.assertEqual(resp.status_code, 400) self.assertIn("This enrollment code ({enrollment_code}) is not valid.".format( enrollment_code=self.reg_code ), resp.content) def test_upgrade_from_valid_reg_code(self): """Use a valid registration code to upgrade from honor to verified mode. 
""" # Ensure the course has a verified mode course_key = self.course_key.to_deprecated_string() self._add_course_mode(mode_slug='verified') self.add_reg_code(course_key, mode_slug='verified') # Enroll as honor in the course with the current user. CourseEnrollment.enroll(self.user, self.course_key) self.login_user() current_enrollment, __ = CourseEnrollment.enrollment_mode_for_user(self.user, self.course_key) self.assertEquals('honor', current_enrollment) redeem_url = reverse('register_code_redemption', args=[self.reg_code]) response = self.client.get(redeem_url) self.assertEquals(response.status_code, 200) # check button text self.assertTrue('Activate Course Enrollment' in response.content) #now activate the user by enrolling him/her to the course response = self.client.post(redeem_url) self.assertEquals(response.status_code, 200) # Once upgraded, should be "verified" current_enrollment, __ = CourseEnrollment.enrollment_mode_for_user(self.user, self.course_key) self.assertEquals('verified', current_enrollment) @patch('shoppingcart.views.log.debug') def test_non_existing_coupon_redemption_on_removing_item(self, debug_log): reg_item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': reg_item.id}) debug_log.assert_called_with( 'Code redemption does not exist for order item id=%s.', str(reg_item.id) ) self.assertEqual(resp.status_code, 200) self.assertEquals(self.cart.orderitem_set.count(), 0) @patch('shoppingcart.views.log.info') def test_existing_coupon_redemption_on_removing_item(self, info_log): self.add_coupon(self.course_key, True, self.coupon_code) reg_item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': reg_item.id}) self.assertEqual(resp.status_code, 200) 
self.assertEquals(self.cart.orderitem_set.count(), 0) info_log.assert_called_with( 'Coupon "%s" redemption entry removed for user "%s" for order item "%s"', self.coupon_code, self.user, str(reg_item.id) ) @patch('shoppingcart.views.log.info') def test_reset_redemption_for_coupon(self, info_log): self.add_coupon(self.course_key, True, self.coupon_code) reg_item = self.add_course_to_user_cart(self.course_key) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) resp = self.client.post(reverse('shoppingcart.views.reset_code_redemption', args=[])) self.assertEqual(resp.status_code, 200) info_log.assert_called_with( 'Coupon redemption entry removed for user %s for order %s', self.user, reg_item.id ) @patch('shoppingcart.views.log.info') def test_coupon_discount_for_multiple_courses_in_cart(self, info_log): reg_item = self.add_course_to_user_cart(self.course_key) self.add_coupon(self.course_key, True, self.coupon_code) cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor') self.assertEquals(self.cart.orderitem_set.count(), 2) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) # unit_cost should be updated for that particular course for which coupon code is registered items = self.cart.orderitem_set.all().select_subclasses() for item in items: if item.id == reg_item.id: self.assertEquals(item.unit_cost, self.get_discount(self.cost)) self.assertEquals(item.list_price, self.cost) elif item.id == cert_item.id: self.assertEquals(item.list_price, self.cost) self.assertEquals(item.unit_cost, self.cost) # Delete the discounted item, corresponding coupon redemption should # be removed for that particular discounted item resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': reg_item.id}) self.assertEqual(resp.status_code, 200) 
self.assertEquals(self.cart.orderitem_set.count(), 1) info_log.assert_called_with( 'Coupon "%s" redemption entry removed for user "%s" for order item "%s"', self.coupon_code, self.user, str(reg_item.id) ) @patch('shoppingcart.views.log.info') def test_delete_certificate_item(self, info_log): self.add_course_to_user_cart(self.course_key) cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor') self.assertEquals(self.cart.orderitem_set.count(), 2) # Delete the discounted item, corresponding coupon redemption should be removed for that particular discounted item resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': cert_item.id}) self.assertEqual(resp.status_code, 200) self.assertEquals(self.cart.orderitem_set.count(), 1) info_log.assert_called_with("order item %s removed for user %s", str(cert_item.id), self.user) @patch('shoppingcart.views.log.info') def test_remove_coupon_redemption_on_clear_cart(self, info_log): reg_item = self.add_course_to_user_cart(self.course_key) CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor') self.assertEquals(self.cart.orderitem_set.count(), 2) self.add_coupon(self.course_key, True, self.coupon_code) resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code}) self.assertEqual(resp.status_code, 200) resp = self.client.post(reverse('shoppingcart.views.clear_cart', args=[])) self.assertEqual(resp.status_code, 200) self.assertEquals(self.cart.orderitem_set.count(), 0) info_log.assert_called_with( 'Coupon redemption entry removed for user %s for order %s', self.user, reg_item.id ) def test_add_course_to_cart_already_registered(self): CourseEnrollment.enroll(self.user, self.course_key) self.login_user() resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])) self.assertEqual(resp.status_code, 400) self.assertIn('You are already registered in 
course {0}.'.format(self.course_key.to_deprecated_string()), resp.content) def test_add_nonexistent_course_to_cart(self): self.login_user() resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=['non/existent/course'])) self.assertEqual(resp.status_code, 404) self.assertIn("The course you requested does not exist.", resp.content) def test_add_course_to_cart_success(self): self.login_user() reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()]) resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])) self.assertEqual(resp.status_code, 200) self.assertTrue(PaidCourseRegistration.contained_in_order(self.cart, self.course_key)) @patch('shoppingcart.views.render_purchase_form_html', form_mock) @patch('shoppingcart.views.render_to_response', render_mock) def test_show_cart(self): self.login_user() reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key) cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor') resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[])) self.assertEqual(resp.status_code, 200) ((purchase_form_arg_cart,), _) = form_mock.call_args # pylint: disable=redefined-outer-name purchase_form_arg_cart_items = purchase_form_arg_cart.orderitem_set.all().select_subclasses() self.assertIn(reg_item, purchase_form_arg_cart_items) self.assertIn(cert_item, purchase_form_arg_cart_items) self.assertEqual(len(purchase_form_arg_cart_items), 2) ((template, context), _) = render_mock.call_args self.assertEqual(template, 'shoppingcart/shopping_cart.html') self.assertEqual(len(context['shoppingcart_items']), 2) self.assertEqual(context['amount'], 80) self.assertIn("80.00", context['form_html']) # check for the default currency in the context self.assertEqual(context['currency'], 'usd') self.assertEqual(context['currency_symbol'], '$') 
    @patch('shoppingcart.views.render_purchase_form_html', form_mock)
    @patch('shoppingcart.views.render_to_response', render_mock)
    @override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs'])
    def test_show_cart_with_override_currency_settings(self):
        """The cart page honors PAID_COURSE_REGISTRATION_CURRENCY overrides (code and symbol)."""
        self.login_user()
        reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
        self.assertEqual(resp.status_code, 200)
        ((purchase_form_arg_cart,), _) = form_mock.call_args  # pylint: disable=redefined-outer-name
        purchase_form_arg_cart_items = purchase_form_arg_cart.orderitem_set.all().select_subclasses()
        self.assertIn(reg_item, purchase_form_arg_cart_items)
        ((template, context), _) = render_mock.call_args
        self.assertEqual(template, 'shoppingcart/shopping_cart.html')
        # check for the override currency settings in the context
        self.assertEqual(context['currency'], 'PKR')
        self.assertEqual(context['currency_symbol'], 'Rs')

    def test_clear_cart(self):
        """The clear-cart endpoint empties all order items."""
        self.login_user()
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.assertEquals(self.cart.orderitem_set.count(), 2)
        resp = self.client.post(reverse('shoppingcart.views.clear_cart', args=[]))
        self.assertEqual(resp.status_code, 200)
        self.assertEquals(self.cart.orderitem_set.count(), 0)

    @patch('shoppingcart.views.log.exception')
    def test_remove_item(self, exception_log):
        """Removing an item works before purchase; after purchase (or for a bogus id) the view logs an exception but still returns 200."""
        self.login_user()
        reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.assertEquals(self.cart.orderitem_set.count(), 2)
        resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': reg_item.id})
        self.assertEqual(resp.status_code, 200)
        self.assertEquals(self.cart.orderitem_set.count(), 1)
        self.assertNotIn(reg_item, self.cart.orderitem_set.all().select_subclasses())
        # Once the cart is purchased, items can no longer be removed.
        self.cart.purchase()
        resp2 = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': cert_item.id})
        self.assertEqual(resp2.status_code, 200)
        exception_log.assert_called_with(
            'Cannot remove cart OrderItem id=%s. DoesNotExist or item is already purchased',
            str(cert_item.id)
        )
        # A nonexistent item id takes the same error path.
        resp3 = self.client.post(
            reverse('shoppingcart.views.remove_item', args=[]),
            {'id': -1}
        )
        self.assertEqual(resp3.status_code, 200)
        exception_log.assert_called_with(
            'Cannot remove cart OrderItem id=%s. DoesNotExist or item is already purchased',
            '-1'
        )

    @patch('shoppingcart.views.process_postpay_callback', postpay_mock)
    def test_postpay_callback_success(self):
        """A successful postpay callback redirects (302) to the receipt page for the order."""
        postpay_mock.return_value = {'success': True, 'order': self.cart}
        self.login_user()
        resp = self.client.post(reverse('shoppingcart.views.postpay_callback', args=[]))
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(urlparse(resp.__getitem__('location')).path, reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))

    @patch('shoppingcart.views.process_postpay_callback', postpay_mock)
    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_postpay_callback_failure(self):
        """A failed postpay callback renders the error template with the processor's error HTML."""
        postpay_mock.return_value = {'success': False, 'order': self.cart, 'error_html': 'ERROR_TEST!!!'}
        self.login_user()
        resp = self.client.post(reverse('shoppingcart.views.postpay_callback', args=[]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('ERROR_TEST!!!', resp.content)
        ((template, context), _) = render_mock.call_args
        self.assertEqual(template, 'shoppingcart/error.html')
        self.assertEqual(context['order'], self.cart)
        self.assertEqual(context['error_html'], 'ERROR_TEST!!!')

    @ddt.data(0, 1)
    def test_show_receipt_json(self, num_items):
        """The receipt endpoint serves a JSON representation (status, billing info, items) when requested."""
        # Create the correct number of items in the order
        for __ in range(num_items):
            CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase()
        self.login_user()
        url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
        resp = self.client.get(url, HTTP_ACCEPT="application/json")
        # Should have gotten a successful response
        self.assertEqual(resp.status_code, 200)
        # Parse the response as JSON and check the contents
        json_resp = json.loads(resp.content)
        self.assertEqual(json_resp.get('currency'), self.cart.currency)
        self.assertEqual(json_resp.get('purchase_datetime'), get_default_time_display(self.cart.purchase_time))
        self.assertEqual(json_resp.get('total_cost'), self.cart.total_cost)
        self.assertEqual(json_resp.get('status'), "purchased")
        self.assertEqual(json_resp.get('billed_to'), {
            'first_name': self.cart.bill_to_first,
            'last_name': self.cart.bill_to_last,
            'street1': self.cart.bill_to_street1,
            'street2': self.cart.bill_to_street2,
            'city': self.cart.bill_to_city,
            'state': self.cart.bill_to_state,
            'postal_code': self.cart.bill_to_postalcode,
            'country': self.cart.bill_to_country
        })
        self.assertEqual(len(json_resp.get('items')), num_items)
        for item in json_resp.get('items'):
            self.assertEqual(item, {
                'unit_cost': 40,
                'quantity': 1,
                'line_cost': 40,
                'line_desc': 'Honor Code Certificate for course Test Course',
                'course_key': unicode(self.verified_course_key)
            })

    def test_show_receipt_json_multiple_items(self):
        """The JSON receipt lists each item type with its own line description."""
        # Two different item types
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase()
        self.login_user()
        url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
        resp = self.client.get(url, HTTP_ACCEPT="application/json")
        # Should have gotten a successful response
        self.assertEqual(resp.status_code, 200)
        # Parse the response as JSON and check the contents
        json_resp = json.loads(resp.content)
        self.assertEqual(json_resp.get('total_cost'), self.cart.total_cost)
        items = json_resp.get('items')
        self.assertEqual(len(items), 2)
        self.assertEqual(items[0], {
            'unit_cost': 40,
            'quantity': 1,
            'line_cost': 40,
            'line_desc': 'Registration for Course: Robot Super Course',
            'course_key': unicode(self.course_key)
        })
        self.assertEqual(items[1], {
            'unit_cost': 40,
            'quantity': 1,
            'line_cost': 40,
            'line_desc': 'Honor Code Certificate for course Test Course',
            'course_key': unicode(self.verified_course_key)
        })

    def test_receipt_json_refunded(self):
        """After a certificate refund, the JSON receipt reports status 'refunded'."""
        mock_enrollment = Mock()
        mock_enrollment.refundable.side_effect = lambda: True
        mock_enrollment.course_id = self.verified_course_key
        mock_enrollment.user = self.user
        CourseMode.objects.create(
            course_id=self.verified_course_key,
            mode_slug="verified",
            mode_display_name="verified cert",
            min_price=self.cost
        )
        cert = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'verified')
        self.cart.purchase()
        cert.refund_cert_callback(course_enrollment=mock_enrollment)
        self.login_user()
        url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
        resp = self.client.get(url, HTTP_ACCEPT="application/json")
        self.assertEqual(resp.status_code, 200)
        json_resp = json.loads(resp.content)
        self.assertEqual(json_resp.get('status'), 'refunded')

    def test_show_receipt_404s(self):
        """Another user's receipt and an unknown order id both 404."""
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase()
        user2 = UserFactory.create()
        cart2 = Order.get_cart_for_user(user2)
        PaidCourseRegistration.add_to_order(cart2, self.course_key)
        cart2.purchase()
        self.login_user()
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[cart2.id]))
        self.assertEqual(resp.status_code, 404)
        resp2 = self.client.get(reverse('shoppingcart.views.show_receipt', args=[1000]))
        self.assertEqual(resp2.status_code, 404)

    def test_total_amount_of_purchased_course(self):
        """Per-course revenue aggregates across users, including coupon-discounted purchases."""
        # NOTE(review): this method continues past the end of this span; the
        # remainder of its body appears in the following chunk.
        self.add_course_to_user_cart(self.course_key)
        self.assertEquals(self.cart.orderitem_set.count(), 1)
        self.add_coupon(self.course_key, True, self.coupon_code)
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
        self.assertEqual(resp.status_code, 200)
        # NOTE(review): continuation of test_total_amount_of_purchased_course,
        # whose header is in the preceding chunk.
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        # Total amount of a particular course that is purchased by different users
        total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course_key)
        # 36 = 40 minus the 10% coupon discount applied above — TODO confirm discount rate in setUp.
        self.assertEqual(total_amount, 36)
        self.client.login(username=self.instructor.username, password="test")
        cart = Order.get_cart_for_user(self.instructor)
        PaidCourseRegistration.add_to_order(cart, self.course_key)
        cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course_key)
        self.assertEqual(total_amount, 76)

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_with_valid_coupon_code(self):
        """The receipt shows the discounted amount when a coupon was redeemed."""
        self.add_course_to_user_cart(self.course_key)
        self.add_coupon(self.course_key, True, self.coupon_code)
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
        self.assertEqual(resp.status_code, 200)
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('FirstNameTesting123', resp.content)
        self.assertIn(str(self.get_discount(self.cost)), resp.content)

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_reg_code_and_course_registration_scenario(self):
        """A registration code applied to a single-course cart leads to an activation page and enrollment."""
        self.add_reg_code(self.course_key)
        # One courses in user shopping cart
        self.add_course_to_user_cart(self.course_key)
        self.assertEquals(self.cart.orderitem_set.count(), 1)
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
        self.assertEqual(resp.status_code, 200)
        redeem_url = reverse('register_code_redemption', args=[self.reg_code])
        response = self.client.get(redeem_url)
        self.assertEquals(response.status_code, 200)
        # check button text
        self.assertTrue('Activate Course Enrollment' in response.content)
        #now activate the user by enrolling him/her to the course
        response = self.client.post(redeem_url)
        self.assertEquals(response.status_code, 200)

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_reg_code_with_multiple_courses_and_checkout_scenario(self):
        """With two courses in the cart, redeeming a reg code enrolls one course and the other is checked out normally."""
        self.add_reg_code(self.course_key)
        # Two courses in user shopping cart
        self.login_user()
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        item2 = PaidCourseRegistration.add_to_order(self.cart, self.testing_course.id)
        self.assertEquals(self.cart.orderitem_set.count(), 2)
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
        self.assertEqual(resp.status_code, 200)
        redeem_url = reverse('register_code_redemption', args=[self.reg_code])
        resp = self.client.get(redeem_url)
        self.assertEquals(resp.status_code, 200)
        # check button text
        self.assertTrue('Activate Course Enrollment' in resp.content)
        #now activate the user by enrolling him/her to the course
        resp = self.client.post(redeem_url)
        self.assertEquals(resp.status_code, 200)
        resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
        self.assertIn('Payment', resp.content)
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        ((template, context), _) = render_mock.call_args  # pylint: disable=redefined-outer-name
        self.assertEqual(template, 'shoppingcart/receipt.html')
        self.assertEqual(context['order'], self.cart)
        self.assertEqual(context['order'].total_cost, self.testing_cost)
        course_enrollment = CourseEnrollment.objects.filter(user=self.user)
        self.assertEqual(course_enrollment.count(), 2)
        # make sure the enrollment_ids were stored in the PaidCourseRegistration items
        # refetch them first since they are updated
        # item1 has been deleted from the the cart.
        # User has been enrolled for the item1
        item2 = PaidCourseRegistration.objects.get(id=item2.id)
        self.assertIsNotNone(item2.course_enrollment)
        self.assertEqual(item2.course_enrollment.course_id, self.testing_course.id)

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_with_valid_reg_code(self):
        """A redeemed registration code reduces the purchase total to zero on the receipt."""
        self.add_course_to_user_cart(self.course_key)
        self.add_reg_code(self.course_key)
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
        self.assertEqual(resp.status_code, 200)
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('0.00', resp.content)

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success(self):
        """A standard receipt renders both items, the total, and the default USD currency."""
        reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        self.login_user()
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('FirstNameTesting123', resp.content)
        self.assertIn('80.00', resp.content)
        ((template, context), _) = render_mock.call_args  # pylint: disable=redefined-outer-name
        self.assertEqual(template, 'shoppingcart/receipt.html')
        self.assertEqual(context['order'], self.cart)
        self.assertIn(reg_item, context['shoppingcart_items'][0])
        self.assertIn(cert_item, context['shoppingcart_items'][1])
        self.assertFalse(context['any_refunds'])
        # check for the default currency settings in the context
        self.assertEqual(context['currency_symbol'], '$')
        self.assertEqual(context['currency'], 'usd')

    @override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs'])
    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_with_override_currency_settings(self):
        """The receipt page honors PAID_COURSE_REGISTRATION_CURRENCY overrides."""
        reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        self.login_user()
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        ((template, context), _) = render_mock.call_args  # pylint: disable=redefined-outer-name
        self.assertEqual(template, 'shoppingcart/receipt.html')
        self.assertIn(reg_item, context['shoppingcart_items'][0])
        self.assertIn(cert_item, context['shoppingcart_items'][1])
        # check for the override currency settings in the context
        self.assertEqual(context['currency_symbol'], 'Rs')
        self.assertEqual(context['currency'], 'PKR')

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_courseregcode_item_total_price(self):
        """A business order of quantity 2 yields 2 x 40 = 80 total for the course."""
        self.cart.order_type = 'business'
        self.cart.save()
        CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        self.assertEquals(CourseRegCodeItem.get_total_amount_of_purchased_item(self.course_key), 80)

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_with_order_type_business(self):
        """A business order generates registration codes, sends invoice emails, and renders them on the receipt."""
        self.cart.order_type = 'business'
        self.cart.save()
        reg_item = CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
        self.cart.add_billing_details(company_name='T1Omega', company_contact_name='C1', company_contact_email='test@t1.com', recipient_email='test@t2.com')
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        # mail is sent to these emails recipient_email, company_contact_email, order.user.email
        self.assertEquals(len(mail.outbox), 3)
        self.login_user()
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        # when order_type = 'business' the user is not enrolled in the
        # course but presented with the enrollment links
        self.assertFalse(CourseEnrollment.is_enrolled(self.cart.user, self.course_key))
        self.assertIn('FirstNameTesting123', resp.content)
        self.assertIn('80.00', resp.content)
        # check for the enrollment codes content
        self.assertIn('Please send each professional one of these unique registration codes to enroll into the course.', resp.content)
        # fetch the newly generated registration codes
        course_registration_codes = CourseRegistrationCode.objects.filter(order=self.cart)
        ((template, context), _) = render_mock.call_args  # pylint: disable=redefined-outer-name
        self.assertEqual(template, 'shoppingcart/receipt.html')
        self.assertEqual(context['order'], self.cart)
        self.assertIn(reg_item, context['shoppingcart_items'][0])
        # now check for all the registration codes in the receipt
        # and all the codes should be unused at this point
        self.assertIn(course_registration_codes[0].code, context['reg_code_info_list'][0]['code'])
        self.assertIn(course_registration_codes[1].code, context['reg_code_info_list'][1]['code'])
        self.assertFalse(context['reg_code_info_list'][0]['is_redeemed'])
        self.assertFalse(context['reg_code_info_list'][1]['is_redeemed'])
        self.assertIn(self.cart.purchase_time.strftime("%B %d, %Y"), resp.content)
        self.assertIn(self.cart.company_name, resp.content)
        self.assertIn(self.cart.company_contact_name, resp.content)
        self.assertIn(self.cart.company_contact_email, resp.content)
        self.assertIn(self.cart.recipient_email, resp.content)
        self.assertIn("Invoice #{order_id}".format(order_id=self.cart.id), resp.content)
        self.assertIn('You have successfully purchased <b>{total_registration_codes} course registration codes'
                      .format(total_registration_codes=context['total_registration_codes']), resp.content)
        # now redeem one of registration code from the previous order
        redeem_url = reverse('register_code_redemption', args=[context['reg_code_info_list'][0]['code']])
        #now activate the user by enrolling him/her to the course
        response = self.client.post(redeem_url)
        self.assertEquals(response.status_code, 200)
        self.assertTrue('View Dashboard' in response.content)
        # now view the receipt page again to see if any registration codes
        # has been expired or not
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        ((template, context), _) = render_mock.call_args  # pylint: disable=redefined-outer-name
        self.assertEqual(template, 'shoppingcart/receipt.html')
        # now check for all the registration codes in the receipt
        # and one of code should be used at this point
        self.assertTrue(context['reg_code_info_list'][0]['is_redeemed'])
        self.assertFalse(context['reg_code_info_list'][1]['is_redeemed'])

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_with_upgrade(self):
        """The receipt renders correctly when reached via the verified-upgrade flow."""
        reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        self.login_user()
        self.mock_tracker.emit.reset_mock()  # pylint: disable=maybe-no-member
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('FirstNameTesting123', resp.content)
        self.assertIn('80.00', resp.content)
        ((template, context), _) = render_mock.call_args
        # When we come from the upgrade flow, we get these context variables
        self.assertEqual(template, 'shoppingcart/receipt.html')
        self.assertEqual(context['order'], self.cart)
        self.assertIn(reg_item, context['shoppingcart_items'][0])
        self.assertIn(cert_item, context['shoppingcart_items'][1])
        self.assertFalse(context['any_refunds'])

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_refund(self):
        """A refunded item is excluded from the total and flags any_refunds on the receipt."""
        reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
        self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        cert_item.status = "refunded"
        cert_item.save()
        self.assertEqual(self.cart.total_cost, 40)
        self.login_user()
        resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('40.00', resp.content)
        ((template, context), _tmp) = render_mock.call_args
        self.assertEqual(template, 'shoppingcart/receipt.html')
        self.assertEqual(context['order'], self.cart)
        self.assertIn(reg_item, context['shoppingcart_items'][0])
        self.assertIn(cert_item, context['shoppingcart_items'][1])
        self.assertTrue(context['any_refunds'])

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_show_receipt_success_custom_receipt_page(self):
        """A single-item order uses that item's custom receipt template."""
        cert_item = CertificateItem.add_to_order(self.cart, self.course_key, self.cost, 'honor')
        self.cart.purchase()
        self.login_user()
        receipt_url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
        resp = self.client.get(receipt_url)
        self.assertEqual(resp.status_code, 200)
        ((template, _context), _tmp) = render_mock.call_args
        self.assertEqual(template, cert_item.single_item_receipt_template)

    def _assert_404(self, url, use_post=False):
        """
        Helper method to assert that a given url will return a 404 status code
        """
        if use_post:
            response = self.client.post(url)
        else:
            response = self.client.get(url)
        self.assertEquals(response.status_code, 404)

    @patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': False})
    def test_disabled_paid_courses(self):
        """
        Assert that the pages that require ENABLE_PAID_COURSE_REGISTRATION=True return a
        HTTP 404 status code when we have this flag turned off
        """
        self.login_user()
        self._assert_404(reverse('shoppingcart.views.show_cart', args=[]))
        self._assert_404(reverse('shoppingcart.views.clear_cart', args=[]))
        self._assert_404(reverse('shoppingcart.views.remove_item', args=[]), use_post=True)
        self._assert_404(reverse('shoppingcart.views.register_code_redemption', args=["testing"]))
        self._assert_404(reverse('shoppingcart.views.use_code', args=[]), use_post=True)
        self._assert_404(reverse('shoppingcart.views.update_user_cart', args=[]))
        self._assert_404(reverse('shoppingcart.views.reset_code_redemption', args=[]), use_post=True)
        self._assert_404(reverse('shoppingcart.views.billing_details', args=[]))

    def test_upgrade_postpay_callback_emits_ga_event(self):
        """A successful upgrade purchase clears the session flag and emits the enrollment-upgrade analytics event."""
        # Enroll as honor in the course with the current user.
        CourseEnrollment.enroll(self.user, self.course_key)
        # add verified mode
        CourseMode.objects.create(
            course_id=self.verified_course_key,
            mode_slug="verified",
            mode_display_name="verified cert",
            min_price=self.cost
        )
        # Purchase a verified certificate
        self.cart = Order.get_cart_for_user(self.user)
        CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'verified')
        self.cart.start_purchase()
        self.login_user()
        # setting the attempting upgrade session value.
        session = self.client.session
        session['attempting_upgrade'] = True
        session.save()
        # Simulated CyberSource-style signed callback parameters.
        ordered_params = OrderedDict([
            ('amount', self.cost),
            ('currency', 'usd'),
            ('transaction_type', 'sale'),
            ('orderNumber', str(self.cart.id)),
            ('access_key', '123456789'),
            ('merchantID', 'edx'),
            ('djch', '012345678912'),
            ('orderPage_version', 2),
            ('orderPage_serialNumber', '1234567890'),
            ('profile_id', "00000001"),
            ('reference_number', str(self.cart.id)),
            ('locale', 'en'),
            ('signed_date_time', '2014-08-18T13:59:31Z'),
        ])
        resp_params = PaymentFakeView.response_post_params(sign(ordered_params))
        self.assertTrue(self.client.session.get('attempting_upgrade'))
        url = reverse('shoppingcart.views.postpay_callback')
        self.client.post(url, resp_params, follow=True)
        self.assertFalse(self.client.session.get('attempting_upgrade'))
        self.mock_tracker.emit.assert_any_call(  # pylint: disable=maybe-no-member
            'edx.course.enrollment.upgrade.succeeded',
            {
                'user_id': self.user.id,
                'course_id': self.verified_course_key.to_deprecated_string(),
                'mode': 'verified'
            }
        )


class ReceiptRedirectTest(ModuleStoreTestCase):
    """Test special-case redirect from the receipt page.
    """
    # Price of the verified mode and the login password used by this suite.
    COST = 40
    PASSWORD = 'password'

    def setUp(self):
        """Create a user, a course with a 'verified' mode, a cart, and log the user in."""
        super(ReceiptRedirectTest, self).setUp()
        self.user = UserFactory.create()
        self.user.set_password(self.PASSWORD)
        self.user.save()
        self.course = CourseFactory.create()
        self.course_key = self.course.id
        self.course_mode = CourseMode(
            course_id=self.course_key,
            mode_slug="verified",
            mode_display_name="verified cert",
            min_price=self.COST
        )
        self.course_mode.save()
        self.cart = Order.get_cart_for_user(self.user)
        self.client.login(
            username=self.user.username,
            password=self.PASSWORD
        )

    def test_postpay_callback_redirect_to_verify_student(self):
        """A verified-cert purchase redirects from the postpay callback to the verify_student confirmation page."""
        # Create other carts first
        # This ensures that the order ID and order item IDs do not match
        Order.get_cart_for_user(self.user).start_purchase()
        Order.get_cart_for_user(self.user).start_purchase()
        Order.get_cart_for_user(self.user).start_purchase()
        # Purchase a verified certificate
        self.cart = Order.get_cart_for_user(self.user)
        CertificateItem.add_to_order(
            self.cart,
            self.course_key,
            self.COST,
            'verified'
        )
        self.cart.start_purchase()
        # Simulate hitting the post-pay callback
        with patch('shoppingcart.views.process_postpay_callback') as mock_process:
            mock_process.return_value = {'success': True, 'order': self.cart}
            url = reverse('shoppingcart.views.postpay_callback')
            resp = self.client.post(url, follow=True)
            # Expect to be redirected to the payment confirmation
            # page in the verify_student app
            redirect_url = reverse(
                'verify_student_payment_confirmation',
                kwargs={'course_id': unicode(self.course_key)}
            )
            redirect_url += '?payment-order-num={order_num}'.format(
                order_num=self.cart.id
            )
            self.assertIn(redirect_url, resp.redirect_chain[0][0])


@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class ShoppingcartViewsClosedEnrollment(ModuleStoreTestCase):
    """
    Test suite for ShoppingcartViews Course Enrollments Closed or not
    """
    def setUp(self):
        """Create a user, instructor, two priced courses, a coupon code, a cart, and reference dates."""
        super(ShoppingcartViewsClosedEnrollment, self).setUp()
        self.user = UserFactory.create()
        self.user.set_password('password')
        self.user.save()
        self.instructor = AdminFactory.create()
        self.cost = 40
        self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
        self.course_key = self.course.id
        self.course_mode = CourseMode(course_id=self.course_key, mode_slug="honor", mode_display_name="honor cert", min_price=self.cost)
        self.course_mode.save()
        # Second course whose enrollment window the tests will close.
        self.testing_course = CourseFactory.create(
            org='Edx',
            number='999',
            display_name='Testing Super Course',
            metadata={"invitation_only": False}
        )
        self.percentage_discount = 20.0
        self.coupon_code = 'asdsad'
        self.course_mode = CourseMode(course_id=self.testing_course.id, mode_slug="honor", mode_display_name="honor cert", min_price=self.cost)
        self.course_mode.save()
        self.cart = Order.get_cart_for_user(self.user)
        self.now = datetime.now(pytz.UTC)
        self.tomorrow = self.now + timedelta(days=1)
        self.nextday = self.tomorrow + timedelta(days=1)

    def add_coupon(self, course_key, is_active, code):
        """
        add dummy coupon into models
        """
        coupon = Coupon(code=code, description='testing code', course_id=course_key,
                        percentage_discount=self.percentage_discount, created_by=self.user,
                        is_active=is_active)
        coupon.save()

    def login_user(self):
        """
        Helper fn to login self.user
        """
        self.client.login(username=self.user.username, password="password")

    @patch('shoppingcart.views.render_to_response', render_mock)
    def test_to_check_that_cart_item_enrollment_is_closed(self):
        """An expired-enrollment item is dropped from the cart and its coupon redemption is deleted."""
        self.login_user()
        reg_item1 = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        expired_course_item = PaidCourseRegistration.add_to_order(self.cart, self.testing_course.id)
        # update the testing_course enrollment dates
        self.testing_course.enrollment_start = self.tomorrow
        self.testing_course.enrollment_end = self.nextday
        self.testing_course = self.update_course(self.testing_course, self.user.id)
        # now add the same coupon code to the second course(testing_course)
        self.add_coupon(self.testing_course.id, True, self.coupon_code)
        resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
        self.assertEqual(resp.status_code, 200)
        coupon_redemption = CouponRedemption.objects.filter(coupon__course_id=getattr(expired_course_item, 'course_id'), order=expired_course_item.order_id)
        self.assertEqual(coupon_redemption.count(), 1)
        # testing_course enrollment is closed but the course is in the cart
        # so we delete that item from the cart and display the message in the cart
        # coupon redemption entry should also be deleted when the item is expired.
        resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn("{course_name} has been removed because the enrollment period has closed.".format(course_name=self.testing_course.display_name), resp.content)
        # now the redemption entry should be deleted from the table.
        coupon_redemption = CouponRedemption.objects.filter(coupon__course_id=getattr(expired_course_item, 'course_id'), order=expired_course_item.order_id)
        self.assertEqual(coupon_redemption.count(), 0)
        ((template, context), _tmp) = render_mock.call_args
        self.assertEqual(template, 'shoppingcart/shopping_cart.html')
        self.assertEqual(context['order'], self.cart)
        self.assertIn(reg_item1, context['shoppingcart_items'][0])
        self.assertEqual(1, len(context['shoppingcart_items']))
        self.assertEqual(True, context['is_course_enrollment_closed'])
        self.assertIn(self.testing_course.display_name, context['expired_course_names'])

    def test_to_check_that_cart_item_enrollment_is_closed_when_clicking_the_payment_button(self):
        """verify_cart reports closed enrollment and the cart page removes the expired item."""
        self.login_user()
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        PaidCourseRegistration.add_to_order(self.cart, self.testing_course.id)
        # update the testing_course enrollment dates
        self.testing_course.enrollment_start = self.tomorrow
        self.testing_course.enrollment_end = self.nextday
        self.testing_course = self.update_course(self.testing_course, self.user.id)
        # testing_course enrollment is closed but the course is in the cart
        # so we delete that item from the cart and display the message in the cart
        resp = self.client.get(reverse('shoppingcart.views.verify_cart'))
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(json.loads(resp.content)['is_course_enrollment_closed'])
        resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn("{course_name} has been removed because the enrollment period has closed.".format(course_name=self.testing_course.display_name), resp.content)
        self.assertIn('40.00', resp.content)

    def test_is_enrollment_closed_when_order_type_is_business(self):
        """The billing-details flow also detects closed enrollment for business orders."""
        self.login_user()
        self.cart.order_type = 'business'
        self.cart.save()
        PaidCourseRegistration.add_to_order(self.cart, self.course_key)
        CourseRegCodeItem.add_to_order(self.cart, self.testing_course.id, 2)
        # update the testing_course enrollment dates
        self.testing_course.enrollment_start = self.tomorrow
        self.testing_course.enrollment_end = self.nextday
        self.testing_course = self.update_course(self.testing_course, self.user.id)
        resp = self.client.post(reverse('shoppingcart.views.billing_details'))
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(json.loads(resp.content)['is_course_enrollment_closed'])
        # testing_course enrollment is closed but the course is in the cart
        # so we delete that item from the cart and display the message in the cart
        resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
        self.assertEqual(resp.status_code, 200)
        self.assertIn("{course_name} has been removed because the enrollment period has closed.".format(course_name=self.testing_course.display_name), resp.content)
        self.assertIn('40.00', resp.content)


@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class RegistrationCodeRedemptionCourseEnrollment(ModuleStoreTestCase):
    """
    Test suite for RegistrationCodeRedemption Course Enrollments
    """
    def setUp(self, **kwargs):
        # Body of setUp — its `def` header is at the end of the previous chunk.
        super(RegistrationCodeRedemptionCourseEnrollment, self).setUp()
        self.user = UserFactory.create()
        self.user.set_password('password')
        self.user.save()
        self.cost = 40
        self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
        self.course_key = self.course.id
        self.course_mode = CourseMode(course_id=self.course_key, mode_slug="honor", mode_display_name="honor cert", min_price=self.cost)
        self.course_mode.save()

    def login_user(self):
        """
        Helper fn to login self.user
        """
        self.client.login(username=self.user.username, password="password")

    def test_registration_redemption_post_request_ratelimited(self):
        """
        Try (and fail) registration code redemption 30 times
        in a row on an non-existing registration code post request
        """
        cache.clear()
        url = reverse('register_code_redemption', args=['asdasd'])
        self.login_user()
        for i in xrange(30):  # pylint: disable=unused-variable
            response = self.client.post(url)
            self.assertEquals(response.status_code, 404)
        # then the rate limiter should kick in and give a HttpForbidden response
        response = self.client.post(url)
        self.assertEquals(response.status_code, 403)
        # now reset the time to 5 mins from now in future in order to unblock
        reset_time = datetime.now(UTC) + timedelta(seconds=300)
        with freeze_time(reset_time):
            response = self.client.post(url)
            self.assertEquals(response.status_code, 404)
        cache.clear()

    def test_registration_redemption_get_request_ratelimited(self):
        """
        Try (and fail) registration code redemption 30 times
        in a row on an non-existing registration code get request
        """
        cache.clear()
        url = reverse('register_code_redemption', args=['asdasd'])
        self.login_user()
        for i in xrange(30):  # pylint: disable=unused-variable
            response = self.client.get(url)
            self.assertEquals(response.status_code, 404)
        # then the rate limiter should kick in and give a HttpForbidden response
        response = self.client.get(url)
        self.assertEquals(response.status_code, 403)
        # now reset the time to 5 mins from now in future in order to unblock
        reset_time = datetime.now(UTC) + timedelta(seconds=300)
        with freeze_time(reset_time):
            response = self.client.get(url)
            self.assertEquals(response.status_code, 404)
        cache.clear()

    def test_course_enrollment_active_registration_code_redemption(self):
        """
        Test for active registration code course enrollment
        """
        cache.clear()
        instructor = InstructorFactory(course_key=self.course_key)
        self.client.login(username=instructor.username, password='test')
        # Registration Code Generation only available to Sales Admins.
        CourseSalesAdminRole(self.course.id).add_users(instructor)
        url = reverse('generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {
            'total_registration_codes': 12,
            'company_name': 'Test Group',
            'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com',
            'unit_price': 122.45,
            'recipient_name': 'Test123',
            'recipient_email': 'test@123.com',
            'address_line_1': 'Portland Street',
            'address_line_2': '',
            'address_line_3': '',
            'city': '',
            'state': '',
            'zip': '',
            'country': '',
            'customer_reference_number': '123A23F',
            'internal_reference': '',
            'invoice': ''
        }
        response = self.client.post(url, data)
        self.assertEquals(response.status_code, 200)
        # get the first registration from the newly created registration codes
        registration_code = CourseRegistrationCode.objects.all()[0].code
        redeem_url = reverse('register_code_redemption', args=[registration_code])
        self.login_user()
        response = self.client.get(redeem_url)
        self.assertEquals(response.status_code, 200)
        # check button text
        self.assertIn('Activate Course Enrollment', response.content)
        #now activate the user by enrolling him/her to the course
        response = self.client.post(redeem_url)
        self.assertEquals(response.status_code, 200)
        self.assertIn('View Dashboard', response.content)
        #now check that the registration code has already been redeemed and user is already registered in the course
        RegistrationCodeRedemption.objects.filter(registration_code__code=registration_code)
        response = self.client.get(redeem_url)
        self.assertEquals(len(RegistrationCodeRedemption.objects.filter(registration_code__code=registration_code)), 1)
        self.assertIn("You've clicked a link for an enrollment code that has already been used.", response.content)
        #now check that the registration code has already been redeemed
        response = self.client.post(redeem_url)
        self.assertIn("You've clicked a link for an enrollment code that has already been used.", response.content)
        #now check the response of the dashboard page
        dashboard_url = reverse('dashboard')
        response = self.client.get(dashboard_url)
        self.assertEquals(response.status_code, 200)
        self.assertIn(self.course.display_name, response.content)


@ddt.ddt
class RedeemCodeEmbargoTests(UrlResetMixin, ModuleStoreTestCase):
    """Test blocking redeem code redemption based on country access rules. """

    USERNAME = 'bob'
    PASSWORD = 'test'

    @patch.dict(settings.FEATURES, {'EMBARGO': True})
    def setUp(self):
        """Create a course and a logged-in user with the embargo feature enabled."""
        super(RedeemCodeEmbargoTests, self).setUp('embargo')
        self.course = CourseFactory.create()
        self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
        result = self.client.login(username=self.user.username, password=self.PASSWORD)
        self.assertTrue(result, msg="Could not log in")

    @ddt.data('get', 'post')
    @patch.dict(settings.FEATURES, {'EMBARGO': True})
    def test_registration_code_redemption_embargo(self, method):
        """Redeeming a code from a country blocked by embargo rules redirects instead of enrolling."""
        # Create a valid registration code
        reg_code = CourseRegistrationCode.objects.create(
            code="abcd1234",
            course_id=self.course.id,
            created_by=self.user
        )
        # Try to redeem the code from a restricted country
        with restrict_course(self.course.id) as redirect_url:
            url = reverse(
                'register_code_redemption',
                kwargs={'registration_code': 'abcd1234'}
            )
            response = getattr(self.client, method)(url)
            self.assertRedirects(response, redirect_url)
        # The registration code should NOT be redeemed
        # NOTE(review): the source chunk is truncated here, mid-statement;
        # the rest of this assertion lies beyond the visible range.
        is_redeemed =
RegistrationCodeRedemption.objects.filter( registration_code=reg_code ).exists() self.assertFalse(is_redeemed) # The user should NOT be enrolled is_enrolled = CourseEnrollment.is_enrolled(self.user, self.course.id) self.assertFalse(is_enrolled) @ddt.ddt class DonationViewTest(ModuleStoreTestCase): """Tests for making a donation. These tests cover both the single-item purchase flow, as well as the receipt page for donation items. """ DONATION_AMOUNT = "23.45" PASSWORD = "password" def setUp(self): """Create a test user and order. """ super(DonationViewTest, self).setUp() # Create and login a user self.user = UserFactory.create() self.user.set_password(self.PASSWORD) self.user.save() result = self.client.login(username=self.user.username, password=self.PASSWORD) self.assertTrue(result) # Enable donations config = DonationConfiguration.current() config.enabled = True config.save() def test_donation_for_org(self): self._donate(self.DONATION_AMOUNT) self._assert_receipt_contains("tax purposes") def test_donation_for_course_receipt(self): # Create a test course and donate to it self.course = CourseFactory.create(display_name="Test Course") self._donate(self.DONATION_AMOUNT, course_id=self.course.id) # Verify the receipt page self._assert_receipt_contains("tax purposes") self._assert_receipt_contains(self.course.display_name) def test_smallest_possible_donation(self): self._donate("0.01") self._assert_receipt_contains("0.01") @ddt.data( {}, {"amount": "abcd"}, {"amount": "-1.00"}, {"amount": "0.00"}, {"amount": "0.001"}, {"amount": "0"}, {"amount": "23.45", "course_id": "invalid"} ) def test_donation_bad_request(self, bad_params): response = self.client.post(reverse('donation'), bad_params) self.assertEqual(response.status_code, 400) def test_donation_requires_login(self): self.client.logout() response = self.client.post(reverse('donation'), {'amount': self.DONATION_AMOUNT}) self.assertEqual(response.status_code, 302) def test_no_such_course(self): response = 
self.client.post( reverse("donation"), {"amount": self.DONATION_AMOUNT, "course_id": "edx/DemoX/Demo"} ) self.assertEqual(response.status_code, 400) @ddt.data("get", "put", "head", "options", "delete") def test_donation_requires_post(self, invalid_method): response = getattr(self.client, invalid_method)( reverse("donation"), {"amount": self.DONATION_AMOUNT} ) self.assertEqual(response.status_code, 405) def test_donations_disabled(self): config = DonationConfiguration.current() config.enabled = False config.save() # Logged in -- should be a 404 response = self.client.post(reverse('donation')) self.assertEqual(response.status_code, 404) # Logged out -- should still be a 404 self.client.logout() response = self.client.post(reverse('donation')) self.assertEqual(response.status_code, 404) def _donate(self, donation_amount, course_id=None): """Simulate a donation to a course. This covers the entire payment flow, except for the external payment processor, which is simulated. Arguments: donation_amount (unicode): The amount the user is donating. Keyword Arguments: course_id (CourseKey): If provided, make a donation to the specific course. Raises: AssertionError """ # Purchase a single donation item # Optionally specify a particular course for the donation params = {'amount': donation_amount} if course_id is not None: params['course_id'] = course_id url = reverse('donation') response = self.client.post(url, params) self.assertEqual(response.status_code, 200) # Use the fake payment implementation to simulate the parameters # we would receive from the payment processor. payment_info = json.loads(response.content) self.assertEqual(payment_info["payment_url"], "/shoppingcart/payment_fake") # If this is a per-course donation, verify that we're sending # the course ID to the payment processor. 
if course_id is not None: self.assertEqual( payment_info["payment_params"]["merchant_defined_data1"], unicode(course_id) ) self.assertEqual( payment_info["payment_params"]["merchant_defined_data2"], "donation_course" ) else: self.assertEqual(payment_info["payment_params"]["merchant_defined_data1"], "") self.assertEqual( payment_info["payment_params"]["merchant_defined_data2"], "donation_general" ) processor_response_params = PaymentFakeView.response_post_params(payment_info["payment_params"]) # Use the response parameters to simulate a successful payment url = reverse('shoppingcart.views.postpay_callback') response = self.client.post(url, processor_response_params) self.assertRedirects(response, self._receipt_url) def _assert_receipt_contains(self, expected_text): """Load the receipt page and verify that it contains the expected text.""" resp = self.client.get(self._receipt_url) self.assertContains(resp, expected_text) @property def _receipt_url(self): order_id = Order.objects.get(user=self.user, status="purchased").id return reverse("shoppingcart.views.show_receipt", kwargs={"ordernum": order_id}) class CSVReportViewsTest(ModuleStoreTestCase): """ Test suite for CSV Purchase Reporting """ def setUp(self): super(CSVReportViewsTest, self).setUp() self.user = UserFactory.create() self.user.set_password('password') self.user.save() self.cost = 40 self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course') self.course_key = self.course.id self.course_mode = CourseMode(course_id=self.course_key, mode_slug="honor", mode_display_name="honor cert", min_price=self.cost) self.course_mode.save() self.course_mode2 = CourseMode(course_id=self.course_key, mode_slug="verified", mode_display_name="verified cert", min_price=self.cost) self.course_mode2.save() verified_course = CourseFactory.create(org='org', number='test', display_name='Test Course') self.verified_course_key = verified_course.id self.cart = Order.get_cart_for_user(self.user) 
self.dl_grp = Group(name=settings.PAYMENT_REPORT_GENERATOR_GROUP) self.dl_grp.save() def login_user(self): """ Helper fn to login self.user """ self.client.login(username=self.user.username, password="password") def add_to_download_group(self, user): """ Helper fn to add self.user to group that's allowed to download report CSV """ user.groups.add(self.dl_grp) def test_report_csv_no_access(self): self.login_user() response = self.client.get(reverse('payment_csv_report')) self.assertEqual(response.status_code, 403) def test_report_csv_bad_method(self): self.login_user() self.add_to_download_group(self.user) response = self.client.put(reverse('payment_csv_report')) self.assertEqual(response.status_code, 400) @patch('shoppingcart.views.render_to_response', render_mock) def test_report_csv_get(self): self.login_user() self.add_to_download_group(self.user) response = self.client.get(reverse('payment_csv_report')) ((template, context), unused_kwargs) = render_mock.call_args self.assertEqual(template, 'shoppingcart/download_report.html') self.assertFalse(context['total_count_error']) self.assertFalse(context['date_fmt_error']) self.assertIn("Download CSV Reports", response.content.decode('UTF-8')) @patch('shoppingcart.views.render_to_response', render_mock) def test_report_csv_bad_date(self): self.login_user() self.add_to_download_group(self.user) response = self.client.post(reverse('payment_csv_report'), {'start_date': 'BAD', 'end_date': 'BAD', 'requested_report': 'itemized_purchase_report'}) ((template, context), unused_kwargs) = render_mock.call_args self.assertEqual(template, 'shoppingcart/download_report.html') self.assertFalse(context['total_count_error']) self.assertTrue(context['date_fmt_error']) self.assertIn("There was an error in your date input. 
It should be formatted as YYYY-MM-DD", response.content.decode('UTF-8')) CORRECT_CSV_NO_DATE_ITEMIZED_PURCHASE = ",1,purchased,1,40,40,usd,Registration for Course: Robot Super Course," def test_report_csv_itemized(self): report_type = 'itemized_purchase_report' start_date = '1970-01-01' end_date = '2100-01-01' PaidCourseRegistration.add_to_order(self.cart, self.course_key) self.cart.purchase() self.login_user() self.add_to_download_group(self.user) response = self.client.post(reverse('payment_csv_report'), {'start_date': start_date, 'end_date': end_date, 'requested_report': report_type}) self.assertEqual(response['Content-Type'], 'text/csv') report = initialize_report(report_type, start_date, end_date) self.assertIn(",".join(report.header()), response.content) self.assertIn(self.CORRECT_CSV_NO_DATE_ITEMIZED_PURCHASE, response.content) def test_report_csv_university_revenue_share(self): report_type = 'university_revenue_share' start_date = '1970-01-01' end_date = '2100-01-01' start_letter = 'A' end_letter = 'Z' self.login_user() self.add_to_download_group(self.user) response = self.client.post(reverse('payment_csv_report'), {'start_date': start_date, 'end_date': end_date, 'start_letter': start_letter, 'end_letter': end_letter, 'requested_report': report_type}) self.assertEqual(response['Content-Type'], 'text/csv') report = initialize_report(report_type, start_date, end_date, start_letter, end_letter) self.assertIn(",".join(report.header()), response.content) class UtilFnsTest(TestCase): """ Tests for utility functions in views.py """ def setUp(self): super(UtilFnsTest, self).setUp() self.user = UserFactory.create() def test_can_download_report_no_group(self): """ Group controlling perms is not present """ self.assertFalse(_can_download_report(self.user)) def test_can_download_report_not_member(self): """ User is not part of group controlling perms """ Group(name=settings.PAYMENT_REPORT_GENERATOR_GROUP).save() self.assertFalse(_can_download_report(self.user)) def 
test_can_download_report(self): """ User is part of group controlling perms """ grp = Group(name=settings.PAYMENT_REPORT_GENERATOR_GROUP) grp.save() self.user.groups.add(grp) self.assertTrue(_can_download_report(self.user)) def test_get_date_from_str(self): test_str = "2013-10-01" date = _get_date_from_str(test_str) self.assertEqual(2013, date.year) self.assertEqual(10, date.month) self.assertEqual(1, date.day)
agpl-3.0
hackoose/cfi-team30
Django/food/migrations/0002_comments_feedback.py
3
1817
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django_extensions.db.fields class Migration(migrations.Migration): dependencies = [ ('food', '0001_initial'), ] operations = [ migrations.CreateModel( name='Comments', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('comment', models.CharField(max_length=10000)), ('school', models.ForeignKey(to='food.School')), ], options={ 'ordering': ('-modified', '-created'), 'abstract': False, 'get_latest_by': 'modified', }, ), migrations.CreateModel( name='Feedback', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('feedback', models.CharField(max_length=10000)), ('school', models.ForeignKey(to='food.School')), ], options={ 'ordering': ('-modified', '-created'), 'abstract': False, 'get_latest_by': 'modified', }, ), ]
apache-2.0
cldershem/osf.io
website/addons/badges/model/settings.py
37
1913
# -*- coding: utf -*- from modularodm import fields from website.addons.base import AddonUserSettingsBase, AddonNodeSettingsBase from . import Badge #TODO Better way around this, No longer needed? class BadgesNodeSettings(AddonNodeSettingsBase): pass class BadgesUserSettings(AddonUserSettingsBase): revocation_list = fields.DictionaryField() # {'id':'12345', 'reason':'is a loser'} @property def can_award(self): return bool(self.badges) or len(Badge.get_system_badges()) > 0 @property def badges(self): return list(self.badge__creator) + [badge for badge in Badge.get_system_badges() if badge.creator != self] @property def issued(self): assertions = [] for badge in self.badges: for assertion in badge.assertions: if assertion.awarder == self: assertions.append(assertion) return assertions def get_badges_json(self): return [badge.to_json() for badge in self.badges] def get_badges_json_simple(self): return [{'value': badge._id, 'text': badge.name} for badge in self.badges] def to_json(self, user): ret = super(BadgesUserSettings, self).to_json(user) ret['badges'] = self.get_badges_json() return ret def to_openbadge(self): ret = { 'name': self.owner.fullname, 'email': self.owner.username, } # Place holder for later when orgaizations get worked on # if self.description: # ret['description'] = self.description, # if self.image: # ret['image'] = self.image, # if self.url: # ret['url'] = self.url # if self.revocation_list: # ret['revocationList'] = self.revocation_list return ret def issued_json(self): return [assertion.to_json() for assertion in self.issued]
apache-2.0
WatanabeYasumasa/edx-platform
lms/djangoapps/instructor_task/views.py
69
10964
import json import logging from django.http import HttpResponse from django.utils.translation import ugettext as _ from celery.states import FAILURE, REVOKED, READY_STATES from instructor_task.api_helper import (get_status_from_instructor_task, get_updated_instructor_task) from instructor_task.models import PROGRESS log = logging.getLogger(__name__) # return status for completed tasks and tasks in progress STATES_WITH_STATUS = [state for state in READY_STATES] + [PROGRESS] def _get_instructor_task_status(task_id): """ Returns status for a specific task. Written as an internal method here (rather than as a helper) so that get_task_completion_info() can be called without causing a circular dependency (since it's also called directly). """ instructor_task = get_updated_instructor_task(task_id) status = get_status_from_instructor_task(instructor_task) if instructor_task is not None and instructor_task.task_state in STATES_WITH_STATUS: succeeded, message = get_task_completion_info(instructor_task) status['message'] = message status['succeeded'] = succeeded return status def instructor_task_status(request): """ View method that returns the status of a course-related task or tasks. Status is returned as a JSON-serialized dict, wrapped as the content of a HTTPResponse. The task_id can be specified to this view in one of two ways: * by making a request containing 'task_id' as a parameter with a single value Returns a dict containing status information for the specified task_id * by making a request containing 'task_ids' as a parameter, with a list of task_id values. Returns a dict of dicts, with the task_id as key, and the corresponding dict containing status information for the specified task_id Task_id values that are unrecognized are skipped. The dict with status information for a task contains the following keys: 'message': on complete tasks, status message reporting on final progress, or providing exception message if failed. 
For tasks in progress, indicates the current progress. 'succeeded': on complete tasks or tasks in progress, boolean value indicates if the task outcome was successful: did it achieve what it set out to do. This is in contrast with a successful task_state, which indicates that the task merely completed. 'task_id': id assigned by LMS and used by celery. 'task_state': state of task as stored in celery's result store. 'in_progress': boolean indicating if task is still running. 'task_progress': dict containing progress information. This includes: 'attempted': number of attempts made 'succeeded': number of attempts that "succeeded" 'total': number of possible subtasks to attempt 'action_name': user-visible verb to use in status messages. Should be past-tense. 'duration_ms': how long the task has (or had) been running. 'exception': name of exception class raised in failed tasks. 'message': returned for failed and revoked tasks. 'traceback': optional, returned if task failed and produced a traceback. """ output = {} if 'task_id' in request.REQUEST: task_id = request.REQUEST['task_id'] output = _get_instructor_task_status(task_id) elif 'task_ids[]' in request.REQUEST: tasks = request.REQUEST.getlist('task_ids[]') for task_id in tasks: task_output = _get_instructor_task_status(task_id) if task_output is not None: output[task_id] = task_output return HttpResponse(json.dumps(output, indent=4)) def get_task_completion_info(instructor_task): """ Construct progress message from progress information in InstructorTask entry. Returns (boolean, message string) duple, where the boolean indicates whether the task completed without incident. (It is possible for a task to attempt many sub-tasks, such as rescoring many students' problem responses, and while the task runs to completion, some of the students' responses could not be rescored.) Used for providing messages to instructor_task_status(), as well as external calls for providing course task submission history information. 
""" succeeded = False if instructor_task.task_state not in STATES_WITH_STATUS: return (succeeded, _("No status information available")) # we're more surprised if there is no output for a completed task, but just warn: if instructor_task.task_output is None: log.warning(_("No task_output information found for instructor_task {0}").format(instructor_task.task_id)) return (succeeded, _("No status information available")) try: task_output = json.loads(instructor_task.task_output) except ValueError: fmt = _("No parsable task_output information found for instructor_task {0}: {1}") log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output)) return (succeeded, _("No parsable status information available")) if instructor_task.task_state in [FAILURE, REVOKED]: return (succeeded, task_output.get('message', _('No message provided'))) if any([key not in task_output for key in ['action_name', 'attempted', 'total']]): fmt = _("Invalid task_output information found for instructor_task {0}: {1}") log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output)) return (succeeded, _("No progress status information available")) action_name = _(task_output['action_name']) num_attempted = task_output['attempted'] num_total = task_output['total'] # In earlier versions of this code, the key 'updated' was used instead of # (the more general) 'succeeded'. In order to support history that may contain # output with the old key, we check for values with both the old and the current # key, and simply sum them. 
num_succeeded = task_output.get('updated', 0) + task_output.get('succeeded', 0) num_skipped = task_output.get('skipped', 0) student = None problem_url = None email_id = None try: task_input = json.loads(instructor_task.task_input) except ValueError: fmt = _("No parsable task_input information found for instructor_task {0}: {1}") log.warning(fmt.format(instructor_task.task_id, instructor_task.task_input)) else: student = task_input.get('student') problem_url = task_input.get('problem_url') email_id = task_input.get('email_id') if instructor_task.task_state == PROGRESS: # special message for providing progress updates: # Translators: {action} is a past-tense verb that is localized separately. {attempted} and {succeeded} are counts. msg_format = _("Progress: {action} {succeeded} of {attempted} so far") elif student is not None and problem_url is not None: # this reports on actions on problems for a particular student: if num_attempted == 0: # Translators: {action} is a past-tense verb that is localized separately. {student} is a student identifier. msg_format = _("Unable to find submission to be {action} for student '{student}'") elif num_succeeded == 0: # Translators: {action} is a past-tense verb that is localized separately. {student} is a student identifier. msg_format = _("Problem failed to be {action} for student '{student}'") else: succeeded = True # Translators: {action} is a past-tense verb that is localized separately. {student} is a student identifier. msg_format = _("Problem successfully {action} for student '{student}'") elif student is None and problem_url is not None: # this reports on actions on problems for all students: if num_attempted == 0: # Translators: {action} is a past-tense verb that is localized separately. msg_format = _("Unable to find any students with submissions to be {action}") elif num_succeeded == 0: # Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count. 
msg_format = _("Problem failed to be {action} for any of {attempted} students") elif num_succeeded == num_attempted: succeeded = True # Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count. msg_format = _("Problem successfully {action} for {attempted} students") else: # num_succeeded < num_attempted # Translators: {action} is a past-tense verb that is localized separately. {succeeded} and {attempted} are counts. msg_format = _("Problem {action} for {succeeded} of {attempted} students") elif email_id is not None: # this reports on actions on bulk emails if num_attempted == 0: # Translators: {action} is a past-tense verb that is localized separately. msg_format = _("Unable to find any recipients to be {action}") elif num_succeeded == 0: # Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count. msg_format = _("Message failed to be {action} for any of {attempted} recipients ") elif num_succeeded == num_attempted: succeeded = True # Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count. msg_format = _("Message successfully {action} for {attempted} recipients") else: # num_succeeded < num_attempted # Translators: {action} is a past-tense verb that is localized separately. {succeeded} and {attempted} are counts. msg_format = _("Message {action} for {succeeded} of {attempted} recipients") else: # provide a default: # Translators: {action} is a past-tense verb that is localized separately. {succeeded} and {attempted} are counts. msg_format = _("Status: {action} {succeeded} of {attempted}") if num_skipped > 0: # Translators: {skipped} is a count. This message is appended to task progress status messages. msg_format += _(" (skipping {skipped})") if student is None and num_attempted != num_total: # Translators: {total} is a count. This message is appended to task progress status messages. 
msg_format += _(" (out of {total})") # Update status in task result object itself: message = msg_format.format( action=action_name, succeeded=num_succeeded, attempted=num_attempted, total=num_total, skipped=num_skipped, student=student ) return (succeeded, message)
agpl-3.0
rooshilp/CMPUT410W15-project
testenv/lib/python2.7/site-packages/PIL/FliImagePlugin.py
26
3460
# # The Python Imaging Library. # $Id$ # # FLI/FLC file handling. # # History: # 95-09-01 fl Created # 97-01-03 fl Fixed parser, setup decoder tile # 98-07-15 fl Renamed offset attribute to avoid name clash # # Copyright (c) Secret Labs AB 1997-98. # Copyright (c) Fredrik Lundh 1995-97. # # See the README file for information on usage and redistribution. # __version__ = "0.2" from PIL import Image, ImageFile, ImagePalette, _binary i8 = _binary.i8 i16 = _binary.i16le i32 = _binary.i32le o8 = _binary.o8 # # decoder def _accept(prefix): return i16(prefix[4:6]) in [0xAF11, 0xAF12] ## # Image plugin for the FLI/FLC animation format. Use the <b>seek</b> # method to load individual frames. class FliImageFile(ImageFile.ImageFile): format = "FLI" format_description = "Autodesk FLI/FLC Animation" def _open(self): # HEAD s = self.fp.read(128) magic = i16(s[4:6]) if not (magic in [0xAF11, 0xAF12] and i16(s[14:16]) in [0, 3] and # flags s[20:22] == b"\x00\x00"): # reserved raise SyntaxError("not an FLI/FLC file") # image characteristics self.mode = "P" self.size = i16(s[8:10]), i16(s[10:12]) # animation speed duration = i32(s[16:20]) if magic == 0xAF11: duration = (duration * 1000) / 70 self.info["duration"] = duration # look for palette palette = [(a, a, a) for a in range(256)] s = self.fp.read(16) self.__offset = 128 if i16(s[4:6]) == 0xF100: # prefix chunk; ignore it self.__offset = self.__offset + i32(s) s = self.fp.read(16) if i16(s[4:6]) == 0xF1FA: # look for palette chunk s = self.fp.read(6) if i16(s[4:6]) == 11: self._palette(palette, 2) elif i16(s[4:6]) == 4: self._palette(palette, 0) palette = [o8(r)+o8(g)+o8(b) for (r, g, b) in palette] self.palette = ImagePalette.raw("RGB", b"".join(palette)) # set things up to decode first frame self.frame = -1 self.__fp = self.fp self.seek(0) def _palette(self, palette, shift): # load palette i = 0 for e in range(i16(self.fp.read(2))): s = self.fp.read(2) i = i + i8(s[0]) n = i8(s[1]) if n == 0: n = 256 s = self.fp.read(n * 3) for 
n in range(0, len(s), 3): r = i8(s[n]) << shift g = i8(s[n+1]) << shift b = i8(s[n+2]) << shift palette[i] = (r, g, b) i += 1 def seek(self, frame): if frame != self.frame + 1: raise ValueError("cannot seek to frame %d" % frame) self.frame = frame # move to next frame self.fp = self.__fp self.fp.seek(self.__offset) s = self.fp.read(4) if not s: raise EOFError framesize = i32(s) self.decodermaxblock = framesize self.tile = [("fli", (0, 0)+self.size, self.__offset, None)] self.__offset = self.__offset + framesize def tell(self): return self.frame # # registry Image.register_open("FLI", FliImageFile, _accept) Image.register_extension("FLI", ".fli") Image.register_extension("FLI", ".flc")
gpl-2.0
danny200309/BuildingMachineLearningSystemsWithPython
ch03/rel_post_20news.py
24
3903
# This code is supporting material for the book # Building Machine Learning Systems with Python # by Willi Richert and Luis Pedro Coelho # published by PACKT Publishing # # It is made available under the MIT License import sklearn.datasets import scipy as sp new_post = \ """Disk drive problems. Hi, I have a problem with my hard disk. After 1 year it is working only sporadically now. I tried to format it, but now it doesn't boot any more. Any ideas? Thanks. """ print("""\ Dear reader of the 1st edition of 'Building Machine Learning Systems with Python'! For the 2nd edition we introduced a couple of changes that will result into results that differ from the results in the 1st edition. E.g. we now fully rely on scikit's fetch_20newsgroups() instead of requiring you to download the data manually from MLCOMP. If you have any questions, please ask at http://www.twotoreal.com """) all_data = sklearn.datasets.fetch_20newsgroups(subset="all") print("Number of total posts: %i" % len(all_data.filenames)) # Number of total posts: 18846 groups = [ 'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x', 'sci.space'] train_data = sklearn.datasets.fetch_20newsgroups(subset="train", categories=groups) print("Number of training posts in tech groups:", len(train_data.filenames)) # Number of training posts in tech groups: 3529 labels = train_data.target num_clusters = 50 # sp.unique(labels).shape[0] import nltk.stem english_stemmer = nltk.stem.SnowballStemmer('english') from sklearn.feature_extraction.text import TfidfVectorizer class StemmedTfidfVectorizer(TfidfVectorizer): def build_analyzer(self): analyzer = super(TfidfVectorizer, self).build_analyzer() return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc)) vectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5, stop_words='english', decode_error='ignore' ) vectorized = vectorizer.fit_transform(train_data.data) num_samples, num_features = vectorized.shape 
print("#samples: %d, #features: %d" % (num_samples, num_features)) # samples: 3529, #features: 4712 from sklearn.cluster import KMeans km = KMeans(n_clusters=num_clusters, n_init=1, verbose=1, random_state=3) clustered = km.fit(vectorized) print("km.labels_=%s" % km.labels_) # km.labels_=[ 6 34 22 ..., 2 21 26] print("km.labels_.shape=%s" % km.labels_.shape) # km.labels_.shape=3529 from sklearn import metrics print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_)) # Homogeneity: 0.400 print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_)) # Completeness: 0.206 print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_)) # V-measure: 0.272 print("Adjusted Rand Index: %0.3f" % metrics.adjusted_rand_score(labels, km.labels_)) # Adjusted Rand Index: 0.064 print("Adjusted Mutual Information: %0.3f" % metrics.adjusted_mutual_info_score(labels, km.labels_)) # Adjusted Mutual Information: 0.197 print(("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(vectorized, labels, sample_size=1000))) # Silhouette Coefficient: 0.006 new_post_vec = vectorizer.transform([new_post]) new_post_label = km.predict(new_post_vec)[0] similar_indices = (km.labels_ == new_post_label).nonzero()[0] similar = [] for i in similar_indices: dist = sp.linalg.norm((new_post_vec - vectorized[i]).toarray()) similar.append((dist, train_data.data[i])) similar = sorted(similar) print("Count similar: %i" % len(similar)) show_at_1 = similar[0] show_at_2 = similar[int(len(similar) / 10)] show_at_3 = similar[int(len(similar) / 2)] print("=== #1 ===") print(show_at_1) print() print("=== #2 ===") print(show_at_2) print() print("=== #3 ===") print(show_at_3)
mit
lwiecek/django
tests/template_tests/syntax_tests/test_static.py
335
2614
from django.conf import settings from django.test import SimpleTestCase, override_settings from django.utils.six.moves.urllib.parse import urljoin from ..utils import setup @override_settings(MEDIA_URL="/media/", STATIC_URL="/static/") class StaticTagTests(SimpleTestCase): libraries = {'static': 'django.templatetags.static'} @setup({'static-prefixtag01': '{% load static %}{% get_static_prefix %}'}) def test_static_prefixtag01(self): output = self.engine.render_to_string('static-prefixtag01') self.assertEqual(output, settings.STATIC_URL) @setup({'static-prefixtag02': '{% load static %}' '{% get_static_prefix as static_prefix %}{{ static_prefix }}'}) def test_static_prefixtag02(self): output = self.engine.render_to_string('static-prefixtag02') self.assertEqual(output, settings.STATIC_URL) @setup({'static-prefixtag03': '{% load static %}{% get_media_prefix %}'}) def test_static_prefixtag03(self): output = self.engine.render_to_string('static-prefixtag03') self.assertEqual(output, settings.MEDIA_URL) @setup({'static-prefixtag04': '{% load static %}' '{% get_media_prefix as media_prefix %}{{ media_prefix }}'}) def test_static_prefixtag04(self): output = self.engine.render_to_string('static-prefixtag04') self.assertEqual(output, settings.MEDIA_URL) @setup({'static-statictag01': '{% load static %}{% static "admin/base.css" %}'}) def test_static_statictag01(self): output = self.engine.render_to_string('static-statictag01') self.assertEqual(output, urljoin(settings.STATIC_URL, 'admin/base.css')) @setup({'static-statictag02': '{% load static %}{% static base_css %}'}) def test_static_statictag02(self): output = self.engine.render_to_string('static-statictag02', {'base_css': 'admin/base.css'}) self.assertEqual(output, urljoin(settings.STATIC_URL, 'admin/base.css')) @setup({'static-statictag03': '{% load static %}{% static "admin/base.css" as foo %}{{ foo }}'}) def test_static_statictag03(self): output = self.engine.render_to_string('static-statictag03') 
self.assertEqual(output, urljoin(settings.STATIC_URL, 'admin/base.css')) @setup({'static-statictag04': '{% load static %}{% static base_css as foo %}{{ foo }}'}) def test_static_statictag04(self): output = self.engine.render_to_string('static-statictag04', {'base_css': 'admin/base.css'}) self.assertEqual(output, urljoin(settings.STATIC_URL, 'admin/base.css'))
bsd-3-clause
wjwwood/parse_cmake
tests/test_parsing.py
1
6153
# Copyright 2015 Open Source Robotics Foundation, Inc. # Copyright 2013 Willow Garage, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import parse_cmake.parsing from parse_cmake.parsing import ( File, Command, Comment, BlankLine, Arg, parse, FormattingOptions) def prettify(src): opts = FormattingOptions() opts.indent = ' ' return parse_cmake.parsing.prettify(src, opts) class ParsingTestCase(unittest.TestCase): def setUp(self): self.maxDiff = None def test_parse_empty_raises_exception(self): self.assertEqual(File([]), parse('')) def test_parse_nonempty1(self): input = 'FIND_PACKAGE(ITK REQUIRED)' output = parse(input) expected = File([Command('FIND_PACKAGE', [Arg('ITK'), Arg('REQUIRED')])]) msg = '\nexpected\n%s\ngot\n%s' % (repr(expected), repr(output)) self.assertEqual(expected, output, msg) def test_parse_nonempty2(self): input = '''\ # Top level comment FIND_PACKAGE(ITK REQUIRED) INCLUDE(${ITK_USE_FILE}) ADD_EXECUTABLE(CastImageFilter CastImageFilter.cxx) TARGET_LINK_LIBRARIES(CastImageFilter # inline comment 1 vtkHybrid #inline comment 2 ITKIO ITKBasicFilters ITKCommon ) ''' output = parse(input) expected = File([ Comment('# Top level comment'), Command('FIND_PACKAGE', [Arg('ITK'), Arg('REQUIRED')]), Command('INCLUDE', [Arg('${ITK_USE_FILE}')]), BlankLine(), Command('ADD_EXECUTABLE', [Arg('CastImageFilter'), Arg('CastImageFilter.cxx')]), Command('TARGET_LINK_LIBRARIES', [ Arg('CastImageFilter', comments=['# inline comment 1']), Arg('vtkHybrid', 
comments=['#inline comment 2']), Arg('ITKIO'), Arg('ITKBasicFilters'), Arg('ITKCommon'), ]) ]) msg = '\nexpected\n%s\ngot\n%s' % (expected, output) self.assertEqual(expected, output, msg) def test_idempotency_of_parsing_and_unparsing(self): input = '''\ # Top level comment FIND_PACKAGE(ITK REQUIRED) INCLUDE(${ITK_USE_FILE}) ''' round_trip = lambda s: str(parse(s)) self.assertEqual(round_trip(input), round_trip(round_trip(input))) def test_invalid_format_raises_an_exception(self): input = 'FIND_PACKAGE(' self.assertRaises(Exception, parse, input) def test_line_numbers_in_exceptions(self): input = '''\ FIND_PACKAGE(ITK) INCLUDE( ''' try: parse(input) self.fail('Expected an exception, but none was raised.') except Exception as e: self.assertTrue('line 2' in str(e)) def test_arg_with_a_slash(self): tree = parse('include_directories (${HELLO_SOURCE_DIR}/Hello)') expected = File([ Command('include_directories', [Arg('${HELLO_SOURCE_DIR}/Hello')]) ]) self.assertEqual(expected, tree) def test_command_with_no_args(self): tree = parse('cmd()') expected = File([Command('cmd', [])]) self.assertEqual(expected, tree) def assertStringEqualIgnoreSpace(self, a, b): a2 = ''.join(a.split()) b2 = ''.join(b.split()) msg = '\nExpected\n%s\ngot\n%s\n(ignoring whitespace details)' % (a, b) self.assertEqual(a2, b2, msg) def test_arg_comments_preserved(self): input = ''' some_command(x # inline comment about x ) ''' self.assertMultiLineEqual(input, prettify(input)) def test_comments_preserved(self): input = '''\ # file comment # more about the file # comment above Command1 command1(VERSION 2.6) # inline comment for Command1 command2(x # inline comment about x "y" # inline comment about a quoted string "y" ) # inline comment for Command2 ''' self.assertMultiLineEqual(input, prettify(input)) def test_multiline_string(self): s = ''' string containing newlines ''' input = '''\ set (MY_STRING "%s") ''' % s tree = parse(input) expected = File([Command('set', [Arg('MY_STRING'), Arg('"' + s + 
'"')])]) self.assertEqual(expected, tree) # TODO: test macros and functions def test_ifs_indented(self): input = ''' if(a) if(b) set(X 1) endif() elseif(a) if(foo) set(Z 3) endif() else(a) if(c) set(Y 2) endif(c) endif(a) ''' self.assertMultiLineEqual(input, prettify(input)) def test_macros_indented(self): input = ''' macro(hello MESSAGE) message(${MESSAGE}) endmacro(hello) # call the macro with the string "hello world" hello("hello world") ''' self.assertUnchangedByPrettyPrinting(input) def test_functions_indented(self): input = ''' function(hello MESSAGE) message(${MESSAGE}) endfunction(hello) # call the macro with the string "hello world" hello("hello world") ''' self.assertUnchangedByPrettyPrinting(input) def test_loops_indented(self): input = ''' foreach(var ${LIST}) command(var) endforeach() while(cond) command(var) endwhile() ''' self.assertUnchangedByPrettyPrinting(input) def test_breaks_commands_at_parameter_names(self): input = ''' set_source_files_properties(source_file.cpp PROPERTIES COMPILE_FLAGS "-Wsome-error -Wanother-error -fsome-flag") ''' self.assertMultiLineEqual(input, prettify(input)) def assertUnchangedByPrettyPrinting(self, input): self.assertMultiLineEqual(input, prettify(input)) if __name__ == '__main__': unittest.main()
apache-2.0
carlvlewis/bokeh
bokeh/server/app.py
29
1288
from __future__ import absolute_import import flask from os import walk from os.path import join from bokeh.settings import settings class BokehBlueprint(flask.Blueprint): def __init__(self, *args, **kwargs): super(BokehBlueprint, self).__init__(*args, **kwargs) self.debugjs = None def setup(self, backend, backbone_storage, servermodel_storage, authentication): self.backend = backend self.backbone_storage = backbone_storage self.servermodel_storage = servermodel_storage self.authentication = authentication self.bokehjsdir = settings.bokehjsdir() self.bokehjssrcdir = settings.bokehjssrcdir() def current_user(self): return self.authentication.current_user() def js_files(self): bokehjsdir = self.bokehjsdir js_files = [] for root, dirnames, files in walk(bokehjsdir): for fname in files: if fname.endswith(".js") and 'vendor' not in root: js_files.append(join(root, fname)) return js_files bokeh_app = BokehBlueprint( 'bokeh.server', 'bokeh.server', static_folder='static', static_url_path='/bokeh/static', template_folder='templates' ) app = flask.Flask("bokeh.server")
bsd-3-clause
alexkolar/home-assistant
homeassistant/components/modbus.py
10
2759
""" homeassistant.components.modbus ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Modbus component, using pymodbus (python3 branch). Configuration: To use the Modbus component you will need to add something like the following to your configuration.yaml file. #Modbus TCP modbus: type: tcp host: 127.0.0.1 port: 2020 #Modbus RTU modbus: type: serial method: rtu port: /dev/ttyUSB0 baudrate: 9600 stopbits: 1 bytesize: 8 parity: N """ import logging from homeassistant.const import (EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP) DOMAIN = "modbus" DEPENDENCIES = [] REQUIREMENTS = ['https://github.com/bashwork/pymodbus/archive/' 'd7fc4f1cc975631e0a9011390e8017f64b612661.zip#pymodbus==1.2.0'] # Type of network MEDIUM = "type" # if MEDIUM == "serial" METHOD = "method" SERIAL_PORT = "port" BAUDRATE = "baudrate" STOPBITS = "stopbits" BYTESIZE = "bytesize" PARITY = "parity" # if MEDIUM == "tcp" or "udp" HOST = "host" IP_PORT = "port" _LOGGER = logging.getLogger(__name__) NETWORK = None TYPE = None def setup(hass, config): """ Setup Modbus component. """ # Modbus connection type # pylint: disable=global-statement, import-error global TYPE TYPE = config[DOMAIN][MEDIUM] # Connect to Modbus network # pylint: disable=global-statement, import-error global NETWORK if TYPE == "serial": from pymodbus.client.sync import ModbusSerialClient as ModbusClient NETWORK = ModbusClient(method=config[DOMAIN][METHOD], port=config[DOMAIN][SERIAL_PORT], baudrate=config[DOMAIN][BAUDRATE], stopbits=config[DOMAIN][STOPBITS], bytesize=config[DOMAIN][BYTESIZE], parity=config[DOMAIN][PARITY]) elif TYPE == "tcp": from pymodbus.client.sync import ModbusTcpClient as ModbusClient NETWORK = ModbusClient(host=config[DOMAIN][HOST], port=config[DOMAIN][IP_PORT]) elif TYPE == "udp": from pymodbus.client.sync import ModbusUdpClient as ModbusClient NETWORK = ModbusClient(host=config[DOMAIN][HOST], port=config[DOMAIN][IP_PORT]) else: return False def stop_modbus(event): """ Stop Modbus service. 
""" NETWORK.close() def start_modbus(event): """ Start Modbus service. """ NETWORK.connect() hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_modbus) hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_modbus) # Tells the bootstrapper that the component was successfully initialized return True
mit
shacker/django
django/db/backends/base/features.py
4
10696
from django.db.models.aggregates import StdDev from django.db.utils import NotSupportedError, ProgrammingError from django.utils.functional import cached_property class BaseDatabaseFeatures: gis_enabled = False allows_group_by_pk = False allows_group_by_selected_pks = False empty_fetchmany_value = [] update_can_self_select = True # Does the backend distinguish between '' and None? interprets_empty_strings_as_nulls = False # Does the backend allow inserting duplicate NULL rows in a nullable # unique field? All core backends implement this correctly, but other # databases such as SQL Server do not. supports_nullable_unique_constraints = True # Does the backend allow inserting duplicate rows when a unique_together # constraint exists and some fields are nullable but not all of them? supports_partially_nullable_unique_constraints = True can_use_chunked_reads = True can_return_id_from_insert = False can_return_ids_from_bulk_insert = False has_bulk_insert = True uses_savepoints = False can_release_savepoints = False # If True, don't use integer foreign keys referring to, e.g., positive # integer primary keys. related_fields_match_type = False allow_sliced_subqueries = True has_select_for_update = False has_select_for_update_nowait = False has_select_for_update_skip_locked = False has_select_for_update_of = False # Does the database's SELECT FOR UPDATE OF syntax require a column rather # than a table? select_for_update_of_column = False # Does the default test database allow multiple connections? # Usually an indication that the test database is in-memory test_db_allows_multiple_connections = True # Can an object be saved without an explicit primary key? supports_unspecified_pk = False # Can a fixture contain forward references? i.e., are # FK constraints checked at the end of transaction, or # at the end of each save operation? supports_forward_references = True # Does the backend truncate names properly when they are too long? 
truncates_names = False # Is there a REAL datatype in addition to floats/doubles? has_real_datatype = False supports_subqueries_in_group_by = True # Is there a true datatype for uuid? has_native_uuid_field = False # Is there a true datatype for timedeltas? has_native_duration_field = False # Does the database driver supports same type temporal data subtraction # by returning the type used to store duration field? supports_temporal_subtraction = False # Does the __regex lookup support backreferencing and grouping? supports_regex_backreferencing = True # Can date/datetime lookups be performed using a string? supports_date_lookup_using_string = True # Can datetimes with timezones be used? supports_timezones = True # Does the database have a copy of the zoneinfo database? has_zoneinfo_database = True # When performing a GROUP BY, is an ORDER BY NULL required # to remove any ordering? requires_explicit_null_ordering_when_grouping = False # Does the backend order NULL values as largest or smallest? nulls_order_largest = False # The database's limit on the number of query parameters. max_query_params = None # Can an object have an autoincrement primary key of 0? MySQL says No. allows_auto_pk_0 = True # Do we need to NULL a ForeignKey out, or can the constraint check be # deferred can_defer_constraint_checks = False # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas supports_mixed_date_datetime_comparisons = True # Does the backend support tablespaces? Default to False because it isn't # in the SQL standard. supports_tablespaces = False # Does the backend reset sequences between tests? supports_sequence_reset = True # Can the backend determine reliably if a field is nullable? # Note that this is separate from interprets_empty_strings_as_nulls, # although the latter feature, when true, interferes with correct # setting (and introspection) of CharFields' nullability. # This is True for all core backends. 
can_introspect_null = True # Can the backend introspect the default value of a column? can_introspect_default = True # Confirm support for introspected foreign keys # Every database can do this reliably, except MySQL, # which can't do it for MyISAM tables can_introspect_foreign_keys = True # Can the backend introspect an AutoField, instead of an IntegerField? can_introspect_autofield = False # Can the backend introspect a BigIntegerField, instead of an IntegerField? can_introspect_big_integer_field = True # Can the backend introspect an BinaryField, instead of an TextField? can_introspect_binary_field = True # Can the backend introspect an DecimalField, instead of an FloatField? can_introspect_decimal_field = True # Can the backend introspect an IPAddressField, instead of an CharField? can_introspect_ip_address_field = False # Can the backend introspect a PositiveIntegerField, instead of an IntegerField? can_introspect_positive_integer_field = False # Can the backend introspect a SmallIntegerField, instead of an IntegerField? can_introspect_small_integer_field = False # Can the backend introspect a TimeField, instead of a DateTimeField? can_introspect_time_field = True # Can the backend introspect the column order (ASC/DESC) for indexes? supports_index_column_ordering = True # Support for the DISTINCT ON clause can_distinct_on_fields = False # Does the backend decide to commit before SAVEPOINT statements # when autocommit is disabled? http://bugs.python.org/issue8145#msg109965 autocommits_when_autocommit_is_off = False # Does the backend prevent running SQL queries in broken transactions? atomic_transactions = True # Can we roll back DDL in a transaction? can_rollback_ddl = False # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE? supports_combined_alters = False # Does it support foreign keys? supports_foreign_keys = True # Does it support CHECK constraints? supports_column_check_constraints = True # Does the backend support 'pyformat' style ("... 
%(name)s ...", {'name': value}) # parameter passing? Note this can be provided by the backend even if not # supported by the Python driver supports_paramstyle_pyformat = True # Does the backend require literal defaults, rather than parameterized ones? requires_literal_defaults = False # Does the backend require a connection reset after each material schema change? connection_persists_old_columns = False # What kind of error does the backend throw when accessing closed cursor? closed_cursor_error_class = ProgrammingError # Does 'a' LIKE 'A' match? has_case_insensitive_like = True # Does the backend require the sqlparse library for splitting multi-line # statements before executing them? requires_sqlparse_for_splitting = True # Suffix for backends that don't support "SELECT xxx;" queries. bare_select_suffix = '' # If NULL is implied on columns without needing to be explicitly specified implied_column_null = False uppercases_column_names = False # Does the backend support "select for update" queries with limit (and offset)? supports_select_for_update_with_limit = True # Does the backend ignore null expressions in GREATEST and LEAST queries unless # every expression is null? greatest_least_ignores_nulls = False # Can the backend clone databases for parallel test execution? # Defaults to False to allow third-party backends to opt-in. can_clone_databases = False # Does the backend consider table names with different casing to # be equal? ignores_table_name_case = False # Place FOR UPDATE right after FROM clause. Used on MSSQL. for_update_after_from = False # Combinatorial flags supports_select_union = True supports_select_intersection = True supports_select_difference = True supports_slicing_ordering_in_compound = False # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate # expressions? supports_aggregate_filter_clause = False # Does the backend support indexing a TextField? 
supports_index_on_text_field = True # Does the backed support window expressions (expression OVER (...))? supports_over_clause = False # Does the backend support CAST with precision? supports_cast_with_precision = True # SQL to create a procedure for use by the Django test suite. The # functionality of the procedure isn't important. create_test_procedure_without_params_sql = None create_test_procedure_with_int_param_sql = None # Does the backend support keyword parameters for cursor.callproc()? supports_callproc_kwargs = False def __init__(self, connection): self.connection = connection @cached_property def supports_transactions(self): """Confirm support for transactions.""" with self.connection.cursor() as cursor: cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)') self.connection.set_autocommit(False) cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)') self.connection.rollback() self.connection.set_autocommit(True) cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST') count, = cursor.fetchone() cursor.execute('DROP TABLE ROLLBACK_TEST') return count == 0 @cached_property def supports_stddev(self): """Confirm support for STDDEV and related stats functions.""" try: self.connection.ops.check_expression_support(StdDev(1)) except NotSupportedError: return False return True def introspected_boolean_field_type(self, field=None): """ What is the type returned when the backend introspects a BooleanField? The `field` argument may be used to give further details of the field to be introspected. The return value from this function is compared by tests against actual introspection results; it should provide expectations, not run an introspection itself. """ if self.can_introspect_null and field and field.null: return 'NullBooleanField' return 'BooleanField'
bsd-3-clause
jaywreddy/django
django/core/management/commands/runserver.py
203
7383
from __future__ import unicode_literals import errno import os import re import socket import sys from datetime import datetime from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.management.base import BaseCommand, CommandError from django.core.servers.basehttp import get_internal_wsgi_application, run from django.db import DEFAULT_DB_ALIAS, connections from django.db.migrations.exceptions import MigrationSchemaMissing from django.db.migrations.executor import MigrationExecutor from django.utils import autoreload, six from django.utils.encoding import force_text, get_system_encoding naiveip_re = re.compile(r"""^(?: (?P<addr> (?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address (?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address (?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN ):)?(?P<port>\d+)$""", re.X) class Command(BaseCommand): help = "Starts a lightweight Web server for development." # Validation is called explicitly each time the server is reloaded. requires_system_checks = False leave_locale_alone = True default_port = '8000' def add_arguments(self, parser): parser.add_argument('addrport', nargs='?', help='Optional port number, or ipaddr:port') parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False, help='Tells Django to use an IPv6 address.') parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True, help='Tells Django to NOT use threading.') parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True, help='Tells Django to NOT use the auto-reloader.') def execute(self, *args, **options): if options.get('no_color'): # We rely on the environment because it's currently the only # way to reach WSGIRequestHandler. This seems an acceptable # compromise considering `runserver` runs indefinitely. 
os.environ[str("DJANGO_COLORS")] = str("nocolor") super(Command, self).execute(*args, **options) def get_handler(self, *args, **options): """ Returns the default WSGI handler for the runner. """ return get_internal_wsgi_application() def handle(self, *args, **options): from django.conf import settings if not settings.DEBUG and not settings.ALLOWED_HOSTS: raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.') self.use_ipv6 = options.get('use_ipv6') if self.use_ipv6 and not socket.has_ipv6: raise CommandError('Your Python does not support IPv6.') self._raw_ipv6 = False if not options.get('addrport'): self.addr = '' self.port = self.default_port else: m = re.match(naiveip_re, options['addrport']) if m is None: raise CommandError('"%s" is not a valid port number ' 'or address:port pair.' % options['addrport']) self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups() if not self.port.isdigit(): raise CommandError("%r is not a valid port number." % self.port) if self.addr: if _ipv6: self.addr = self.addr[1:-1] self.use_ipv6 = True self._raw_ipv6 = True elif self.use_ipv6 and not _fqdn: raise CommandError('"%s" is not a valid IPv6 address.' % self.addr) if not self.addr: self.addr = '::1' if self.use_ipv6 else '127.0.0.1' self._raw_ipv6 = bool(self.use_ipv6) self.run(**options) def run(self, **options): """ Runs the server, using the autoreloader if needed """ use_reloader = options.get('use_reloader') if use_reloader: autoreload.main(self.inner_run, None, options) else: self.inner_run(None, **options) def inner_run(self, *args, **options): # If an exception was silenced in ManagementUtility.execute in order # to be raised in the child process, raise it now. 
autoreload.raise_last_exception() threading = options.get('use_threading') shutdown_message = options.get('shutdown_message', '') quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C' self.stdout.write("Performing system checks...\n\n") self.check(display_num_errors=True) self.check_migrations() now = datetime.now().strftime('%B %d, %Y - %X') if six.PY2: now = now.decode(get_system_encoding()) self.stdout.write(now) self.stdout.write(( "Django version %(version)s, using settings %(settings)r\n" "Starting development server at http://%(addr)s:%(port)s/\n" "Quit the server with %(quit_command)s.\n" ) % { "version": self.get_version(), "settings": settings.SETTINGS_MODULE, "addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr, "port": self.port, "quit_command": quit_command, }) try: handler = self.get_handler(*args, **options) run(self.addr, int(self.port), handler, ipv6=self.use_ipv6, threading=threading) except socket.error as e: # Use helpful error messages instead of ugly tracebacks. ERRORS = { errno.EACCES: "You don't have permission to access that port.", errno.EADDRINUSE: "That port is already in use.", errno.EADDRNOTAVAIL: "That IP address can't be assigned to.", } try: error_text = ERRORS[e.errno] except KeyError: error_text = force_text(e) self.stderr.write("Error: %s" % error_text) # Need to use an OS exit because sys.exit doesn't work in a thread os._exit(1) except KeyboardInterrupt: if shutdown_message: self.stdout.write(shutdown_message) sys.exit(0) def check_migrations(self): """ Checks to see if the set of migrations on disk matches the migrations in the database. Prints a warning if they don't match. """ try: executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS]) except ImproperlyConfigured: # No databases are configured (or the dummy one) return except MigrationSchemaMissing: self.stdout.write(self.style.NOTICE( "\nNot checking migrations as it is not possible to access/create the django_migrations table." 
)) return plan = executor.migration_plan(executor.loader.graph.leaf_nodes()) if plan: self.stdout.write(self.style.NOTICE( "\nYou have unapplied migrations; your app may not work properly until they are applied." )) self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n")) # Kept for backward compatibility BaseRunserverCommand = Command
bsd-3-clause
rackerlabs/melange
melange/db/sqlalchemy/migrate_repo/versions/006_single_table_allocation.py
1
1343
#!/usr/bin/env python # Copyright 2012 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import Column from sqlalchemy.schema import MetaData from sqlalchemy.schema import Table from melange.db.sqlalchemy.migrate_repo.schema import Boolean def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine ip_addresses = Table('ip_addresses', meta, autoload=True) allocated = Column('allocated', Boolean(), default=False) ip_addresses.create_column(allocated) migrate_engine.execute(ip_addresses.update().values(allocated=True)) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine ip_addresses = Table('ip_addresses', meta, autoload=True) ip_addresses.drop_column('allocated')
apache-2.0
tonybaloney/st2
st2api/tests/unit/controllers/v1/test_triggertypes.py
1
6234
# Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import six from tests import FunctionalTest http_client = six.moves.http_client TRIGGER_0 = { 'name': 'st2.test.triggertype0', 'pack': 'dummy_pack_1', 'description': 'test trigger', 'payload_schema': {'tp1': None, 'tp2': None, 'tp3': None}, 'parameters_schema': {} } TRIGGER_1 = { 'name': 'st2.test.triggertype1', 'pack': 'dummy_pack_2', 'description': 'test trigger', 'payload_schema': {'tp1': None, 'tp2': None, 'tp3': None}, } TRIGGER_2 = { 'name': 'st2.test.triggertype3', 'pack': 'dummy_pack_3', 'description': 'test trigger', 'payload_schema': {'tp1': None, 'tp2': None, 'tp3': None}, 'parameters_schema': {'param1': {'type': 'object'}} } class TestTriggerTypeController(FunctionalTest): @classmethod def setUpClass(cls): # super's setUpClass does the following: # - create DB connections, sets up a fresh DB etc. # - creates all the controllers by instantiating the pecan app. # The WebHookController ends up registering a TriggerType in its __init__ # which is why when this test is run individually it simply falls apart. # When run in a suite the pecan app creation is somehow optimized and since # this is not the first test to run its all good as some other test performs # the DB cleanup. 
This is the unfortunate story of why these two lines in this # exact order are needed. There are perhaps other ways to fix the problem # however this is the most localized solution for now. super(TestTriggerTypeController, cls).setUpClass() cls._establish_connection_and_re_create_db() def test_get_all(self): post_resp = self.__do_post(TRIGGER_0) trigger_id_0 = self.__get_trigger_id(post_resp) post_resp = self.__do_post(TRIGGER_1) trigger_id_1 = self.__get_trigger_id(post_resp) resp = self.app.get('/v1/triggertypes') self.assertEqual(resp.status_int, http_client.OK) self.assertEqual(len(resp.json), 2, 'Get all failure.') # ?pack query filter resp = self.app.get('/v1/triggertypes?pack=doesnt-exist-invalid') self.assertEqual(resp.status_int, http_client.OK) self.assertEqual(len(resp.json), 0) resp = self.app.get('/v1/triggertypes?pack=%s' % (TRIGGER_0['pack'])) self.assertEqual(resp.status_int, http_client.OK) self.assertEqual(len(resp.json), 1) self.assertEqual(resp.json[0]['pack'], TRIGGER_0['pack']) self.__do_delete(trigger_id_0) self.__do_delete(trigger_id_1) def test_get_one(self): post_resp = self.__do_post(TRIGGER_1) trigger_id = self.__get_trigger_id(post_resp) get_resp = self.__do_get_one(trigger_id) self.assertEqual(get_resp.status_int, http_client.OK) self.assertEqual(self.__get_trigger_id(get_resp), trigger_id) self.__do_delete(trigger_id) def test_get_one_fail(self): resp = self.__do_get_one('1') self.assertEqual(resp.status_int, http_client.NOT_FOUND) def test_post(self): post_resp = self.__do_post(TRIGGER_1) self.assertEqual(post_resp.status_int, http_client.CREATED) self.__do_delete(self.__get_trigger_id(post_resp)) def test_post_with_params(self): post_resp = self.__do_post(TRIGGER_2) self.assertEqual(post_resp.status_int, http_client.CREATED) self.__do_delete(self.__get_trigger_id(post_resp)) def test_post_duplicate(self): post_resp = self.__do_post(TRIGGER_1) org_id = self.__get_trigger_id(post_resp) self.assertEqual(post_resp.status_int, 
http_client.CREATED) post_resp_2 = self.__do_post(TRIGGER_1) self.assertEqual(post_resp_2.status_int, http_client.CONFLICT) self.assertEqual(post_resp_2.json['conflict-id'], org_id) self.__do_delete(org_id) def test_put(self): post_resp = self.__do_post(TRIGGER_1) update_input = post_resp.json update_input['description'] = 'updated description.' put_resp = self.__do_put(self.__get_trigger_id(post_resp), update_input) self.assertEqual(put_resp.status_int, http_client.OK) self.__do_delete(self.__get_trigger_id(put_resp)) def test_put_fail(self): post_resp = self.__do_post(TRIGGER_1) update_input = post_resp.json # If the id in the URL is incorrect the update will fail since id in the body is ignored. put_resp = self.__do_put(1, update_input) self.assertEqual(put_resp.status_int, http_client.NOT_FOUND) self.__do_delete(self.__get_trigger_id(post_resp)) def test_delete(self): post_resp = self.__do_post(TRIGGER_1) del_resp = self.__do_delete(self.__get_trigger_id(post_resp)) self.assertEqual(del_resp.status_int, http_client.NO_CONTENT) @staticmethod def __get_trigger_id(resp): return resp.json['id'] def __do_get_one(self, trigger_id): return self.app.get('/v1/triggertypes/%s' % trigger_id, expect_errors=True) def __do_post(self, trigger): return self.app.post_json('/v1/triggertypes', trigger, expect_errors=True) def __do_put(self, trigger_id, trigger): return self.app.put_json('/v1/triggertypes/%s' % trigger_id, trigger, expect_errors=True) def __do_delete(self, trigger_id): return self.app.delete('/v1/triggertypes/%s' % trigger_id)
apache-2.0
tmerrick1/spack
lib/spack/external/_pytest/outcomes.py
26
4366
""" exception classes and constants handling test outcomes as well as functions creating them """ from __future__ import absolute_import, division, print_function import py import sys class OutcomeException(BaseException): """ OutcomeException and its subclass instances indicate and contain info about test and collection outcomes. """ def __init__(self, msg=None, pytrace=True): BaseException.__init__(self, msg) self.msg = msg self.pytrace = pytrace def __repr__(self): if self.msg: val = self.msg if isinstance(val, bytes): val = py._builtin._totext(val, errors='replace') return val return "<%s instance>" % (self.__class__.__name__,) __str__ = __repr__ TEST_OUTCOME = (OutcomeException, Exception) class Skipped(OutcomeException): # XXX hackish: on 3k we fake to live in the builtins # in order to have Skipped exception printing shorter/nicer __module__ = 'builtins' def __init__(self, msg=None, pytrace=True, allow_module_level=False): OutcomeException.__init__(self, msg=msg, pytrace=pytrace) self.allow_module_level = allow_module_level class Failed(OutcomeException): """ raised from an explicit call to pytest.fail() """ __module__ = 'builtins' class Exit(KeyboardInterrupt): """ raised for immediate program exits (no tracebacks/summaries)""" def __init__(self, msg="unknown reason"): self.msg = msg KeyboardInterrupt.__init__(self, msg) # exposed helper methods def exit(msg): """ exit testing process as if KeyboardInterrupt was triggered. """ __tracebackhide__ = True raise Exit(msg) exit.Exception = Exit def skip(msg=""): """ skip an executing test with the given message. Note: it's usually better to use the pytest.mark.skipif marker to declare a test to be skipped under certain conditions like mismatching platforms or dependencies. See the pytest_skipping plugin for details. """ __tracebackhide__ = True raise Skipped(msg=msg) skip.Exception = Skipped def fail(msg="", pytrace=True): """ explicitly fail an currently-executing test with the given Message. 
:arg pytrace: if false the msg represents the full failure information and no python traceback will be reported. """ __tracebackhide__ = True raise Failed(msg=msg, pytrace=pytrace) fail.Exception = Failed class XFailed(fail.Exception): """ raised from an explicit call to pytest.xfail() """ def xfail(reason=""): """ xfail an executing test or setup functions with the given reason.""" __tracebackhide__ = True raise XFailed(reason) xfail.Exception = XFailed def importorskip(modname, minversion=None): """ return imported module if it has at least "minversion" as its __version__ attribute. If no minversion is specified the a skip is only triggered if the module can not be imported. """ import warnings __tracebackhide__ = True compile(modname, '', 'eval') # to catch syntaxerrors should_skip = False with warnings.catch_warnings(): # make sure to ignore ImportWarnings that might happen because # of existing directories with the same name we're trying to # import but without a __init__.py file warnings.simplefilter('ignore') try: __import__(modname) except ImportError: # Do not raise chained exception here(#1485) should_skip = True if should_skip: raise Skipped("could not import %r" % (modname,), allow_module_level=True) mod = sys.modules[modname] if minversion is None: return mod verattr = getattr(mod, '__version__', None) if minversion is not None: try: from pkg_resources import parse_version as pv except ImportError: raise Skipped("we have a required version for %r but can not import " "pkg_resources to parse version strings." % (modname,), allow_module_level=True) if verattr is None or pv(verattr) < pv(minversion): raise Skipped("module %r has __version__ %r, required is: %r" % ( modname, verattr, minversion), allow_module_level=True) return mod
lgpl-2.1
serge-sans-paille/pythran
pythran/tests/test_named_parameters.py
1
3516
from pythran.tests import TestEnv from pythran.syntax import PythranSyntaxError class TestNamedParameters(TestEnv): def test_call_with_named_argument(self): self.run_test(""" def foo(a): return a def call_with_named_argument(n): return foo(a=n)""", 1, call_with_named_argument=[int]) def test_call_with_named_arguments(self): self.run_test(""" def foo(a,b): return a / b def call_with_named_arguments(n): return foo(b=n, a=2*n)""", 1, call_with_named_arguments=[int]) def test_call_with_args_and_named_argument(self): self.run_test(""" def foo(a, b): return a - b def call_with_args_and_named_argument(m,n): return foo(m, b=n)""", 1, 2, call_with_args_and_named_argument=[int, int]) def test_call_with_args_and_named_arguments(self): self.run_test(""" def foo(a,b,c): return c + a / b def call_with_args_and_named_arguments(n, m): return foo(m, c=2*n, b=n)""", 1, 2, call_with_args_and_named_arguments=[int, int]) def test_call_with_default_and_named_argument(self): self.run_test(""" def foo(a, b=1): return a - b def call_with_default_and_named_argument(m,n): return foo(a=m)""", 1, 2, call_with_default_and_named_argument=[int, int]) def test_call_with_default_and_named_arguments(self): self.run_test(""" def foo(a,b,c=1): return c + a / b def call_with_default_and_named_arguments(n, m): return foo(m, b=n)""", 1, 2, call_with_default_and_named_arguments=[int, int]) def test_intrinsic_named_argument(self): """ Check named arguments with attributes as value. 
""" self.run_test(""" def intrinsic_named_argument(n): import numpy return numpy.ones(n, dtype=numpy.uint8).nbytes""", 4, intrinsic_named_argument=[int]) def test_nested_function_with_named_arguments(self): self.run_test(''' def nested_function_with_named_arguments(a): b = a * 2 def foo(c): return b + c return foo(c=a)''', 4, nested_function_with_named_arguments=[int]) def test_nested_function_with_several_named_arguments(self): self.run_test(''' def nested_function_with_several_named_arguments(a): b = a * 2 def foo(c, e): return b + c + e return foo(e = 4, c=a)''', 4, nested_function_with_several_named_arguments=[int]) def test_aliasing_functions_with_named_arguments(self): self.run_test(''' def aliasing_functions_with_named_arguments(n): import numpy if n > 10: my = numpy.ones else: my = numpy.zeros return my(n, dtype=numpy.uint8).nbytes''', 4, aliasing_functions_with_named_arguments=[int]) def test_aliasing_functions_with_different_structural_types(self): with self.assertRaises(PythranSyntaxError): self.run_test(''' def aliasing_functions_with_different_structural_types(n): import numpy if n > 10: my = sum else: my = numpy.zeros return my(n, dtype=numpy.uint8).nbytes''', 4, aliasing_functions_with_different_structural_types=[int]) def test_default_argument_all_filled(self): code = ''' def default_argument_all_filled(x): return test2(x,2) def test2(a, b=3): return a, b''' self.run_test(code, 10, default_argument_all_filled=[int])
bsd-3-clause
ess/dd-agent
utils/pidfile.py
33
1675
# stdlib import logging import os.path import tempfile log = logging.getLogger(__name__) class PidFile(object): """ A small helper class for pidfiles. """ @classmethod def get_dir(cls, run_dir=None): if run_dir is None: my_dir = os.path.dirname(os.path.abspath(__file__)) run_dir = os.path.realpath(os.path.join(my_dir, '..', '..', 'run')) if os.path.exists(run_dir) and os.access(run_dir, os.W_OK): return os.path.realpath(run_dir) else: return tempfile.gettempdir() def __init__(self, program, pid_dir=None): self.pid_file = "%s.pid" % program self.pid_dir = self.get_dir(pid_dir) self.pid_path = os.path.join(self.pid_dir, self.pid_file) def get_path(self): # if all else fails if os.access(self.pid_dir, os.W_OK): log.info("Pid file is: %s" % self.pid_path) return self.pid_path else: # Can't save pid file, bail out log.error("Cannot save pid file: %s" % self.pid_path) raise Exception("Cannot save pid file: %s" % self.pid_path) def clean(self): try: path = self.get_path() log.debug("Cleaning up pid file %s" % path) os.remove(path) return True except Exception: log.warn("Could not clean up pid file") return False def get_pid(self): "Retrieve the actual pid" try: pf = open(self.get_path()) pid_s = pf.read() pf.close() return int(pid_s.strip()) except Exception: return None
bsd-3-clause
nilsgrabbert/spark
python/pyspark/sql/group.py
60
8297
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from pyspark import since from pyspark.rdd import ignore_unicode_prefix from pyspark.sql.column import Column, _to_seq, _to_java_column, _create_column_from_literal from pyspark.sql.dataframe import DataFrame from pyspark.sql.types import * __all__ = ["GroupedData"] def dfapi(f): def _api(self): name = f.__name__ jdf = getattr(self._jgd, name)() return DataFrame(jdf, self.sql_ctx) _api.__name__ = f.__name__ _api.__doc__ = f.__doc__ return _api def df_varargs_api(f): def _api(self, *cols): name = f.__name__ jdf = getattr(self._jgd, name)(_to_seq(self.sql_ctx._sc, cols)) return DataFrame(jdf, self.sql_ctx) _api.__name__ = f.__name__ _api.__doc__ = f.__doc__ return _api class GroupedData(object): """ A set of methods for aggregations on a :class:`DataFrame`, created by :func:`DataFrame.groupBy`. .. note:: Experimental .. versionadded:: 1.3 """ def __init__(self, jgd, sql_ctx): self._jgd = jgd self.sql_ctx = sql_ctx @ignore_unicode_prefix @since(1.3) def agg(self, *exprs): """Compute aggregates and returns the result as a :class:`DataFrame`. The available aggregate functions are `avg`, `max`, `min`, `sum`, `count`. 
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key is the column to perform aggregation on, and the value is the aggregate function. Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions. :param exprs: a dict mapping from column name (string) to aggregate functions (string), or a list of :class:`Column`. >>> gdf = df.groupBy(df.name) >>> sorted(gdf.agg({"*": "count"}).collect()) [Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)] >>> from pyspark.sql import functions as F >>> sorted(gdf.agg(F.min(df.age)).collect()) [Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)] """ assert exprs, "exprs should not be empty" if len(exprs) == 1 and isinstance(exprs[0], dict): jdf = self._jgd.agg(exprs[0]) else: # Columns assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column" jdf = self._jgd.agg(exprs[0]._jc, _to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]])) return DataFrame(jdf, self.sql_ctx) @dfapi @since(1.3) def count(self): """Counts the number of records for each group. >>> sorted(df.groupBy(df.age).count().collect()) [Row(age=2, count=1), Row(age=5, count=1)] """ @df_varargs_api @since(1.3) def mean(self, *cols): """Computes average values for each numeric columns for each group. :func:`mean` is an alias for :func:`avg`. :param cols: list of column names (string). Non-numeric columns are ignored. >>> df.groupBy().mean('age').collect() [Row(avg(age)=3.5)] >>> df3.groupBy().mean('age', 'height').collect() [Row(avg(age)=3.5, avg(height)=82.5)] """ @df_varargs_api @since(1.3) def avg(self, *cols): """Computes average values for each numeric columns for each group. :func:`mean` is an alias for :func:`avg`. :param cols: list of column names (string). Non-numeric columns are ignored. 
>>> df.groupBy().avg('age').collect() [Row(avg(age)=3.5)] >>> df3.groupBy().avg('age', 'height').collect() [Row(avg(age)=3.5, avg(height)=82.5)] """ @df_varargs_api @since(1.3) def max(self, *cols): """Computes the max value for each numeric columns for each group. >>> df.groupBy().max('age').collect() [Row(max(age)=5)] >>> df3.groupBy().max('age', 'height').collect() [Row(max(age)=5, max(height)=85)] """ @df_varargs_api @since(1.3) def min(self, *cols): """Computes the min value for each numeric column for each group. :param cols: list of column names (string). Non-numeric columns are ignored. >>> df.groupBy().min('age').collect() [Row(min(age)=2)] >>> df3.groupBy().min('age', 'height').collect() [Row(min(age)=2, min(height)=80)] """ @df_varargs_api @since(1.3) def sum(self, *cols): """Compute the sum for each numeric columns for each group. :param cols: list of column names (string). Non-numeric columns are ignored. >>> df.groupBy().sum('age').collect() [Row(sum(age)=7)] >>> df3.groupBy().sum('age', 'height').collect() [Row(sum(age)=7, sum(height)=165)] """ @since(1.6) def pivot(self, pivot_col, values=None): """ Pivots a column of the current [[DataFrame]] and perform the specified aggregation. There are two versions of pivot function: one that requires the caller to specify the list of distinct values to pivot on, and one that does not. The latter is more concise but less efficient, because Spark needs to first compute the list of distinct values internally. :param pivot_col: Name of the column to pivot. :param values: List of values that will be translated to columns in the output DataFrame. 
# Compute the sum of earnings for each year by course with each course as a separate column >>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect() [Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)] # Or without specifying column values (less efficient) >>> df4.groupBy("year").pivot("course").sum("earnings").collect() [Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)] """ if values is None: jgd = self._jgd.pivot(pivot_col) else: jgd = self._jgd.pivot(pivot_col, values) return GroupedData(jgd, self.sql_ctx) def _test(): import doctest from pyspark.sql import Row, SparkSession import pyspark.sql.group globs = pyspark.sql.group.__dict__.copy() spark = SparkSession.builder\ .master("local[4]")\ .appName("sql.group tests")\ .getOrCreate() sc = spark.sparkContext globs['sc'] = sc globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \ .toDF(StructType([StructField('age', IntegerType()), StructField('name', StringType())])) globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80), Row(name='Bob', age=5, height=85)]).toDF() globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000), Row(course="Java", year=2012, earnings=20000), Row(course="dotNET", year=2012, earnings=5000), Row(course="dotNET", year=2013, earnings=48000), Row(course="Java", year=2013, earnings=30000)]).toDF() (failure_count, test_count) = doctest.testmod( pyspark.sql.group, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF) spark.stop() if failure_count: exit(-1) if __name__ == "__main__": _test()
apache-2.0
Layle77/codecombat
scripts/analytics/mixpanelGetEvent.py
97
7517
# Get mixpanel event data via export API # Useful for debugging Mixpanel data weirdness targetLevels = ['dungeons-of-kithgard', 'the-raised-sword', 'endangered-burl'] targetLevels = ['dungeons-of-kithgard'] eventFunnel = ['Started Level', 'Saw Victory'] # eventFunnel = ['Saw Victory'] # eventFunnel = ['Started Level'] import sys from pprint import pprint from datetime import datetime, timedelta from mixpanel import Mixpanel try: import json except ImportError: import simplejson as json # NOTE: mixpanel dates are by day and inclusive # E.g. '2014-12-08' is any date that day, up to 2014-12-09 12am if __name__ == '__main__': if not len(sys.argv) is 3: print "Script format: <script> <api_key> <api_secret>" else: scriptStart = datetime.now() api_key = sys.argv[1] api_secret = sys.argv[2] api = Mixpanel( api_key = api_key, api_secret = api_secret ) startDate = '2015-01-01' endDate = '2015-01-26' startEvent = eventFunnel[0] endEvent = eventFunnel[-1] print("Requesting data for {0} to {1}".format(startDate, endDate)) data = api.request(['export'], { # 'where': '"539c630f30a67c3b05d98d95" == properties["id"]', # 'where': "('539c630f30a67c3b05d98d95' == properties['id'] or '539c630f30a67c3b05d98d95' == properties['distinct_id'])", 'event': eventFunnel, 'from_date': startDate, 'to_date': endDate }) weirdUserIDs = [] eventUsers = {} levelEventUserDayMap = {} levelUserEventDayMap = {} lines = data.split('\n') print "Received %d entries" % len(lines) for line in lines: try: if len(line) is 0: continue eventData = json.loads(line) # pprint(eventData) # break eventName = eventData['event'] if not eventName in eventFunnel: print 'Unexpected event ' + eventName break if not 'properties' in eventData: print('no properties, skpping') continue properties = eventData['properties'] if not 'distinct_id' in properties: print('no distinct_id, skpping') continue user = properties['distinct_id'] if not 'time' in properties: print('no time, skpping') continue time = properties['time'] pst = 
datetime.fromtimestamp(int(properties['time'])) utc = pst + timedelta(0, 8 * 60 * 60) dateCreated = utc.isoformat() day = dateCreated[0:10] if day < startDate or day > endDate: print "Skipping {0}".format(day) continue if 'levelID' in properties: level = properties['levelID'] elif 'level' in properties: level = properties['level'].lower().replace(' ', '-') else: print("Unkonwn level for", eventName) print(properties) break if not level in targetLevels: continue # if user != "539c630f30a67c3b05d98d95": continue pprint(eventData) # if user == "54c1fc3a08652d5305442c6b": # pprint(eventData) # break # if '-' in user: # weirdUserIDs.append(user) # # pprint(eventData) # # break # continue # print level if not level in levelEventUserDayMap: levelEventUserDayMap[level] = {} if not eventName in levelEventUserDayMap[level]: levelEventUserDayMap[level][eventName] = {} if not user in levelEventUserDayMap[level][eventName] or levelEventUserDayMap[level][eventName][user] > day: levelEventUserDayMap[level][eventName][user] = day if not user in eventUsers: eventUsers[user] = True if not level in levelUserEventDayMap: levelUserEventDayMap[level] = {} if not user in levelUserEventDayMap[level]: levelUserEventDayMap[level][user] = {} if not eventName in levelUserEventDayMap[level][user] or levelUserEventDayMap[level][user][eventName] > day: levelUserEventDayMap[level][user][eventName] = day except: print "Unexpected error:", sys.exc_info()[0] print line break # pprint(levelEventUserDayMap) print("Weird user IDs: {0}".format(len(weirdUserIDs))) for level in levelEventUserDayMap: for event in levelEventUserDayMap[level]: print("{0} {1} {2}".format(level, event, len(levelEventUserDayMap[level][event]))) print("Users: {0}".format(len(eventUsers))) noStartDayUsers = [] levelFunnelData = {} for level in levelUserEventDayMap: for user in levelUserEventDayMap[level]: # 6455 # for event in levelUserEventDayMap[level][user]: # day = levelUserEventDayMap[level][user][event] # if not level in 
levelFunnelData: levelFunnelData[level] = {} # if not day in levelFunnelData[level]: levelFunnelData[level][day] = {} # if not event in levelFunnelData[level][day]: levelFunnelData[level][day][event] = 0 # levelFunnelData[level][day][event] += 1 # 5382 funnelStartDay = None for event in levelUserEventDayMap[level][user]: day = levelUserEventDayMap[level][user][event] if not level in levelFunnelData: levelFunnelData[level] = {} if not day in levelFunnelData[level]: levelFunnelData[level][day] = {} if not event in levelFunnelData[level][day]: levelFunnelData[level][day][event] = 0 if eventFunnel[0] == event: levelFunnelData[level][day][event] += 1 funnelStartDay = day break if funnelStartDay: for event in levelUserEventDayMap[level][user]: if not event in levelFunnelData[level][funnelStartDay]: levelFunnelData[level][funnelStartDay][event] = 0 if eventFunnel[0] != event: levelFunnelData[level][funnelStartDay][event] += 1 for i in range(1, len(eventFunnel)): event = eventFunnel[i] if not event in levelFunnelData[level][funnelStartDay]: levelFunnelData[level][funnelStartDay][event] = 0 else: noStartDayUsers.append(user) pprint(levelFunnelData) print("No start day count: {0}".format(len(noStartDayUsers))) noStartDayUsers.sort() for i in range(len(noStartDayUsers)): if i > 50: break print(noStartDayUsers[i]) print("Script runtime: {0}".format(datetime.now() - scriptStart))
mit
MQQiang/kbengine
kbe/src/lib/python/Lib/lib2to3/tests/test_main.py
112
5740
# -*- coding: utf-8 -*- import codecs import io import logging import os import re import shutil import sys import tempfile import unittest from lib2to3 import main TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") PY2_TEST_MODULE = os.path.join(TEST_DATA_DIR, "py2_test_grammar.py") class TestMain(unittest.TestCase): def setUp(self): self.temp_dir = None # tearDown() will rmtree this directory if set. def tearDown(self): # Clean up logging configuration down by main. del logging.root.handlers[:] if self.temp_dir: shutil.rmtree(self.temp_dir) def run_2to3_capture(self, args, in_capture, out_capture, err_capture): save_stdin = sys.stdin save_stdout = sys.stdout save_stderr = sys.stderr sys.stdin = in_capture sys.stdout = out_capture sys.stderr = err_capture try: return main.main("lib2to3.fixes", args) finally: sys.stdin = save_stdin sys.stdout = save_stdout sys.stderr = save_stderr def test_unencodable_diff(self): input_stream = io.StringIO("print 'nothing'\nprint u'über'\n") out = io.BytesIO() out_enc = codecs.getwriter("ascii")(out) err = io.StringIO() ret = self.run_2to3_capture(["-"], input_stream, out_enc, err) self.assertEqual(ret, 0) output = out.getvalue().decode("ascii") self.assertIn("-print 'nothing'", output) self.assertIn("WARNING: couldn't encode <stdin>'s diff for " "your terminal", err.getvalue()) def setup_test_source_trees(self): """Setup a test source tree and output destination tree.""" self.temp_dir = tempfile.mkdtemp() # tearDown() cleans this up. self.py2_src_dir = os.path.join(self.temp_dir, "python2_project") self.py3_dest_dir = os.path.join(self.temp_dir, "python3_project") os.mkdir(self.py2_src_dir) os.mkdir(self.py3_dest_dir) # Turn it into a package with a few files. 
self.setup_files = [] open(os.path.join(self.py2_src_dir, "__init__.py"), "w").close() self.setup_files.append("__init__.py") shutil.copy(PY2_TEST_MODULE, self.py2_src_dir) self.setup_files.append(os.path.basename(PY2_TEST_MODULE)) self.trivial_py2_file = os.path.join(self.py2_src_dir, "trivial.py") self.init_py2_file = os.path.join(self.py2_src_dir, "__init__.py") with open(self.trivial_py2_file, "w") as trivial: trivial.write("print 'I need a simple conversion.'") self.setup_files.append("trivial.py") def test_filename_changing_on_output_single_dir(self): """2to3 a single directory with a new output dir and suffix.""" self.setup_test_source_trees() out = io.StringIO() err = io.StringIO() suffix = "TEST" ret = self.run_2to3_capture( ["-n", "--add-suffix", suffix, "--write-unchanged-files", "--no-diffs", "--output-dir", self.py3_dest_dir, self.py2_src_dir], io.StringIO(""), out, err) self.assertEqual(ret, 0) stderr = err.getvalue() self.assertIn(" implies -w.", stderr) self.assertIn( "Output in %r will mirror the input directory %r layout" % ( self.py3_dest_dir, self.py2_src_dir), stderr) self.assertEqual(set(name+suffix for name in self.setup_files), set(os.listdir(self.py3_dest_dir))) for name in self.setup_files: self.assertIn("Writing converted %s to %s" % ( os.path.join(self.py2_src_dir, name), os.path.join(self.py3_dest_dir, name+suffix)), stderr) sep = re.escape(os.sep) self.assertRegex( stderr, r"No changes to .*/__init__\.py".replace("/", sep)) self.assertNotRegex( stderr, r"No changes to .*/trivial\.py".replace("/", sep)) def test_filename_changing_on_output_two_files(self): """2to3 two files in one directory with a new output dir.""" self.setup_test_source_trees() err = io.StringIO() py2_files = [self.trivial_py2_file, self.init_py2_file] expected_files = set(os.path.basename(name) for name in py2_files) ret = self.run_2to3_capture( ["-n", "-w", "--write-unchanged-files", "--no-diffs", "--output-dir", self.py3_dest_dir] + py2_files, io.StringIO(""), 
io.StringIO(), err) self.assertEqual(ret, 0) stderr = err.getvalue() self.assertIn( "Output in %r will mirror the input directory %r layout" % ( self.py3_dest_dir, self.py2_src_dir), stderr) self.assertEqual(expected_files, set(os.listdir(self.py3_dest_dir))) def test_filename_changing_on_output_single_file(self): """2to3 a single file with a new output dir.""" self.setup_test_source_trees() err = io.StringIO() ret = self.run_2to3_capture( ["-n", "-w", "--no-diffs", "--output-dir", self.py3_dest_dir, self.trivial_py2_file], io.StringIO(""), io.StringIO(), err) self.assertEqual(ret, 0) stderr = err.getvalue() self.assertIn( "Output in %r will mirror the input directory %r layout" % ( self.py3_dest_dir, self.py2_src_dir), stderr) self.assertEqual(set([os.path.basename(self.trivial_py2_file)]), set(os.listdir(self.py3_dest_dir))) if __name__ == '__main__': unittest.main()
lgpl-3.0
PeterFaiman/ruby-grpc-minimal
tools/line_count/collect-history.py
14
1451
#!/usr/bin/env python # Copyright 2017 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess import datetime # this script is only of historical interest: it's the script that was used to # bootstrap the dataset def daterange(start, end): for n in range(int((end - start).days)): yield start + datetime.timedelta(n) start_date = datetime.date(2017, 3, 26) end_date = datetime.date(2017, 3, 29) for dt in daterange(start_date, end_date): dmy = dt.strftime('%Y-%m-%d') sha1 = subprocess.check_output(['git', 'rev-list', '-n', '1', '--before=%s' % dmy, 'master']).strip() subprocess.check_call(['git', 'checkout', sha1]) subprocess.check_call(['git', 'submodule', 'update']) subprocess.check_call(['git', 'clean', '-f', '-x', '-d']) subprocess.check_call(['cloc', '--vcs=git', '--by-file', '--yaml', '--out=../count/%s.yaml' % dmy, '.'])
apache-2.0
google-research/weakly_supervised_control
weakly_supervised_control/disentanglement/tensorsketch/modules/affine.py
1
8248
# Copyright 2020 The Weakly-Supervised Control Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This file was modified from `https://github.com/google-research/google-research/blob/master/weak_disentangle`. """Affine modules. """ import numpy as np import tensorflow as tf from collections import OrderedDict from weakly_supervised_control.disentanglement.tensorsketch import utils as tsu from weakly_supervised_control.disentanglement.tensorsketch.modules.base import build_with_name_scope from weakly_supervised_control.disentanglement.tensorsketch.modules.base import Module def initializer(kernel, bias): fan_in, fan_out = tsu.compute_fan(kernel) limit = np.sqrt(6 / (fan_in + fan_out)) kernel.assign(tf.random.uniform(kernel.shape, -limit, limit)) if bias is not None: bias.assign(tf.zeros(bias.shape)) class Affine(Module): """Abstract class for modules that apply an affine transformation to input. Affine includes several special functionalities to ensure that classes that extend it are amenable to the injection of kernel normalizers (based on the respects_kernel_norm flag). All classes that extend Affine should adhere to the following contract: Never access self.orig_kernel directly in forward call, and parameter initialization/building. 
""" def __init__(self, bias=True, name=None): super().__init__(name=name) self.use_bias = bias self.kernel = None self.bias = None self.initializer = initializer self.kernel_normalizers = OrderedDict() @property def normalized_kernel(self): kernel = self.kernel for km in self.kernel_normalizers.values(): kernel = km(kernel) return kernel @build_with_name_scope def build_parameters(self, x): raise NotImplementedError( "Implement parameter building for Affine class") def reset_parameters(self): if self.initializer is not None: self.initializer(self.kernel, self.bias) return # By default, all affine layers are initialized via # Unif(-a, a), where a = sqrt(1 / fan_in) fan_in, _ = tsu.compute_fan(self.kernel) limit = np.sqrt(1 / fan_in) self.kernel.assign(tf.random.uniform(self.kernel.shape, -limit, limit)) if self.use_bias: self.bias.assign(tf.random.uniform(self.bias.shape, -limit, limit)) class Dense(Affine): """Applies a dense affine transformation to input. """ def __init__(self, out_dims, bias=True, name=None): super().__init__(bias=bias, name=name) self.out_dims = out_dims @build_with_name_scope def build_parameters(self, x): self.in_dims = int(x.shape[-1]) self.kernel = tf.Variable(tf.random.normal((self.in_dims, self.out_dims)), trainable=True) if self.use_bias: self.bias = tf.Variable(tf.random.normal( [self.out_dims]), trainable=True) self.reset_parameters() def forward(self, x): x = tf.matmul(x, self.normalized_kernel) if self.bias is not None: x = tf.nn.bias_add(x, self.bias) return x def extra_repr(self): return "({}, bias={})".format(self.out_dims, self.use_bias) class Conv2d(Affine): """Applies 2d convolutional transformation (and bias) to input. 
""" def __init__(self, out_channels, kernel_size, strides, padding="same", dilation=1, bias=True, name=None): super().__init__(bias=bias, name=name) self.out_channels = out_channels self.kernel_size = kernel_size self.strides = strides self.padding = padding self.dilation = dilation @build_with_name_scope def build_parameters(self, x): self.in_channels = int(x.shape[-1]) self.kernel = tf.Variable(tf.random.normal((self.kernel_size, self.kernel_size, self.in_channels, self.out_channels)), trainable=True) if self.use_bias: self.bias = tf.Variable(tf.random.normal([self.out_channels]), trainable=True) self.reset_parameters() def forward(self, x): x = tf.nn.conv2d( x, filter=self.normalized_kernel, strides=self.strides, padding=self.padding.upper(), dilations=self.dilation) if self.use_bias: x = tf.nn.bias_add(x, self.bias) return x def extra_repr(self): return "({}, {}, {}, {}, bias={})".format(self.out_channels, self.kernel_size, self.strides, self.padding, self.use_bias) class ConvTranspose2d(Affine): """Applies 2d transposed convolutional transformation (and bias) to input. 
""" def __init__(self, out_channels, kernel_size, strides, padding="same", output_padding=None, dilation=1, bias=True, name=None): super().__init__(bias=bias, name=name) self.out_channels = out_channels self.kernel_size = kernel_size self.strides = strides self.padding = padding self.output_padding = output_padding self.dilation = dilation @build_with_name_scope def build_parameters(self, x): self.in_channels = int(x.shape[-1]) self.kernel = tf.Variable(tf.random.normal((self.kernel_size, self.kernel_size, self.out_channels, self.in_channels)), trainable=True) if self.use_bias: self.bias = tf.Variable(tf.random.normal([self.out_channels]), trainable=True) self.reset_parameters() def forward(self, x): n, h, w, _ = x.shape h = tsu.compute_out_dims(h, self.kernel_size, self.strides, self.padding, self.output_padding, self.dilation) w = tsu.compute_out_dims(w, self.kernel_size, self.strides, self.padding, self.output_padding, self.dilation) output_shape = (n, h, w, self.out_channels) x = tf.nn.conv2d_transpose( x, filter=self.normalized_kernel, strides=self.strides, padding=self.padding.upper(), output_shape=output_shape, dilations=self.dilation) if self.use_bias: x = tf.nn.bias_add(x, self.bias) return x def extra_repr(self): return "({}, {}, {}, {}, bias={})".format(self.out_channels, self.kernel_size, self.strides, self.padding, self.use_bias)
apache-2.0
jimsimon/sky_engine
sky/engine/build/scripts/make_event_factory.py
9
2602
#!/usr/bin/env python # Copyright (C) 2013 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os.path import sys import shutil from in_file import InFile import name_macros import name_utilities import template_expander def case_insensitive_matching(name): return (name == ('HTMLEvents') or name == 'Event' or name == 'Events' or name.startswith('UIEvent')) class EventFactoryWriter(name_macros.Writer): defaults = { 'ImplementedAs': None, 'Conditional': None, 'RuntimeEnabled': None, } default_parameters = { 'namespace': '', 'suffix': '', } filters = { 'cpp_name': name_utilities.cpp_name, 'enable_conditional': name_utilities.enable_conditional_if_endif, 'lower_first': name_utilities.lower_first, 'case_insensitive_matching': case_insensitive_matching, 'script_name': name_utilities.script_name, } def __init__(self, in_file_path): super(EventFactoryWriter, self).__init__(in_file_path) if __name__ == "__main__": name_macros.Maker(EventFactoryWriter).main(sys.argv)
bsd-3-clause
prarthitm/edxplatform
lms/djangoapps/student_profile/views.py
21
4548
""" Views for a student's profile information. """ from django.conf import settings from django.contrib.auth.decorators import login_required from django.core.exceptions import ObjectDoesNotExist from django.core.urlresolvers import reverse from django.http import Http404 from django.views.decorators.http import require_http_methods from django_countries import countries from django.contrib.staticfiles.storage import staticfiles_storage from badges.utils import badges_enabled from edxmako.shortcuts import render_to_response, marketing_link from openedx.core.djangoapps.user_api.accounts.api import get_account_settings from openedx.core.djangoapps.user_api.errors import UserNotFound, UserNotAuthorized from openedx.core.djangoapps.user_api.preferences.api import get_user_preferences from student.models import User from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers @login_required @require_http_methods(['GET']) def learner_profile(request, username): """Render the profile page for the specified username. Args: request (HttpRequest) username (str): username of user whose profile is requested. Returns: HttpResponse: 200 if the page was sent successfully HttpResponse: 302 if not logged in (redirect to login page) HttpResponse: 405 if using an unsupported HTTP method Raises: Http404: 404 if the specified user is not authorized or does not exist Example usage: GET /account/profile """ try: return render_to_response( 'student_profile/learner_profile.html', learner_profile_context(request, username, request.user.is_staff) ) except (UserNotAuthorized, UserNotFound, ObjectDoesNotExist): raise Http404 def learner_profile_context(request, profile_username, user_is_staff): """Context for the learner profile page. Args: logged_in_user (object): Logged In user. profile_username (str): username of user whose profile is requested. user_is_staff (bool): Logged In user has staff access. 
build_absolute_uri_func (): Returns: dict Raises: ObjectDoesNotExist: the specified profile_username does not exist. """ profile_user = User.objects.get(username=profile_username) logged_in_user = request.user own_profile = (logged_in_user.username == profile_username) account_settings_data = get_account_settings(request, [profile_username])[0] preferences_data = get_user_preferences(profile_user, profile_username) context = { 'data': { 'profile_user_id': profile_user.id, 'default_public_account_fields': settings.ACCOUNT_VISIBILITY_CONFIGURATION['public_fields'], 'default_visibility': settings.ACCOUNT_VISIBILITY_CONFIGURATION['default_visibility'], 'accounts_api_url': reverse("accounts_api", kwargs={'username': profile_username}), 'preferences_api_url': reverse('preferences_api', kwargs={'username': profile_username}), 'preferences_data': preferences_data, 'account_settings_data': account_settings_data, 'profile_image_upload_url': reverse('profile_image_upload', kwargs={'username': profile_username}), 'profile_image_remove_url': reverse('profile_image_remove', kwargs={'username': profile_username}), 'profile_image_max_bytes': settings.PROFILE_IMAGE_MAX_BYTES, 'profile_image_min_bytes': settings.PROFILE_IMAGE_MIN_BYTES, 'account_settings_page_url': reverse('account_settings'), 'has_preferences_access': (logged_in_user.username == profile_username or user_is_staff), 'own_profile': own_profile, 'country_options': list(countries), 'find_courses_url': marketing_link('COURSES'), 'language_options': settings.ALL_LANGUAGES, 'badges_logo': staticfiles_storage.url('certificates/images/backpack-logo.png'), 'badges_icon': staticfiles_storage.url('certificates/images/ico-mozillaopenbadges.png'), 'backpack_ui_img': staticfiles_storage.url('certificates/images/backpack-ui.png'), 'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME), }, 'disable_courseware_js': True, } if badges_enabled(): context['data']['badges_api_url'] = 
reverse("badges_api:user_assertions", kwargs={'username': profile_username}) return context
agpl-3.0
EWol234/osmc
package/mediacenter-addon-osmc/src/script.module.elementtree/lib/elementtree/ElementIron.py
24
6457
# # ElementTree # $Id: ElementIron.py 443 2006-11-18 18:47:34Z effbot $ # # an experimental ElementTree driver for IronPython. # # Copyright (c) 2006 by Fredrik Lundh. All rights reserved. # # fredrik@pythonware.com # http://www.pythonware.com # # -------------------------------------------------------------------- # The ElementTree toolkit is # # Copyright (c) 1999-2007 by Fredrik Lundh # # By obtaining, using, and/or copying this software and/or its # associated documentation, you agree that you have read, understood, # and will comply with the following terms and conditions: # # Permission to use, copy, modify, and distribute this software and # its associated documentation for any purpose and without fee is # hereby granted, provided that the above copyright notice appears in # all copies, and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Secret Labs AB or the author not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. # # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- # ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. 
# -------------------------------------------------------------------- import clr clr.AddReference("System.Xml") from System.IO import StringReader, TextReader from System.Xml import XmlReader, XmlNodeType # node types/categories START = XmlNodeType.Element DATA_TEXT = XmlNodeType.Text DATA_CDATA = XmlNodeType.CDATA DATA_SPACE = XmlNodeType.Whitespace END = XmlNodeType.EndElement def _ironparse(source): # internal event generator. takes a TextReader subclass, a file- # like object, or a filename, and generates an event stream. use # the parse() and iterparse() adapters to access this from user- # code. if isinstance(source, TextReader): pass # use as is elif hasattr(source, "read"): # FIXME: implement TextReader wrapper for Python I/O objects source = StringReader(source.read()) # FIXME: handle settings here? (disable comments, etc) reader = XmlReader.Create(source) # tag cache tags = {} namespaces = [] def gettag(): key = reader.NamespaceURI, reader.LocalName try: tag = tags[key] except KeyError: if key[0]: tag = "{%s}%s" % key else: tag = key[1] tags[key] = tag return tag while reader.Read(): node = reader.NodeType if node == START: tag = gettag() attrib = {} ns = 0 # count namespace declarations while reader.MoveToNextAttribute(): if reader.LocalName == "xmlns": ns += 1 # default namespace yield "start-ns", ("", reader.Value) elif reader.Prefix == "xmlns": ns += 1 # prefixed namespace yield "start-ns", (reader.LocalName, reader.Value) else: attrib[gettag()] = reader.Value namespaces.append(ns) reader.MoveToElement() yield "start", tag, attrib if reader.IsEmptyElement: yield "end", tag for i in xrange(namespaces.pop()): yield "end-ns", None elif node == END: yield "end", tags[reader.NamespaceURI, reader.LocalName] for i in xrange(namespaces.pop()): yield "end-ns", None elif node == DATA_TEXT or node == DATA_SPACE or node == DATA_CDATA: yield "data", reader.Value else: pass # yield "unknown", node reader.Close() class _iterparse: # iterparse generator. 
we could use a generator method for this, # but we need to expose a custom attribute as well, and generators # cannot have arbitrary attributes def __init__(self, source, target, events): self.root = None self.source = source self.target = target self.events = events def __iter__(self): source = self.source target = self.target events = self.events if not events: events = ["end"] start = end = start_ns = end_ns = None for event in events: # use the passed-in objects as event codes if event == "start": start = event elif event == "end": end = event elif event == "start-ns": start_ns = event elif event == "end-ns": end_ns = event for event in _ironparse(source): code = event[0] if code == "start": elem = target.start(event[1], event[2]) if start: yield start, elem elif code == "end": elem = target.end(event[1]) if end: yield end, elem elif code == "data": target.data(event[1]) elif code == "start-ns": if start_ns: yield start_ns, event[1] elif code == "end-ns": if end_ns: yield end_ns, event[1] self.root = target.close() class ParserAPI(object): def __init__(self, target_factory): self.target_factory = target_factory def parse(self, source): target = self.target_factory() for event in _ironparse(source): code = event[0] if code == "start": target.start(event[1], event[2]) elif code == "end": target.end(event[1]) elif code == "data": target.data(event[1]) return target.close() def iterparse(self, source, events=None): target = self.target_factory() return _iterparse(source, target, events) def fromstring(self, source): return self.parse(StringReader(source))
gpl-2.0
ak2703/edx-platform
common/lib/xmodule/xmodule/tests/test_course_module.py
55
16839
import unittest from datetime import datetime, timedelta from fs.memoryfs import MemoryFS from mock import Mock, patch import itertools from xblock.runtime import KvsFieldData, DictKeyValueStore import xmodule.course_module from xmodule.modulestore.xml import ImportSystem, XMLModuleStore from opaque_keys.edx.locations import SlashSeparatedCourseKey from django.utils.timezone import UTC ORG = 'test_org' COURSE = 'test_course' NOW = datetime.strptime('2013-01-01T01:00:00', '%Y-%m-%dT%H:%M:00').replace(tzinfo=UTC()) _TODAY = datetime.now(UTC()) _LAST_WEEK = _TODAY - timedelta(days=7) _NEXT_WEEK = _TODAY + timedelta(days=7) class CourseFieldsTestCase(unittest.TestCase): def test_default_start_date(self): self.assertEqual( xmodule.course_module.CourseFields.start.default, datetime(2030, 1, 1, tzinfo=UTC()) ) class DummySystem(ImportSystem): @patch('xmodule.modulestore.xml.OSFS', lambda dir: MemoryFS()) def __init__(self, load_error_modules): xmlstore = XMLModuleStore("data_dir", source_dirs=[], load_error_modules=load_error_modules) course_id = SlashSeparatedCourseKey(ORG, COURSE, 'test_run') course_dir = "test_dir" error_tracker = Mock() super(DummySystem, self).__init__( xmlstore=xmlstore, course_id=course_id, course_dir=course_dir, error_tracker=error_tracker, load_error_modules=load_error_modules, field_data=KvsFieldData(DictKeyValueStore()), ) def get_dummy_course(start, announcement=None, is_new=None, advertised_start=None, end=None, certs='end'): """Get a dummy course""" system = DummySystem(load_error_modules=True) def to_attrb(n, v): return '' if v is None else '{0}="{1}"'.format(n, v).lower() is_new = to_attrb('is_new', is_new) announcement = to_attrb('announcement', announcement) advertised_start = to_attrb('advertised_start', advertised_start) end = to_attrb('end', end) start_xml = ''' <course org="{org}" course="{course}" display_organization="{org}_display" display_coursenumber="{course}_display" graceperiod="1 day" url_name="test" start="{start}" 
{announcement} {is_new} {advertised_start} {end} certificates_display_behavior="{certs}"> <chapter url="hi" url_name="ch" display_name="CH"> <html url_name="h" display_name="H">Two houses, ...</html> </chapter> </course> '''.format( org=ORG, course=COURSE, start=start, is_new=is_new, announcement=announcement, advertised_start=advertised_start, end=end, certs=certs, ) return system.process_xml(start_xml) class HasEndedMayCertifyTestCase(unittest.TestCase): """Double check the semantics around when to finalize courses.""" def setUp(self): super(HasEndedMayCertifyTestCase, self).setUp() system = DummySystem(load_error_modules=True) #sample_xml = """ # <course org="{org}" course="{course}" display_organization="{org}_display" display_coursenumber="{course}_display" # graceperiod="1 day" url_name="test" # start="2012-01-01T12:00" # {end} # certificates_show_before_end={cert}> # <chapter url="hi" url_name="ch" display_name="CH"> # <html url_name="h" display_name="H">Two houses, ...</html> # </chapter> # </course> #""".format(org=ORG, course=COURSE) past_end = (datetime.now() - timedelta(days=12)).strftime("%Y-%m-%dT%H:%M:00") future_end = (datetime.now() + timedelta(days=12)).strftime("%Y-%m-%dT%H:%M:00") self.past_show_certs = get_dummy_course("2012-01-01T12:00", end=past_end, certs='early_with_info') self.past_show_certs_no_info = get_dummy_course("2012-01-01T12:00", end=past_end, certs='early_no_info') self.past_noshow_certs = get_dummy_course("2012-01-01T12:00", end=past_end, certs='end') self.future_show_certs = get_dummy_course("2012-01-01T12:00", end=future_end, certs='early_with_info') self.future_show_certs_no_info = get_dummy_course("2012-01-01T12:00", end=future_end, certs='early_no_info') self.future_noshow_certs = get_dummy_course("2012-01-01T12:00", end=future_end, certs='end') #self.past_show_certs = system.process_xml(sample_xml.format(end=past_end, cert=True)) #self.past_noshow_certs = system.process_xml(sample_xml.format(end=past_end, cert=False)) 
#self.future_show_certs = system.process_xml(sample_xml.format(end=future_end, cert=True)) #self.future_noshow_certs = system.process_xml(sample_xml.format(end=future_end, cert=False)) def test_has_ended(self): """Check that has_ended correctly tells us when a course is over.""" self.assertTrue(self.past_show_certs.has_ended()) self.assertTrue(self.past_show_certs_no_info.has_ended()) self.assertTrue(self.past_noshow_certs.has_ended()) self.assertFalse(self.future_show_certs.has_ended()) self.assertFalse(self.future_show_certs_no_info.has_ended()) self.assertFalse(self.future_noshow_certs.has_ended()) def test_may_certify(self): """Check that may_certify correctly tells us when a course may wrap.""" self.assertTrue(self.past_show_certs.may_certify()) self.assertTrue(self.past_noshow_certs.may_certify()) self.assertTrue(self.past_show_certs_no_info.may_certify()) self.assertTrue(self.future_show_certs.may_certify()) self.assertTrue(self.future_show_certs_no_info.may_certify()) self.assertFalse(self.future_noshow_certs.may_certify()) class IsNewCourseTestCase(unittest.TestCase): """Make sure the property is_new works on courses""" def setUp(self): super(IsNewCourseTestCase, self).setUp() # Needed for test_is_newish datetime_patcher = patch.object( xmodule.course_module, 'datetime', Mock(wraps=datetime) ) mocked_datetime = datetime_patcher.start() mocked_datetime.now.return_value = NOW self.addCleanup(datetime_patcher.stop) @patch('xmodule.course_module.datetime.now') def test_sorting_score(self, gmtime_mock): gmtime_mock.return_value = NOW day1 = '2012-01-01T12:00' day2 = '2012-01-02T12:00' dates = [ # Announce date takes priority over actual start # and courses announced on a later date are newer # than courses announced for an earlier date ((day1, day2, None), (day1, day1, None), self.assertLess), ((day1, day1, None), (day2, day1, None), self.assertEqual), # Announce dates take priority over advertised starts ((day1, day2, day1), (day1, day1, day1), 
self.assertLess), ((day1, day1, day2), (day2, day1, day2), self.assertEqual), # Later start == newer course ((day2, None, None), (day1, None, None), self.assertLess), ((day1, None, None), (day1, None, None), self.assertEqual), # Non-parseable advertised starts are ignored in preference to actual starts ((day2, None, "Spring"), (day1, None, "Fall"), self.assertLess), ((day1, None, "Spring"), (day1, None, "Fall"), self.assertEqual), # Partially parsable advertised starts should take priority over start dates ((day2, None, "October 2013"), (day2, None, "October 2012"), self.assertLess), ((day2, None, "October 2013"), (day1, None, "October 2013"), self.assertEqual), # Parseable advertised starts take priority over start dates ((day1, None, day2), (day1, None, day1), self.assertLess), ((day2, None, day2), (day1, None, day2), self.assertEqual), ] for a, b, assertion in dates: a_score = get_dummy_course(start=a[0], announcement=a[1], advertised_start=a[2]).sorting_score b_score = get_dummy_course(start=b[0], announcement=b[1], advertised_start=b[2]).sorting_score print "Comparing %s to %s" % (a, b) assertion(a_score, b_score) start_advertised_settings = [ # start, advertised, result, is_still_default, date_time_result ('2012-12-02T12:00', None, 'Dec 02, 2012', False, u'Dec 02, 2012 at 12:00 UTC'), ('2012-12-02T12:00', '2011-11-01T12:00', 'Nov 01, 2011', False, u'Nov 01, 2011 at 12:00 UTC'), ('2012-12-02T12:00', 'Spring 2012', 'Spring 2012', False, 'Spring 2012'), ('2012-12-02T12:00', 'November, 2011', 'November, 2011', False, 'November, 2011'), (xmodule.course_module.CourseFields.start.default, None, 'TBD', True, 'TBD'), (xmodule.course_module.CourseFields.start.default, 'January 2014', 'January 2014', False, 'January 2014'), ] @patch('xmodule.course_module.datetime.now') def test_start_date_text(self, gmtime_mock): gmtime_mock.return_value = NOW for s in self.start_advertised_settings: d = get_dummy_course(start=s[0], advertised_start=s[1]) print "Checking start=%s 
advertised=%s" % (s[0], s[1]) self.assertEqual(d.start_datetime_text(), s[2]) @patch('xmodule.course_module.datetime.now') def test_start_date_time_text(self, gmtime_mock): gmtime_mock.return_value = NOW for setting in self.start_advertised_settings: course = get_dummy_course(start=setting[0], advertised_start=setting[1]) print "Checking start=%s advertised=%s" % (setting[0], setting[1]) self.assertEqual(course.start_datetime_text("DATE_TIME"), setting[4]) def test_start_date_is_default(self): for s in self.start_advertised_settings: d = get_dummy_course(start=s[0], advertised_start=s[1]) self.assertEqual(d.start_date_is_still_default, s[3]) def test_display_organization(self): descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True) self.assertNotEqual(descriptor.location.org, descriptor.display_org_with_default) self.assertEqual(descriptor.display_org_with_default, "{0}_display".format(ORG)) def test_display_coursenumber(self): descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True) self.assertNotEqual(descriptor.location.course, descriptor.display_number_with_default) self.assertEqual(descriptor.display_number_with_default, "{0}_display".format(COURSE)) def test_is_newish(self): descriptor = get_dummy_course(start='2012-12-02T12:00', is_new=True) assert descriptor.is_newish is True descriptor = get_dummy_course(start='2013-02-02T12:00', is_new=False) assert descriptor.is_newish is False descriptor = get_dummy_course(start='2013-02-02T12:00', is_new=True) assert descriptor.is_newish is True descriptor = get_dummy_course(start='2013-01-15T12:00') assert descriptor.is_newish is True descriptor = get_dummy_course(start='2013-03-01T12:00') assert descriptor.is_newish is True descriptor = get_dummy_course(start='2012-10-15T12:00') assert descriptor.is_newish is False descriptor = get_dummy_course(start='2012-12-31T12:00') assert descriptor.is_newish is True def test_end_date_text(self): # No end date set, returns empty string. 
d = get_dummy_course('2012-12-02T12:00') self.assertEqual('', d.end_datetime_text()) d = get_dummy_course('2012-12-02T12:00', end='2014-9-04T12:00') self.assertEqual('Sep 04, 2014', d.end_datetime_text()) def test_end_date_time_text(self): # No end date set, returns empty string. course = get_dummy_course('2012-12-02T12:00') self.assertEqual('', course.end_datetime_text("DATE_TIME")) course = get_dummy_course('2012-12-02T12:00', end='2014-9-04T12:00') self.assertEqual('Sep 04, 2014 at 12:00 UTC', course.end_datetime_text("DATE_TIME")) class DiscussionTopicsTestCase(unittest.TestCase): def test_default_discussion_topics(self): d = get_dummy_course('2012-12-02T12:00') self.assertEqual({'General': {'id': 'i4x-test_org-test_course-course-test'}}, d.discussion_topics) class TeamsConfigurationTestCase(unittest.TestCase): """ Tests for the configuration of teams and the helper methods for accessing them. """ def setUp(self): super(TeamsConfigurationTestCase, self).setUp() self.course = get_dummy_course('2012-12-02T12:00') self.course.teams_configuration = dict() self.count = itertools.count() def add_team_configuration(self, max_team_size=3, topics=None): """ Add a team configuration to the course. """ teams_configuration = {} teams_configuration["topics"] = [] if topics is None else topics if max_team_size is not None: teams_configuration["max_team_size"] = max_team_size self.course.teams_configuration = teams_configuration def make_topic(self): """ Make a sample topic dictionary. """ next_num = self.count.next() topic_id = "topic_id_{}".format(next_num) name = "Name {}".format(next_num) description = "Description {}".format(next_num) return {"name": name, "description": description, "id": topic_id} def test_teams_enabled_new_course(self): # Make sure we can detect when no teams exist. 
self.assertFalse(self.course.teams_enabled) # add topics self.add_team_configuration(max_team_size=4, topics=[self.make_topic()]) self.assertTrue(self.course.teams_enabled) # remove them again self.add_team_configuration(max_team_size=4, topics=[]) self.assertFalse(self.course.teams_enabled) def test_teams_enabled_max_size_only(self): self.add_team_configuration(max_team_size=4) self.assertFalse(self.course.teams_enabled) def test_teams_enabled_no_max_size(self): self.add_team_configuration(max_team_size=None, topics=[self.make_topic()]) self.assertTrue(self.course.teams_enabled) def test_teams_max_size_no_teams_configuration(self): self.assertIsNone(self.course.teams_max_size) def test_teams_max_size_with_teams_configured(self): size = 4 self.add_team_configuration(max_team_size=size, topics=[self.make_topic(), self.make_topic()]) self.assertTrue(self.course.teams_enabled) self.assertEqual(size, self.course.teams_max_size) def test_teams_topics_no_teams(self): self.assertIsNone(self.course.teams_topics) def test_teams_topics_no_topics(self): self.add_team_configuration(max_team_size=4) self.assertEqual(self.course.teams_topics, []) def test_teams_topics_with_topics(self): topics = [self.make_topic(), self.make_topic()] self.add_team_configuration(max_team_size=4, topics=topics) self.assertTrue(self.course.teams_enabled) self.assertEqual(self.course.teams_topics, topics) class CourseDescriptorTestCase(unittest.TestCase): """ Tests for a select few functions from CourseDescriptor. I wrote these test functions in order to satisfy the coverage checker for PR #8484, which modified some code within CourseDescriptor. However, this class definitely isn't a comprehensive test case for CourseDescriptor, as writing a such a test case was out of the scope of the PR. """ def setUp(self): """ Initialize dummy testing course. """ super(CourseDescriptorTestCase, self).setUp() self.course = get_dummy_course(start=_TODAY) def test_clean_id(self): """ Test CourseDescriptor.clean_id. 
""" self.assertEqual( self.course.clean_id(), "course_ORSXG5C7N5ZGOL3UMVZXIX3DN52XE43FF52GK43UL5ZHK3Q=" ) self.assertEqual( self.course.clean_id(padding_char='$'), "course_ORSXG5C7N5ZGOL3UMVZXIX3DN52XE43FF52GK43UL5ZHK3Q$" ) def test_has_started(self): """ Test CourseDescriptor.has_started. """ self.course.start = _LAST_WEEK self.assertTrue(self.course.has_started()) self.course.start = _NEXT_WEEK self.assertFalse(self.course.has_started()) def test_number(self): """ Test CourseDescriptor.number. """ self.assertEqual(self.course.number, COURSE)
agpl-3.0
google/earthenterprise
earth_enterprise/src/server/wsgi/serve/snippets/data/masked_snippets.py
5
5746
#!/usr/bin/env python2.7 # # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """End snippets defaults if they were untouched by the user. We want to do this to mask out protobuf-defined defaults which will be in effect in that case. A common case is a URL that points to a Google server. Another one is that authentication is on by default, which is undesirable. We do allow users to see and set these fields, they can point urls at Google, but it's usually the user who doesn't want that. """ masked_snippets = { # [7] active. "end_snippet.reverse_geocoder_url.value": "", # TODO: move to hard_masked_snippets (make not available # for user)? # [11] active. empty string by default. "end_snippet.default_web_page_intl_url.value": "", # TODO: move to hard_masked_snippets (make not available # for user)? # [14] active. "end_snippet.user_guide_intl_url.value": "", # TODO: move to hard_masked_snippets (make not available # for user)? # [15] active. "end_snippet.support_center_intl_url.value": "", # TODO: move to hard_masked_snippets (make not available # for user)? # [17] active. "end_snippet.support_answer_intl_url.value": "", # TODO: move to hard_masked_snippets (make not available # for user)? # [18] active. "end_snippet.support_topic_intl_url.value": "", # TODO: move to hard_masked_snippets (make not available # for user)? # [19] active. "end_snippet.support_request_intl_url.value": "", # TODO: move to hard_masked_snippets (make not available # for user)? 
# [20] active. "end_snippet.earth_intl_url.value": "", # [32] The time machine options. # Note: option is not available to user, but located in masked snippets, # since it may be set by Fusion. So, we mask time machine option if it is # not set. # active "end_snippet.time_machine_options.server_url": "", # active "end_snippet.time_machine_options.is_timemachine": False, # [40] active. # If not specified, default values in the client will be used. "end_snippet.bbs_server_info.name.value": "", "end_snippet.bbs_server_info.base_url.value": "", "end_snippet.bbs_server_info.post_wizard_path.value": "", "end_snippet.bbs_server_info.file_submit_path.value": "", # [46] active. # If empty, service will be unavailable. # This should be set to empty for EC clients to disable connection to google # services. "end_snippet.elevation_service_base_url": "", # [47] unnecessary. [default = 500]. # "end_snippet.elevation_profile_query_delay": 500, # TODO: move to hard_masked_snippets (make not available # for user)? # [48] active. # If not specified, this URL is built from user_guide_intl_url as # user_guide_intl_url + "tutorials/index.html". "end_snippet.tutorial_url.value": "", # TODO: move to hard_masked_snippets (make not available # for user)? # [49] active. # If not specified, this URL is built from user_guide_intl_url as # user_guide_intl_url + "ug_keyboard.html". "end_snippet.keyboard_shortcuts_url.value": "", # TODO: move to hard_masked_snippets (make not available # for user)? # [50] active. # If not specified, this URL is built from support_answer_intl_url "end_snippet.release_notes_url.value": "", # [54] active. URL of a page that will be shown when a KML search is # performed. # Note: for GEE, set to local path in order to override default settings. # Note: for GEE, set by default to kmlrender since we don't need to support # any searchlets or oneboxes. "end_snippet.search_config.kml_search_url.value": "/earth/client/kmlrender/index_$[hl].html", # [54] active. 
URL of a page that will be shown when KML is rendered in # the search panel. # Note: for GEE, set to local path in order to override default settings. "end_snippet.search_config.kml_render_url.value": "/earth/client/kmlrender/index_$[hl].html", # [54] active. URL of a page that will be displayed if a network error or # other local error occurs while performing a search. "end_snippet.search_config.error_page_url.value": "about:blank", # [54] active. URL of a page that will be shown when # the search history is requested. "end_snippet.search_config.search_history_url.value": "about:blank", # [57] active. # This should be set to empty for EC clients to disable connection to # google services. If nothing is specified, the client uses # "http://maps.google.com/". "end_snippet.google_maps_url.value": "", # TODO: move to hard_masked_snippets (make not available # for user)? # [59] active. "end_snippet.privacy_policy_url.value": "", # [63] active. "end_snippet.show_signin_button": False, # TODO: move to hard_masked_snippets (make not available # for user)? # [64] active. "end_snippet.startup_tips_intl_url.value": "", }
apache-2.0
banre123/shadowsocks
tests/coverage_server.py
1072
1655
#!/usr/bin/env python # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. if __name__ == '__main__': import tornado.ioloop import tornado.web import urllib class MainHandler(tornado.web.RequestHandler): def get(self, project): try: with open('/tmp/%s-coverage' % project, 'rb') as f: coverage = f.read().strip() n = int(coverage.strip('%')) if n >= 80: color = 'brightgreen' else: color = 'yellow' self.redirect(('https://img.shields.io/badge/' 'coverage-%s-%s.svg' '?style=flat') % (urllib.quote(coverage), color)) except IOError: raise tornado.web.HTTPError(404) application = tornado.web.Application([ (r"/([a-zA-Z0-9\-_]+)", MainHandler), ]) if __name__ == "__main__": application.listen(8888, address='127.0.0.1') tornado.ioloop.IOLoop.instance().start()
apache-2.0
vedujoshi/tempest
tempest/api/volume/admin/test_volume_quotas.py
1
8018
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.volume import base from tempest.common import tempest_fixtures as fixtures from tempest.common import waiters from tempest.lib.common.utils import data_utils from tempest.lib import decorators QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes', 'backups'] QUOTA_USAGE_KEYS = ['reserved', 'limit', 'in_use'] class BaseVolumeQuotasAdminTestJSON(base.BaseVolumeAdminTest): force_tenant_isolation = True credentials = ['primary', 'alt', 'admin'] def setUp(self): # NOTE(jeremy.zhang): Avoid conflicts with volume quota class tests. 
self.useFixture(fixtures.LockFixture('volume_quotas')) super(BaseVolumeQuotasAdminTestJSON, self).setUp() @classmethod def setup_credentials(cls): super(BaseVolumeQuotasAdminTestJSON, cls).setup_credentials() cls.demo_tenant_id = cls.os_primary.credentials.tenant_id cls.alt_client = cls.os_alt.volumes_client_latest @classmethod def setup_clients(cls): super(BaseVolumeQuotasAdminTestJSON, cls).setup_clients() cls.transfer_client = cls.os_primary.volume_transfers_v2_client cls.alt_transfer_client = cls.os_alt.volume_transfers_v2_client @decorators.idempotent_id('59eada70-403c-4cef-a2a3-a8ce2f1b07a0') def test_list_quotas(self): quotas = (self.admin_quotas_client.show_quota_set(self.demo_tenant_id) ['quota_set']) for key in QUOTA_KEYS: self.assertIn(key, quotas) @decorators.idempotent_id('2be020a2-5fdd-423d-8d35-a7ffbc36e9f7') def test_list_default_quotas(self): quotas = self.admin_quotas_client.show_default_quota_set( self.demo_tenant_id)['quota_set'] for key in QUOTA_KEYS: self.assertIn(key, quotas) @decorators.idempotent_id('3d45c99e-cc42-4424-a56e-5cbd212b63a6') def test_update_all_quota_resources_for_tenant(self): # Admin can update all the resource quota limits for a tenant default_quota_set = self.admin_quotas_client.show_default_quota_set( self.demo_tenant_id)['quota_set'] new_quota_set = {'gigabytes': 1009, 'volumes': 11, 'snapshots': 11, 'backups': 11} # Update limits for all quota resources quota_set = self.admin_quotas_client.update_quota_set( self.demo_tenant_id, **new_quota_set)['quota_set'] cleanup_quota_set = dict( (k, v) for k, v in default_quota_set.items() if k in QUOTA_KEYS) self.addCleanup(self.admin_quotas_client.update_quota_set, self.demo_tenant_id, **cleanup_quota_set) # test that the specific values we set are actually in # the final result. There is nothing here that ensures there # would be no other values in there. 
self.assertDictContainsSubset(new_quota_set, quota_set) @decorators.idempotent_id('18c51ae9-cb03-48fc-b234-14a19374dbed') def test_show_quota_usage(self): quota_usage = self.admin_quotas_client.show_quota_set( self.os_admin.credentials.tenant_id, params={'usage': True})['quota_set'] for key in QUOTA_KEYS: self.assertIn(key, quota_usage) for usage_key in QUOTA_USAGE_KEYS: self.assertIn(usage_key, quota_usage[key]) @decorators.idempotent_id('ae8b6091-48ad-4bfa-a188-bbf5cc02115f') def test_quota_usage(self): quota_usage = self.admin_quotas_client.show_quota_set( self.demo_tenant_id, params={'usage': True})['quota_set'] volume = self.create_volume() self.addCleanup(self.delete_volume, self.admin_volume_client, volume['id']) new_quota_usage = self.admin_quotas_client.show_quota_set( self.demo_tenant_id, params={'usage': True})['quota_set'] self.assertEqual(quota_usage['volumes']['in_use'] + 1, new_quota_usage['volumes']['in_use']) self.assertEqual(quota_usage['gigabytes']['in_use'] + volume["size"], new_quota_usage['gigabytes']['in_use']) @decorators.idempotent_id('874b35a9-51f1-4258-bec5-cd561b6690d3') def test_delete_quota(self): # Admin can delete the resource quota set for a project project_name = data_utils.rand_name('quota_tenant') description = data_utils.rand_name('desc_') project = self.identity_utils.create_project(project_name, description=description) project_id = project['id'] self.addCleanup(self.identity_utils.delete_project, project_id) quota_set_default = self.admin_quotas_client.show_default_quota_set( project_id)['quota_set'] volume_default = quota_set_default['volumes'] self.admin_quotas_client.update_quota_set( project_id, volumes=(volume_default + 5)) self.admin_quotas_client.delete_quota_set(project_id) quota_set_new = (self.admin_quotas_client.show_quota_set(project_id) ['quota_set']) self.assertEqual(volume_default, quota_set_new['volumes']) @decorators.idempotent_id('8911036f-9d54-4720-80cc-a1c9796a8805') def 
test_quota_usage_after_volume_transfer(self): # Create a volume for transfer volume = self.create_volume() self.addCleanup(self.delete_volume, self.admin_volume_client, volume['id']) # List of tenants quota usage pre-transfer primary_quota = self.admin_quotas_client.show_quota_set( self.demo_tenant_id, params={'usage': True})['quota_set'] alt_quota = self.admin_quotas_client.show_quota_set( self.alt_client.tenant_id, params={'usage': True})['quota_set'] # Creates a volume transfer transfer = self.transfer_client.create_volume_transfer( volume_id=volume['id'])['transfer'] transfer_id = transfer['id'] auth_key = transfer['auth_key'] # Accepts a volume transfer self.alt_transfer_client.accept_volume_transfer( transfer_id, auth_key=auth_key) # Verify volume transferred is available waiters.wait_for_volume_resource_status( self.alt_client, volume['id'], 'available') # List of tenants quota usage post transfer new_primary_quota = self.admin_quotas_client.show_quota_set( self.demo_tenant_id, params={'usage': True})['quota_set'] new_alt_quota = self.admin_quotas_client.show_quota_set( self.alt_client.tenant_id, params={'usage': True})['quota_set'] # Verify tenants quota usage was updated self.assertEqual(primary_quota['volumes']['in_use'] - new_primary_quota['volumes']['in_use'], new_alt_quota['volumes']['in_use'] - alt_quota['volumes']['in_use']) self.assertEqual(alt_quota['gigabytes']['in_use'] + volume['size'], new_alt_quota['gigabytes']['in_use']) self.assertEqual(primary_quota['gigabytes']['in_use'] - volume['size'], new_primary_quota['gigabytes']['in_use'])
apache-2.0
mohamedadaly/trex
python/astra/matlab.py
3
3290
#----------------------------------------------------------------------- #Copyright 2013 Centrum Wiskunde & Informatica, Amsterdam # #Author: Daniel M. Pelt #Contact: D.M.Pelt@cwi.nl #Website: http://dmpelt.github.io/pyastratoolbox/ # # #This file is part of the Python interface to the #All Scale Tomographic Reconstruction Antwerp Toolbox ("ASTRA Toolbox"). # #The Python interface to the ASTRA Toolbox is free software: you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation, either version 3 of the License, or #(at your option) any later version. # #The Python interface to the ASTRA Toolbox is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. # #You should have received a copy of the GNU General Public License #along with the Python interface to the ASTRA Toolbox. If not, see <http://www.gnu.org/licenses/>. # #----------------------------------------------------------------------- """This module implements a MATLAB-like interface to the ASTRA Toolbox. Note that all functions are called with a :class:`string` as the first argument, specifying the operation to perform. This un-pythonic way is used to make transitioning from MATLAB code to Python code easier, as the MATLAB interface uses the same type of method calling. After an initial ``import astra``, these functions can be accessed in the ``astra.m`` module. """ from . import astra_c from . import data2d_c from . import data3d_c from . import projector_c from . import algorithm_c from . import matrix_c import numpy as np def astra(command, *args): """MATLAB-like interface to the :mod:`astra.astra` module For example: ``astra.m.astra('use_cuda')`` -- Check if CUDA is enabled. 
""" return getattr(astra_c, command)(*args) def data2d(command, *args): """MATLAB-like interface to the :mod:`astra.data2d` module For example: ``astra.m.data2d('create',type,geometry,data)`` -- Create a 2D object. """ return getattr(data2d_c, command)(*args) def data3d(command, *args): """MATLAB-like interface to the :mod:`astra.data3d` module For example: ``astra.m.data3d('get',i)`` -- Get 3D object data. """ return getattr(data3d_c, command)(*args) def projector(command, *args): """MATLAB-like interface to the :mod:`astra.projector` module For example: ``astra.m.projector('volume_geometry',i)`` -- Get volume geometry. """ return getattr(projector_c, command)(*args) def matrix(command, *args): """MATLAB-like interface to the :mod:`astra.matrix` module For example: ``astra.m.matrix('delete',i)`` -- Delete a matrix. """ return getattr(matrix_c, command)(*args) def algorithm(command, *args): """MATLAB-like interface to the :mod:`astra.algorithm` module For example: ``astra.m.algorithm('run',i,1000)`` -- Run an algorithm with 1000 iterations. """ if command == 'iterate': command = 'run' return getattr(algorithm_c, command)(*args)
gpl-3.0
bsmrstu-warriors/Moytri---The-Drone-Aider
Lib/cgitb.py
113
12073
"""More comprehensive traceback formatting for Python scripts. To enable this module, do: import cgitb; cgitb.enable() at the top of your script. The optional arguments to enable() are: display - if true, tracebacks are displayed in the web browser logdir - if set, tracebacks are written to files in this directory context - number of lines of source code to show for each stack frame format - 'text' or 'html' controls the output format By default, tracebacks are displayed but not saved, the context is 5 lines and the output format is 'html' (for backwards compatibility with the original use of this module) Alternatively, if you have caught an exception and want cgitb to display it for you, call cgitb.handler(). The optional argument to handler() is a 3-item tuple (etype, evalue, etb) just like the value of sys.exc_info(). The default handler displays output as HTML. """ import inspect import keyword import linecache import os import pydoc import sys import tempfile import time import tokenize import traceback import types def reset(): """Return a string that resets the CGI and browser to a known state.""" return '''<!--: spam Content-Type: text/html <body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> <body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> --> </font> </font> </font> </script> </object> </blockquote> </pre> </table> </table> </table> </table> </table> </font> </font> </font>''' __UNDEF__ = [] # a special sentinel object def small(text): if text: return '<small>' + text + '</small>' else: return '' def strong(text): if text: return '<strong>' + text + '</strong>' else: return '' def grey(text): if text: return '<font color="#909090">' + text + '</font>' else: return '' def lookup(name, frame, locals): """Find the value for a given name in the given environment.""" if name in locals: return 'local', locals[name] if name in frame.f_globals: return 'global', frame.f_globals[name] if '__builtins__' in frame.f_globals: builtins = 
frame.f_globals['__builtins__'] if type(builtins) is type({}): if name in builtins: return 'builtin', builtins[name] else: if hasattr(builtins, name): return 'builtin', getattr(builtins, name) return None, __UNDEF__ def scanvars(reader, frame, locals): """Scan one logical line of Python and look up values of variables used.""" vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__ for ttype, token, start, end, line in tokenize.generate_tokens(reader): if ttype == tokenize.NEWLINE: break if ttype == tokenize.NAME and token not in keyword.kwlist: if lasttoken == '.': if parent is not __UNDEF__: value = getattr(parent, token, __UNDEF__) vars.append((prefix + token, prefix, value)) else: where, value = lookup(token, frame, locals) vars.append((token, where, value)) elif token == '.': prefix += lasttoken + '.' parent = value else: parent, prefix = None, '' lasttoken = token return vars def html(einfo, context=5): """Return a nice HTML document describing a given traceback.""" etype, evalue, etb = einfo if type(etype) is types.ClassType: etype = etype.__name__ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable date = time.ctime(time.time()) head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading( '<big><big>%s</big></big>' % strong(pydoc.html.escape(str(etype))), '#ffffff', '#6622aa', pyver + '<br>' + date) + ''' <p>A problem occurred in a Python script. Here is the sequence of function calls leading up to the error, in the order they occurred.</p>''' indent = '<tt>' + small('&nbsp;' * 5) + '&nbsp;</tt>' frames = [] records = inspect.getinnerframes(etb, context) for frame, file, lnum, func, lines, index in records: if file: file = os.path.abspath(file) link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file)) else: file = link = '?' 
args, varargs, varkw, locals = inspect.getargvalues(frame) call = '' if func != '?': call = 'in ' + strong(func) + \ inspect.formatargvalues(args, varargs, varkw, locals, formatvalue=lambda value: '=' + pydoc.html.repr(value)) highlight = {} def reader(lnum=[lnum]): highlight[lnum[0]] = 1 try: return linecache.getline(file, lnum[0]) finally: lnum[0] += 1 vars = scanvars(reader, frame, locals) rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' % ('<big>&nbsp;</big>', link, call)] if index is not None: i = lnum - index for line in lines: num = small('&nbsp;' * (5-len(str(i))) + str(i)) + '&nbsp;' if i in highlight: line = '<tt>=&gt;%s%s</tt>' % (num, pydoc.html.preformat(line)) rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line) else: line = '<tt>&nbsp;&nbsp;%s%s</tt>' % (num, pydoc.html.preformat(line)) rows.append('<tr><td>%s</td></tr>' % grey(line)) i += 1 done, dump = {}, [] for name, where, value in vars: if name in done: continue done[name] = 1 if value is not __UNDEF__: if where in ('global', 'builtin'): name = ('<em>%s</em> ' % where) + strong(name) elif where == 'local': name = strong(name) else: name = where + strong(name.split('.')[-1]) dump.append('%s&nbsp;= %s' % (name, pydoc.html.repr(value))) else: dump.append(name + ' <em>undefined</em>') rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump)))) frames.append(''' <table width="100%%" cellspacing=0 cellpadding=0 border=0> %s</table>''' % '\n'.join(rows)) exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))), pydoc.html.escape(str(evalue)))] if isinstance(evalue, BaseException): for name in dir(evalue): if name[:1] == '_': continue value = pydoc.html.repr(getattr(evalue, name)) exception.append('\n<br>%s%s&nbsp;=\n%s' % (indent, name, value)) return head + ''.join(frames) + ''.join(exception) + ''' <!-- The above is a description of an error in a Python program, formatted for a Web browser because the 'cgitb' module was enabled. 
In case you are not reading this in a Web browser, here is the original traceback: %s --> ''' % pydoc.html.escape( ''.join(traceback.format_exception(etype, evalue, etb))) def text(einfo, context=5): """Return a plain text document describing a given traceback.""" etype, evalue, etb = einfo if type(etype) is types.ClassType: etype = etype.__name__ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable date = time.ctime(time.time()) head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + ''' A problem occurred in a Python script. Here is the sequence of function calls leading up to the error, in the order they occurred. ''' frames = [] records = inspect.getinnerframes(etb, context) for frame, file, lnum, func, lines, index in records: file = file and os.path.abspath(file) or '?' args, varargs, varkw, locals = inspect.getargvalues(frame) call = '' if func != '?': call = 'in ' + func + \ inspect.formatargvalues(args, varargs, varkw, locals, formatvalue=lambda value: '=' + pydoc.text.repr(value)) highlight = {} def reader(lnum=[lnum]): highlight[lnum[0]] = 1 try: return linecache.getline(file, lnum[0]) finally: lnum[0] += 1 vars = scanvars(reader, frame, locals) rows = [' %s %s' % (file, call)] if index is not None: i = lnum - index for line in lines: num = '%5d ' % i rows.append(num+line.rstrip()) i += 1 done, dump = {}, [] for name, where, value in vars: if name in done: continue done[name] = 1 if value is not __UNDEF__: if where == 'global': name = 'global ' + name elif where != 'local': name = where + name.split('.')[-1] dump.append('%s = %s' % (name, pydoc.text.repr(value))) else: dump.append(name + ' undefined') rows.append('\n'.join(dump)) frames.append('\n%s\n' % '\n'.join(rows)) exception = ['%s: %s' % (str(etype), str(evalue))] if isinstance(evalue, BaseException): for name in dir(evalue): value = pydoc.text.repr(getattr(evalue, name)) exception.append('\n%s%s = %s' % (" "*4, name, value)) return head + ''.join(frames) + ''.join(exception) + ''' The 
above is a description of an error in a Python program. Here is the original traceback: %s ''' % ''.join(traceback.format_exception(etype, evalue, etb)) class Hook: """A hook to replace sys.excepthook that shows tracebacks in HTML.""" def __init__(self, display=1, logdir=None, context=5, file=None, format="html"): self.display = display # send tracebacks to browser if true self.logdir = logdir # log tracebacks to files if not None self.context = context # number of source code lines per frame self.file = file or sys.stdout # place to send the output self.format = format def __call__(self, etype, evalue, etb): self.handle((etype, evalue, etb)) def handle(self, info=None): info = info or sys.exc_info() if self.format == "html": self.file.write(reset()) formatter = (self.format=="html") and html or text plain = False try: doc = formatter(info, self.context) except: # just in case something goes wrong doc = ''.join(traceback.format_exception(*info)) plain = True if self.display: if plain: doc = doc.replace('&', '&amp;').replace('<', '&lt;') self.file.write('<pre>' + doc + '</pre>\n') else: self.file.write(doc + '\n') else: self.file.write('<p>A problem occurred in a Python script.\n') if self.logdir is not None: suffix = ['.txt', '.html'][self.format=="html"] (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir) try: file = os.fdopen(fd, 'w') file.write(doc) file.close() msg = '<p> %s contains the description of this error.' % path except: msg = '<p> Tried to save traceback to %s, but failed.' % path self.file.write(msg + '\n') try: self.file.flush() except: pass handler = Hook().handle def enable(display=1, logdir=None, context=5, format="html"): """Install an exception handler that formats tracebacks as HTML. 
The optional argument 'display' can be set to 0 to suppress sending the traceback to the browser, and 'logdir' can be set to a directory to cause tracebacks to be written to files there.""" sys.excepthook = Hook(display=display, logdir=logdir, context=context, format=format)
gpl-3.0
arju88nair/projectCulminate
venv/lib/python3.5/site-packages/nltk/tokenize/toktok.py
7
8031
# -*- coding: utf-8 -*- # Natural Language Toolkit: Python port of the tok-tok.pl tokenizer. # # Copyright (C) 2001-2015 NLTK Project # Author: Jon Dehdari # Contributors: Liling Tan, Selcuk Ayguney, ikegami, Martijn Pieters # # URL: <http://nltk.sourceforge.net> # For license information, see LICENSE.TXT """ The tok-tok tokenizer is a simple, general tokenizer, where the input has one sentence per line; thus only final period is tokenized. Tok-tok has been tested on, and gives reasonably good results for English, Persian, Russian, Czech, French, German, Vietnamese, Tajik, and a few others. The input should be in UTF-8 encoding. Reference: Jon Dehdari. 2014. A Neurophysiologically-Inspired Statistical Language Model (Doctoral dissertation). Columbus, OH, USA: The Ohio State University. """ import re from six import text_type from nltk.tokenize.api import TokenizerI class ToktokTokenizer(TokenizerI): """ This is a Python port of the tok-tok.pl from https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl >>> toktok = ToktokTokenizer() >>> text = u'Is 9.5 or 525,600 my favorite number?' >>> print (toktok.tokenize(text, return_str=True)) Is 9.5 or 525,600 my favorite number ? >>> text = u'The https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl is a website with/and/or slashes and sort of weird : things' >>> print (toktok.tokenize(text, return_str=True)) The https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl is a website with/and/or slashes and sort of weird : things >>> text = u'\xa1This, is a sentence with weird\xbb symbols\u2026 appearing everywhere\xbf' >>> expected = u'\xa1 This , is a sentence with weird \xbb symbols \u2026 appearing everywhere \xbf' >>> assert toktok.tokenize(text, return_str=True) == expected >>> toktok.tokenize(text) == [u'\xa1', u'This', u',', u'is', u'a', u'sentence', u'with', u'weird', u'\xbb', u'symbols', u'\u2026', u'appearing', u'everywhere', u'\xbf'] True """ # Replace non-breaking spaces with normal spaces. 
NON_BREAKING = re.compile(u"\u00A0"), " " # Pad some funky punctuation. FUNKY_PUNCT_1 = re.compile(u'([،;؛¿!"\])}»›”؟¡%٪°±©®।॥…])'), r" \1 " # Pad more funky punctuation. FUNKY_PUNCT_2 = re.compile(u'([({\[“‘„‚«‹「『])'), r" \1 " # Pad En dash and em dash EN_EM_DASHES = re.compile(u'([–—])'), r" \1 " # Replace problematic character with numeric character reference. AMPERCENT = re.compile('& '), '&amp; ' TAB = re.compile('\t'), ' &#9; ' PIPE = re.compile('\|'), ' &#124; ' # Pad numbers with commas to keep them from further tokenization. COMMA_IN_NUM = re.compile(r'(?<!,)([,،])(?![,\d])'), r' \1 ' # Just pad problematic (often neurotic) hyphen/single quote, etc. PROB_SINGLE_QUOTES = re.compile(r"(['’`])"), r' \1 ' # Group ` ` stupid quotes ' ' into a single token. STUPID_QUOTES_1 = re.compile(r" ` ` "), r" `` " STUPID_QUOTES_2 = re.compile(r" ' ' "), r" '' " # Don't tokenize period unless it ends the line and that it isn't # preceded by another period, e.g. # "something ..." -> "something ..." # "something." -> "something ." FINAL_PERIOD_1 = re.compile(r"(?<!\.)\.$"), r" ." # Don't tokenize period unless it ends the line eg. # " ... stuff." -> "... stuff ." FINAL_PERIOD_2 = re.compile(r"""(?<!\.)\.\s*(["'’»›”]) *$"""), r" . \1" # Treat continuous commas as fake German,Czech, etc.: „ MULTI_COMMAS = re.compile(r'(,{2,})'), r' \1 ' # Treat continuous dashes as fake en-dash, etc. MULTI_DASHES = re.compile(r'(-{2,})'), r' \1 ' # Treat multiple periods as a thing (eg. 
ellipsis) MULTI_DOTS = re.compile(r'(\.{2,})'), r' \1 ' # This is the \p{Open_Punctuation} from Perl's perluniprops # see http://perldoc.perl.org/perluniprops.html OPEN_PUNCT = text_type(u'([{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d' u'\u208d\u2329\u2768\u276a\u276c\u276e\u2770\u2772' u'\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983' u'\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993' u'\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26' u'\u2e28\u3008\u300a\u300c\u300e\u3010\u3014\u3016' u'\u3018\u301a\u301d\ufd3e\ufe17\ufe35\ufe37\ufe39' u'\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b' u'\ufe5d\uff08\uff3b\uff5b\uff5f\uff62') # This is the \p{Close_Punctuation} from Perl's perluniprops CLOSE_PUNCT = text_type(u')]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u232a' u'\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6' u'\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988' u'\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998' u'\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009' u'\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b' u'\u301e\u301f\ufd3f\ufe18\ufe36\ufe38\ufe3a\ufe3c' u'\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e' u'\uff09\uff3d\uff5d\uff60\uff63') # This is the \p{Close_Punctuation} from Perl's perluniprops CURRENCY_SYM = text_type(u'$\xa2\xa3\xa4\xa5\u058f\u060b\u09f2\u09f3\u09fb' u'\u0af1\u0bf9\u0e3f\u17db\u20a0\u20a1\u20a2\u20a3' u'\u20a4\u20a5\u20a6\u20a7\u20a8\u20a9\u20aa\u20ab' u'\u20ac\u20ad\u20ae\u20af\u20b0\u20b1\u20b2\u20b3' u'\u20b4\u20b5\u20b6\u20b7\u20b8\u20b9\u20ba\ua838' u'\ufdfc\ufe69\uff04\uffe0\uffe1\uffe5\uffe6') # Pad spaces after opening punctuations. OPEN_PUNCT_RE = re.compile(u'([{}])'.format(OPEN_PUNCT)), r'\1 ' # Pad spaces before closing punctuations. CLOSE_PUNCT_RE = re.compile(u'([{}])'.format(CLOSE_PUNCT)), r'\1 ' # Pad spaces after currency symbols. 
CURRENCY_SYM_RE = re.compile(u'([{}])'.format(CURRENCY_SYM)), r'\1 ' # Use for tokenizing URL-unfriendly characters: [:/?#] URL_FOE_1 = re.compile(r':(?!//)'), r' : ' # in perl s{:(?!//)}{ : }g; URL_FOE_2 = re.compile(r'\?(?!\S)'), r' ? ' # in perl s{\?(?!\S)}{ ? }g; # in perl: m{://} or m{\S+\.\S+/\S+} or s{/}{ / }g; URL_FOE_3 = re.compile(r'(:\/\/)[\S+\.\S+\/\S+][\/]'), ' / ' URL_FOE_4 = re.compile(r' /'), r' / ' # s{ /}{ / }g; # Left/Right strip, i.e. remove heading/trailing spaces. # These strip regexes should NOT be used, # instead use str.lstrip(), str.rstrip() or str.strip() # (They are kept for reference purposes to the original toktok.pl code) LSTRIP = re.compile(r'^ +'), '' RSTRIP = re.compile(r'\s+$'),'\n' # Merge multiple spaces. ONE_SPACE = re.compile(r' {2,}'), ' ' TOKTOK_REGEXES = [NON_BREAKING, FUNKY_PUNCT_1, URL_FOE_1, URL_FOE_2, URL_FOE_3, URL_FOE_4, AMPERCENT, TAB, PIPE, OPEN_PUNCT_RE, CLOSE_PUNCT_RE, MULTI_COMMAS, COMMA_IN_NUM, FINAL_PERIOD_2, PROB_SINGLE_QUOTES, STUPID_QUOTES_1, STUPID_QUOTES_2, CURRENCY_SYM_RE, EN_EM_DASHES, MULTI_DASHES, MULTI_DOTS, FINAL_PERIOD_1, FINAL_PERIOD_2, ONE_SPACE] def tokenize(self, text, return_str=False): text = text_type(text) # Converts input string into unicode. for regexp, subsitution in self.TOKTOK_REGEXES: text = regexp.sub(subsitution, text) # Finally, strips heading and trailing spaces # and converts output string into unicode. text = text_type(text.strip()) return text if return_str else text.split()
apache-2.0
vertical-knowledge/django-ripozo
testapp/models.py
1
1760
from django.db import models class MyModel(models.Model): """ Doesn't include relationships or files for now """ biginteger = models.BigIntegerField() boolean = models.BooleanField(default=False) char = models.CharField(max_length=100) csi = models.CommaSeparatedIntegerField(max_length=100) date_a = models.DateField() datetime_a = models.DateTimeField() decimal_a = models.DecimalField(max_digits=5, decimal_places=2) email = models.EmailField() float_a = models.FloatField() integer = models.IntegerField() ipaddress = models.IPAddressField() genericip = models.GenericIPAddressField() nullbool = models.NullBooleanField() positiveint = models.PositiveIntegerField() positivesmallint = models.PositiveSmallIntegerField() slug = models.SlugField() smallint = models.SmallIntegerField() time_a = models.TimeField() url = models.URLField() class OneToMany(models.Model): """ This one model has many ManyToOne models. """ one_value = models.CharField(max_length=63) class ManyToOne(models.Model): """ Many of this model have one OneToMany models """ one = models.ForeignKey('OneToMany', related_name='manies') many_value = models.CharField(max_length=63) class ManyToManyFirst(models.Model): value = models.CharField(max_length=63) class ManyToManySecond(models.Model): value = models.CharField(max_length=63) many_to_many = models.ManyToManyField(ManyToManyFirst, related_name='all_the_manies') class OneFirst(models.Model): value = models.CharField(max_length=63) class OneSecond(models.Model): value = models.CharField(max_length=63) first = models.OneToOneField(OneFirst, related_name='second')
gpl-2.0
w1ll1am23/home-assistant
homeassistant/components/n26/sensor.py
3
8420
"""Support for N26 bank account sensors.""" from homeassistant.components.sensor import SensorEntity from . import DEFAULT_SCAN_INTERVAL, DOMAIN, timestamp_ms_to_date from .const import DATA SCAN_INTERVAL = DEFAULT_SCAN_INTERVAL ATTR_IBAN = "account" ATTR_USABLE_BALANCE = "usable_balance" ATTR_BANK_BALANCE = "bank_balance" ATTR_ACC_OWNER_TITLE = "owner_title" ATTR_ACC_OWNER_FIRST_NAME = "owner_first_name" ATTR_ACC_OWNER_LAST_NAME = "owner_last_name" ATTR_ACC_OWNER_GENDER = "owner_gender" ATTR_ACC_OWNER_BIRTH_DATE = "owner_birth_date" ATTR_ACC_OWNER_EMAIL = "owner_email" ATTR_ACC_OWNER_PHONE_NUMBER = "owner_phone_number" ICON_ACCOUNT = "mdi:currency-eur" ICON_CARD = "mdi:credit-card" ICON_SPACE = "mdi:crop-square" def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the N26 sensor platform.""" if discovery_info is None: return api_list = hass.data[DOMAIN][DATA] sensor_entities = [] for api_data in api_list: sensor_entities.append(N26Account(api_data)) for card in api_data.cards: sensor_entities.append(N26Card(api_data, card)) for space in api_data.spaces["spaces"]: sensor_entities.append(N26Space(api_data, space)) add_entities(sensor_entities) class N26Account(SensorEntity): """Sensor for a N26 balance account. A balance account contains an amount of money (=balance). The amount may also be negative. 
""" def __init__(self, api_data) -> None: """Initialize a N26 balance account.""" self._data = api_data self._iban = self._data.balance["iban"] def update(self) -> None: """Get the current balance and currency for the account.""" self._data.update_account() @property def unique_id(self): """Return the unique ID of the entity.""" return self._iban[-4:] @property def name(self) -> str: """Friendly name of the sensor.""" return f"n26_{self._iban[-4:]}" @property def state(self) -> float: """Return the balance of the account as state.""" if self._data.balance is None: return None return self._data.balance.get("availableBalance") @property def unit_of_measurement(self) -> str: """Use the currency as unit of measurement.""" if self._data.balance is None: return None return self._data.balance.get("currency") @property def extra_state_attributes(self) -> dict: """Additional attributes of the sensor.""" attributes = { ATTR_IBAN: self._data.balance.get("iban"), ATTR_BANK_BALANCE: self._data.balance.get("bankBalance"), ATTR_USABLE_BALANCE: self._data.balance.get("usableBalance"), ATTR_ACC_OWNER_TITLE: self._data.account_info.get("title"), ATTR_ACC_OWNER_FIRST_NAME: self._data.account_info.get("kycFirstName"), ATTR_ACC_OWNER_LAST_NAME: self._data.account_info.get("kycLastName"), ATTR_ACC_OWNER_GENDER: self._data.account_info.get("gender"), ATTR_ACC_OWNER_BIRTH_DATE: timestamp_ms_to_date( self._data.account_info.get("birthDate") ), ATTR_ACC_OWNER_EMAIL: self._data.account_info.get("email"), ATTR_ACC_OWNER_PHONE_NUMBER: self._data.account_info.get( "mobilePhoneNumber" ), } for limit in self._data.limits: limit_attr_name = f"limit_{limit['limit'].lower()}" attributes[limit_attr_name] = limit["amount"] return attributes @property def icon(self) -> str: """Set the icon for the sensor.""" return ICON_ACCOUNT class N26Card(SensorEntity): """Sensor for a N26 card.""" def __init__(self, api_data, card) -> None: """Initialize a N26 card.""" self._data = api_data self._account_name = 
api_data.balance["iban"][-4:] self._card = card def update(self) -> None: """Get the current balance and currency for the account.""" self._data.update_cards() self._card = self._data.card(self._card["id"], self._card) @property def unique_id(self): """Return the unique ID of the entity.""" return self._card["id"] @property def name(self) -> str: """Friendly name of the sensor.""" return f"{self._account_name.lower()}_card_{self._card['id']}" @property def state(self) -> float: """Return the balance of the account as state.""" return self._card["status"] @property def extra_state_attributes(self) -> dict: """Additional attributes of the sensor.""" attributes = { "apple_pay_eligible": self._card.get("applePayEligible"), "card_activated": timestamp_ms_to_date(self._card.get("cardActivated")), "card_product": self._card.get("cardProduct"), "card_product_type": self._card.get("cardProductType"), "card_settings_id": self._card.get("cardSettingsId"), "card_Type": self._card.get("cardType"), "design": self._card.get("design"), "exceet_actual_delivery_date": self._card.get("exceetActualDeliveryDate"), "exceet_card_status": self._card.get("exceetCardStatus"), "exceet_expected_delivery_date": self._card.get( "exceetExpectedDeliveryDate" ), "exceet_express_card_delivery": self._card.get("exceetExpressCardDelivery"), "exceet_express_card_delivery_email_sent": self._card.get( "exceetExpressCardDeliveryEmailSent" ), "exceet_express_card_delivery_tracking_id": self._card.get( "exceetExpressCardDeliveryTrackingId" ), "expiration_date": timestamp_ms_to_date(self._card.get("expirationDate")), "google_pay_eligible": self._card.get("googlePayEligible"), "masked_pan": self._card.get("maskedPan"), "membership": self._card.get("membership"), "mpts_card": self._card.get("mptsCard"), "pan": self._card.get("pan"), "pin_defined": timestamp_ms_to_date(self._card.get("pinDefined")), "username_on_card": self._card.get("usernameOnCard"), } return attributes @property def icon(self) -> str: 
"""Set the icon for the sensor.""" return ICON_CARD class N26Space(SensorEntity): """Sensor for a N26 space.""" def __init__(self, api_data, space) -> None: """Initialize a N26 space.""" self._data = api_data self._space = space def update(self) -> None: """Get the current balance and currency for the account.""" self._data.update_spaces() self._space = self._data.space(self._space["id"], self._space) @property def unique_id(self): """Return the unique ID of the entity.""" return f"space_{self._data.balance['iban'][-4:]}_{self._space['name'].lower()}" @property def name(self) -> str: """Friendly name of the sensor.""" return self._space["name"] @property def state(self) -> float: """Return the balance of the account as state.""" return self._space["balance"]["availableBalance"] @property def unit_of_measurement(self) -> str: """Use the currency as unit of measurement.""" return self._space["balance"]["currency"] @property def extra_state_attributes(self) -> dict: """Additional attributes of the sensor.""" goal_value = "" if "goal" in self._space: goal_value = self._space.get("goal").get("amount") attributes = { "name": self._space.get("name"), "goal": goal_value, "background_image_url": self._space.get("backgroundImageUrl"), "image_url": self._space.get("imageUrl"), "is_card_attached": self._space.get("isCardAttached"), "is_hidden_from_balance": self._space.get("isHiddenFromBalance"), "is_locked": self._space.get("isLocked"), "is_primary": self._space.get("isPrimary"), } return attributes @property def icon(self) -> str: """Set the icon for the sensor.""" return ICON_SPACE
apache-2.0
openforis/sepal
modules/google-earth-engine/docker/src/sepalinternal/aoi.py
1
4639
import ee

from .gee import get_info


class Aoi:
    """An area of interest (AOI) backed by an ``ee.Geometry``."""

    # Scene-area lookup tables per data set: the EE table asset, the column
    # holding the scene-area id, and how to extract polygon coordinates.
    _table_by_data_set = {
        'LANDSAT': {
            'table_id': 'users/wiell/SepalResources/landsatSceneAreas',
            'id_column': 'name',
            'coordinates': lambda sceneArea: sceneArea[0]['coordinates'][0]
        },
        'SENTINEL_2': {
            'table_id': 'users/wiell/SepalResources/sentinel2SceneAreas',
            'id_column': 'name',
            'coordinates': lambda sceneArea: sceneArea[0]['coordinates'][0]
        }
    }

    def __init__(self, geometry, spec=None):
        self._geometry = geometry
        self.spec = spec

    @staticmethod
    def create(spec):
        """Creates an Aoi according to provided specs.

        :param spec: A dict specifying the Aoi
        :type spec: dict

        :return: An Aoi instance
        :rtype: Aoi
        """
        # Dispatch on the spec type; avoid shadowing the builtin `type`.
        aoi_type = {
            'POLYGON': Polygon,
            'FUSION_TABLE': FusionTable,
            'EE_TABLE': EETable,
        }[spec['type']]
        return aoi_type(spec)

    def scene_areas(self, data_set):
        """Determines scene areas in the provided reference system this Aoi intersects.

        :param data_set: The data set ('LANDSAT' or 'SENTINEL_2') whose
            scene-area grid to intersect with this Aoi.

        :return: A list of dicts scene areas
        :rtype: list

        :raises ValueError: If the data set has no scene-area table.
        """
        if data_set not in self._table_by_data_set:
            raise ValueError('Unsupported data set: ' + data_set)
        table = self._table_by_data_set[data_set]
        aoi = self._geometry
        # Reduce the intersecting features to [geometry, id] pairs in a
        # single getInfo round-trip.
        scene_area_table = get_info(
            ee.FeatureCollection(table['table_id'])
                .filterBounds(aoi)
                .reduceColumns(ee.Reducer.toList(2), ['.geo', table['id_column']])
                .get('list')
        )
        scene_areas = [
            {
                'id': scene_area[1],
                'polygon': self._to_polygon(table, scene_area),
            }
            for scene_area in scene_area_table
        ]
        return scene_areas

    def _to_polygon(self, table, scene_area):
        # EE returns lng/lat pairs; callers expect lat/lng order.
        return [list(reversed(lnglat)) for lnglat in table['coordinates'](scene_area)]

    def geometry(self):
        """Gets the ee.Geometry of this Aoi.

        :return: The ee.Geometry
        :rtype: ee.Geometry
        """
        return self._geometry


class Polygon(Aoi):
    """An Aoi defined by an explicit polygon path (list of coordinates)."""

    def __init__(self, spec):
        self.path = spec['path']
        geometry = ee.Geometry(ee.Geometry.Polygon(coords=[self.path]), opt_geodesic=False)
        Aoi.__init__(self, geometry, spec)

    def __str__(self):
        # f-string: `path` is a list, so `+` concatenation would raise.
        return f'Polygon(path: {self.path})'


class FusionTable(Aoi):
    """An Aoi defined by a feature in a (legacy) Google Fusion Table."""

    def __init__(self, spec):
        self.table_name = spec['id']
        self.key_column = spec['keyColumn']
        table = ee.FeatureCollection('ft:' + self.table_name)
        # Coerce the key to float when the table column is numeric, so the
        # metadata filter compares like with like.
        number_column = is_number(spec['key']) and get_info(table)['columns'].get(self.key_column) == 'Number'
        self.value_column = float(spec['key']) if number_column else spec['key']
        self.feature_collection = table.filterMetadata(self.key_column, 'equals', self.value_column)
        geometry = self.feature_collection.geometry()
        Aoi.__init__(self, geometry, spec)

    def __str__(self):
        # f-string: `value_column` may be a float, so `+` would raise.
        return f'FusionTable(table_name: {self.table_name}' \
               f', key_column: {self.key_column}' \
               f', value_column: {self.value_column})'


class EETable(Aoi):
    """An Aoi defined by one or all features of an Earth Engine table asset."""

    def __init__(self, spec):
        self.table_name = spec['id']
        self.key_column = spec['keyColumn']
        table = ee.FeatureCollection(self.table_name)
        self.value_column = spec['key']
        if self.key_column:
            # Match the key both as a string and, when numeric, as a float,
            # since the column type is unknown here.
            filters = [ee.Filter.eq(self.key_column, self.value_column)]
            if is_number(self.value_column):
                filters.append(ee.Filter.eq(self.key_column, float(self.value_column)))
            self.feature_collection = table.filter(ee.Filter.Or(*filters))
        else:
            # No key column: use the whole table.
            self.feature_collection = table
        geometry = self.feature_collection.geometry()
        Aoi.__init__(self, geometry, spec)

    def __str__(self):
        return f'EETable(table_name: {self.table_name}' \
               f', key_column: {self.key_column}' \
               f', value_column: {self.value_column})'


class AssetAoi(Aoi):
    """An Aoi wrapping a pre-built geometry from an asset."""

    def __init__(self, geometry, spec):
        Aoi.__init__(self, geometry, spec)


def is_number(s):
    """Return True if `s` parses as a number via float() or as a numeric
    unicode character, False otherwise."""
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        # Not parseable as a float; fall through to the unicode check.
        pass
    try:
        import unicodedata
        unicodedata.numeric(s)
        return True
    except (TypeError, ValueError):
        pass
    return False
mit
A-deLuna/servo
tests/wpt/css-tests/tools/html5lib/html5lib/filters/optionaltags.py
1727
10500
from __future__ import absolute_import, division, unicode_literals

from . import _base


class Filter(_base.Filter):
    """Token-stream filter that drops start/end tags the HTML spec marks
    as optional (e.g. </li> before another <li>)."""

    def slider(self):
        """Yield (previous, current, next) token triples over the stream."""
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        # Flush the final token with next=None.
        yield previous2, previous1, None

    def __iter__(self):
        for previous, token, next in self.slider():
            type = token["type"]
            if type == "StartTag":
                # Keep the tag if it carries attributes or is not omissible.
                if (token["data"]
                        or not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token

    def is_optional_start(self, tagname, previous, next):
        """Return True if `tagname`'s start tag may be omitted here.

        `previous`/`next` are the neighbouring tokens (or None).
        """
        type = next and next["type"] or None
        # Fixed: was `tagname in 'html'`, which is a substring test on the
        # string 'html' rather than an equality check.
        if tagname == 'html':
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if type in ("StartTag", "EmptyTag"):
                return True
            elif type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceeded by another colgroup element whose
            # end tag has been omitted.
            if type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceeded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if type == "StartTag":
                # omit the thead and tfoot elements' end tag when they are
                # immediately followed by a tbody element. See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                        previous['name'] in ('tbody', 'thead', 'tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False

    def is_optional_end(self, tagname, next):
        """Return True if `tagname`'s end tag may be omitted here."""
        type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li element's end tag may be omitted if the li element is
            # immediately followed by another li element or if there is
            # no more content in the parent element.
            # An optgroup element's end tag may be omitted if the optgroup
            # element is immediately followed by another optgroup element,
            # or if there is no more content in the parent element.
            # A tr element's end tag may be omitted if the tr element is
            # immediately followed by another tr element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] == tagname
            else:
                return type == "EndTag" or type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by an address, article, aside,
            # blockquote, datagrid, dialog, dir, div, dl, fieldset,
            # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
            # nav, ol, p, pre, section, table, or ul, element, or if
            # there is no more content in the parent element.
            if type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element,
            # or if it is immediately followed by an <code>optgroup</code>
            # element, or if there is no more content in the parent
            # element.
            if type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return type == "EndTag" or type is None
        elif tagname in ('rt', 'rp'):
            # An rt element's end tag may be omitted if the rt element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            # An rp element's end tag may be omitted if the rp element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return type == "EndTag" or type is None
        elif tagname in ('td', 'th'):
            # A td element's end tag may be omitted if the td element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            # A th element's end tag may be omitted if the th element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return type == "EndTag" or type is None
        return False
mpl-2.0
flatangle/flatlib
flatlib/tools/planetarytime.py
1
4632
""" This file is part of flatlib - (C) FlatAngle Author: João Ventura (flatangleweb@gmail.com) This module provides useful functions for handling planetary times. The most import element is the HourTable class which handles all queries to the planetary rulers and hour rulers, including the start and ending datetimes of each hour ruler. """ from flatlib import const from flatlib.ephem import ephem from flatlib.datetime import Datetime # Planetary rulers starting at Sunday DAY_RULERS = [ const.SUN, const.MOON, const.MARS, const.MERCURY, const.JUPITER, const.VENUS, const.SATURN ] NIGHT_RULERS = [ const.JUPITER, const.VENUS, const.SATURN, const.SUN, const.MOON, const.MARS, const.MERCURY ] # Planetary hours round list starting # at Sunday's sunrise ROUND_LIST = [ const.SUN, const.VENUS, const.MERCURY, const.MOON, const.SATURN, const.JUPITER, const.MARS ] # === Private functions === # def nthRuler(n, dow): """ Returns the n-th hour ruler since last sunrise by day of week. Both arguments are zero based. """ index = (dow * 24 + n) % 7 return ROUND_LIST[index] def hourTable(date, pos): """ Creates the planetary hour table for a date and position. The table includes both diurnal and nocturnal hour sequences and each of the 24 entries (12 * 2) are like (startJD, endJD, ruler). 
""" lastSunrise = ephem.lastSunrise(date, pos) middleSunset = ephem.nextSunset(lastSunrise, pos) nextSunrise = ephem.nextSunrise(date, pos) table = [] # Create diurnal hour sequence length = (middleSunset.jd - lastSunrise.jd) / 12.0 for i in range(12): start = lastSunrise.jd + i * length end = start + length ruler = nthRuler(i, lastSunrise.date.dayofweek()) table.append([start, end, ruler]) # Create nocturnal hour sequence length = (nextSunrise.jd - middleSunset.jd) / 12.0 for i in range(12): start = middleSunset.jd + i * length end = start + length ruler = nthRuler(i + 12, lastSunrise.date.dayofweek()) table.append([start, end, ruler]) return table def getHourTable(date, pos): """ Returns an HourTable object. """ table = hourTable(date, pos) return HourTable(table, date) # ------------------- # # HourTable Class # # ------------------- # class HourTable: """ This class represents a Planetary Hour Table and includes methods to access its properties. """ def __init__(self, table, date): self.table = table self.date = date self.currIndex = self.index(date) def index(self, date): """ Returns the index of a date in the table. """ for (i, (start, end, ruler)) in enumerate(self.table): if start <= date.jd <= end: return i return None # === Properties === # def dayRuler(self): """ Returns the current day ruler. """ return self.table[0][2] def nightRuler(self): """ Returns the current night ruler. """ return self.table[12][2] def currRuler(self): """ Returns the current day or night ruler considering if it's day or night. """ if self.currIndex < 12: return self.dayRuler() else: return self.nightRuler() def hourRuler(self): """ Returns the current hour ruler. """ return self.table[self.currIndex][2] def currInfo(self): """ Returns information about the current planetary time. """ return self.indexInfo(self.currIndex) def indexInfo(self, index): """ Returns information about a specific planetary time. 
""" entry = self.table[index] info = { # Default is diurnal 'mode': 'Day', 'ruler': self.dayRuler(), 'dayRuler': self.dayRuler(), 'nightRuler': self.nightRuler(), 'hourRuler': entry[2], 'hourNumber': index + 1, 'tableIndex': index, 'start': Datetime.fromJD(entry[0], self.date.utcoffset), 'end': Datetime.fromJD(entry[1], self.date.utcoffset) } if index >= 12: # Set information as nocturnal info.update({ 'mode': 'Night', 'ruler': info['nightRuler'], 'hourNumber': index + 1 - 12 }) return info
mit
aam-at/tensorflow
tensorflow/compiler/tests/stateless_random_ops_test.py
3
8230
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateless random-number generation ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.compiler.tests import xla_test
from tensorflow.python.compiler.xla import xla
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.kernel_tests.random import util as \
    random_test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import stateless_random_ops as stateless
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


class StatelessRandomOpsTest(xla_test.XLATestCase):
  """Test cases for stateless random-number generator operators."""

  def _random_types(self, include_int=False):
    # Intersect the types this test backend supports with the types the
    # stateless ops accept.
    allowed_types = {dtypes.float64, dtypes.float32, dtypes.bfloat16}
    if include_int:
      allowed_types.update({dtypes.int32, dtypes.int64})
    return self.all_tf_types & allowed_types

  @test_util.run_v2_only
  def testForcedCompile(self):
    """Tests whole-function forced-compilation.

    This test checks that stateless_random_* can be used in forced-compilation
    scenarios (e.g. TPU). The new version of stateless_random_* requires the
    intermediate tensor `alg` to be compile-time constant, so we need to check
    that this requirement is met. We use xla.compile instead of tf.function's
    experimental_compile because the latter doesn't throw an error even if the
    compile-time-constant constraint is not met.
    """
    if config.list_logical_devices('TPU'):
      self.skipTest('To accommodate OSS, xla.compile support for TPU is not '
                    'linked in.')

    @def_function.function
    def f(x):
      return xla.compile(
          lambda x: stateless.stateless_random_normal([], seed=x), [x])

    f([1, 2])

  def testDeterminism(self):
    # Stateless values should be equal iff the seeds are equal (roughly)
    with self.session(), self.test_scope():
      seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
      # Seeds repeated 3x to also verify same-seed determinism.
      seeds = [(x, y) for x in range(5) for y in range(5)] * 3  # pylint: disable=g-complex-comprehension
      for stateless_op in [
          stateless.stateless_random_uniform, stateless.stateless_random_normal
      ]:
        for shape in (), (3,), (2, 5):
          for dtype in self._random_types():
            # Skip bfloat16. The result of bfloat16 is truncated from 32-bit
            # result. With different seeds, the 32-bit results are different,
            # but the truncated 16-bit results might be the same.
            if dtype == dtypes.bfloat16:
              continue
            pure = stateless_op(shape, seed=seed_t, dtype=dtype)
            values = [(seed, pure.eval(feed_dict={
                seed_t: seed
            })) for seed in seeds]
            for s0, v0 in values:
              for s1, v1 in values:
                self.assertEqual(s0 == s1, np.all(v0 == v1))

  def testRandomUniformIsInRange(self):
    with self.session() as sess, self.test_scope():
      for dtype in self._random_types(include_int=True):
        maxval = 1
        if dtype.is_integer:
          maxval = 100
        seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
        x = stateless.stateless_random_uniform(
            shape=[1000], seed=seed_t, maxval=maxval, dtype=dtype)
        y = sess.run(x, {seed_t: [0x12345678, 0xabcdef1]})
        # Uniform samples must lie in [0, maxval).
        self.assertTrue(np.all(y >= 0))
        self.assertTrue(np.all(y < maxval))

  def testDistributionOfStatelessRandomUniform(self):
    """Use Pearson's Chi-squared test to test for uniformity."""
    with self.session() as sess, self.test_scope():
      for dtype in self._random_types(include_int=True):
        seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
        n = 1000
        maxval = 1
        if dtype.is_integer:
          maxval = 100
        x = stateless.stateless_random_uniform(
            shape=[n], seed=seed_t, maxval=maxval, dtype=dtype)
        y = sess.run(x, {seed_t: [565656, 121212]})
        # Convert y to float and normalize its value to range [0, 1) when
        # maxval != 1.
        y = y.astype(float) / maxval
        # Tests that the values are distributed amongst 10 bins with equal
        # probability. 16.92 is the Chi^2 value for 9 degrees of freedom with
        # p=0.05. This test is probabilistic and would be flaky if the random
        # seed were not fixed.
        self.assertLess(random_test_util.chi_squared(y, 10), 16.92)

  def testRandomNormalIsFinite(self):
    with self.session() as sess, self.test_scope():
      for dtype in self._random_types():
        seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
        x = stateless.stateless_random_normal(
            shape=[10000], seed=seed_t, dtype=dtype)
        y = sess.run(x, {seed_t: [0x12345678, 0xabcdef1]})
        # No NaN/Inf should ever be produced.
        self.assertTrue(np.all(np.isfinite(y)))

  def testDistributionOfStatelessRandomNormal(self):
    """Use Anderson-Darling test to test distribution appears normal."""
    with self.session() as sess, self.test_scope():
      for dtype in self._random_types():
        seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
        n = 1000
        x = stateless.stateless_random_normal(
            shape=[n], seed=seed_t, dtype=dtype)
        y = sess.run(x, {seed_t: [25252, 314159]})
        # The constant 2.492 is the 5% critical value for the Anderson-Darling
        # test where the mean and variance are known. This test is probabilistic
        # so to avoid flakiness the seed is fixed.
        self.assertLess(
            random_test_util.anderson_darling(y.astype(float)), 2.492)

  def testTruncatedNormal(self):
    for dtype in self._random_types():
      with self.session() as sess, self.test_scope():
        seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
        n = 10000000
        x = stateless.stateless_truncated_normal(
            shape=[n], seed=seed_t, dtype=dtype)
        y = sess.run(x, {seed_t: [0x12345678, 0xabcdef1]})
        # bfloat16 gets a looser variance tolerance due to its low precision.
        random_test_util.test_truncated_normal(
            self.assertEqual, self.assertAllClose, n, y,
            variance_rtol=6e-3 if dtype == dtypes.bfloat16 else 1e-3)


class StatelessRandomOpsBenchmark(test.Benchmark):
  """Microbenchmarks for the stateless random ops."""

  def _benchmarkUniform(self, name, dtype, use_xla_jit):
    # Builds the benchmarked graph: one stateless uniform draw of a fixed
    # (10, 1000, 1000) shape seeded from a variable.

    def builder_fn():
      shape = (10, 1000, 1000)
      seed_var = variables.Variable((312, 456),
                                    dtype=dtypes.int32,
                                    name='input')
      random_t = stateless.stateless_random_uniform(
          shape, seed=seed_var, dtype=dtype)
      return '%s.shape%s' % (name, shape), [random_t]

    xla_test.Benchmark(self, builder_fn, use_xla_jit=use_xla_jit, device='cpu')

  def benchmarkUniformF32(self):
    self._benchmarkUniform(
        'uniform_f32', dtype=dtypes.float32, use_xla_jit=False)

  def benchmarkUniformF64(self):
    self._benchmarkUniform(
        'uniform_f64', dtype=dtypes.float64, use_xla_jit=False)

  def benchmarkUniformF32XLA(self):
    self._benchmarkUniform(
        'uniform_f32', dtype=dtypes.float32, use_xla_jit=True)

  def benchmarkUniformF64XLA(self):
    self._benchmarkUniform(
        'uniform_f64', dtype=dtypes.float64, use_xla_jit=True)


if __name__ == '__main__':
  config.set_soft_device_placement(False)
  test.main()
apache-2.0
synopat/pyload
module/plugins/hooks/EventMapper.py
8
1988
# -*- coding: utf-8 -*-

from ..internal.Addon import Addon


class EventMapper(Addon):
    """Addon that re-dispatches legacy hook callbacks as manager events.

    Every hook method simply forwards its arguments to the event manager
    under an event of the same name.
    """
    __name__ = "EventMapper"
    __type__ = "hook"
    __version__ = "0.02"
    __status__ = "testing"

    __config__ = [("activated", "bool", "Activated", True)]

    __description__ = """Map old events to new events"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]

    def _forward(self, event_name, args):
        """Re-emit `event_name` on the event manager with the given args."""
        self.manager.dispatchEvent(event_name, *args)

    def activate(self, *args):
        self._forward("activate", args)

    def exit(self, *args):
        self._forward("exit", args)

    def config_changed(self, *args):
        self._forward("config_changed", args)

    def all_downloads_finished(self, *args):
        self._forward("all_downloads_finished", args)

    def all_downloads_processed(self, *args):
        self._forward("all_downloads_processed", args)

    def links_added(self, *args):
        self._forward("links_added", args)

    def download_preparing(self, *args):
        self._forward("download_preparing", args)

    def download_finished(self, *args):
        self._forward("download_finished", args)

    def download_failed(self, *args):
        self._forward("download_failed", args)

    def package_deleted(self, *args):
        self._forward("package_deleted", args)

    def package_finished(self, *args):
        self._forward("package_finished", args)

    def before_reconnect(self, *args):
        self._forward("before_reconnect", args)

    def after_reconnect(self, *args):
        self._forward("after_reconnect", args)

    def captcha_task(self, *args):
        self._forward("captcha_task", args)

    def captcha_correct(self, *args):
        self._forward("captcha_correct", args)

    def captcha_invalid(self, *args):
        self._forward("captcha_invalid", args)
gpl-3.0
GitYiheng/reinforcement_learning_test
test01_cartpendulum/t01_qlearning.py
1
4374
from __future__ import print_function, division from builtins import range import gym import os import sys import numpy as np import matplotlib.pyplot as plt from gym import wrappers from datetime import datetime from sklearn.pipeline import FeatureUnion from sklearn.preprocessing import StandardScaler from sklearn.kernel_approximation import RBFSampler class SGDRegressor: def __init__(self, D): self.w = np.random.randn(D) / np.sqrt(D) self.lr = 10e-2 def partial_fit(self, X, Y): self.w += self.lr*(Y - X.dot(self.w)).dot(X) def predict(self, X): return X.dot(self.w) class FeatureTransformer: def __init__(self, env): # observation_examples = np.array([env.observation_space.sample() for x in range(10000)]) # NOTE!! state samples are poor, b/c you get velocities --> infinity observation_examples = np.random.random((20000, 4))*2 - 1 scaler = StandardScaler() scaler.fit(observation_examples) # Used to converte a state to a featurizes represenation. # We use RBF kernels with different variances to cover different parts of the space featurizer = FeatureUnion([ ("rbf1", RBFSampler(gamma=0.05, n_components=1000)), ("rbf2", RBFSampler(gamma=1.0, n_components=1000)), ("rbf3", RBFSampler(gamma=0.5, n_components=1000)), ("rbf4", RBFSampler(gamma=0.1, n_components=1000)) ]) feature_examples = featurizer.fit_transform(scaler.transform(observation_examples)) self.dimensions = feature_examples.shape[1] self.scaler = scaler self.featurizer = featurizer def transform(self, observations): scaled = self.scaler.transform(observations) return self.featurizer.transform(scaled) # Holds one SGDRegressor for each action class Model: def __init__(self, env, feature_transformer): self.env = env self.models = [] self.feature_transformer = feature_transformer for i in range(env.action_space.n): model = SGDRegressor(feature_transformer.dimensions) self.models.append(model) def predict(self, s): X = self.feature_transformer.transform(np.atleast_2d(s)) return np.array([m.predict(X)[0] for m in 
self.models]) def update(self, s, a, G): X = self.feature_transformer.transform(np.atleast_2d(s)) self.models[a].partial_fit(X, [G]) def sample_action(self, s, eps): if np.random.random() < eps: return self.env.action_space.sample() else: return np.argmax(self.predict(s)) def play_one(env, model, eps, gamma): observation = env.reset() done = False totalreward = 0 iters = 0 while not done and iters < 2000: # if we reach 2000, just quit, don't want this going forever # the 200 limit seems a bit early action = model.sample_action(observation, eps) prev_observation = observation observation, reward, done, info = env.step(action) if done: reward = -200 # update the model next = model.predict(observation) assert(len(next.shape) == 1) G = reward + gamma*np.max(next) model.update(prev_observation, action, G) if reward == 1: # if we changed the reward to -200 totalreward += reward iters += 1 return totalreward def plot_running_avg(totalrewards): N = len(totalrewards) running_avg = np.empty(N) for t in range(N): running_avg[t] = totalrewards[max(0, t-100):(t+1)].mean() plt.plot(running_avg) plt.title("Running Average") plt.show() def main(): env = gym.make('CartPole-v0') ft = FeatureTransformer(env) model = Model(env, ft) gamma = 0.99 if 'monitor' in sys.argv: filename = os.path.basename(__file__).split('.')[0] monitor_dir = './' + filename + '_' + str(datetime.now()) env = wrappers.Monitor(env, monitor_dir) N = 500 totalrewards = np.empty(N) costs = np.empty(N) for n in range(N): eps = 1.0/np.sqrt(n+1) totalreward = play_one(env, model, eps, gamma) totalrewards[n] = totalreward if n % 100 == 0: print("episode:", n, "total reward:", totalreward, "eps:", eps, "avg reward (last 100):", totalrewards[max(0, n-100):(n+1)].mean()) print("avg reward for last 100 episodes:", totalrewards[-100:].mean()) print("total steps:", totalrewards.sum()) plt.plot(totalrewards) plt.title("Rewards") plt.show() plot_running_avg(totalrewards) if __name__ == '__main__': main()
mit
marc-sensenich/ansible
lib/ansible/modules/cloud/vmware/vmware_datastore_facts.py
32
11065
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Tim Rightnour <thegarbledone@gmail.com> # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: vmware_datastore_facts short_description: Gather facts about datastores available in given vCenter description: - This module can be used to gather facts about datastores in VMWare infrastructure. - All values and VMware object names are case sensitive. version_added: 2.5 author: - Tim Rightnour (@garbled1) notes: - Tested on vSphere 5.5, 6.0 and 6.5 requirements: - "python >= 2.6" - PyVmomi options: name: description: - Name of the datastore to match. - If set, facts of specific datastores are returned. required: False datacenter: description: - Datacenter to search for datastores. - This parameter is required, if C(cluster) is not supplied. required: False aliases: ['datacenter_name'] cluster: description: - Cluster to search for datastores. - If set, facts of datastores belonging this clusters will be returned. - This parameter is required, if C(datacenter) is not supplied. required: False gather_nfs_mount_info: description: - Gather mount information of NFS datastores. - Disabled per default because this slows down the execution if you have a lot of datastores. type: bool default: false version_added: 2.8 gather_vmfs_mount_info: description: - Gather mount information of VMFS datastores. - Disabled per default because this slows down the execution if you have a lot of datastores. 
type: bool default: false version_added: 2.8 extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' - name: Gather facts from standalone ESXi server having datacenter as 'ha-datacenter' vmware_datastore_facts: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter_name: '{{ datacenter_name }}' validate_certs: no delegate_to: localhost register: facts - name: Gather facts from datacenter about specific datastore vmware_datastore_facts: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter_name: '{{ datacenter_name }}' name: datastore1 delegate_to: localhost register: facts ''' RETURN = """ datastores: description: metadata about the available datastores returned: always type: list sample: [ { "accessible": false, "capacity": 42681237504, "datastore_cluster": "datacluster0", "freeSpace": 39638269952, "maintenanceMode": "normal", "multipleHostAccess": false, "name": "datastore2", "provisioned": 12289211488, "type": "VMFS", "uncommitted": 9246243936, "url": "ds:///vmfs/volumes/5a69b18a-c03cd88c-36ae-5254001249ce/", "vmfs_blockSize": 1024, "vmfs_uuid": "5a69b18a-c03cd88c-36ae-5254001249ce", "vmfs_version": "6.81" }, { "accessible": true, "capacity": 5497558138880, "datastore_cluster": "datacluster0", "freeSpace": 4279000641536, "maintenanceMode": "normal", "multipleHostAccess": true, "name": "datastore3", "nfs_path": "/vol/datastore3", "nfs_server": "nfs_server1", "provisioned": 1708109410304, "type": "NFS", "uncommitted": 489551912960, "url": "ds:///vmfs/volumes/420b3e73-67070776/" }, ] """ try: from pyVmomi import vim except ImportError: pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware import (PyVmomi, vmware_argument_spec, get_all_objs, find_cluster_by_name, get_parent_datacenter) class VMwareHostDatastore(PyVmomi): """ This class populates the datastore list """ def __init__(self, 
module): super(VMwareHostDatastore, self).__init__(module) self.gather_nfs_mount_info = self.module.params['gather_nfs_mount_info'] self.gather_vmfs_mount_info = self.module.params['gather_vmfs_mount_info'] def check_datastore_host(self, esxi_host, datastore): """ Get all datastores of specified ESXi host """ esxi = self.find_hostsystem_by_name(esxi_host) if esxi is None: self.module.fail_json(msg="Failed to find ESXi hostname %s " % esxi_host) storage_system = esxi.configManager.storageSystem host_file_sys_vol_mount_info = storage_system.fileSystemVolumeInfo.mountInfo for host_mount_info in host_file_sys_vol_mount_info: if host_mount_info.volume.name == datastore: return host_mount_info return None def build_datastore_list(self, datastore_list): """ Build list with datastores """ datastores = list() for datastore in datastore_list: summary = datastore.summary datastore_summary = dict() datastore_summary['accessible'] = summary.accessible datastore_summary['capacity'] = summary.capacity datastore_summary['name'] = summary.name datastore_summary['freeSpace'] = summary.freeSpace datastore_summary['maintenanceMode'] = summary.maintenanceMode datastore_summary['multipleHostAccess'] = summary.multipleHostAccess datastore_summary['type'] = summary.type if self.gather_nfs_mount_info or self.gather_vmfs_mount_info: if self.gather_nfs_mount_info and summary.type.startswith("NFS"): # get mount info from the first ESXi host attached to this NFS datastore host_mount_info = self.check_datastore_host(summary.datastore.host[0].key.name, summary.name) datastore_summary['nfs_server'] = host_mount_info.volume.remoteHost datastore_summary['nfs_path'] = host_mount_info.volume.remotePath if self.gather_vmfs_mount_info and summary.type == "VMFS": # get mount info from the first ESXi host attached to this VMFS datastore host_mount_info = self.check_datastore_host(summary.datastore.host[0].key.name, summary.name) datastore_summary['vmfs_blockSize'] = host_mount_info.volume.blockSize 
datastore_summary['vmfs_version'] = host_mount_info.volume.version datastore_summary['vmfs_uuid'] = host_mount_info.volume.uuid # vcsim does not return uncommitted if not summary.uncommitted: summary.uncommitted = 0 datastore_summary['uncommitted'] = summary.uncommitted datastore_summary['url'] = summary.url # Calculated values datastore_summary['provisioned'] = summary.capacity - summary.freeSpace + summary.uncommitted datastore_summary['datastore_cluster'] = 'N/A' if isinstance(datastore.parent, vim.StoragePod): datastore_summary['datastore_cluster'] = datastore.parent.name if self.module.params['name']: if datastore_summary['name'] == self.module.params['name']: datastores.extend([datastore_summary]) else: datastores.extend([datastore_summary]) return datastores class PyVmomiCache(object): """ This class caches references to objects which are requested multiples times but not modified """ def __init__(self, content, dc_name=None): self.content = content self.dc_name = dc_name self.clusters = {} self.parent_datacenters = {} def get_all_objs(self, content, types, confine_to_datacenter=True): """ Wrapper around get_all_objs to set datacenter context """ objects = get_all_objs(content, types) if confine_to_datacenter: if hasattr(objects, 'items'): # resource pools come back as a dictionary for k, v in objects.items(): parent_dc = get_parent_datacenter(k) if parent_dc.name != self.dc_name: objects.pop(k, None) else: # everything else should be a list objects = [x for x in objects if get_parent_datacenter(x).name == self.dc_name] return objects class PyVmomiHelper(PyVmomi): """ This class gets datastores """ def __init__(self, module): super(PyVmomiHelper, self).__init__(module) self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter']) def lookup_datastore(self): """ Get datastore(s) per ESXi host or vCenter server """ datastores = self.cache.get_all_objs(self.content, [vim.Datastore], confine_to_datacenter=True) return datastores def 
lookup_datastore_by_cluster(self): """ Get datastore(s) per cluster """ cluster = find_cluster_by_name(self.content, self.params['cluster']) if not cluster: self.module.fail_json(msg='Failed to find cluster "%(cluster)s"' % self.params) c_dc = cluster.datastore return c_dc def main(): """ Main """ argument_spec = vmware_argument_spec() argument_spec.update( name=dict(type='str'), datacenter=dict(type='str', aliases=['datacenter_name']), cluster=dict(type='str'), gather_nfs_mount_info=dict(type='bool', default=False), gather_vmfs_mount_info=dict(type='bool', default=False) ) module = AnsibleModule(argument_spec=argument_spec, required_one_of=[ ['cluster', 'datacenter'], ], supports_check_mode=True ) result = dict(changed=False) pyv = PyVmomiHelper(module) if module.params['cluster']: dxs = pyv.lookup_datastore_by_cluster() else: dxs = pyv.lookup_datastore() vmware_host_datastore = VMwareHostDatastore(module) datastores = vmware_host_datastore.build_datastore_list(dxs) result['datastores'] = datastores # found a datastore if datastores: module.exit_json(**result) else: msg = "Unable to gather datastore facts" if module.params['name']: msg += " for %(name)s" % module.params msg += " in datacenter %(datacenter)s" % module.params module.fail_json(msg=msg) if __name__ == '__main__': main()
gpl-3.0
davidharrigan/django
django/apps/registry.py
170
16569
import sys import threading import warnings from collections import Counter, OrderedDict, defaultdict from functools import partial from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured from django.utils import lru_cache from .config import AppConfig class Apps(object): """ A registry that stores the configuration of installed applications. It also keeps track of models eg. to provide reverse-relations. """ def __init__(self, installed_apps=()): # installed_apps is set to None when creating the master registry # because it cannot be populated at that point. Other registries must # provide a list of installed apps and are populated immediately. if installed_apps is None and hasattr(sys.modules[__name__], 'apps'): raise RuntimeError("You must supply an installed_apps argument.") # Mapping of app labels => model names => model classes. Every time a # model is imported, ModelBase.__new__ calls apps.register_model which # creates an entry in all_models. All imported models are registered, # regardless of whether they're defined in an installed application # and whether the registry has been populated. Since it isn't possible # to reimport a module safely (it could reexecute initialization code) # all_models is never overridden or reset. self.all_models = defaultdict(OrderedDict) # Mapping of labels to AppConfig instances for installed apps. self.app_configs = OrderedDict() # Stack of app_configs. Used to store the current state in # set_available_apps and set_installed_apps. self.stored_app_configs = [] # Whether the registry is populated. self.apps_ready = self.models_ready = self.ready = False # Lock for thread-safe population. self._lock = threading.Lock() # Maps ("app_label", "modelname") tuples to lists of functions to be # called when the corresponding model is ready. Used by this class's # `lazy_model_operation()` and `do_pending_operations()` methods. 
self._pending_operations = defaultdict(list) # Populate apps and models, unless it's the master registry. if installed_apps is not None: self.populate(installed_apps) def populate(self, installed_apps=None): """ Loads application configurations and models. This method imports each application module and then each model module. It is thread safe and idempotent, but not reentrant. """ if self.ready: return # populate() might be called by two threads in parallel on servers # that create threads before initializing the WSGI callable. with self._lock: if self.ready: return # app_config should be pristine, otherwise the code below won't # guarantee that the order matches the order in INSTALLED_APPS. if self.app_configs: raise RuntimeError("populate() isn't reentrant") # Load app configs and app modules. for entry in installed_apps: if isinstance(entry, AppConfig): app_config = entry else: app_config = AppConfig.create(entry) if app_config.label in self.app_configs: raise ImproperlyConfigured( "Application labels aren't unique, " "duplicates: %s" % app_config.label) self.app_configs[app_config.label] = app_config # Check for duplicate app names. counts = Counter( app_config.name for app_config in self.app_configs.values()) duplicates = [ name for name, count in counts.most_common() if count > 1] if duplicates: raise ImproperlyConfigured( "Application names aren't unique, " "duplicates: %s" % ", ".join(duplicates)) self.apps_ready = True # Load models. for app_config in self.app_configs.values(): all_models = self.all_models[app_config.label] app_config.import_models(all_models) self.clear_cache() self.models_ready = True for app_config in self.get_app_configs(): app_config.ready() self.ready = True def check_apps_ready(self): """ Raises an exception if all apps haven't been imported yet. """ if not self.apps_ready: raise AppRegistryNotReady("Apps aren't loaded yet.") def check_models_ready(self): """ Raises an exception if all models haven't been imported yet. 
""" if not self.models_ready: raise AppRegistryNotReady("Models aren't loaded yet.") def get_app_configs(self): """ Imports applications and returns an iterable of app configs. """ self.check_apps_ready() return self.app_configs.values() def get_app_config(self, app_label): """ Imports applications and returns an app config for the given label. Raises LookupError if no application exists with this label. """ self.check_apps_ready() try: return self.app_configs[app_label] except KeyError: message = "No installed app with label '%s'." % app_label for app_config in self.get_app_configs(): if app_config.name == app_label: message += " Did you mean '%s'?" % app_config.label break raise LookupError(message) # This method is performance-critical at least for Django's test suite. @lru_cache.lru_cache(maxsize=None) def get_models(self, include_auto_created=False, include_deferred=False, include_swapped=False): """ Returns a list of all installed models. By default, the following models aren't included: - auto-created models for many-to-many relations without an explicit intermediate table, - models created to satisfy deferred attribute queries, - models that have been swapped out. Set the corresponding keyword argument to True to include such models. """ self.check_models_ready() result = [] for app_config in self.app_configs.values(): result.extend(list(app_config.get_models( include_auto_created, include_deferred, include_swapped))) return result def get_model(self, app_label, model_name=None): """ Returns the model matching the given app_label and model_name. As a shortcut, this function also accepts a single argument in the form <app_label>.<model_name>. model_name is case-insensitive. Raises LookupError if no application exists with this label, or no model exists with this name in the application. Raises ValueError if called with a single argument that doesn't contain exactly one dot. 
""" self.check_models_ready() if model_name is None: app_label, model_name = app_label.split('.') return self.get_app_config(app_label).get_model(model_name.lower()) def register_model(self, app_label, model): # Since this method is called when models are imported, it cannot # perform imports because of the risk of import loops. It mustn't # call get_app_config(). model_name = model._meta.model_name app_models = self.all_models[app_label] if model_name in app_models: if (model.__name__ == app_models[model_name].__name__ and model.__module__ == app_models[model_name].__module__): warnings.warn( "Model '%s.%s' was already registered. " "Reloading models is not advised as it can lead to inconsistencies, " "most notably with related models." % (app_label, model_name), RuntimeWarning, stacklevel=2) else: raise RuntimeError( "Conflicting '%s' models in application '%s': %s and %s." % (model_name, app_label, app_models[model_name], model)) app_models[model_name] = model self.do_pending_operations(model) self.clear_cache() def is_installed(self, app_name): """ Checks whether an application with this name exists in the registry. app_name is the full name of the app eg. 'django.contrib.admin'. """ self.check_apps_ready() return any(ac.name == app_name for ac in self.app_configs.values()) def get_containing_app_config(self, object_name): """ Look for an app config containing a given object. object_name is the dotted Python path to the object. Returns the app config for the inner application in case of nesting. Returns None if the object isn't in any registered app config. 
""" self.check_apps_ready() candidates = [] for app_config in self.app_configs.values(): if object_name.startswith(app_config.name): subpath = object_name[len(app_config.name):] if subpath == '' or subpath[0] == '.': candidates.append(app_config) if candidates: return sorted(candidates, key=lambda ac: -len(ac.name))[0] def get_registered_model(self, app_label, model_name): """ Similar to get_model(), but doesn't require that an app exists with the given app_label. It's safe to call this method at import time, even while the registry is being populated. """ model = self.all_models[app_label].get(model_name.lower()) if model is None: raise LookupError( "Model '%s.%s' not registered." % (app_label, model_name)) return model @lru_cache.lru_cache(maxsize=None) def get_swappable_settings_name(self, to_string): """ For a given model string (e.g. "auth.User"), return the name of the corresponding settings name if it refers to a swappable model. If the referred model is not swappable, return None. This method is decorated with lru_cache because it's performance critical when it comes to migrations. Since the swappable settings don't change after Django has loaded the settings, there is no reason to get the respective settings attribute over and over again. """ for model in self.get_models(include_swapped=True): swapped = model._meta.swapped # Is this model swapped out for the model given by to_string? if swapped and swapped == to_string: return model._meta.swappable # Is this model swappable and the one given by to_string? if model._meta.swappable and model._meta.label == to_string: return model._meta.swappable return None def set_available_apps(self, available): """ Restricts the set of installed apps used by get_app_config[s]. available must be an iterable of application names. set_available_apps() must be balanced with unset_available_apps(). Primarily used for performance optimization in TransactionTestCase. 
This method is safe is the sense that it doesn't trigger any imports. """ available = set(available) installed = set(app_config.name for app_config in self.get_app_configs()) if not available.issubset(installed): raise ValueError("Available apps isn't a subset of installed " "apps, extra apps: %s" % ", ".join(available - installed)) self.stored_app_configs.append(self.app_configs) self.app_configs = OrderedDict( (label, app_config) for label, app_config in self.app_configs.items() if app_config.name in available) self.clear_cache() def unset_available_apps(self): """ Cancels a previous call to set_available_apps(). """ self.app_configs = self.stored_app_configs.pop() self.clear_cache() def set_installed_apps(self, installed): """ Enables a different set of installed apps for get_app_config[s]. installed must be an iterable in the same format as INSTALLED_APPS. set_installed_apps() must be balanced with unset_installed_apps(), even if it exits with an exception. Primarily used as a receiver of the setting_changed signal in tests. This method may trigger new imports, which may add new models to the registry of all imported models. They will stay in the registry even after unset_installed_apps(). Since it isn't possible to replay imports safely (eg. that could lead to registering listeners twice), models are registered when they're imported and never removed. """ if not self.ready: raise AppRegistryNotReady("App registry isn't ready yet.") self.stored_app_configs.append(self.app_configs) self.app_configs = OrderedDict() self.apps_ready = self.models_ready = self.ready = False self.clear_cache() self.populate(installed) def unset_installed_apps(self): """ Cancels a previous call to set_installed_apps(). """ self.app_configs = self.stored_app_configs.pop() self.apps_ready = self.models_ready = self.ready = True self.clear_cache() def clear_cache(self): """ Clears all internal caches, for methods that alter the app registry. This is mostly used in tests. 
""" # Call expire cache on each model. This will purge # the relation tree and the fields cache. self.get_models.cache_clear() if self.ready: # Circumvent self.get_models() to prevent that the cache is refilled. # This particularly prevents that an empty value is cached while cloning. for app_config in self.app_configs.values(): for model in app_config.get_models(include_auto_created=True): model._meta._expire_cache() def lazy_model_operation(self, function, *model_keys): """ Take a function and a number of ("app_label", "modelname") tuples, and when all the corresponding models have been imported and registered, call the function with the model classes as its arguments. The function passed to this method must accept exactly n models as arguments, where n=len(model_keys). """ # If this function depends on more than one model, we recursively turn # it into a chain of functions that accept a single model argument and # pass each in turn to lazy_model_operation. model_key, more_models = model_keys[0], model_keys[1:] if more_models: supplied_fn = function def function(model): next_function = partial(supplied_fn, model) # Annotate the function with its field for retrieval in # migrations.state.StateApps. if getattr(supplied_fn, 'keywords', None): next_function.field = supplied_fn.keywords.get('field') self.lazy_model_operation(next_function, *more_models) # If the model is already loaded, pass it to the function immediately. # Otherwise, delay execution until the class is prepared. try: model_class = self.get_registered_model(*model_key) except LookupError: self._pending_operations[model_key].append(function) else: function(model_class) def do_pending_operations(self, model): """ Take a newly-prepared model and pass it to each function waiting for it. This is called at the very end of `Apps.register_model()`. """ key = model._meta.app_label, model._meta.model_name for function in self._pending_operations.pop(key, []): function(model) apps = Apps(installed_apps=None)
bsd-3-clause
Lh4cKg/sl4a
python/src/Lib/distutils/command/sdist.py
49
18452
"""distutils.command.sdist Implements the Distutils 'sdist' command (create a source distribution).""" # This module should be kept compatible with Python 2.1. __revision__ = "$Id: sdist.py 68968 2009-01-26 17:20:15Z tarek.ziade $" import os, string import sys from types import * from glob import glob from distutils.core import Command from distutils import dir_util, dep_util, file_util, archive_util from distutils.text_file import TextFile from distutils.errors import * from distutils.filelist import FileList from distutils import log def show_formats (): """Print all possible values for the 'formats' option (used by the "--help-formats" command-line option). """ from distutils.fancy_getopt import FancyGetopt from distutils.archive_util import ARCHIVE_FORMATS formats=[] for format in ARCHIVE_FORMATS.keys(): formats.append(("formats=" + format, None, ARCHIVE_FORMATS[format][2])) formats.sort() pretty_printer = FancyGetopt(formats) pretty_printer.print_help( "List of available source distribution formats:") class sdist (Command): description = "create a source distribution (tarball, zip file, etc.)" user_options = [ ('template=', 't', "name of manifest template file [default: MANIFEST.in]"), ('manifest=', 'm', "name of manifest file [default: MANIFEST]"), ('use-defaults', None, "include the default file set in the manifest " "[default; disable with --no-defaults]"), ('no-defaults', None, "don't include the default file set"), ('prune', None, "specifically exclude files/directories that should not be " "distributed (build tree, RCS/CVS dirs, etc.) 
" "[default; disable with --no-prune]"), ('no-prune', None, "don't automatically exclude anything"), ('manifest-only', 'o', "just regenerate the manifest and then stop " "(implies --force-manifest)"), ('force-manifest', 'f', "forcibly regenerate the manifest and carry on as usual"), ('formats=', None, "formats for source distribution (comma-separated list)"), ('keep-temp', 'k', "keep the distribution tree around after creating " + "archive file(s)"), ('dist-dir=', 'd', "directory to put the source distribution archive(s) in " "[default: dist]"), ] boolean_options = ['use-defaults', 'prune', 'manifest-only', 'force-manifest', 'keep-temp'] help_options = [ ('help-formats', None, "list available distribution formats", show_formats), ] negative_opt = {'no-defaults': 'use-defaults', 'no-prune': 'prune' } default_format = { 'posix': 'gztar', 'nt': 'zip' } def initialize_options (self): # 'template' and 'manifest' are, respectively, the names of # the manifest template and manifest file. self.template = None self.manifest = None # 'use_defaults': if true, we will include the default file set # in the manifest self.use_defaults = 1 self.prune = 1 self.manifest_only = 0 self.force_manifest = 0 self.formats = None self.keep_temp = 0 self.dist_dir = None self.archive_files = None def finalize_options (self): if self.manifest is None: self.manifest = "MANIFEST" if self.template is None: self.template = "MANIFEST.in" self.ensure_string_list('formats') if self.formats is None: try: self.formats = [self.default_format[os.name]] except KeyError: raise DistutilsPlatformError, \ "don't know how to create source distributions " + \ "on platform %s" % os.name bad_format = archive_util.check_archive_formats(self.formats) if bad_format: raise DistutilsOptionError, \ "unknown archive format '%s'" % bad_format if self.dist_dir is None: self.dist_dir = "dist" def run (self): # 'filelist' contains the list of files that will make up the # manifest self.filelist = FileList() # Ensure that 
all required meta-data is given; warn if not (but # don't die, it's not *that* serious!) self.check_metadata() # Do whatever it takes to get the list of files to process # (process the manifest template, read an existing manifest, # whatever). File list is accumulated in 'self.filelist'. self.get_file_list() # If user just wanted us to regenerate the manifest, stop now. if self.manifest_only: return # Otherwise, go ahead and create the source distribution tarball, # or zipfile, or whatever. self.make_distribution() def check_metadata (self): """Ensure that all required elements of meta-data (name, version, URL, (author and author_email) or (maintainer and maintainer_email)) are supplied by the Distribution object; warn if any are missing. """ metadata = self.distribution.metadata missing = [] for attr in ('name', 'version', 'url'): if not (hasattr(metadata, attr) and getattr(metadata, attr)): missing.append(attr) if missing: self.warn("missing required meta-data: " + string.join(missing, ", ")) if metadata.author: if not metadata.author_email: self.warn("missing meta-data: if 'author' supplied, " + "'author_email' must be supplied too") elif metadata.maintainer: if not metadata.maintainer_email: self.warn("missing meta-data: if 'maintainer' supplied, " + "'maintainer_email' must be supplied too") else: self.warn("missing meta-data: either (author and author_email) " + "or (maintainer and maintainer_email) " + "must be supplied") # check_metadata () def get_file_list (self): """Figure out the list of files to include in the source distribution, and put it in 'self.filelist'. This might involve reading the manifest template (and writing the manifest), or just reading the manifest, or just using the default file set -- it all depends on the user's options and the state of the filesystem. """ # If we have a manifest template, see if it's newer than the # manifest; if so, we'll regenerate the manifest. 
template_exists = os.path.isfile(self.template) if template_exists: template_newer = dep_util.newer(self.template, self.manifest) # The contents of the manifest file almost certainly depend on the # setup script as well as the manifest template -- so if the setup # script is newer than the manifest, we'll regenerate the manifest # from the template. (Well, not quite: if we already have a # manifest, but there's no template -- which will happen if the # developer elects to generate a manifest some other way -- then we # can't regenerate the manifest, so we don't.) self.debug_print("checking if %s newer than %s" % (self.distribution.script_name, self.manifest)) setup_newer = dep_util.newer(self.distribution.script_name, self.manifest) # cases: # 1) no manifest, template exists: generate manifest # (covered by 2a: no manifest == template newer) # 2) manifest & template exist: # 2a) template or setup script newer than manifest: # regenerate manifest # 2b) manifest newer than both: # do nothing (unless --force or --manifest-only) # 3) manifest exists, no template: # do nothing (unless --force or --manifest-only) # 4) no manifest, no template: generate w/ warning ("defaults only") manifest_outofdate = (template_exists and (template_newer or setup_newer)) force_regen = self.force_manifest or self.manifest_only manifest_exists = os.path.isfile(self.manifest) neither_exists = (not template_exists and not manifest_exists) # Regenerate the manifest if necessary (or if explicitly told to) if manifest_outofdate or neither_exists or force_regen: if not template_exists: self.warn(("manifest template '%s' does not exist " + "(using default file list)") % self.template) self.filelist.findall() if self.use_defaults: self.add_defaults() if template_exists: self.read_template() if self.prune: self.prune_file_list() self.filelist.sort() self.filelist.remove_duplicates() self.write_manifest() # Don't regenerate the manifest, just read it in. 
else: self.read_manifest() # get_file_list () def add_defaults (self): """Add all the default files to self.filelist: - README or README.txt - setup.py - test/test*.py - all pure Python modules mentioned in setup script - all C sources listed as part of extensions or C libraries in the setup script (doesn't catch C headers!) Warns if (README or README.txt) or setup.py are missing; everything else is optional. """ standards = [('README', 'README.txt'), self.distribution.script_name] for fn in standards: if type(fn) is TupleType: alts = fn got_it = 0 for fn in alts: if os.path.exists(fn): got_it = 1 self.filelist.append(fn) break if not got_it: self.warn("standard file not found: should have one of " + string.join(alts, ', ')) else: if os.path.exists(fn): self.filelist.append(fn) else: self.warn("standard file '%s' not found" % fn) optional = ['test/test*.py', 'setup.cfg'] for pattern in optional: files = filter(os.path.isfile, glob(pattern)) if files: self.filelist.extend(files) if self.distribution.has_pure_modules(): build_py = self.get_finalized_command('build_py') self.filelist.extend(build_py.get_source_files()) if self.distribution.has_ext_modules(): build_ext = self.get_finalized_command('build_ext') self.filelist.extend(build_ext.get_source_files()) if self.distribution.has_c_libraries(): build_clib = self.get_finalized_command('build_clib') self.filelist.extend(build_clib.get_source_files()) if self.distribution.has_scripts(): build_scripts = self.get_finalized_command('build_scripts') self.filelist.extend(build_scripts.get_source_files()) # add_defaults () def read_template (self): """Read and parse manifest template file named by self.template. (usually "MANIFEST.in") The parsing and processing is done by 'self.filelist', which updates itself accordingly. 
""" log.info("reading manifest template '%s'", self.template) template = TextFile(self.template, strip_comments=1, skip_blanks=1, join_lines=1, lstrip_ws=1, rstrip_ws=1, collapse_join=1) while 1: line = template.readline() if line is None: # end of file break try: self.filelist.process_template_line(line) except DistutilsTemplateError, msg: self.warn("%s, line %d: %s" % (template.filename, template.current_line, msg)) # read_template () def prune_file_list (self): """Prune off branches that might slip into the file list as created by 'read_template()', but really don't belong there: * the build tree (typically "build") * the release tree itself (only an issue if we ran "sdist" previously with --keep-temp, or it aborted) * any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories """ build = self.get_finalized_command('build') base_dir = self.distribution.get_fullname() self.filelist.exclude_pattern(None, prefix=build.build_base) self.filelist.exclude_pattern(None, prefix=base_dir) # pruning out vcs directories # both separators are used under win32 if sys.platform == 'win32': seps = r'/|\\' else: seps = '/' vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr', '_darcs'] vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps) self.filelist.exclude_pattern(vcs_ptrn, is_regex=1) def write_manifest (self): """Write the file list in 'self.filelist' (presumably as filled in by 'add_defaults()' and 'read_template()') to the manifest file named by 'self.manifest'. """ self.execute(file_util.write_file, (self.manifest, self.filelist.files), "writing manifest file '%s'" % self.manifest) # write_manifest () def read_manifest (self): """Read the manifest file (named by 'self.manifest') and use it to fill in 'self.filelist', the list of files to include in the source distribution. 
""" log.info("reading manifest file '%s'", self.manifest) manifest = open(self.manifest) while 1: line = manifest.readline() if line == '': # end of file break if line[-1] == '\n': line = line[0:-1] self.filelist.append(line) manifest.close() # read_manifest () def make_release_tree (self, base_dir, files): """Create the directory tree that will become the source distribution archive. All directories implied by the filenames in 'files' are created under 'base_dir', and then we hard link or copy (if hard linking is unavailable) those files into place. Essentially, this duplicates the developer's source tree, but in a directory named after the distribution, containing only the files to be distributed. """ # Create all the directories under 'base_dir' necessary to # put 'files' there; the 'mkpath()' is just so we don't die # if the manifest happens to be empty. self.mkpath(base_dir) dir_util.create_tree(base_dir, files, dry_run=self.dry_run) # And walk over the list of files, either making a hard link (if # os.link exists) to each one that doesn't already exist in its # corresponding location under 'base_dir', or copying each file # that's out-of-date in 'base_dir'. (Usually, all files will be # out-of-date, because by default we blow away 'base_dir' when # we're done making the distribution archives.) if hasattr(os, 'link'): # can make hard links on this system link = 'hard' msg = "making hard links in %s..." % base_dir else: # nope, have to copy link = None msg = "copying files to %s..." % base_dir if not files: log.warn("no files to distribute -- empty manifest?") else: log.info(msg) for file in files: if not os.path.isfile(file): log.warn("'%s' not a regular file -- skipping" % file) else: dest = os.path.join(base_dir, file) self.copy_file(file, dest, link=link) self.distribution.metadata.write_pkg_info(base_dir) # make_release_tree () def make_distribution (self): """Create the source distribution(s). 
First, we create the release tree with 'make_release_tree()'; then, we create all required archive files (according to 'self.formats') from the release tree. Finally, we clean up by blowing away the release tree (unless 'self.keep_temp' is true). The list of archive files created is stored so it can be retrieved later by 'get_archive_files()'. """ # Don't warn about missing meta-data here -- should be (and is!) # done elsewhere. base_dir = self.distribution.get_fullname() base_name = os.path.join(self.dist_dir, base_dir) self.make_release_tree(base_dir, self.filelist.files) archive_files = [] # remember names of files we create # tar archive must be created last to avoid overwrite and remove if 'tar' in self.formats: self.formats.append(self.formats.pop(self.formats.index('tar'))) for fmt in self.formats: file = self.make_archive(base_name, fmt, base_dir=base_dir) archive_files.append(file) self.distribution.dist_files.append(('sdist', '', file)) self.archive_files = archive_files if not self.keep_temp: dir_util.remove_tree(base_dir, dry_run=self.dry_run) def get_archive_files (self): """Return the list of archive files created when the command was run, or None if the command hasn't run yet. """ return self.archive_files # class sdist
apache-2.0
fdvarela/odoo8
addons/l10n_th/__openerp__.py
170
1533
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Thailand - Accounting', 'version': '1.0', 'category': 'Localization/Account Charts', 'description': """ Chart of Accounts for Thailand. =============================== Thai accounting chart and localization. """, 'author': 'Almacom', 'website': 'http://almacom.co.th/', 'depends': ['account_chart'], 'data': [ 'account_data.xml' ], 'installable': True, 'images': ['images/config_chart_l10n_th.jpeg','images/l10n_th_chart.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
lucc/alot
tests/account_test.py
1
6562
# encoding=utf-8 # Copyright © 2017 Dylan Baker # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from alot import account from . import utilities class _AccountTestClass(account.Account): """Implements stubs for ABC methods.""" def send_mail(self, mail): pass class TestAccount(unittest.TestCase): """Tests for the Account class.""" def test_get_address(self): """Tests address without aliases.""" acct = _AccountTestClass(address="foo@example.com") self.assertListEqual(acct.get_addresses(), ['foo@example.com']) def test_get_address_with_aliases(self): """Tests address with aliases.""" acct = _AccountTestClass(address="foo@example.com", aliases=['bar@example.com']) self.assertListEqual(acct.get_addresses(), ['foo@example.com', 'bar@example.com']) def test_deprecated_encrypt_by_default(self): """Tests that depreacted values are still accepted.""" for each in ['true', 'yes', '1']: acct = _AccountTestClass(address='foo@example.com', encrypt_by_default=each) self.assertEqual(acct.encrypt_by_default, 'all') for each in ['false', 'no', '0']: acct = _AccountTestClass(address='foo@example.com', encrypt_by_default=each) self.assertEqual(acct.encrypt_by_default, 'none') class TestAddress(unittest.TestCase): """Tests for the Address class.""" def test_from_string(self): addr = account.Address.from_string('user@example.com') self.assertEqual(addr.username, 'user') 
self.assertEqual(addr.domainname, 'example.com') def test_str(self): addr = account.Address('ušer', 'example.com') self.assertEqual(str(addr), 'ušer@example.com') def test_eq_unicode(self): addr = account.Address('ušer', 'example.com') self.assertEqual(addr, 'ušer@example.com') def test_eq_address(self): addr = account.Address('ušer', 'example.com') addr2 = account.Address('ušer', 'example.com') self.assertEqual(addr, addr2) def test_ne_unicode(self): addr = account.Address('ušer', 'example.com') self.assertNotEqual(addr, 'user@example.com') def test_ne_address(self): addr = account.Address('ušer', 'example.com') addr2 = account.Address('user', 'example.com') self.assertNotEqual(addr, addr2) def test_eq_unicode_case(self): addr = account.Address('UŠer', 'example.com') self.assertEqual(addr, 'ušer@example.com') def test_ne_unicode_case(self): addr = account.Address('ušer', 'example.com') self.assertEqual(addr, 'uŠer@example.com') def test_ne_address_case(self): addr = account.Address('ušer', 'example.com') addr2 = account.Address('uŠer', 'example.com') self.assertEqual(addr, addr2) def test_eq_address_case(self): addr = account.Address('UŠer', 'example.com') addr2 = account.Address('ušer', 'example.com') self.assertEqual(addr, addr2) def test_eq_unicode_case_sensitive(self): addr = account.Address('UŠer', 'example.com', case_sensitive=True) self.assertNotEqual(addr, 'ušer@example.com') def test_eq_address_case_sensitive(self): addr = account.Address('UŠer', 'example.com', case_sensitive=True) addr2 = account.Address('ušer', 'example.com') self.assertNotEqual(addr, addr2) def test_eq_str(self): addr = account.Address('user', 'example.com', case_sensitive=True) with self.assertRaises(TypeError): addr == 1 # pylint: disable=pointless-statement def test_ne_str(self): addr = account.Address('user', 'example.com', case_sensitive=True) with self.assertRaises(TypeError): addr != 1 # pylint: disable=pointless-statement def test_repr(self): addr = account.Address('user', 
'example.com', case_sensitive=True) self.assertEqual( repr(addr), "Address('user', 'example.com', case_sensitive=True)") def test_domain_name_ne(self): addr = account.Address('user', 'example.com') self.assertNotEqual(addr, 'user@example.org') def test_domain_name_eq_case(self): addr = account.Address('user', 'example.com') self.assertEqual(addr, 'user@Example.com') def test_domain_name_ne_unicode(self): addr = account.Address('user', 'éxample.com') self.assertNotEqual(addr, 'user@example.com') def test_domain_name_eq_unicode(self): addr = account.Address('user', 'éxample.com') self.assertEqual(addr, 'user@Éxample.com') def test_domain_name_eq_case_sensitive(self): addr = account.Address('user', 'example.com', case_sensitive=True) self.assertEqual(addr, 'user@Example.com') def test_domain_name_eq_unicode_sensitive(self): addr = account.Address('user', 'éxample.com', case_sensitive=True) self.assertEqual(addr, 'user@Éxample.com') def test_cmp_empty(self): addr = account.Address('user', 'éxample.com') self.assertNotEqual(addr, '') class TestSend(unittest.TestCase): @utilities.async_test async def test_logs_on_success(self): a = account.SendmailAccount(address="test@alot.dev", cmd="true") with self.assertLogs() as cm: await a.send_mail("some text") #self.assertIn(cm.output, "sent mail successfullya") self.assertIn("INFO:root:sent mail successfully", cm.output) @utilities.async_test async def test_failing_sendmail_command_is_noticed(self): a = account.SendmailAccount(address="test@alot.dev", cmd="false") with self.assertRaises(account.SendingMailFailed): await a.send_mail("some text")
gpl-3.0
uglyboxer/linear_neuron
net-p3/lib/python3.5/site-packages/sklearn/metrics/tests/test_score_objects.py
2
13890
import pickle import numpy as np from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_true from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_not_equal from sklearn.base import BaseEstimator from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score, log_loss, precision_score, recall_score) from sklearn.metrics.cluster import adjusted_rand_score from sklearn.metrics.scorer import (check_scoring, _PredictScorer, _passthrough_scorer) from sklearn.metrics import make_scorer, get_scorer, SCORERS from sklearn.svm import LinearSVC from sklearn.pipeline import make_pipeline from sklearn.cluster import KMeans from sklearn.dummy import DummyRegressor from sklearn.linear_model import Ridge, LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.datasets import make_blobs from sklearn.datasets import make_classification from sklearn.datasets import make_multilabel_classification from sklearn.datasets import load_diabetes from sklearn.cross_validation import train_test_split, cross_val_score from sklearn.grid_search import GridSearchCV from sklearn.multiclass import OneVsRestClassifier REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error'] CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro', 'roc_auc', 'average_precision', 'precision', 'precision_weighted', 'precision_macro', 'precision_micro', 'recall', 'recall_weighted', 'recall_macro', 'recall_micro', 'log_loss', 'adjusted_rand_score' # not really, but works ] MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples'] class EstimatorWithoutFit(object): """Dummy estimator to test check_scoring""" pass class EstimatorWithFit(BaseEstimator): """Dummy estimator to test 
check_scoring""" def fit(self, X, y): return self class EstimatorWithFitAndScore(object): """Dummy estimator to test check_scoring""" def fit(self, X, y): return self def score(self, X, y): return 1.0 class EstimatorWithFitAndPredict(object): """Dummy estimator to test check_scoring""" def fit(self, X, y): self.y = y return self def predict(self, X): return self.y class DummyScorer(object): """Dummy scorer that always returns 1.""" def __call__(self, est, X, y): return 1 def test_check_scoring(): # Test all branches of check_scoring estimator = EstimatorWithoutFit() pattern = (r"estimator should a be an estimator implementing 'fit' method," r" .* was passed") assert_raises_regexp(TypeError, pattern, check_scoring, estimator) estimator = EstimatorWithFitAndScore() estimator.fit([[1]], [1]) scorer = check_scoring(estimator) assert_true(scorer is _passthrough_scorer) assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0) estimator = EstimatorWithFitAndPredict() estimator.fit([[1]], [1]) pattern = (r"If no scoring is specified, the estimator passed should have" r" a 'score' method\. The estimator .* does not\.") assert_raises_regexp(TypeError, pattern, check_scoring, estimator) scorer = check_scoring(estimator, "accuracy") assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0) estimator = EstimatorWithFit() scorer = check_scoring(estimator, "accuracy") assert_true(isinstance(scorer, _PredictScorer)) estimator = EstimatorWithFit() scorer = check_scoring(estimator, allow_none=True) assert_true(scorer is None) def test_check_scoring_gridsearchcv(): # test that check_scoring works on GridSearchCV and pipeline. # slightly redundant non-regression test. 
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]}) scorer = check_scoring(grid, "f1") assert_true(isinstance(scorer, _PredictScorer)) pipe = make_pipeline(LinearSVC()) scorer = check_scoring(pipe, "f1") assert_true(isinstance(scorer, _PredictScorer)) # check that cross_val_score definitely calls the scorer # and doesn't make any assumptions about the estimator apart from having a # fit. scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1], scoring=DummyScorer()) assert_array_equal(scores, 1) def test_make_scorer(): # Sanity check on the make_scorer factory function. f = lambda *args: 0 assert_raises(ValueError, make_scorer, f, needs_threshold=True, needs_proba=True) def test_classification_scores(): # Test classification scorers. X, y = make_blobs(random_state=0, centers=2) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf = LinearSVC(random_state=0) clf.fit(X_train, y_train) for prefix, metric in [('f1', f1_score), ('precision', precision_score), ('recall', recall_score)]: score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test) score2 = metric(y_test, clf.predict(X_test), pos_label=None, average='weighted') assert_almost_equal(score1, score2) score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test) score2 = metric(y_test, clf.predict(X_test), pos_label=None, average='macro') assert_almost_equal(score1, score2) score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test) score2 = metric(y_test, clf.predict(X_test), pos_label=None, average='micro') assert_almost_equal(score1, score2) score1 = get_scorer('%s' % prefix)(clf, X_test, y_test) score2 = metric(y_test, clf.predict(X_test), pos_label=1) assert_almost_equal(score1, score2) # test fbeta score that takes an argument scorer = make_scorer(fbeta_score, beta=2) score1 = scorer(clf, X_test, y_test) score2 = fbeta_score(y_test, clf.predict(X_test), beta=2) assert_almost_equal(score1, score2) # test that custom scorer can be pickled 
unpickled_scorer = pickle.loads(pickle.dumps(scorer)) score3 = unpickled_scorer(clf, X_test, y_test) assert_almost_equal(score1, score3) # smoke test the repr: repr(fbeta_score) def test_regression_scorers(): # Test regression scorers. diabetes = load_diabetes() X, y = diabetes.data, diabetes.target X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf = Ridge() clf.fit(X_train, y_train) score1 = get_scorer('r2')(clf, X_test, y_test) score2 = r2_score(y_test, clf.predict(X_test)) assert_almost_equal(score1, score2) def test_thresholded_scorers(): # Test scorers that take thresholds. X, y = make_blobs(random_state=0, centers=2) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf = LogisticRegression(random_state=0) clf.fit(X_train, y_train) score1 = get_scorer('roc_auc')(clf, X_test, y_test) score2 = roc_auc_score(y_test, clf.decision_function(X_test)) score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1]) assert_almost_equal(score1, score2) assert_almost_equal(score1, score3) logscore = get_scorer('log_loss')(clf, X_test, y_test) logloss = log_loss(y_test, clf.predict_proba(X_test)) assert_almost_equal(-logscore, logloss) # same for an estimator without decision_function clf = DecisionTreeClassifier() clf.fit(X_train, y_train) score1 = get_scorer('roc_auc')(clf, X_test, y_test) score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1]) assert_almost_equal(score1, score2) # Test that an exception is raised on more than two classes X, y = make_blobs(random_state=0, centers=3) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf.fit(X_train, y_train) assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test) def test_thresholded_scorers_multilabel_indicator_data(): # Test that the scorer work with multilabel-indicator format # for multilabel and multi-output multi-class classifier X, y = make_multilabel_classification(return_indicator=True, allow_unlabeled=False, 
random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # Multi-output multi-class predict_proba clf = DecisionTreeClassifier() clf.fit(X_train, y_train) y_proba = clf.predict_proba(X_test) score1 = get_scorer('roc_auc')(clf, X_test, y_test) score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T) assert_almost_equal(score1, score2) # Multi-output multi-class decision_function # TODO Is there any yet? clf = DecisionTreeClassifier() clf.fit(X_train, y_train) clf._predict_proba = clf.predict_proba clf.predict_proba = None clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)] y_proba = clf.decision_function(X_test) score1 = get_scorer('roc_auc')(clf, X_test, y_test) score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T) assert_almost_equal(score1, score2) # Multilabel predict_proba clf = OneVsRestClassifier(DecisionTreeClassifier()) clf.fit(X_train, y_train) score1 = get_scorer('roc_auc')(clf, X_test, y_test) score2 = roc_auc_score(y_test, clf.predict_proba(X_test)) assert_almost_equal(score1, score2) # Multilabel decision function clf = OneVsRestClassifier(LinearSVC(random_state=0)) clf.fit(X_train, y_train) score1 = get_scorer('roc_auc')(clf, X_test, y_test) score2 = roc_auc_score(y_test, clf.decision_function(X_test)) assert_almost_equal(score1, score2) def test_unsupervised_scorers(): # Test clustering scorers against gold standard labeling. # We don't have any real unsupervised Scorers yet. X, y = make_blobs(random_state=0, centers=2) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) km = KMeans(n_clusters=3) km.fit(X_train) score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test) score2 = adjusted_rand_score(y_test, km.predict(X_test)) assert_almost_equal(score1, score2) @ignore_warnings def test_raises_on_score_list(): # Test that when a list of scores is returned, we raise proper errors. 
X, y = make_blobs(random_state=0) f1_scorer_no_average = make_scorer(f1_score, average=None) clf = DecisionTreeClassifier() assert_raises(ValueError, cross_val_score, clf, X, y, scoring=f1_scorer_no_average) grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average, param_grid={'max_depth': [1, 2]}) assert_raises(ValueError, grid_search.fit, X, y) @ignore_warnings def test_scorer_sample_weight(): # Test that scorers support sample_weight or raise sensible errors # Unlike the metrics invariance test, in the scorer case it's harder # to ensure that, on the classifier output, weighted and unweighted # scores really should be unequal. X, y = make_classification(random_state=0) _, y_ml = make_multilabel_classification(n_samples=X.shape[0], return_indicator=True, random_state=0) split = train_test_split(X, y, y_ml, random_state=0) X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split sample_weight = np.ones_like(y_test) sample_weight[:10] = 0 # get sensible estimators for each metric sensible_regr = DummyRegressor(strategy='median') sensible_regr.fit(X_train, y_train) sensible_clf = DecisionTreeClassifier(random_state=0) sensible_clf.fit(X_train, y_train) sensible_ml_clf = DecisionTreeClassifier(random_state=0) sensible_ml_clf.fit(X_train, y_ml_train) estimator = dict([(name, sensible_regr) for name in REGRESSION_SCORERS] + [(name, sensible_clf) for name in CLF_SCORERS] + [(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]) for name, scorer in SCORERS.items(): if name in MULTILABEL_ONLY_SCORERS: target = y_ml_test else: target = y_test try: weighted = scorer(estimator[name], X_test, target, sample_weight=sample_weight) ignored = scorer(estimator[name], X_test[10:], target[10:]) unweighted = scorer(estimator[name], X_test, target) assert_not_equal(weighted, unweighted, msg="scorer {0} behaves identically when " "called with sample weights: {1} vs " "{2}".format(name, weighted, unweighted)) assert_almost_equal(weighted, ignored, err_msg="scorer {0} 
behaves differently when " "ignoring samples and setting sample_weight to" " 0: {1} vs {2}".format(name, weighted, ignored)) except TypeError as e: assert_true("sample_weight" in str(e), "scorer {0} raises unhelpful exception when called " "with sample weights: {1}".format(name, str(e)))
mit
HerlanAssis/Django-AulaOsvandoSantana
lib/python2.7/site-packages/django/utils/safestring.py
478
4414
""" Functions for working with "safe strings": strings that can be displayed safely without further escaping in HTML. Marking something as a "safe string" means that the producer of the string has already turned characters that should not be interpreted by the HTML engine (e.g. '<') into the appropriate entities. """ from django.utils import six from django.utils.functional import Promise, curry class EscapeData(object): pass class EscapeBytes(bytes, EscapeData): """ A byte string that should be HTML-escaped when output. """ pass class EscapeText(six.text_type, EscapeData): """ A unicode string object that should be HTML-escaped when output. """ pass if six.PY3: EscapeString = EscapeText else: EscapeString = EscapeBytes # backwards compatibility for Python 2 EscapeUnicode = EscapeText class SafeData(object): def __html__(self): """ Returns the html representation of a string for interoperability. This allows other template engines to understand Django's SafeData. """ return self class SafeBytes(bytes, SafeData): """ A bytes subclass that has been specifically marked as "safe" (requires no further escaping) for HTML output purposes. """ def __add__(self, rhs): """ Concatenating a safe byte string with another safe byte string or safe unicode string is safe. Otherwise, the result is no longer safe. """ t = super(SafeBytes, self).__add__(rhs) if isinstance(rhs, SafeText): return SafeText(t) elif isinstance(rhs, SafeBytes): return SafeBytes(t) return t def _proxy_method(self, *args, **kwargs): """ Wrap a call to a normal unicode method up so that we return safe results. The method that is being wrapped is passed in the 'method' argument. 
""" method = kwargs.pop('method') data = method(self, *args, **kwargs) if isinstance(data, bytes): return SafeBytes(data) else: return SafeText(data) decode = curry(_proxy_method, method=bytes.decode) class SafeText(six.text_type, SafeData): """ A unicode (Python 2) / str (Python 3) subclass that has been specifically marked as "safe" for HTML output purposes. """ def __add__(self, rhs): """ Concatenating a safe unicode string with another safe byte string or safe unicode string is safe. Otherwise, the result is no longer safe. """ t = super(SafeText, self).__add__(rhs) if isinstance(rhs, SafeData): return SafeText(t) return t def _proxy_method(self, *args, **kwargs): """ Wrap a call to a normal unicode method up so that we return safe results. The method that is being wrapped is passed in the 'method' argument. """ method = kwargs.pop('method') data = method(self, *args, **kwargs) if isinstance(data, bytes): return SafeBytes(data) else: return SafeText(data) encode = curry(_proxy_method, method=six.text_type.encode) if six.PY3: SafeString = SafeText else: SafeString = SafeBytes # backwards compatibility for Python 2 SafeUnicode = SafeText def mark_safe(s): """ Explicitly mark a string as safe for (HTML) output purposes. The returned object can be used everywhere a string or unicode object is appropriate. Can be called multiple times on a single string. """ if hasattr(s, '__html__'): return s if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes): return SafeBytes(s) if isinstance(s, (six.text_type, Promise)): return SafeText(s) return SafeString(str(s)) def mark_for_escaping(s): """ Explicitly mark a string as requiring HTML escaping upon output. Has no effect on SafeData subclasses. Can be called multiple times on a single string (the resulting escaping is only applied once). 
""" if hasattr(s, '__html__') or isinstance(s, EscapeData): return s if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes): return EscapeBytes(s) if isinstance(s, (six.text_type, Promise)): return EscapeText(s) return EscapeString(str(s))
mit
Chilledheart/chromium
content/test/gpu/gpu_tests/webgl_robustness.py
12
2628
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry import benchmark from telemetry.page import page from telemetry.page import page_test from telemetry.story import story_set as story_set_module from webgl_conformance import conformance_harness_script from webgl_conformance import conformance_path from webgl_conformance import WebglConformanceValidator robustness_harness_script = conformance_harness_script + r""" var robustnessTestHarness = {}; robustnessTestHarness._contextLost = false; robustnessTestHarness.initialize = function() { var canvas = document.getElementById('example'); canvas.addEventListener('webglcontextlost', function() { robustnessTestHarness._contextLost = true; }); } robustnessTestHarness.runTestLoop = function() { // Run the test in a loop until the context is lost. main(); if (!robustnessTestHarness._contextLost) window.requestAnimationFrame(robustnessTestHarness.runTestLoop); else robustnessTestHarness.notifyFinished(); } robustnessTestHarness.notifyFinished = function() { // The test may fail in unpredictable ways depending on when the context is // lost. We ignore such errors and only require that the browser doesn't // crash. webglTestHarness._allTestSucceeded = true; // Notify test completion after a delay to make sure the browser is able to // recover from the lost context. 
setTimeout(webglTestHarness.notifyFinished, 3000); } window.confirm = function() { robustnessTestHarness.initialize(); robustnessTestHarness.runTestLoop(); return false; } window.webglRobustnessTestHarness = robustnessTestHarness; """ class WebglRobustnessPage(page.Page): def __init__(self, story_set, base_dir): super(WebglRobustnessPage, self).__init__( url='file://extra/lots-of-polys-example.html', page_set=story_set, base_dir=base_dir) self.script_to_evaluate_on_commit = robustness_harness_script def RunNavigateSteps(self, action_runner): super(WebglRobustnessPage, self).RunNavigateSteps(action_runner) action_runner.WaitForJavaScriptCondition('webglTestHarness._finished') class WebglRobustness(benchmark.Benchmark): test = WebglConformanceValidator @classmethod def Name(cls): return 'webgl_robustness' def CreateStorySet(self, options): ps = story_set_module.StorySet( base_dir=conformance_path, serving_dirs=['']) ps.AddStory(WebglRobustnessPage(ps, ps.base_dir)) return ps
bsd-3-clause
Lab603/PicEncyclopedias
jni-build/jni-build/jni/include/tensorflow/contrib/distributions/python/kernel_tests/operator_pd_cholesky_test.py
5
10912
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky distributions = tf.contrib.distributions def softplus(x): return np.log(1 + np.exp(x)) class OperatorPDCholeskyTest(tf.test.TestCase): def setUp(self): self._rng = np.random.RandomState(42) def _random_cholesky_array(self, shape): mat = self._rng.rand(*shape) chol = distributions.batch_matrix_diag_transform(mat, transform=tf.nn.softplus) # Zero the upper triangle because we're using this as a true Cholesky factor # in our tests. 
return tf.batch_matrix_band_part(chol, -1, 0).eval() def test_log_det(self): with self.test_session(): batch_shape = () for k in [1, 4]: chol_shape = batch_shape + (k, k) chol = self._random_cholesky_array(chol_shape) operator = operator_pd_cholesky.OperatorPDCholesky(chol) log_det = operator.log_det() expected_log_det = np.log(np.prod(np.diag(chol))**2) self.assertEqual(batch_shape, log_det.get_shape()) self.assertAllClose(expected_log_det, log_det.eval()) def test_log_det_batch_matrix(self): with self.test_session(): batch_shape = (2, 3) for k in [1, 4]: chol_shape = batch_shape + (k, k) chol = self._random_cholesky_array(chol_shape) operator = operator_pd_cholesky.OperatorPDCholesky(chol) log_det = operator.log_det() self.assertEqual(batch_shape, log_det.get_shape()) # Test the log-determinant of the [1, 1] matrix. chol_11 = chol[1, 1, :, :] expected_log_det = np.log(np.prod(np.diag(chol_11))**2) self.assertAllClose(expected_log_det, log_det.eval()[1, 1]) def test_sqrt_matmul_single_matrix(self): with self.test_session(): batch_shape = () for k in [1, 4]: x_shape = batch_shape + (k, 3) x = self._rng.rand(*x_shape) chol_shape = batch_shape + (k, k) chol = self._random_cholesky_array(chol_shape) operator = operator_pd_cholesky.OperatorPDCholesky(chol) sqrt_operator_times_x = operator.sqrt_matmul(x) expected = tf.batch_matmul(chol, x) self.assertEqual(expected.get_shape(), sqrt_operator_times_x.get_shape()) self.assertAllClose(expected.eval(), sqrt_operator_times_x.eval()) def test_sqrt_matmul_batch_matrix(self): with self.test_session(): batch_shape = (2, 3) for k in [1, 4]: x_shape = batch_shape + (k, 5) x = self._rng.rand(*x_shape) chol_shape = batch_shape + (k, k) chol = self._random_cholesky_array(chol_shape) operator = operator_pd_cholesky.OperatorPDCholesky(chol) sqrt_operator_times_x = operator.sqrt_matmul(x) expected = tf.batch_matmul(chol, x) self.assertEqual(expected.get_shape(), sqrt_operator_times_x.get_shape()) self.assertAllClose(expected.eval(), 
sqrt_operator_times_x.eval()) def test_sqrt_matmul_batch_matrix_with_transpose(self): with self.test_session(): batch_shape = (2, 3) for k in [1, 4]: x_shape = batch_shape + (5, k) x = self._rng.rand(*x_shape) chol_shape = batch_shape + (k, k) chol = self._random_cholesky_array(chol_shape) operator = operator_pd_cholesky.OperatorPDCholesky(chol) sqrt_operator_times_x = operator.sqrt_matmul(x, transpose_x=True) # tf.batch_matmul is defined x * y, so "y" is on the right, not "x". expected = tf.batch_matmul(chol, x, adj_y=True) self.assertEqual(expected.get_shape(), sqrt_operator_times_x.get_shape()) self.assertAllClose(expected.eval(), sqrt_operator_times_x.eval()) def test_matmul_single_matrix(self): with self.test_session(): batch_shape = () for k in [1, 4]: x_shape = batch_shape + (k, 5) x = self._rng.rand(*x_shape) chol_shape = batch_shape + (k, k) chol = self._random_cholesky_array(chol_shape) matrix = tf.batch_matmul(chol, chol, adj_y=True) operator = operator_pd_cholesky.OperatorPDCholesky(chol) expected = tf.batch_matmul(matrix, x) self.assertEqual(expected.get_shape(), operator.matmul(x).get_shape()) self.assertAllClose(expected.eval(), operator.matmul(x).eval()) def test_matmul_batch_matrix(self): with self.test_session(): batch_shape = (2, 3) for k in [1, 4]: x_shape = batch_shape + (k, 5) x = self._rng.rand(*x_shape) chol_shape = batch_shape + (k, k) chol = self._random_cholesky_array(chol_shape) matrix = tf.batch_matmul(chol, chol, adj_y=True) operator = operator_pd_cholesky.OperatorPDCholesky(chol) expected = tf.batch_matmul(matrix, x) self.assertEqual(expected.get_shape(), operator.matmul(x).get_shape()) self.assertAllClose(expected.eval(), operator.matmul(x).eval()) def test_matmul_batch_matrix_with_transpose(self): with self.test_session(): batch_shape = (2, 3) for k in [1, 4]: x_shape = batch_shape + (5, k) x = self._rng.rand(*x_shape) chol_shape = batch_shape + (k, k) chol = self._random_cholesky_array(chol_shape) matrix = tf.batch_matmul(chol, 
chol, adj_y=True) operator = operator_pd_cholesky.OperatorPDCholesky(chol) operator_times_x = operator.matmul(x, transpose_x=True) # tf.batch_matmul is defined x * y, so "y" is on the right, not "x". expected = tf.batch_matmul(matrix, x, adj_y=True) self.assertEqual(expected.get_shape(), operator_times_x.get_shape()) self.assertAllClose(expected.eval(), operator_times_x.eval()) def test_shape(self): # All other shapes are defined by the abstractmethod shape, so we only need # to test this. with self.test_session(): for shape in [(3, 3), (2, 3, 3), (1, 2, 3, 3)]: chol = self._random_cholesky_array(shape) operator = operator_pd_cholesky.OperatorPDCholesky(chol) self.assertAllEqual(shape, operator.shape().eval()) def test_to_dense(self): with self.test_session(): chol = self._random_cholesky_array((3, 3)) chol_2 = chol.copy() chol_2[0, 2] = 1000 # Make sure upper triangular part makes no diff. operator = operator_pd_cholesky.OperatorPDCholesky(chol_2) self.assertAllClose(chol.dot(chol.T), operator.to_dense().eval()) def test_sqrt_to_dense(self): with self.test_session(): chol = self._random_cholesky_array((2, 3, 3)) chol_2 = chol.copy() chol_2[0, 0, 2] = 1000 # Make sure upper triangular part makes no diff. operator = operator_pd_cholesky.OperatorPDCholesky(chol_2) self.assertAllClose(chol, operator.sqrt_to_dense().eval()) def test_non_positive_definite_matrix_raises(self): # Singlular matrix with one positive eigenvalue and one zero eigenvalue. with self.test_session(): lower_mat = [[1.0, 0.0], [2.0, 0.0]] operator = operator_pd_cholesky.OperatorPDCholesky(lower_mat) with self.assertRaisesOpError('x > 0 did not hold'): operator.to_dense().eval() def test_non_positive_definite_matrix_does_not_raise_if_not_verify_pd(self): # Singlular matrix with one positive eigenvalue and one zero eigenvalue. 
with self.test_session(): lower_mat = [[1.0, 0.0], [2.0, 0.0]] operator = operator_pd_cholesky.OperatorPDCholesky( lower_mat, verify_pd=False) operator.to_dense().eval() # Should not raise. def test_not_having_two_identical_last_dims_raises(self): # Unless the last two dims are equal, this cannot represent a matrix, and it # should raise. with self.test_session(): batch_vec = [[1.0], [2.0]] # shape 2 x 1 with self.assertRaisesRegexp(ValueError, '.*Dimensions.*'): operator = operator_pd_cholesky.OperatorPDCholesky(batch_vec) operator.to_dense().eval() class BatchMatrixDiagTransformTest(tf.test.TestCase): def setUp(self): self._rng = np.random.RandomState(0) def check_off_diagonal_same(self, m1, m2): """Check the lower triangular part, not upper or diag.""" self.assertAllClose(np.tril(m1, k=-1), np.tril(m2, k=-1)) self.assertAllClose(np.triu(m1, k=1), np.triu(m2, k=1)) def test_non_batch_matrix_with_transform(self): mat = self._rng.rand(4, 4) with self.test_session(): chol = distributions.batch_matrix_diag_transform(mat, transform=tf.nn.softplus) self.assertEqual((4, 4), chol.get_shape()) self.check_off_diagonal_same(mat, chol.eval()) self.assertAllClose(softplus(np.diag(mat)), np.diag(chol.eval())) def test_non_batch_matrix_no_transform(self): mat = self._rng.rand(4, 4) with self.test_session(): # Default is no transform. 
chol = distributions.batch_matrix_diag_transform(mat) self.assertEqual((4, 4), chol.get_shape()) self.assertAllClose(mat, chol.eval()) def test_batch_matrix_with_transform(self): mat = self._rng.rand(2, 4, 4) mat_0 = mat[0, :, :] with self.test_session(): chol = distributions.batch_matrix_diag_transform(mat, transform=tf.nn.softplus) self.assertEqual((2, 4, 4), chol.get_shape()) chol_0 = chol.eval()[0, :, :] self.check_off_diagonal_same(mat_0, chol_0) self.assertAllClose(softplus(np.diag(mat_0)), np.diag(chol_0)) self.check_off_diagonal_same(mat_0, chol_0) self.assertAllClose(softplus(np.diag(mat_0)), np.diag(chol_0)) def test_batch_matrix_no_transform(self): mat = self._rng.rand(2, 4, 4) with self.test_session(): # Default is no transform. chol = distributions.batch_matrix_diag_transform(mat) self.assertEqual((2, 4, 4), chol.get_shape()) self.assertAllClose(mat, chol.eval()) if __name__ == '__main__': tf.test.main()
mit
sde1000/quicktill
quicktill/stockterminal.py
1
7228
"""Stock Terminal page""" import time import logging from . import ui, td, keyboard, usestock, stocklines, user, tillconfig from .user import load_user from .models import StockLine, StockAnnotation, StockItem from sqlalchemy.sql.expression import tuple_, func, null from sqlalchemy.sql import select, not_ from sqlalchemy.orm import joinedload, undefer_group log = logging.getLogger(__name__) class page(ui.basicpage): def __init__(self, hotkeys, locations=None, user=None, max_unattended_updates=None): super().__init__() self.win.set_cursor(False) self.user = user if user else load_user(tillconfig.default_user) self.display = 0 self.max_unattended_updates = max_unattended_updates self.remaining_life = max_unattended_updates self.hotkeys = hotkeys self.locations = locations if locations else ['Bar'] self.updateheader() self._alarm_handle = tillconfig.mainloop.add_timeout(0, self.alarm) def pagename(self): return self.user.fullname if self.user else "Stock Control" def drawlines(self, h): sl = td.s.query(StockLine).\ filter(StockLine.location.in_(self.locations)).\ order_by(StockLine.name).\ options(joinedload('stockonsale')).\ options(joinedload('stockonsale.stocktype')).\ options(undefer_group('qtys')).\ all() f = ui.tableformatter("pl l L r rp") header = f("Line", "StockID", "Stock", "Used", "Remaining") def fl(line): if line.linetype == "regular" and line.stockonsale: sos = line.stockonsale[0] return (line.name, sos.id, sos.stocktype.format(), "{} {}".format(sos.used, sos.stocktype.unit.name), "{} {}".format(sos.remaining, sos.stocktype.unit.name)) elif line.linetype == "continuous": return (line.name, "", line.stocktype.format(), "", "{} {}".format(line.stocktype.remaining, line.stocktype.unit.name)) elif line.linetype == "display": return (line.name, "", line.stocktype.format(), "", "{}+{} {}".format(line.ondisplay, line.instock, line.stocktype.unit.name)) return (line.name, "", "", "", "") ml = [header] + [f(*fl(line)) for line in sl] y = 0 for l in ml: for line 
in l.display(self.w): self.win.addstr(y, 0, line) y = y + 1 if y >= h: break def drawstillage(self, h): sl = td.s.query(StockAnnotation)\ .join(StockItem)\ .outerjoin(StockLine)\ .filter(tuple_(StockAnnotation.text,StockAnnotation.time).in_( select([StockAnnotation.text, func.max(StockAnnotation.time)], StockAnnotation.atype == 'location')\ .group_by(StockAnnotation.text)))\ .filter(StockItem.finished == None)\ .order_by(StockLine.name != null(), StockAnnotation.time)\ .options(joinedload('stockitem'))\ .options(joinedload('stockitem.stocktype'))\ .options(joinedload('stockitem.stockline'))\ .all() if not sl: return self.drawlines(h) f = ui.tableformatter('pl l c L c lp') header = f("Loc", "Racked", "StockID", "Name", "BB", "Line") ml = [f(a.text, a.time.date().strftime("%d %b"), a.stockid, a.stockitem.stocktype.format(), a.stockitem.bestbefore or "", a.stockitem.stockline.name if a.stockitem.stockline else "") for a in sl] ml.insert(0, header) y = 0 for l in ml: for line in l.display(self.w): self.win.addstr(y, 0, line) y = y + 1 if y >= h: break def redraw(self): self.win.erase() prompt = ("Ctrl+X = Clear; Ctrl+Y = Cancel. " "Press S for stock management. " "Press U to use stock. Press R to record waste. " "Press Enter to refresh display. " "Press A to add a stock annotation. " "Press L to choose another location.") promptheight = self.win.wrapstr(0, 0, self.w, prompt, display=False) self.win.wrapstr(self.h - promptheight, 0, self.w, prompt) if self.display == 0: self.drawlines(self.h - promptheight) elif self.display == 1: self.drawstillage(self.h - promptheight) def alarm(self, called_by_timer=True): if not called_by_timer: self._alarm_handle.cancel() self._alarm_handle = tillconfig.mainloop.add_timeout( 2 if tillconfig.debug else 60, self.alarm) self.display = self.display + 1 if self.display > 1: self.display = 0 # There won't be a database session set up when we're called # by the timer expiring. 
if called_by_timer: with td.orm_session(): if self.max_unattended_updates: self.remaining_life = self.remaining_life - 1 if self.remaining_life < 1: self.deselect() return self.redraw() else: self.remaining_life = self.max_unattended_updates self.redraw() def keypress(self,k): if k == 'l' or k == 'L': self.choose_location() elif k == keyboard.K_CASH: self.alarm(called_by_timer=False) elif k == 'u' or k == 'U': stocklines.selectline(usestock.line_chosen, title="Use Stock", blurb="Select a stock line") elif k == keyboard.K_CLEAR or k == keyboard.K_CANCEL: self.deselect() elif k in self.hotkeys: return self.hotkeys[k]() else: ui.beep() def deselect(self): # Ensure that we're not still hanging around when we are invisible super().deselect() self._alarm_handle.cancel() self.dismiss() def choose_location(self): locations = StockLine.locations(td.s) if not locations: ui.infopopup(["There are no locations. Please create a stock line."], title="Error") else: ui.automenu([(x, self.set_location, (x,)) for x in locations], title="Choose location") def set_location(self, location): self.locations = [location] self.alarm(called_by_timer=False) def handle_usertoken(t, *args, **kwargs): """ Called when a usertoken has been handled by the default hotkey handler. """ u = user.user_from_token(t) if u is None: return # Should already have toasted return page(*args, user=u, **kwargs)
gpl-3.0
suyashphadtare/vestasi-erp-1
erpnext/buying/doctype/purchase_order/purchase_order.py
6
9685
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import cstr, flt from frappe import msgprint, _, throw from frappe.model.mapper import get_mapped_doc from erpnext.controllers.buying_controller import BuyingController form_grid_templates = { "po_details": "templates/form_grid/item_grid.html" } class PurchaseOrder(BuyingController): tname = 'Purchase Order Item' fname = 'po_details' def __init__(self, arg1, arg2=None): super(PurchaseOrder, self).__init__(arg1, arg2) self.status_updater = [{ 'source_dt': 'Purchase Order Item', 'target_dt': 'Material Request Item', 'join_field': 'prevdoc_detail_docname', 'target_field': 'ordered_qty', 'target_parent_dt': 'Material Request', 'target_parent_field': 'per_ordered', 'target_ref_field': 'qty', 'source_field': 'qty', 'percent_join_field': 'prevdoc_docname', 'overflow_type': 'order' }] def validate(self): super(PurchaseOrder, self).validate() if not self.status: self.status = "Draft" from erpnext.utilities import validate_status validate_status(self.status, ["Draft", "Submitted", "Stopped", "Cancelled"]) pc_obj = frappe.get_doc('Purchase Common') pc_obj.validate_for_items(self) self.check_for_stopped_status(pc_obj) self.validate_uom_is_integer("uom", "qty") self.validate_uom_is_integer("stock_uom", ["qty", "required_qty"]) self.validate_with_previous_doc() self.validate_for_subcontracting() self.validate_minimum_order_qty() self.create_raw_materials_supplied("po_raw_material_details") def validate_with_previous_doc(self): super(PurchaseOrder, self).validate_with_previous_doc(self.tname, { "Supplier Quotation": { "ref_dn_field": "supplier_quotation", "compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]], }, "Supplier Quotation Item": { "ref_dn_field": "supplier_quotation_item", "compare_fields": [["rate", "="], ["project_name", "="], ["item_code", "="], 
["uom", "="]], "is_child_table": True } }) def validate_minimum_order_qty(self): itemwise_min_order_qty = frappe._dict(frappe.db.sql("select name, min_order_qty from tabItem")) itemwise_qty = frappe._dict() for d in self.get("po_details"): itemwise_qty.setdefault(d.item_code, 0) itemwise_qty[d.item_code] += flt(d.stock_qty) for item_code, qty in itemwise_qty.items(): if flt(qty) < flt(itemwise_min_order_qty.get(item_code)): frappe.throw(_("Item #{0}: Ordered qty can not less than item's minimum order qty (defined in item master).").format(item_code)) def get_schedule_dates(self): for d in self.get('po_details'): if d.prevdoc_detail_docname and not d.schedule_date: d.schedule_date = frappe.db.get_value("Material Request Item", d.prevdoc_detail_docname, "schedule_date") def get_last_purchase_rate(self): frappe.get_doc('Purchase Common').get_last_purchase_rate(self) #roshan def get_details(self): if self.address == 'Work Address': return frappe.db.get_value('Company', self.company, 'address') if self.address == 'Buisness Address': return frappe.db.get_value('Company', self.company, 'buisness_address') # Check for Stopped status def check_for_stopped_status(self, pc_obj): check_list =[] for d in self.get('po_details'): if d.meta.get_field('prevdoc_docname') and d.prevdoc_docname and d.prevdoc_docname not in check_list: check_list.append(d.prevdoc_docname) pc_obj.check_for_stopped_status( d.prevdoc_doctype, d.prevdoc_docname) def update_requested_qty(self): material_request_map = {} for d in self.get("po_details"): if d.prevdoc_doctype and d.prevdoc_doctype == "Material Request" and d.prevdoc_detail_docname: material_request_map.setdefault(d.prevdoc_docname, []).append(d.prevdoc_detail_docname) for mr, mr_item_rows in material_request_map.items(): if mr and mr_item_rows: mr_obj = frappe.get_doc("Material Request", mr) if mr_obj.status in ["Stopped", "Cancelled"]: frappe.throw(_("Material Request {0} is cancelled or stopped").format(mr), frappe.InvalidStatusError) 
mr_obj.update_requested_qty(mr_item_rows) def update_ordered_qty(self, po_item_rows=None): """update requested qty (before ordered_qty is updated)""" from erpnext.stock.utils import get_bin def _update_ordered_qty(item_code, warehouse): ordered_qty = frappe.db.sql(""" select sum((po_item.qty - ifnull(po_item.received_qty, 0))*po_item.conversion_factor) from `tabPurchase Order Item` po_item, `tabPurchase Order` po where po_item.item_code=%s and po_item.warehouse=%s and po_item.qty > ifnull(po_item.received_qty, 0) and po_item.parent=po.name and po.status!='Stopped' and po.docstatus=1""", (item_code, warehouse)) bin_doc = get_bin(item_code, warehouse) bin_doc.ordered_qty = flt(ordered_qty[0][0]) if ordered_qty else 0 bin_doc.save() item_wh_list = [] for d in self.get("po_details"): if (not po_item_rows or d.name in po_item_rows) and [d.item_code, d.warehouse] not in item_wh_list \ and frappe.db.get_value("Item", d.item_code, "is_stock_item") == "Yes" and d.warehouse: item_wh_list.append([d.item_code, d.warehouse]) for item_code, warehouse in item_wh_list: _update_ordered_qty(item_code, warehouse) def check_modified_date(self): mod_db = frappe.db.sql("select modified from `tabPurchase Order` where name = %s", self.name) date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" % ( mod_db[0][0],cstr(self.modified))) if date_diff and date_diff[0][0]: msgprint(_("{0} {1} has been modified. 
Please refresh.").format(self.doctype, self.name), raise_exception=True) def update_status(self, status): self.check_modified_date() frappe.db.set(self,'status',cstr(status)) self.update_requested_qty() self.update_ordered_qty() msgprint(_("Status of {0} {1} is now {2}").format(self.doctype, self.name, status)) def on_submit(self): super(PurchaseOrder, self).on_submit() purchase_controller = frappe.get_doc("Purchase Common") self.update_prevdoc_status() self.update_requested_qty() self.update_ordered_qty() frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.grand_total) purchase_controller.update_last_purchase_rate(self, is_submit = 1) frappe.db.set(self,'status','Submitted') def on_cancel(self): pc_obj = frappe.get_doc('Purchase Common') self.check_for_stopped_status(pc_obj) # Check if Purchase Receipt has been submitted against current Purchase Order pc_obj.check_docstatus(check = 'Next', doctype = 'Purchase Receipt', docname = self.name, detail_doctype = 'Purchase Receipt Item') # Check if Purchase Invoice has been submitted against current Purchase Order submitted = frappe.db.sql_list("""select t1.name from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2 where t1.name = t2.parent and t2.purchase_order = %s and t1.docstatus = 1""", self.name) if submitted: throw(_("Purchase Invoice {0} is already submitted").format(", ".join(submitted))) frappe.db.set(self,'status','Cancelled') self.update_prevdoc_status() # Must be called after updating ordered qty in Material Request self.update_requested_qty() self.update_ordered_qty() pc_obj.update_last_purchase_rate(self, is_submit = 0) def on_update(self): pass def set_missing_values(source, target): target.ignore_pricing_rule = 1 target.run_method("set_missing_values") target.run_method("calculate_taxes_and_totals") @frappe.whitelist() def make_purchase_receipt(source_name, target_doc=None): def update_item(obj, target, source_parent): target.qty = flt(obj.qty) - 
flt(obj.received_qty) target.stock_qty = (flt(obj.qty) - flt(obj.received_qty)) * flt(obj.conversion_factor) target.amount = (flt(obj.qty) - flt(obj.received_qty)) * flt(obj.rate) target.base_amount = (flt(obj.qty) - flt(obj.received_qty)) * \ flt(obj.rate) * flt(source_parent.conversion_rate) doc = get_mapped_doc("Purchase Order", source_name, { "Purchase Order": { "doctype": "Purchase Receipt", "validation": { "docstatus": ["=", 1], } }, "Purchase Order Item": { "doctype": "Purchase Receipt Item", "field_map": { "name": "prevdoc_detail_docname", "parent": "prevdoc_docname", "parenttype": "prevdoc_doctype", }, "postprocess": update_item, "condition": lambda doc: doc.received_qty < doc.qty }, "Purchase Taxes and Charges": { "doctype": "Purchase Taxes and Charges", "add_if_empty": True } }, target_doc, set_missing_values) return doc @frappe.whitelist() def make_purchase_invoice(source_name, target_doc=None): def update_item(obj, target, source_parent): target.amount = flt(obj.amount) - flt(obj.billed_amt) target.base_amount = target.amount * flt(source_parent.conversion_rate) target.qty = target.amount / flt(obj.rate) if (flt(obj.rate) and flt(obj.billed_amt)) else flt(obj.qty) doc = get_mapped_doc("Purchase Order", source_name, { "Purchase Order": { "doctype": "Purchase Invoice", "validation": { "docstatus": ["=", 1], } }, "Purchase Order Item": { "doctype": "Purchase Invoice Item", "field_map": { "name": "po_detail", "parent": "purchase_order", }, "postprocess": update_item, "condition": lambda doc: doc.base_amount==0 or doc.billed_amt < doc.amount }, "Purchase Taxes and Charges": { "doctype": "Purchase Taxes and Charges", "add_if_empty": True } }, target_doc, set_missing_values) return doc
agpl-3.0
jimberlage/servo
tests/wpt/web-platform-tests/tools/wptserve/wptserve/server.py
1
28166
from six.moves import BaseHTTPServer import errno import os import socket from six.moves.socketserver import ThreadingMixIn import ssl import sys import threading import time import traceback from six import binary_type, text_type import uuid from collections import OrderedDict from six.moves.queue import Queue from h2.config import H2Configuration from h2.connection import H2Connection from h2.events import RequestReceived, ConnectionTerminated, DataReceived, StreamReset, StreamEnded from six.moves.urllib.parse import urlsplit, urlunsplit from . import routes as default_routes from .config import ConfigBuilder from .logger import get_logger from .request import Server, Request, H2Request from .response import Response, H2Response from .router import Router from .utils import HTTPException from .constants import h2_headers """HTTP server designed for testing purposes. The server is designed to provide flexibility in the way that requests are handled, and to provide control both of exactly what bytes are put on the wire for the response, and in the timing of sending those bytes. The server is based on the stdlib HTTPServer, but with some notable differences in the way that requests are processed. Overall processing is handled by a WebTestRequestHandler, which is a subclass of BaseHTTPRequestHandler. This is responsible for parsing the incoming request. A RequestRewriter is then applied and may change the request data if it matches a supplied rule. Once the request data had been finalised, Request and Reponse objects are constructed. These are used by the other parts of the system to read information about the request and manipulate the response. Each request is handled by a particular handler function. The mapping between Request and the appropriate handler is determined by a Router. 
By default handlers are installed to interpret files under the document root with .py extensions as executable python files (see handlers.py for the api for such files), .asis files as bytestreams to be sent literally and all other files to be served statically. The handler functions are responsible for either populating the fields of the response object, which will then be written when the handler returns, or for directly writing to the output stream. """ class RequestRewriter(object): def __init__(self, rules): """Object for rewriting the request path. :param rules: Initial rules to add; a list of three item tuples (method, input_path, output_path), defined as for register() """ self.rules = {} for rule in reversed(rules): self.register(*rule) self.logger = get_logger() def register(self, methods, input_path, output_path): """Register a rewrite rule. :param methods: Set of methods this should match. "*" is a special value indicating that all methods should be matched. :param input_path: Path to match for the initial request. :param output_path: Path to replace the input path with in the request. """ if isinstance(methods, (binary_type, text_type)): methods = [methods] self.rules[input_path] = (methods, output_path) def rewrite(self, request_handler): """Rewrite the path in a BaseHTTPRequestHandler instance, if it matches a rule. :param request_handler: BaseHTTPRequestHandler for which to rewrite the request. 
""" split_url = urlsplit(request_handler.path) if split_url.path in self.rules: methods, destination = self.rules[split_url.path] if "*" in methods or request_handler.command in methods: self.logger.debug("Rewriting request path %s to %s" % (request_handler.path, destination)) new_url = list(split_url) new_url[2] = destination new_url = urlunsplit(new_url) request_handler.path = new_url class WebTestServer(ThreadingMixIn, BaseHTTPServer.HTTPServer): allow_reuse_address = True acceptable_errors = (errno.EPIPE, errno.ECONNABORTED) request_queue_size = 2000 # Ensure that we don't hang on shutdown waiting for requests daemon_threads = True def __init__(self, server_address, request_handler_cls, router, rewriter, bind_address, config=None, use_ssl=False, key_file=None, certificate=None, encrypt_after_connect=False, latency=None, http2=False, **kwargs): """Server for HTTP(s) Requests :param server_address: tuple of (server_name, port) :param request_handler_cls: BaseHTTPRequestHandler-like class to use for handling requests. :param router: Router instance to use for matching requests to handler functions :param rewriter: RequestRewriter-like instance to use for preprocessing requests before they are routed :param config: Dictionary holding environment configuration settings for handlers to read, or None to use the default values. :param use_ssl: Boolean indicating whether the server should use SSL :param key_file: Path to key file to use if SSL is enabled. :param certificate: Path to certificate to use if SSL is enabled. :param encrypt_after_connect: For each connection, don't start encryption until a CONNECT message has been received. This enables the server to act as a self-proxy. :param bind_address True to bind the server to both the IP address and port specified in the server_address parameter. False to bind the server only to the port in the server_address parameter, but not to the address. 
:param latency: Delay in ms to wait before seving each response, or callable that returns a delay in ms """ self.router = router self.rewriter = rewriter self.scheme = "http2" if http2 else "https" if use_ssl else "http" self.logger = get_logger() self.latency = latency if bind_address: hostname_port = server_address else: hostname_port = ("",server_address[1]) #super doesn't work here because BaseHTTPServer.HTTPServer is old-style BaseHTTPServer.HTTPServer.__init__(self, hostname_port, request_handler_cls, **kwargs) if config is not None: Server.config = config else: self.logger.debug("Using default configuration") with ConfigBuilder(browser_host=server_address[0], ports={"http": [self.server_address[1]]}) as config: assert config["ssl_config"] is None Server.config = config self.key_file = key_file self.certificate = certificate self.encrypt_after_connect = use_ssl and encrypt_after_connect if use_ssl and not encrypt_after_connect: if http2: ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH) ssl_context.load_cert_chain(keyfile=self.key_file, certfile=self.certificate) ssl_context.set_alpn_protocols(['h2']) self.socket = ssl_context.wrap_socket(self.socket, server_side=True) else: self.socket = ssl.wrap_socket(self.socket, keyfile=self.key_file, certfile=self.certificate, server_side=True) def handle_error(self, request, client_address): error = sys.exc_info()[1] if ((isinstance(error, socket.error) and isinstance(error.args, tuple) and error.args[0] in self.acceptable_errors) or (isinstance(error, IOError) and error.errno in self.acceptable_errors)): pass # remote hang up before the result is sent else: self.logger.error(traceback.format_exc()) class BaseWebTestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): """RequestHandler for WebTestHttpd""" def __init__(self, *args, **kwargs): self.logger = get_logger() BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs) def finish_handling_h1(self, request_line_is_valid): 
self.server.rewriter.rewrite(self) request = Request(self) response = Response(self, request) if request.method == "CONNECT": self.handle_connect(response) return if not request_line_is_valid: response.set_error(414) response.write() return self.logger.debug("%s %s" % (request.method, request.request_path)) handler = self.server.router.get_handler(request) self.finish_handling(request, response, handler) def finish_handling(self, request, response, handler): # If the handler we used for the request had a non-default base path # set update the doc_root of the request to reflect this if hasattr(handler, "base_path") and handler.base_path: request.doc_root = handler.base_path if hasattr(handler, "url_base") and handler.url_base != "/": request.url_base = handler.url_base if self.server.latency is not None: if callable(self.server.latency): latency = self.server.latency() else: latency = self.server.latency self.logger.warning("Latency enabled. Sleeping %i ms" % latency) time.sleep(latency / 1000.) if handler is None: self.logger.debug("No Handler found!") response.set_error(404) else: try: handler(request, response) except HTTPException as e: response.set_error(e.code, e.message) except Exception as e: self.respond_with_error(response, e) self.logger.debug("%i %s %s (%s) %i" % (response.status[0], request.method, request.request_path, request.headers.get('Referer'), request.raw_input.length)) if not response.writer.content_written: response.write() # If a python handler has been used, the old ones won't send a END_STR data frame, so this # allows for backwards compatibility by accounting for these handlers that don't close streams if isinstance(response, H2Response) and not response.writer.stream_ended: response.writer.end_stream() # If we want to remove this in the future, a solution is needed for # scripts that produce a non-string iterable of content, since these # can't set a Content-Length header. 
A notable example of this kind of # problem is with the trickle pipe i.e. foo.js?pipe=trickle(d1) if response.close_connection: self.close_connection = True if not self.close_connection: # Ensure that the whole request has been read from the socket request.raw_input.read() def handle_connect(self, response): self.logger.debug("Got CONNECT") response.status = 200 response.write() if self.server.encrypt_after_connect: self.logger.debug("Enabling SSL for connection") self.request = ssl.wrap_socket(self.connection, keyfile=self.server.key_file, certfile=self.server.certificate, server_side=True) self.setup() return def respond_with_error(self, response, e): message = str(e) if message: err = [message] else: err = [] err.append(traceback.format_exc()) response.set_error(500, "\n".join(err)) class Http2WebTestRequestHandler(BaseWebTestRequestHandler): protocol_version = "HTTP/2.0" def handle_one_request(self): """ This is the main HTTP/2.0 Handler. When a browser opens a connection to the server on the HTTP/2.0 port, the server enters this which will initiate the h2 connection and keep running throughout the duration of the interaction, and will read/write directly from the socket. Because there can be multiple H2 connections active at the same time, a UUID is created for each so that it is easier to tell them apart in the logs. 
""" config = H2Configuration(client_side=False) self.conn = H2ConnectionGuard(H2Connection(config=config)) self.close_connection = False # Generate a UUID to make it easier to distinguish different H2 connection debug messages self.uid = str(uuid.uuid4())[:8] self.logger.debug('(%s) Initiating h2 Connection' % self.uid) with self.conn as connection: connection.initiate_connection() data = connection.data_to_send() window_size = connection.remote_settings.initial_window_size self.request.sendall(data) # Dict of { stream_id: (thread, queue) } stream_queues = {} try: while not self.close_connection: data = self.request.recv(window_size) if data == '': self.logger.debug('(%s) Socket Closed' % self.uid) self.close_connection = True continue with self.conn as connection: frames = connection.receive_data(data) window_size = connection.remote_settings.initial_window_size self.logger.debug('(%s) Frames Received: ' % self.uid + str(frames)) for frame in frames: if isinstance(frame, ConnectionTerminated): self.logger.debug('(%s) Connection terminated by remote peer ' % self.uid) self.close_connection = True # Flood all the streams with connection terminated, this will cause them to stop for stream_id, (thread, queue) in stream_queues.items(): queue.put(frame) elif hasattr(frame, 'stream_id'): if frame.stream_id not in stream_queues: queue = Queue() stream_queues[frame.stream_id] = (self.start_stream_thread(frame, queue), queue) stream_queues[frame.stream_id][1].put(frame) if isinstance(frame, StreamEnded) or (hasattr(frame, "stream_ended") and frame.stream_ended): del stream_queues[frame.stream_id] except (socket.timeout, socket.error) as e: self.logger.error('(%s) Closing Connection - \n%s' % (self.uid, str(e))) if not self.close_connection: self.close_connection = True for stream_id, (thread, queue) in stream_queues.items(): queue.put(None) except Exception as e: self.logger.error('(%s) Unexpected Error - \n%s' % (self.uid, str(e))) finally: for stream_id, (thread, queue) 
in stream_queues.items(): thread.join() def start_stream_thread(self, frame, queue): """ This starts a new thread to handle frames for a specific stream. :param frame: The first frame on the stream :param queue: A queue object that the thread will use to check for new frames :return: The thread object that has already been started """ t = threading.Thread( target=Http2WebTestRequestHandler._stream_thread, args=(self, frame.stream_id, queue) ) t.start() return t def _stream_thread(self, stream_id, queue): """ This thread processes frames for a specific stream. It waits for frames to be placed in the queue, and processes them. When it receives a request frame, it will start processing immediately, even if there are data frames to follow. One of the reasons for this is that it can detect invalid requests before needing to read the rest of the frames. """ # The file-like pipe object that will be used to share data to request object if data is received wfile = None request = None response = None req_handler = None while not self.close_connection: # Wait for next frame, blocking frame = queue.get(True, None) self.logger.debug('(%s - %s) %s' % (self.uid, stream_id, str(frame))) if isinstance(frame, RequestReceived): rfile, wfile = os.pipe() rfile, wfile = os.fdopen(rfile, 'rb'), os.fdopen(wfile, 'wb') stream_handler = H2HandlerCopy(self, frame, rfile) stream_handler.server.rewriter.rewrite(stream_handler) request = H2Request(stream_handler) response = H2Response(stream_handler, request) req_handler = stream_handler.server.router.get_handler(request) if hasattr(req_handler, "frame_handler"): # Convert this to a handler that will utilise H2 specific functionality, such as handling individual frames req_handler = self.frame_handler(request, response, req_handler) if hasattr(req_handler, 'handle_headers'): req_handler.handle_headers(frame, request, response) elif isinstance(frame, DataReceived): wfile.write(frame.data) if hasattr(req_handler, 'handle_data'): 
req_handler.handle_data(frame, request, response) if frame.stream_ended: wfile.close() elif frame is None or isinstance(frame, (StreamReset, StreamEnded, ConnectionTerminated)): self.logger.debug('(%s - %s) Stream Reset, Thread Closing' % (self.uid, stream_id)) break if request is not None: request.frames.append(frame) if hasattr(frame, "stream_ended") and frame.stream_ended: self.finish_handling(request, response, req_handler) def frame_handler(self, request, response, handler): try: return handler.frame_handler(request) except HTTPException as e: response.set_error(e.code, e.message) response.write() except Exception as e: self.respond_with_error(response, e) response.write() class H2ConnectionGuard(object): """H2Connection objects are not threadsafe, so this keeps thread safety""" lock = threading.Lock() def __init__(self, obj): assert isinstance(obj, H2Connection) self.obj = obj def __enter__(self): self.lock.acquire() return self.obj def __exit__(self, exception_type, exception_value, traceback): self.lock.release() class H2Headers(dict): def __init__(self, headers): self.raw_headers = OrderedDict() for key, val in headers: self.raw_headers[key] = val dict.__setitem__(self, self._convert_h2_header_to_h1(key), val) def _convert_h2_header_to_h1(self, header_key): if header_key[1:] in h2_headers and header_key[0] == ':': return header_key[1:] else: return header_key # TODO This does not seem relevant for H2 headers, so using a dummy function for now def getallmatchingheaders(self, header): return ['dummy function'] class H2HandlerCopy(object): def __init__(self, handler, req_frame, rfile): self.headers = H2Headers(req_frame.headers) self.command = self.headers['method'] self.path = self.headers['path'] self.h2_stream_id = req_frame.stream_id self.server = handler.server self.protocol_version = handler.protocol_version self.raw_requestline = '' self.rfile = rfile self.request = handler.request self.conn = handler.conn class 
Http1WebTestRequestHandler(BaseWebTestRequestHandler): protocol_version = "HTTP/1.1" def handle_one_request(self): response = None try: self.close_connection = False request_line_is_valid = self.get_request_line() if self.close_connection: return request_is_valid = self.parse_request() if not request_is_valid: #parse_request() actually sends its own error responses return self.finish_handling_h1(request_line_is_valid) except socket.timeout as e: self.log_error("Request timed out: %r", e) self.close_connection = True return except Exception: err = traceback.format_exc() if response: response.set_error(500, err) response.write() self.logger.error(err) def get_request_line(self): try: self.raw_requestline = self.rfile.readline(65537) except socket.error: self.close_connection = True return False if len(self.raw_requestline) > 65536: self.requestline = '' self.request_version = '' self.command = '' return False if not self.raw_requestline: self.close_connection = True return True class WebTestHttpd(object): """ :param host: Host from which to serve (default: 127.0.0.1) :param port: Port from which to serve (default: 8000) :param server_cls: Class to use for the server (default depends on ssl vs non-ssl) :param handler_cls: Class to use for the RequestHandler :param use_ssl: Use a SSL server if no explicit server_cls is supplied :param key_file: Path to key file to use if ssl is enabled :param certificate: Path to certificate file to use if ssl is enabled :param encrypt_after_connect: For each connection, don't start encryption until a CONNECT message has been received. This enables the server to act as a self-proxy. 
:param router_cls: Router class to use when matching URLs to handlers :param doc_root: Document root for serving files :param routes: List of routes with which to initialize the router :param rewriter_cls: Class to use for request rewriter :param rewrites: List of rewrites with which to initialize the rewriter_cls :param config: Dictionary holding environment configuration settings for handlers to read, or None to use the default values. :param bind_address: Boolean indicating whether to bind server to IP address. :param latency: Delay in ms to wait before seving each response, or callable that returns a delay in ms HTTP server designed for testing scenarios. Takes a router class which provides one method get_handler which takes a Request and returns a handler function. .. attribute:: host The host name or ip address of the server .. attribute:: port The port on which the server is running .. attribute:: router The Router object used to associate requests with resources for this server .. attribute:: rewriter The Rewriter object used for URL rewriting .. attribute:: use_ssl Boolean indicating whether the server is using ssl .. 
attribute:: started Boolean indictaing whether the server is running """ def __init__(self, host="127.0.0.1", port=8000, server_cls=None, handler_cls=Http1WebTestRequestHandler, use_ssl=False, key_file=None, certificate=None, encrypt_after_connect=False, router_cls=Router, doc_root=os.curdir, routes=None, rewriter_cls=RequestRewriter, bind_address=True, rewrites=None, latency=None, config=None, http2=False): if routes is None: routes = default_routes.routes self.host = host self.router = router_cls(doc_root, routes) self.rewriter = rewriter_cls(rewrites if rewrites is not None else []) self.use_ssl = use_ssl self.http2 = http2 self.logger = get_logger() if server_cls is None: server_cls = WebTestServer if use_ssl: if not os.path.exists(key_file): raise ValueError("SSL certificate not found: {}".format(key_file)) if not os.path.exists(certificate): raise ValueError("SSL key not found: {}".format(certificate)) try: self.httpd = server_cls((host, port), handler_cls, self.router, self.rewriter, config=config, bind_address=bind_address, use_ssl=use_ssl, key_file=key_file, certificate=certificate, encrypt_after_connect=encrypt_after_connect, latency=latency, http2=http2) self.started = False _host, self.port = self.httpd.socket.getsockname() except Exception: self.logger.error("Failed to start HTTP server. " "You may need to edit /etc/hosts or similar, see README.md.") raise def start(self, block=False): """Start the server. :param block: True to run the server on the current thread, blocking, False to run on a separate thread.""" http_type = "http2" if self.http2 else "https" if self.use_ssl else "http" self.logger.info("Starting %s server on %s:%s" % (http_type, self.host, self.port)) self.started = True if block: self.httpd.serve_forever() else: self.server_thread = threading.Thread(target=self.httpd.serve_forever) self.server_thread.setDaemon(True) # don't hang on exit self.server_thread.start() def stop(self): """ Stops the server. 
If the server is not running, this method has no effect. """ if self.started: try: self.httpd.shutdown() self.httpd.server_close() self.server_thread.join() self.server_thread = None self.logger.info("Stopped http server on %s:%s" % (self.host, self.port)) except AttributeError: pass self.started = False self.httpd = None def get_url(self, path="/", query=None, fragment=None): if not self.started: return None return urlunsplit(("http" if not self.use_ssl else "https", "%s:%s" % (self.host, self.port), path, query, fragment))
mpl-2.0
nityas/6869-finalproject
src/ann.py
1
3260
import sys try: from sklearn import datasets except: print("scikit-learn is required to run this example.") exit(1) try: from openann import * except: print("OpenANN Python bindings are not installed!") exit(1) #NOTE: LABELS ARE 0-INDEXED, UNLIKE WITH LOGISTIC REGRESSION HOG_TRAINING_DATA = 'data/hog_training_data.npy' HOG_TRAINING_LABELS = 'data/hog_training_labels.npy' HOG_TESTING_DATA = 'data/hog_testing_data.npy' HOG_TESTING_LABELS = 'data/hog_testing_labels.npy' def print_usage(): print("Usage:") print(" python benchmark [run]") def run_ann(): train_labels = numpy.load(HOG_TRAINING_LABELS) train_features = numpy.load(HOG_TRAINING_DATA) test_labels = numpy.load(HOG_TESTING_LABELS) test_features = numpy.load(HOG_TESTING_DATA) total_features = numpy.concatenate((train_features, test_features), axis=0) total_labels = numpy.concatenate((train_labels, test_labels), axis=0) X = numpy.array(total_features) Y = numpy.array(total_labels) Y = Y - 1 D = X.shape[1] F = len(numpy.unique(Y)) N = len(X) # Preprocess data (normalization and 1-of-c encoding) stds = X.std(axis=0) for i in range (0, len(stds)): if stds[i] == 0: stds[i] = 1 X = (X - X.mean(axis=0)) / stds T = numpy.zeros((N, F)) T[(range(N), Y)] = 1.0 # Setup network net = Net() net.set_regularization(0.01, 0.01, 0) net.input_layer(D) net.fully_connected_layer(100, Activation.LOGISTIC) net.output_layer(F, Activation.SOFTMAX) net.set_error_function(Error.CE) # Split dataset into training set and validation set and make sure that # each class is equally distributed in the datasets X1 = numpy.vstack((X[0:(N/2)])) T1 = numpy.vstack((T[0:(N/2)])) training_set = DataSet(X1, T1) X2 = numpy.vstack((X[(N/2):])) T2 = numpy.vstack((T[(N/2):])) validation_set = DataSet(X2, T2) # Train for 30 episodes (with tuned parameters for MBSGD) optimizer = MBSGD({"maximal_iterations": 30}, learning_rate=0.9, learning_rate_decay=0.999, min_learning_rate=0.001, momentum=0.5, batch_size=128) Log.set_info() # Deactivate debug output 
optimizer.optimize(net, training_set) print("TF data set has %d inputs, %d classes and %d examples" % (D, F, N)) print("The data has been split up input training and validation set.") training_percent = float(classification_hits(net, training_set)) / len(X1) testing_percent = float(classification_hits(net, validation_set)) / len(X2) print("Correct predictions on training set: %d/%d, and percent is: %f" % (classification_hits(net, training_set), len(X1), training_percent)) print("Confusion matrix:") print(confusion_matrix(net, training_set)[0]) print("Correct predictions on test set: %d/%d, and percent is: %f" % (classification_hits(net, validation_set), len(X2), testing_percent)) print("Confusion matrix:") print(confusion_matrix(net, validation_set)[0]) if __name__ == "__main__": if len(sys.argv) == 1: print_usage() for command in sys.argv[1:]: if command == "run": run_ann() else: print_usage() exit(1)
mit
Denisolt/Tensorflow_Chat_Bot
local/lib/python2.7/site-packages/pip/_vendor/distlib/util.py
327
52991
# # Copyright (C) 2012-2016 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # import codecs from collections import deque import contextlib import csv from glob import iglob as std_iglob import io import json import logging import os import py_compile import re import shutil import socket try: import ssl except ImportError: # pragma: no cover ssl = None import subprocess import sys import tarfile import tempfile import textwrap try: import threading except ImportError: # pragma: no cover import dummy_threading as threading import time from . import DistlibException from .compat import (string_types, text_type, shutil, raw_input, StringIO, cache_from_source, urlopen, urljoin, httplib, xmlrpclib, splittype, HTTPHandler, BaseConfigurator, valid_ident, Container, configparser, URLError, ZipFile, fsdecode, unquote) logger = logging.getLogger(__name__) # # Requirement parsing code for name + optional constraints + optional extras # # e.g. 'foo >= 1.2, < 2.0 [bar, baz]' # # The regex can seem a bit hairy, so we build it up out of smaller pieces # which are manageable. # COMMA = r'\s*,\s*' COMMA_RE = re.compile(COMMA) IDENT = r'(\w|[.-])+' EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')' VERSPEC = IDENT + r'\*?' 
RELOP = '([<>=!~]=)|[<>]' # # The first relop is optional - if absent, will be taken as '~=' # BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' + RELOP + r')\s*(' + VERSPEC + '))*') DIRECT_REF = '(from\s+(?P<diref>.*))' # # Either the bare constraints or the bare constraints in parentheses # CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF + r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + '\s*)') EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*' EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]' REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' + CONSTRAINTS + ')?$') REQUIREMENT_RE = re.compile(REQUIREMENT) # # Used to scan through the constraints # RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')' RELOP_IDENT_RE = re.compile(RELOP_IDENT) def parse_requirement(s): def get_constraint(m): d = m.groupdict() return d['op'], d['vn'] result = None m = REQUIREMENT_RE.match(s) if m: d = m.groupdict() name = d['dn'] cons = d['c1'] or d['c2'] if not d['diref']: url = None else: # direct reference cons = None url = d['diref'].strip() if not cons: cons = None constr = '' rs = d['dn'] else: if cons[0] not in '<>!=': cons = '~=' + cons iterator = RELOP_IDENT_RE.finditer(cons) cons = [get_constraint(m) for m in iterator] rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons])) if not d['ex']: extras = None else: extras = COMMA_RE.split(d['ex']) result = Container(name=name, constraints=cons, extras=extras, requirement=rs, source=s, url=url) return result def get_resources_dests(resources_root, rules): """Find destinations for resources files""" def get_rel_path(base, path): # normalizes and returns a lstripped-/-separated path base = base.replace(os.path.sep, '/') path = path.replace(os.path.sep, '/') assert path.startswith(base) return path[len(base):].lstrip('/') destinations = {} for base, suffix, dest in rules: prefix = os.path.join(resources_root, base) for abs_base in iglob(prefix): 
abs_glob = os.path.join(abs_base, suffix) for abs_path in iglob(abs_glob): resource_file = get_rel_path(resources_root, abs_path) if dest is None: # remove the entry if it was here destinations.pop(resource_file, None) else: rel_path = get_rel_path(abs_base, abs_path) rel_dest = dest.replace(os.path.sep, '/').rstrip('/') destinations[resource_file] = rel_dest + '/' + rel_path return destinations def in_venv(): if hasattr(sys, 'real_prefix'): # virtualenv venvs result = True else: # PEP 405 venvs result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix) return result def get_executable(): # The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as # changes to the stub launcher mean that sys.executable always points # to the stub on macOS # if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__' # in os.environ): # result = os.environ['__PYVENV_LAUNCHER__'] # else: # result = sys.executable # return result result = os.path.normcase(sys.executable) if not isinstance(result, text_type): result = fsdecode(result) return result def proceed(prompt, allowed_chars, error_prompt=None, default=None): p = prompt while True: s = raw_input(p) p = prompt if not s and default: s = default if s: c = s[0].lower() if c in allowed_chars: break if error_prompt: p = '%c: %s\n%s' % (c, error_prompt, prompt) return c def extract_by_key(d, keys): if isinstance(keys, string_types): keys = keys.split() result = {} for key in keys: if key in d: result[key] = d[key] return result def read_exports(stream): if sys.version_info[0] >= 3: # needs to be a text stream stream = codecs.getreader('utf-8')(stream) # Try to load as JSON, falling back on legacy format data = stream.read() stream = StringIO(data) try: jdata = json.load(stream) result = jdata['extensions']['python.exports']['exports'] for group, entries in result.items(): for k, v in entries.items(): s = '%s = %s' % (k, v) entry = get_export_entry(s) assert entry is not None entries[k] = entry return result except Exception: 
stream.seek(0, 0) def read_stream(cp, stream): if hasattr(cp, 'read_file'): cp.read_file(stream) else: cp.readfp(stream) cp = configparser.ConfigParser() try: read_stream(cp, stream) except configparser.MissingSectionHeaderError: stream.close() data = textwrap.dedent(data) stream = StringIO(data) read_stream(cp, stream) result = {} for key in cp.sections(): result[key] = entries = {} for name, value in cp.items(key): s = '%s = %s' % (name, value) entry = get_export_entry(s) assert entry is not None #entry.dist = self entries[name] = entry return result def write_exports(exports, stream): if sys.version_info[0] >= 3: # needs to be a text stream stream = codecs.getwriter('utf-8')(stream) cp = configparser.ConfigParser() for k, v in exports.items(): # TODO check k, v for valid values cp.add_section(k) for entry in v.values(): if entry.suffix is None: s = entry.prefix else: s = '%s:%s' % (entry.prefix, entry.suffix) if entry.flags: s = '%s [%s]' % (s, ', '.join(entry.flags)) cp.set(k, entry.name, s) cp.write(stream) @contextlib.contextmanager def tempdir(): td = tempfile.mkdtemp() try: yield td finally: shutil.rmtree(td) @contextlib.contextmanager def chdir(d): cwd = os.getcwd() try: os.chdir(d) yield finally: os.chdir(cwd) @contextlib.contextmanager def socket_timeout(seconds=15): cto = socket.getdefaulttimeout() try: socket.setdefaulttimeout(seconds) yield finally: socket.setdefaulttimeout(cto) class cached_property(object): def __init__(self, func): self.func = func #for attr in ('__name__', '__module__', '__doc__'): # setattr(self, attr, getattr(func, attr, None)) def __get__(self, obj, cls=None): if obj is None: return self value = self.func(obj) object.__setattr__(obj, self.func.__name__, value) #obj.__dict__[self.func.__name__] = value = self.func(obj) return value def convert_path(pathname): """Return 'pathname' as a name that will work on the native filesystem. The path is split on '/' and put back together again using the current directory separator. 
Needed because filenames in the setup script are always supplied in Unix style, and have to be converted to the local convention before we can actually use them in the filesystem. Raises ValueError on non-Unix-ish systems if 'pathname' either starts or ends with a slash. """ if os.sep == '/': return pathname if not pathname: return pathname if pathname[0] == '/': raise ValueError("path '%s' cannot be absolute" % pathname) if pathname[-1] == '/': raise ValueError("path '%s' cannot end with '/'" % pathname) paths = pathname.split('/') while os.curdir in paths: paths.remove(os.curdir) if not paths: return os.curdir return os.path.join(*paths) class FileOperator(object): def __init__(self, dry_run=False): self.dry_run = dry_run self.ensured = set() self._init_record() def _init_record(self): self.record = False self.files_written = set() self.dirs_created = set() def record_as_written(self, path): if self.record: self.files_written.add(path) def newer(self, source, target): """Tell if the target is newer than the source. Returns true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Returns false if both exist and 'target' is the same age or younger than 'source'. Raise PackagingFileError if 'source' does not exist. Note that this test is not very accurate: files created in the same second will have the same "age". """ if not os.path.exists(source): raise DistlibException("file '%r' does not exist" % os.path.abspath(source)) if not os.path.exists(target): return True return os.stat(source).st_mtime > os.stat(target).st_mtime def copy_file(self, infile, outfile, check=True): """Copy a file respecting dry-run and force flags. 
""" self.ensure_dir(os.path.dirname(outfile)) logger.info('Copying %s to %s', infile, outfile) if not self.dry_run: msg = None if check: if os.path.islink(outfile): msg = '%s is a symlink' % outfile elif os.path.exists(outfile) and not os.path.isfile(outfile): msg = '%s is a non-regular file' % outfile if msg: raise ValueError(msg + ' which would be overwritten') shutil.copyfile(infile, outfile) self.record_as_written(outfile) def copy_stream(self, instream, outfile, encoding=None): assert not os.path.isdir(outfile) self.ensure_dir(os.path.dirname(outfile)) logger.info('Copying stream %s to %s', instream, outfile) if not self.dry_run: if encoding is None: outstream = open(outfile, 'wb') else: outstream = codecs.open(outfile, 'w', encoding=encoding) try: shutil.copyfileobj(instream, outstream) finally: outstream.close() self.record_as_written(outfile) def write_binary_file(self, path, data): self.ensure_dir(os.path.dirname(path)) if not self.dry_run: with open(path, 'wb') as f: f.write(data) self.record_as_written(path) def write_text_file(self, path, data, encoding): self.ensure_dir(os.path.dirname(path)) if not self.dry_run: with open(path, 'wb') as f: f.write(data.encode(encoding)) self.record_as_written(path) def set_mode(self, bits, mask, files): if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'): # Set the executable bits (owner, group, and world) on # all the files specified. 
for f in files: if self.dry_run: logger.info("changing mode of %s", f) else: mode = (os.stat(f).st_mode | bits) & mask logger.info("changing mode of %s to %o", f, mode) os.chmod(f, mode) set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f) def ensure_dir(self, path): path = os.path.abspath(path) if path not in self.ensured and not os.path.exists(path): self.ensured.add(path) d, f = os.path.split(path) self.ensure_dir(d) logger.info('Creating %s' % path) if not self.dry_run: os.mkdir(path) if self.record: self.dirs_created.add(path) def byte_compile(self, path, optimize=False, force=False, prefix=None): dpath = cache_from_source(path, not optimize) logger.info('Byte-compiling %s to %s', path, dpath) if not self.dry_run: if force or self.newer(path, dpath): if not prefix: diagpath = None else: assert path.startswith(prefix) diagpath = path[len(prefix):] py_compile.compile(path, dpath, diagpath, True) # raise error self.record_as_written(dpath) return dpath def ensure_removed(self, path): if os.path.exists(path): if os.path.isdir(path) and not os.path.islink(path): logger.debug('Removing directory tree at %s', path) if not self.dry_run: shutil.rmtree(path) if self.record: if path in self.dirs_created: self.dirs_created.remove(path) else: if os.path.islink(path): s = 'link' else: s = 'file' logger.debug('Removing %s %s', s, path) if not self.dry_run: os.remove(path) if self.record: if path in self.files_written: self.files_written.remove(path) def is_writable(self, path): result = False while not result: if os.path.exists(path): result = os.access(path, os.W_OK) break parent = os.path.dirname(path) if parent == path: break path = parent return result def commit(self): """ Commit recorded changes, turn off recording, return changes. 
""" assert self.record result = self.files_written, self.dirs_created self._init_record() return result def rollback(self): if not self.dry_run: for f in list(self.files_written): if os.path.exists(f): os.remove(f) # dirs should all be empty now, except perhaps for # __pycache__ subdirs # reverse so that subdirs appear before their parents dirs = sorted(self.dirs_created, reverse=True) for d in dirs: flist = os.listdir(d) if flist: assert flist == ['__pycache__'] sd = os.path.join(d, flist[0]) os.rmdir(sd) os.rmdir(d) # should fail if non-empty self._init_record() def resolve(module_name, dotted_path): if module_name in sys.modules: mod = sys.modules[module_name] else: mod = __import__(module_name) if dotted_path is None: result = mod else: parts = dotted_path.split('.') result = getattr(mod, parts.pop(0)) for p in parts: result = getattr(result, p) return result class ExportEntry(object): def __init__(self, name, prefix, suffix, flags): self.name = name self.prefix = prefix self.suffix = suffix self.flags = flags @cached_property def value(self): return resolve(self.prefix, self.suffix) def __repr__(self): # pragma: no cover return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix, self.suffix, self.flags) def __eq__(self, other): if not isinstance(other, ExportEntry): result = False else: result = (self.name == other.name and self.prefix == other.prefix and self.suffix == other.suffix and self.flags == other.flags) return result __hash__ = object.__hash__ ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+) \s*=\s*(?P<callable>(\w+)([:\.]\w+)*) \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? 
''', re.VERBOSE) def get_export_entry(specification): m = ENTRY_RE.search(specification) if not m: result = None if '[' in specification or ']' in specification: raise DistlibException("Invalid specification " "'%s'" % specification) else: d = m.groupdict() name = d['name'] path = d['callable'] colons = path.count(':') if colons == 0: prefix, suffix = path, None else: if colons != 1: raise DistlibException("Invalid specification " "'%s'" % specification) prefix, suffix = path.split(':') flags = d['flags'] if flags is None: if '[' in specification or ']' in specification: raise DistlibException("Invalid specification " "'%s'" % specification) flags = [] else: flags = [f.strip() for f in flags.split(',')] result = ExportEntry(name, prefix, suffix, flags) return result def get_cache_base(suffix=None): """ Return the default base location for distlib caches. If the directory does not exist, it is created. Use the suffix provided for the base directory, and default to '.distlib' if it isn't provided. On Windows, if LOCALAPPDATA is defined in the environment, then it is assumed to be a directory, and will be the parent directory of the result. On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home directory - using os.expanduser('~') - will be the parent directory of the result. The result is just the directory '.distlib' in the parent directory as determined above, or with the name specified with ``suffix``. 
""" if suffix is None: suffix = '.distlib' if os.name == 'nt' and 'LOCALAPPDATA' in os.environ: result = os.path.expandvars('$localappdata') else: # Assume posix, or old Windows result = os.path.expanduser('~') # we use 'isdir' instead of 'exists', because we want to # fail if there's a file with that name if os.path.isdir(result): usable = os.access(result, os.W_OK) if not usable: logger.warning('Directory exists but is not writable: %s', result) else: try: os.makedirs(result) usable = True except OSError: logger.warning('Unable to create %s', result, exc_info=True) usable = False if not usable: result = tempfile.mkdtemp() logger.warning('Default location unusable, using %s', result) return os.path.join(result, suffix) def path_to_cache_dir(path): """ Convert an absolute path to a directory name for use in a cache. The algorithm used is: #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. #. Any occurrence of ``os.sep`` is replaced with ``'--'``. #. ``'.cache'`` is appended. 
""" d, p = os.path.splitdrive(os.path.abspath(path)) if d: d = d.replace(':', '---') p = p.replace(os.sep, '--') return d + p + '.cache' def ensure_slash(s): if not s.endswith('/'): return s + '/' return s def parse_credentials(netloc): username = password = None if '@' in netloc: prefix, netloc = netloc.split('@', 1) if ':' not in prefix: username = prefix else: username, password = prefix.split(':', 1) return username, password, netloc def get_process_umask(): result = os.umask(0o22) os.umask(result) return result def is_string_sequence(seq): result = True i = None for i, s in enumerate(seq): if not isinstance(s, string_types): result = False break assert i is not None return result PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-' '([a-z0-9_.+-]+)', re.I) PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)') def split_filename(filename, project_name=None): """ Extract name, version, python version from a filename (no extension) Return name, version, pyver or None """ result = None pyver = None filename = unquote(filename).replace(' ', '-') m = PYTHON_VERSION.search(filename) if m: pyver = m.group(1) filename = filename[:m.start()] if project_name and len(filename) > len(project_name) + 1: m = re.match(re.escape(project_name) + r'\b', filename) if m: n = m.end() result = filename[:n], filename[n + 1:], pyver if result is None: m = PROJECT_NAME_AND_VERSION.match(filename) if m: result = m.group(1), m.group(3), pyver return result # Allow spaces in name because of legacy dists like "Twisted Core" NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*' r'\(\s*(?P<ver>[^\s)]+)\)$') def parse_name_and_version(p): """ A utility method used to get name and version from a string. From e.g. a Provides-Dist value. :param p: A value in a form 'foo (1.0)' :return: The name and version as a tuple. 
""" m = NAME_VERSION_RE.match(p) if not m: raise DistlibException('Ill-formed name/version string: \'%s\'' % p) d = m.groupdict() return d['name'].strip().lower(), d['ver'] def get_extras(requested, available): result = set() requested = set(requested or []) available = set(available or []) if '*' in requested: requested.remove('*') result |= available for r in requested: if r == '-': result.add(r) elif r.startswith('-'): unwanted = r[1:] if unwanted not in available: logger.warning('undeclared extra: %s' % unwanted) if unwanted in result: result.remove(unwanted) else: if r not in available: logger.warning('undeclared extra: %s' % r) result.add(r) return result # # Extended metadata functionality # def _get_external_data(url): result = {} try: # urlopen might fail if it runs into redirections, # because of Python issue #13696. Fixed in locators # using a custom redirect handler. resp = urlopen(url) headers = resp.info() ct = headers.get('Content-Type') if not ct.startswith('application/json'): logger.debug('Unexpected response for JSON request: %s', ct) else: reader = codecs.getreader('utf-8')(resp) #data = reader.read().decode('utf-8') #result = json.loads(data) result = json.load(reader) except Exception as e: logger.exception('Failed to get external data for %s: %s', url, e) return result _external_data_base_url = 'https://www.red-dove.com/pypi/projects/' def get_project_data(name): url = '%s/%s/project.json' % (name[0].upper(), name) url = urljoin(_external_data_base_url, url) result = _get_external_data(url) return result def get_package_data(name, version): url = '%s/%s/package-%s.json' % (name[0].upper(), name, version) url = urljoin(_external_data_base_url, url) return _get_external_data(url) class Cache(object): """ A class implementing a cache for resources that need to live in the file system e.g. shared libraries. This class was moved from resources to here because it could be used by other modules, e.g. the wheel module. 
""" def __init__(self, base): """ Initialise an instance. :param base: The base directory where the cache should be located. """ # we use 'isdir' instead of 'exists', because we want to # fail if there's a file with that name if not os.path.isdir(base): # pragma: no cover os.makedirs(base) if (os.stat(base).st_mode & 0o77) != 0: logger.warning('Directory \'%s\' is not private', base) self.base = os.path.abspath(os.path.normpath(base)) def prefix_to_dir(self, prefix): """ Converts a resource prefix to a directory name in the cache. """ return path_to_cache_dir(prefix) def clear(self): """ Clear the cache. """ not_removed = [] for fn in os.listdir(self.base): fn = os.path.join(self.base, fn) try: if os.path.islink(fn) or os.path.isfile(fn): os.remove(fn) elif os.path.isdir(fn): shutil.rmtree(fn) except Exception: not_removed.append(fn) return not_removed class EventMixin(object): """ A very simple publish/subscribe system. """ def __init__(self): self._subscribers = {} def add(self, event, subscriber, append=True): """ Add a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be added (and called when the event is published). :param append: Whether to append or prepend the subscriber to an existing subscriber list for the event. """ subs = self._subscribers if event not in subs: subs[event] = deque([subscriber]) else: sq = subs[event] if append: sq.append(subscriber) else: sq.appendleft(subscriber) def remove(self, event, subscriber): """ Remove a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be removed. """ subs = self._subscribers if event not in subs: raise ValueError('No subscribers: %r' % event) subs[event].remove(subscriber) def get_subscribers(self, event): """ Return an iterator for the subscribers for an event. :param event: The event to return subscribers for. 
""" return iter(self._subscribers.get(event, ())) def publish(self, event, *args, **kwargs): """ Publish a event and return a list of values returned by its subscribers. :param event: The event to publish. :param args: The positional arguments to pass to the event's subscribers. :param kwargs: The keyword arguments to pass to the event's subscribers. """ result = [] for subscriber in self.get_subscribers(event): try: value = subscriber(event, *args, **kwargs) except Exception: logger.exception('Exception during event publication') value = None result.append(value) logger.debug('publish %s: args = %s, kwargs = %s, result = %s', event, args, kwargs, result) return result # # Simple sequencing # class Sequencer(object): def __init__(self): self._preds = {} self._succs = {} self._nodes = set() # nodes with no preds/succs def add_node(self, node): self._nodes.add(node) def remove_node(self, node, edges=False): if node in self._nodes: self._nodes.remove(node) if edges: for p in set(self._preds.get(node, ())): self.remove(p, node) for s in set(self._succs.get(node, ())): self.remove(node, s) # Remove empties for k, v in list(self._preds.items()): if not v: del self._preds[k] for k, v in list(self._succs.items()): if not v: del self._succs[k] def add(self, pred, succ): assert pred != succ self._preds.setdefault(succ, set()).add(pred) self._succs.setdefault(pred, set()).add(succ) def remove(self, pred, succ): assert pred != succ try: preds = self._preds[succ] succs = self._succs[pred] except KeyError: # pragma: no cover raise ValueError('%r not a successor of anything' % succ) try: preds.remove(pred) succs.remove(succ) except KeyError: # pragma: no cover raise ValueError('%r not a successor of %r' % (succ, pred)) def is_step(self, step): return (step in self._preds or step in self._succs or step in self._nodes) def get_steps(self, final): if not self.is_step(final): raise ValueError('Unknown: %r' % final) result = [] todo = [] seen = set() todo.append(final) while todo: 
step = todo.pop(0) if step in seen: # if a step was already seen, # move it to the end (so it will appear earlier # when reversed on return) ... but not for the # final step, as that would be confusing for # users if step != final: result.remove(step) result.append(step) else: seen.add(step) result.append(step) preds = self._preds.get(step, ()) todo.extend(preds) return reversed(result) @property def strong_connections(self): #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm index_counter = [0] stack = [] lowlinks = {} index = {} result = [] graph = self._succs def strongconnect(node): # set the depth index for this node to the smallest unused index index[node] = index_counter[0] lowlinks[node] = index_counter[0] index_counter[0] += 1 stack.append(node) # Consider successors try: successors = graph[node] except Exception: successors = [] for successor in successors: if successor not in lowlinks: # Successor has not yet been visited strongconnect(successor) lowlinks[node] = min(lowlinks[node],lowlinks[successor]) elif successor in stack: # the successor is in the stack and hence in the current # strongly connected component (SCC) lowlinks[node] = min(lowlinks[node],index[successor]) # If `node` is a root node, pop the stack and generate an SCC if lowlinks[node] == index[node]: connected_component = [] while True: successor = stack.pop() connected_component.append(successor) if successor == node: break component = tuple(connected_component) # storing the result result.append(component) for node in graph: if node not in lowlinks: strongconnect(node) return result @property def dot(self): result = ['digraph G {'] for succ in self._preds: preds = self._preds[succ] for pred in preds: result.append(' %s -> %s;' % (pred, succ)) for node in self._nodes: result.append(' %s;' % node) result.append('}') return '\n'.join(result) # # Unarchiving functionality for zip, tar, tgz, tbz, whl # ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', 
'.tgz', '.tbz', '.whl') def unarchive(archive_filename, dest_dir, format=None, check=True): def check_path(path): if not isinstance(path, text_type): path = path.decode('utf-8') p = os.path.abspath(os.path.join(dest_dir, path)) if not p.startswith(dest_dir) or p[plen] != os.sep: raise ValueError('path outside destination: %r' % p) dest_dir = os.path.abspath(dest_dir) plen = len(dest_dir) archive = None if format is None: if archive_filename.endswith(('.zip', '.whl')): format = 'zip' elif archive_filename.endswith(('.tar.gz', '.tgz')): format = 'tgz' mode = 'r:gz' elif archive_filename.endswith(('.tar.bz2', '.tbz')): format = 'tbz' mode = 'r:bz2' elif archive_filename.endswith('.tar'): format = 'tar' mode = 'r' else: # pragma: no cover raise ValueError('Unknown format for %r' % archive_filename) try: if format == 'zip': archive = ZipFile(archive_filename, 'r') if check: names = archive.namelist() for name in names: check_path(name) else: archive = tarfile.open(archive_filename, mode) if check: names = archive.getnames() for name in names: check_path(name) if format != 'zip' and sys.version_info[0] < 3: # See Python issue 17153. If the dest path contains Unicode, # tarfile extraction fails on Python 2.x if a member path name # contains non-ASCII characters - it leads to an implicit # bytes -> unicode conversion using ASCII to decode. 
for tarinfo in archive.getmembers(): if not isinstance(tarinfo.name, text_type): tarinfo.name = tarinfo.name.decode('utf-8') archive.extractall(dest_dir) finally: if archive: archive.close() def zip_dir(directory): """zip a directory tree into a BytesIO object""" result = io.BytesIO() dlen = len(directory) with ZipFile(result, "w") as zf: for root, dirs, files in os.walk(directory): for name in files: full = os.path.join(root, name) rel = root[dlen:] dest = os.path.join(rel, name) zf.write(full, dest) return result # # Simple progress bar # UNITS = ('', 'K', 'M', 'G','T','P') class Progress(object): unknown = 'UNKNOWN' def __init__(self, minval=0, maxval=100): assert maxval is None or maxval >= minval self.min = self.cur = minval self.max = maxval self.started = None self.elapsed = 0 self.done = False def update(self, curval): assert self.min <= curval assert self.max is None or curval <= self.max self.cur = curval now = time.time() if self.started is None: self.started = now else: self.elapsed = now - self.started def increment(self, incr): assert incr >= 0 self.update(self.cur + incr) def start(self): self.update(self.min) return self def stop(self): if self.max is not None: self.update(self.max) self.done = True @property def maximum(self): return self.unknown if self.max is None else self.max @property def percentage(self): if self.done: result = '100 %' elif self.max is None: result = ' ?? %' else: v = 100.0 * (self.cur - self.min) / (self.max - self.min) result = '%3d %%' % v return result def format_duration(self, duration): if (duration <= 0) and self.max is None or self.cur == self.min: result = '??:??:??' 
#elif duration < 1: # result = '--:--:--' else: result = time.strftime('%H:%M:%S', time.gmtime(duration)) return result @property def ETA(self): if self.done: prefix = 'Done' t = self.elapsed #import pdb; pdb.set_trace() else: prefix = 'ETA ' if self.max is None: t = -1 elif self.elapsed == 0 or (self.cur == self.min): t = 0 else: #import pdb; pdb.set_trace() t = float(self.max - self.min) t /= self.cur - self.min t = (t - 1) * self.elapsed return '%s: %s' % (prefix, self.format_duration(t)) @property def speed(self): if self.elapsed == 0: result = 0.0 else: result = (self.cur - self.min) / self.elapsed for unit in UNITS: if result < 1000: break result /= 1000.0 return '%d %sB/s' % (result, unit) # # Glob functionality # RICH_GLOB = re.compile(r'\{([^}]*)\}') _CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') _CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') def iglob(path_glob): """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" if _CHECK_RECURSIVE_GLOB.search(path_glob): msg = """invalid glob %r: recursive glob "**" must be used alone""" raise ValueError(msg % path_glob) if _CHECK_MISMATCH_SET.search(path_glob): msg = """invalid glob %r: mismatching set marker '{' or '}'""" raise ValueError(msg % path_glob) return _iglob(path_glob) def _iglob(path_glob): rich_path_glob = RICH_GLOB.split(path_glob, 1) if len(rich_path_glob) > 1: assert len(rich_path_glob) == 3, rich_path_glob prefix, set, suffix = rich_path_glob for item in set.split(','): for path in _iglob(''.join((prefix, item, suffix))): yield path else: if '**' not in path_glob: for item in std_iglob(path_glob): yield item else: prefix, radical = path_glob.split('**', 1) if prefix == '': prefix = '.' 
if radical == '': radical = '*' else: # we support both radical = radical.lstrip('/') radical = radical.lstrip('\\') for path, dir, files in os.walk(prefix): path = os.path.normpath(path) for fn in _iglob(os.path.join(path, radical)): yield fn if ssl: from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname, CertificateError) # # HTTPSConnection which verifies certificates/matches domains # class HTTPSConnection(httplib.HTTPSConnection): ca_certs = None # set this to the path to the certs file (.pem) check_domain = True # only used if ca_certs is not None # noinspection PyPropertyAccess def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() if not hasattr(ssl, 'SSLContext'): # For 2.x if self.ca_certs: cert_reqs = ssl.CERT_REQUIRED else: cert_reqs = ssl.CERT_NONE self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, cert_reqs=cert_reqs, ssl_version=ssl.PROTOCOL_SSLv23, ca_certs=self.ca_certs) else: # pragma: no cover context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.options |= ssl.OP_NO_SSLv2 if self.cert_file: context.load_cert_chain(self.cert_file, self.key_file) kwargs = {} if self.ca_certs: context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(cafile=self.ca_certs) if getattr(ssl, 'HAS_SNI', False): kwargs['server_hostname'] = self.host self.sock = context.wrap_socket(sock, **kwargs) if self.ca_certs and self.check_domain: try: match_hostname(self.sock.getpeercert(), self.host) logger.debug('Host verified: %s', self.host) except CertificateError: # pragma: no cover self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() raise class HTTPSHandler(BaseHTTPSHandler): def __init__(self, ca_certs, check_domain=True): BaseHTTPSHandler.__init__(self) self.ca_certs = ca_certs self.check_domain = check_domain def _conn_maker(self, *args, **kwargs): """ This is called to create a connection instance. 
Normally you'd pass a connection class to do_open, but it doesn't actually check for a class, and just expects a callable. As long as we behave just as a constructor would have, we should be OK. If it ever changes so that we *must* pass a class, we'll create an UnsafeHTTPSConnection class which just sets check_domain to False in the class definition, and choose which one to pass to do_open. """ result = HTTPSConnection(*args, **kwargs) if self.ca_certs: result.ca_certs = self.ca_certs result.check_domain = self.check_domain return result def https_open(self, req): try: return self.do_open(self._conn_maker, req) except URLError as e: if 'certificate verify failed' in str(e.reason): raise CertificateError('Unable to verify server certificate ' 'for %s' % req.host) else: raise # # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The- # Middle proxy using HTTP listens on port 443, or an index mistakenly serves # HTML containing a http://xyz link when it should be https://xyz), # you can use the following handler class, which does not allow HTTP traffic. # # It works by inheriting from HTTPHandler - so build_opener won't add a # handler for HTTP itself. 
# class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): def http_open(self, req): raise URLError('Unexpected HTTP request on what should be a secure ' 'connection: %s' % req) # # XML-RPC with timeouts # _ver_info = sys.version_info[:2] if _ver_info == (2, 6): class HTTP(httplib.HTTP): def __init__(self, host='', port=None, **kwargs): if port == 0: # 0 means use port 0, not the default port port = None self._setup(self._connection_class(host, port, **kwargs)) if ssl: class HTTPS(httplib.HTTPS): def __init__(self, host='', port=None, **kwargs): if port == 0: # 0 means use port 0, not the default port port = None self._setup(self._connection_class(host, port, **kwargs)) class Transport(xmlrpclib.Transport): def __init__(self, timeout, use_datetime=0): self.timeout = timeout xmlrpclib.Transport.__init__(self, use_datetime) def make_connection(self, host): h, eh, x509 = self.get_host_info(host) if _ver_info == (2, 6): result = HTTP(h, timeout=self.timeout) else: if not self._connection or host != self._connection[0]: self._extra_headers = eh self._connection = host, httplib.HTTPConnection(h) result = self._connection[1] return result if ssl: class SafeTransport(xmlrpclib.SafeTransport): def __init__(self, timeout, use_datetime=0): self.timeout = timeout xmlrpclib.SafeTransport.__init__(self, use_datetime) def make_connection(self, host): h, eh, kwargs = self.get_host_info(host) if not kwargs: kwargs = {} kwargs['timeout'] = self.timeout if _ver_info == (2, 6): result = HTTPS(host, None, **kwargs) else: if not self._connection or host != self._connection[0]: self._extra_headers = eh self._connection = host, httplib.HTTPSConnection(h, None, **kwargs) result = self._connection[1] return result class ServerProxy(xmlrpclib.ServerProxy): def __init__(self, uri, **kwargs): self.timeout = timeout = kwargs.pop('timeout', None) # The above classes only come into play if a timeout # is specified if timeout is not None: scheme, _ = splittype(uri) use_datetime = 
kwargs.get('use_datetime', 0) if scheme == 'https': tcls = SafeTransport else: tcls = Transport kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime) self.transport = t xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) # # CSV functionality. This is provided because on 2.x, the csv module can't # handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. # def _csv_open(fn, mode, **kwargs): if sys.version_info[0] < 3: mode += 'b' else: kwargs['newline'] = '' return open(fn, mode, **kwargs) class CSVBase(object): defaults = { 'delimiter': str(','), # The strs are used because we need native 'quotechar': str('"'), # str in the csv API (2.x won't take 'lineterminator': str('\n') # Unicode) } def __enter__(self): return self def __exit__(self, *exc_info): self.stream.close() class CSVReader(CSVBase): def __init__(self, **kwargs): if 'stream' in kwargs: stream = kwargs['stream'] if sys.version_info[0] >= 3: # needs to be a text stream stream = codecs.getreader('utf-8')(stream) self.stream = stream else: self.stream = _csv_open(kwargs['path'], 'r') self.reader = csv.reader(self.stream, **self.defaults) def __iter__(self): return self def next(self): result = next(self.reader) if sys.version_info[0] < 3: for i, item in enumerate(result): if not isinstance(item, text_type): result[i] = item.decode('utf-8') return result __next__ = next class CSVWriter(CSVBase): def __init__(self, fn, **kwargs): self.stream = _csv_open(fn, 'w') self.writer = csv.writer(self.stream, **self.defaults) def writerow(self, row): if sys.version_info[0] < 3: r = [] for item in row: if isinstance(item, text_type): item = item.encode('utf-8') r.append(item) row = r self.writer.writerow(row) # # Configurator functionality # class Configurator(BaseConfigurator): value_converters = dict(BaseConfigurator.value_converters) value_converters['inc'] = 'inc_convert' def __init__(self, config, base=None): super(Configurator, self).__init__(config) self.base = base or os.getcwd() 
def configure_custom(self, config): def convert(o): if isinstance(o, (list, tuple)): result = type(o)([convert(i) for i in o]) elif isinstance(o, dict): if '()' in o: result = self.configure_custom(o) else: result = {} for k in o: result[k] = convert(o[k]) else: result = self.convert(o) return result c = config.pop('()') if not callable(c): c = self.resolve(c) props = config.pop('.', None) # Check for valid identifiers args = config.pop('[]', ()) if args: args = tuple([convert(o) for o in args]) items = [(k, convert(config[k])) for k in config if valid_ident(k)] kwargs = dict(items) result = c(*args, **kwargs) if props: for n, v in props.items(): setattr(result, n, convert(v)) return result def __getitem__(self, key): result = self.config[key] if isinstance(result, dict) and '()' in result: self.config[key] = result = self.configure_custom(result) return result def inc_convert(self, value): """Default converter for the inc:// protocol.""" if not os.path.isabs(value): value = os.path.join(self.base, value) with codecs.open(value, 'r', encoding='utf-8') as f: result = json.load(f) return result # # Mixin for running subprocesses and capturing their output # class SubprocessMixin(object): def __init__(self, verbose=False, progress=None): self.verbose = verbose self.progress = progress def reader(self, stream, context): """ Read lines from a subprocess' output stream and either pass to a progress callable (if specified) or write progress information to sys.stderr. 
""" progress = self.progress verbose = self.verbose while True: s = stream.readline() if not s: break if progress is not None: progress(s, context) else: if not verbose: sys.stderr.write('.') else: sys.stderr.write(s.decode('utf-8')) sys.stderr.flush() stream.close() def run_command(self, cmd, **kwargs): p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout')) t1.start() t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr')) t2.start() p.wait() t1.join() t2.join() if self.progress is not None: self.progress('done.', 'main') elif self.verbose: sys.stderr.write('done.\n') return p def normalize_name(name): """Normalize a python package name a la PEP 503""" # https://www.python.org/dev/peps/pep-0503/#normalized-names return re.sub('[-_.]+', '-', name).lower()
gpl-3.0
redhat-openstack/cinder
cinder/tests/scheduler/test_capacity_weigher.py
3
4067
# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Capacity Weigher. """ import mock from oslo.config import cfg from cinder import context from cinder.openstack.common.scheduler.weights import HostWeightHandler from cinder.scheduler.weights.capacity import CapacityWeigher from cinder import test from cinder.tests.scheduler import fakes from cinder.volume import utils CONF = cfg.CONF class CapacityWeigherTestCase(test.TestCase): def setUp(self): super(CapacityWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() self.weight_handler = HostWeightHandler('cinder.scheduler.weights') def _get_weighed_host(self, hosts, weight_properties=None): if weight_properties is None: weight_properties = {} return self.weight_handler.get_weighed_objects([CapacityWeigher], hosts, weight_properties)[0] @mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic') def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False): ctxt = context.get_admin_context() fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic, disabled=disabled) host_states = self.host_manager.get_all_host_states(ctxt) _mock_service_get_all_by_topic.assert_called_once_with( ctxt, CONF.volume_topic, disabled=disabled) return host_states def test_default_of_spreading_first(self): hostinfo_list = self._get_all_hosts() # host1: free_capacity_gb=1024, free=1024*(1-0.1) # host2: free_capacity_gb=300, free=300*(1-0.1) 
# host3: free_capacity_gb=512, free=256 # host4: free_capacity_gb=200, free=200*(1-0.05) # host5: free_capacity_gb=unknown free=-1 # so, host1 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(weighed_host.weight, 921.0) self.assertEqual( utils.extract_host(weighed_host.obj.host), 'host1') def test_capacity_weight_multiplier1(self): self.flags(capacity_weight_multiplier=-1.0) hostinfo_list = self._get_all_hosts() # host1: free_capacity_gb=1024, free=-1024*(1-0.1) # host2: free_capacity_gb=300, free=-300*(1-0.1) # host3: free_capacity_gb=512, free=-256 # host4: free_capacity_gb=200, free=-200*(1-0.05) # host5: free_capacity_gb=unknown free=-float('inf') # so, host4 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(weighed_host.weight, -190.0) self.assertEqual( utils.extract_host(weighed_host.obj.host), 'host4') def test_capacity_weight_multiplier2(self): self.flags(capacity_weight_multiplier=2.0) hostinfo_list = self._get_all_hosts() # host1: free_capacity_gb=1024, free=1024*(1-0.1)*2 # host2: free_capacity_gb=300, free=300*(1-0.1)*2 # host3: free_capacity_gb=512, free=256*2 # host4: free_capacity_gb=200, free=200*(1-0.05)*2 # host5: free_capacity_gb=unknown free=-2 # so, host1 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(weighed_host.weight, 921.0 * 2) self.assertEqual( utils.extract_host(weighed_host.obj.host), 'host1')
apache-2.0
TiVoMaker/boto
boto/ecs/__init__.py
153
4177
# Copyright (c) 2010 Chris Moyer http://coredumped.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
import boto
from boto.connection import AWSQueryConnection, AWSAuthConnection
from boto.exception import BotoServerError
import time
import urllib
import xml.sax
from boto.ecs.item import ItemSet
from boto import handler


class ECSConnection(AWSQueryConnection):
    """
    ECommerce Connection

    Thin client for the Amazon Product Advertising (ECS) API, built on
    boto's AWSQueryConnection signing/transport machinery.

    For more information on how to use this module see:

    http://blog.coredumped.org/2010/09/search-for-books-on-amazon-using-boto.html
    """

    # API version sent with every request (see get_response).
    APIVersion = '2010-11-01'

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, host='ecs.amazonaws.com',
                 debug=0, https_connection_factory=None, path='/',
                 security_token=None, profile_name=None):
        # All parameters are forwarded verbatim to AWSQueryConnection;
        # only the default host differs from the base class.
        super(ECSConnection, self).__init__(aws_access_key_id, aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port, proxy_user,
                                    proxy_pass, host, debug, https_connection_factory,
                                    path, security_token=security_token,
                                    profile_name=profile_name)

    def _required_auth_capability(self):
        # Tells boto's auth framework to use the ECS request signer.
        return ['ecs']

    def get_response(self, action, params, page=0, itemSet=None):
        """
        Utility method to handle calls to ECS and parsing of responses.

        :param action: ECS operation name (e.g. 'ItemSearch'), placed in
            the 'Operation' query parameter.
        :param params: dict of query parameters; mutated in place to add
            Service/Operation (and ItemPage when ``page`` is truthy).
        :param page: result page number to request; 0 means "not paged".
        :param itemSet: optional existing ItemSet to append results to
            (used when fetching subsequent pages); a new one is created
            when None.
        :raises BotoServerError: on a non-200 HTTP status, or when the
            parsed result set reports itself invalid.
        :return: the populated ItemSet.
        """
        params['Service'] = "AWSECommerceService"
        params['Operation'] = action
        if page:
            params['ItemPage'] = page
        response = self.make_request(None, params, "/onca/xml")
        body = response.read().decode('utf-8')
        boto.log.debug(body)

        if response.status != 200:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise BotoServerError(response.status, response.reason, body)

        if itemSet is None:
            rs = ItemSet(self, action, params, page)
        else:
            rs = itemSet
        # SAX-parse the XML body into the ItemSet via boto's generic handler.
        h = handler.XmlHandler(rs, self)
        xml.sax.parseString(body.encode('utf-8'), h)
        if not rs.is_valid:
            # ECS can return HTTP 200 with an error payload; surface the
            # first reported error as a BotoServerError.
            raise BotoServerError(response.status, '{Code}: {Message}'.format(**rs.errors[0]))
        return rs

    #
    # Group methods
    #

    def item_search(self, search_index, **params):
        """
        Returns items that satisfy the search criteria, including one or more search
        indices.

        :param search_index: value for the required 'SearchIndex' parameter
            (e.g. 'Books'); added to ``params`` before dispatch.

        For a full list of search terms,
        :see: http://docs.amazonwebservices.com/AWSECommerceService/2010-09-01/DG/index.html?ItemSearch.html
        """
        params['SearchIndex'] = search_index
        return self.get_response('ItemSearch', params)

    def item_lookup(self, **params):
        """
        Returns items that satisfy the lookup query.

        For a full list of parameters, see:
        http://s3.amazonaws.com/awsdocs/Associates/2011-08-01/prod-adv-api-dg-2011-08-01.pdf
        """
        return self.get_response('ItemLookup', params)
mit
Eomys/MoSQITo
mosqito/methods/Audio/compute_level.py
1
2691
# -*- coding: utf-8 -*-

from numpy import log10, linspace, mean, sqrt

from SciDataTool import Data1D, DataTime


def compute_level(self, nb_points=[], start=[], stop=[]):
    """Overall Sound Pressure Level calculation from the time signal

    The SPL can be computed according to a specified number of points
    or during a given time frame. Reads ``self.signal.values`` and
    ``self.fs``; writes the result to ``self.level`` (a DataTime when
    ``nb_points`` is given, otherwise a plain float in dB).

    Parameter:
    ----------
    nb_points : int, optional
        number of SPL values to compute over the (framed) signal;
        ``[]`` (the default) means "compute one overall level".
    start : int or float, optional
        start of the analysis frame in seconds; ``[]`` means signal start.
    stop : int or float, optional
        end of the analysis frame in seconds; ``[]`` means signal end.
        NOTE(review): start/stop are only honored when BOTH are given —
        supplying just one is silently ignored.

    Output:
    -------
    level : numpy.array
        SPL in dB (re 20 µPa), stored on ``self.level``
    """
    # Check the inputs
    if nb_points != []:
        if type(nb_points) != int:
            raise TypeError("ERROR : Number of points should be an integer")
        if nb_points < 1 or nb_points > len(self.signal.values):
            raise ValueError(
                "ERROR : Number of points should be between 1 and the length of the given signal"
            )
    if start != [] and stop != []:
        if type(start) != int and type(start) != float:
            raise TypeError("ERROR : Start (in sec) should be an integer or a float ")
        if type(stop) != int and type(stop) != float:
            raise TypeError("ERROR : Stop (in sec) should be an integer or a float ")
        # Both bounds must lie inside [0, signal duration].
        if (
            start < 0
            or stop < 0
            or start > len(self.signal.values) / self.fs
            or stop > len(self.signal.values) / self.fs
        ):
            raise ValueError(
                "ERROR : Time frame should be between 0s and the duration of the signal"
            )
        if start == stop:
            raise ValueError("ERROR : Start and stop values must be different")

    # Initialization
    level = []

    # Case of a given time frame: slice the signal by sample index
    if start != [] and stop != []:
        frame = self.signal.values[int(start * self.fs) : int(stop * self.fs)]
    else:
        # No frame given: use the whole signal and set start/stop so the
        # time axis below spans the full duration.
        start = 0
        stop = len(self.signal.values) / self.fs
        frame = self.signal.values

    # Case of a given number of points: one RMS level per sub-frame
    if nb_points != []:
        time = Data1D(
            name="time", unit="s", values=linspace(start, stop, num=nb_points)
        )
        # NOTE(review): integer division truncates, so up to
        # len(frame) % nb_points trailing samples are dropped.
        frame_size = int(len(frame) / nb_points)
        for i in range(nb_points):
            frame_i = frame[i * frame_size : i * frame_size + frame_size]
            # RMS pressure of the sub-frame
            peff = sqrt(mean(frame_i ** 2))
            # dB re 2e-5 Pa (standard reference sound pressure)
            level.append(10 * log10((peff ** 2 / (2e-05) ** 2)))
        self.level = DataTime(
            name="Sound Pressure Level",
            symbol="SPL",
            unit="dB",
            axes=[time],
            values=level,
        )
    else:
        # Single overall level over the whole frame
        peff = sqrt(mean(frame ** 2))
        self.level = 10 * log10((peff ** 2 / (2e-05) ** 2))
apache-2.0
jslootbeek/roundware-server
roundware/api1/urls.py
3
1338
# Roundware Server is released under the GNU Affero General Public License v3. # See COPYRIGHT.txt, AUTHORS.txt, and LICENSE.txt in the project root directory. from __future__ import unicode_literals from django.conf.urls import patterns, url, include from roundware.api1 import views import logging logger = logging.getLogger(__name__) urlpatterns = patterns('', # V1 API url(r'^$', 'api1.views.operations'), url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')), # V1 DRF API - V1 is partially REST. url(r'^rest/$', views.APIRootView.as_view()), url(r'^rest/asset/$', views.AssetList.as_view(), name='api1-asset'), url(r'^rest/assetlocation/$', views.AssetLocationList.as_view(), name='api1-assetlocation'), url(r'^rest/assetlocation/(?P<pk>[0-9]+)/$', views.AssetLocationDetail.as_view(), name='api1-assetlocation-detail'), url(r'^rest/project/$', views.ProjectList.as_view(), name='api1-project'), url(r'^rest/event/$', views.EventList.as_view(), name='api1-event'), url(r'^rest/session/$', views.SessionList.as_view(), name='api1-session'), url(r'^rest/listeninghistoryitem/$', views.ListeningHistoryItemList.as_view(), name='api1-listeninghistoryitem'), )
agpl-3.0
tiagormk/gem5-hmp
src/arch/x86/isa/insts/general_purpose/data_transfer/move.py
44
9098
# Copyright (c) 2007-2008 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Gabe Black microcode = ''' # # Regular moves # def macroop MOV_R_MI { limm t1, imm, dataSize=asz ld reg, seg, [1, t0, t1] }; def macroop MOV_MI_R { limm t1, imm, dataSize=asz st reg, seg, [1, t0, t1] }; def macroop MOV_R_R { mov reg, reg, regm }; def macroop MOV_M_R { st reg, seg, sib, disp }; def macroop MOV_P_R { rdip t7 st reg, seg, riprel, disp }; def macroop MOV_R_M { ld reg, seg, sib, disp }; def macroop MOV_R_P { rdip t7 ld reg, seg, riprel, disp }; def macroop MOV_R_I { limm reg, imm }; def macroop MOV_M_I { limm t1, imm st t1, seg, sib, disp }; def macroop MOV_P_I { rdip t7 limm t1, imm st t1, seg, riprel, disp }; # # Sign extending moves # def macroop MOVSXD_R_R { sexti reg, regm, 31 }; def macroop MOVSXD_R_M { ld t1, seg, sib, disp, dataSize=4 sexti reg, t1, 31 }; def macroop MOVSXD_R_P { rdip t7 ld t1, seg, riprel, disp, dataSize=4 sexti reg, t1, 31 }; def macroop MOVSX_B_R_R { mov t1, t1, regm, dataSize=1 sexti reg, t1, 7 }; def macroop MOVSX_B_R_M { ld t1, seg, sib, disp, dataSize=1 sexti reg, t1, 7 }; def macroop MOVSX_B_R_P { rdip t7 ld t1, seg, riprel, disp, dataSize=1 sexti reg, t1, 7 }; def macroop MOVSX_W_R_R { sexti reg, regm, 15 }; def macroop MOVSX_W_R_M { ld reg, seg, sib, disp, dataSize=2 sexti reg, reg, 15 }; def macroop MOVSX_W_R_P { rdip t7 ld reg, seg, riprel, disp, dataSize=2 sexti reg, reg, 15 }; # # Zero extending moves # def macroop MOVZX_B_R_R { mov t1, t1, regm, dataSize=1 zexti reg, t1, 7 }; def 
macroop MOVZX_B_R_M { ld t1, seg, sib, disp, dataSize=1 zexti reg, t1, 7 }; def macroop MOVZX_B_R_P { rdip t7 ld t1, seg, riprel, disp, dataSize=1 zexti reg, t1, 7 }; def macroop MOVZX_W_R_R { zexti reg, regm, 15 }; def macroop MOVZX_W_R_M { ld t1, seg, sib, disp, dataSize=2 zexti reg, t1, 15 }; def macroop MOVZX_W_R_P { rdip t7 ld t1, seg, riprel, disp, dataSize=2 zexti reg, t1, 15 }; def macroop MOV_C_R { .serializing .adjust_env maxOsz wrcr reg, regm }; def macroop MOV_R_C { .serializing .adjust_env maxOsz rdcr reg, regm }; def macroop MOV_D_R { .serializing .adjust_env maxOsz wrdr reg, regm }; def macroop MOV_R_D { .adjust_env maxOsz rddr reg, regm }; def macroop MOV_R_S { rdsel reg, regm }; def macroop MOV_M_S { rdsel t1, reg st t1, seg, sib, disp, dataSize=2 }; def macroop MOV_P_S { rdip t7 rdsel t1, reg st t1, seg, riprel, disp, dataSize=2 }; def macroop MOV_REAL_S_R { zexti t2, regm, 15, dataSize=8 slli t3, t2, 4, dataSize=8 wrsel reg, regm wrbase reg, t3 }; def macroop MOV_REAL_S_M { ld t1, seg, sib, disp, dataSize=2 zexti t2, t1, 15, dataSize=8 slli t3, t2, 4, dataSize=8 wrsel reg, t1 wrbase reg, t3 }; def macroop MOV_REAL_S_P { panic "RIP relative addressing shouldn't happen in real mode" }; def macroop MOV_S_R { andi t0, regm, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t2, regm, 0xF8, dataSize=8 andi t0, regm, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8 br label("processDescriptor") globalDescriptor: ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8 processDescriptor: chks regm, t3, dataSize=8 wrdl reg, t3, regm wrsel reg, regm }; def macroop MOV_S_M { ld t1, seg, sib, disp, dataSize=2 andi t0, t1, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t2, t1, 0xF8, dataSize=8 andi t0, t1, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t2], dataSize=8, 
addressSize=8 br label("processDescriptor") globalDescriptor: ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8 processDescriptor: chks t1, t3, dataSize=8 wrdl reg, t3, t1 wrsel reg, t1 }; def macroop MOV_S_P { rdip t7 ld t1, seg, riprel, disp, dataSize=2 andi t0, t1, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t2, t1, 0xF8, dataSize=8 andi t0, t1, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8 br label("processDescriptor") globalDescriptor: ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8 processDescriptor: chks t1, t3, dataSize=8 wrdl reg, t3, t1 wrsel reg, t1 }; def macroop MOVSS_S_R { andi t0, regm, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t2, regm, 0xF8, dataSize=8 andi t0, regm, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8 br label("processDescriptor") globalDescriptor: ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8 processDescriptor: chks regm, t3, SSCheck, dataSize=8 wrdl reg, t3, regm wrsel reg, regm }; def macroop MOVSS_S_M { ld t1, seg, sib, disp, dataSize=2 andi t0, t1, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t2, t1, 0xF8, dataSize=8 andi t0, t1, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8 br label("processDescriptor") globalDescriptor: ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8 processDescriptor: chks t1, t3, SSCheck, dataSize=8 wrdl reg, t3, t1 wrsel reg, t1 }; def macroop MOVSS_S_P { rdip t7 ld t1, seg, riprel, disp, dataSize=2 andi t0, t1, 0xFC, flags=(EZF,), dataSize=2 br label("processDescriptor"), flags=(CEZF,) andi t2, t1, 0xF8, dataSize=8 andi t0, t1, 0x4, flags=(EZF,), dataSize=2 br label("globalDescriptor"), flags=(CEZF,) ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8 br 
label("processDescriptor") globalDescriptor: ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8 processDescriptor: chks t1, t3, SSCheck, dataSize=8 wrdl reg, t3, t1 wrsel reg, t1 }; def macroop MOVNTI_M_R { st reg, seg, sib, disp }; def macroop MOVNTI_P_R { rdip t7 st reg, seg, riprel, disp }; def macroop MOVD_XMM_R { mov2fp xmml, regm, srcSize=dsz, destSize=8 lfpimm xmmh, 0 }; def macroop MOVD_XMM_M { ldfp xmml, seg, sib, disp, dataSize=dsz lfpimm xmmh, 0 }; def macroop MOVD_XMM_P { rdip t7 ldfp xmml, seg, riprel, disp, dataSize=dsz lfpimm xmmh, 0 }; def macroop MOVD_R_XMM { mov2int reg, xmmlm, size=dsz }; def macroop MOVD_M_XMM { stfp xmml, seg, sib, disp, dataSize=dsz }; def macroop MOVD_P_XMM { rdip t7 stfp xmml, seg, riprel, disp, dataSize=dsz }; ''' #let {{ # class MOVD(Inst): # "GenFault ${new UnimpInstFault}" #}};
bsd-3-clause
zky829/jsPDF
wscript.py
55
5235
#! /usr/bin/python def default(context): minifyfiles(context) def minifyfiles(context): src = context.Node('jspdf.js') dst = src.parent + 'dist/' + src.name - '.js' + '.source.js' dst.text = src.text.replace( "${buildDate}", timeUTC() ).replace( "${commitID}", getCommitIDstring() ) + \ (src - 'jspdf.js' + 'libs/polyfill.js').text + \ (src - '.js' + '.plugin.addimage.js').text + \ (src - '.js' + '.plugin.autoprint.js').text + \ (src - '.js' + '.plugin.cell.js').text + \ (src - '.js' + '.plugin.from_html.js').text + \ (src - '.js' + '.plugin.sillysvgrenderer.js').text + \ (src - '.js' + '.plugin.split_text_to_size.js').text + \ (src - '.js' + '.plugin.standard_fonts_metrics.js').text + \ (src - '.js' + '.plugin.total_pages.js').text + \ (src - 'jspdf.js' + 'libs/Blob.js/Blob.js').text + \ (src - 'jspdf.js' + 'libs/FileSaver.js/FileSaver.js').text + \ (src - 'jspdf.js' + 'libs/deflate.js').text + \ (src - 'jspdf.js' + 'libs/adler32cs.js/adler32cs.js').text # (src - '.js' + '.plugin.from_html.js').text + \ # minified = dst - '.source.js' + '.min.js' print("=== Compressing jsPDF and select plugins into " + minified.name) minified.text = compress_with_closure_compiler( dst.text ) # AMD-compatible version: # (minified - '.min.js' + '.amd.min.js').text = """;(function(){ # %s # ;define(function(){return jsPDF})})(); # """ % minified.text # jQuery "NoConflict" version: # only needed if some of the modules compiled into jsPDF need $ # one such module is fromHTML # (minified - '.min.js' + '.noconflict.min.js').text = """;(function($){ # %s # })(jQuery); # """ % minified.text def docs(context): ''' java -jar %jsdocbindir%\jsrun.jar %jsdocbindir%\app\run.js -v %rootdir%\jspdf.js -d=%rootdir%\doc -t=%rootdir%\tools\jsdoc_template ''' jsdocBinDir = context.Node('~/bin/jsdoc-toolkit/') codefile = context.Node('jspdf.js') destinationFolder = context.Node('doc/') templateFolder = context.Node('tools/jsdoc_template/') import subprocess subprocess.call( [ 'java' , '-jar' , 
(jsdocBinDir + 'jsrun.jar').absolutepath , (jsdocBinDir + 'app/run.js').absolutepath , '-v' , codefile.absolutepath , '-d='+destinationFolder.absolutepath , '-t='+templateFolder.absolutepath ] ) def timeUTC(): import datetime return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M") def getCommitIDstring(): import subprocess if not hasattr( subprocess, "check_output"): # let's not bother emulating it. Not important return "" else: return subprocess.check_output( [ 'git' , 'rev-parse' , 'HEAD' ] ).strip() def compress_with_closure_compiler(code, compression_level = None): '''Sends text of JavaScript code to Google's Closure Compiler API Returns text of compressed code. ''' # script (with some modifications) from # https://developers.google.com/closure/compiler/docs/api-tutorial1 import httplib, urllib, sys compression_levels = [ 'WHITESPACE_ONLY' , 'SIMPLE_OPTIMIZATIONS' , 'ADVANCED_OPTIMIZATIONS' ] if compression_level not in compression_levels: compression_level = compression_levels[1] # simple optimizations # Define the parameters for the POST request and encode them in # a URL-safe format. params = urllib.urlencode([ ('js_code', code) , ('compilation_level', compression_level) , ('output_format', 'json') , ('output_info', 'compiled_code') , ('output_info', 'warnings') , ('output_info', 'errors') , ('output_info', 'statistics') # , ('output_file_name', 'default.js') # , ('js_externs', 'javascript with externs') # only used on Advanced. ]) # Always use the following value for the Content-type header. 
headers = { "Content-type": "application/x-www-form-urlencoded" } conn = httplib.HTTPConnection('closure-compiler.appspot.com') conn.request('POST', '/compile', params, headers) response = conn.getresponse() if response.status != 200: raise Exception("Compilation server responded with non-OK status of " + str(response.status)) compressedcode = response.read() conn.close() import json # needs python 2.6+ or simplejson module for earlier parts = json.loads(compressedcode) if 'errors' in parts: prettyerrors = ['\nCompilation Error:'] for error in parts['errors']: prettyerrors.append( "\nln %s, ch %s, '%s' - %s" % ( error['lineno'] , error['charno'] , error['line'] , error['error'] ) ) raise Exception(''.join(prettyerrors)) return parts['compiledCode'] if __name__ == '__main__': print("This is a Wak build automation tool script. Please, get Wak on GitHub and run it against the folder containing this automation script.")
mit
pnigos/gyp
test/mac/gyptest-xcode-gcc.py
212
1781
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that xcode-style GCC_... settings are handled properly. """ import TestGyp import os import subprocess import sys def IgnoreOutput(string, expected_string): return True def CompilerVersion(compiler): stdout = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT) return stdout.rstrip('\n') def CompilerSupportsWarnAboutInvalidOffsetOfMacro(test): # "clang" does not support the "-Winvalid-offsetof" flag, and silently # ignore it. Starting with Xcode 5.0.0, "gcc" is just a "clang" binary with # some hard-coded include path hack, so use the output of "-v" to detect if # the compiler supports the flag or not. return 'clang' not in CompilerVersion('/usr/bin/cc') if sys.platform == 'darwin': test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode']) CHDIR = 'xcode-gcc' test.run_gyp('test.gyp', chdir=CHDIR) # List of targets that'll pass. It expects targets of the same name with # '-fail' appended that'll fail to build. targets = [ 'warn_about_missing_newline', ] # clang doesn't warn on invalid offsetofs, it silently ignores # -Wno-invalid-offsetof. if CompilerSupportsWarnAboutInvalidOffsetOfMacro(test): targets.append('warn_about_invalid_offsetof_macro') for target in targets: test.build('test.gyp', target, chdir=CHDIR) test.built_file_must_exist(target, chdir=CHDIR) fail_target = target + '-fail' test.build('test.gyp', fail_target, chdir=CHDIR, status=None, stderr=None, match=IgnoreOutput) test.built_file_must_not_exist(fail_target, chdir=CHDIR) test.pass_test()
bsd-3-clause
0k/odoo
addons/account/account_invoice.py
5
78350
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import itertools from lxml import etree from openerp import models, fields, api, _ from openerp.exceptions import except_orm, Warning, RedirectWarning import openerp.addons.decimal_precision as dp # mapping invoice type to journal type TYPE2JOURNAL = { 'out_invoice': 'sale', 'in_invoice': 'purchase', 'out_refund': 'sale_refund', 'in_refund': 'purchase_refund', } # mapping invoice type to refund type TYPE2REFUND = { 'out_invoice': 'out_refund', # Customer Invoice 'in_invoice': 'in_refund', # Supplier Invoice 'out_refund': 'out_invoice', # Customer Refund 'in_refund': 'in_invoice', # Supplier Refund } MAGIC_COLUMNS = ('id', 'create_uid', 'create_date', 'write_uid', 'write_date') class account_invoice(models.Model): _name = "account.invoice" _inherit = ['mail.thread'] _description = "Invoice" _order = "number desc, id desc" _track = { 'type': { }, 'state': { 'account.mt_invoice_paid': lambda self, cr, uid, obj, ctx=None: obj.state == 'paid' and obj.type in ('out_invoice', 'out_refund'), 'account.mt_invoice_validated': lambda self, cr, uid, obj, 
ctx=None: obj.state == 'open' and obj.type in ('out_invoice', 'out_refund'), }, } @api.one @api.depends('invoice_line.price_subtotal', 'tax_line.amount') def _compute_amount(self): self.amount_untaxed = sum(line.price_subtotal for line in self.invoice_line) self.amount_tax = sum(line.amount for line in self.tax_line) self.amount_total = self.amount_untaxed + self.amount_tax @api.model def _default_journal(self): inv_type = self._context.get('type', 'out_invoice') inv_types = inv_type if isinstance(inv_type, list) else [inv_type] company_id = self._context.get('company_id', self.env.user.company_id.id) domain = [ ('type', 'in', filter(None, map(TYPE2JOURNAL.get, inv_types))), ('company_id', '=', company_id), ] return self.env['account.journal'].search(domain, limit=1) @api.model def _default_currency(self): journal = self._default_journal() return journal.currency or journal.company_id.currency_id @api.model @api.returns('account.analytic.journal', lambda r: r.id) def _get_journal_analytic(self, inv_type): """ Return the analytic journal corresponding to the given invoice type. 
""" journal_type = TYPE2JOURNAL.get(inv_type, 'sale') journal = self.env['account.analytic.journal'].search([('type', '=', journal_type)], limit=1) if not journal: raise except_orm(_('No Analytic Journal!'), _("You must define an analytic journal of type '%s'!") % (journal_type,)) return journal[0] @api.one @api.depends('account_id', 'move_id.line_id.account_id', 'move_id.line_id.reconcile_id') def _compute_reconciled(self): self.reconciled = self.test_paid() @api.model def _get_reference_type(self): return [('none', _('Free Reference'))] @api.one @api.depends( 'state', 'currency_id', 'invoice_line.price_subtotal', 'move_id.line_id.account_id.type', 'move_id.line_id.amount_residual', 'move_id.line_id.amount_residual_currency', 'move_id.line_id.currency_id', 'move_id.line_id.reconcile_partial_id.line_partial_ids.invoice.type', ) def _compute_residual(self): nb_inv_in_partial_rec = max_invoice_id = 0 self.residual = 0.0 for line in self.sudo().move_id.line_id: if line.account_id.type in ('receivable', 'payable'): if line.currency_id == self.currency_id: self.residual += line.amount_residual_currency else: # ahem, shouldn't we use line.currency_id here? 
from_currency = line.company_id.currency_id.with_context(date=line.date) self.residual += from_currency.compute(line.amount_residual, self.currency_id) # we check if the invoice is partially reconciled and if there # are other invoices involved in this partial reconciliation for pline in line.reconcile_partial_id.line_partial_ids: if pline.invoice and self.type == pline.invoice.type: nb_inv_in_partial_rec += 1 # store the max invoice id as for this invoice we will # make a balance instead of a simple division max_invoice_id = max(max_invoice_id, pline.invoice.id) if nb_inv_in_partial_rec: # if there are several invoices in a partial reconciliation, we # split the residual by the number of invoices to have a sum of # residual amounts that matches the partner balance new_value = self.currency_id.round(self.residual / nb_inv_in_partial_rec) if self.id == max_invoice_id: # if it's the last the invoice of the bunch of invoices # partially reconciled together, we make a balance to avoid # rounding errors self.residual = self.residual - ((nb_inv_in_partial_rec - 1) * new_value) else: self.residual = new_value # prevent the residual amount on the invoice to be less than 0 self.residual = max(self.residual, 0.0) @api.one @api.depends( 'move_id.line_id.account_id', 'move_id.line_id.reconcile_id.line_id', 'move_id.line_id.reconcile_partial_id.line_partial_ids', ) def _compute_move_lines(self): # Give Journal Items related to the payment reconciled to this invoice. # Return partial and total payments related to the selected invoice. 
self.move_lines = self.env['account.move.line'] if not self.move_id: return data_lines = self.move_id.line_id.filtered(lambda l: l.account_id == self.account_id) partial_lines = self.env['account.move.line'] for data_line in data_lines: if data_line.reconcile_id: lines = data_line.reconcile_id.line_id elif data_line.reconcile_partial_id: lines = data_line.reconcile_partial_id.line_partial_ids else: lines = self.env['account_move_line'] partial_lines += data_line self.move_lines = lines - partial_lines @api.one @api.depends( 'move_id.line_id.reconcile_id.line_id', 'move_id.line_id.reconcile_partial_id.line_partial_ids', ) def _compute_payments(self): partial_lines = lines = self.env['account.move.line'] for line in self.move_id.line_id: if line.account_id != self.account_id: continue if line.reconcile_id: lines |= line.reconcile_id.line_id elif line.reconcile_partial_id: lines |= line.reconcile_partial_id.line_partial_ids partial_lines += line self.payment_ids = (lines - partial_lines).sorted() name = fields.Char(string='Reference/Description', index=True, readonly=True, states={'draft': [('readonly', False)]}) origin = fields.Char(string='Source Document', help="Reference of the document that produced this invoice.", readonly=True, states={'draft': [('readonly', False)]}) supplier_invoice_number = fields.Char(string='Supplier Invoice Number', help="The reference of this invoice as provided by the supplier.", readonly=True, states={'draft': [('readonly', False)]}) type = fields.Selection([ ('out_invoice','Customer Invoice'), ('in_invoice','Supplier Invoice'), ('out_refund','Customer Refund'), ('in_refund','Supplier Refund'), ], string='Type', readonly=True, index=True, change_default=True, default=lambda self: self._context.get('type', 'out_invoice'), track_visibility='always') number = fields.Char(related='move_id.name', store=True, readonly=True, copy=False) internal_number = fields.Char(string='Invoice Number', readonly=True, default=False, copy=False, 
        # NOTE(review): the line below closes a field declaration (presumably
        # `number`) whose opening lies before this chunk boundary — confirm
        # against the full file.
        help="Unique number of the invoice, computed automatically when the invoice is created.")

    # Free-form partner reference printed/matched on the invoice.
    reference = fields.Char(string='Invoice Reference',
        help="The partner reference of this invoice.")
    reference_type = fields.Selection('_get_reference_type', string='Payment Reference',
        required=True, readonly=True,
        states={'draft': [('readonly', False)]},
        default='none')
    comment = fields.Text('Additional Information')

    # Workflow state of the invoice; drives readonly states of most fields.
    state = fields.Selection([
            ('draft','Draft'),
            ('proforma','Pro-forma'),
            ('proforma2','Pro-forma'),
            ('open','Open'),
            ('paid','Paid'),
            ('cancel','Cancelled'),
        ], string='Status', index=True, readonly=True, default='draft',
        track_visibility='onchange', copy=False,
        help=" * The 'Draft' status is used when a user is encoding a new and unconfirmed Invoice.\n"
             " * The 'Pro-forma' when invoice is in Pro-forma status,invoice does not have an invoice number.\n"
             " * The 'Open' status is used when user create invoice,a invoice number is generated.Its in open status till user does not pay invoice.\n"
             " * The 'Paid' status is set automatically when the invoice is paid. Its related journal entries may or may not be reconciled.\n"
             " * The 'Cancelled' status is used when user cancel invoice.")
    sent = fields.Boolean(readonly=True, default=False, copy=False,
        help="It indicates that the invoice has been sent.")
    date_invoice = fields.Date(string='Invoice Date',
        readonly=True, states={'draft': [('readonly', False)]}, index=True,
        help="Keep empty to use the current date", copy=False)
    date_due = fields.Date(string='Due Date',
        readonly=True, states={'draft': [('readonly', False)]}, index=True, copy=False,
        help="If you use payment terms, the due date will be computed automatically at the generation "
             "of accounting entries. The payment term may compute several due dates, for example 50% "
             "now and 50% in one month, but if you want to force a due date, make sure that the payment "
             "term is not set on the invoice. If you keep the payment term and the due date empty, it "
             "means direct payment.")
    partner_id = fields.Many2one('res.partner', string='Partner', change_default=True,
        required=True, readonly=True, states={'draft': [('readonly', False)]},
        track_visibility='always')
    payment_term = fields.Many2one('account.payment.term', string='Payment Terms',
        readonly=True, states={'draft': [('readonly', False)]},
        help="If you use payment terms, the due date will be computed automatically at the generation "
             "of accounting entries. If you keep the payment term and the due date empty, it means direct payment. "
             "The payment term may compute several due dates, for example 50% now, 50% in one month.")
    period_id = fields.Many2one('account.period', string='Force Period',
        domain=[('state', '!=', 'done')], copy=False,
        help="Keep empty to use the period of the validation(invoice) date.",
        readonly=True, states={'draft': [('readonly', False)]})
    account_id = fields.Many2one('account.account', string='Account',
        required=True, readonly=True, states={'draft': [('readonly', False)]},
        help="The partner account used for this invoice.")
    invoice_line = fields.One2many('account.invoice.line', 'invoice_id', string='Invoice Lines',
        readonly=True, states={'draft': [('readonly', False)]}, copy=True)
    tax_line = fields.One2many('account.invoice.tax', 'invoice_id', string='Tax Lines',
        readonly=True, states={'draft': [('readonly', False)]}, copy=True)
    move_id = fields.Many2one('account.move', string='Journal Entry',
        readonly=True, index=True, ondelete='restrict', copy=False,
        help="Link to the automatically generated Journal Items.")

    # Monetary totals, recomputed from the lines by _compute_amount.
    amount_untaxed = fields.Float(string='Subtotal', digits=dp.get_precision('Account'),
        store=True, readonly=True, compute='_compute_amount', track_visibility='always')
    amount_tax = fields.Float(string='Tax', digits=dp.get_precision('Account'),
        store=True, readonly=True, compute='_compute_amount')
    amount_total = fields.Float(string='Total', digits=dp.get_precision('Account'),
        store=True, readonly=True, compute='_compute_amount')

    currency_id = fields.Many2one('res.currency', string='Currency',
        required=True, readonly=True, states={'draft': [('readonly', False)]},
        default=_default_currency, track_visibility='always')
    journal_id = fields.Many2one('account.journal', string='Journal',
        required=True, readonly=True, states={'draft': [('readonly', False)]},
        default=_default_journal,
        domain="[('type', 'in', {'out_invoice': ['sale'], 'out_refund': ['sale_refund'], 'in_refund': ['purchase_refund'], 'in_invoice': ['purchase']}.get(type, [])), ('company_id', '=', company_id)]")
    company_id = fields.Many2one('res.company', string='Company', change_default=True,
        required=True, readonly=True, states={'draft': [('readonly', False)]},
        default=lambda self: self.env['res.company']._company_default_get('account.invoice'))
    # Manually entered expected total for supplier invoices (verification only).
    check_total = fields.Float(string='Verification Total', digits=dp.get_precision('Account'),
        readonly=True, states={'draft': [('readonly', False)]}, default=0.0)

    reconciled = fields.Boolean(string='Paid/Reconciled',
        store=True, readonly=True, compute='_compute_reconciled',
        help="It indicates that the invoice has been paid and the journal entry of the invoice has been reconciled with one or several journal entries of payment.")
    partner_bank_id = fields.Many2one('res.partner.bank', string='Bank Account',
        help='Bank Account Number to which the invoice will be paid. A Company bank account if this is a Customer Invoice or Supplier Refund, otherwise a Partner bank account number.',
        readonly=True, states={'draft': [('readonly', False)]})
A Company bank account if this is a Customer Invoice or Supplier Refund, otherwise a Partner bank account number.', readonly=True, states={'draft': [('readonly', False)]}) move_lines = fields.Many2many('account.move.line', string='Entry Lines', compute='_compute_move_lines') residual = fields.Float(string='Balance', digits=dp.get_precision('Account'), compute='_compute_residual', store=True, help="Remaining amount due.") payment_ids = fields.Many2many('account.move.line', string='Payments', compute='_compute_payments') move_name = fields.Char(string='Journal Entry', readonly=True, states={'draft': [('readonly', False)]}, copy=False) user_id = fields.Many2one('res.users', string='Salesperson', track_visibility='onchange', readonly=True, states={'draft': [('readonly', False)]}, default=lambda self: self.env.user) fiscal_position = fields.Many2one('account.fiscal.position', string='Fiscal Position', readonly=True, states={'draft': [('readonly', False)]}) commercial_partner_id = fields.Many2one('res.partner', string='Commercial Entity', related='partner_id.commercial_partner_id', store=True, readonly=True, help="The commercial entity that will be used on Journal Entries for this invoice") _sql_constraints = [ ('number_uniq', 'unique(number, company_id, journal_id, type)', 'Invoice Number must be unique per Company!'), ] @api.model def fields_view_get(self, view_id=None, view_type=False, toolbar=False, submenu=False): context = self._context if context.get('active_model') == 'res.partner' and context.get('active_ids'): partner = self.env['res.partner'].browse(context['active_ids'])[0] if not view_type: view_id = self.env['ir.ui.view'].search([('name', '=', 'account.invoice.tree')]).id view_type = 'tree' elif view_type == 'form': if partner.supplier and not partner.customer: view_id = self.env['ir.ui.view'].search([('name', '=', 'account.invoice.supplier.form')]).id elif partner.customer and not partner.supplier: view_id = self.env['ir.ui.view'].search([('name', '=', 
'account.invoice.form')]).id res = super(account_invoice, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu) # adapt selection of field journal_id for field in res['fields']: if field == 'journal_id' and type: journal_select = self.env['account.journal']._name_search('', [('type', '=', type)], name_get_uid=1) res['fields'][field]['selection'] = journal_select doc = etree.XML(res['arch']) if context.get('type'): for node in doc.xpath("//field[@name='partner_bank_id']"): if context['type'] == 'in_refund': node.set('domain', "[('partner_id.ref_companies', 'in', [company_id])]") elif context['type'] == 'out_refund': node.set('domain', "[('partner_id', '=', partner_id)]") if view_type == 'search': if context.get('type') in ('out_invoice', 'out_refund'): for node in doc.xpath("//group[@name='extended filter']"): doc.remove(node) if view_type == 'tree': partner_string = _('Customer') if context.get('type') in ('in_invoice', 'in_refund'): partner_string = _('Supplier') for node in doc.xpath("//field[@name='reference']"): node.set('invisible', '0') for node in doc.xpath("//field[@name='partner_id']"): node.set('string', partner_string) res['arch'] = etree.tostring(doc) return res @api.multi def invoice_print(self): """ Print the invoice and mark it as sent, so that we can see more easily the next step of the workflow """ assert len(self) == 1, 'This option should only be used for a single id at a time.' self.sent = True return self.env['report'].get_action(self, 'account.report_invoice') @api.multi def action_invoice_sent(self): """ Open a window to compose an email, with the edi invoice template message loaded by default """ assert len(self) == 1, 'This option should only be used for a single id at a time.' 
    @api.multi
    def confirm_paid(self):
        # Workflow hook: flag the invoices as paid.
        return self.write({'state': 'paid'})

    @api.multi
    def unlink(self):
        """Only draft/cancelled invoices that never received a number may be deleted."""
        for invoice in self:
            if invoice.state not in ('draft', 'cancel'):
                raise Warning(_('You cannot delete an invoice which is not draft or cancelled. You should refund it instead.'))
            elif invoice.internal_number:
                raise Warning(_('You cannot delete an invoice after it has been validated (and received a number). You can set it back to "Draft" state and modify its content, then re-confirm it.'))
        return super(account_invoice, self).unlink()

    @api.multi
    def onchange_partner_id(self, type, partner_id, date_invoice=False,
            payment_term=False, partner_bank_id=False, company_id=False):
        # Old-API onchange: derive account, payment term, fiscal position and
        # bank account from the selected partner.
        account_id = False
        payment_term_id = False
        fiscal_position = False
        bank_id = False

        if partner_id:
            p = self.env['res.partner'].browse(partner_id)
            rec_account = p.property_account_receivable
            pay_account = p.property_account_payable
            if company_id:
                if p.property_account_receivable.company_id and \
                        p.property_account_receivable.company_id.id != company_id and \
                        p.property_account_payable.company_id and \
                        p.property_account_payable.company_id.id != company_id:
                    # The partner's default accounts belong to another company:
                    # look up company-specific ir.property records instead
                    # (record-specific first, then the company default).
                    prop = self.env['ir.property']
                    rec_dom = [('name', '=', 'property_account_receivable'), ('company_id', '=', company_id)]
                    pay_dom = [('name', '=', 'property_account_payable'), ('company_id', '=', company_id)]
                    res_dom = [('res_id', '=', 'res.partner,%s' % partner_id)]
                    rec_prop = prop.search(rec_dom + res_dom) or prop.search(rec_dom)
                    pay_prop = prop.search(pay_dom + res_dom) or prop.search(pay_dom)
                    rec_account = rec_prop.get_by_record(rec_prop)
                    pay_account = pay_prop.get_by_record(pay_prop)
                    if not rec_account and not pay_account:
                        action = self.env.ref('account.action_account_config')
                        msg = _('Cannot find a chart of accounts for this company, You should configure it. \nPlease go to Account Configuration.')
                        raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))

            if type in ('out_invoice', 'out_refund'):
                account_id = rec_account.id
                payment_term_id = p.property_payment_term.id
            else:
                account_id = pay_account.id
                payment_term_id = p.property_supplier_payment_term.id
            fiscal_position = p.property_account_position.id
            bank_id = p.bank_ids.id

        result = {'value': {
            'account_id': account_id,
            'payment_term': payment_term_id,
            'fiscal_position': fiscal_position,
        }}

        # The bank account is only relevant for supplier documents.
        if type in ('in_invoice', 'in_refund'):
            result['value']['partner_bank_id'] = bank_id

        # Cascade the dependent onchanges when the derived values changed.
        if payment_term != payment_term_id:
            if payment_term_id:
                to_update = self.onchange_payment_term_date_invoice(payment_term_id, date_invoice)
                result['value'].update(to_update.get('value', {}))
            else:
                result['value']['date_due'] = False

        if partner_bank_id != bank_id:
            to_update = self.onchange_partner_bank(bank_id)
            result['value'].update(to_update.get('value', {}))

        return result
    @api.multi
    def onchange_journal_id(self, journal_id=False):
        # Propagate the journal's currency (or its company currency) and
        # company to the invoice.
        if journal_id:
            journal = self.env['account.journal'].browse(journal_id)
            return {
                'value': {
                    'currency_id': journal.currency.id or journal.company_id.currency_id.id,
                    'company_id': journal.company_id.id,
                }
            }
        return {}

    @api.multi
    def onchange_payment_term_date_invoice(self, payment_term_id, date_invoice):
        # Recompute the due date from the payment term; the latest computed
        # installment date wins.
        if not date_invoice:
            date_invoice = fields.Date.context_today(self)
        if not payment_term_id:
            # To make sure the invoice due date should contain due date which is
            # entered by user when there is no payment term defined
            return {'value': {'date_due': self.date_due or date_invoice}}
        pterm = self.env['account.payment.term'].browse(payment_term_id)
        pterm_list = pterm.compute(value=1, date_ref=date_invoice)[0]
        if pterm_list:
            return {'value': {'date_due': max(line[0] for line in pterm_list)}}
        else:
            raise except_orm(_('Insufficient Data!'),
                _('The payment term of supplier does not have a payment term line.'))

    @api.multi
    def onchange_invoice_line(self, lines):
        # Extension hook; no default behaviour.
        return {}

    @api.multi
    def onchange_partner_bank(self, partner_bank_id=False):
        # Extension hook; no default behaviour.
        return {'value': {}}

    @api.multi
    def onchange_company_id(self, company_id, part_id, type, invoice_line, currency_id):
        # TODO: add the missing context parameter when forward-porting in trunk
        # so we can remove this hack!
        self = self.with_context(self.env['res.users'].context_get())

        values = {}
        domain = {}

        if company_id and part_id and type:
            p = self.env['res.partner'].browse(part_id)
            if p.property_account_payable and p.property_account_receivable and \
                    p.property_account_payable.company_id.id != company_id and \
                    p.property_account_receivable.company_id.id != company_id:
                # Partner accounts belong to another company: look up the
                # company-specific ir.property values instead.
                prop = self.env['ir.property']
                rec_dom = [('name', '=', 'property_account_receivable'), ('company_id', '=', company_id)]
                pay_dom = [('name', '=', 'property_account_payable'), ('company_id', '=', company_id)]
                res_dom = [('res_id', '=', 'res.partner,%s' % part_id)]
                rec_prop = prop.search(rec_dom + res_dom) or prop.search(rec_dom)
                pay_prop = prop.search(pay_dom + res_dom) or prop.search(pay_dom)
                rec_account = rec_prop.get_by_record(rec_prop)
                pay_account = pay_prop.get_by_record(pay_prop)
                if not rec_account and not pay_account:
                    action = self.env.ref('account.action_account_config')
                    msg = _('Cannot find a chart of accounts for this company, You should configure it. \nPlease go to Account Configuration.')
                    raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))

                if type in ('out_invoice', 'out_refund'):
                    acc_id = rec_account.id
                else:
                    acc_id = pay_account.id
                values = {'account_id': acc_id}

            if self:
                # Existing invoice: remap each line's account to the same-named
                # account of the new company.
                if company_id:
                    for line in self.invoice_line:
                        if not line.account_id:
                            continue
                        if line.account_id.company_id.id == company_id:
                            continue
                        accounts = self.env['account.account'].search([('name', '=', line.account_id.name),
                                                                       ('company_id', '=', company_id)])
                        if not accounts:
                            action = self.env.ref('account.action_account_config')
                            msg = _('Cannot find a chart of accounts for this company, You should configure it. \nPlease go to Account Configuration.')
                            raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
                        line.write({'account_id': accounts[-1].id})
            else:
                # New invoice: validate the pending one2many line commands.
                for line_cmd in invoice_line or []:
                    if len(line_cmd) >= 3 and isinstance(line_cmd[2], dict):
                        line = self.env['account.account'].browse(line_cmd[2]['account_id'])
                        if line.company_id.id != company_id:
                            raise except_orm(
                                _('Configuration Error!'),
                                _("Invoice line account's company and invoice's company does not match.")
                            )

        if company_id and type:
            # Pick a journal for the new company: first match by type, then
            # let a user-defined default override it.
            journal_type = TYPE2JOURNAL[type]
            journals = self.env['account.journal'].search([('type', '=', journal_type),
                                                           ('company_id', '=', company_id)])
            if journals:
                values['journal_id'] = journals[0].id
            journal_defaults = self.env['ir.values'].get_defaults_dict('account.invoice', 'type=%s' % type)
            if 'journal_id' in journal_defaults:
                values['journal_id'] = journal_defaults['journal_id']
            if not values.get('journal_id'):
                field_desc = journals.fields_get(['type'])
                type_label = next(t for t, label in field_desc['type']['selection'] if t == journal_type)
                action = self.env.ref('account.action_account_journal_form')
                msg = _('Cannot find any account journal of type "%s" for this company, You should create one.\n Please go to Journal Configuration') % type_label
                raise RedirectWarning(msg, action.id, _('Go to the configuration panel'))
            domain = {'journal_id': [('id', 'in', journals.ids)]}

        return {'value': values, 'domain': domain}
    @api.multi
    def action_cancel_draft(self):
        # go from canceled state to draft state
        self.write({'state': 'draft'})
        # Restart the invoice workflow so the draft can be validated again.
        self.delete_workflow()
        self.create_workflow()
        return True

    @api.one
    @api.returns('ir.ui.view')
    def get_formview_id(self):
        """ Update form view id of action to open the invoice """
        if self.type == 'in_invoice':
            return self.env.ref('account.invoice_supplier_form')
        else:
            return self.env.ref('account.invoice_form')

    @api.multi
    def move_line_id_payment_get(self):
        # return the move line ids with the same account as the invoice self
        if not self.id:
            return []
        query = """ SELECT l.id
                    FROM account_move_line l, account_invoice i
                    WHERE i.id = %s AND l.move_id = i.move_id AND l.account_id = i.account_id """
        self._cr.execute(query, (self.id,))
        return [row[0] for row in self._cr.fetchall()]

    @api.multi
    def test_paid(self):
        # check whether all corresponding account move lines are reconciled
        line_ids = self.move_line_id_payment_get()
        if not line_ids:
            return False
        query = "SELECT reconcile_id FROM account_move_line WHERE id IN %s"
        self._cr.execute(query, (tuple(line_ids),))
        return all(row[0] for row in self._cr.fetchall())

    @api.multi
    def button_reset_taxes(self):
        # Drop the automatic tax lines and recompute them from the invoice
        # lines; manual tax lines are preserved.
        account_invoice_tax = self.env['account.invoice.tax']
        ctx = dict(self._context)
        for invoice in self:
            self._cr.execute("DELETE FROM account_invoice_tax WHERE invoice_id=%s AND manual is False",
                             (invoice.id,))
            self.invalidate_cache()
            partner = invoice.partner_id
            if partner.lang:
                ctx['lang'] = partner.lang
            for taxe in account_invoice_tax.compute(invoice).values():
                account_invoice_tax.create(taxe)
        # dummy write on self to trigger recomputations
        return self.with_context(ctx).write({'invoice_line': []})

    @api.multi
    def button_compute(self, set_total=False):
        self.button_reset_taxes()
        for invoice in self:
            if set_total:
                invoice.check_total = invoice.amount_total
        return True

    @api.multi
    def _get_analytic_lines(self):
        """ Return a list of dict for creating analytic lines for self[0] """
        company_currency = self.company_id.currency_id
        # Customer documents are positive, supplier documents negative.
        sign = 1 if self.type in ('out_invoice', 'in_refund') else -1

        iml = self.env['account.invoice.line'].move_line_get(self.id)

        for il in iml:
            if il['account_analytic_id']:
                if self.type in ('in_invoice', 'in_refund'):
                    ref = self.reference
                else:
                    ref = self.number
                if not self.journal_id.analytic_journal_id:
                    raise except_orm(_('No Analytic Journal!'),
                        _("You have to define an analytic journal on the '%s' journal!") % (self.journal_id.name,))
                currency = self.currency_id.with_context(date=self.date_invoice)
                il['analytic_lines'] = [(0, 0, {
                    'name': il['name'],
                    'date': self.date_invoice,
                    'account_id': il['account_analytic_id'],
                    'unit_amount': il['quantity'],
                    'amount': currency.compute(il['price'], company_currency) * sign,
                    'product_id': il['product_id'],
                    'product_uom_id': il['uos_id'],
                    'general_account_id': il['account_id'],
                    'journal_id': self.journal_id.analytic_journal_id.id,
                    'ref': ref,
                })]
        return iml

    @api.multi
    def action_date_assign(self):
        # Recompute each invoice's due date from its payment term.
        for inv in self:
            res = inv.onchange_payment_term_date_invoice(inv.payment_term.id, inv.date_invoice)
            if res and res.get('value'):
                inv.write(res['value'])
        return True

    @api.multi
    def finalize_invoice_move_lines(self, move_lines):
        """ finalize_invoice_move_lines(move_lines) -> move_lines

            Hook method to be overridden in additional modules to verify and
            possibly alter the move lines to be created by an invoice, for
            special cases.
            :param move_lines: list of dictionaries with the account.move.lines (as for create())
            :return: the (possibly updated) final move_lines to create for this invoice
        """
        return move_lines

    @api.multi
    def check_tax_lines(self, compute_taxes):
        # Compare the stored tax lines against the freshly computed ones and
        # complain about any mismatch (manual lines are skipped).
        account_invoice_tax = self.env['account.invoice.tax']
        company_currency = self.company_id.currency_id
        if not self.tax_line:
            for tax in compute_taxes.values():
                account_invoice_tax.create(tax)
        else:
            tax_key = []
            for tax in self.tax_line:
                if tax.manual:
                    continue
                key = (tax.tax_code_id.id, tax.base_code_id.id, tax.account_id.id)
                tax_key.append(key)
                if key not in compute_taxes:
                    raise except_orm(_('Warning!'), _('Global taxes defined, but they are not in invoice lines !'))
                base = compute_taxes[key]['base']
                if abs(base - tax.base) > company_currency.rounding:
                    raise except_orm(_('Warning!'), _('Tax base different!\nClick on compute to update the tax base.'))
            for key in compute_taxes:
                if key not in tax_key:
                    raise except_orm(_('Warning!'), _('Taxes are missing!\nClick on compute button.'))

    @api.multi
    def compute_invoice_totals(self, company_currency, ref, invoice_move_lines):
        # Convert the move-line amounts to the company currency and accumulate
        # the signed totals; customer invoices/supplier refunds flip the sign
        # of each line's price.
        total = 0
        total_currency = 0
        for line in invoice_move_lines:
            if self.currency_id != company_currency:
                currency = self.currency_id.with_context(date=self.date_invoice or fields.Date.context_today(self))
                line['currency_id'] = currency.id
                line['amount_currency'] = line['price']
                line['price'] = currency.compute(line['price'], company_currency)
            else:
                line['currency_id'] = False
                line['amount_currency'] = False
            line['ref'] = ref
            if self.type in ('out_invoice', 'in_refund'):
                total += line['price']
                total_currency += line['amount_currency'] or line['price']
                line['price'] = - line['price']
            else:
                total -= line['price']
                total_currency -= line['amount_currency'] or line['price']
        return total, total_currency, invoice_move_lines
    def inv_line_characteristic_hashcode(self, invoice_line):
        """Overridable hashcode generation for invoice lines. Lines having the same hashcode
        will be grouped together if the journal has the 'group line' option. Of course a module
        can add fields to invoice lines that would need to be tested too before merging lines
        or not."""
        return "%s-%s-%s-%s-%s" % (
            invoice_line['account_id'],
            invoice_line.get('tax_code_id', 'False'),
            invoice_line.get('product_id', 'False'),
            invoice_line.get('analytic_account_id', 'False'),
            invoice_line.get('date_maturity', 'False'),
        )

    def group_lines(self, iml, line):
        """Merge account move lines (and hence analytic lines) if invoice line hashcodes are equals"""
        if self.journal_id.group_invoice_lines:
            line2 = {}
            for x, y, l in line:
                tmp = self.inv_line_characteristic_hashcode(l)
                if tmp in line2:
                    # Same characteristics: merge into one balanced line.
                    am = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit'])
                    line2[tmp]['debit'] = (am > 0) and am or 0.0
                    line2[tmp]['credit'] = (am < 0) and -am or 0.0
                    line2[tmp]['tax_amount'] += l['tax_amount']
                    line2[tmp]['analytic_lines'] += l['analytic_lines']
                else:
                    line2[tmp] = l
            line = []
            for key, val in line2.items():
                line.append((0, 0, val))
        return line

    @api.multi
    def action_move_create(self):
        """ Creates invoice related analytics and financial move lines """
        account_invoice_tax = self.env['account.invoice.tax']
        account_move = self.env['account.move']

        for inv in self:
            if not inv.journal_id.sequence_id:
                raise except_orm(_('Error!'), _('Please define sequence on the journal related to this invoice.'))
            if not inv.invoice_line:
                raise except_orm(_('No Invoice Lines!'), _('Please create some invoice lines.'))
            if inv.move_id:
                # Journal entry already created for this invoice.
                continue

            ctx = dict(self._context, lang=inv.partner_id.lang)

            if not inv.date_invoice:
                inv.with_context(ctx).write({'date_invoice': fields.Date.context_today(self)})
            date_invoice = inv.date_invoice

            company_currency = inv.company_id.currency_id
            # create the analytical lines, one move line per invoice line
            iml = inv._get_analytic_lines()
            # check if taxes are all computed
            compute_taxes = account_invoice_tax.compute(inv)
            inv.check_tax_lines(compute_taxes)

            # I disabled the check_total feature
            if self.env['res.users'].has_group('account.group_supplier_inv_check_total'):
                if inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding / 2.0):
                    raise except_orm(_('Bad Total!'), _('Please verify the price of the invoice!\nThe encoded total does not match the computed total.'))

            if inv.payment_term:
                # Sanity-check the payment term: fixed + percent parts must
                # not exceed the invoiced total.
                total_fixed = total_percent = 0
                for line in inv.payment_term.line_ids:
                    if line.value == 'fixed':
                        total_fixed += line.value_amount
                    if line.value == 'procent':
                        total_percent += line.value_amount
                total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)
                if (total_fixed + total_percent) > 100:
                    raise except_orm(_('Error!'), _("Cannot create the invoice.\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'."))

            # one move line per tax line
            iml += account_invoice_tax.move_line_get(inv.id)

            if inv.type in ('in_invoice', 'in_refund'):
                ref = inv.reference
            else:
                ref = inv.number

            diff_currency = inv.currency_id != company_currency
            # create one move line for the total and possibly adjust the other lines amount
            total, total_currency, iml = inv.with_context(ctx).compute_invoice_totals(company_currency, ref, iml)

            name = inv.name or inv.supplier_invoice_number or '/'
            totlines = []
            if inv.payment_term:
                totlines = inv.with_context(ctx).payment_term.compute(total, date_invoice)[0]
            if totlines:
                # One counterpart line per payment-term installment.
                res_amount_currency = total_currency
                ctx['date'] = date_invoice
                for i, t in enumerate(totlines):
                    if inv.currency_id != company_currency:
                        amount_currency = company_currency.with_context(ctx).compute(t[1], inv.currency_id)
                    else:
                        amount_currency = False

                    # last line: add the diff
                    res_amount_currency -= amount_currency or 0
                    if i + 1 == len(totlines):
                        amount_currency += res_amount_currency

                    iml.append({
                        'type': 'dest',
                        'name': name,
                        'price': t[1],
                        'account_id': inv.account_id.id,
                        'date_maturity': t[0],
                        'amount_currency': diff_currency and amount_currency,
                        'currency_id': diff_currency and inv.currency_id.id,
                        'ref': ref,
                    })
            else:
                # Single counterpart line for the whole total.
                iml.append({
                    'type': 'dest',
                    'name': name,
                    'price': total,
                    'account_id': inv.account_id.id,
                    'date_maturity': inv.date_due,
                    'amount_currency': diff_currency and total_currency,
                    'currency_id': diff_currency and inv.currency_id.id,
                    'ref': ref
                })

            date = date_invoice

            part = self.env['res.partner']._find_accounting_partner(inv.partner_id)

            line = [(0, 0, self.line_get_convert(l, part.id, date)) for l in iml]
            line = inv.group_lines(iml, line)

            journal = inv.journal_id.with_context(ctx)
            if journal.centralisation:
                raise except_orm(_('User Error!'),
                        _('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))

            line = inv.finalize_invoice_move_lines(line)

            move_vals = {
                'ref': inv.reference or inv.name,
                'line_id': line,
                'journal_id': journal.id,
                'date': inv.date_invoice,
                'narration': inv.comment,
                'company_id': inv.company_id.id,
            }
            ctx['company_id'] = inv.company_id.id
            period = inv.period_id
            if not period:
                period = period.with_context(ctx).find(date_invoice)[:1]
            if period:
                move_vals['period_id'] = period.id
                for i in line:
                    i[2]['period_id'] = period.id

            ctx['invoice'] = inv
            move = account_move.with_context(ctx).create(move_vals)
            # make the invoice point to that move
            vals = {
                'move_id': move.id,
                'period_id': period.id,
                'move_name': move.name,
            }
            inv.with_context(ctx).write(vals)
            # Pass invoice in context in method post: used if you want to get the same
            # account move reference when creating the same invoice after a cancelled one:
            move.post()
        self._log_event()
        return True

    @api.multi
    def invoice_validate(self):
        # Workflow hook: move the invoices to the 'open' state.
        return self.write({'state': 'open'})

    @api.model
    def line_get_convert(self, line, part, date):
        # Map an internal invoice-line dict to account.move.line create() values.
        return {
            'date_maturity': line.get('date_maturity', False),
            'partner_id': part,
            'name': line['name'][:64],
            'date': date,
            'debit': line['price'] > 0 and line['price'],
            'credit': line['price'] < 0 and -line['price'],
            'account_id': line['account_id'],
            'analytic_lines': line.get('analytic_lines', []),
            'amount_currency': line['price'] > 0 and abs(line.get('amount_currency', False)) or -abs(line.get('amount_currency', False)),
            'currency_id': line.get('currency_id', False),
            'tax_code_id': line.get('tax_code_id', False),
            'tax_amount': line.get('tax_amount', False),
            'ref': line.get('ref', False),
            'quantity': line.get('quantity', 1.00),
            'product_id': line.get('product_id', False),
            'product_uom_id': line.get('uos_id', False),
            'analytic_account_id': line.get('account_analytic_id', False),
        }

    @api.multi
    def action_number(self):
        #TODO: not correct fix but required a fresh values before reading it.
        self.write({})

        for inv in self:
            self.write({'internal_number': inv.number})

            # Choose the reference propagated to the accounting entries.
            if inv.type in ('in_invoice', 'in_refund'):
                if not inv.reference:
                    ref = inv.number
                else:
                    ref = inv.reference
            else:
                ref = inv.number

            # Direct SQL: fill the ref on the move, its lines and the related
            # analytic lines without triggering ORM recomputation.
            self._cr.execute(""" UPDATE account_move SET ref=%s
                           WHERE id=%s AND (ref IS NULL OR ref = '')""",
                        (ref, inv.move_id.id))
            self._cr.execute(""" UPDATE account_move_line SET ref=%s
                           WHERE move_id=%s AND (ref IS NULL OR ref = '')""",
                        (ref, inv.move_id.id))
            self._cr.execute(""" UPDATE account_analytic_line SET ref=%s
                           FROM account_move_line
                           WHERE account_move_line.move_id = %s AND
                                 account_analytic_line.move_id = account_move_line.id""",
                        (ref, inv.move_id.id))
        self.invalidate_cache()
        return True

    @api.multi
    def action_cancel(self):
        moves = self.env['account.move']
        for inv in self:
            if inv.move_id:
                moves += inv.move_id
            if inv.payment_ids:
                for move_line in inv.payment_ids:
                    if move_line.reconcile_partial_id.line_partial_ids:
                        raise except_orm(_('Error!'), _('You cannot cancel an invoice which is partially paid. You need to unreconcile related payment entries first.'))

        # First, set the invoices as cancelled and detach the move ids
        self.write({'state': 'cancel', 'move_id': False})
        if moves:
            # second, invalidate the move(s)
            moves.button_cancel()
            # delete the move this invoice was pointing to
            # Note that the corresponding move_lines and move_reconciles
            # will be automatically deleted too
            moves.unlink()
        self._log_event(-1.0, 'Cancel Invoice')
        return True
    ###################

    @api.multi
    def _log_event(self, factor=1.0, name='Open Invoice'):
        #TODO: implement messages system
        return True

    @api.multi
    def name_get(self):
        # Display name: "<number or type label> <invoice name>".
        TYPES = {
            'out_invoice': _('Invoice'),
            'in_invoice': _('Supplier Invoice'),
            'out_refund': _('Refund'),
            'in_refund': _('Supplier Refund'),
        }
        result = []
        for inv in self:
            result.append((inv.id, "%s %s" % (inv.number or TYPES[inv.type], inv.name or '')))
        return result

    @api.model
    def name_search(self, name, args=None, operator='ilike', limit=100):
        # Match on the exact number first, then fall back to the name.
        args = args or []
        recs = self.browse()
        if name:
            recs = self.search([('number', '=', name)] + args, limit=limit)
        if not recs:
            recs = self.search([('name', operator, name)] + args, limit=limit)
        return recs.name_get()

    @api.model
    def _refund_cleanup_lines(self, lines):
        """ Convert records to dict of values suitable for one2many line creation

            :param recordset lines: records to convert
            :return: list of command tuple for one2many line creation [(0, 0, dict of values), ...]
        """
        result = []
        for line in lines:
            values = {}
            for name, field in line._fields.iteritems():
                if name in MAGIC_COLUMNS:
                    continue
                elif field.type == 'many2one':
                    values[name] = line[name].id
                elif field.type not in ['many2many', 'one2many']:
                    values[name] = line[name]
                elif name == 'invoice_line_tax_id':
                    values[name] = [(6, 0, line[name].ids)]
            result.append((0, 0, values))
        return result

    @api.model
    def _prepare_refund(self, invoice, date=None, period_id=None, description=None, journal_id=None):
        """ Prepare the dict of values to create the new refund from the invoice.
            This method may be overridden to implement custom
            refund generation (making sure to call super() to establish
            a clean extension chain).

            :param record invoice: invoice to refund
            :param string date: refund creation date from the wizard
            :param integer period_id: force account.period from the wizard
            :param string description: description of the refund from the wizard
            :param integer journal_id: account.journal from the wizard
            :return: dict of value to create() the refund
        """
        values = {}
        for field in ['name', 'reference', 'comment', 'date_due', 'partner_id',
                'company_id', 'account_id', 'currency_id', 'payment_term', 'user_id',
                'fiscal_position']:
            if invoice._fields[field].type == 'many2one':
                values[field] = invoice[field].id
            else:
                values[field] = invoice[field] or False

        values['invoice_line'] = self._refund_cleanup_lines(invoice.invoice_line)

        # Only manual tax lines are copied; automatic ones are recomputed.
        tax_lines = filter(lambda l: l.manual, invoice.tax_line)
        values['tax_line'] = self._refund_cleanup_lines(tax_lines)

        if journal_id:
            journal = self.env['account.journal'].browse(journal_id)
        elif invoice['type'] == 'in_invoice':
            journal = self.env['account.journal'].search([('type', '=', 'purchase_refund')], limit=1)
        else:
            journal = self.env['account.journal'].search([('type', '=', 'sale_refund')], limit=1)
        values['journal_id'] = journal.id

        values['type'] = TYPE2REFUND[invoice['type']]
        values['date_invoice'] = date or fields.Date.context_today(invoice)
        values['state'] = 'draft'
        values['number'] = False

        if period_id:
            values['period_id'] = period_id
        if description:
            values['name'] = description
        return values

    @api.multi
    @api.returns('self')
    def refund(self, date=None, period_id=None, description=None, journal_id=None):
        # Create one refund (draft) per invoice in self.
        new_invoices = self.browse()
        for invoice in self:
            # create the new invoice
            values = self._prepare_refund(invoice, date=date, period_id=period_id,
                                    description=description, journal_id=journal_id)
            new_invoices += self.create(values)
        return new_invoices

    @api.v8
    def pay_and_reconcile(self, pay_amount, pay_account_id, period_id, pay_journal_id,
                          writeoff_acc_id, writeoff_period_id, writeoff_journal_id, name=''):
        # TODO check if we can use different period for payment and the writeoff line
        assert len(self) == 1, "Can only pay one invoice at a time."
        # Take the seq as name for move
        SIGN = {'out_invoice': -1, 'in_invoice': 1, 'out_refund': 1, 'in_refund': -1}
        direction = SIGN[self.type]
        # take the chosen date
        date = self._context.get('date_p') or fields.Date.context_today(self)

        # Take the amount in currency and the currency of the payment
        if self._context.get('amount_currency') and self._context.get('currency_id'):
            amount_currency = self._context['amount_currency']
            currency_id = self._context['currency_id']
        else:
            amount_currency = False
            currency_id = False

        pay_journal = self.env['account.journal'].browse(pay_journal_id)
        if self.type in ('in_invoice', 'in_refund'):
            ref = self.reference
        else:
            ref = self.number
        partner = self.partner_id._find_accounting_partner(self.partner_id)
        name = name or self.invoice_line.name or self.number
        # Pay attention to the sign for both debit/credit AND amount_currency
        l1 = {
            'name': name,
            'debit': direction * pay_amount > 0 and direction * pay_amount,
            'credit': direction * pay_amount < 0 and -direction * pay_amount,
            'account_id': self.account_id.id,
            'partner_id': partner.id,
            'ref': ref,
            'date': date,
            'currency_id': currency_id,
            'amount_currency': direction * (amount_currency or 0.0),
            'company_id': self.company_id.id,
        }
        l2 = {
            'name': name,
            'debit': direction * pay_amount < 0 and -direction * pay_amount,
            'credit': direction * pay_amount > 0 and direction * pay_amount,
            'account_id': pay_account_id,
            'partner_id': partner.id,
            'ref': ref,
            'date': date,
            'currency_id': currency_id,
            'amount_currency': -direction * (amount_currency or 0.0),
            'company_id': self.company_id.id,
        }
        move = self.env['account.move'].create({
            'ref': ref,
            'line_id': [(0, 0, l1), (0, 0, l2)],
            'journal_id': pay_journal_id,
            'period_id': period_id,
            'date': date,
        })

        # Collect the lines (payment move + invoice move + earlier payments)
        # hitting the invoice account, and their balance.
        move_ids = (move | self.move_id).ids
        self._cr.execute("SELECT id FROM account_move_line WHERE move_id IN %s",
                         (tuple(move_ids),))
        lines = self.env['account.move.line'].browse([r[0] for r in self._cr.fetchall()])
        lines2rec = lines.browse()
        total = 0.0
        for line in itertools.chain(lines, self.payment_ids):
            if line.account_id == self.account_id:
                lines2rec += line
                total += (line.debit or 0.0) - (line.credit or 0.0)

        inv_id, name = self.name_get()[0]
        if not round(total, self.env['decimal.precision'].precision_get('Account')) or writeoff_acc_id:
            # Fully paid (or a write-off account is given): full reconciliation.
            lines2rec.reconcile('manual', writeoff_acc_id, writeoff_period_id, writeoff_journal_id)
        else:
            code = self.currency_id.symbol
            # TODO: use currency's formatting function
            msg = _("Invoice partially paid: %s%s of %s%s (%s%s remaining).") % \
                    (pay_amount, code, self.amount_total, code, total, code)
            self.message_post(body=msg)
            lines2rec.reconcile_partial('manual')

        # Update the stored value (fields.function), so we write to trigger recompute
        return self.write({})

    @api.v7
    def pay_and_reconcile(self, cr, uid, ids, pay_amount, pay_account_id, period_id, pay_journal_id,
                          writeoff_acc_id, writeoff_period_id, writeoff_journal_id,
                          context=None, name=''):
        # Old-API shim delegating to the v8 implementation above.
        recs = self.browse(cr, uid, ids, context)
        return recs.pay_and_reconcile(pay_amount, pay_account_id, period_id, pay_journal_id,
                    writeoff_acc_id, writeoff_period_id, writeoff_journal_id, name=name)


class account_invoice_line(models.Model):
    _name = "account.invoice.line"
    _description = "Invoice Line"
    _order = "invoice_id,sequence,id"

    @api.one
    @api.depends('price_unit', 'discount', 'invoice_line_tax_id', 'quantity',
        'product_id', 'invoice_id.partner_id', 'invoice_id.currency_id')
    def _compute_price(self):
        # Subtotal = discounted unit price x quantity, tax-adjusted, rounded
        # to the invoice currency.
        price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
        taxes = self.invoice_line_tax_id.compute_all(price, self.quantity,
            product=self.product_id, partner=self.invoice_id.partner_id)
        self.price_subtotal = taxes['total']
        if self.invoice_id:
            self.price_subtotal = self.invoice_id.currency_id.round(self.price_subtotal)

    # Decorator for the default helper whose definition continues on the
    # following chunk line.
    @api.model
def _default_price_unit(self): if not self._context.get('check_total'): return 0 total = self._context['check_total'] for l in self._context.get('invoice_line', []): if isinstance(l, (list, tuple)) and len(l) >= 3 and l[2]: vals = l[2] price = vals.get('price_unit', 0) * (1 - vals.get('discount', 0) / 100.0) total = total - (price * vals.get('quantity')) taxes = vals.get('invoice_line_tax_id') if taxes and len(taxes[0]) >= 3 and taxes[0][2]: taxes = self.env['account.tax'].browse(taxes[0][2]) tax_res = taxes.compute_all(price, vals.get('quantity'), product=vals.get('product_id'), partner=self._context.get('partner_id')) for tax in tax_res['taxes']: total = total - tax['amount'] return total @api.model def _default_account(self): # XXX this gets the default account for the user's company, # it should get the default account for the invoice's company # however, the invoice's company does not reach this point if self._context.get('type') in ('out_invoice', 'out_refund'): return self.env['ir.property'].get('property_account_income_categ', 'product.category') else: return self.env['ir.property'].get('property_account_expense_categ', 'product.category') name = fields.Text(string='Description', required=True) origin = fields.Char(string='Source Document', help="Reference of the document that produced this invoice.") sequence = fields.Integer(string='Sequence', default=10, help="Gives the sequence of this line when displaying the invoice.") invoice_id = fields.Many2one('account.invoice', string='Invoice Reference', ondelete='cascade', index=True) uos_id = fields.Many2one('product.uom', string='Unit of Measure', ondelete='set null', index=True) product_id = fields.Many2one('product.product', string='Product', ondelete='set null', index=True) account_id = fields.Many2one('account.account', string='Account', required=True, domain=[('type', 'not in', ['view', 'closed'])], default=_default_account, help="The income or expense account related to the selected product.") 
price_unit = fields.Float(string='Unit Price', required=True, digits= dp.get_precision('Product Price'), default=_default_price_unit) price_subtotal = fields.Float(string='Amount', digits= dp.get_precision('Account'), store=True, readonly=True, compute='_compute_price') quantity = fields.Float(string='Quantity', digits= dp.get_precision('Product Unit of Measure'), required=True, default=1) discount = fields.Float(string='Discount (%)', digits= dp.get_precision('Discount'), default=0.0) invoice_line_tax_id = fields.Many2many('account.tax', 'account_invoice_line_tax', 'invoice_line_id', 'tax_id', string='Taxes', domain=[('parent_id', '=', False)]) account_analytic_id = fields.Many2one('account.analytic.account', string='Analytic Account') company_id = fields.Many2one('res.company', string='Company', related='invoice_id.company_id', store=True, readonly=True) partner_id = fields.Many2one('res.partner', string='Partner', related='invoice_id.partner_id', store=True, readonly=True) @api.model def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False): res = super(account_invoice_line, self).fields_view_get( view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu) if self._context.get('type'): doc = etree.XML(res['arch']) for node in doc.xpath("//field[@name='product_id']"): if self._context['type'] in ('in_invoice', 'in_refund'): node.set('domain', "[('purchase_ok', '=', True)]") else: node.set('domain', "[('sale_ok', '=', True)]") res['arch'] = etree.tostring(doc) return res @api.multi def product_id_change(self, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None): context = self._context company_id = company_id if company_id is not None else context.get('company_id', False) self = self.with_context(company_id=company_id, force_company=company_id) if not partner_id: raise except_orm(_('No Partner Defined!'), _("You must first select a 
partner!")) if not product: if type in ('in_invoice', 'in_refund'): return {'value': {}, 'domain': {'product_uom': []}} else: return {'value': {'price_unit': 0.0}, 'domain': {'product_uom': []}} values = {} part = self.env['res.partner'].browse(partner_id) fpos = self.env['account.fiscal.position'].browse(fposition_id) if part.lang: self = self.with_context(lang=part.lang) product = self.env['product.product'].browse(product) values['name'] = product.partner_ref if type in ('out_invoice', 'out_refund'): account = product.property_account_income or product.categ_id.property_account_income_categ else: account = product.property_account_expense or product.categ_id.property_account_expense_categ account = fpos.map_account(account) if account: values['account_id'] = account.id if type in ('out_invoice', 'out_refund'): taxes = product.taxes_id or account.tax_ids if product.description_sale: values['name'] += '\n' + product.description_sale else: taxes = product.supplier_taxes_id or account.tax_ids if product.description_purchase: values['name'] += '\n' + product.description_purchase taxes = fpos.map_tax(taxes) values['invoice_line_tax_id'] = taxes.ids if type in ('in_invoice', 'in_refund'): values['price_unit'] = price_unit or product.standard_price else: values['price_unit'] = product.list_price values['uos_id'] = uom_id or product.uom_id.id domain = {'uos_id': [('category_id', '=', product.uom_id.category_id.id)]} company = self.env['res.company'].browse(company_id) currency = self.env['res.currency'].browse(currency_id) if company and currency: if company.currency_id != currency: if type in ('in_invoice', 'in_refund'): values['price_unit'] = product.standard_price values['price_unit'] = values['price_unit'] * currency.rate if values['uos_id'] and values['uos_id'] != product.uom_id.id: values['price_unit'] = self.env['product.uom']._compute_price( product.uom_id.id, values['price_unit'], values['uos_id']) return {'value': values, 'domain': domain} @api.multi def 
uos_id_change(self, product, uom, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None): context = self._context company_id = company_id if company_id != None else context.get('company_id', False) self = self.with_context(company_id=company_id) result = self.product_id_change( product, uom, qty, name, type, partner_id, fposition_id, price_unit, currency_id, company_id=company_id, ) warning = {} if not uom: result['value']['price_unit'] = 0.0 if product and uom: prod = self.env['product.product'].browse(product) prod_uom = self.env['product.uom'].browse(uom) if prod.uom_id.category_id != prod_uom.category_id: warning = { 'title': _('Warning!'), 'message': _('The selected unit of measure is not compatible with the unit of measure of the product.'), } result['value']['uos_id'] = prod.uom_id.id if warning: result['warning'] = warning return result @api.model def move_line_get(self, invoice_id): inv = self.env['account.invoice'].browse(invoice_id) currency = inv.currency_id.with_context(date=inv.date_invoice) company_currency = inv.company_id.currency_id res = [] for line in inv.invoice_line: mres = self.move_line_get_item(line) mres['invl_id'] = line.id res.append(mres) tax_code_found = False taxes = line.invoice_line_tax_id.compute_all( (line.price_unit * (1.0 - (line.discount or 0.0) / 100.0)), line.quantity, line.product_id, inv.partner_id)['taxes'] for tax in taxes: if inv.type in ('out_invoice', 'in_invoice'): tax_code_id = tax['base_code_id'] tax_amount = line.price_subtotal * tax['base_sign'] else: tax_code_id = tax['ref_base_code_id'] tax_amount = line.price_subtotal * tax['ref_base_sign'] if tax_code_found: if not tax_code_id: continue res.append(dict(mres)) res[-1]['price'] = 0.0 res[-1]['account_analytic_id'] = False elif not tax_code_id: continue tax_code_found = True res[-1]['tax_code_id'] = tax_code_id res[-1]['tax_amount'] = currency.compute(tax_amount, company_currency) return res 
@api.model def move_line_get_item(self, line): return { 'type': 'src', 'name': line.name.split('\n')[0][:64], 'price_unit': line.price_unit, 'quantity': line.quantity, 'price': line.price_subtotal, 'account_id': line.account_id.id, 'product_id': line.product_id.id, 'uos_id': line.uos_id.id, 'account_analytic_id': line.account_analytic_id.id, 'taxes': line.invoice_line_tax_id, } # # Set the tax field according to the account and the fiscal position # @api.multi def onchange_account_id(self, product_id, partner_id, inv_type, fposition_id, account_id): if not account_id: return {} unique_tax_ids = [] account = self.env['account.account'].browse(account_id) if not product_id: fpos = self.env['account.fiscal.position'].browse(fposition_id) unique_tax_ids = fpos.map_tax(account.tax_ids).ids else: product_change_result = self.product_id_change(product_id, False, type=inv_type, partner_id=partner_id, fposition_id=fposition_id, company_id=account.company_id.id) if 'invoice_line_tax_id' in product_change_result.get('value', {}): unique_tax_ids = product_change_result['value']['invoice_line_tax_id'] return {'value': {'invoice_line_tax_id': unique_tax_ids}} class account_invoice_tax(models.Model): _name = "account.invoice.tax" _description = "Invoice Tax" _order = 'sequence' @api.one @api.depends('base', 'base_amount', 'amount', 'tax_amount') def _compute_factors(self): self.factor_base = self.base_amount / self.base if self.base else 1.0 self.factor_tax = self.tax_amount / self.amount if self.amount else 1.0 invoice_id = fields.Many2one('account.invoice', string='Invoice Line', ondelete='cascade', index=True) name = fields.Char(string='Tax Description', required=True) account_id = fields.Many2one('account.account', string='Tax Account', required=True, domain=[('type', 'not in', ['view', 'income', 'closed'])]) account_analytic_id = fields.Many2one('account.analytic.account', string='Analytic account') base = fields.Float(string='Base', digits=dp.get_precision('Account')) 
amount = fields.Float(string='Amount', digits=dp.get_precision('Account')) manual = fields.Boolean(string='Manual', default=True) sequence = fields.Integer(string='Sequence', help="Gives the sequence order when displaying a list of invoice tax.") base_code_id = fields.Many2one('account.tax.code', string='Base Code', help="The account basis of the tax declaration.") base_amount = fields.Float(string='Base Code Amount', digits=dp.get_precision('Account'), default=0.0) tax_code_id = fields.Many2one('account.tax.code', string='Tax Code', help="The tax basis of the tax declaration.") tax_amount = fields.Float(string='Tax Code Amount', digits=dp.get_precision('Account'), default=0.0) company_id = fields.Many2one('res.company', string='Company', related='account_id.company_id', store=True, readonly=True) factor_base = fields.Float(string='Multipication factor for Base code', compute='_compute_factors') factor_tax = fields.Float(string='Multipication factor Tax code', compute='_compute_factors') @api.multi def base_change(self, base, currency_id=False, company_id=False, date_invoice=False): factor = self.factor_base if self else 1 company = self.env['res.company'].browse(company_id) if currency_id and company.currency_id: currency = self.env['res.currency'].browse(currency_id) currency = currency.with_context(date=date_invoice or fields.Date.context_today(self)) base = currency.compute(base * factor, company.currency_id, round=False) return {'value': {'base_amount': base}} @api.multi def amount_change(self, amount, currency_id=False, company_id=False, date_invoice=False): factor = self.factor_tax if self else 1 company = self.env['res.company'].browse(company_id) if currency_id and company.currency_id: currency = self.env['res.currency'].browse(currency_id) currency = currency.with_context(date=date_invoice or fields.Date.context_today(self)) amount = currency.compute(amount * factor, company.currency_id, round=False) return {'value': {'tax_amount': amount}} @api.v8 def 
compute(self, invoice): tax_grouped = {} currency = invoice.currency_id.with_context(date=invoice.date_invoice or fields.Date.context_today(invoice)) company_currency = invoice.company_id.currency_id for line in invoice.invoice_line: taxes = line.invoice_line_tax_id.compute_all( (line.price_unit * (1 - (line.discount or 0.0) / 100.0)), line.quantity, line.product_id, invoice.partner_id)['taxes'] for tax in taxes: val = { 'invoice_id': invoice.id, 'name': tax['name'], 'amount': tax['amount'], 'manual': False, 'sequence': tax['sequence'], 'base': currency.round(tax['price_unit'] * line['quantity']), } if invoice.type in ('out_invoice','in_invoice'): val['base_code_id'] = tax['base_code_id'] val['tax_code_id'] = tax['tax_code_id'] val['base_amount'] = currency.compute(val['base'] * tax['base_sign'], company_currency, round=False) val['tax_amount'] = currency.compute(val['amount'] * tax['tax_sign'], company_currency, round=False) val['account_id'] = tax['account_collected_id'] or line.account_id.id val['account_analytic_id'] = tax['account_analytic_collected_id'] else: val['base_code_id'] = tax['ref_base_code_id'] val['tax_code_id'] = tax['ref_tax_code_id'] val['base_amount'] = currency.compute(val['base'] * tax['ref_base_sign'], company_currency, round=False) val['tax_amount'] = currency.compute(val['amount'] * tax['ref_tax_sign'], company_currency, round=False) val['account_id'] = tax['account_paid_id'] or line.account_id.id val['account_analytic_id'] = tax['account_analytic_paid_id'] # If the taxes generate moves on the same financial account as the invoice line # and no default analytic account is defined at the tax level, propagate the # analytic account from the invoice line to the tax line. This is necessary # in situations were (part of) the taxes cannot be reclaimed, # to ensure the tax move is allocated to the proper analytic account. 
if not val.get('account_analytic_id') and line.account_analytic_id and val['account_id'] == line.account_id.id: val['account_analytic_id'] = line.account_analytic_id.id key = (val['tax_code_id'], val['base_code_id'], val['account_id']) if not key in tax_grouped: tax_grouped[key] = val else: tax_grouped[key]['base'] += val['base'] tax_grouped[key]['amount'] += val['amount'] tax_grouped[key]['base_amount'] += val['base_amount'] tax_grouped[key]['tax_amount'] += val['tax_amount'] for t in tax_grouped.values(): t['base'] = currency.round(t['base']) t['amount'] = currency.round(t['amount']) t['base_amount'] = currency.round(t['base_amount']) t['tax_amount'] = currency.round(t['tax_amount']) return tax_grouped @api.v7 def compute(self, cr, uid, invoice_id, context=None): recs = self.browse(cr, uid, [], context) invoice = recs.env['account.invoice'].browse(invoice_id) return recs.compute(invoice) @api.model def move_line_get(self, invoice_id): res = [] self._cr.execute( 'SELECT * FROM account_invoice_tax WHERE invoice_id = %s', (invoice_id,) ) for row in self._cr.dictfetchall(): if not (row['amount'] or row['tax_code_id'] or row['tax_amount']): continue res.append({ 'type': 'tax', 'name': row['name'], 'price_unit': row['amount'], 'quantity': 1, 'price': row['amount'] or 0.0, 'account_id': row['account_id'], 'tax_code_id': row['tax_code_id'], 'tax_amount': row['tax_amount'], 'account_analytic_id': row['account_analytic_id'], }) return res class res_partner(models.Model): # Inherits partner and adds invoice information in the partner form _inherit = 'res.partner' invoice_ids = fields.One2many('account.invoice', 'partner_id', string='Invoices', readonly=True) def _find_accounting_partner(self, partner): ''' Find the partner for which the accounting entries will be created ''' return partner.commercial_partner_id class mail_compose_message(models.Model): _inherit = 'mail.compose.message' @api.multi def send_mail(self): context = self._context if context.get('default_model') 
== 'account.invoice' and \ context.get('default_res_id') and context.get('mark_invoice_as_sent'): invoice = self.env['account.invoice'].browse(context['default_res_id']) invoice = invoice.with_context(mail_post_autofollow=True) invoice.write({'sent': True}) invoice.message_post(body=_("Invoice sent")) return super(mail_compose_message, self).send_mail() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
MikeAmy/django
tests/admin_widgets/models.py
227
4760
from __future__ import unicode_literals from django.contrib.auth.models import User from django.db import models from django.utils.encoding import python_2_unicode_compatible class MyFileField(models.FileField): pass @python_2_unicode_compatible class Member(models.Model): name = models.CharField(max_length=100) birthdate = models.DateTimeField(blank=True, null=True) gender = models.CharField(max_length=1, blank=True, choices=[('M', 'Male'), ('F', 'Female')]) email = models.EmailField(blank=True) def __str__(self): return self.name @python_2_unicode_compatible class Band(models.Model): name = models.CharField(max_length=100) style = models.CharField(max_length=20) members = models.ManyToManyField(Member) def __str__(self): return self.name @python_2_unicode_compatible class Album(models.Model): band = models.ForeignKey(Band, models.CASCADE) name = models.CharField(max_length=100) cover_art = models.FileField(upload_to='albums') backside_art = MyFileField(upload_to='albums_back', null=True) def __str__(self): return self.name class HiddenInventoryManager(models.Manager): def get_queryset(self): return super(HiddenInventoryManager, self).get_queryset().filter(hidden=False) @python_2_unicode_compatible class Inventory(models.Model): barcode = models.PositiveIntegerField(unique=True) parent = models.ForeignKey('self', models.SET_NULL, to_field='barcode', blank=True, null=True) name = models.CharField(blank=False, max_length=20) hidden = models.BooleanField(default=False) # see #9258 default_manager = models.Manager() objects = HiddenInventoryManager() def __str__(self): return self.name class Event(models.Model): main_band = models.ForeignKey( Band, models.CASCADE, limit_choices_to=models.Q(pk__gt=0), related_name='events_main_band_at', ) supporting_bands = models.ManyToManyField(Band, blank=True, related_name='events_supporting_band_at') start_date = models.DateField(blank=True, null=True) start_time = models.TimeField(blank=True, null=True) description = 
models.TextField(blank=True) link = models.URLField(blank=True) min_age = models.IntegerField(blank=True, null=True) @python_2_unicode_compatible class Car(models.Model): owner = models.ForeignKey(User, models.CASCADE) make = models.CharField(max_length=30) model = models.CharField(max_length=30) def __str__(self): return "%s %s" % (self.make, self.model) class CarTire(models.Model): """ A single car tire. This to test that a user can only select their own cars. """ car = models.ForeignKey(Car, models.CASCADE) class Honeycomb(models.Model): location = models.CharField(max_length=20) class Bee(models.Model): """ A model with a FK to a model that won't be registered with the admin (Honeycomb) so the corresponding raw ID widget won't have a magnifying glass link to select related honeycomb instances. """ honeycomb = models.ForeignKey(Honeycomb, models.CASCADE) class Individual(models.Model): """ A model with a FK to itself. It won't be registered with the admin, so the corresponding raw ID widget won't have a magnifying glass link to select related instances (rendering will be called programmatically in this case). """ name = models.CharField(max_length=20) parent = models.ForeignKey('self', models.SET_NULL, null=True) soulmate = models.ForeignKey('self', models.CASCADE, null=True, related_name='soulmates') class Company(models.Model): name = models.CharField(max_length=20) class Advisor(models.Model): """ A model with a m2m to a model that won't be registered with the admin (Company) so the corresponding raw ID widget won't have a magnifying glass link to select related company instances. 
""" name = models.CharField(max_length=20) companies = models.ManyToManyField(Company) @python_2_unicode_compatible class Student(models.Model): name = models.CharField(max_length=255) def __str__(self): return self.name class Meta: ordering = ('name',) @python_2_unicode_compatible class School(models.Model): name = models.CharField(max_length=255) students = models.ManyToManyField(Student, related_name='current_schools') alumni = models.ManyToManyField(Student, related_name='previous_schools') def __str__(self): return self.name @python_2_unicode_compatible class Profile(models.Model): user = models.ForeignKey('auth.User', models.CASCADE, to_field='username') def __str__(self): return self.user.username
bsd-3-clause
cjhak/b2share
invenio/legacy/websubmit/functions/Send_SRV_Mail.py
13
4192
# This file is part of Invenio. # Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. __revision__ = "$Id$" ## Description: function Send_SRV_Mail ## This function sends an email confirming the revision ## has been carried on with success ## Author: T.Baron ## PARAMETERS: addressesSRV: list of addresses to send this email to. ## categformatDAM: variable used to derive the category of ## the document from its reference. This value might then ## be used to derive the list of addresses ## emailFile: name of the file in which the user's email is ## noteFile: name of the file containing a note from the user import os from invenio.config import CFG_SITE_URL, \ CFG_SITE_NAME, \ CFG_SITE_SUPPORT_EMAIL, \ CFG_SITE_RECORD from invenio.legacy.websubmit.config import CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN from invenio.ext.email import send_email from invenio.legacy.websubmit.functions.Retrieve_Data import Get_Field def Send_SRV_Mail(parameters, curdir, form, user_info=None): """ This function sends an email to warn people a revision has been carried out. Parameters: * notefile: name of the file in which the note can be found * emailfile: name of the file containing the submitter's email * addressesSRV: email addresses of the people who will receive this email (comma separated list). 
this parameter may contain the <CATEG> string. In which case the variable computed from the [categformatDAM] parameter replaces this string. eg.:"<CATEG>-email@cern.ch" * categformatDAM: contains a regular expression used to compute the category of the document given the reference of the document. eg.: if [categformatAFP]="TEST-<CATEG>-.*" and the reference of the document is "TEST-CATEGORY1-2001-001", then the computed category equals "CATEGORY1" """ global rn,doctype,sysno # variables declaration FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL) addresses = parameters['addressesSRV'] addresses = addresses.strip() if parameters['emailFile'] is not None and parameters['emailFile']!="" and os.path.exists("%s/%s" % (curdir,parameters['emailFile'])): fp = open("%s/%s" % (curdir,parameters['emailFile']), "r") SuE = fp.read() fp.close() else: SuE = "" SuE = SuE.replace("\n",",") if parameters['noteFile'] is not None and parameters['noteFile']!= "" and os.path.exists("%s/%s" % (curdir,parameters['noteFile'])): fp = open("%s/%s" % (curdir,parameters['noteFile']), "r") note = fp.read() fp.close() else: note = "" title = Get_Field("245__a",sysno) author = Get_Field('100__a',sysno) author += Get_Field('700__a',sysno) # create message message = "A revised version of document %s has been submitted.\n\nTitle: %s\nAuthor(s): %s\nURL: <%s/%s/%s>%s" % (rn,title,author,CFG_SITE_URL,CFG_SITE_RECORD,sysno,note) # send the email send_email(FROMADDR, SuE, "%s revised" % rn, message, copy_to_admin=CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN) return ""
gpl-2.0
ksmithbaylor/tomorrow-theme
ipythonqt/tomorrownight.py
31
2459
# -*- coding: utf-8 -*- """ pygments.styles.autumn ~~~~~~~~~~~~~~~~~~~~~~ Tomorrow Night theme for ipython qtconsole (invoke with ipython qtconsole --style=tomorrownight) See https://github.com/chriskempson/tomorrow-theme for theme info :copyright: Copyright 2012 André Risnes, risnes@gmail.com :license: BSD, see LICENSE for details. """ from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace, Text, Punctuation class TomorrownightStyle(Style): """ Tomorrow Night theme for ipython qtconsole """ background_color = '#1d1f21' highlight_color = '#373b41' styles = { Whitespace: background_color, Text: '#c5c8c6', Punctuation: '#81a2be', Comment: '#b5bd68', Comment.Preproc: 'italic #b5bd68', Comment.Special: 'italic #b5bd68', Keyword: '#81a2be', Keyword.Type: '#f0c674', Keyword.Namespace: '#de935f', Operator.Word: '#81a2be', Name: '#de935f', Name.Builtin: '#de935f', Name.Function: '#8abeb7', Name.Class: '#f0c674', Name.Namespace: '#81a2be', Name.Variable: '#de935f', Name.Constant: '#c5c8c6', Name.Entity: 'bold #00aaaa', Name.Attribute: '#de935f', Name.Tag: 'bold #b5bd68', Name.Decorator: '#cc6666', String: '#b5bd68', String.Symbol: '#b5bd68', String.Regex: '#b5bd68', Number: '#cc6666', Generic.Heading: 'bold #c5c8c6', Generic.Subheading: 'bold #c5c8c6', Generic.Deleted: '#de935f', Generic.Inserted: '#8abeb7', Generic.Error: '#cc6666', Generic.Emph: 'italic', Generic.Strong: 'bold', Generic.Prompt: '#b5bd68', Generic.Output: '#c5c8c6', Generic.Traceback: '#c5c8c6', Error: '#cc6666' }
mit
xinhunbie/NS3-
src/aodv/bindings/callbacks_list.py
22
2145
callback_classes = [ ['unsigned char', 'ns3::Ptr<ns3::QueueItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::QueueItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::Socket::SocketErrno', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::Ipv4Route>', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::WifiMacHeader const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ipv4Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::ArpCache const>', 'ns3::Ipv4Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 
'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'], ]
gpl-2.0
janocat/odoo
addons/account_bank_statement_extensions/res_partner_bank.py
381
1582
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # # Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv class res_partner_bank(osv.osv): _inherit = 'res.partner.bank' def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100): if not args: args = [] ids = [] if name: ids = self.search(cr, user, [('acc_number', operator, name)] + args, limit=limit) else: ids = self.search(cr, user, args, context=context, limit=limit) return self.name_get(cr, user, ids, context=context) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
dob71/x2swn
skeinforge/skeinforge_application/skeinforge_plugins/craft_plugins/export_plugins/gcode_step.py
12
9483
""" This page is in the table of contents. Gcode step is an export plugin to convert gcode from float position to number of steps. An export plugin is a script in the export_plugins folder which has the getOutput function, the globalIsReplaceable variable and if it's output is not replaceable, the writeOutput function. It is meant to be run from the export tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name. The getOutput function of this script takes a gcode text and returns it with the positions converted into number of steps. The writeOutput function of this script takes a gcode text and writes that with the positions converted into number of steps. ==Settings== ===Add Feed Rate Even When Unchanging=== Default is on. When selected, the feed rate will be added even when it did not change from the previous line. ===Add Space Between Words=== Default is on. When selected, a space will be added between each gcode word. ===Add Z Even When Unchanging=== Default is on. When selected, the z word will be added even when it did not change. ===Feed Rate Step Length=== Default is 0.1 millimeters/second. Defines the feed rate step length. ===Offset=== ====X Offset==== Default is zero. Defines the X Offset. ====Y Offset==== Default is zero. Defines the Y Offset. ====Z Offset==== Default is zero. Defines the Z Offset. ===Step Length=== ====E Step Length==== Default is 0.1 millimeters. Defines the E extrusion distance step length. ===Radius Rate Step Length=== Default is 0.1 millimeters/second. Defines the radius step length. ====X Step Length==== Default is 0.1 millimeters. Defines the X axis step length. ====Y Step Length==== Default is 0.1 millimeters. Defines the Y axis step length. ====Z Step Length==== Default is 0.01 millimeters. Defines the Z axis step length. 
""" from __future__ import absolute_import import __init__ from fabmetheus_utilities import archive from fabmetheus_utilities import gcodec from fabmetheus_utilities import settings from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret from skeinforge_application.skeinforge_utilities import skeinforge_polyfile from skeinforge_application.skeinforge_utilities import skeinforge_profile from struct import Struct import cStringIO import os import sys __author__ = 'Enrique Perez (perez_enrique@yahoo.com)' __date__ = '$Date: 2008/21/04 $' __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' # This is true if the output is text and false if it is binary. globalIsReplaceable = True def getCharacterIntegerString(character, offset, splitLine, stepLength): 'Get a character and integer string.' floatValue = getFloatFromCharacterSplitLine(character, splitLine) if floatValue == None: return '' floatValue += offset integerValue = int(round(float(floatValue / stepLength))) return character + str(integerValue) def getFloatFromCharacterSplitLine(character, splitLine): 'Get the float after the first occurence of the character in the split line.' lineFromCharacter = gcodec.getStringFromCharacterSplitLine(character, splitLine) if lineFromCharacter == None: return None return float(lineFromCharacter) def getNewRepository(): 'Get new repository.' return GcodeStepRepository() def getOutput(gcodeText, repository=None): 'Get the exported version of a gcode file.' if gcodeText == '': return '' if repository == None: repository = GcodeStepRepository() settings.getReadRepository(repository) return GcodeStepSkein().getCraftedGcode(repository, gcodeText) def writeOutput( fileName, gcodeText = ''): 'Write the exported version of a gcode file.' 
gcodeText = gcodec.getGcodeFileText(fileName, gcodeText) repository = GcodeStepRepository() settings.getReadRepository(repository) output = getOutput(gcodeText, repository) suffixFileName = fileName[: fileName.rfind('.')] + '_gcode_step.gcode' archive.writeFileText(suffixFileName, output) print('The converted file is saved as ' + archive.getSummarizedFileName(suffixFileName)) class GcodeStepRepository: 'A class to handle the export settings.' def __init__(self): 'Set the default settings, execute title & settings fileName.' skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.export_plugins.gcode_step.html', self) self.addFeedRateEvenWhenUnchanging = settings.BooleanSetting().getFromValue('Add Feed Rate Even When Unchanging', self, True) self.addSpaceBetweenWords = settings.BooleanSetting().getFromValue('Add Space Between Words', self, True) self.addZEvenWhenUnchanging = settings.BooleanSetting().getFromValue('Add Z Even When Unchanging', self, True) self.fileNameInput = settings.FileNameInput().getFromFileName([('Gcode text files', '*.gcode')], 'Open File to be Converted to Gcode Step', self, '') self.feedRateStepLength = settings.FloatSpin().getFromValue(0.0, 'Feed Rate Step Length (millimeters/second)', self, 1.0, 0.1) settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- Offset -', self ) self.xOffset = settings.FloatSpin().getFromValue(-100.0, 'X Offset (millimeters)', self, 100.0, 0.0) self.yOffset = settings.FloatSpin().getFromValue(-100.0, 'Y Offset (millimeters)', self, 100.0, 0.0) self.zOffset = settings.FloatSpin().getFromValue(-10.0, 'Z Offset (millimeters)', self, 10.0, 0.0) settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- Step Length -', self ) self.eStepLength = settings.FloatSpin().getFromValue(0.0, 'E Step Length (float)', self, 1.0, 0.1) self.radiusStepLength = settings.FloatSpin().getFromValue(0.0, 'Radius Step Length 
(millimeters)', self, 1.0, 0.1) self.xStepLength = settings.FloatSpin().getFromValue(0.0, 'X Step Length (millimeters)', self, 1.0, 0.1) self.yStepLength = settings.FloatSpin().getFromValue(0.0, 'Y Step Length (millimeters)', self, 1.0, 0.1) self.zStepLength = settings.FloatSpin().getFromValue(0.0, 'Z Step Length (millimeters)', self, 0.2, 0.01) self.executeTitle = 'Convert to Gcode Step' def execute(self): 'Convert to gcode step button has been clicked.' fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, ['.gcode'], self.fileNameInput.wasCancelled) for fileName in fileNames: writeOutput(fileName) class GcodeStepSkein: 'A class to convert gcode into 16 byte binary segments.' def __init__(self): self.oldFeedRateString = None self.oldZString = None self.output = cStringIO.StringIO() def addCharacterInteger(self, character, lineStringIO, offset, splitLine, stepLength): 'Add a character and integer to line string.' characterIntegerString = getCharacterIntegerString(character, offset, splitLine, stepLength) self.addStringToLine(lineStringIO, characterIntegerString) def addLine(self, line): 'Add a line of text and a newline to the output.' self.output.write(line + '\n') def addStringToLine(self, lineStringIO, wordString): 'Add a character and integer to line string.' if wordString == '': return if self.repository.addSpaceBetweenWords.value: lineStringIO.write(' ') lineStringIO.write(wordString) def getCraftedGcode(self, repository, gcodeText): 'Parse gcode text and store the gcode.' self.repository = repository lines = archive.getTextLines(gcodeText) for line in lines: self.parseLine(line) return self.output.getvalue() def parseLine(self, line): 'Parse a gcode line.' 
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) firstWord = gcodec.getFirstWord(splitLine) if len(firstWord) < 1: return firstLetter = firstWord[0] if firstLetter == '(': return if firstWord != 'G1' and firstWord != 'G2' and firstWord != 'G3': self.addLine(line) return lineStringIO = cStringIO.StringIO() lineStringIO.write(firstWord) self.addCharacterInteger('I', lineStringIO, 0.0, splitLine, self.repository.xStepLength.value) self.addCharacterInteger('J', lineStringIO, 0.0, splitLine, self.repository.yStepLength.value) self.addCharacterInteger('R', lineStringIO, 0.0, splitLine, self.repository.radiusStepLength.value) self.addCharacterInteger('X', lineStringIO, self.repository.xOffset.value, splitLine, self.repository.xStepLength.value) self.addCharacterInteger('Y', lineStringIO, self.repository.yOffset.value, splitLine, self.repository.yStepLength.value) zString = getCharacterIntegerString('Z', self.repository.zOffset.value, splitLine, self.repository.zStepLength.value) feedRateString = getCharacterIntegerString('F', 0.0, splitLine, self.repository.feedRateStepLength.value) if zString != '': if zString != self.oldZString or self.repository.addZEvenWhenUnchanging.value: self.addStringToLine(lineStringIO, zString) if feedRateString != '': if feedRateString != self.oldFeedRateString or self.repository.addFeedRateEvenWhenUnchanging.value: self.addStringToLine(lineStringIO, feedRateString) self.addCharacterInteger('E', lineStringIO, 0.0, splitLine, self.repository.eStepLength.value) self.addLine(lineStringIO.getvalue()) self.oldFeedRateString = feedRateString self.oldZString = zString def main(): 'Display the export dialog.' if len(sys.argv) > 1: writeOutput(' '.join(sys.argv[1 :])) else: settings.startMainLoopFromConstructor(getNewRepository()) if __name__ == '__main__': main()
gpl-3.0
PnX-SI/GeoNature
contrib/gn_module_validation/backend/models.py
1
3824
from sqlalchemy import ForeignKey
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import column_property
from sqlalchemy.sql import exists

from geonature.core.gn_commons.models import TMedias
from utils_flask_sqla.serializers import serializable
from utils_flask_sqla_geo.serializers import geoserializable
from geonature.utils.env import DB
from geoalchemy2 import Geometry


@serializable
@geoserializable
class VSyntheseValidation(DB.Model):
    """Read-only mapping of the gn_commons.v_synthese_validation_forwebapp view,
    used by the validation module's web app.

    Fix: the original declared ``id_nomenclature_source_status`` twice; the
    second assignment simply rebound the class attribute, so the duplicate
    line was removed.
    """
    __tablename__ = "v_synthese_validation_forwebapp"
    __table_args__ = {"schema": "gn_commons"}
    id_synthese = DB.Column(
        DB.Integer,
        ForeignKey("gn_synthese.v_synthese_decode_nomenclatures.id_synthese"),
        primary_key=True,
    )
    unique_id_sinp = DB.Column(UUID(as_uuid=True))
    unique_id_sinp_grp = DB.Column(UUID(as_uuid=True))
    id_source = DB.Column(DB.Integer)
    entity_source_pk_value = DB.Column(DB.Integer)
    id_dataset = DB.Column(DB.Integer)
    # NOTE(review): dataset_name is declared Integer although the name suggests
    # text — the column type mirrors the view's definition; confirm upstream.
    dataset_name = DB.Column(DB.Integer)
    id_acquisition_framework = DB.Column(DB.Integer)
    count_min = DB.Column(DB.Integer)
    count_max = DB.Column(DB.Integer)
    cd_nom = DB.Column(DB.Integer)
    cd_ref = DB.Column(DB.Unicode)
    nom_cite = DB.Column(DB.Unicode)
    nom_valide = DB.Column(DB.Unicode)
    nom_vern = DB.Column(DB.Unicode)
    lb_nom = DB.Column(DB.Unicode)
    meta_v_taxref = DB.Column(DB.Unicode)
    sample_number_proof = DB.Column(DB.Unicode)
    digital_proof = DB.Column(DB.Unicode)
    non_digital_proof = DB.Column(DB.Unicode)
    altitude_min = DB.Column(DB.Unicode)
    altitude_max = DB.Column(DB.Unicode)
    the_geom_4326 = DB.Column(Geometry("GEOMETRY", 4326))
    date_min = DB.Column(DB.DateTime)
    date_max = DB.Column(DB.DateTime)
    validator = DB.Column(DB.Unicode)
    observers = DB.Column(DB.Unicode)
    determiner = DB.Column(DB.Unicode)
    id_digitiser = DB.Column(DB.Integer)
    comment_context = DB.Column(DB.Unicode)
    comment_description = DB.Column(DB.Unicode)
    meta_validation_date = DB.Column(DB.DateTime)
    meta_create_date = DB.Column(DB.DateTime)
    meta_update_date = DB.Column(DB.DateTime)
    last_action = DB.Column(DB.Unicode)
    id_nomenclature_geo_object_nature = DB.Column(DB.Integer)
    id_nomenclature_info_geo_type = DB.Column(DB.Integer)
    id_nomenclature_grp_typ = DB.Column(DB.Integer)
    id_nomenclature_obs_technique = DB.Column(DB.Integer)
    id_nomenclature_bio_status = DB.Column(DB.Integer)
    id_nomenclature_bio_condition = DB.Column(DB.Integer)
    id_nomenclature_naturalness = DB.Column(DB.Integer)
    id_nomenclature_exist_proof = DB.Column(DB.Integer)
    id_nomenclature_diffusion_level = DB.Column(DB.Integer)
    id_nomenclature_life_stage = DB.Column(DB.Integer)
    id_nomenclature_sex = DB.Column(DB.Integer)
    id_nomenclature_obj_count = DB.Column(DB.Integer)
    id_nomenclature_type_count = DB.Column(DB.Integer)
    id_nomenclature_sensitivity = DB.Column(DB.Integer)
    id_nomenclature_observation_status = DB.Column(DB.Integer)
    id_nomenclature_blurring = DB.Column(DB.Integer)
    # fixed: this column was declared twice in the original; the duplicate
    # assignment was dead (it just rebound the same class attribute).
    id_nomenclature_source_status = DB.Column(DB.Integer)
    id_nomenclature_valid_status = DB.Column(DB.Integer)
    mnemonique = DB.Column(DB.Unicode)
    cd_nomenclature_validation_status = DB.Column(DB.Unicode)
    label_default = DB.Column(DB.Unicode)
    validation_auto = DB.Column(DB.Boolean)
    validation_date = DB.Column(DB.DateTime)
    geojson = DB.Column(DB.Unicode)
    # True when at least one media row is attached to this observation's UUID.
    has_medias = column_property(
        exists([TMedias.id_media]).\
            where(TMedias.uuid_attached_row == unique_id_sinp)
    )

    def get_geofeature(self, recursif=False, fields=()):
        """Return the row serialized as a GeoJSON feature built on the_geom_4326."""
        return self.as_geofeature(
            "the_geom_4326", "id_synthese", recursif, fields=fields
        )
gpl-3.0
Sixshaman/networkx
networkx/readwrite/tests/test_yaml.py
23
1345
""" Unit tests for yaml. """ import os,tempfile from nose import SkipTest from nose.tools import assert_equal import networkx as nx from networkx.testing import assert_edges_equal, assert_nodes_equal class TestYaml(object): @classmethod def setupClass(cls): global yaml try: import yaml except ImportError: raise SkipTest('yaml not available.') def setUp(self): self.build_graphs() def build_graphs(self): self.G = nx.Graph(name="test") e = [('a','b'),('b','c'),('c','d'),('d','e'),('e','f'),('a','f')] self.G.add_edges_from(e) self.G.add_node('g') self.DG = nx.DiGraph(self.G) self.MG = nx.MultiGraph() self.MG.add_weighted_edges_from([(1,2,5),(1,2,5),(1,2,1),(3,3,42)]) def assert_equal(self, G, data=False): (fd, fname) = tempfile.mkstemp() nx.write_yaml(G, fname) Gin = nx.read_yaml(fname) assert_nodes_equal(list(G), list(Gin)) assert_edges_equal(G.edges(data=data), Gin.edges(data=data)) os.close(fd) os.unlink(fname) def testUndirected(self): self.assert_equal(self.G, False) def testDirected(self): self.assert_equal(self.DG, False) def testMultiGraph(self): self.assert_equal(self.MG, True)
bsd-3-clause
madscatt/zazzie_1.5
trunk/sassie/simulate/energy_minimization/namd_minimize.py
1
14848
''' SASSIE: Copyright (C) 2011 Joseph E. Curtis, Ph.D. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import os import sys import locale import time import numpy import subprocess from write_namd_input import * from sassie.simulate.energy_minimization.prepend_namd_input import * import sasmol.sasmol as sasmol import sassie.util.sasconfig as sasconfig # NAMD_MINIMIZE # # 12/05/2004 -- initial coding : jc # 01/02/2011 -- added sasmol support : jc # 08/26/2011 -- adapted for mdx : jc # 06/16/2012 -- adapted for namd v. 2.9 : jc # # LC 1 2 3 4 5 6 7 # LC4567890123456789012345678901234567890123456789012345678901234567890123456789 # * ** ''' MINIMIZE is the module that contains the functions that are used to run a series of energy minimization calculations on a set of structures in a supplied pdb/dcd file. This module is called from Structure Minization in the main GUI through the graphical_minimize.py script. REFERENCE: J. C. Phillips et al. 
Journal of Computational Chemistry 26 1781-1802 (2005) ''' def unpack_variables(variables): runname = variables['runname'][0] infile = variables['infile'][0] pdbfile = variables['pdbfile'][0] outfile = variables['outfile'][0] nsteps = variables['nsteps'][0] parmfile = variables['parmfile'][0] psffile = variables['psffile'][0] ncpu = variables['ncpu'][0] keepout = variables['keepout'][0] dcdfreq = variables['dcdfreq'][0] infiletype = variables['infiletype'][0] md = variables['md'][0] mdsteps = variables['mdsteps'][0] dielect = variables['dielect'][0] temperature = variables['temperature'][0] use_external_input_file = variables['use_external_input_file'][0] external_input_file = variables['external_input_file'][0] velocity_restart_file = variables['velocity_restart_file'][0] extended_system_restart_file = variables['extended_system_restart_file'][0] return runname, infile, pdbfile, outfile, nsteps, parmfile, psffile, ncpu, infiletype, keepout, dcdfreq, md, mdsteps, dielect, temperature, use_external_input_file, external_input_file, velocity_restart_file, extended_system_restart_file def print_failure(message, txtOutput): txtOutput.put("\n\n>>>> RUN FAILURE <<<<\n") txtOutput.put(message) st = ''.join(['=' for x in xrange(60)]) txtOutput.put("\n%s \n" % (st)) time.sleep(1.5) return def minimize(variables, txtOutput): ''' MINIMIZE is the function to read in variables from GUI input and used to run a series of energy minimization calculations on a set of structures in a supplied pdb/dcd file. 
INPUT: variable descriptions: runname: run_name infile: input pdb or dcd filename pdbfile: input pdb file (reference) nsteps: number of steps parmfile: path nand name of topology file psffile: name of psf file ncpu number of cpu to use keepout: keep output files (0==no, 1==yes) dcdfreq: save individual dcd frequency OUTPUT: txtOutput: TK handler for output to GUI textbox files stored in ~/run_name/energy_minimization directory: outfile: output filename (dcd usually) ''' runname, infile, pdbfile, outfile, nsteps, parmfile, psffile, ncpu, infiletype, keepout, dcdfreq, md, mdsteps, dielect, temperature, use_external_input_file, external_input_file, velocity_restart_file, extended_system_restart_file = unpack_variables( variables) path = runname + '/energy_minimization/' print 'path = ', path print 'infile = ', infile vers = 'version 0.7 : 06/16/12 : jc' direxist = os.path.exists(path) if(direxist == 0): try: result = os.system('mkdir -p ' + path) os.system('cp ' + psffile + ' ' + path) os.system('cp ' + pdbfile + ' ' + path) except: message = 'can not create project directory: ' + path message += '\nstopping here\n' print_failure(message, txtOutput) if(result != 0): message = 'can not create project directory: ' + path message += '\nstopping here\n' print_failure(message, txtOutput) m1 = sasmol.SasMol(0) m1.read_pdb(pdbfile) print 'infiletype = ', infiletype try: if(infile[-3:] == 'dcd'): infiletype = 'dcd' elif(infile[-3:] == 'pdb'): infiletype = 'pdb' except: message = 'input filename is a PDB or DCD file but it must end with ".pdb" or ".dcd" ' message += ' : stopping here' print_failure(message, txtOutput) if(infiletype == 'dcd'): dcdfile = m1.open_dcd_read(infile) nf = dcdfile[2] else: m1.read_pdb(infile) nf = m1.coor()[:, 0, 0].shape[0] print 'number of frames = ', nf # ttxt=time.ctime() ttxt = time.asctime(time.gmtime(time.time())) st = ''.join(['=' for x in xrange(60)]) txtOutput.put("\n%s \n" % (st)) txtOutput.put("DATA FROM RUN: %s \n\n" % (ttxt)) dcdlist = 
[] coorlist = [] for i in range(nf): print '\nminimizing frame ', i + 1, ' of ', nf print 'minimizing frame ', i + 1, ' of ', nf print 'minimizing frame ', i + 1, ' of ', nf print 'writing temporary PDB file' if(infiletype == 'dcd'): m1.read_dcd_step(dcdfile, i) m1.write_pdb(path + 'junk.pdb', 0, 'w') else: m1.write_pdb(path + 'junk.pdb', i, 'w') print 'writing temporary NAMD input file' if(i < 9): istr = '0000' + str(i + 1) elif(i < 99): istr = '000' + str(i + 1) elif(i < 999): istr = '00' + str(i + 1) elif(i < 9999): istr = '0' + str(i + 1) elif(i < 99999): istr = str(i + 1) else: print 'wow, man!' istr = str(i + 1) thisdcd = path + 'min_' + istr + '.dcd' dcdlist.append(thisdcd) print 'use_external_input_file = ', use_external_input_file print 'type(UEIF) = ', str(type(use_external_input_file)) if use_external_input_file: prepend_namd_input('temp.inp', path + 'junk.pdb', psffile, thisdcd, parmfile, external_input_file, velocity_restart_file, extended_system_restart_file) else: write_namd_input('temp.inp', str(nsteps), str( dcdfreq), path + 'junk.pdb', psffile, thisdcd, parmfile, md, mdsteps, dielect, temperature) print 'starting minimization ( nfiles = ', nf, ')' ttime = time.ctime() runstring = vers + ' : ' + outfile + ' run stated at : ' + ttime print runstring print 'starting namd' bin_path = sasconfig.__bin_path__ + os.sep nst = bin_path + sasconfig.__namd_run_command__ + ' ' + sasconfig.__namd_run_additional_arguments__ + ' +p' + str(ncpu) + ' temp.inp >& junk.out &' p = subprocess.Popen(nst, shell=True, executable='/bin/bash') sts = os.waitpid(p.pid, 0)[1] print 'p.pid = ', p.pid thisjob = str(int(p.pid) + 1) completion_string = sasconfig.__namd_completion_string__ run = 1 esteps = 0 while(run == 1): time.sleep(10) lsst = 'ls junk.out | grep -c "junk.out" ' lsfile = os.popen(lsst, 'r').readlines() stls = string.split(lsfile[0]) nstls = locale.atoi(stls[0]) if(nstls > 0): check_completion = os.popen( 'tail -15 junk.out | grep ' + completion_string, 
'r').readlines() fail_check = os.popen( 'tail -200 junk.out | grep "Abort"', 'r').readlines() if len(fail_check) == 0: fail_check = os.popen( 'tail -200 junk.out | grep "FATAL"', 'r').readlines() if len(fail_check) == 0: fail_check = os.popen( 'tail -200 junk.out | grep "ERROR: Exiting prematurely"', 'r').readlines() if(len(check_completion) > 0): print 'finished minimization' run = 0 if(len(fail_check) > 0): message = '\n>>>> investigate error in junk.out file <<<<\n\n' message += "".join(fail_check) + '\n' print_failure(message, txtOutput) return() time.sleep(10) fraction_done = (float(i + 1) / float(nf)) progress_string = 'COMPLETED ' + \ str(i + 1) + ' of ' + str(nf) + ' : ' + \ str(fraction_done * 100.0) + ' % done' print('%s\n' % progress_string) print('%s\n' % progress_string) report_string = 'STATUS\t' + str(fraction_done) txtOutput.put(report_string) if(keepout == 1): os.system('mv junk.out ' + path + 'min_' + istr + '.out') else: os.system('rm -f junk.out') os.system('rm -f junk.coor junk.xs* junk.vel ') try: os.system('rm -f ' + path + 'junk.pdb ') except: print '\n> could not move junk.pdb' os.system('rm -f ' + path + '*.BAK') print 'thisdcd = ', thisdcd temp_mol = sasmol.SasMol(0) temp_mol.read_pdb(pdbfile, fastread=True) header = temp_mol.open_dcd_read(thisdcd) temp_mol.close_dcd_read(header[0]) ndcdfiles = header[2] # get the last configuration from the dcd file if ndcdfiles > 1: temp_mol.read_dcd(thisdcd) nframes = temp_mol.number_of_frames() natoms = temp_mol.natoms() coor = numpy.zeros((1, natoms, 3), numpy.float32) coor[0, :, :] = temp_mol.coor()[nframes - 1] os.system('rm -f ' + thisdcd) temp_mol.setCoor(coor) temp_mol.write_dcd(thisdcd) # temp_mol.write_pdb(thisdcd+'.pdb',0,'w') if ndcdfiles < 1: print 'ndcdfiles = ', ndcdfiles message = 'Did not save any dcd files. Decrease dcd write frequency?' 
message + ' : stopping here' print_failure(message, txtOutput) sys.exit() print '\n> finished minimizing all frames\n' if(infiletype == 'dcd'): m1.close_dcd_read(dcdfile[0]) final_mol = sasmol.SasMol(0) final_molw = sasmol.SasMol(1) final_mol.read_pdb(pdbfile, fastread=True) final_molw.read_pdb(pdbfile, fastread=True) finaldcdfile = final_molw.open_dcd_write(path + outfile) for i in range(len(dcdlist)): print 'i = ', i final_mol.read_dcd(dcdlist[i]) final_molw.setCoor(final_mol.coor()) final_molw.write_dcd_step(finaldcdfile, 0, i + 1) final_molw.write_pdb(path + outfile + '.pdb', 0, 'w') final_molw.close_dcd_write(finaldcdfile) rmcmd = 'rm -f ' for i in range(len(dcdlist)): rmcmd = rmcmd + dcdlist[i] + ' ' print 'rmcmd = ', rmcmd os.system(rmcmd) os.system('mv temp.inp ' + path) txtOutput.put("Total number of frames = %d\n\n" % (nf)) txtOutput.put("Minimized structures saved to : %s\n" % ('./' + path)) txtOutput.put("\n%s \n" % (st)) time.sleep(0.5) print 'NAMD MINIMIZATION IS DONE' return() if __name__ == '__main__': ### BEGIN USER EDIT ### ### BEGIN USER EDIT ### ### BEGIN USER EDIT ### runname = 'run_0' infile = 'ten_mer.pdb' pdbfile = 'ten_mer.pdb' outfile = 'min_ten_mer.dcd' nsteps = '100' parmfile = '/share/apps/local/bin/toppar/par_all27_prot_na.inp' psffile = 'ten_mer.psf' ncpu = '2' keepout = '1' dcdfreq = '20' infiletype = 'pdb' md = '0' mdsteps = '10' dielect = '80' temperature = '300.0' use_external_input_file = 'True' external_input_file = 'external_input_2.inp' velocity_restart_file = 'False' extended_system_restart_file = 'False' ### END USER EDIT ### ### END USER EDIT ### ### END USER EDIT ### svariables = {} svariables['runname'] = (runname, 'string') svariables['infile'] = (infile, 'string') svariables['pdbfile'] = (pdbfile, 'string') svariables['outfile'] = (outfile, 'string') svariables['nsteps'] = (nsteps, 'int') svariables['parmfile'] = (parmfile, 'string') svariables['psffile'] = (psffile, 'string') svariables['ncpu'] = (ncpu, 'int') 
svariables['keepout'] = (keepout, 'int') svariables['dcdfreq'] = (dcdfreq, 'int') svariables['infiletype'] = (infiletype, 'string') svariables['md'] = (md, 'int') svariables['mdsteps'] = (mdsteps, 'int') svariables['dielect'] = (dielect, 'float') svariables['temperature'] = (temperature, 'float') svariables['use_external_input_file'] = ( use_external_input_file, 'boolean') svariables['external_input_file'] = (external_input_file, 'string') svariables['velocity_restart_file'] = (velocity_restart_file, 'string') svariables['extended_system_restart_file'] = ( extended_system_restart_file, 'string') import sassie.interface.input_filter as input_filter import sassie.interface.minimize_filter as minimize_filter error, variables = input_filter.type_check_and_convert(svariables) if(len(error) != 0): print 'error = ', error sys.exit() else: error = minimize_filter.check_minimize(variables) if(len(error) != 0): print 'error = ', error sys.exit() import multiprocessing txtQueue = multiprocessing.JoinableQueue() process = multiprocessing.Process( target=minimize, args=(variables, txtQueue)) process.start()
gpl-3.0
mjirayu/sit_academy
lms/djangoapps/shoppingcart/reports.py
70
10708
""" Objects and functions related to generating CSV reports """ from decimal import Decimal import unicodecsv from django.utils.translation import ugettext as _ from courseware.courses import get_course_by_id from course_modes.models import CourseMode from shoppingcart.models import CertificateItem, OrderItem from student.models import CourseEnrollment from util.query import use_read_replica_if_available from xmodule.modulestore.django import modulestore class Report(object): """ Base class for making CSV reports related to revenue, enrollments, etc To make a different type of report, write a new subclass that implements the methods rows and header. """ def __init__(self, start_date, end_date, start_word=None, end_word=None): self.start_date = start_date self.end_date = end_date self.start_word = start_word self.end_word = end_word def rows(self): """ Performs database queries necessary for the report and eturns an generator of lists, in which each list is a separate row of the report. Arguments are start_date (datetime), end_date (datetime), start_word (str), and end_word (str). Date comparisons are start_date <= [date of item] < end_date. """ raise NotImplementedError def header(self): """ Returns the appropriate header based on the report type, in the form of a list of strings. """ raise NotImplementedError def write_csv(self, filelike): """ Given a file object to write to and {start/end date, start/end letter} bounds, generates a CSV report of the appropriate type. """ items = self.rows() writer = unicodecsv.writer(filelike, encoding="utf-8") writer.writerow(self.header()) for item in items: writer.writerow(item) class RefundReport(Report): """ Subclass of Report, used to generate Refund Reports for finance purposes. For each refund between a given start_date and end_date, we find the relevant order number, customer name, date of transaction, date of refund, and any service fees. 
""" def rows(self): query1 = use_read_replica_if_available( CertificateItem.objects.select_related('user__profile').filter( status="refunded", refund_requested_time__gte=self.start_date, refund_requested_time__lt=self.end_date, ).order_by('refund_requested_time')) query2 = use_read_replica_if_available( CertificateItem.objects.select_related('user__profile').filter( status="refunded", refund_requested_time=None, )) query = query1 | query2 for item in query: yield [ item.order_id, item.user.profile.name, item.fulfilled_time, item.refund_requested_time, item.line_cost, item.service_fee, ] def header(self): return [ _("Order Number"), _("Customer Name"), _("Date of Original Transaction"), _("Date of Refund"), _("Amount of Refund"), _("Service Fees (if any)"), ] class ItemizedPurchaseReport(Report): """ Subclass of Report, used to generate itemized purchase reports. For all purchases (verified certificates, paid course registrations, etc) between a given start_date and end_date, we find that purchase's time, order ID, status, quantity, unit cost, total cost, currency, description, and related comments. """ def rows(self): query = use_read_replica_if_available( OrderItem.objects.filter( status="purchased", fulfilled_time__gte=self.start_date, fulfilled_time__lt=self.end_date, ).order_by("fulfilled_time")) for item in query: yield [ item.fulfilled_time, item.order_id, item.status, item.qty, item.unit_cost, item.line_cost, item.currency, item.line_desc, item.report_comments, ] def header(self): return [ _("Purchase Time"), _("Order ID"), _("Status"), _("Quantity"), _("Unit Cost"), _("Total Cost"), _("Currency"), _("Description"), _("Comments") ] class CertificateStatusReport(Report): """ Subclass of Report, used to generate Certificate Status Reports for Ed Services. 
For each course in each university whose name is within the range start_word and end_word, inclusive, (i.e., the letter range H-J includes both Ithaca College and Harvard University), we calculate the total enrollment, audit enrollment, honor enrollment, verified enrollment, total gross revenue, gross revenue over the minimum, and total dollars refunded. """ def rows(self): for course_id in course_ids_between(self.start_word, self.end_word): # If the first letter of the university is between start_word and end_word, then we include # it in the report. These comparisons are unicode-safe. cur_course = get_course_by_id(course_id) university = cur_course.org course = cur_course.number + " " + cur_course.display_name_with_default # TODO add term (i.e. Fall 2013)? counts = CourseEnrollment.objects.enrollment_counts(course_id) total_enrolled = counts['total'] audit_enrolled = counts['audit'] honor_enrolled = counts['honor'] if counts['verified'] == 0: verified_enrolled = 0 gross_rev = Decimal(0.00) gross_rev_over_min = Decimal(0.00) else: verified_enrolled = counts['verified'] gross_rev = CertificateItem.verified_certificates_monetary_field_sum(course_id, 'purchased', 'unit_cost') gross_rev_over_min = gross_rev - (CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd') * verified_enrolled) num_verified_over_the_minimum = CertificateItem.verified_certificates_contributing_more_than_minimum(course_id) # should I be worried about is_active here? 
number_of_refunds = CertificateItem.verified_certificates_count(course_id, 'refunded') if number_of_refunds == 0: dollars_refunded = Decimal(0.00) else: dollars_refunded = CertificateItem.verified_certificates_monetary_field_sum(course_id, 'refunded', 'unit_cost') course_announce_date = "" course_reg_start_date = "" course_reg_close_date = "" registration_period = "" yield [ university, course, course_announce_date, course_reg_start_date, course_reg_close_date, registration_period, total_enrolled, audit_enrolled, honor_enrolled, verified_enrolled, gross_rev, gross_rev_over_min, num_verified_over_the_minimum, number_of_refunds, dollars_refunded ] def header(self): return [ _("University"), _("Course"), _("Course Announce Date"), _("Course Start Date"), _("Course Registration Close Date"), _("Course Registration Period"), _("Total Enrolled"), _("Audit Enrollment"), _("Honor Code Enrollment"), _("Verified Enrollment"), _("Gross Revenue"), _("Gross Revenue over the Minimum"), _("Number of Verified Students Contributing More than the Minimum"), _("Number of Refunds"), _("Dollars Refunded"), ] class UniversityRevenueShareReport(Report): """ Subclass of Report, used to generate University Revenue Share Reports for finance purposes. For each course in each university whose name is within the range start_word and end_word, inclusive, (i.e., the letter range H-J includes both Ithaca College and Harvard University), we calculate the total revenue generated by that particular course. This includes the number of transactions, total payments collected, service fees, number of refunds, and total amount of refunds. 
""" def rows(self): for course_id in course_ids_between(self.start_word, self.end_word): cur_course = get_course_by_id(course_id) university = cur_course.org course = cur_course.number + " " + cur_course.display_name_with_default total_payments_collected = CertificateItem.verified_certificates_monetary_field_sum(course_id, 'purchased', 'unit_cost') service_fees = CertificateItem.verified_certificates_monetary_field_sum(course_id, 'purchased', 'service_fee') num_refunds = CertificateItem.verified_certificates_count(course_id, "refunded") amount_refunds = CertificateItem.verified_certificates_monetary_field_sum(course_id, 'refunded', 'unit_cost') num_transactions = (num_refunds * 2) + CertificateItem.verified_certificates_count(course_id, "purchased") yield [ university, course, num_transactions, total_payments_collected, service_fees, num_refunds, amount_refunds ] def header(self): return [ _("University"), _("Course"), _("Number of Transactions"), _("Total Payments Collected"), _("Service Fees (if any)"), _("Number of Successful Refunds"), _("Total Amount of Refunds"), ] def course_ids_between(start_word, end_word): """ Returns a list of all valid course_ids that fall alphabetically between start_word and end_word. These comparisons are unicode-safe. """ valid_courses = [] for course in modulestore().get_courses(): course_id = course.id.to_deprecated_string() if start_word.lower() <= course_id.lower() <= end_word.lower(): valid_courses.append(course.id) return valid_courses
agpl-3.0
tuck182/syslog-ng-mod-lumberjack-py
src/lumberjack/client/protocol.py
1
2871
from zope.interface import implementer, implements
from twisted.internet import protocol
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet.abstract import _ConsumerMixin

import collections
import struct
import sys
import warnings

from lumberjack.client.encoder import Encoder


class IncompleteResponse(Exception):
    """ Not enough data to parse a full response. """


class ParseError(Exception):
    """ The incoming data is not a valid packet. """


# FIXME: Have connection (protocol) manage message sequence, since it's only
# relevant per-connection
class LumberjackProtocol(protocol.Protocol, _ConsumerMixin):
    """Client side of the lumberjack framing protocol.

    Writes compressed frames built by Encoder and keeps each sent message in
    an ordered table until the matching ACK ('A') frame arrives; on
    disconnect every still-pending message is errbacked so upstream can
    retry.
    """

    connected = 0
    disconnected = 0
    disconnecting = 0
    _remainingData = ""
    # BUG FIX: this used to be `collections.OrderedDict()` evaluated at class
    # definition time, i.e. a single dict shared by *every* connection
    # instance, so concurrent/successive connections corrupted each other's
    # in-flight message table.  Each connection now gets its own dict in
    # connectionMade().
    _pendingMessages = None

    def connectionMade(self):
        """Initialise per-connection state and announce our window size."""
        self._pendingMessages = collections.OrderedDict()
        self.compress_and_write(Encoder.int_frame('W', 0))  # window_size
        try:
            self.factory.producer.addConsumer(self)
        except Exception:  # narrowed from a bare `except:`
            import traceback
            traceback.print_exc()
            print("addConsumer failed: {0}".format(sys.exc_info()[1]))
        self.producer.resumeProducing()
        self._remainingData = b""

    def connectionLost(self, reason):
        """Errback every message that never got acked."""
        self.producer.removeConsumer(self)
        self.sendData = False
        while self._pendingMessages:
            sequence, message = self._pendingMessages.popitem(True)
            try:
                # FIXME: Shouldn't need to pass message
                message.handler.errback(message)
            except Exception:  # narrowed from a bare `except:`
                print("Got an error '{0}' trying to NAK message {1}".format(sys.exc_info()[1], sequence))

    def write(self, message):
        """Record *message* as pending and send it as a compressed frame."""
        # FIXME: Need to handle SSL disconnects
        self._pendingMessages[message.sequence] = message
        self.compress_and_write(Encoder.to_frame(message.data, message.sequence))

    def compress_and_write(self, data):
        """Compress *data* and write it to the transport."""
        self.transport.write(Encoder.compress(data))

    def dataReceived(self, data):
        """Buffer incoming bytes and consume as many full frames as possible."""
        self._remainingData += data
        while self._remainingData:
            try:
                self._consumeData()
            except IncompleteResponse:
                break

    def ackReceived(self, sequence):
        """Resolve the pending message acknowledged by *sequence*."""
        try:
            message = self._pendingMessages.pop(sequence)
        except KeyError:
            warnings.warn("Received ack for unknown message {0}".format(sequence))
        else:
            # BUG FIX: the callback used to run inside the try above, so a
            # KeyError raised *by the handler* was misreported as an
            # "ack for unknown message" and swallowed.
            # FIXME: Shouldn't need to pass message
            message.handler.callback(message)

    def _consumeData(self):
        """Parse one 6-byte ACK frame off the buffer, or raise.

        Raises IncompleteResponse when fewer than 6 bytes are buffered and
        ParseError for any frame type other than 'A'.
        """
        if len(self._remainingData) < 6:
            raise IncompleteResponse
        packet, self._remainingData = self._remainingData[0:6], self._remainingData[6:]
        (_, packet_type, sequence) = struct.unpack("!ccI", packet)
        if packet_type != 'A':
            raise ParseError("Whoa we shouldn't get this frame: {0}".format(packet_type))
        self.ackReceived(sequence)


class LumberjackProtocolFactory(ReconnectingClientFactory):
    """Reconnecting factory that hands each new protocol our producer."""

    protocol = LumberjackProtocol

    def __init__(self, producer):
        self.producer = producer
gpl-2.0
boundlessgeo/QGIS
python/plugins/processing/algs/qgis/TinInterpolation.py
6
8360
# -*- coding: utf-8 -*-

"""
***************************************************************************
    TinInterpolation.py
    ---------------------
    Date                 : October 2016
    Copyright            : (C) 2016 by Alexander Bruy
    Email                : alexander dot bruy at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Alexander Bruy'
__date__ = 'October 2016'
__copyright__ = '(C) 2016, Alexander Bruy'

# This will get replaced with a git SHA1 when you do a git archive

__revision__ = '$Format:%H$'

import os

from qgis.PyQt.QtGui import QIcon

from qgis.core import (QgsProcessingUtils,
                       QgsProcessing,
                       QgsProcessingParameterEnum,
                       QgsProcessingParameterNumber,
                       QgsProcessingParameterExtent,
                       QgsProcessingParameterDefinition,
                       QgsProcessingParameterRasterDestination,
                       QgsWkbTypes,
                       QgsProcessingParameterFeatureSink,
                       QgsProcessingException,
                       QgsCoordinateReferenceSystem)
from qgis.analysis import (QgsInterpolator,
                           QgsTinInterpolator,
                           QgsGridFileWriter)

from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.algs.qgis.ui.InterpolationWidgets import ParameterInterpolationData, ParameterPixelSize

# Two levels up from this module: the plugin root, used to locate icons.
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]


class TinInterpolation(QgisAlgorithm):
    """Processing algorithm: TIN interpolation of one or more input
    layers onto a raster grid, with an optional triangulation line output.
    """

    # Parameter identifiers used throughout the processing framework.
    INTERPOLATION_DATA = 'INTERPOLATION_DATA'
    METHOD = 'METHOD'
    PIXEL_SIZE = 'PIXEL_SIZE'
    COLUMNS = 'COLUMNS'
    ROWS = 'ROWS'
    EXTENT = 'EXTENT'
    OUTPUT = 'OUTPUT'
    TRIANGULATION = 'TRIANGULATION'

    def icon(self):
        """Return the toolbox icon for this algorithm."""
        return QIcon(os.path.join(pluginPath, 'images', 'interpolation.png'))

    def group(self):
        """Return the translated toolbox group name."""
        return self.tr('Interpolation')

    def groupId(self):
        """Return the stable (untranslated) group identifier."""
        return 'interpolation'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare the algorithm's input and output parameters."""
        # NOTE(review): 'Clough-Toucher' looks like a typo for the
        # Clough-Tocher scheme; the string is user-visible (and possibly an
        # existing translation key), so it is kept as-is here.
        self.METHODS = [self.tr('Linear'),
                        self.tr('Clough-Toucher (cubic)')
                        ]

        self.addParameter(ParameterInterpolationData(self.INTERPOLATION_DATA,
                                                     self.tr('Input layer(s)')))
        self.addParameter(QgsProcessingParameterEnum(self.METHOD,
                                                     self.tr('Interpolation method'),
                                                     options=self.METHODS,
                                                     defaultValue=0))
        self.addParameter(QgsProcessingParameterExtent(self.EXTENT,
                                                       self.tr('Extent'),
                                                       optional=False))
        pixel_size_param = ParameterPixelSize(self.PIXEL_SIZE,
                                              self.tr('Output raster size'),
                                              layersData=self.INTERPOLATION_DATA,
                                              extent=self.EXTENT,
                                              minValue=0.0,
                                              default=0.1)
        self.addParameter(pixel_size_param)

        # COLUMNS/ROWS are hidden; processAlgorithm derives them from the
        # extent and pixel size when they are left at 0.
        cols_param = QgsProcessingParameterNumber(self.COLUMNS,
                                                  self.tr('Number of columns'),
                                                  optional=True,
                                                  minValue=0, maxValue=10000000)
        cols_param.setFlags(cols_param.flags() | QgsProcessingParameterDefinition.FlagHidden)
        self.addParameter(cols_param)

        rows_param = QgsProcessingParameterNumber(self.ROWS,
                                                  self.tr('Number of rows'),
                                                  optional=True,
                                                  minValue=0, maxValue=10000000)
        rows_param.setFlags(rows_param.flags() | QgsProcessingParameterDefinition.FlagHidden)
        self.addParameter(rows_param)

        self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
                                                                  self.tr('Interpolated')))

        # Optional line-layer sink carrying the TIN edges; off by default.
        triangulation_file_param = QgsProcessingParameterFeatureSink(self.TRIANGULATION,
                                                                     self.tr('Triangulation'),
                                                                     type=QgsProcessing.TypeVectorLine,
                                                                     optional=True)
        triangulation_file_param.setCreateByDefault(False)
        self.addParameter(triangulation_file_param)

    def name(self):
        """Return the stable algorithm identifier."""
        return 'tininterpolation'

    def displayName(self):
        """Return the translated display name."""
        return self.tr('TIN interpolation')

    def processAlgorithm(self, parameters, context, feedback):
        """Run the TIN interpolation and write the output raster.

        Returns a dict mapping OUTPUT to the raster destination and
        TRIANGULATION to the triangulation sink id.
        """
        interpolationData = ParameterInterpolationData.parseValue(parameters[self.INTERPOLATION_DATA])
        method = self.parameterAsEnum(parameters, self.METHOD, context)
        bbox = self.parameterAsExtent(parameters, self.EXTENT, context)
        pixel_size = self.parameterAsDouble(parameters, self.PIXEL_SIZE, context)
        output = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)

        columns = self.parameterAsInt(parameters, self.COLUMNS, context)
        rows = self.parameterAsInt(parameters, self.ROWS, context)
        # Grid dimensions left at 0 are derived from the extent/pixel size.
        if columns == 0:
            columns = max(round(bbox.width() / pixel_size) + 1, 1)
        if rows == 0:
            rows = max(round(bbox.height() / pixel_size) + 1, 1)

        if interpolationData is None:
            raise QgsProcessingException(
                self.tr('You need to specify at least one input layer.'))

        layerData = []
        layers = []
        crs = QgsCoordinateReferenceSystem()
        # INTERPOLATION_DATA is a packed string: layer entries are separated
        # by '::|::', and each entry's fields by '::~::'
        # (source, value source, attribute index, source type).
        for row in interpolationData.split('::|::'):
            v = row.split('::~::')
            data = QgsInterpolator.LayerData()

            # need to keep a reference until interpolation is complete
            layer = QgsProcessingUtils.variantToSource(v[0], context)
            data.source = layer
            layers.append(layer)
            # First valid layer CRS wins; it is reused for the triangulation sink.
            if not crs.isValid():
                crs = layer.sourceCrs()

            data.valueSource = int(v[1])
            data.interpolationAttribute = int(v[2])
            if v[3] == '0':
                data.sourceType = QgsInterpolator.SourcePoints
            elif v[3] == '1':
                data.sourceType = QgsInterpolator.SourceStructureLines
            else:
                data.sourceType = QgsInterpolator.SourceBreakLines
            layerData.append(data)

        if method == 0:
            interpolationMethod = QgsTinInterpolator.Linear
        else:
            interpolationMethod = QgsTinInterpolator.CloughTocher

        (triangulation_sink, triangulation_dest_id) = self.parameterAsSink(
            parameters, self.TRIANGULATION, context,
            QgsTinInterpolator.triangulationFields(), QgsWkbTypes.LineString, crs)

        interpolator = QgsTinInterpolator(layerData, interpolationMethod, feedback)
        if triangulation_sink is not None:
            interpolator.setTriangulationSink(triangulation_sink)

        writer = QgsGridFileWriter(interpolator,
                                   output,
                                   bbox,
                                   columns,
                                   rows)

        writer.writeFile(feedback)
        return {self.OUTPUT: output, self.TRIANGULATION: triangulation_dest_id}
gpl-2.0
dbacchet/foundation
3rdparty/googletest-release-1.8.0/googlemock/scripts/upload_gmock.py
770
2833
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""upload_gmock.py v0.1.0 -- uploads a Google Mock patch for review.

This simple wrapper passes all command line flags and
--cc=googlemock@googlegroups.com to upload.py.

USAGE: upload_gmock.py [options for upload.py]
"""

__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import sys

CC_FLAG = '--cc='
GMOCK_GROUP = 'googlemock@googlegroups.com'


def main():
    """Re-exec upload.py with the Google Mock group added to the cc list."""
    # upload.py is expected to live next to this script.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    upload_py = os.path.join(script_dir, 'upload.py')

    # Copy the caller's flags, splicing GMOCK_GROUP into any existing
    # --cc= flag that doesn't already mention it.
    new_argv = [upload_py]
    saw_cc_flag = False
    for flag in sys.argv[1:]:
        if flag.startswith(CC_FLAG):
            saw_cc_flag = True
            addresses = [addr for addr in flag[len(CC_FLAG):].split(',') if addr]
            if GMOCK_GROUP not in addresses:
                addresses.append(GMOCK_GROUP)
            new_argv.append(CC_FLAG + ','.join(addresses))
        else:
            new_argv.append(flag)
    if not saw_cc_flag:
        new_argv.append(CC_FLAG + GMOCK_GROUP)

    # Replace the current process with upload.py.
    os.execv(upload_py, new_argv)


if __name__ == '__main__':
    main()
mit
llgoncalves/harpia
test/model/test_connectionmodel.py
2
1097
from unittest import TestCase

from harpia.model.connectionmodel import ConnectionModel
from harpia.GUI.diagram import Diagram


class TestConnectionModel(TestCase):
    """Smoke tests for ConnectionModel: every accessor is invoked once to
    verify it executes without raising (no return values are asserted)."""

    def setUp(self):
        """Do the test basic setup."""
        Diagram()  # constructed for its side effects; the instance itself is unused
        self.connection_model = ConnectionModel()

    def test_get_diagram(self):
        """get_diagram() executes without raising."""
        self.connection_model.get_diagram()

    def test_type_match(self):
        """type_match() executes without raising."""
        self.connection_model.type_match()

    def test_get_source_port_name(self):
        """get_source_port_name() executes without raising."""
        self.connection_model.get_source_port_name()

    def test_get_sink_port_name(self):
        """get_sink_port_name() executes without raising."""
        self.connection_model.get_sink_port_name()

    def test_set_end(self):
        """set_end() executes without raising."""
        self.connection_model.set_end()
gpl-2.0
anielsen001/scipy
scipy/stats/tests/test_morestats.py
4
54238
# Author: Travis Oliphant, 2002 # # Further enhancements and tests added by numerous SciPy developers. # from __future__ import division, print_function, absolute_import import warnings import numpy as np from numpy.random import RandomState from numpy.testing import (TestCase, run_module_suite, assert_array_equal, assert_almost_equal, assert_array_less, assert_array_almost_equal, assert_raises, assert_, assert_allclose, assert_equal, dec, assert_warns) from scipy import stats from common_tests import check_named_results # Matplotlib is not a scipy dependency but is optionally used in probplot, so # check if it's available try: import matplotlib.pyplot as plt have_matplotlib = True except: have_matplotlib = False g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000] g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988] g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996] g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996] g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996] g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996] g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002] g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006] g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991] g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997] class TestBayes_mvs(TestCase): def test_basic(self): # Expected values in this test simply taken from the function. 
For # some checks regarding correctness of implementation, see review in # gh-674 data = [6, 9, 12, 7, 8, 8, 13] mean, var, std = stats.bayes_mvs(data) assert_almost_equal(mean.statistic, 9.0) assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467), rtol=1e-14) assert_almost_equal(var.statistic, 10.0) assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018), rtol=1e-09) assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14) assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312), rtol=1e-14) def test_empty_input(self): assert_raises(ValueError, stats.bayes_mvs, []) def test_result_attributes(self): x = np.arange(15) attributes = ('statistic', 'minmax') res = stats.bayes_mvs(x) for i in res: check_named_results(i, attributes) class TestMvsdist(TestCase): def test_basic(self): data = [6, 9, 12, 7, 8, 8, 13] mean, var, std = stats.mvsdist(data) assert_almost_equal(mean.mean(), 9.0) assert_allclose(mean.interval(0.9), (7.1036502226125329, 10.896349777387467), rtol=1e-14) assert_almost_equal(var.mean(), 10.0) assert_allclose(var.interval(0.9), (3.1767242068607087, 24.45910381334018), rtol=1e-09) assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14) assert_allclose(std.interval(0.9), (1.7823367265645145, 4.9456146050146312), rtol=1e-14) def test_empty_input(self): assert_raises(ValueError, stats.mvsdist, []) def test_bad_arg(self): # Raise ValueError if fewer than two data points are given. 
data = [1] assert_raises(ValueError, stats.mvsdist, data) def test_warns(self): # regression test for gh-5270 # make sure there are no spurious divide-by-zero warnings with warnings.catch_warnings(): warnings.simplefilter('error', RuntimeWarning) [x.mean() for x in stats.mvsdist([1, 2, 3])] [x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])] class TestShapiro(TestCase): def test_basic(self): x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46, 4.43, 0.21, 4.75, 0.71, 1.52, 3.24, 0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66] w, pw = stats.shapiro(x1) assert_almost_equal(w, 0.90047299861907959, 6) assert_almost_equal(pw, 0.042089745402336121, 6) x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11, 3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69, 0.08, 3.67, 2.81, 3.49] w, pw = stats.shapiro(x2) assert_almost_equal(w, 0.9590270, 6) assert_almost_equal(pw, 0.52460, 3) # Verified against R np.random.seed(12345678) x3 = stats.norm.rvs(loc=5, scale=3, size=100) w, pw = stats.shapiro(x3) assert_almost_equal(w, 0.9772805571556091, decimal=6) assert_almost_equal(pw, 0.08144091814756393, decimal=3) # Extracted from original paper x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614, 0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206, 3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351] W_expected = 0.83467 p_expected = 0.000914 w, pw = stats.shapiro(x4) assert_almost_equal(w, W_expected, decimal=4) assert_almost_equal(pw, p_expected, decimal=5) def test_2d(self): x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46, 4.43, 0.21, 4.75], [0.71, 1.52, 3.24, 0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]] w, pw = stats.shapiro(x1) assert_almost_equal(w, 0.90047299861907959, 6) assert_almost_equal(pw, 0.042089745402336121, 6) x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11, 3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69, 0.08, 3.67, 2.81, 3.49]] w, pw = stats.shapiro(x2) assert_almost_equal(w, 0.9590270, 6) assert_almost_equal(pw, 0.52460, 3) def 
test_empty_input(self): assert_raises(ValueError, stats.shapiro, []) assert_raises(ValueError, stats.shapiro, [[], [], []]) def test_not_enough_values(self): assert_raises(ValueError, stats.shapiro, [1, 2]) assert_raises(ValueError, stats.shapiro, [[], [2]]) def test_bad_arg(self): # Length of x is less than 3. x = [1] assert_raises(ValueError, stats.shapiro, x) def test_nan_input(self): x = np.arange(10.) x[9] = np.nan w, pw = stats.shapiro(x) assert_equal(w, np.nan) assert_almost_equal(pw, 1.0) class TestAnderson(TestCase): def test_normal(self): rs = RandomState(1234567890) x1 = rs.standard_exponential(size=50) x2 = rs.standard_normal(size=50) A, crit, sig = stats.anderson(x1) assert_array_less(crit[:-1], A) A, crit, sig = stats.anderson(x2) assert_array_less(A, crit[-2:]) v = np.ones(10) v[0] = 0 A, crit, sig = stats.anderson(v) # The expected statistic 3.208057 was computed independently of scipy. # For example, in R: # > library(nortest) # > v <- rep(1, 10) # > v[1] <- 0 # > result <- ad.test(v) # > result$statistic # A # 3.208057 assert_allclose(A, 3.208057) def test_expon(self): rs = RandomState(1234567890) x1 = rs.standard_exponential(size=50) x2 = rs.standard_normal(size=50) A, crit, sig = stats.anderson(x1, 'expon') assert_array_less(A, crit[-2:]) olderr = np.seterr(all='ignore') try: A, crit, sig = stats.anderson(x2, 'expon') finally: np.seterr(**olderr) assert_(A > crit[-1]) def test_gumbel(self): # Regression test for gh-6306. Before that issue was fixed, # this case would return a2=inf. v = np.ones(100) v[0] = 0.0 a2, crit, sig = stats.anderson(v, 'gumbel') # A brief reimplementation of the calculation of the statistic. 
n = len(v) xbar, s = stats.gumbel_l.fit(v) logcdf = stats.gumbel_l.logcdf(v, xbar, s) logsf = stats.gumbel_l.logsf(v, xbar, s) i = np.arange(1, n+1) expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1])) assert_allclose(a2, expected_a2) def test_bad_arg(self): assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp') def test_result_attributes(self): rs = RandomState(1234567890) x = rs.standard_exponential(size=50) res = stats.anderson(x) attributes = ('statistic', 'critical_values', 'significance_level') check_named_results(res, attributes) def test_gumbel_l(self): # gh-2592, gh-6337 # Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist. rs = RandomState(1234567890) x = rs.gumbel(size=100) A1, crit1, sig1 = stats.anderson(x, 'gumbel') A2, crit2, sig2 = stats.anderson(x, 'gumbel_l') assert_allclose(A2, A1) def test_gumbel_r(self): # gh-2592, gh-6337 # Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist. rs = RandomState(1234567890) x1 = rs.gumbel(size=100) x2 = np.ones(100) A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r') A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r') assert_array_less(A1, crit1[-2:]) assert_(A2 > crit2[-1]) class TestAndersonKSamp(TestCase): def test_example1a(self): # Example data from Scholz & Stephens (1987), originally # published in Lehmann (1995, Nonparametrics, Statistical # Methods Based on Ranks, p. 
309) # Pass a mixture of lists and arrays t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0] t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8]) t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0]) t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8]) assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4), midrank=False) with warnings.catch_warnings(): warnings.filterwarnings('ignore', message='approximate p-value') Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False) assert_almost_equal(Tk, 4.449, 3) assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459], tm, 4) assert_almost_equal(p, 0.0021, 4) def test_example1b(self): # Example data from Scholz & Stephens (1987), originally # published in Lehmann (1995, Nonparametrics, Statistical # Methods Based on Ranks, p. 309) # Pass arrays t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]) t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8]) t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0]) t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8]) with warnings.catch_warnings(): warnings.filterwarnings('ignore', message='approximate p-value') Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True) assert_almost_equal(Tk, 4.480, 3) assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459], tm, 4) assert_almost_equal(p, 0.0020, 4) def test_example2a(self): # Example data taken from an earlier technical report of # Scholz and Stephens # Pass lists instead of arrays t1 = [194, 15, 41, 29, 33, 181] t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118] t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34] t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29, 118, 25, 156, 310, 76, 26, 44, 23, 62] t5 = [130, 208, 70, 101, 208] t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27] t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33] t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 
246, 21, 42, 20, 5, 12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95] t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82, 54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24] t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36, 22, 139, 210, 97, 30, 23, 13, 14] t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438] t12 = [50, 254, 5, 283, 35, 12] t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130] t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66, 61, 34] with warnings.catch_warnings(): warnings.filterwarnings('ignore', message='approximate p-value') Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14), midrank=False) assert_almost_equal(Tk, 3.288, 3) assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009], tm, 4) assert_almost_equal(p, 0.0041, 4) def test_example2b(self): # Example data taken from an earlier technical report of # Scholz and Stephens t1 = [194, 15, 41, 29, 33, 181] t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118] t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34] t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29, 118, 25, 156, 310, 76, 26, 44, 23, 62] t5 = [130, 208, 70, 101, 208] t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27] t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33] t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5, 12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95] t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82, 54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24] t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36, 22, 139, 210, 97, 30, 23, 13, 14] t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438] t12 = [50, 254, 5, 283, 35, 12] t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130] t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66, 61, 34] with warnings.catch_warnings(): warnings.filterwarnings('ignore', 
message='approximate p-value') Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14), midrank=True) assert_almost_equal(Tk, 3.294, 3) assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009], tm, 4) assert_almost_equal(p, 0.0041, 4) def test_not_enough_samples(self): assert_raises(ValueError, stats.anderson_ksamp, np.ones(5)) def test_no_distinct_observations(self): assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), np.ones(5))) def test_empty_sample(self): assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), [])) def test_result_attributes(self): # Example data from Scholz & Stephens (1987), originally # published in Lehmann (1995, Nonparametrics, Statistical # Methods Based on Ranks, p. 309) # Pass a mixture of lists and arrays t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0] t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8]) t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0]) t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8]) with warnings.catch_warnings(): warnings.filterwarnings('ignore', message='approximate p-value') res = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False) attributes = ('statistic', 'critical_values', 'significance_level') check_named_results(res, attributes) class TestAnsari(TestCase): def test_small(self): x = [1, 2, 3, 3, 4] y = [3, 2, 6, 1, 6, 1, 4, 1] with warnings.catch_warnings(record=True): # Ties preclude use ... 
W, pval = stats.ansari(x, y) assert_almost_equal(W, 23.5, 11) assert_almost_equal(pval, 0.13499256881897437, 11) def test_approx(self): ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99, 101, 96, 97, 102, 107, 113, 116, 113, 110, 98)) parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104, 100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99)) with warnings.catch_warnings(): warnings.filterwarnings('ignore', message="Ties preclude use of exact statistic.") W, pval = stats.ansari(ramsay, parekh) assert_almost_equal(W, 185.5, 11) assert_almost_equal(pval, 0.18145819972867083, 11) def test_exact(self): W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12]) assert_almost_equal(W, 10.0, 11) assert_almost_equal(pval, 0.533333333333333333, 7) def test_bad_arg(self): assert_raises(ValueError, stats.ansari, [], [1]) assert_raises(ValueError, stats.ansari, [1], []) def test_result_attributes(self): x = [1, 2, 3, 3, 4] y = [3, 2, 6, 1, 6, 1, 4, 1] with warnings.catch_warnings(record=True): # Ties preclude use ... res = stats.ansari(x, y) attributes = ('statistic', 'pvalue') check_named_results(res, attributes) class TestBartlett(TestCase): def test_data(self): args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] T, pval = stats.bartlett(*args) assert_almost_equal(T, 20.78587342806484, 7) assert_almost_equal(pval, 0.0136358632781, 7) def test_bad_arg(self): # Too few args raises ValueError. 
assert_raises(ValueError, stats.bartlett, [1]) def test_result_attributes(self): args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] res = stats.bartlett(*args) attributes = ('statistic', 'pvalue') check_named_results(res, attributes) def test_empty_arg(self): args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, []) assert_equal((np.nan, np.nan), stats.bartlett(*args)) class TestLevene(TestCase): def test_data(self): args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] W, pval = stats.levene(*args) assert_almost_equal(W, 1.7059176930008939, 7) assert_almost_equal(pval, 0.0990829755522, 7) def test_trimmed1(self): # Test that center='trimmed' gives the same result as center='mean' # when proportiontocut=0. W1, pval1 = stats.levene(g1, g2, g3, center='mean') W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', proportiontocut=0.0) assert_almost_equal(W1, W2) assert_almost_equal(pval1, pval2) def test_trimmed2(self): x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0] y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0] np.random.seed(1234) x2 = np.random.permutation(x) # Use center='trimmed' W0, pval0 = stats.levene(x, y, center='trimmed', proportiontocut=0.125) W1, pval1 = stats.levene(x2, y, center='trimmed', proportiontocut=0.125) # Trim the data here, and use center='mean' W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean') # Result should be the same. 
assert_almost_equal(W0, W2) assert_almost_equal(W1, W2) assert_almost_equal(pval1, pval2) def test_equal_mean_median(self): x = np.linspace(-1, 1, 21) np.random.seed(1234) x2 = np.random.permutation(x) y = x**3 W1, pval1 = stats.levene(x, y, center='mean') W2, pval2 = stats.levene(x2, y, center='median') assert_almost_equal(W1, W2) assert_almost_equal(pval1, pval2) def test_bad_keyword(self): x = np.linspace(-1, 1, 21) assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1) def test_bad_center_value(self): x = np.linspace(-1, 1, 21) assert_raises(ValueError, stats.levene, x, x, center='trim') def test_too_few_args(self): assert_raises(ValueError, stats.levene, [1]) def test_result_attributes(self): args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] res = stats.levene(*args) attributes = ('statistic', 'pvalue') check_named_results(res, attributes) class TestBinomP(TestCase): def test_data(self): pval = stats.binom_test(100, 250) assert_almost_equal(pval, 0.0018833009350757682, 11) pval = stats.binom_test(201, 405) assert_almost_equal(pval, 0.92085205962670713, 11) pval = stats.binom_test([682, 243], p=3.0/4) assert_almost_equal(pval, 0.38249155957481695, 11) def test_bad_len_x(self): # Length of x must be 1 or 2. assert_raises(ValueError, stats.binom_test, [1, 2, 3]) def test_bad_n(self): # len(x) is 1, but n is invalid. 
# Missing n assert_raises(ValueError, stats.binom_test, [100]) # n less than x[0] assert_raises(ValueError, stats.binom_test, [100], n=50) def test_bad_p(self): assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0) def test_alternatives(self): res = stats.binom_test(51, 235, p=1./6, alternative='less') assert_almost_equal(res, 0.982022657605858) res = stats.binom_test(51, 235, p=1./6, alternative='greater') assert_almost_equal(res, 0.02654424571169085) res = stats.binom_test(51, 235, p=1./6, alternative='two-sided') assert_almost_equal(res, 0.0437479701823997) class TestFligner(TestCase): def test_data(self): # numbers from R: fligner.test in package stats x1 = np.arange(5) assert_array_almost_equal(stats.fligner(x1, x1**2), (3.2282229927203536, 0.072379187848207877), 11) def test_trimmed1(self): # Test that center='trimmed' gives the same result as center='mean' # when proportiontocut=0. Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean') Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed', proportiontocut=0.0) assert_almost_equal(Xsq1, Xsq2) assert_almost_equal(pval1, pval2) def test_trimmed2(self): x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0] y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0] # Use center='trimmed' Xsq1, pval1 = stats.fligner(x, y, center='trimmed', proportiontocut=0.125) # Trim the data here, and use center='mean' Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean') # Result should be the same. assert_almost_equal(Xsq1, Xsq2) assert_almost_equal(pval1, pval2) # The following test looks reasonable at first, but fligner() uses the # function stats.rankdata(), and in one of the cases in this test, # there are ties, while in the other (because of normal rounding # errors) there are not. This difference leads to differences in the # third significant digit of W. 
# #def test_equal_mean_median(self): # x = np.linspace(-1,1,21) # y = x**3 # W1, pval1 = stats.fligner(x, y, center='mean') # W2, pval2 = stats.fligner(x, y, center='median') # assert_almost_equal(W1, W2) # assert_almost_equal(pval1, pval2) def test_bad_keyword(self): x = np.linspace(-1, 1, 21) assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1) def test_bad_center_value(self): x = np.linspace(-1, 1, 21) assert_raises(ValueError, stats.fligner, x, x, center='trim') def test_bad_num_args(self): # Too few args raises ValueError. assert_raises(ValueError, stats.fligner, [1]) def test_empty_arg(self): x = np.arange(5) assert_equal((np.nan, np.nan), stats.fligner(x, x**2, [])) class TestMood(TestCase): def test_mood(self): # numbers from R: mood.test in package stats x1 = np.arange(5) assert_array_almost_equal(stats.mood(x1, x1**2), (-1.3830857299399906, 0.16663858066771478), 11) def test_mood_order_of_args(self): # z should change sign when the order of arguments changes, pvalue # should not change np.random.seed(1234) x1 = np.random.randn(10, 1) x2 = np.random.randn(15, 1) z1, p1 = stats.mood(x1, x2) z2, p2 = stats.mood(x2, x1) assert_array_almost_equal([z1, p1], [-z2, p2]) def test_mood_with_axis_none(self): # Test with axis = None, compare with results from R x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047, 1.59528080213779, 0.329507771815361, -0.820468384118015, 0.487429052428485, 0.738324705129217, 0.575781351653492, -0.305388387156356, 1.51178116845085, 0.389843236411431, -0.621240580541804, -2.2146998871775, 1.12493091814311, -0.0449336090152309, -0.0161902630989461, 0.943836210685299, 0.821221195098089, 0.593901321217509] x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882, -1.13037567424629, -0.0802517565509893, 0.132420284381094, 0.707954729271733, -0.23969802417184, 1.98447393665293, -0.138787012119665, 0.417650750792556, 0.981752777463662, -0.392695355503813, -1.03966897694891, 1.78222896030858, -2.31106908460517, 
0.878604580921265, 0.035806718015226, 1.01282869212708, 0.432265154539617, 2.09081920524915, -1.19992581964387, 1.58963820029007, 1.95465164222325, 0.00493777682814261, -2.45170638784613, 0.477237302613617, -0.596558168631403, 0.792203270299649, 0.289636710177348] x1 = np.array(x1) x2 = np.array(x2) x1.shape = (10, 2) x2.shape = (15, 2) assert_array_almost_equal(stats.mood(x1, x2, axis=None), [-1.31716607555, 0.18778296257]) def test_mood_2d(self): # Test if the results of mood test in 2-D case are consistent with the # R result for the same inputs. Numbers from R mood.test(). ny = 5 np.random.seed(1234) x1 = np.random.randn(10, ny) x2 = np.random.randn(15, ny) z_vectest, pval_vectest = stats.mood(x1, x2) for j in range(ny): assert_array_almost_equal([z_vectest[j], pval_vectest[j]], stats.mood(x1[:, j], x2[:, j])) # inverse order of dimensions x1 = x1.transpose() x2 = x2.transpose() z_vectest, pval_vectest = stats.mood(x1, x2, axis=1) for i in range(ny): # check axis handling is self consistent assert_array_almost_equal([z_vectest[i], pval_vectest[i]], stats.mood(x1[i, :], x2[i, :])) def test_mood_3d(self): shape = (10, 5, 6) np.random.seed(1234) x1 = np.random.randn(*shape) x2 = np.random.randn(*shape) for axis in range(3): z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis) # Tests that result for 3-D arrays is equal to that for the # same calculation on a set of 1-D arrays taken from the # 3-D array axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis for i in range(shape[axes_idx[axis][0]]): for j in range(shape[axes_idx[axis][1]]): if axis == 0: slice1 = x1[:, i, j] slice2 = x2[:, i, j] elif axis == 1: slice1 = x1[i, :, j] slice2 = x2[i, :, j] else: slice1 = x1[i, j, :] slice2 = x2[i, j, :] assert_array_almost_equal([z_vectest[i, j], pval_vectest[i, j]], stats.mood(slice1, slice2)) def test_mood_bad_arg(self): # Raise ValueError when the sum of the lengths of the args is # less than 3 assert_raises(ValueError, stats.mood, [1], []) class 
TestProbplot(TestCase): def test_basic(self): np.random.seed(12345) x = stats.norm.rvs(size=20) osm, osr = stats.probplot(x, fit=False) osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575, -0.73908135, -0.5857176, -0.44506467, -0.31273668, -0.18568928, -0.06158146, 0.06158146, 0.18568928, 0.31273668, 0.44506467, 0.5857176, 0.73908135, 0.91222575, 1.11829229, 1.38768012, 1.8241636] assert_allclose(osr, np.sort(x)) assert_allclose(osm, osm_expected) res, res_fit = stats.probplot(x, fit=True) res_fit_expected = [1.05361841, 0.31297795, 0.98741609] assert_allclose(res_fit, res_fit_expected) def test_sparams_keyword(self): np.random.seed(123456) x = stats.norm.rvs(size=100) # Check that None, () and 0 (loc=0, for normal distribution) all work # and give the same results osm1, osr1 = stats.probplot(x, sparams=None, fit=False) osm2, osr2 = stats.probplot(x, sparams=0, fit=False) osm3, osr3 = stats.probplot(x, sparams=(), fit=False) assert_allclose(osm1, osm2) assert_allclose(osm1, osm3) assert_allclose(osr1, osr2) assert_allclose(osr1, osr3) # Check giving (loc, scale) params for normal distribution osm, osr = stats.probplot(x, sparams=(), fit=False) def test_dist_keyword(self): np.random.seed(12345) x = stats.norm.rvs(size=20) osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,)) osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,)) assert_allclose(osm1, osm2) assert_allclose(osr1, osr2) assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name') assert_raises(AttributeError, stats.probplot, x, dist=[]) class custom_dist(object): """Some class that looks just enough like a distribution.""" def ppf(self, q): return stats.norm.ppf(q, loc=2) osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False) osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False) assert_allclose(osm1, osm2) assert_allclose(osr1, osr2) @dec.skipif(not have_matplotlib) def test_plot_kwarg(self): np.random.seed(7654321) fig = plt.figure() 
fig.add_subplot(111) x = stats.t.rvs(3, size=100) res1, fitres1 = stats.probplot(x, plot=plt) plt.close() res2, fitres2 = stats.probplot(x, plot=None) res3 = stats.probplot(x, fit=False, plot=plt) plt.close() res4 = stats.probplot(x, fit=False, plot=None) # Check that results are consistent between combinations of `fit` and # `plot` keywords. assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2) assert_allclose(res1, res2) assert_allclose(res1, res3) assert_allclose(res1, res4) assert_allclose(fitres1, fitres2) # Check that a Matplotlib Axes object is accepted fig = plt.figure() ax = fig.add_subplot(111) stats.probplot(x, fit=False, plot=ax) plt.close() def test_probplot_bad_args(self): # Raise ValueError when given an invalid distribution. assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp") def test_empty(self): assert_equal(stats.probplot([], fit=False), (np.array([]), np.array([]))) assert_equal(stats.probplot([], fit=True), ((np.array([]), np.array([])), (np.nan, np.nan, 0.0))) def test_array_of_size_one(self): with np.errstate(invalid='ignore'): assert_equal(stats.probplot([1], fit=True), ((np.array([0.]), np.array([1])), (np.nan, np.nan, 0.0))) def test_wilcoxon_bad_arg(): # Raise ValueError when two args of different lengths are given or # zero_method is unknown. assert_raises(ValueError, stats.wilcoxon, [1], [1, 2]) assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy") def test_wilcoxon_arg_type(): # Should be able to accept list as arguments. # Address issue 6070. 
arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2] _ = stats.wilcoxon(arr, zero_method="pratt") _ = stats.wilcoxon(arr, zero_method="zsplit") _ = stats.wilcoxon(arr, zero_method="wilcox") class TestKstat(TestCase): def test_moments_normal_distribution(self): np.random.seed(32149) data = np.random.randn(12345) moments = [] for n in [1, 2, 3, 4]: moments.append(stats.kstat(data, n)) expected = [0.011315, 1.017931, 0.05811052, 0.0754134] assert_allclose(moments, expected, rtol=1e-4) # test equivalence with `stats.moment` m1 = stats.moment(data, moment=1) m2 = stats.moment(data, moment=2) m3 = stats.moment(data, moment=3) assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2) def test_empty_input(self): assert_raises(ValueError, stats.kstat, []) def test_nan_input(self): data = np.arange(10.) data[6] = np.nan assert_equal(stats.kstat(data), np.nan) def test_kstat_bad_arg(self): # Raise ValueError if n > 4 or n < 1. data = np.arange(10) for n in [0, 4.001]: assert_raises(ValueError, stats.kstat, data, n=n) class TestKstatVar(TestCase): def test_empty_input(self): assert_raises(ValueError, stats.kstatvar, []) def test_nan_input(self): data = np.arange(10.) data[6] = np.nan assert_equal(stats.kstat(data), np.nan) def test_bad_arg(self): # Raise ValueError is n is not 1 or 2. data = [1] n = 10 assert_raises(ValueError, stats.kstatvar, data, n=n) class TestPpccPlot(TestCase): def setUp(self): np.random.seed(7654321) self.x = stats.loggamma.rvs(5, size=500) + 5 def test_basic(self): N = 5 svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N) ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182, 0.93519298] assert_allclose(svals, np.linspace(-10, 10, num=N)) assert_allclose(ppcc, ppcc_expected) def test_dist(self): # Test that we can specify distributions both by name and as objects. 
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda') svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10, dist=stats.tukeylambda) assert_allclose(svals1, svals2, rtol=1e-20) assert_allclose(ppcc1, ppcc2, rtol=1e-20) # Test that 'tukeylambda' is the default dist svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10) assert_allclose(svals1, svals3, rtol=1e-20) assert_allclose(ppcc1, ppcc3, rtol=1e-20) @dec.skipif(not have_matplotlib) def test_plot_kwarg(self): # Check with the matplotlib.pyplot module fig = plt.figure() fig.add_subplot(111) stats.ppcc_plot(self.x, -20, 20, plot=plt) plt.close() # Check that a Matplotlib Axes object is accepted fig.add_subplot(111) ax = fig.add_subplot(111) stats.ppcc_plot(self.x, -20, 20, plot=ax) plt.close() def test_invalid_inputs(self): # `b` has to be larger than `a` assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0) # Raise ValueError when given an invalid distribution. assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1, dist="plate_of_shrimp") def test_empty(self): # For consistency with probplot return for one empty array, # ppcc contains all zeros and svals is the same as for normal array # input. svals, ppcc = stats.ppcc_plot([], 0, 1) assert_allclose(svals, np.linspace(0, 1, num=80)) assert_allclose(ppcc, np.zeros(80, dtype=float)) class TestPpccMax(TestCase): def test_ppcc_max_bad_arg(self): # Raise ValueError when given an invalid distribution. data = [1] assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp") def test_ppcc_max_basic(self): np.random.seed(1234567) x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7 # it is accurate up to 16 decimals assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=5) def test_dist(self): np.random.seed(1234567) x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 # Test that we can specify distributions both by name and as objects. 
max1 = stats.ppcc_max(x, dist='tukeylambda') max2 = stats.ppcc_max(x, dist=stats.tukeylambda) assert_almost_equal(max1, -0.71215366521264145, decimal=5) assert_almost_equal(max2, -0.71215366521264145, decimal=5) # Test that 'tukeylambda' is the default dist max3 = stats.ppcc_max(x) assert_almost_equal(max3, -0.71215366521264145, decimal=5) def test_brack(self): np.random.seed(1234567) x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5)) # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7 # it is accurate up to 16 decimals assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)), -0.71215366521264145, decimal=5) # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7 # it is accurate up to 16 decimals assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)), -0.71215366521264145, decimal=5) class TestBoxcox_llf(TestCase): def test_basic(self): np.random.seed(54321) x = stats.norm.rvs(size=10000, loc=10) lmbda = 1 llf = stats.boxcox_llf(lmbda, x) llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2)) assert_allclose(llf, llf_expected) def test_array_like(self): np.random.seed(54321) x = stats.norm.rvs(size=100, loc=10) lmbda = 1 llf = stats.boxcox_llf(lmbda, x) llf2 = stats.boxcox_llf(lmbda, list(x)) assert_allclose(llf, llf2, rtol=1e-12) def test_2d_input(self): # Note: boxcox_llf() was already working with 2-D input (sort of), so # keep it like that. boxcox() doesn't work with 2-D input though, due # to brent() returning a scalar. 
np.random.seed(54321) x = stats.norm.rvs(size=100, loc=10) lmbda = 1 llf = stats.boxcox_llf(lmbda, x) llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T) assert_allclose([llf, llf], llf2, rtol=1e-12) def test_empty(self): assert_(np.isnan(stats.boxcox_llf(1, []))) class TestBoxcox(TestCase): def test_fixed_lmbda(self): np.random.seed(12345) x = stats.loggamma.rvs(5, size=50) + 5 xt = stats.boxcox(x, lmbda=1) assert_allclose(xt, x - 1) xt = stats.boxcox(x, lmbda=-1) assert_allclose(xt, 1 - 1/x) xt = stats.boxcox(x, lmbda=0) assert_allclose(xt, np.log(x)) # Also test that array_like input works xt = stats.boxcox(list(x), lmbda=0) assert_allclose(xt, np.log(x)) def test_lmbda_None(self): np.random.seed(1234567) # Start from normal rv's, do inverse transform to check that # optimization function gets close to the right answer. np.random.seed(1245) lmbda = 2.5 x = stats.norm.rvs(loc=10, size=50000) x_inv = (x * lmbda + 1)**(-lmbda) xt, maxlog = stats.boxcox(x_inv) assert_almost_equal(maxlog, -1 / lmbda, decimal=2) def test_alpha(self): np.random.seed(1234) x = stats.loggamma.rvs(5, size=50) + 5 # Some regular values for alpha, on a small sample size _, _, interval = stats.boxcox(x, alpha=0.75) assert_allclose(interval, [4.004485780226041, 5.138756355035744]) _, _, interval = stats.boxcox(x, alpha=0.05) assert_allclose(interval, [1.2138178554857557, 8.209033272375663]) # Try some extreme values, see we don't hit the N=500 limit x = stats.loggamma.rvs(7, size=500) + 15 _, _, interval = stats.boxcox(x, alpha=0.001) assert_allclose(interval, [0.3988867, 11.40553131]) _, _, interval = stats.boxcox(x, alpha=0.999) assert_allclose(interval, [5.83316246, 5.83735292]) def test_boxcox_bad_arg(self): # Raise ValueError if any data value is negative. 
x = np.array([-1]) assert_raises(ValueError, stats.boxcox, x) def test_empty(self): assert_(stats.boxcox([]).shape == (0,)) class TestBoxcoxNormmax(TestCase): def setUp(self): np.random.seed(12345) self.x = stats.loggamma.rvs(5, size=50) + 5 def test_pearsonr(self): maxlog = stats.boxcox_normmax(self.x) assert_allclose(maxlog, 1.804465, rtol=1e-6) def test_mle(self): maxlog = stats.boxcox_normmax(self.x, method='mle') assert_allclose(maxlog, 1.758101, rtol=1e-6) # Check that boxcox() uses 'mle' _, maxlog_boxcox = stats.boxcox(self.x) assert_allclose(maxlog_boxcox, maxlog) def test_all(self): maxlog_all = stats.boxcox_normmax(self.x, method='all') assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6) class TestBoxcoxNormplot(TestCase): def setUp(self): np.random.seed(7654321) self.x = stats.loggamma.rvs(5, size=500) + 5 def test_basic(self): N = 5 lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N) ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057, 0.95843297] assert_allclose(lmbdas, np.linspace(-10, 10, num=N)) assert_allclose(ppcc, ppcc_expected) @dec.skipif(not have_matplotlib) def test_plot_kwarg(self): # Check with the matplotlib.pyplot module fig = plt.figure() fig.add_subplot(111) stats.boxcox_normplot(self.x, -20, 20, plot=plt) plt.close() # Check that a Matplotlib Axes object is accepted fig.add_subplot(111) ax = fig.add_subplot(111) stats.boxcox_normplot(self.x, -20, 20, plot=ax) plt.close() def test_invalid_inputs(self): # `lb` has to be larger than `la` assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0) # `x` can not contain negative values assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1) def test_empty(self): assert_(stats.boxcox_normplot([], 0, 1).size == 0) class TestCircFuncs(TestCase): def test_circfuncs(self): x = np.array([355, 5, 2, 359, 10, 350]) M = stats.circmean(x, high=360) Mval = 0.167690146 assert_allclose(M, Mval, rtol=1e-7) V = stats.circvar(x, high=360) Vval = 42.51955609 
assert_allclose(V, Vval, rtol=1e-7) S = stats.circstd(x, high=360) Sval = 6.520702116 assert_allclose(S, Sval, rtol=1e-7) def test_circfuncs_small(self): x = np.array([20, 21, 22, 18, 19, 20.5, 19.2]) M1 = x.mean() M2 = stats.circmean(x, high=360) assert_allclose(M2, M1, rtol=1e-5) V1 = x.var() V2 = stats.circvar(x, high=360) assert_allclose(V2, V1, rtol=1e-4) S1 = x.std() S2 = stats.circstd(x, high=360) assert_allclose(S2, S1, rtol=1e-4) def test_circmean_axis(self): x = np.array([[355, 5, 2, 359, 10, 350], [351, 7, 4, 352, 9, 349], [357, 9, 8, 358, 4, 356]]) M1 = stats.circmean(x, high=360) M2 = stats.circmean(x.ravel(), high=360) assert_allclose(M1, M2, rtol=1e-14) M1 = stats.circmean(x, high=360, axis=1) M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])] assert_allclose(M1, M2, rtol=1e-14) M1 = stats.circmean(x, high=360, axis=0) M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])] assert_allclose(M1, M2, rtol=1e-14) def test_circvar_axis(self): x = np.array([[355, 5, 2, 359, 10, 350], [351, 7, 4, 352, 9, 349], [357, 9, 8, 358, 4, 356]]) V1 = stats.circvar(x, high=360) V2 = stats.circvar(x.ravel(), high=360) assert_allclose(V1, V2, rtol=1e-11) V1 = stats.circvar(x, high=360, axis=1) V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])] assert_allclose(V1, V2, rtol=1e-11) V1 = stats.circvar(x, high=360, axis=0) V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])] assert_allclose(V1, V2, rtol=1e-11) def test_circstd_axis(self): x = np.array([[355, 5, 2, 359, 10, 350], [351, 7, 4, 352, 9, 349], [357, 9, 8, 358, 4, 356]]) S1 = stats.circstd(x, high=360) S2 = stats.circstd(x.ravel(), high=360) assert_allclose(S1, S2, rtol=1e-11) S1 = stats.circstd(x, high=360, axis=1) S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])] assert_allclose(S1, S2, rtol=1e-11) S1 = stats.circstd(x, high=360, axis=0) S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])] assert_allclose(S1, S2, rtol=1e-11) def 
test_circfuncs_array_like(self): x = [355, 5, 2, 359, 10, 350] assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7) assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7) assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7) def test_empty(self): assert_(np.isnan(stats.circmean([]))) assert_(np.isnan(stats.circstd([]))) assert_(np.isnan(stats.circvar([]))) def test_accuracy_wilcoxon(): freq = [1, 4, 16, 15, 8, 4, 5, 1, 2] nums = range(-4, 5) x = np.concatenate([[u] * v for u, v in zip(nums, freq)]) y = np.zeros(x.size) T, p = stats.wilcoxon(x, y, "pratt") assert_allclose(T, 423) assert_allclose(p, 0.00197547303533107) T, p = stats.wilcoxon(x, y, "zsplit") assert_allclose(T, 441) assert_allclose(p, 0.0032145343172473055) T, p = stats.wilcoxon(x, y, "wilcox") assert_allclose(T, 327) assert_allclose(p, 0.00641346115861) # Test the 'correction' option, using values computed in R with: # > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE}) x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112]) y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187]) T, p = stats.wilcoxon(x, y, correction=False) assert_equal(T, 34) assert_allclose(p, 0.6948866, rtol=1e-6) T, p = stats.wilcoxon(x, y, correction=True) assert_equal(T, 34) assert_allclose(p, 0.7240817, rtol=1e-6) def test_wilcoxon_result_attributes(): x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112]) y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187]) res = stats.wilcoxon(x, y, correction=False) attributes = ('statistic', 'pvalue') check_named_results(res, attributes) def test_wilcoxon_tie(): # Regression test for gh-2391. 
# Corresponding R code is: # > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE) # > result$p.value # [1] 0.001565402 # > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE) # > result$p.value # [1] 0.001904195 stat, p = stats.wilcoxon([0.1] * 10) expected_p = 0.001565402 assert_equal(stat, 0) assert_allclose(p, expected_p, rtol=1e-6) stat, p = stats.wilcoxon([0.1] * 10, correction=True) expected_p = 0.001904195 assert_equal(stat, 0) assert_allclose(p, expected_p, rtol=1e-6) class TestMedianTest(TestCase): def test_bad_n_samples(self): # median_test requires at least two samples. assert_raises(ValueError, stats.median_test, [1, 2, 3]) def test_empty_sample(self): # Each sample must contain at least one value. assert_raises(ValueError, stats.median_test, [], [1, 2, 3]) def test_empty_when_ties_ignored(self): # The grand median is 1, and all values in the first argument are # equal to the grand median. With ties="ignore", those values are # ignored, which results in the first sample being (in effect) empty. # This should raise a ValueError. assert_raises(ValueError, stats.median_test, [1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore") def test_empty_contingency_row(self): # The grand median is 1, and with the default ties="below", all the # values in the samples are counted as being below the grand median. # This would result a row of zeros in the contingency table, which is # an error. assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1]) # With ties="above", all the values are counted as above the # grand median. 
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1], ties="above") def test_bad_ties(self): assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], ties="foo") def test_bad_nan_policy(self): assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], nan_policy='foobar') def test_bad_keyword(self): assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5], foo="foo") def test_simple(self): x = [1, 2, 3] y = [1, 2, 3] stat, p, med, tbl = stats.median_test(x, y) # The median is floating point, but this equality test should be safe. assert_equal(med, 2.0) assert_array_equal(tbl, [[1, 1], [2, 2]]) # The expected values of the contingency table equal the contingency # table, so the statistic should be 0 and the p-value should be 1. assert_equal(stat, 0) assert_equal(p, 1) def test_ties_options(self): # Test the contingency table calculation. x = [1, 2, 3, 4] y = [5, 6] z = [7, 8, 9] # grand median is 5. # Default 'ties' option is "below". stat, p, m, tbl = stats.median_test(x, y, z) assert_equal(m, 5) assert_equal(tbl, [[0, 1, 3], [4, 1, 0]]) stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore") assert_equal(m, 5) assert_equal(tbl, [[0, 1, 3], [4, 0, 0]]) stat, p, m, tbl = stats.median_test(x, y, z, ties="above") assert_equal(m, 5) assert_equal(tbl, [[0, 2, 3], [4, 0, 0]]) def test_nan_policy_options(self): x = [1, 2, np.nan] y = [4, 5, 6] mt1 = stats.median_test(x, y, nan_policy='propagate') s, p, m, t = stats.median_test(x, y, nan_policy='omit') assert_equal(mt1, (np.nan, np.nan, np.nan, None)) assert_allclose(s, 0.31250000000000006) assert_allclose(p, 0.57615012203057869) assert_equal(m, 4.0) assert_equal(t, np.array([[0, 2],[2, 1]])) assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise') def test_basic(self): # median_test calls chi2_contingency to compute the test statistic # and p-value. Make sure it hasn't screwed up the call... 
x = [1, 2, 3, 4, 5] y = [2, 4, 6, 8] stat, p, m, tbl = stats.median_test(x, y) assert_equal(m, 4) assert_equal(tbl, [[1, 2], [4, 2]]) exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl) assert_allclose(stat, exp_stat) assert_allclose(p, exp_p) stat, p, m, tbl = stats.median_test(x, y, lambda_=0) assert_equal(m, 4) assert_equal(tbl, [[1, 2], [4, 2]]) exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0) assert_allclose(stat, exp_stat) assert_allclose(p, exp_p) stat, p, m, tbl = stats.median_test(x, y, correction=False) assert_equal(m, 4) assert_equal(tbl, [[1, 2], [4, 2]]) exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False) assert_allclose(stat, exp_stat) assert_allclose(p, exp_p) if __name__ == "__main__": run_module_suite()
bsd-3-clause
youprofit/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/port/base_unittest.py
114
24658
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import logging import optparse import sys import tempfile import unittest2 as unittest from webkitpy.common.system.executive import Executive, ScriptError from webkitpy.common.system import executive_mock from webkitpy.common.system.filesystem_mock import MockFileSystem from webkitpy.common.system.outputcapture import OutputCapture from webkitpy.common.system.path import abspath_to_uri from webkitpy.thirdparty.mock import Mock from webkitpy.tool.mocktool import MockOptions from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2 from webkitpy.common.system.systemhost_mock import MockSystemHost from webkitpy.port import Port, Driver, DriverOutput from webkitpy.port.test import add_unit_tests_to_mock_filesystem, TestPort class PortTest(unittest.TestCase): def make_port(self, executive=None, with_tests=False, port_name=None, **kwargs): host = MockSystemHost() if executive: host.executive = executive if with_tests: add_unit_tests_to_mock_filesystem(host.filesystem) return TestPort(host, **kwargs) return Port(host, port_name or 'baseport', **kwargs) def test_default_child_processes(self): port = self.make_port() self.assertIsNotNone(port.default_child_processes()) def test_format_wdiff_output_as_html(self): output = "OUTPUT %s %s %s" % (Port._WDIFF_DEL, Port._WDIFF_ADD, Port._WDIFF_END) html = self.make_port()._format_wdiff_output_as_html(output) expected_html = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre>OUTPUT <span class=del> <span class=add> </span></pre>" self.assertEqual(html, expected_html) def test_wdiff_command(self): port = self.make_port() port._path_to_wdiff = lambda: "/path/to/wdiff" command = port._wdiff_command("/actual/path", "/expected/path") expected_command = [ "/path/to/wdiff", "--start-delete=##WDIFF_DEL##", "--end-delete=##WDIFF_END##", "--start-insert=##WDIFF_ADD##", "--end-insert=##WDIFF_END##", "/actual/path", "/expected/path", ] self.assertEqual(command, expected_command) def 
_file_with_contents(self, contents, encoding="utf-8"): new_file = tempfile.NamedTemporaryFile() new_file.write(contents.encode(encoding)) new_file.flush() return new_file def test_pretty_patch_os_error(self): port = self.make_port(executive=executive_mock.MockExecutive2(exception=OSError)) oc = OutputCapture() oc.capture_output() self.assertEqual(port.pretty_patch_text("patch.txt"), port._pretty_patch_error_html) # This tests repeated calls to make sure we cache the result. self.assertEqual(port.pretty_patch_text("patch.txt"), port._pretty_patch_error_html) oc.restore_output() def test_pretty_patch_script_error(self): # FIXME: This is some ugly white-box test hacking ... port = self.make_port(executive=executive_mock.MockExecutive2(exception=ScriptError)) port._pretty_patch_available = True self.assertEqual(port.pretty_patch_text("patch.txt"), port._pretty_patch_error_html) # This tests repeated calls to make sure we cache the result. self.assertEqual(port.pretty_patch_text("patch.txt"), port._pretty_patch_error_html) def integration_test_run_wdiff(self): executive = Executive() # This may fail on some systems. We could ask the port # object for the wdiff path, but since we don't know what # port object to use, this is sufficient for now. try: wdiff_path = executive.run_command(["which", "wdiff"]).rstrip() except Exception, e: wdiff_path = None port = self.make_port(executive=executive) port._path_to_wdiff = lambda: wdiff_path if wdiff_path: # "with tempfile.NamedTemporaryFile() as actual" does not seem to work in Python 2.5 actual = self._file_with_contents(u"foo") expected = self._file_with_contents(u"bar") wdiff = port._run_wdiff(actual.name, expected.name) expected_wdiff = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre><span class=del>foo</span><span class=add>bar</span></pre>" self.assertEqual(wdiff, expected_wdiff) # Running the full wdiff_text method should give the same result. 
port._wdiff_available = True # In case it's somehow already disabled. wdiff = port.wdiff_text(actual.name, expected.name) self.assertEqual(wdiff, expected_wdiff) # wdiff should still be available after running wdiff_text with a valid diff. self.assertTrue(port._wdiff_available) actual.close() expected.close() # Bogus paths should raise a script error. self.assertRaises(ScriptError, port._run_wdiff, "/does/not/exist", "/does/not/exist2") self.assertRaises(ScriptError, port.wdiff_text, "/does/not/exist", "/does/not/exist2") # wdiff will still be available after running wdiff_text with invalid paths. self.assertTrue(port._wdiff_available) # If wdiff does not exist _run_wdiff should throw an OSError. port._path_to_wdiff = lambda: "/invalid/path/to/wdiff" self.assertRaises(OSError, port._run_wdiff, "foo", "bar") # wdiff_text should not throw an error if wdiff does not exist. self.assertEqual(port.wdiff_text("foo", "bar"), "") # However wdiff should not be available after running wdiff_text if wdiff is missing. self.assertFalse(port._wdiff_available) def test_wdiff_text(self): port = self.make_port() port.wdiff_available = lambda: True port._run_wdiff = lambda a, b: 'PASS' self.assertEqual('PASS', port.wdiff_text(None, None)) def test_diff_text(self): port = self.make_port() # Make sure that we don't run into decoding exceptions when the # filenames are unicode, with regular or malformed input (expected or # actual input is always raw bytes, not unicode). port.diff_text('exp', 'act', 'exp.txt', 'act.txt') port.diff_text('exp', 'act', u'exp.txt', 'act.txt') port.diff_text('exp', 'act', u'a\xac\u1234\u20ac\U00008000', 'act.txt') port.diff_text('exp' + chr(255), 'act', 'exp.txt', 'act.txt') port.diff_text('exp' + chr(255), 'act', u'exp.txt', 'act.txt') # Though expected and actual files should always be read in with no # encoding (and be stored as str objects), test unicode inputs just to # be safe. 
port.diff_text(u'exp', 'act', 'exp.txt', 'act.txt') port.diff_text( u'a\xac\u1234\u20ac\U00008000', 'act', 'exp.txt', 'act.txt') # And make sure we actually get diff output. diff = port.diff_text('foo', 'bar', 'exp.txt', 'act.txt') self.assertIn('foo', diff) self.assertIn('bar', diff) self.assertIn('exp.txt', diff) self.assertIn('act.txt', diff) self.assertNotIn('nosuchthing', diff) def test_setup_test_run(self): port = self.make_port() # This routine is a no-op. We just test it for coverage. port.setup_test_run() def test_test_dirs(self): port = self.make_port() port.host.filesystem.write_text_file(port.layout_tests_dir() + '/canvas/test', '') port.host.filesystem.write_text_file(port.layout_tests_dir() + '/css2.1/test', '') dirs = port.test_dirs() self.assertIn('canvas', dirs) self.assertIn('css2.1', dirs) def test_skipped_perf_tests(self): port = self.make_port() def add_text_file(dirname, filename, content='some content'): dirname = port.host.filesystem.join(port.perf_tests_dir(), dirname) port.host.filesystem.maybe_make_directory(dirname) port.host.filesystem.write_text_file(port.host.filesystem.join(dirname, filename), content) add_text_file('inspector', 'test1.html') add_text_file('inspector', 'unsupported_test1.html') add_text_file('inspector', 'test2.html') add_text_file('inspector/resources', 'resource_file.html') add_text_file('unsupported', 'unsupported_test2.html') add_text_file('', 'Skipped', '\n'.join(['Layout', '', 'SunSpider', 'Supported/some-test.html'])) self.assertEqual(port.skipped_perf_tests(), ['Layout', 'SunSpider', 'Supported/some-test.html']) def test_get_option__set(self): options, args = optparse.OptionParser().parse_args([]) options.foo = 'bar' port = self.make_port(options=options) self.assertEqual(port.get_option('foo'), 'bar') def test_get_option__unset(self): port = self.make_port() self.assertIsNone(port.get_option('foo')) def test_get_option__default(self): port = self.make_port() self.assertEqual(port.get_option('foo', 'bar'), 
'bar') def test_additional_platform_directory(self): port = self.make_port(port_name='foo') port.default_baseline_search_path = lambda: ['LayoutTests/platform/foo'] layout_test_dir = port.layout_tests_dir() test_file = 'fast/test.html' # No additional platform directory self.assertEqual( port.expected_baselines(test_file, '.txt'), [(None, 'fast/test-expected.txt')]) self.assertEqual(port.baseline_path(), 'LayoutTests/platform/foo') # Simple additional platform directory port._options.additional_platform_directory = ['/tmp/local-baselines'] port._filesystem.write_text_file('/tmp/local-baselines/fast/test-expected.txt', 'foo') self.assertEqual( port.expected_baselines(test_file, '.txt'), [('/tmp/local-baselines', 'fast/test-expected.txt')]) self.assertEqual(port.baseline_path(), '/tmp/local-baselines') # Multiple additional platform directories port._options.additional_platform_directory = ['/foo', '/tmp/local-baselines'] self.assertEqual( port.expected_baselines(test_file, '.txt'), [('/tmp/local-baselines', 'fast/test-expected.txt')]) self.assertEqual(port.baseline_path(), '/foo') def test_nonexistant_expectations(self): port = self.make_port(port_name='foo') port.expectations_files = lambda: ['/mock-checkout/LayoutTests/platform/exists/TestExpectations', '/mock-checkout/LayoutTests/platform/nonexistant/TestExpectations'] port._filesystem.write_text_file('/mock-checkout/LayoutTests/platform/exists/TestExpectations', '') self.assertEqual('\n'.join(port.expectations_dict().keys()), '/mock-checkout/LayoutTests/platform/exists/TestExpectations') def test_additional_expectations(self): port = self.make_port(port_name='foo') port.port_name = 'foo' port._filesystem.write_text_file('/mock-checkout/LayoutTests/platform/foo/TestExpectations', '') port._filesystem.write_text_file( '/tmp/additional-expectations-1.txt', 'content1\n') port._filesystem.write_text_file( '/tmp/additional-expectations-2.txt', 'content2\n') 
self.assertEqual('\n'.join(port.expectations_dict().values()), '') port._options.additional_expectations = [ '/tmp/additional-expectations-1.txt'] self.assertEqual('\n'.join(port.expectations_dict().values()), '\ncontent1\n') port._options.additional_expectations = [ '/tmp/nonexistent-file', '/tmp/additional-expectations-1.txt'] self.assertEqual('\n'.join(port.expectations_dict().values()), '\ncontent1\n') port._options.additional_expectations = [ '/tmp/additional-expectations-1.txt', '/tmp/additional-expectations-2.txt'] self.assertEqual('\n'.join(port.expectations_dict().values()), '\ncontent1\n\ncontent2\n') def test_additional_env_var(self): port = self.make_port(options=optparse.Values({'additional_env_var': ['FOO=BAR', 'BAR=FOO']})) self.assertEqual(port.get_option('additional_env_var'), ['FOO=BAR', 'BAR=FOO']) environment = port.setup_environ_for_server() self.assertTrue(('FOO' in environment) & ('BAR' in environment)) self.assertEqual(environment['FOO'], 'BAR') self.assertEqual(environment['BAR'], 'FOO') def test_uses_test_expectations_file(self): port = self.make_port(port_name='foo') port.port_name = 'foo' port.path_to_test_expectations_file = lambda: '/mock-results/TestExpectations' self.assertFalse(port.uses_test_expectations_file()) port._filesystem = MockFileSystem({'/mock-results/TestExpectations': ''}) self.assertTrue(port.uses_test_expectations_file()) def test_find_no_paths_specified(self): port = self.make_port(with_tests=True) layout_tests_dir = port.layout_tests_dir() tests = port.tests([]) self.assertNotEqual(len(tests), 0) def test_find_one_test(self): port = self.make_port(with_tests=True) tests = port.tests(['failures/expected/image.html']) self.assertEqual(len(tests), 1) def test_find_glob(self): port = self.make_port(with_tests=True) tests = port.tests(['failures/expected/im*']) self.assertEqual(len(tests), 2) def test_find_with_skipped_directories(self): port = self.make_port(with_tests=True) tests = port.tests(['userscripts']) 
self.assertNotIn('userscripts/resources/iframe.html', tests) def test_find_with_skipped_directories_2(self): port = self.make_port(with_tests=True) tests = port.tests(['userscripts/resources']) self.assertEqual(tests, []) def test_is_test_file(self): filesystem = MockFileSystem() self.assertTrue(Port._is_test_file(filesystem, '', 'foo.html')) self.assertTrue(Port._is_test_file(filesystem, '', 'foo.shtml')) self.assertTrue(Port._is_test_file(filesystem, '', 'foo.svg')) self.assertTrue(Port._is_test_file(filesystem, '', 'test-ref-test.html')) self.assertFalse(Port._is_test_file(filesystem, '', 'foo.png')) self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected.html')) self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected.svg')) self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected.xht')) self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected-mismatch.html')) self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected-mismatch.svg')) self.assertFalse(Port._is_test_file(filesystem, '', 'foo-expected-mismatch.xhtml')) self.assertFalse(Port._is_test_file(filesystem, '', 'foo-ref.html')) self.assertFalse(Port._is_test_file(filesystem, '', 'foo-notref.html')) self.assertFalse(Port._is_test_file(filesystem, '', 'foo-notref.xht')) self.assertFalse(Port._is_test_file(filesystem, '', 'foo-ref.xhtml')) self.assertFalse(Port._is_test_file(filesystem, '', 'ref-foo.html')) self.assertFalse(Port._is_test_file(filesystem, '', 'notref-foo.xhr')) def test_is_reference_html_file(self): filesystem = MockFileSystem() self.assertTrue(Port.is_reference_html_file(filesystem, '', 'foo-expected.html')) self.assertTrue(Port.is_reference_html_file(filesystem, '', 'foo-expected-mismatch.xml')) self.assertTrue(Port.is_reference_html_file(filesystem, '', 'foo-ref.xhtml')) self.assertTrue(Port.is_reference_html_file(filesystem, '', 'foo-notref.svg')) self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo.html')) 
self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo-expected.txt')) self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo-expected.shtml')) self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo-expected.php')) self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo-expected.mht')) def test_parse_reftest_list(self): port = self.make_port(with_tests=True) port.host.filesystem.files['bar/reftest.list'] = "\n".join(["== test.html test-ref.html", "", "# some comment", "!= test-2.html test-notref.html # more comments", "== test-3.html test-ref.html", "== test-3.html test-ref2.html", "!= test-3.html test-notref.html"]) reftest_list = Port._parse_reftest_list(port.host.filesystem, 'bar') self.assertEqual(reftest_list, {'bar/test.html': [('==', 'bar/test-ref.html')], 'bar/test-2.html': [('!=', 'bar/test-notref.html')], 'bar/test-3.html': [('==', 'bar/test-ref.html'), ('==', 'bar/test-ref2.html'), ('!=', 'bar/test-notref.html')]}) def test_reference_files(self): port = self.make_port(with_tests=True) self.assertEqual(port.reference_files('passes/svgreftest.svg'), [('==', port.layout_tests_dir() + '/passes/svgreftest-expected.svg')]) self.assertEqual(port.reference_files('passes/xhtreftest.svg'), [('==', port.layout_tests_dir() + '/passes/xhtreftest-expected.html')]) self.assertEqual(port.reference_files('passes/phpreftest.php'), [('!=', port.layout_tests_dir() + '/passes/phpreftest-expected-mismatch.svg')]) def test_operating_system(self): self.assertEqual('mac', self.make_port().operating_system()) def test_http_server_supports_ipv6(self): port = self.make_port() self.assertTrue(port.http_server_supports_ipv6()) port.host.platform.os_name = 'cygwin' self.assertFalse(port.http_server_supports_ipv6()) port.host.platform.os_name = 'win' self.assertTrue(port.http_server_supports_ipv6()) def test_check_httpd_success(self): port = self.make_port(executive=MockExecutive2()) port._path_to_apache = lambda: '/usr/sbin/httpd' capture = 
OutputCapture() capture.capture_output() self.assertTrue(port.check_httpd()) _, _, logs = capture.restore_output() self.assertEqual('', logs) def test_httpd_returns_error_code(self): port = self.make_port(executive=MockExecutive2(exit_code=1)) port._path_to_apache = lambda: '/usr/sbin/httpd' capture = OutputCapture() capture.capture_output() self.assertFalse(port.check_httpd()) _, _, logs = capture.restore_output() self.assertEqual('httpd seems broken. Cannot run http tests.\n', logs) def test_test_exists(self): port = self.make_port(with_tests=True) self.assertTrue(port.test_exists('passes')) self.assertTrue(port.test_exists('passes/text.html')) self.assertFalse(port.test_exists('passes/does_not_exist.html')) self.assertTrue(port.test_exists('virtual')) self.assertFalse(port.test_exists('virtual/does_not_exist.html')) self.assertTrue(port.test_exists('virtual/passes/text.html')) def test_test_isfile(self): port = self.make_port(with_tests=True) self.assertFalse(port.test_isfile('passes')) self.assertTrue(port.test_isfile('passes/text.html')) self.assertFalse(port.test_isfile('passes/does_not_exist.html')) self.assertFalse(port.test_isfile('virtual')) self.assertTrue(port.test_isfile('virtual/passes/text.html')) self.assertFalse(port.test_isfile('virtual/does_not_exist.html')) def test_test_isdir(self): port = self.make_port(with_tests=True) self.assertTrue(port.test_isdir('passes')) self.assertFalse(port.test_isdir('passes/text.html')) self.assertFalse(port.test_isdir('passes/does_not_exist.html')) self.assertFalse(port.test_isdir('passes/does_not_exist/')) self.assertTrue(port.test_isdir('virtual')) self.assertFalse(port.test_isdir('virtual/does_not_exist.html')) self.assertFalse(port.test_isdir('virtual/does_not_exist/')) self.assertFalse(port.test_isdir('virtual/passes/text.html')) def test_tests(self): port = self.make_port(with_tests=True) tests = port.tests([]) self.assertIn('passes/text.html', tests) self.assertIn('virtual/passes/text.html', tests) tests = 
port.tests(['passes']) self.assertIn('passes/text.html', tests) self.assertIn('passes/passes/test-virtual-passes.html', tests) self.assertNotIn('virtual/passes/text.html', tests) tests = port.tests(['virtual/passes']) self.assertNotIn('passes/text.html', tests) self.assertIn('virtual/passes/test-virtual-passes.html', tests) self.assertIn('virtual/passes/passes/test-virtual-passes.html', tests) self.assertNotIn('virtual/passes/test-virtual-virtual/passes.html', tests) self.assertNotIn('virtual/passes/virtual/passes/test-virtual-passes.html', tests) def test_build_path(self): port = self.make_port(options=optparse.Values({'build_directory': '/my-build-directory/'})) self.assertEqual(port._build_path(), '/my-build-directory/Release') class NaturalCompareTest(unittest.TestCase): def setUp(self): self._port = TestPort(MockSystemHost()) def assert_cmp(self, x, y, result): self.assertEqual(cmp(self._port._natural_sort_key(x), self._port._natural_sort_key(y)), result) def test_natural_compare(self): self.assert_cmp('a', 'a', 0) self.assert_cmp('ab', 'a', 1) self.assert_cmp('a', 'ab', -1) self.assert_cmp('', '', 0) self.assert_cmp('', 'ab', -1) self.assert_cmp('1', '2', -1) self.assert_cmp('2', '1', 1) self.assert_cmp('1', '10', -1) self.assert_cmp('2', '10', -1) self.assert_cmp('foo_1.html', 'foo_2.html', -1) self.assert_cmp('foo_1.1.html', 'foo_2.html', -1) self.assert_cmp('foo_1.html', 'foo_10.html', -1) self.assert_cmp('foo_2.html', 'foo_10.html', -1) self.assert_cmp('foo_23.html', 'foo_10.html', 1) self.assert_cmp('foo_23.html', 'foo_100.html', -1) class KeyCompareTest(unittest.TestCase): def setUp(self): self._port = TestPort(MockSystemHost()) def assert_cmp(self, x, y, result): self.assertEqual(cmp(self._port.test_key(x), self._port.test_key(y)), result) def test_test_key(self): self.assert_cmp('/a', '/a', 0) self.assert_cmp('/a', '/b', -1) self.assert_cmp('/a2', '/a10', -1) self.assert_cmp('/a2/foo', '/a10/foo', -1) self.assert_cmp('/a/foo11', '/a/foo2', 1) 
self.assert_cmp('/ab', '/a/a/b', -1) self.assert_cmp('/a/a/b', '/ab', 1) self.assert_cmp('/foo-bar/baz', '/foo/baz', -1)
bsd-3-clause
msdx321/android_kernel_samsung_heroXqltechn
lazy-prebuilt/aarch64-linux-android-4.9/share/gdb/python/gdb/types.py
126
5421
# Type utilities. # Copyright (C) 2010-2014 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Utilities for working with gdb.Types.""" import gdb def get_basic_type(type_): """Return the "basic" type of a type. Arguments: type_: The type to reduce to its basic type. Returns: type_ with const/volatile is stripped away, and typedefs/references converted to the underlying type. """ while (type_.code == gdb.TYPE_CODE_REF or type_.code == gdb.TYPE_CODE_TYPEDEF): if type_.code == gdb.TYPE_CODE_REF: type_ = type_.target() else: type_ = type_.strip_typedefs() return type_.unqualified() def has_field(type_, field): """Return True if a type has the specified field. Arguments: type_: The type to examine. It must be one of gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION. field: The name of the field to look up. Returns: True if the field is present either in type_ or any baseclass. Raises: TypeError: The type is not a struct or union. """ type_ = get_basic_type(type_) if (type_.code != gdb.TYPE_CODE_STRUCT and type_.code != gdb.TYPE_CODE_UNION): raise TypeError("not a struct or union") for f in type_.fields(): if f.is_base_class: if has_field(f.type, field): return True else: # NOTE: f.name could be None if f.name == field: return True return False def make_enum_dict(enum_type): """Return a dictionary from a program's enum type. 
Arguments: enum_type: The enum to compute the dictionary for. Returns: The dictionary of the enum. Raises: TypeError: The type is not an enum. """ if enum_type.code != gdb.TYPE_CODE_ENUM: raise TypeError("not an enum type") enum_dict = {} for field in enum_type.fields(): # The enum's value is stored in "enumval". enum_dict[field.name] = field.enumval return enum_dict def deep_items (type_): """Return an iterator that recursively traverses anonymous fields. Arguments: type_: The type to traverse. It should be one of gdb.TYPE_CODE_STRUCT or gdb.TYPE_CODE_UNION. Returns: an iterator similar to gdb.Type.iteritems(), i.e., it returns pairs of key, value, but for any anonymous struct or union field that field is traversed recursively, depth-first. """ for k, v in type_.iteritems (): if k: yield k, v else: for i in deep_items (v.type): yield i class TypePrinter(object): """The base class for type printers. Instances of this type can be used to substitute type names during 'ptype'. A type printer must have at least 'name' and 'enabled' attributes, and supply an 'instantiate' method. The 'instantiate' method must either return None, or return an object which has a 'recognize' method. This method must accept a gdb.Type argument and either return None, meaning that the type was not recognized, or a string naming the type. """ def __init__(self, name): self.name = name self.enabled = True def instantiate(self): return None # Helper function for computing the list of type recognizers. def _get_some_type_recognizers(result, plist): for printer in plist: if printer.enabled: inst = printer.instantiate() if inst is not None: result.append(inst) return None def get_type_recognizers(): "Return a list of the enabled type recognizers for the current context." result = [] # First try the objfiles. for objfile in gdb.objfiles(): _get_some_type_recognizers(result, objfile.type_printers) # Now try the program space. 
_get_some_type_recognizers(result, gdb.current_progspace().type_printers) # Finally, globals. _get_some_type_recognizers(result, gdb.type_printers) return result def apply_type_recognizers(recognizers, type_obj): """Apply the given list of type recognizers to the type TYPE_OBJ. If any recognizer in the list recognizes TYPE_OBJ, returns the name given by the recognizer. Otherwise, this returns None.""" for r in recognizers: result = r.recognize(type_obj) if result is not None: return result return None def register_type_printer(locus, printer): """Register a type printer. PRINTER is the type printer instance. LOCUS is either an objfile, a program space, or None, indicating global registration.""" if locus is None: locus = gdb locus.type_printers.insert(0, printer)
gpl-2.0
lorensen/VTKExamples
src/Python/Rendering/WalkCow.py
1
12890
#!/usr/local/bin/python # Translated from walkCow.tcl import vtk def walk_cow(file_name, figure): figure = abs(figure) if figure > 2: figure = 0 colors = vtk.vtkNamedColors() # Set the background color. colors.SetColor("BkgColor1", [60, 93, 144, 255]) colors.SetColor("BkgColor2", [26, 51, 102, 255]) ren = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # The cow pipeline. cow = vtk.vtkBYUReader() cow.SetGeometryFileName(file_name) cow.Update() cowMapper = vtk.vtkPolyDataMapper() cowMapper.SetInputConnection(cow.GetOutputPort()) cowMapper.ScalarVisibilityOff() cowActor = vtk.vtkActor() cowActor.SetMapper(cowMapper) cowActor.GetProperty().SetColor(colors.GetColor3d("Wheat")) ren.AddActor(cowActor) # Axes pipeline. cowAxesSource = vtk.vtkAxes() cowAxesSource.SetScaleFactor(10.0) cowAxesSource.SetOrigin(0, 0, 0) cowAxesMapper = vtk.vtkPolyDataMapper() cowAxesMapper.SetInputConnection(cowAxesSource.GetOutputPort()) cowAxes = vtk.vtkActor() cowAxes.SetMapper(cowAxesMapper) cowAxes.VisibilityOff() ren.AddActor(cowAxes) ren.SetBackground(colors.GetColor3d("BkgColor1")) renWin.SetSize(600, 480) iren.Initialize() cowAxes.VisibilityOn() renWin.Render() # Activate this if you want to see the Position and Focal point. # ren.GetActiveCamera().AddObserver('ModifiedEvent', CameraModifiedCallback) # These four walks use the same camera position. Rotate_X(cowActor, ren, renWin) Rotate_Y(cowActor, ren, renWin) Rotate_Z(cowActor, ren, renWin) Rotate_XY(cowActor, ren, renWin) ren.SetBackground(colors.GetColor3d("BkgColor2")) if figure == 1: Rotate_V_0(cowActor, ren, renWin) elif figure == 2: Rotate_V_V(cowActor, ren, renWin) else: Rotate_V_0(cowActor, ren, renWin) Rotate_V_V(cowActor, ren, renWin) # Walk() needs to go after Rotate_V_0() or Rotate_V_V(). Walk(cowActor, ren, renWin) # Interact with data. 
renWin.EraseOff() iren.Start() def main(): file_name, figure = get_program_parameters() walk_cow(file_name, figure) def get_program_parameters(): import argparse description = 'Produce figures: 3-32, 3-33a, 3-33b from the VTK Textbook.' epilogue = ''' It is a translation of the original WalkCow.tcl with a few additional enhancements. If the parameter figure is 0, 1 or 2 then these correspond to the VTK Textbook figures 3-32, 3-33a, 3-33b in that order. ''' parser = argparse.ArgumentParser(description=description, epilog=epilogue, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('filename', help='The file cow.g.') parser.add_argument('figure', default=0, type=int, nargs='?', help='The particular rotation that you want to view.') args = parser.parse_args() return args.filename, args.figure """ These Rotate* and Walk functions create a scene where multiple views of the object exist. They all operate in a similar manner, namely: 1) Accept vtkActor, vtkRenderer, vtkRenderWindow as parameters. 2) Position the object. 3) Position the observer with the focal point sent to the centre of the object. 4) Render and set EraseOff() in the render window. Note that: EraseOff() has to be called after a Render() call to work in the desired way. 5) Then rotate or Walk the object around the scene. 6) Optionally write out the scene using Screenshot(). 6) Set EraseOff() in the render window. 7) Reset the object position. """ def Rotate_X(cowActor, ren, renWin): # Six rotations about the x axis. ren.ResetCamera() ren.ResetCameraClippingRange() cowActor.SetOrientation(0.0, 0.0, 0.0) cowActor.SetOrigin(0.0, 0.0, 0) cowActor.SetPosition(0, 0, 0) # Get the focal point. bounds = cowActor.GetBounds() fp = [0.0] * 3 for i in range(0, 3): fp[i] = (bounds[i * 2 + 1] + bounds[i * 2]) / 2.0 # This closely matches the original illustration. 
ren.GetActiveCamera().SetPosition(2, 25, 0) ren.GetActiveCamera().SetFocalPoint(fp) ren.GetActiveCamera().SetViewUp(0, 0, -1) ren.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() for idx in range(0, 6): cowActor.RotateX(60) renWin.Render() renWin.Render() # Screenshot("Fig3-31a.png", renWin) renWin.EraseOn() def Rotate_Y(cowActor, ren, renWin): # Six rotations about the y axis. ren.ResetCamera() ren.ResetCameraClippingRange() cowActor.SetOrientation(0.0, 0.0, 0.0) cowActor.SetOrigin(0.0, 0.0, 0) cowActor.SetPosition(0, 0, 0) # Get the focal point. bounds = cowActor.GetBounds() fp = [0.0] * 3 for i in range(0, 3): fp[i] = (bounds[i * 2 + 1] + bounds[i * 2]) / 2.0 # This closely matches the original illustration. ren.GetActiveCamera().SetPosition(2, 0, 25) ren.GetActiveCamera().SetFocalPoint(fp) ren.GetActiveCamera().SetViewUp(0, 1, 0) ren.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() for idx in range(0, 6): cowActor.RotateY(60) renWin.Render() renWin.Render() # Screenshot("Fig3-31b.png", renWin) renWin.EraseOn() def Rotate_Z(cowActor, ren, renWin): # Six rotations about the z axis. ren.ResetCamera() ren.ResetCameraClippingRange() cowActor.SetOrientation(0.0, 0.0, 0.0) cowActor.SetOrigin(0.0, 0.0, 0) cowActor.SetPosition(0, 0, 0) # Get the focal point. bounds = cowActor.GetBounds() fp = [0.0] * 3 for i in range(0, 3): fp[i] = (bounds[i * 2 + 1] + bounds[i * 2]) / 2.0 # This closely matches the original illustration. # ren.GetActiveCamera().SetPosition(2, 0, 25) ren.GetActiveCamera().SetPosition(2, 0, 25) ren.GetActiveCamera().SetFocalPoint(fp) ren.GetActiveCamera().SetViewUp(0, 1, 0) ren.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() for idx in range(0, 6): cowActor.RotateZ(60) renWin.Render() renWin.Render() # Screenshot("Fig3-31c.png", renWin) renWin.EraseOn() def Rotate_XY(cowActor, ren, renWin): # First a rotation about the x axis, then six rotations about the y axis. 
ren.ResetCamera() ren.ResetCameraClippingRange() cowActor.SetOrientation(0.0, 0.0, 0.0) cowActor.SetOrigin(0.0, 0.0, 0) cowActor.SetPosition(0, 0, 0) # Get the focal point. bounds = cowActor.GetBounds() fp = [0.0] * 3 for i in range(0, 3): fp[i] = (bounds[i * 2 + 1] + bounds[i * 2]) / 2.0 # This closely matches the original illustration. # ren.GetActiveCamera().SetPosition(2, 0, 24) ren.GetActiveCamera().SetPosition(2, 0, 25) ren.GetActiveCamera().SetFocalPoint(fp) ren.GetActiveCamera().SetViewUp(0, 1, 0) ren.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() cowActor.RotateX(60) for idx in range(0, 6): cowActor.RotateY(60) renWin.Render() renWin.Render() cowActor.RotateX(-60) # Screenshot("Fig3-31d.png", renWin) renWin.EraseOn() def Rotate_V_0(cowActor, ren, renWin): # The cow rotating about a vector passing through her nose. # With the origin at (0, 0, 0). ren.ResetCamera() ren.ResetCameraClippingRange() cowActor.SetOrientation(0.0, 0.0, 0.0) cowActor.SetOrigin(0.0, 0.0, 0) cowActor.SetPosition(0, 0, 0) # Get the focal point. bounds = cowActor.GetBounds() fp = [0.0] * 3 for i in range(0, 3): fp[i] = (bounds[i * 2 + 1] + bounds[i * 2]) / 2.0 cowPos = vtk.vtkTransform() cowPos.Identity() cowPos.SetMatrix(cowActor.GetMatrix()) cowTransform = vtk.vtkTransform() cowTransform.Identity() cowActor.SetUserMatrix(cowTransform.GetMatrix()) # This closely matches the original illustration. ren.GetActiveCamera().SetPosition(16, 9, -12) ren.GetActiveCamera().SetFocalPoint(fp) ren.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() for idx in range(0, 6): cowActor.RotateWXYZ(60, 2.19574, -1.42455, -0.0331036) renWin.Render() renWin.Render() # Screenshot("Fig3-33a.png", renWin) renWin.EraseOn() # Put the cow back on the origin. 
# for idx in range(0, 6): # cowActor.RotateWXYZ(-60, 2.19574, -1.42455, -0.0331036) # cowActor.SetUserMatrix(cowPos.GetMatrix()) # ren.GetActiveCamera().SetPosition(0, 0, 1) # ren.GetActiveCamera().SetViewUp(0, 1, 0) # ren.ResetCamera() def Rotate_V_V(cowActor, ren, renWin): # The cow rotating about a vector passing through her nose. # With the origin at (6.11414, 1.27386, 0.015175). ren.ResetCamera() ren.ResetCameraClippingRange() cowActor.SetOrientation(0.0, 0.0, 0.0) cowActor.SetOrigin(0.0, 0.0, 0) cowActor.SetPosition(0, 0, 0) # Get the focal point. bounds = cowActor.GetBounds() fp = [0.0] * 3 for i in range(0, 3): fp[i] = (bounds[i * 2 + 1] + bounds[i * 2]) / 2.0 cowPos = vtk.vtkTransform() cowPos.Identity() cowPos.SetMatrix(cowActor.GetMatrix()) cowActor.SetOrigin(6.11414, 1.27386, 0.015175) # The cow's nose cowTransform = vtk.vtkTransform() cowTransform.Identity() cowActor.SetUserMatrix(cowTransform.GetMatrix()) # This closely matches the original illustration. ren.GetActiveCamera().SetPosition(31, 23, -21) ren.GetActiveCamera().SetFocalPoint(fp) ren.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() for idx in range(0, 6): cowActor.RotateWXYZ(60, 2.19574, -1.42455, -0.0331036) renWin.Render() renWin.Render() # Screenshot("Fig3-33b.png", renWin) renWin.EraseOn() # Put the cow back on the origin. # for idx in range(0, 6): # cowActor.RotateWXYZ(-60, 2.19574, -1.42455, -0.0331036) # cowActor.SetUserMatrix(cowPos.GetMatrix()) def Walk(cowActor, ren, renWin): # The cow "walking" around the global origin cowPos = vtk.vtkTransform() cowPos.Identity() cowPos.SetMatrix(cowActor.GetMatrix()) cowActor.SetOrientation(0.0, 0.0, 0.0) cowActor.SetOrigin(0.0, 0.0, 0.0) # Get the focal point. 
bounds = cowActor.GetBounds() fp = [0.0] * 3 for i in range(0, 3): fp[i] = (bounds[i * 2 + 1] + bounds[i * 2]) / 2.0 cowTransform = vtk.vtkTransform() cowTransform.Identity() cowTransform.Translate(0, 0, 5) cowActor.SetUserMatrix(cowTransform.GetMatrix()) # This closely matches the original illustration. ren.GetActiveCamera().SetPosition(1, 24, 16) ren.GetActiveCamera().SetFocalPoint(fp) ren.GetActiveCamera().SetViewUp(0, 0, -1) ren.ResetCameraClippingRange() renWin.Render() renWin.Render() renWin.EraseOff() for idx in range(1, 7): cowTransform.Identity() cowTransform.RotateY(idx * 60) cowTransform.Translate(0, 0, 5) cowActor.SetUserMatrix(cowTransform.GetMatrix()) renWin.Render() renWin.Render() # Screenshot("Fig3-32.png", renWin) renWin.EraseOn() # Walkies are over, put the cow back on the origin. # cowActor.SetUserMatrix(cowPos.GetMatrix()) def CameraModifiedCallback(caller, ev): """ Used to estimate positions similar to the book illustrations. :param caller: :return: """ print(caller.GetClassName(), "modified") # Print the interesting stuff. print("\tPosition: ", caller.GetPosition()[0], ", ", caller.GetPosition()[1], ", ", caller.GetPosition()[2]) print("\tFocal point: ", caller.GetFocalPoint()[0], ", ", caller.GetFocalPoint()[1], ", ", caller.GetFocalPoint()[2]) def Screenshot(fileName, renWin): """ Save a screenshot. :param fileName: :param renWin: :return: """ windowToImageFilter = vtk.vtkWindowToImageFilter() windowToImageFilter.SetInput(renWin) windowToImageFilter.SetScale(1) # image quality # We are not recording the alpha (transparency) channel. # windowToImageFilter.SetInputBufferTypeToRGBA() windowToImageFilter.SetInputBufferTypeToRGB() # Read from the front buffer. windowToImageFilter.ReadFrontBufferOff() windowToImageFilter.Update() writer = vtk.vtkPNGWriter() writer.SetFileName(fileName) writer.SetInputConnection(windowToImageFilter.GetOutputPort()) writer.Write() if __name__ == '__main__': main()
apache-2.0
juliatem/aiohttp
demos/polls/aiohttpdemo_polls/main.py
5
1553
import argparse import asyncio import logging import sys import jinja2 from trafaret_config import commandline import aiohttp_jinja2 from aiohttp import web from aiohttpdemo_polls.db import close_pg, init_pg from aiohttpdemo_polls.middlewares import setup_middlewares from aiohttpdemo_polls.routes import setup_routes from aiohttpdemo_polls.utils import TRAFARET def init(loop, argv): ap = argparse.ArgumentParser() commandline.standard_argparse_options(ap, default_config='./config/polls.yaml') # # define your command-line arguments here # options = ap.parse_args(argv) config = commandline.config_from_options(options, TRAFARET) # setup application and extensions app = web.Application(loop=loop) # load config from yaml file in current dir app['config'] = config # setup Jinja2 template renderer aiohttp_jinja2.setup( app, loader=jinja2.PackageLoader('aiohttpdemo_polls', 'templates')) # create connection to the database app.on_startup.append(init_pg) # shutdown db connection on exit app.on_cleanup.append(close_pg) # setup views and routes setup_routes(app) setup_middlewares(app) return app def main(argv): # init logging logging.basicConfig(level=logging.DEBUG) loop = asyncio.get_event_loop() app = init(loop, argv) web.run_app(app, host=app['config']['host'], port=app['config']['port']) if __name__ == '__main__': main(sys.argv[1:])
apache-2.0
usakey/vnpy
vn.strategy/strategydemo/backtestingEngine.py
91
7735
# encoding: UTF-8 import shelve from eventEngine import * from pymongo import Connection from pymongo.errors import * from strategyEngine import * ######################################################################## class LimitOrder(object): """限价单对象""" #---------------------------------------------------------------------- def __init__(self, symbol): """Constructor""" self.symbol = symbol self.price = 0 self.volume = 0 self.direction = None self.offset = None ######################################################################## class BacktestingEngine(object): """ 回测引擎,作用: 1. 从数据库中读取数据并回放 2. 作为StrategyEngine创建时的参数传入 """ #---------------------------------------------------------------------- def __init__(self): """Constructor""" self.eventEngine = EventEngine() # 策略引擎 self.strategyEngine = None # TICK历史数据列表,由于要使用For循环来实现仿真回放 # 使用list的速度比Numpy和Pandas都要更快 self.listDataHistory = [] # 限价单字典 self.dictOrder = {} # 最新的TICK数据 self.currentData = None # 回测的成交字典 self.listTrade = [] # 报单编号 self.orderRef = 0 # 成交编号 self.tradeID = 0 #---------------------------------------------------------------------- def setStrategyEngine(self, engine): """设置策略引擎""" self.strategyEngine = engine self.writeLog(u'策略引擎设置完成') #---------------------------------------------------------------------- def connectMongo(self): """连接MongoDB数据库""" try: self.__mongoConnection = Connection() self.__mongoConnected = True self.__mongoTickDB = self.__mongoConnection['TickDB'] self.writeLog(u'回测引擎连接MongoDB成功') except ConnectionFailure: self.writeLog(u'回测引擎连接MongoDB失败') #---------------------------------------------------------------------- def loadDataHistory(self, symbol, startDate, endDate): """载入历史TICK数据""" if self.__mongoConnected: collection = self.__mongoTickDB[symbol] # 如果输入了读取TICK的最后日期 if endDate: cx = collection.find({'date':{'$gte':startDate, '$lte':endDate}}) elif startDate: cx = collection.find({'date':{'$gte':startDate}}) else: cx = collection.find() # 将TICK数据读入内存 self.listDataHistory = [data 
for data in cx] self.writeLog(u'历史TICK数据载入完成') else: self.writeLog(u'MongoDB未连接,请检查') #---------------------------------------------------------------------- def processLimitOrder(self): """处理限价单""" for ref, order in self.dictOrder.items(): # 如果是买单,且限价大于等于当前TICK的卖一价,则假设成交 if order.direction == DIRECTION_BUY and \ order.price >= self.currentData['AskPrice1']: self.executeLimitOrder(ref, order, self.currentData['AskPrice1']) # 如果是卖单,且限价低于当前TICK的买一价,则假设全部成交 if order.direction == DIRECTION_SELL and \ order.price <= self.currentData['BidPrice1']: self.executeLimitOrder(ref, order, self.currentData['BidPrice1']) #---------------------------------------------------------------------- def executeLimitOrder(self, ref, order, price): """限价单成交处理""" # 成交回报 self.tradeID = self.tradeID + 1 tradeData = {} tradeData['InstrumentID'] = order.symbol tradeData['OrderRef'] = ref tradeData['TradeID'] = str(self.tradeID) tradeData['Direction'] = order.direction tradeData['OffsetFlag'] = order.offset tradeData['Price'] = price tradeData['Volume'] = order.volume tradeEvent = Event() tradeEvent.dict_['data'] = tradeData self.strategyEngine.updateTrade(tradeEvent) # 报单回报 orderData = {} orderData['InstrumentID'] = order.symbol orderData['OrderRef'] = ref orderData['Direction'] = order.direction orderData['CombOffsetFlag'] = order.offset orderData['LimitPrice'] = price orderData['VolumeTotalOriginal'] = order.volume orderData['VolumeTraded'] = order.volume orderData['InsertTime'] = '' orderData['CancelTime'] = '' orderData['FrontID'] = '' orderData['SessionID'] = '' orderData['OrderStatus'] = '' orderEvent = Event() orderEvent.dict_['data'] = orderData self.strategyEngine.updateOrder(orderEvent) # 记录该成交到列表中 self.listTrade.append(tradeData) # 删除该限价单 del self.dictOrder[ref] #---------------------------------------------------------------------- def startBacktesting(self): """开始回测""" self.writeLog(u'开始回测') for data in self.listDataHistory: # 记录最新的TICK数据 self.currentData = data # 处理限价单 
self.processLimitOrder() # 推送到策略引擎中 event = Event() event.dict_['data'] = data self.strategyEngine.updateMarketData(event) self.saveTradeData() self.writeLog(u'回测结束') #---------------------------------------------------------------------- def sendOrder(self, instrumentid, exchangeid, price, pricetype, volume, direction, offset): """回测发单""" order = LimitOrder(instrumentid) order.price = price order.direction = direction order.volume = volume order.offset = offset self.orderRef = self.orderRef + 1 self.dictOrder[str(self.orderRef)] = order return str(self.orderRef) #---------------------------------------------------------------------- def cancelOrder(self, instrumentid, exchangeid, orderref, frontid, sessionid): """回测撤单""" try: del self.dictOrder[orderref] except KeyError: pass #---------------------------------------------------------------------- def writeLog(self, log): """写日志""" print log #---------------------------------------------------------------------- def selectInstrument(self, symbol): """读取合约数据""" d = {} d['ExchangeID'] = 'BackTesting' return d #---------------------------------------------------------------------- def saveTradeData(self): """保存交易记录""" f = shelve.open('result.vn') f['listTrade'] = self.listTrade f.close() #---------------------------------------------------------------------- def subscribe(self, symbol, exchange): """仿真订阅合约""" pass
mit
WillisXChen/django-oscar
oscar/lib/python2.7/site-packages/requests/packages/chardet/escsm.py
2930
7839
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .constants import eStart, eError, eItsMe HZ_cls = ( 1,0,0,0,0,0,0,0, # 00 - 07 0,0,0,0,0,0,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 0,0,0,1,0,0,0,0, # 18 - 1f 0,0,0,0,0,0,0,0, # 20 - 27 0,0,0,0,0,0,0,0, # 28 - 2f 0,0,0,0,0,0,0,0, # 30 - 37 0,0,0,0,0,0,0,0, # 38 - 3f 0,0,0,0,0,0,0,0, # 40 - 47 0,0,0,0,0,0,0,0, # 48 - 4f 0,0,0,0,0,0,0,0, # 50 - 57 0,0,0,0,0,0,0,0, # 58 - 5f 0,0,0,0,0,0,0,0, # 60 - 67 0,0,0,0,0,0,0,0, # 68 - 6f 0,0,0,0,0,0,0,0, # 70 - 77 0,0,0,4,0,5,2,0, # 78 - 7f 1,1,1,1,1,1,1,1, # 80 - 87 1,1,1,1,1,1,1,1, # 88 - 8f 1,1,1,1,1,1,1,1, # 90 - 97 1,1,1,1,1,1,1,1, # 98 - 9f 1,1,1,1,1,1,1,1, # a0 - a7 1,1,1,1,1,1,1,1, # a8 - af 1,1,1,1,1,1,1,1, # b0 - b7 1,1,1,1,1,1,1,1, # b8 - bf 1,1,1,1,1,1,1,1, # c0 - c7 1,1,1,1,1,1,1,1, # c8 - cf 1,1,1,1,1,1,1,1, # d0 - d7 1,1,1,1,1,1,1,1, # d8 - df 1,1,1,1,1,1,1,1, # e0 - e7 
1,1,1,1,1,1,1,1, # e8 - ef 1,1,1,1,1,1,1,1, # f0 - f7 1,1,1,1,1,1,1,1, # f8 - ff ) HZ_st = ( eStart,eError, 3,eStart,eStart,eStart,eError,eError,# 00-07 eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f eItsMe,eItsMe,eError,eError,eStart,eStart, 4,eError,# 10-17 5,eError, 6,eError, 5, 5, 4,eError,# 18-1f 4,eError, 4, 4, 4,eError, 4,eError,# 20-27 4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f ) HZCharLenTable = (0, 0, 0, 0, 0, 0) HZSMModel = {'classTable': HZ_cls, 'classFactor': 6, 'stateTable': HZ_st, 'charLenTable': HZCharLenTable, 'name': "HZ-GB-2312"} ISO2022CN_cls = ( 2,0,0,0,0,0,0,0, # 00 - 07 0,0,0,0,0,0,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 0,0,0,1,0,0,0,0, # 18 - 1f 0,0,0,0,0,0,0,0, # 20 - 27 0,3,0,0,0,0,0,0, # 28 - 2f 0,0,0,0,0,0,0,0, # 30 - 37 0,0,0,0,0,0,0,0, # 38 - 3f 0,0,0,4,0,0,0,0, # 40 - 47 0,0,0,0,0,0,0,0, # 48 - 4f 0,0,0,0,0,0,0,0, # 50 - 57 0,0,0,0,0,0,0,0, # 58 - 5f 0,0,0,0,0,0,0,0, # 60 - 67 0,0,0,0,0,0,0,0, # 68 - 6f 0,0,0,0,0,0,0,0, # 70 - 77 0,0,0,0,0,0,0,0, # 78 - 7f 2,2,2,2,2,2,2,2, # 80 - 87 2,2,2,2,2,2,2,2, # 88 - 8f 2,2,2,2,2,2,2,2, # 90 - 97 2,2,2,2,2,2,2,2, # 98 - 9f 2,2,2,2,2,2,2,2, # a0 - a7 2,2,2,2,2,2,2,2, # a8 - af 2,2,2,2,2,2,2,2, # b0 - b7 2,2,2,2,2,2,2,2, # b8 - bf 2,2,2,2,2,2,2,2, # c0 - c7 2,2,2,2,2,2,2,2, # c8 - cf 2,2,2,2,2,2,2,2, # d0 - d7 2,2,2,2,2,2,2,2, # d8 - df 2,2,2,2,2,2,2,2, # e0 - e7 2,2,2,2,2,2,2,2, # e8 - ef 2,2,2,2,2,2,2,2, # f0 - f7 2,2,2,2,2,2,2,2, # f8 - ff ) ISO2022CN_st = ( eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07 eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17 eItsMe,eItsMe,eItsMe,eError,eError,eError, 4,eError,# 18-1f eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27 5, 6,eError,eError,eError,eError,eError,eError,# 28-2f eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37 eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f ) 
ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0) ISO2022CNSMModel = {'classTable': ISO2022CN_cls, 'classFactor': 9, 'stateTable': ISO2022CN_st, 'charLenTable': ISO2022CNCharLenTable, 'name': "ISO-2022-CN"} ISO2022JP_cls = ( 2,0,0,0,0,0,0,0, # 00 - 07 0,0,0,0,0,0,2,2, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 0,0,0,1,0,0,0,0, # 18 - 1f 0,0,0,0,7,0,0,0, # 20 - 27 3,0,0,0,0,0,0,0, # 28 - 2f 0,0,0,0,0,0,0,0, # 30 - 37 0,0,0,0,0,0,0,0, # 38 - 3f 6,0,4,0,8,0,0,0, # 40 - 47 0,9,5,0,0,0,0,0, # 48 - 4f 0,0,0,0,0,0,0,0, # 50 - 57 0,0,0,0,0,0,0,0, # 58 - 5f 0,0,0,0,0,0,0,0, # 60 - 67 0,0,0,0,0,0,0,0, # 68 - 6f 0,0,0,0,0,0,0,0, # 70 - 77 0,0,0,0,0,0,0,0, # 78 - 7f 2,2,2,2,2,2,2,2, # 80 - 87 2,2,2,2,2,2,2,2, # 88 - 8f 2,2,2,2,2,2,2,2, # 90 - 97 2,2,2,2,2,2,2,2, # 98 - 9f 2,2,2,2,2,2,2,2, # a0 - a7 2,2,2,2,2,2,2,2, # a8 - af 2,2,2,2,2,2,2,2, # b0 - b7 2,2,2,2,2,2,2,2, # b8 - bf 2,2,2,2,2,2,2,2, # c0 - c7 2,2,2,2,2,2,2,2, # c8 - cf 2,2,2,2,2,2,2,2, # d0 - d7 2,2,2,2,2,2,2,2, # d8 - df 2,2,2,2,2,2,2,2, # e0 - e7 2,2,2,2,2,2,2,2, # e8 - ef 2,2,2,2,2,2,2,2, # f0 - f7 2,2,2,2,2,2,2,2, # f8 - ff ) ISO2022JP_st = ( eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07 eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17 eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f eError, 5,eError,eError,eError, 4,eError,eError,# 20-27 eError,eError,eError, 6,eItsMe,eError,eItsMe,eError,# 28-2f eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37 eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47 ) ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0) ISO2022JPSMModel = {'classTable': ISO2022JP_cls, 'classFactor': 10, 'stateTable': ISO2022JP_st, 'charLenTable': ISO2022JPCharLenTable, 'name': "ISO-2022-JP"} ISO2022KR_cls = ( 2,0,0,0,0,0,0,0, # 00 - 07 0,0,0,0,0,0,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 
0,0,0,1,0,0,0,0, # 18 - 1f 0,0,0,0,3,0,0,0, # 20 - 27 0,4,0,0,0,0,0,0, # 28 - 2f 0,0,0,0,0,0,0,0, # 30 - 37 0,0,0,0,0,0,0,0, # 38 - 3f 0,0,0,5,0,0,0,0, # 40 - 47 0,0,0,0,0,0,0,0, # 48 - 4f 0,0,0,0,0,0,0,0, # 50 - 57 0,0,0,0,0,0,0,0, # 58 - 5f 0,0,0,0,0,0,0,0, # 60 - 67 0,0,0,0,0,0,0,0, # 68 - 6f 0,0,0,0,0,0,0,0, # 70 - 77 0,0,0,0,0,0,0,0, # 78 - 7f 2,2,2,2,2,2,2,2, # 80 - 87 2,2,2,2,2,2,2,2, # 88 - 8f 2,2,2,2,2,2,2,2, # 90 - 97 2,2,2,2,2,2,2,2, # 98 - 9f 2,2,2,2,2,2,2,2, # a0 - a7 2,2,2,2,2,2,2,2, # a8 - af 2,2,2,2,2,2,2,2, # b0 - b7 2,2,2,2,2,2,2,2, # b8 - bf 2,2,2,2,2,2,2,2, # c0 - c7 2,2,2,2,2,2,2,2, # c8 - cf 2,2,2,2,2,2,2,2, # d0 - d7 2,2,2,2,2,2,2,2, # d8 - df 2,2,2,2,2,2,2,2, # e0 - e7 2,2,2,2,2,2,2,2, # e8 - ef 2,2,2,2,2,2,2,2, # f0 - f7 2,2,2,2,2,2,2,2, # f8 - ff ) ISO2022KR_st = ( eStart, 3,eError,eStart,eStart,eStart,eError,eError,# 00-07 eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f eItsMe,eItsMe,eError,eError,eError, 4,eError,eError,# 10-17 eError,eError,eError,eError, 5,eError,eError,eError,# 18-1f eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27 ) ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0) ISO2022KRSMModel = {'classTable': ISO2022KR_cls, 'classFactor': 6, 'stateTable': ISO2022KR_st, 'charLenTable': ISO2022KRCharLenTable, 'name': "ISO-2022-KR"} # flake8: noqa
bsd-3-clause
nippoo/phy
phy/electrode/mea.py
2
6010
# -*- coding: utf-8 -*- """Multi-electrode arrays.""" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ import os import os.path as op import itertools import numpy as np from ..utils._types import _as_array from ..utils._misc import _read_python #------------------------------------------------------------------------------ # PRB file utilities #------------------------------------------------------------------------------ def _edges_to_adjacency_list(edges): """Convert a list of edges into an adjacency list.""" adj = {} for i, j in edges: if i in adj: ni = adj[i] else: ni = adj[i] = set() if j in adj: nj = adj[j] else: nj = adj[j] = set() ni.add(j) nj.add(i) return adj def _probe_positions(probe, group): """Return the positions of a probe channel group.""" positions = probe['channel_groups'][group]['geometry'] channels = _probe_channels(probe, group) return np.array([positions[channel] for channel in channels]) def _probe_channels(probe, group): """Return the list of channels in a channel group. The order is kept. """ return probe['channel_groups'][group]['channels'] def _probe_all_channels(probe): """Return the list of channels in the probe.""" cgs = probe['channel_groups'].values() cg_channels = [cg['channels'] for cg in cgs] return sorted(set(itertools.chain(*cg_channels))) def _probe_adjacency_list(probe): """Return an adjacency list of a whole probe.""" cgs = probe['channel_groups'].values() graphs = [cg['graph'] for cg in cgs] edges = list(itertools.chain(*graphs)) adjacency_list = _edges_to_adjacency_list(edges) return adjacency_list def _channels_per_group(probe): groups = probe['channel_groups'].keys() return {group: probe['channel_groups'][group]['channels'] for group in groups} def load_probe(name): """Load one of the built-in probes.""" if op.exists(name): # The argument can be either a path to a PRB file. 
path = name else: # Or the name of a built-in probe. curdir = op.realpath(op.dirname(__file__)) path = op.join(curdir, 'probes/{}.prb'.format(name)) if not op.exists(path): raise IOError("The probe `{}` cannot be found.".format(name)) return _read_python(path) def list_probes(): """Return the list of built-in probes.""" curdir = op.realpath(op.dirname(__file__)) return [op.splitext(fn)[0] for fn in os.listdir(op.join(curdir, 'probes')) if fn.endswith('.prb')] #------------------------------------------------------------------------------ # MEA class #------------------------------------------------------------------------------ class MEA(object): """A Multi-Electrode Array. There are two modes: * No probe specified: one single channel group, positions and adjacency list specified directly. * Probe specified: one can change the current channel_group. """ def __init__(self, channels=None, positions=None, adjacency=None, probe=None, ): self._probe = probe self._channels = channels if positions is not None: assert self.n_channels == positions.shape[0] self._positions = positions # This is a mapping {channel: list of neighbors}. 
if adjacency is None and probe is not None: adjacency = _probe_adjacency_list(probe) self.channels_per_group = _channels_per_group(probe) self._adjacency = adjacency def _check_positions(self, positions): if positions is None: return positions = _as_array(positions) if self.n_channels is None: self.n_channels = positions.shape[0] if positions.shape[0] != self.n_channels: raise ValueError("'positions' " "(shape {0:s})".format(str(positions.shape)) + " and 'n_channels' " "({0:d})".format(self.n_channels) + " do not match.") @property def positions(self): """Channel positions in the current channel group.""" return self._positions @positions.setter def positions(self, value): self._check_positions(value) self._positions = value @property def channels(self): """Channel ids in the current channel group.""" return self._channels @property def n_channels(self): """Number of channels in the current channel group.""" return len(self._channels) if self._channels is not None else 0 @property def adjacency(self): """Adjacency graph in the current channel group.""" return self._adjacency @adjacency.setter def adjacency(self, value): self._adjacency = value def change_channel_group(self, group): """Change the current channel group.""" assert self._probe is not None self._channels = _probe_channels(self._probe, group) self._positions = _probe_positions(self._probe, group) #------------------------------------------------------------------------------ # Common probes #------------------------------------------------------------------------------ def linear_positions(n_channels): """Linear channel positions along the vertical axis.""" return np.c_[np.zeros(n_channels), np.linspace(0., 1., n_channels)] def staggered_positions(n_channels): """Generate channel positions for a staggered probe.""" i = np.arange(n_channels - 1) x, y = (-1) ** i * (5 + i), 10 * (i + 1) pos = np.flipud(np.r_[np.zeros((1, 2)), np.c_[x, y]]) return pos
bsd-3-clause
naoyak/Agile_Data_Code_2
ch07/train_sklearn_model.py
1
5282
import sys, os, re sys.path.append("lib") import utils import numpy as np import sklearn import iso8601 import datetime print("Imports loaded...") # Load and check the size of our training data. May take a minute. print("Original JSON file size: {:,} Bytes".format(os.path.getsize("data/simple_flight_delay_features.jsonl"))) training_data = utils.read_json_lines_file('data/simple_flight_delay_features.jsonl') print("Training items: {:,}".format(len(training_data))) # 5,714,008 print("Data loaded...") # Inspect a record before we alter them print("Size of training data in RAM: {:,} Bytes".format(sys.getsizeof(training_data))) # 50MB print(training_data[0]) # # Sample down our training data at first... # sampled_training_data = training_data#np.random.choice(training_data, 1000000) # print("Sampled items: {:,} Bytes".format(len(training_data))) # print("Data sampled...") # Separate our results from the rest of the data, vectorize and size up results = [record['ArrDelay'] for record in training_data] results_vector = np.array(results) sys.getsizeof(results_vector) # 45,712,160 Bytes print("Results vectorized...") # Remove the two delay fields and the flight date from our training data for item in training_data: item.pop('ArrDelay', None) item.pop('FlightDate', None) print("ArrDelay and FlightDate removed from training data...") # Must convert datetime strings to unix times for item in training_data: if isinstance(item['CRSArrTime'], str): dt = iso8601.parse_date(item['CRSArrTime']) unix_time = int(dt.timestamp()) item['CRSArrTime'] = unix_time if isinstance(item['CRSDepTime'], str): dt = iso8601.parse_date(item['CRSDepTime']) unix_time = int(dt.timestamp()) item['CRSDepTime'] = unix_time print("Datetimes converted to unix times...") # Use DictVectorizer to convert feature dicts to vectors from sklearn.feature_extraction import DictVectorizer print("Original dimensions: [{:,}]".format(len(training_data))) vectorizer = DictVectorizer() training_vectors = 
vectorizer.fit_transform(training_data) print("Size of DictVectorized vectors: {:,} Bytes".format(training_vectors.data.nbytes)) print("Training data vectorized...") from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( training_vectors, results_vector, test_size=0.1, random_state=43 ) print(X_train.shape, X_test.shape) print(y_train.shape, y_test.shape) print("Test train split performed...") # Train a regressor from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split, cross_val_predict from sklearn.metrics import median_absolute_error, r2_score print("Regressor library and metrics imported...") regressor = LinearRegression() print("Regressor instantiated...") from sklearn.ensemble import GradientBoostingRegressor regressor = GradientBoostingRegressor print("Swapped gradient boosting trees for linear regression!") # Lets go back for now... regressor = LinearRegression() print("Swapped back to linear regression!") regressor.fit(X_train, y_train) print("Regressor fitted...") predicted = regressor.predict(X_test) print("Predictions made for X_test...") # Definitions from http://scikit-learn.org/stable/modules/model_evaluation.html from sklearn.metrics import median_absolute_error, r2_score # Median absolute error is the median of all absolute differences between the target and the prediction. # Less is better, more indicates a high error between target and prediction. medae = median_absolute_error(y_test, predicted) print("Median absolute error: {:.3g}".format(medae)) # R2 score is the coefficient of determination. Ranges from 1-0, 1.0 is best, 0.0 is worst. # Measures how well future samples are likely to be predicted. 
r2 = r2_score(y_test, predicted) print("r2 score: {:.3g}".format(r2)) # Plot outputs, compare actual vs predicted values # import matplotlib.pyplot as plt # # plt.scatter( # y_test, # predicted, # color='blue', # linewidth=1 # ) # # plt.xticks(()) # plt.yticks(()) # # plt.show() # # Persist model using pickle # print("Testing model persistance...") import pickle project_home = os.environ["PROJECT_HOME"] # Dump the model itself regressor_path = "{}/data/sklearn_regressor.pkl".format(project_home) regressor_bytes = pickle.dumps(regressor) model_f = open(regressor_path, 'wb') model_f.write(regressor_bytes) # Dump the DictVectorizer that vectorizes the features vectorizer_path = "{}/data/sklearn_vectorizer.pkl".format(project_home) vectorizer_bytes = pickle.dumps(vectorizer) vectorizer_f = open(vectorizer_path, 'wb') vectorizer_f.write(vectorizer_bytes) # Load the model itself model_f = open(regressor_path, 'rb') model_bytes = model_f.read() regressor = pickle.loads(model_bytes) # Load the DictVectorizer vectorizer_f = open(vectorizer_path, 'rb') vectorizer_bytes = vectorizer_f.read() vectorizer = pickle.loads(vectorizer_bytes) # # Persist model using sklearn.externals.joblib # from sklearn.externals import joblib # Dump the model and vectorizer joblib.dump(regressor, regressor_path) joblib.dump(vectorizer, vectorizer_path) # Load the model and vectorizer regressor = joblib.load(regressor_path) vectorizer = joblib.load(vectorizer_path)
mit
goldsborough/.emacs
.emacs.d/.python-environments/default/lib/python3.5/site-packages/pip/_vendor/colorama/win32.py
535
5365
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
"""Thin ctypes wrappers around the Win32 console API.

On platforms where ``ctypes.WinDLL`` is unavailable (anywhere but
Windows) ``windll`` is set to ``None`` and the public helpers degrade to
inert stubs, so importing this module is always safe.
"""

# from winbase.h
STDOUT = -11
STDERR = -12

try:
    import ctypes
    from ctypes import LibraryLoader
    windll = LibraryLoader(ctypes.WinDLL)
    from ctypes import wintypes
except (AttributeError, ImportError):
    # Non-Windows platform (ctypes.WinDLL missing) or no ctypes at all:
    # expose harmless no-op stubs so callers need no platform checks.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
    winapi_test = lambda *_: None
else:
    from ctypes import byref, Structure, c_char, POINTER

    COORD = wintypes._COORD

    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]

        def __str__(self):
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )

    _GetStdHandle = windll.kernel32.GetStdHandle
    _GetStdHandle.argtypes = [
        wintypes.DWORD,
    ]
    _GetStdHandle.restype = wintypes.HANDLE

    _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
    _GetConsoleScreenBufferInfo.argtypes = [
        wintypes.HANDLE,
        POINTER(CONSOLE_SCREEN_BUFFER_INFO),
    ]
    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL

    _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
    _SetConsoleTextAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
    ]
    _SetConsoleTextAttribute.restype = wintypes.BOOL

    _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
    _SetConsoleCursorPosition.argtypes = [
        wintypes.HANDLE,
        COORD,
    ]
    _SetConsoleCursorPosition.restype = wintypes.BOOL

    _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
    _FillConsoleOutputCharacterA.argtypes = [
        wintypes.HANDLE,
        c_char,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputCharacterA.restype = wintypes.BOOL

    _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
    _FillConsoleOutputAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputAttribute.restype = wintypes.BOOL

    # BUG FIX: this previously bound the ANSI entry point
    # (kernel32.SetConsoleTitleA with LPCSTR) under a ...W name, which
    # breaks for non-ASCII titles and for str arguments on Python 3.
    # Bind the wide-character version with the matching LPCWSTR type.
    _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
    _SetConsoleTitleW.argtypes = [
        wintypes.LPCWSTR
    ]
    _SetConsoleTitleW.restype = wintypes.BOOL

    # Resolve the std handles once at import time; they are stable for
    # the lifetime of the process.
    handles = {
        STDOUT: _GetStdHandle(STDOUT),
        STDERR: _GetStdHandle(STDERR),
    }

    def winapi_test():
        """Return True if the console API is usable on the stdout handle."""
        handle = handles[STDOUT]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        return bool(success)

    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
        """Return a CONSOLE_SCREEN_BUFFER_INFO for the given stream handle.

        Note: the WinAPI success flag is discarded; on failure the
        returned struct is simply zero-filled (original behaviour kept).
        """
        handle = handles[stream_id]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = _GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        return csbi

    def SetConsoleTextAttribute(stream_id, attrs):
        """Set the character attributes (colors) for subsequent writes."""
        handle = handles[stream_id]
        return _SetConsoleTextAttribute(handle, attrs)

    def SetConsoleCursorPosition(stream_id, position, adjust=True):
        """Move the cursor to ``position``, given ANSI-style as (y, x), 1-based.

        When ``adjust`` is true the position is offset by the viewport's
        scroll position so ANSI coordinates map onto the visible window.
        """
        position = COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        #    1. being 0-based, while ANSI is 1-based.
        #    2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = COORD(position.Y - 1, position.X - 1)
        if adjust:
            # Adjust for viewport's scroll position
            sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
            adjusted_position.Y += sr.Top
            adjusted_position.X += sr.Left
        # Resume normal processing
        handle = handles[stream_id]
        return _SetConsoleCursorPosition(handle, adjusted_position)

    def FillConsoleOutputCharacter(stream_id, char, length, start):
        """Write ``char`` ``length`` times starting at COORD ``start``.

        Returns the number of cells actually written.
        """
        handle = handles[stream_id]
        char = c_char(char.encode())
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        success = _FillConsoleOutputCharacterA(
            handle, char, length, start, byref(num_written))
        return num_written.value

    def FillConsoleOutputAttribute(stream_id, attr, length, start):
        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
        handle = handles[stream_id]
        attribute = wintypes.WORD(attr)
        length = wintypes.DWORD(length)
        num_written = wintypes.DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        return _FillConsoleOutputAttribute(
            handle, attribute, length, start, byref(num_written))

    def SetConsoleTitle(title):
        """Set the console window title (unicode-safe via SetConsoleTitleW)."""
        return _SetConsoleTitleW(title)
mit
vmindru/ansible
test/units/module_utils/basic/test_exit_json.py
123
5336
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type

import json

import pytest


EMPTY_INVOCATION = {u'module_args': {}}


class TestAnsibleModuleExitJson:
    """
    Test that various means of calling exitJson and FailJson return the
    messages they've been given
    """
    # Each entry: (kwargs passed to exit_json/fail_json, expected JSON output)
    DATA = (
        ({}, {'invocation': EMPTY_INVOCATION}),
        ({'msg': 'message'}, {'msg': 'message', 'invocation': EMPTY_INVOCATION}),
        ({'msg': 'success', 'changed': True},
         {'msg': 'success', 'changed': True, 'invocation': EMPTY_INVOCATION}),
        ({'msg': 'nochange', 'changed': False},
         {'msg': 'nochange', 'changed': False, 'invocation': EMPTY_INVOCATION}),
    )

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    # pylint: disable=undefined-variable
    @pytest.mark.parametrize('args, expected, stdin', ((a, e, {}) for a, e in DATA),
                             indirect=['stdin'])
    def test_exit_json_exits(self, am, capfd, args, expected):
        """exit_json() must exit 0 and emit the given values as JSON on stdout."""
        with pytest.raises(SystemExit) as ctx:
            am.exit_json(**args)
        assert ctx.value.code == 0

        out, err = capfd.readouterr()
        return_val = json.loads(out)
        assert return_val == expected

    # Fail_json is only legal if it's called with a message
    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    @pytest.mark.parametrize('args, expected, stdin',
                             ((a, e, {}) for a, e in DATA if 'msg' in a),  # pylint: disable=undefined-variable
                             indirect=['stdin'])
    def test_fail_json_exits(self, am, capfd, args, expected):
        """fail_json() must exit 1 and add failed=True to the emitted JSON."""
        with pytest.raises(SystemExit) as ctx:
            am.fail_json(**args)
        assert ctx.value.code == 1

        out, err = capfd.readouterr()
        return_val = json.loads(out)
        # Fail_json should add failed=True
        expected['failed'] = True
        assert return_val == expected

    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_fail_json_no_msg(self, am):
        """Calling fail_json() without msg is an implementation error."""
        with pytest.raises(AssertionError) as ctx:
            am.fail_json()
        assert ctx.value.args[0] == "implementation error -- msg to explain the error is required"


class TestAnsibleModuleExitValuesRemoved:
    """
    Test that ExitJson and FailJson remove password-like values
    """
    OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'

    # Each entry: (module args, kwargs for exit/fail_json, expected JSON output)
    DATA = (
        (
            dict(username='person', password='$ecret k3y'),
            dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
                 not_secret='following the leader', msg='here'),
            dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/',
                 not_secret='following the leader', msg='here',
                 invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
        ),
        (
            dict(username='person', password='password12345'),
            dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
                 not_secret='following the leader', msg='here'),
            dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/',
                 not_secret='following the leader', msg='here',
                 invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
        ),
        (
            dict(username='person', password='$ecret k3y'),
            dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/',
                 not_secret='following the leader', msg='here'),
            dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/',
                 not_secret='following the leader', msg='here',
                 invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
        ),
    )

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    @pytest.mark.parametrize('am, stdin, return_val, expected',
                             (({'username': {}, 'password': {'no_log': True}, 'token': {'no_log': True}}, s, r, e)
                              for s, r, e in DATA),  # pylint: disable=undefined-variable
                             indirect=['am', 'stdin'])
    def test_exit_json_removes_values(self, am, capfd, return_val, expected):
        """no_log values must be scrubbed from exit_json output."""
        with pytest.raises(SystemExit):
            am.exit_json(**return_val)
        out, err = capfd.readouterr()

        assert json.loads(out) == expected

    # pylint bug: https://github.com/PyCQA/pylint/issues/511
    @pytest.mark.parametrize('am, stdin, return_val, expected',
                             (({'username': {}, 'password': {'no_log': True}, 'token': {'no_log': True}}, s, r, e)
                              for s, r, e in DATA),  # pylint: disable=undefined-variable
                             indirect=['am', 'stdin'])
    def test_fail_json_removes_values(self, am, capfd, return_val, expected):
        """no_log values must be scrubbed from fail_json output."""
        expected['failed'] = True
        with pytest.raises(SystemExit):
            # Bug fix: the original read ``am.fail_json(**return_val) == expected``;
            # the ``== expected`` comparison was dead code (fail_json raises
            # SystemExit before any comparison) and misleadingly looked like
            # an assertion. The real check is the json.loads comparison below.
            am.fail_json(**return_val)
        out, err = capfd.readouterr()

        assert json.loads(out) == expected
gpl-3.0
Salat-Cx65/python-for-android
python-modules/twisted/twisted/persisted/sob.py
60
6366
# -*- test-case-name: twisted.test.test_sob -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.

#
"""
Save and load Small OBjects to and from files, using various formats.

Maintainer: Moshe Zadka
"""

# NOTE(review): this is Python 2 code (statement-form ``exec``, old-style
# ``implements`` from zope.interface, cPickle/cStringIO fallbacks).

import os, sys
try:
    import cPickle as pickle
except ImportError:
    import pickle
try:
    import cStringIO as StringIO
except ImportError:
    import StringIO
from twisted.python import log, runtime
from twisted.python.hashlib import md5
from twisted.persisted import styles
from zope.interface import implements, Interface

# Note:
# These encrypt/decrypt functions only work for data formats
# which are immune to having spaces tucked at the end.
# All data formats which persist saves hold that condition.
def _encrypt(passphrase, data):
    """AES-encrypt ``data`` under a key derived from md5(passphrase).

    Data is right-padded with spaces to a whole number of cipher blocks
    (hence the format restriction noted above).
    """
    from Crypto.Cipher import AES as cipher
    leftover = len(data) % cipher.block_size
    if leftover:
        data += ' '*(cipher.block_size - leftover)
    return cipher.new(md5(passphrase).digest()[:16]).encrypt(data)

def _decrypt(passphrase, data):
    """Inverse of _encrypt; trailing pad spaces are NOT stripped here."""
    from Crypto.Cipher import AES
    return AES.new(md5(passphrase).digest()[:16]).decrypt(data)


class IPersistable(Interface):

    """An object which can be saved in several formats to a file"""

    def setStyle(style):
        """Set desired format.

        @type style: string (one of 'pickle' or 'source')
        """

    def save(tag=None, filename=None, passphrase=None):
        """Save object to file.

        @type tag: string
        @type filename: string
        @type passphrase: string
        """


class Persistent:
    """Default IPersistable implementation: wraps an object plus a base name
    and saves it as a pickle ('.tap') or AOT source ('.tas') file."""

    implements(IPersistable)

    # Default serialization format; may be changed via setStyle().
    style = "pickle"

    def __init__(self, original, name):
        self.original = original
        self.name = name

    def setStyle(self, style):
        """Set desired format.

        @type style: string (one of 'pickle' or 'source')
        """
        self.style = style

    def _getFilename(self, filename, ext, tag):
        """Return (finalname, tempname).

        The save is written to ``tempname`` (a '-2'-suffixed sibling) and
        then renamed over ``finalname`` so a crash mid-write never leaves a
        truncated final file.
        """
        if filename:
            finalname = filename
            filename = finalname + "-2"
        elif tag:
            filename = "%s-%s-2.%s" % (self.name, tag, ext)
            finalname = "%s-%s.%s" % (self.name, tag, ext)
        else:
            filename = "%s-2.%s" % (self.name, ext)
            finalname = "%s.%s" % (self.name, ext)
        return finalname, filename

    def _saveTemp(self, filename, passphrase, dumpFunc):
        """Dump self.original to ``filename``; when a passphrase is given,
        serialize to memory first and write the encrypted bytes."""
        f = open(filename, 'wb')
        if passphrase is None:
            dumpFunc(self.original, f)
        else:
            s = StringIO.StringIO()
            dumpFunc(self.original, s)
            f.write(_encrypt(passphrase, s.getvalue()))
        f.close()

    def _getStyle(self):
        """Return (extension, dump-function) matching self.style."""
        if self.style == "source":
            from twisted.persisted.aot import jellyToSource as dumpFunc
            ext = "tas"
        else:
            # Pickle protocol 2 (binary), the highest available on py2.
            def dumpFunc(obj, file):
                pickle.dump(obj, file, 2)
            ext = "tap"
        return ext, dumpFunc

    def save(self, tag=None, filename=None, passphrase=None):
        """Save object to file.

        @type tag: string
        @type filename: string
        @type passphrase: string
        """
        ext, dumpFunc = self._getStyle()
        if passphrase:
            # 'e' prefix marks encrypted variants: etap/etas.
            ext = 'e' + ext
        finalname, filename = self._getFilename(filename, ext, tag)
        log.msg("Saving "+self.name+" application to "+finalname+"...")
        self._saveTemp(filename, passphrase, dumpFunc)
        # os.rename cannot overwrite an existing file on win32, so remove
        # the destination first there.
        if runtime.platformType == "win32" and os.path.isfile(finalname):
            os.remove(finalname)
        os.rename(filename, finalname)
        log.msg("Saved.")

# "Persistant" has been present since 1.0.7, so retain it for compatibility
Persistant = Persistent

class _EverythingEphemeral(styles.Ephemeral):
    """Stand-in for the '__main__' module during load().

    Attribute lookups fall through to the real __main__; once initRun is
    cleared, unknown attributes resolve to fresh Ephemeral placeholders
    instead of raising.
    """

    # Flag: non-zero while load() is executing; makes missing attributes
    # raise instead of being silently replaced by Ephemeral placeholders.
    initRun = 0

    def __init__(self, mainMod):
        """
        @param mainMod: The '__main__' module that this class will proxy.
        """
        self.mainMod = mainMod

    def __getattr__(self, key):
        try:
            return getattr(self.mainMod, key)
        except AttributeError:
            if self.initRun:
                raise
            else:
                log.msg("Warning! Loading from __main__: %s" % key)
                return styles.Ephemeral()


def load(filename, style, passphrase=None):
    """Load an object from a file.

    Deserialize an object from a file. The file can be encrypted.

    @param filename: string
    @param style: string (one of 'pickle' or 'source')
    @param passphrase: string
    """
    mode = 'r'
    if style=='source':
        from twisted.persisted.aot import unjellyFromSource as _load
    else:
        _load, mode = pickle.load, 'rb'
    if passphrase:
        fp = StringIO.StringIO(
            _decrypt(passphrase, open(filename, 'rb').read()))
    else:
        fp = open(filename, mode)
    # Temporarily replace __main__ so references to it inside the pickled
    # data resolve against the proxy rather than the real script module.
    ee = _EverythingEphemeral(sys.modules['__main__'])
    sys.modules['__main__'] = ee
    ee.initRun = 1
    try:
        value = _load(fp)
    finally:
        # restore __main__ if an exception is raised.
        sys.modules['__main__'] = ee.mainMod
    styles.doUpgrade()
    ee.initRun = 0
    # Adapt to IPersistable if possible (None means not adaptable).
    persistable = IPersistable(value, None)
    if persistable is not None:
        persistable.setStyle(style)
    return value


def loadValueFromFile(filename, variable, passphrase=None):
    """Load the value of a variable in a Python file.

    Run the contents of the file, after decrypting if C{passphrase} is
    given, in a namespace and return the result of the variable
    named C{variable}.

    @param filename: string
    @param variable: string
    @param passphrase: string
    """
    if passphrase:
        mode = 'rb'
    else:
        mode = 'r'
    fileObj = open(filename, mode)
    d = {'__file__': filename}
    if passphrase:
        data = fileObj.read()
        data = _decrypt(passphrase, data)
        # py2 statement-form exec with an explicit globals/locals dict.
        exec data in d, d
    else:
        exec fileObj in d, d
    value = d[variable]
    return value

def guessType(filename):
    """Map a filename extension to a persistence style.

    Raises KeyError for unrecognized extensions.
    """
    ext = os.path.splitext(filename)[1]
    return {
        '.tac': 'python',
        '.etac': 'python',
        '.py': 'python',
        '.tap': 'pickle',
        '.etap': 'pickle',
        '.tas': 'source',
        '.etas': 'source',
    }[ext]

__all__ = ['loadValueFromFile', 'load', 'Persistent', 'Persistant',
           'IPersistable', 'guessType']
apache-2.0
maleficarium/youtube-dl
youtube_dl/extractor/oktoberfesttv.py
168
1500
# encoding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class OktoberfestTVIE(InfoExtractor):
    """Extractor for live webcam streams on oktoberfest-tv.de."""

    _VALID_URL = r'https?://www\.oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P<id>[^/?#]+)'

    _TEST = {
        'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt',
        'info_dict': {
            'id': 'hb-zelt',
            'ext': 'mp4',
            'title': 're:^Live-Kamera: Hofbräuzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'thumbnail': 're:^https?://.*\.jpg$',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        page_title = self._html_search_regex(
            r'<h1><strong>.*?</strong>(.*?)</h1>', webpage, 'title')

        # The playable URL is the RTMP connection base concatenated with
        # the clip path, both embedded in the page's player config.
        clip_path = self._search_regex(
            r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip')
        rtmp_base = self._search_regex(
            r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base')

        return {
            'id': video_id,
            'title': self._live_title(page_title),
            'url': rtmp_base + clip_path,
            'ext': 'mp4',
            'is_live': True,
            'thumbnail': self._search_regex(
                r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage,
                'thumbnail', fatal=False),
        }
unlicense
mne-tools/mne-tools.github.io
0.22/_downloads/433df1c5704b0eae2b3e5b8d29f9ddcd/plot_45_projectors_background.py
9
22444
# -*- coding: utf-8 -*- """ .. _tut-projectors-background: Background on projectors and projections ======================================== This tutorial provides background information on projectors and Signal Space Projection (SSP), and covers loading and saving projectors, adding and removing projectors from Raw objects, the difference between "applied" and "unapplied" projectors, and at what stages MNE-Python applies projectors automatically. .. contents:: Page contents :local: :depth: 2 We'll start by importing the Python modules we need; we'll also define a short function to make it easier to make several plots that look similar: """ import os import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # noqa from scipy.linalg import svd import mne def setup_3d_axes(): ax = plt.axes(projection='3d') ax.view_init(azim=-105, elev=20) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') ax.set_xlim(-1, 5) ax.set_ylim(-1, 5) ax.set_zlim(0, 5) return ax ############################################################################### # What is a projection? # ^^^^^^^^^^^^^^^^^^^^^ # # In the most basic terms, a *projection* is an operation that converts one set # of points into another set of points, where repeating the projection # operation on the resulting points has no effect. To give a simple geometric # example, imagine the point :math:`(3, 2, 5)` in 3-dimensional space. 
A # projection of that point onto the :math:`x, y` plane looks a lot like a # shadow cast by that point if the sun were directly above it: ax = setup_3d_axes() # plot the vector (3, 2, 5) origin = np.zeros((3, 1)) point = np.array([[3, 2, 5]]).T vector = np.hstack([origin, point]) ax.plot(*vector, color='k') ax.plot(*point, color='k', marker='o') # project the vector onto the x,y plane and plot it xy_projection_matrix = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0]]) projected_point = xy_projection_matrix @ point projected_vector = xy_projection_matrix @ vector ax.plot(*projected_vector, color='C0') ax.plot(*projected_point, color='C0', marker='o') # add dashed arrow showing projection arrow_coords = np.concatenate([point, projected_point - point]).flatten() ax.quiver3D(*arrow_coords, length=0.96, arrow_length_ratio=0.1, color='C1', linewidth=1, linestyle='dashed') ############################################################################### # # .. note:: # # The ``@`` symbol indicates matrix multiplication on NumPy arrays, and was # introduced in Python 3.5 / NumPy 1.10. The notation ``plot(*point)`` uses # Python `argument expansion`_ to "unpack" the elements of ``point`` into # separate positional arguments to the function. In other words, # ``plot(*point)`` expands to ``plot(3, 2, 5)``. # # Notice that we used matrix multiplication to compute the projection of our # point :math:`(3, 2, 5)`onto the :math:`x, y` plane: # # .. math:: # # \left[ # \begin{matrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0 \end{matrix} # \right] # \left[ \begin{matrix} 3 \\ 2 \\ 5 \end{matrix} \right] = # \left[ \begin{matrix} 3 \\ 2 \\ 0 \end{matrix} \right] # # ...and that applying the projection again to the result just gives back the # result again: # # .. 
math:: # # \left[ # \begin{matrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0 \end{matrix} # \right] # \left[ \begin{matrix} 3 \\ 2 \\ 0 \end{matrix} \right] = # \left[ \begin{matrix} 3 \\ 2 \\ 0 \end{matrix} \right] # # From an information perspective, this projection has taken the point # :math:`x, y, z` and removed the information about how far in the :math:`z` # direction our point was located; all we know now is its position in the # :math:`x, y` plane. Moreover, applying our projection matrix to *any point* # in :math:`x, y, z` space will reduce it to a corresponding point on the # :math:`x, y` plane. The term for this is a *subspace*: the projection matrix # projects points in the original space into a *subspace* of lower dimension # than the original. The reason our subspace is the :math:`x,y` plane (instead # of, say, the :math:`y,z` plane) is a direct result of the particular values # in our projection matrix. # # # Example: projection as noise reduction # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Another way to describe this "loss of information" or "projection into a # subspace" is to say that projection reduces the rank (or "degrees of # freedom") of the measurement — here, from 3 dimensions down to 2. On the # other hand, if you know that measurement component in the :math:`z` direction # is just noise due to your measurement method, and all you care about are the # :math:`x` and :math:`y` components, then projecting your 3-dimensional # measurement into the :math:`x, y` plane could be seen as a form of noise # reduction. # # Of course, it would be very lucky indeed if all the measurement noise were # concentrated in the :math:`z` direction; you could just discard the :math:`z` # component without bothering to construct a projection matrix or do the matrix # multiplication. Suppose instead that in order to take that measurement you # had to pull a trigger on a measurement device, and the act of pulling the # trigger causes the device to move a little. 
If you measure how # trigger-pulling affects measurement device position, you could then "correct" # your real measurements to "project out" the effect of the trigger pulling. # Here we'll suppose that the average effect of the trigger is to move the # measurement device by :math:`(3, -1, 1)`: trigger_effect = np.array([[3, -1, 1]]).T ############################################################################### # Knowing that, we can compute a plane that is orthogonal to the effect of the # trigger (using the fact that a plane through the origin has equation # :math:`Ax + By + Cz = 0` given a normal vector :math:`(A, B, C)`), and # project our real measurements onto that plane. # compute the plane orthogonal to trigger_effect x, y = np.meshgrid(np.linspace(-1, 5, 61), np.linspace(-1, 5, 61)) A, B, C = trigger_effect z = (-A * x - B * y) / C # cut off the plane below z=0 (just to make the plot nicer) mask = np.where(z >= 0) x = x[mask] y = y[mask] z = z[mask] ############################################################################### # Computing the projection matrix from the ``trigger_effect`` vector is done # using `singular value decomposition <svd_>`_ (SVD); interested readers may # consult the internet or a linear algebra textbook for details on this method. 
# With the projection matrix in place, we can project our original vector # :math:`(3, 2, 5)` to remove the effect of the trigger, and then plot it: # sphinx_gallery_thumbnail_number = 2 # compute the projection matrix U, S, V = svd(trigger_effect, full_matrices=False) trigger_projection_matrix = np.eye(3) - U @ U.T # project the vector onto the orthogonal plane projected_point = trigger_projection_matrix @ point projected_vector = trigger_projection_matrix @ vector # plot the trigger effect and its orthogonal plane ax = setup_3d_axes() ax.plot_trisurf(x, y, z, color='C2', shade=False, alpha=0.25) ax.quiver3D(*np.concatenate([origin, trigger_effect]).flatten(), arrow_length_ratio=0.1, color='C2', alpha=0.5) # plot the original vector ax.plot(*vector, color='k') ax.plot(*point, color='k', marker='o') offset = np.full((3, 1), 0.1) ax.text(*(point + offset).flat, '({}, {}, {})'.format(*point.flat), color='k') # plot the projected vector ax.plot(*projected_vector, color='C0') ax.plot(*projected_point, color='C0', marker='o') offset = np.full((3, 1), -0.2) ax.text(*(projected_point + offset).flat, '({}, {}, {})'.format(*np.round(projected_point.flat, 2)), color='C0', horizontalalignment='right') # add dashed arrow showing projection arrow_coords = np.concatenate([point, projected_point - point]).flatten() ax.quiver3D(*arrow_coords, length=0.96, arrow_length_ratio=0.1, color='C1', linewidth=1, linestyle='dashed') ############################################################################### # Just as before, the projection matrix will map *any point* in :math:`x, y, z` # space onto that plane, and once a point has been projected onto that plane, # applying the projection again will have no effect. For that reason, it should # be clear that although the projected points vary in all three :math:`x`, # :math:`y`, and :math:`z` directions, the set of projected points have only # two *effective* dimensions (i.e., they are constrained to a plane). # # .. 
sidebar:: Terminology # # In MNE-Python, the matrix used to project a raw signal into a subspace is # usually called a :term:`projector <projector>` or a *projection # operator* — these terms are interchangeable with the term *projection # matrix* used above. # # Projections of EEG or MEG signals work in very much the same way: the point # :math:`x, y, z` corresponds to the value of each sensor at a single time # point, and the projection matrix varies depending on what aspects of the # signal (i.e., what kind of noise) you are trying to project out. The only # real difference is that instead of a single 3-dimensional point :math:`(x, y, # z)` you're dealing with a time series of :math:`N`-dimensional "points" (one # at each sampling time), where :math:`N` is usually in the tens or hundreds # (depending on how many sensors your EEG/MEG system has). Fortunately, because # projection is a matrix operation, it can be done very quickly even on signals # with hundreds of dimensions and tens of thousands of time points. # # # .. _ssp-tutorial: # # Signal-space projection (SSP) # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # We mentioned above that the projection matrix will vary depending on what # kind of noise you are trying to project away. Signal-space projection (SSP) # :footcite:`UusitaloIlmoniemi1997` is a way of estimating what that projection # matrix should be, by # comparing measurements with and without the signal of interest. For example, # you can take additional "empty room" measurements that record activity at the # sensors when no subject is present. By looking at the spatial pattern of # activity across MEG sensors in an empty room measurement, you can create one # or more :math:`N`-dimensional vector(s) giving the "direction(s)" of # environmental noise in sensor space (analogous to the vector for "effect of # the trigger" in our example above). 
SSP is also often used for removing # heartbeat and eye movement artifacts — in those cases, instead of empty room # recordings the direction of the noise is estimated by detecting the # artifacts, extracting epochs around them, and averaging. See # :ref:`tut-artifact-ssp` for examples. # # Once you know the noise vectors, you can create a hyperplane that is # orthogonal # to them, and construct a projection matrix to project your experimental # recordings onto that hyperplane. In that way, the component of your # measurements associated with environmental noise can be removed. Again, it # should be clear that the projection reduces the dimensionality of your data — # you'll still have the same number of sensor signals, but they won't all be # *linearly independent* — but typically there are tens or hundreds of sensors # and the noise subspace that you are eliminating has only 3-5 dimensions, so # the loss of degrees of freedom is usually not problematic. # # # Projectors in MNE-Python # ^^^^^^^^^^^^^^^^^^^^^^^^ # # In our example data, :ref:`SSP <ssp-tutorial>` has already been performed # using empty room recordings, but the :term:`projectors <projector>` are # stored alongside the raw data and have not been *applied* yet (or, # synonymously, the projectors are not *active* yet). Here we'll load # the :ref:`sample data <sample-dataset>` and crop it to 60 seconds; you can # see the projectors in the output of :func:`~mne.io.read_raw_fif` below: sample_data_folder = mne.datasets.sample.data_path() sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample', 'sample_audvis_raw.fif') raw = mne.io.read_raw_fif(sample_data_raw_file) raw.crop(tmax=60).load_data() ############################################################################### # In MNE-Python, the environmental noise vectors are computed using `principal # component analysis <pca_>`_, usually abbreviated "PCA", which is why the SSP # projectors usually have names like "PCA-v1". 
(Incidentally, since the process # of performing PCA uses `singular value decomposition <svd_>`_ under the hood, # it is also common to see phrases like "projectors were computed using SVD" in # published papers.) The projectors are stored in the ``projs`` field of # ``raw.info``: print(raw.info['projs']) ############################################################################### # ``raw.info['projs']`` is an ordinary Python :class:`list` of # :class:`~mne.Projection` objects, so you can access individual projectors by # indexing into it. The :class:`~mne.Projection` object itself is similar to a # Python :class:`dict`, so you can use its ``.keys()`` method to see what # fields it contains (normally you don't need to access its properties # directly, but you can if necessary): first_projector = raw.info['projs'][0] print(first_projector) print(first_projector.keys()) ############################################################################### # The :class:`~mne.io.Raw`, :class:`~mne.Epochs`, and :class:`~mne.Evoked` # objects all have a boolean :attr:`~mne.io.Raw.proj` attribute that indicates # whether there are any unapplied / inactive projectors stored in the object. # In other words, the :attr:`~mne.io.Raw.proj` attribute is ``True`` if at # least one :term:`projector` is present and all of them are active. In # addition, each individual projector also has a boolean ``active`` field: print(raw.proj) print(first_projector['active']) ############################################################################### # Computing projectors # ~~~~~~~~~~~~~~~~~~~~ # # In MNE-Python, SSP vectors can be computed using general purpose functions # :func:`mne.compute_proj_raw`, :func:`mne.compute_proj_epochs`, and # :func:`mne.compute_proj_evoked`. The general assumption these functions make # is that the data passed contains raw data, epochs or averages of the artifact # you want to repair via projection. 
In practice this typically involves # continuous raw data of empty room recordings or averaged ECG or EOG # artifacts. A second set of high-level convenience functions is provided to # compute projection vectors for typical use cases. This includes # :func:`mne.preprocessing.compute_proj_ecg` and # :func:`mne.preprocessing.compute_proj_eog` for computing the ECG and EOG # related artifact components, respectively; see :ref:`tut-artifact-ssp` for # examples of these uses. For computing the EEG reference signal as a # projector, the function :func:`mne.set_eeg_reference` can be used; see # :ref:`tut-set-eeg-ref` for more information. # # .. warning:: It is best to compute projectors only on channels that will be # used (e.g., excluding bad channels). This ensures that # projection vectors will remain ortho-normalized and that they # properly capture the activity of interest. # # # Visualizing the effect of projectors # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # You can see the effect the projectors are having on the measured signal by # comparing plots with and without the projectors applied. By default, # ``raw.plot()`` will apply the projectors in the background before plotting # (without modifying the :class:`~mne.io.Raw` object); you can control this # with the boolean ``proj`` parameter as shown below, or you can turn them on # and off interactively with the projectors interface, accessed via the # :kbd:`Proj` button in the lower right corner of the plot window. Here we'll # look at just the magnetometers, and a 2-second sample from the beginning of # the file. mags = raw.copy().crop(tmax=2).pick_types(meg='mag') for proj in (False, True): fig = mags.plot(butterfly=True, proj=proj) fig.subplots_adjust(top=0.9) fig.suptitle('proj={}'.format(proj), size='xx-large', weight='bold') ############################################################################### # Additional ways of visualizing projectors are covered in the tutorial # :ref:`tut-artifact-ssp`. 
# # # Loading and saving projectors # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # SSP can be used for other types of signal cleaning besides just reduction of # environmental noise. You probably noticed two large deflections in the # magnetometer signals in the previous plot that were not removed by the # empty-room projectors — those are artifacts of the subject's heartbeat. SSP # can be used to remove those artifacts as well. The sample data includes # projectors for heartbeat noise reduction that were saved in a separate file # from the raw data, which can be loaded with the :func:`mne.read_proj` # function: ecg_proj_file = os.path.join(sample_data_folder, 'MEG', 'sample', 'sample_audvis_ecg-proj.fif') ecg_projs = mne.read_proj(ecg_proj_file) print(ecg_projs) ############################################################################### # There is a corresponding :func:`mne.write_proj` function that can be used to # save projectors to disk in ``.fif`` format: # # .. code-block:: python3 # # mne.write_proj('heartbeat-proj.fif', ecg_projs) # # .. note:: # # By convention, MNE-Python expects projectors to be saved with a filename # ending in ``-proj.fif`` (or ``-proj.fif.gz``), and will issue a warning # if you forgo this recommendation. # # # Adding and removing projectors # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Above, when we printed the ``ecg_projs`` list that we loaded from a file, it # showed two projectors for gradiometers (the first two, marked "planar"), two # for magnetometers (the middle two, marked "axial"), and two for EEG sensors # (the last two, marked "eeg"). We can add them to the :class:`~mne.io.Raw` # object using the :meth:`~mne.io.Raw.add_proj` method: raw.add_proj(ecg_projs) ############################################################################### # To remove projectors, there is a corresponding method # :meth:`~mne.io.Raw.del_proj` that will remove projectors based on their index # within the ``raw.info['projs']`` list. 
For the special case of replacing the # existing projectors with new ones, use # ``raw.add_proj(ecg_projs, remove_existing=True)``. # # To see how the ECG projectors affect the measured signal, we can once again # plot the data with and without the projectors applied (though remember that # the :meth:`~mne.io.Raw.plot` method only *temporarily* applies the projectors # for visualization, and does not permanently change the underlying data). # We'll compare the ``mags`` variable we created above, which had only the # empty room SSP projectors, to the data with both empty room and ECG # projectors: mags_ecg = raw.copy().crop(tmax=2).pick_types(meg='mag') for data, title in zip([mags, mags_ecg], ['Without', 'With']): fig = data.plot(butterfly=True, proj=True) fig.subplots_adjust(top=0.9) fig.suptitle('{} ECG projector'.format(title), size='xx-large', weight='bold') ############################################################################### # When are projectors "applied"? # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # By default, projectors are applied when creating :class:`epoched # <mne.Epochs>` data from :class:`~mne.io.Raw` data, though application of the # projectors can be *delayed* by passing ``proj=False`` to the # :class:`~mne.Epochs` constructor. However, even when projectors have not been # applied, the :meth:`mne.Epochs.get_data` method will return data *as if the # projectors had been applied* (though the :class:`~mne.Epochs` object will be # unchanged). Additionally, projectors cannot be applied if the data are not # :ref:`preloaded <memory>`. If the data are `memory-mapped`_ (i.e., not # preloaded), you can check the ``_projector`` attribute to see whether any # projectors will be applied once the data is loaded in memory. # # Finally, when performing inverse imaging (i.e., with # :func:`mne.minimum_norm.apply_inverse`), the projectors will be # automatically applied. 
It is also possible to apply projectors manually when # working with :class:`~mne.io.Raw`, :class:`~mne.Epochs` or # :class:`~mne.Evoked` objects via the object's :meth:`~mne.io.Raw.apply_proj` # method. For all instance types, you can always copy the contents of # :samp:`{<instance>}.info['projs']` into a separate :class:`list` variable, # use :samp:`{<instance>}.del_proj({<index of proj(s) to remove>})` to remove # one or more projectors, and then add them back later with # :samp:`{<instance>}.add_proj({<list containing projs>})` if desired. # # .. warning:: # # Remember that once a projector is applied, it can't be un-applied, so # during interactive / exploratory analysis it's a good idea to use the # object's :meth:`~mne.io.Raw.copy` method before applying projectors. # # # Best practices # ~~~~~~~~~~~~~~ # # In general, it is recommended to apply projectors when creating # :class:`~mne.Epochs` from :class:`~mne.io.Raw` data. There are two reasons # for this recommendation: # # 1. It is computationally cheaper to apply projectors to data *after* the # data have been reducted to just the segments of interest (the epochs) # # 2. If you are applying amplitude-based rejection criteria to epochs, it is # preferable to reject based on the signal *after* projectors have been # applied, because the projectors may reduce noise in some epochs to # tolerable levels (thereby increasing the number of acceptable epochs and # consequenty increasing statistical power in any later analyses). # # # References # ^^^^^^^^^^ # # .. footbibliography:: # # # .. LINKS # # .. _`argument expansion`: # https://docs.python.org/3/tutorial/controlflow.html#tut-unpacking-arguments # .. _`pca`: https://en.wikipedia.org/wiki/Principal_component_analysis # .. _`svd`: https://en.wikipedia.org/wiki/Singular_value_decomposition # .. _`memory-mapped`: https://en.wikipedia.org/wiki/Memory-mapped_file
bsd-3-clause
lavajumper/sexcoin
test/functional/test_framework/coverage.py
6
3389
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.

Provides a way to track which RPC commands are exercised during
testing.
"""

import os

REFERENCE_FILENAME = 'rpc_interface.txt'


class AuthServiceProxyWrapper:
    """An object that wraps AuthServiceProxy to record specific RPC calls."""

    def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
        """
        Kwargs:
            auth_service_proxy_instance (AuthServiceProxy): the instance
                being wrapped.
            coverage_logfile (str): if specified, write each service_name
                out to a file when called.

        """
        self.auth_service_proxy_instance = auth_service_proxy_instance
        self.coverage_logfile = coverage_logfile

    def __getattr__(self, name):
        # Delegate attribute access to the wrapped proxy.  If the proxy
        # hands back another proxy object (attribute-style RPC dispatch),
        # re-wrap it so that the eventual call is still logged.
        return_val = getattr(self.auth_service_proxy_instance, name)
        if not isinstance(return_val, type(self.auth_service_proxy_instance)):
            # If proxy getattr returned an unwrapped value, do the same here.
            return return_val
        return AuthServiceProxyWrapper(return_val, self.coverage_logfile)

    def __call__(self, *args, **kwargs):
        """
        Delegates to AuthServiceProxy, then writes the particular RPC method
        called to a file.
        """
        return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
        self._log_call()
        return return_val

    def _log_call(self):
        """Append the wrapped proxy's RPC method name to the coverage log.

        No-op when no ``coverage_logfile`` was configured.
        """
        rpc_method = self.auth_service_proxy_instance._service_name
        if self.coverage_logfile:
            # 'a' is sufficient: the log is only ever appended to here.
            with open(self.coverage_logfile, 'a', encoding='utf8') as f:
                f.write("%s\n" % rpc_method)

    def __truediv__(self, relative_uri):
        # Mirror AuthServiceProxy's ``proxy / "wallet/x"`` URI composition,
        # keeping the result wrapped so its calls are still logged.
        return AuthServiceProxyWrapper(
            self.auth_service_proxy_instance / relative_uri,
            self.coverage_logfile)

    def get_request(self, *args, **kwargs):
        """Log the pending RPC method, then delegate the raw request."""
        self._log_call()
        return self.auth_service_proxy_instance.get_request(*args, **kwargs)


def get_filename(dirname, n_node):
    """
    Get a filename unique to the test process ID and node.

    This file will contain a list of RPC commands covered.

    Args:
        dirname (str): directory the coverage file lives in
        n_node (int): index of the test node

    Returns:
        str: path of the form ``<dirname>/coverage.pid<pid>.node<n>.txt``
    """
    pid = str(os.getpid())
    return os.path.join(
        dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))


def write_all_rpc_commands(dirname, node):
    """
    Write out a list of all RPC functions available in `bitcoin-cli` for
    coverage comparison. This will only happen once per coverage
    directory.

    Args:
        dirname (str): temporary test dir
        node (AuthServiceProxy): client

    Returns:
        bool. if the RPC interface file was written.

    """
    filename = os.path.join(dirname, REFERENCE_FILENAME)

    # Another test process already wrote the reference file; nothing to do.
    if os.path.isfile(filename):
        return False

    help_output = node.help().split('\n')
    # One command per non-blank, non-header line; the command name is the
    # first whitespace-separated token.
    commands = {
        "%s\n" % line.split()[0]
        for line in map(str.strip, help_output)
        if line and not line.startswith('=')
    }

    with open(filename, 'w', encoding='utf8') as f:
        # Sort for a deterministic reference file (sets are unordered).
        f.writelines(sorted(commands))

    return True
mit
blacklin/kbengine
kbe/res/scripts/common/Lib/test/test_json/test_indent.py
103
1824
import textwrap
from io import StringIO

from test.test_json import PyTest, CTest


class TestIndent:
    def test_indent(self):
        # A nested structure exercising everything the serializer
        # indents: lists, an empty list, plain strings and dicts.
        payload = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh',
                   'i-vhbjkhnth', {'nifty': 87},
                   {'field': 'yes', 'morefield': False}]

        expect = textwrap.dedent("""\
        [
        \t[
        \t\t"blorpie"
        \t],
        \t[
        \t\t"whoops"
        \t],
        \t[],
        \t"d-shtaeou",
        \t"d-nthiouh",
        \t"i-vhbjkhnth",
        \t{
        \t\t"nifty": 87
        \t},
        \t{
        \t\t"field": "yes",
        \t\t"morefield": false
        \t}
        ]""")

        compact = self.dumps(payload)
        spaced = self.dumps(payload, indent=2, sort_keys=True,
                            separators=(',', ': '))
        tabbed = self.dumps(payload, indent='\t', sort_keys=True,
                            separators=(',', ': '))
        spaced_default = self.dumps(payload, indent=2, sort_keys=True)
        tabbed_default = self.dumps(payload, indent='\t', sort_keys=True)

        # Every indented variant must round-trip back to the original.
        for rendered in (compact, spaced, tabbed):
            self.assertEqual(self.loads(rendered), payload)

        # The tab-indented form matches the template verbatim; the
        # two-space form is the same template with tabs expanded.
        self.assertEqual(spaced, expect.expandtabs(2))
        self.assertEqual(tabbed, expect)

        # Explicit (',', ': ') separators match the defaults used with
        # an indent, so the outputs are byte-identical.
        self.assertEqual(spaced_default, spaced)
        self.assertEqual(tabbed_default, tabbed)

    def test_indent0(self):
        payload = {3: 1}

        def check(indent, expected):
            # dumps() and dump() must agree for the same indent value.
            self.assertEqual(self.dumps(payload, indent=indent), expected)

            buf = StringIO()
            self.json.dump(payload, buf, indent=indent)
            self.assertEqual(buf.getvalue(), expected)

        # indent=0 should emit newlines
        check(0, '{\n"3": 1\n}')
        # indent=None is more compact
        check(None, '{"3": 1}')


class TestPyIndent(TestIndent, PyTest):
    pass


class TestCIndent(TestIndent, CTest):
    pass
lgpl-3.0
willharris/django
tests/multiple_database/tests.py
20
93166
from __future__ import unicode_literals import datetime import pickle import warnings from operator import attrgetter from django.contrib.auth.models import User from django.contrib.contenttypes.models import ContentType from django.core import management from django.db import DEFAULT_DB_ALIAS, connections, router, transaction from django.db.models import signals from django.db.utils import ConnectionRouter from django.test import TestCase, override_settings from django.utils.encoding import force_text from django.utils.six import StringIO from .models import Book, Person, Pet, Review, UserProfile from .routers import AuthRouter, TestRouter, WriteRouter class QueryTestCase(TestCase): multi_db = True def test_db_selection(self): "Check that querysets will use the default database by default" self.assertEqual(Book.objects.db, DEFAULT_DB_ALIAS) self.assertEqual(Book.objects.all().db, DEFAULT_DB_ALIAS) self.assertEqual(Book.objects.using('other').db, 'other') self.assertEqual(Book.objects.db_manager('other').db, 'other') self.assertEqual(Book.objects.db_manager('other').all().db, 'other') def test_default_creation(self): "Objects created on the default database don't leak onto other databases" # Create a book on the default database using create() Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16)) # Create a book on the default database using a save dive = Book() dive.title = "Dive into Python" dive.published = datetime.date(2009, 5, 4) dive.save() # Check that book exists on the default database, but not on other database try: Book.objects.get(title="Pro Django") Book.objects.using('default').get(title="Pro Django") except Book.DoesNotExist: self.fail('"Pro Django" should exist on default database') self.assertRaises( Book.DoesNotExist, Book.objects.using('other').get, title="Pro Django" ) try: Book.objects.get(title="Dive into Python") Book.objects.using('default').get(title="Dive into Python") except Book.DoesNotExist: self.fail('"Dive 
into Python" should exist on default database') self.assertRaises( Book.DoesNotExist, Book.objects.using('other').get, title="Dive into Python" ) def test_other_creation(self): "Objects created on another database don't leak onto the default database" # Create a book on the second database Book.objects.using('other').create(title="Pro Django", published=datetime.date(2008, 12, 16)) # Create a book on the default database using a save dive = Book() dive.title = "Dive into Python" dive.published = datetime.date(2009, 5, 4) dive.save(using='other') # Check that book exists on the default database, but not on other database try: Book.objects.using('other').get(title="Pro Django") except Book.DoesNotExist: self.fail('"Pro Django" should exist on other database') self.assertRaises( Book.DoesNotExist, Book.objects.get, title="Pro Django" ) self.assertRaises( Book.DoesNotExist, Book.objects.using('default').get, title="Pro Django" ) try: Book.objects.using('other').get(title="Dive into Python") except Book.DoesNotExist: self.fail('"Dive into Python" should exist on other database') self.assertRaises( Book.DoesNotExist, Book.objects.get, title="Dive into Python" ) self.assertRaises( Book.DoesNotExist, Book.objects.using('default').get, title="Dive into Python" ) def test_refresh(self): dive = Book() dive.title = "Dive into Python" dive = Book() dive.title = "Dive into Python" dive.published = datetime.date(2009, 5, 4) dive.save(using='other') dive.published = datetime.date(2009, 5, 4) dive.save(using='other') dive2 = Book.objects.using('other').get() dive2.title = "Dive into Python (on default)" dive2.save(using='default') dive.refresh_from_db() self.assertEqual(dive.title, "Dive into Python") dive.refresh_from_db(using='default') self.assertEqual(dive.title, "Dive into Python (on default)") self.assertEqual(dive._state.db, "default") def test_basic_queries(self): "Queries are constrained to a single database" dive = Book.objects.using('other').create(title="Dive into 
Python", published=datetime.date(2009, 5, 4)) dive = Book.objects.using('other').get(published=datetime.date(2009, 5, 4)) self.assertEqual(dive.title, "Dive into Python") self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published=datetime.date(2009, 5, 4)) dive = Book.objects.using('other').get(title__icontains="dive") self.assertEqual(dive.title, "Dive into Python") self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__icontains="dive") dive = Book.objects.using('other').get(title__iexact="dive INTO python") self.assertEqual(dive.title, "Dive into Python") self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__iexact="dive INTO python") dive = Book.objects.using('other').get(published__year=2009) self.assertEqual(dive.title, "Dive into Python") self.assertEqual(dive.published, datetime.date(2009, 5, 4)) self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published__year=2009) years = Book.objects.using('other').dates('published', 'year') self.assertEqual([o.year for o in years], [2009]) years = Book.objects.using('default').dates('published', 'year') self.assertEqual([o.year for o in years], []) months = Book.objects.using('other').dates('published', 'month') self.assertEqual([o.month for o in months], [5]) months = Book.objects.using('default').dates('published', 'month') self.assertEqual([o.month for o in months], []) def test_m2m_separation(self): "M2M fields are constrained to a single database" # Create a book and author on the default database pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16)) marty = Person.objects.create(name="Marty Alchin") # Create a book and author on the other database dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) mark = Person.objects.using('other').create(name="Mark Pilgrim") # Save the author relations pro.authors = [marty] dive.authors = [mark] # 
Inspect the m2m tables directly. # There should be 1 entry in each database self.assertEqual(Book.authors.through.objects.using('default').count(), 1) self.assertEqual(Book.authors.through.objects.using('other').count(), 1) # Check that queries work across m2m joins self.assertEqual(list(Book.objects.using('default').filter(authors__name='Marty Alchin').values_list('title', flat=True)), ['Pro Django']) self.assertEqual(list(Book.objects.using('other').filter(authors__name='Marty Alchin').values_list('title', flat=True)), []) self.assertEqual(list(Book.objects.using('default').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)), []) self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)), ['Dive into Python']) # Reget the objects to clear caches dive = Book.objects.using('other').get(title="Dive into Python") mark = Person.objects.using('other').get(name="Mark Pilgrim") # Retrieve related object by descriptor. 
Related objects should be database-bound self.assertEqual(list(dive.authors.all().values_list('name', flat=True)), ['Mark Pilgrim']) self.assertEqual(list(mark.book_set.all().values_list('title', flat=True)), ['Dive into Python']) def test_m2m_forward_operations(self): "M2M forward manipulations are all constrained to a single DB" # Create a book and author on the other database dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) mark = Person.objects.using('other').create(name="Mark Pilgrim") # Save the author relations dive.authors = [mark] # Add a second author john = Person.objects.using('other').create(name="John Smith") self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)), []) dive.authors.add(john) self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)), ['Dive into Python']) self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)), ['Dive into Python']) # Remove the second author dive.authors.remove(john) self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)), ['Dive into Python']) self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)), []) # Clear all authors dive.authors.clear() self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)), []) self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)), []) # Create an author through the m2m interface dive.authors.create(name='Jane Brown') self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)), []) 
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Jane Brown').values_list('title', flat=True)), ['Dive into Python']) def test_m2m_reverse_operations(self): "M2M reverse manipulations are all constrained to a single DB" # Create a book and author on the other database dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) mark = Person.objects.using('other').create(name="Mark Pilgrim") # Save the author relations dive.authors = [mark] # Create a second book on the other database grease = Book.objects.using('other').create(title="Greasemonkey Hacks", published=datetime.date(2005, 11, 1)) # Add a books to the m2m mark.book_set.add(grease) self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)), ['Mark Pilgrim']) self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)), ['Mark Pilgrim']) # Remove a book from the m2m mark.book_set.remove(grease) self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)), ['Mark Pilgrim']) self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)), []) # Clear the books associated with mark mark.book_set.clear() self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)), []) self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)), []) # Create a book through the m2m interface mark.book_set.create(title="Dive into HTML5", published=datetime.date(2020, 1, 1)) self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)), []) self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into 
HTML5').values_list('name', flat=True)), ['Mark Pilgrim']) def test_m2m_cross_database_protection(self): "Operations that involve sharing M2M objects across databases raise an error" # Create a book and author on the default database pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16)) marty = Person.objects.create(name="Marty Alchin") # Create a book and author on the other database dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) mark = Person.objects.using('other').create(name="Mark Pilgrim") # Set a foreign key set with an object from a different database with self.assertRaises(ValueError): with transaction.atomic(using='default'): marty.edited = [pro, dive] # Add to an m2m with an object from a different database with self.assertRaises(ValueError): with transaction.atomic(using='default'): marty.book_set.add(dive) # Set a m2m with an object from a different database with self.assertRaises(ValueError): with transaction.atomic(using='default'): marty.book_set = [pro, dive] # Add to a reverse m2m with an object from a different database with self.assertRaises(ValueError): with transaction.atomic(using='other'): dive.authors.add(marty) # Set a reverse m2m with an object from a different database with self.assertRaises(ValueError): with transaction.atomic(using='other'): dive.authors = [mark, marty] def test_m2m_deletion(self): "Cascaded deletions of m2m relations issue queries on the right database" # Create a book and author on the other database dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) mark = Person.objects.using('other').create(name="Mark Pilgrim") dive.authors = [mark] # Check the initial state self.assertEqual(Person.objects.using('default').count(), 0) self.assertEqual(Book.objects.using('default').count(), 0) self.assertEqual(Book.authors.through.objects.using('default').count(), 0) 
self.assertEqual(Person.objects.using('other').count(), 1) self.assertEqual(Book.objects.using('other').count(), 1) self.assertEqual(Book.authors.through.objects.using('other').count(), 1) # Delete the object on the other database dive.delete(using='other') self.assertEqual(Person.objects.using('default').count(), 0) self.assertEqual(Book.objects.using('default').count(), 0) self.assertEqual(Book.authors.through.objects.using('default').count(), 0) # The person still exists ... self.assertEqual(Person.objects.using('other').count(), 1) # ... but the book has been deleted self.assertEqual(Book.objects.using('other').count(), 0) # ... and the relationship object has also been deleted. self.assertEqual(Book.authors.through.objects.using('other').count(), 0) # Now try deletion in the reverse direction. Set up the relation again dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) dive.authors = [mark] # Check the initial state self.assertEqual(Person.objects.using('default').count(), 0) self.assertEqual(Book.objects.using('default').count(), 0) self.assertEqual(Book.authors.through.objects.using('default').count(), 0) self.assertEqual(Person.objects.using('other').count(), 1) self.assertEqual(Book.objects.using('other').count(), 1) self.assertEqual(Book.authors.through.objects.using('other').count(), 1) # Delete the object on the other database mark.delete(using='other') self.assertEqual(Person.objects.using('default').count(), 0) self.assertEqual(Book.objects.using('default').count(), 0) self.assertEqual(Book.authors.through.objects.using('default').count(), 0) # The person has been deleted ... self.assertEqual(Person.objects.using('other').count(), 0) # ... but the book still exists self.assertEqual(Book.objects.using('other').count(), 1) # ... and the relationship object has been deleted. 
self.assertEqual(Book.authors.through.objects.using('other').count(), 0) def test_foreign_key_separation(self): "FK fields are constrained to a single database" # Create a book and author on the default database pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16)) george = Person.objects.create(name="George Vilches") # Create a book and author on the other database dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) chris = Person.objects.using('other').create(name="Chris Mills") # Save the author's favorite books pro.editor = george pro.save() dive.editor = chris dive.save() pro = Book.objects.using('default').get(title="Pro Django") self.assertEqual(pro.editor.name, "George Vilches") dive = Book.objects.using('other').get(title="Dive into Python") self.assertEqual(dive.editor.name, "Chris Mills") # Check that queries work across foreign key joins self.assertEqual(list(Person.objects.using('default').filter(edited__title='Pro Django').values_list('name', flat=True)), ['George Vilches']) self.assertEqual(list(Person.objects.using('other').filter(edited__title='Pro Django').values_list('name', flat=True)), []) self.assertEqual(list(Person.objects.using('default').filter(edited__title='Dive into Python').values_list('name', flat=True)), []) self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)), ['Chris Mills']) # Reget the objects to clear caches chris = Person.objects.using('other').get(name="Chris Mills") dive = Book.objects.using('other').get(title="Dive into Python") # Retrieve related object by descriptor. 
Related objects should be database-bound self.assertEqual(list(chris.edited.values_list('title', flat=True)), ['Dive into Python']) def test_foreign_key_reverse_operations(self): "FK reverse manipulations are all constrained to a single DB" dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) chris = Person.objects.using('other').create(name="Chris Mills") # Save the author relations dive.editor = chris dive.save() # Add a second book edited by chris html5 = Book.objects.using('other').create(title="Dive into HTML5", published=datetime.date(2010, 3, 15)) self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)), []) chris.edited.add(html5) self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)), ['Chris Mills']) self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)), ['Chris Mills']) # Remove the second editor chris.edited.remove(html5) self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)), []) self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)), ['Chris Mills']) # Clear all edited books chris.edited.clear() self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)), []) self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)), []) # Create an author through the m2m interface chris.edited.create(title='Dive into Water', published=datetime.date(2010, 3, 15)) self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)), []) 
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Water').values_list('name', flat=True)), ['Chris Mills']) self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)), []) def test_foreign_key_cross_database_protection(self): "Operations that involve sharing FK objects across databases raise an error" # Create a book and author on the default database pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16)) marty = Person.objects.create(name="Marty Alchin") # Create a book and author on the other database dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) # Set a foreign key with an object from a different database with self.assertRaises(ValueError): dive.editor = marty # Set a foreign key set with an object from a different database with self.assertRaises(ValueError): with transaction.atomic(using='default'): marty.edited = [pro, dive] # Add to a foreign key set with an object from a different database with self.assertRaises(ValueError): with transaction.atomic(using='default'): marty.edited.add(dive) def test_foreign_key_deletion(self): "Cascaded deletions of Foreign Key relations issue queries on the right database" mark = Person.objects.using('other').create(name="Mark Pilgrim") Pet.objects.using('other').create(name="Fido", owner=mark) # Check the initial state self.assertEqual(Person.objects.using('default').count(), 0) self.assertEqual(Pet.objects.using('default').count(), 0) self.assertEqual(Person.objects.using('other').count(), 1) self.assertEqual(Pet.objects.using('other').count(), 1) # Delete the person object, which will cascade onto the pet mark.delete(using='other') self.assertEqual(Person.objects.using('default').count(), 0) self.assertEqual(Pet.objects.using('default').count(), 0) # Both the pet and the person have been deleted from the right database 
self.assertEqual(Person.objects.using('other').count(), 0) self.assertEqual(Pet.objects.using('other').count(), 0) def test_foreign_key_validation(self): "ForeignKey.validate() uses the correct database" mickey = Person.objects.using('other').create(name="Mickey") pluto = Pet.objects.using('other').create(name="Pluto", owner=mickey) self.assertEqual(None, pluto.full_clean()) def test_o2o_separation(self): "OneToOne fields are constrained to a single database" # Create a user and profile on the default database alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com') alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate') # Create a user and profile on the other database bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com') bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog') # Retrieve related objects; queries should be database constrained alice = User.objects.using('default').get(username="alice") self.assertEqual(alice.userprofile.flavor, "chocolate") bob = User.objects.using('other').get(username="bob") self.assertEqual(bob.userprofile.flavor, "crunchy frog") # Check that queries work across joins self.assertEqual(list(User.objects.using('default').filter(userprofile__flavor='chocolate').values_list('username', flat=True)), ['alice']) self.assertEqual(list(User.objects.using('other').filter(userprofile__flavor='chocolate').values_list('username', flat=True)), []) self.assertEqual(list(User.objects.using('default').filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)), []) self.assertEqual(list(User.objects.using('other').filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)), ['bob']) # Reget the objects to clear caches alice_profile = UserProfile.objects.using('default').get(flavor='chocolate') bob_profile = UserProfile.objects.using('other').get(flavor='crunchy frog') # Retrieve related 
object by descriptor. Related objects should be database-bound self.assertEqual(alice_profile.user.username, 'alice') self.assertEqual(bob_profile.user.username, 'bob') def test_o2o_cross_database_protection(self): "Operations that involve sharing FK objects across databases raise an error" # Create a user and profile on the default database alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com') # Create a user and profile on the other database bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com') # Set a one-to-one relation with an object from a different database alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate') with self.assertRaises(ValueError): bob.userprofile = alice_profile # BUT! if you assign a FK object when the base object hasn't # been saved yet, you implicitly assign the database for the # base object. bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog') new_bob_profile = UserProfile(flavor="spring surprise") # assigning a profile requires an explicit pk as the object isn't saved charlie = User(pk=51, username='charlie', email='charlie@example.com') charlie.set_unusable_password() # initially, no db assigned self.assertEqual(new_bob_profile._state.db, None) self.assertEqual(charlie._state.db, None) # old object comes from 'other', so the new object is set to use 'other'... new_bob_profile.user = bob charlie.userprofile = bob_profile self.assertEqual(new_bob_profile._state.db, 'other') self.assertEqual(charlie._state.db, 'other') # ... 
but it isn't saved yet self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)), ['bob']) self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)), ['crunchy frog']) # When saved (no using required), new objects goes to 'other' charlie.save() bob_profile.save() new_bob_profile.save() self.assertEqual(list(User.objects.using('default').values_list('username', flat=True)), ['alice']) self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)), ['bob', 'charlie']) self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)), ['chocolate']) self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)), ['crunchy frog', 'spring surprise']) # This also works if you assign the O2O relation in the constructor denise = User.objects.db_manager('other').create_user('denise', 'denise@example.com') denise_profile = UserProfile(flavor="tofu", user=denise) self.assertEqual(denise_profile._state.db, 'other') # ... 
        # ... but it isn't saved yet
        self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
                         ['chocolate'])
        self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
                         ['crunchy frog', 'spring surprise'])

        # When saved, the new profile goes to 'other'
        denise_profile.save()
        self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
                         ['chocolate'])
        self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
                         ['crunchy frog', 'spring surprise', 'tofu'])

    def test_generic_key_separation(self):
        "Generic fields are constrained to a single database"
        # Create a book and review on the default database
        pro = Book.objects.create(title="Pro Django",
                                  published=datetime.date(2008, 12, 16))
        review1 = Review.objects.create(source="Python Monthly", content_object=pro)

        # Create a book and review on the other database
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)

        review1 = Review.objects.using('default').get(source="Python Monthly")
        self.assertEqual(review1.content_object.title, "Pro Django")

        review2 = Review.objects.using('other').get(source="Python Weekly")
        self.assertEqual(review2.content_object.title, "Dive into Python")

        # Reget the objects to clear caches
        dive = Book.objects.using('other').get(title="Dive into Python")

        # Retrieve related object by descriptor. Related objects should be
        # database-bound.
        self.assertEqual(list(dive.reviews.all().values_list('source', flat=True)),
                         ['Python Weekly'])

    def test_generic_key_reverse_operations(self):
        "Generic reverse manipulations are all constrained to a single DB"
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        temp = Book.objects.using('other').create(title="Temp",
                                                  published=datetime.date(2009, 5, 4))
        review1 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
        review2 = Review.objects.using('other').create(source="Python Monthly", content_object=temp)

        self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
                         [])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
                         ['Python Weekly'])

        # Add a second review
        dive.reviews.add(review2)
        self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
                         [])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
                         ['Python Monthly', 'Python Weekly'])

        # Remove the first review (moving it off `dive` onto nothing)
        dive.reviews.remove(review1)
        self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
                         [])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
                         ['Python Monthly'])

        # Clear all reviews
        dive.reviews.clear()
        self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
                         [])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
                         [])

        # Create a review through the generic interface
        dive.reviews.create(source='Python Daily')
        self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
                         [])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
                         ['Python Daily'])

    def test_generic_key_cross_database_protection(self):
        "Operations that involve sharing generic key objects across databases raise an error"
        # Create a book and review on the default database
        pro = Book.objects.create(title="Pro Django",
                                  published=datetime.date(2008, 12, 16))
        review1 = Review.objects.create(source="Python Monthly", content_object=pro)

        # Create a book and review on the other database
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        Review.objects.using('other').create(source="Python Weekly", content_object=dive)

        # Set a generic foreign key with an object from a different database
        with self.assertRaises(ValueError):
            review1.content_object = dive

        # Add to a generic foreign key set with an object from a different database
        with self.assertRaises(ValueError):
            with transaction.atomic(using='other'):
                dive.reviews.add(review1)

        # BUT! if you assign a FK object when the base object hasn't
        # been saved yet, you implicitly assign the database for the
        # base object.
        review3 = Review(source="Python Daily")
        # initially, no db assigned
        self.assertEqual(review3._state.db, None)

        # Dive comes from 'other', so review3 is set to use 'other'...
        review3.content_object = dive
        self.assertEqual(review3._state.db, 'other')
        # ... but it isn't saved yet
        self.assertEqual(list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
                         ['Python Monthly'])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
                         ['Python Weekly'])

        # When saved, review3 goes to 'other'
        review3.save()
        self.assertEqual(list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
                         ['Python Monthly'])
        self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
                         ['Python Daily', 'Python Weekly'])

    def test_generic_key_deletion(self):
        "Cascaded deletions of Generic Key relations issue queries on the right database"
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        Review.objects.using('other').create(source="Python Weekly", content_object=dive)

        # Check the initial state
        self.assertEqual(Book.objects.using('default').count(), 0)
        self.assertEqual(Review.objects.using('default').count(), 0)
        self.assertEqual(Book.objects.using('other').count(), 1)
        self.assertEqual(Review.objects.using('other').count(), 1)

        # Delete the Book object, which will cascade onto the review
        dive.delete(using='other')
        self.assertEqual(Book.objects.using('default').count(), 0)
        self.assertEqual(Review.objects.using('default').count(), 0)
        # Both the book and the review have been deleted from the right database
        self.assertEqual(Book.objects.using('other').count(), 0)
        self.assertEqual(Review.objects.using('other').count(), 0)

    def test_ordering(self):
        "get_next_by_XXX commands stick to a single database"
        Book.objects.create(title="Pro Django",
                            published=datetime.date(2008, 12, 16))
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        learn = Book.objects.using('other').create(title="Learning Python",
                                                   published=datetime.date(2008, 7, 16))
        self.assertEqual(learn.get_next_by_published().title, "Dive into Python")
        self.assertEqual(dive.get_previous_by_published().title, "Learning Python")

    def test_raw(self):
        "test the raw() method across databases"
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        val = Book.objects.db_manager("other").raw('SELECT id FROM multiple_database_book')
        self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))

        val = Book.objects.raw('SELECT id FROM multiple_database_book').using('other')
        self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))

    def test_select_related(self):
        "Database assignment is retained if an object is retrieved with select_related()"
        # Create a book and author on the other database
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        Book.objects.using('other').create(title="Dive into Python",
                                           published=datetime.date(2009, 5, 4),
                                           editor=mark)

        # Retrieve the Person using select_related()
        book = Book.objects.using('other').select_related('editor').get(title="Dive into Python")

        # The editor instance should have a db state
        self.assertEqual(book.editor._state.db, 'other')

    def test_subquery(self):
        """Make sure as_sql works with subqueries and primary/replica."""
        sub = Person.objects.using('other').filter(name='fff')
        qs = Book.objects.filter(editor__in=sub)

        # When you call __str__ on the query object, it doesn't know about using
        # so it falls back to the default. If the subquery explicitly uses a
        # different database, an error should be raised.
        self.assertRaises(ValueError, str, qs.query)

        # Evaluating the query shouldn't work, either
        with self.assertRaises(ValueError):
            for obj in qs:
                pass

    def test_related_manager(self):
        "Related managers return managers, not querysets"
        mark = Person.objects.using('other').create(name="Mark Pilgrim")

        # extra_arg is removed by the BookManager's implementation of
        # create(); but the BookManager's implementation won't get called
        # unless edited returns a Manager, not a queryset
        mark.book_set.create(title="Dive into Python",
                             published=datetime.date(2009, 5, 4),
                             extra_arg=True)
        mark.book_set.get_or_create(title="Dive into Python",
                                    published=datetime.date(2009, 5, 4),
                                    extra_arg=True)
        mark.edited.create(title="Dive into Water",
                           published=datetime.date(2009, 5, 4),
                           extra_arg=True)
        mark.edited.get_or_create(title="Dive into Water",
                                  published=datetime.date(2009, 5, 4),
                                  extra_arg=True)


class ConnectionRouterTestCase(TestCase):
    # Covers the two ways ConnectionRouter gets its router list: from the
    # DATABASE_ROUTERS setting, or from an explicit constructor argument.

    @override_settings(DATABASE_ROUTERS=[
        'multiple_database.tests.TestRouter',
        'multiple_database.tests.WriteRouter'])
    def test_router_init_default(self):
        # With no argument, routers come from settings.DATABASE_ROUTERS
        connection_router = ConnectionRouter()
        self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
                             ['TestRouter', 'WriteRouter'])

    def test_router_init_arg(self):
        # An explicit list of dotted paths overrides settings
        connection_router = ConnectionRouter([
            'multiple_database.tests.TestRouter',
            'multiple_database.tests.WriteRouter'
        ])
        self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
                             ['TestRouter', 'WriteRouter'])

        # Init with instances instead of strings
        connection_router = ConnectionRouter([TestRouter(), WriteRouter()])
        self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
                             ['TestRouter', 'WriteRouter'])


# Make the 'other' database appear to be a replica of the 'default'
@override_settings(DATABASE_ROUTERS=[TestRouter()])
class RouterTestCase(TestCase):
    multi_db = True

    def test_db_selection(self):
        "Check that querysets obey the router for db suggestions"
        self.assertEqual(Book.objects.db, 'other')
        self.assertEqual(Book.objects.all().db, 'other')

        self.assertEqual(Book.objects.using('default').db, 'default')

        self.assertEqual(Book.objects.db_manager('default').db, 'default')
        self.assertEqual(Book.objects.db_manager('default').all().db, 'default')

    def test_migrate_selection(self):
        "Synchronization behavior is predictable"
        self.assertTrue(router.allow_migrate_model('default', User))
        self.assertTrue(router.allow_migrate_model('default', Book))

        self.assertTrue(router.allow_migrate_model('other', User))
        self.assertTrue(router.allow_migrate_model('other', Book))

        with override_settings(DATABASE_ROUTERS=[TestRouter(), AuthRouter()]):
            # Add the auth router to the chain. TestRouter is a universal
            # synchronizer, so it should have no effect.
            self.assertTrue(router.allow_migrate_model('default', User))
            self.assertTrue(router.allow_migrate_model('default', Book))
            self.assertTrue(router.allow_migrate_model('other', User))
            self.assertTrue(router.allow_migrate_model('other', Book))

        with override_settings(DATABASE_ROUTERS=[AuthRouter(), TestRouter()]):
            # Now check what happens if the router order is reversed.
            self.assertFalse(router.allow_migrate_model('default', User))
            self.assertTrue(router.allow_migrate_model('default', Book))
            self.assertTrue(router.allow_migrate_model('other', User))
            self.assertTrue(router.allow_migrate_model('other', Book))

    def test_migrate_legacy_router(self):
        class LegacyRouter(object):
            def allow_migrate(self, db, model):
                """
                Deprecated allow_migrate signature should trigger
                RemovedInDjango20Warning.
                """
                assert db == 'default'
                assert model is User
                return True

        with override_settings(DATABASE_ROUTERS=[LegacyRouter()]):
            with warnings.catch_warnings(record=True) as recorded:
                warnings.filterwarnings('always')
                msg = (
                    "The signature of allow_migrate has changed from "
                    "allow_migrate(self, db, model) to "
                    "allow_migrate(self, db, app_label, model_name=None, **hints). "
                    "Support for the old signature will be removed in Django 2.0."
                )
                self.assertTrue(router.allow_migrate_model('default', User))
                self.assertEqual(force_text(recorded.pop().message), msg)
                self.assertEqual(recorded, [])

                self.assertTrue(router.allow_migrate('default', 'app_label'))
                self.assertEqual(force_text(recorded.pop().message), msg)

    def test_partial_router(self):
        "A router can choose to implement a subset of methods"
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))

        # First check the baseline behavior.
        self.assertEqual(router.db_for_read(User), 'other')
        self.assertEqual(router.db_for_read(Book), 'other')
        self.assertEqual(router.db_for_write(User), 'default')
        self.assertEqual(router.db_for_write(Book), 'default')
        self.assertTrue(router.allow_relation(dive, dive))
        self.assertTrue(router.allow_migrate_model('default', User))
        self.assertTrue(router.allow_migrate_model('default', Book))

        with override_settings(DATABASE_ROUTERS=[WriteRouter(), AuthRouter(), TestRouter()]):
            self.assertEqual(router.db_for_read(User), 'default')
            self.assertEqual(router.db_for_read(Book), 'other')
            self.assertEqual(router.db_for_write(User), 'writer')
            self.assertEqual(router.db_for_write(Book), 'writer')
            self.assertTrue(router.allow_relation(dive, dive))
            self.assertFalse(router.allow_migrate_model('default', User))
            self.assertTrue(router.allow_migrate_model('default', Book))

    def test_database_routing(self):
        # Reads go to 'other' (the replica), writes go to 'default', per TestRouter.
        marty = Person.objects.using('default').create(name="Marty Alchin")
        pro = Book.objects.using('default').create(title="Pro Django",
                                                   published=datetime.date(2008, 12, 16),
                                                   editor=marty)
        pro.authors = [marty]

        # Create a book and author on the other database
        Book.objects.using('other').create(title="Dive into Python",
                                           published=datetime.date(2009, 5, 4))

        # An update query will be routed to the default database
        Book.objects.filter(title='Pro Django').update(pages=200)

        with self.assertRaises(Book.DoesNotExist):
            # By default, the get query will be directed to 'other'
            Book.objects.get(title='Pro Django')

        # But the same query issued explicitly at a database will work.
        pro = Book.objects.using('default').get(title='Pro Django')

        # Check that the update worked.
        self.assertEqual(pro.pages, 200)

        # An update query with an explicit using clause will be routed
        # to the requested database.
        Book.objects.using('other').filter(title='Dive into Python').update(pages=300)
        self.assertEqual(Book.objects.get(title='Dive into Python').pages, 300)

        # Related object queries stick to the same database
        # as the original object, regardless of the router
        self.assertEqual(list(pro.authors.values_list('name', flat=True)), ['Marty Alchin'])
        self.assertEqual(pro.editor.name, 'Marty Alchin')

        # get_or_create is a special case. The get needs to be targeted at
        # the write database in order to avoid potential transaction
        # consistency problems
        book, created = Book.objects.get_or_create(title="Pro Django")
        self.assertFalse(created)

        book, created = Book.objects.get_or_create(title="Dive Into Python",
                                                   defaults={'published': datetime.date(2009, 5, 4)})
        self.assertTrue(created)

        # Check the head count of objects
        self.assertEqual(Book.objects.using('default').count(), 2)
        self.assertEqual(Book.objects.using('other').count(), 1)
        # If a database isn't specified, the read database is used
        self.assertEqual(Book.objects.count(), 1)

        # A delete query will also be routed to the default database
        Book.objects.filter(pages__gt=150).delete()

        # The default database has lost the book.
        self.assertEqual(Book.objects.using('default').count(), 1)
        self.assertEqual(Book.objects.using('other').count(), 1)

    def test_foreign_key_cross_database_protection(self):
        "Foreign keys can cross databases if the two databases have a common source"
        # Create a book and author on the default database
        pro = Book.objects.using('default').create(title="Pro Django",
                                                   published=datetime.date(2008, 12, 16))
        marty = Person.objects.using('default').create(name="Marty Alchin")

        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python",
                                                  published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('other').create(name="Mark Pilgrim")

        # Set a foreign key with an object from a different database
        try:
            dive.editor = marty
        except ValueError:
            self.fail("Assignment across primary/replica databases with a common source should be ok")

        # Database assignments of original objects haven't changed...
        self.assertEqual(marty._state.db, 'default')
        self.assertEqual(pro._state.db, 'default')
        self.assertEqual(dive._state.db, 'other')
        self.assertEqual(mark._state.db, 'other')

        # ... but they will when the affected object is saved.
        dive.save()
        self.assertEqual(dive._state.db, 'default')

        # ...and the source database now has a copy of any object saved
        try:
            Book.objects.using('default').get(title='Dive into Python').delete()
        except Book.DoesNotExist:
            self.fail('Source database should have a copy of saved object')

        # This isn't a real primary/replica database, so restore the original from other
        dive = Book.objects.using('other').get(title='Dive into Python')
        self.assertEqual(dive._state.db, 'other')

        # Set a foreign key set with an object from a different database
        try:
            marty.edited = [pro, dive]
        except ValueError:
            self.fail("Assignment across primary/replica databases with a common source should be ok")

        # Assignment implies a save, so database assignments of original objects have changed...
        self.assertEqual(marty._state.db, 'default')
        self.assertEqual(pro._state.db, 'default')
        self.assertEqual(dive._state.db, 'default')
        self.assertEqual(mark._state.db, 'other')

        # ...and the source database now has a copy of any object saved
        try:
            Book.objects.using('default').get(title='Dive into Python').delete()
        except Book.DoesNotExist:
            self.fail('Source database should have a copy of saved object')

        # This isn't a real primary/replica database, so restore the original from other
        dive = Book.objects.using('other').get(title='Dive into Python')
        self.assertEqual(dive._state.db, 'other')

        # Add to a foreign key set with an object from a different database
        try:
            marty.edited.add(dive)
        except ValueError:
            self.fail("Assignment across primary/replica databases with a common source should be ok")

        # Add implies a save, so database assignments of original objects have changed...
        self.assertEqual(marty._state.db, 'default')
        self.assertEqual(pro._state.db, 'default')
        self.assertEqual(dive._state.db, 'default')
        self.assertEqual(mark._state.db, 'other')

        # ...and the source database now has a copy of any object saved
        try:
            Book.objects.using('default').get(title='Dive into Python').delete()
        except Book.DoesNotExist:
            self.fail('Source database should have a copy of saved object')

        # This isn't a real primary/replica database, so restore the original from other
        dive = Book.objects.using('other').get(title='Dive into Python')

        # If you assign a FK object when the base object hasn't
        # been saved yet, you implicitly assign the database for the
        # base object.
        chris = Person(name="Chris Mills")
        html5 = Book(title="Dive into HTML5",
                     published=datetime.date(2010, 3, 15))
        # initially, no db assigned
        self.assertEqual(chris._state.db, None)
        self.assertEqual(html5._state.db, None)

        # old object comes from 'other', so the new object is set to use the
        # source of 'other'...
        self.assertEqual(dive._state.db, 'other')
        chris.save()
        dive.editor = chris
        html5.editor = mark

        self.assertEqual(dive._state.db, 'other')
        self.assertEqual(mark._state.db, 'other')
        self.assertEqual(chris._state.db, 'default')
        self.assertEqual(html5._state.db, 'default')

        # This also works if you assign the FK in the constructor
        water = Book(title="Dive into Water",
                     published=datetime.date(2001, 1, 1),
                     editor=mark)
        self.assertEqual(water._state.db, 'default')

        # For the remainder of this test, create a copy of 'mark' in the
        # 'default' database to prevent integrity errors on backends that
        # don't defer constraints checks until the end of the transaction
        mark.save(using='default')

        # This moved 'mark' in the 'default' database, move it back in 'other'
        mark.save(using='other')
        self.assertEqual(mark._state.db, 'other')

        # If you create an object through a FK relation, it will be
        # written to the write database, even if the original object
        # was on the read database
        cheesecake = mark.edited.create(title='Dive into Cheesecake',
                                        published=datetime.date(2010, 3, 15))
        self.assertEqual(cheesecake._state.db, 'default')

        # Same goes for get_or_create, regardless of whether getting or creating
        cheesecake, created = mark.edited.get_or_create(title='Dive into Cheesecake',
                                                        published=datetime.date(2010, 3, 15))
        self.assertEqual(cheesecake._state.db, 'default')

        puddles, created = mark.edited.get_or_create(title='Dive into Puddles',
                                                     published=datetime.date(2010, 3, 15))
        self.assertEqual(puddles._state.db, 'default')

    def test_m2m_cross_database_protection(self):
        "M2M relations can cross databases if the databases share a source"
        # Create books and authors on the inverse to the usual database
        pro = Book.objects.using('other').create(pk=1, title="Pro Django",
                                                 published=datetime.date(2008, 12, 16))
        marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")

        dive = Book.objects.using('default').create(pk=2, title="Dive into Python",
                                                    published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('default').create(pk=2, name="Mark Pilgrim")

        # Now save back onto the usual database.
        # This simulates primary/replica - the objects exist on both database,
        # but the _state.db is as it is for all other tests.
        pro.save(using='default')
        marty.save(using='default')
        dive.save(using='other')
        mark.save(using='other')

        # Check that we have 2 of both types of object on both databases
        self.assertEqual(Book.objects.using('default').count(), 2)
        self.assertEqual(Book.objects.using('other').count(), 2)
        self.assertEqual(Person.objects.using('default').count(), 2)
        self.assertEqual(Person.objects.using('other').count(), 2)

        # Set a m2m set with an object from a different database
        try:
            marty.book_set = [pro, dive]
        except ValueError:
            self.fail("Assignment across primary/replica databases with a common source should be ok")

        # Database assignments don't change
        self.assertEqual(marty._state.db, 'default')
        self.assertEqual(pro._state.db, 'default')
        self.assertEqual(dive._state.db, 'other')
        self.assertEqual(mark._state.db, 'other')

        # All m2m relations should be saved on the default database
        self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
        self.assertEqual(Book.authors.through.objects.using('other').count(), 0)

        # Reset relations
        Book.authors.through.objects.using('default').delete()

        # Add to an m2m with an object from a different database
        try:
            marty.book_set.add(dive)
        except ValueError:
            self.fail("Assignment across primary/replica databases with a common source should be ok")

        # Database assignments don't change
        self.assertEqual(marty._state.db, 'default')
        self.assertEqual(pro._state.db, 'default')
        self.assertEqual(dive._state.db, 'other')
        self.assertEqual(mark._state.db, 'other')

        # All m2m relations should be saved on the default database
        self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
        self.assertEqual(Book.authors.through.objects.using('other').count(), 0)

        # Reset relations
        Book.authors.through.objects.using('default').delete()

        # Set a reverse m2m with an object from a different database
        try:
            dive.authors = [mark, marty]
        except ValueError:
            self.fail("Assignment across primary/replica databases with a common source should be ok")

        # Database assignments don't change
        self.assertEqual(marty._state.db, 'default')
        self.assertEqual(pro._state.db, 'default')
        self.assertEqual(dive._state.db, 'other')
        self.assertEqual(mark._state.db, 'other')

        # All m2m relations should be saved on the default database
        self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
        self.assertEqual(Book.authors.through.objects.using('other').count(), 0)

        # Reset relations
        Book.authors.through.objects.using('default').delete()

        self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
        self.assertEqual(Book.authors.through.objects.using('other').count(), 0)

        # Add to a reverse m2m with an object from a different database
        try:
            dive.authors.add(marty)
        except ValueError:
            self.fail("Assignment across primary/replica databases with a common source should be ok")

        # Database assignments don't change
        self.assertEqual(marty._state.db, 'default')
        self.assertEqual(pro._state.db, 'default')
        self.assertEqual(dive._state.db, 'other')
        self.assertEqual(mark._state.db, 'other')

        # All m2m relations should be saved on the default database
        self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
        self.assertEqual(Book.authors.through.objects.using('other').count(), 0)

        # If you create an object through a M2M relation, it will be
        # written to the write database, even if the original object
        # was on the read database
        alice = dive.authors.create(name='Alice')
        self.assertEqual(alice._state.db, 'default')

        # Same goes for get_or_create, regardless of whether getting or creating
        alice, created = dive.authors.get_or_create(name='Alice')
        self.assertEqual(alice._state.db, 'default')

        bob, created = dive.authors.get_or_create(name='Bob')
        self.assertEqual(bob._state.db, 'default')

    def test_o2o_cross_database_protection(self):
        "Operations that involve sharing one-to-one objects across databases are allowed when the databases share a source"
        # Create a user and profile on the default database
        alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')

        # Create a user and profile on the other database
        bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')

        # Set a one-to-one relation with an object from a different database
        alice_profile = UserProfile.objects.create(user=alice, flavor='chocolate')
        try:
            bob.userprofile = alice_profile
        except ValueError:
            self.fail("Assignment across primary/replica databases with a common source should be ok")

        # Database assignments of original objects haven't changed...
        self.assertEqual(alice._state.db, 'default')
        self.assertEqual(alice_profile._state.db, 'default')
        self.assertEqual(bob._state.db, 'other')

        # ... but they will when the affected object is saved.
        bob.save()
        self.assertEqual(bob._state.db, 'default')

    def test_generic_key_cross_database_protection(self):
        "Generic Key operations can span databases if they share a source"
        # Create a book and review on the default database
        pro = Book.objects.using(
            'default').create(title="Pro Django", published=datetime.date(2008, 12, 16))

        review1 = Review.objects.using(
            'default').create(source="Python Monthly", content_object=pro)

        # Create a book and review on the other database
        dive = Book.objects.using(
            'other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))

        review2 = Review.objects.using(
            'other').create(source="Python Weekly", content_object=dive)

        # Set a generic foreign key with an object from a different database
        try:
            review1.content_object = dive
        except ValueError:
            self.fail("Assignment across primary/replica databases with a common source should be ok")

        # Database assignments of original objects haven't changed...
        self.assertEqual(pro._state.db, 'default')
        self.assertEqual(review1._state.db, 'default')
        self.assertEqual(dive._state.db, 'other')
        self.assertEqual(review2._state.db, 'other')

        # ... but they will when the affected object is saved.
        dive.save()
        self.assertEqual(review1._state.db, 'default')
        self.assertEqual(dive._state.db, 'default')

        # ...and the source database now has a copy of any object saved
        try:
            Book.objects.using('default').get(title='Dive into Python').delete()
        except Book.DoesNotExist:
            self.fail('Source database should have a copy of saved object')

        # This isn't a real primary/replica database, so restore the original from other
        dive = Book.objects.using('other').get(title='Dive into Python')
        self.assertEqual(dive._state.db, 'other')

        # Add to a generic foreign key set with an object from a different database
        try:
            dive.reviews.add(review1)
        except ValueError:
            self.fail("Assignment across primary/replica databases with a common source should be ok")

        # Database assignments of original objects haven't changed...
        self.assertEqual(pro._state.db, 'default')
        self.assertEqual(review1._state.db, 'default')
        self.assertEqual(dive._state.db, 'other')
        self.assertEqual(review2._state.db, 'other')

        # ... but they will when the affected object is saved.
        dive.save()
        self.assertEqual(dive._state.db, 'default')

        # ...and the source database now has a copy of any object saved
        try:
            Book.objects.using('default').get(title='Dive into Python').delete()
        except Book.DoesNotExist:
            self.fail('Source database should have a copy of saved object')

        # BUT! if you assign a FK object when the base object hasn't
        # been saved yet, you implicitly assign the database for the
        # base object.
        review3 = Review(source="Python Daily")
        # initially, no db assigned
        self.assertEqual(review3._state.db, None)

        # Dive comes from 'other', so review3 is set to use the source of 'other'...
        review3.content_object = dive
        self.assertEqual(review3._state.db, 'default')

        # If you create an object through a generic relation, it will be
        # written to the write database, even if the original object
        # was on the read database
        dive = Book.objects.using('other').get(title='Dive into Python')
        nyt = dive.reviews.create(source="New York Times", content_object=dive)
        self.assertEqual(nyt._state.db, 'default')

    def test_m2m_managers(self):
        "M2M relations are represented by managers, and can be controlled like managers"
        pro = Book.objects.using('other').create(pk=1, title="Pro Django",
                                                 published=datetime.date(2008, 12, 16))
        marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")

        self.assertEqual(pro.authors.db, 'other')
        self.assertEqual(pro.authors.db_manager('default').db, 'default')
        self.assertEqual(pro.authors.db_manager('default').all().db, 'default')

        self.assertEqual(marty.book_set.db, 'other')
        self.assertEqual(marty.book_set.db_manager('default').db, 'default')
        self.assertEqual(marty.book_set.db_manager('default').all().db, 'default')

    def test_foreign_key_managers(self):
        "FK reverse relations are represented by managers, and can be controlled like managers"
        marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
        Book.objects.using('other').create(pk=1, title="Pro Django",
                                           published=datetime.date(2008, 12, 16),
                                           editor=marty)

        self.assertEqual(marty.edited.db, 'other')
        self.assertEqual(marty.edited.db_manager('default').db, 'default')
        self.assertEqual(marty.edited.db_manager('default').all().db, 'default')

    def test_generic_key_managers(self):
        "Generic key relations are represented by managers, and can be controlled like managers"
        pro = Book.objects.using('other').create(title="Pro Django",
                                                 published=datetime.date(2008, 12, 16))
        Review.objects.using('other').create(source="Python Monthly",
                                             content_object=pro)

        self.assertEqual(pro.reviews.db, 'other')
        self.assertEqual(pro.reviews.db_manager('default').db, 'default')
        self.assertEqual(pro.reviews.db_manager('default').all().db, 'default')

    def test_subquery(self):
        """Make sure as_sql works with subqueries and primary/replica."""
        # Create a book and author on the other database
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        Book.objects.using('other').create(title="Dive into Python",
                                           published=datetime.date(2009, 5, 4),
                                           editor=mark)

        sub = Person.objects.filter(name='Mark Pilgrim')
        qs = Book.objects.filter(editor__in=sub)

        # When you call __str__ on the query object, it doesn't know about using
        # so it falls back to the default. Don't let routing instructions
        # force the subquery to an incompatible database.
        str(qs.query)

        # If you evaluate the query, it should work, running on 'other'
        self.assertEqual(list(qs.values_list('title', flat=True)), ['Dive into Python'])

    def test_deferred_models(self):
        # Deferred (.only()) instances keep their database binding and can
        # still be re-saved onto a different database.
        mark_def = Person.objects.using('default').create(name="Mark Pilgrim")
        mark_other = Person.objects.using('other').create(name="Mark Pilgrim")
        orig_b = Book.objects.using('other').create(title="Dive into Python",
                                                    published=datetime.date(2009, 5, 4),
                                                    editor=mark_other)
        b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
        self.assertEqual(b.published, datetime.date(2009, 5, 4))
        b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
        b.editor = mark_def
        b.save(using='default')
        self.assertEqual(Book.objects.using('default').get(pk=b.pk).published,
                         datetime.date(2009, 5, 4))


@override_settings(DATABASE_ROUTERS=[AuthRouter()])
class AuthTestCase(TestCase):
    multi_db = True

    def test_auth_manager(self):
        "The methods on the auth manager obey database hints"
        # Create one user using default allocation policy
        User.objects.create_user('alice', 'alice@example.com')

        # Create another user, explicitly specifying the database
        User.objects.db_manager('default').create_user('bob', 'bob@example.com')

        # The first user only exists on the other database
        alice = User.objects.using('other').get(username='alice')
        self.assertEqual(alice.username, 'alice')
        self.assertEqual(alice._state.db, 'other')

        self.assertRaises(User.DoesNotExist, User.objects.using('default').get,
                          username='alice')

        # The second user only exists on the default database
        bob = User.objects.using('default').get(username='bob')
        self.assertEqual(bob.username, 'bob')
        self.assertEqual(bob._state.db, 'default')

        self.assertRaises(User.DoesNotExist, User.objects.using('other').get,
                          username='bob')

        # That is... there is one user on each database
        self.assertEqual(User.objects.using('default').count(), 1)
        self.assertEqual(User.objects.using('other').count(), 1)

    def test_dumpdata(self):
        "Check that dumpdata honors allow_migrate restrictions on the router"
        User.objects.create_user('alice', 'alice@example.com')
        User.objects.db_manager('default').create_user('bob', 'bob@example.com')

        # Check that dumping the default database doesn't try to include auth
        # because allow_migrate prohibits auth on default
        new_io = StringIO()
        management.call_command('dumpdata', 'auth', format='json',
                                database='default', stdout=new_io)
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, '[]')

        # Check that dumping the other database does include auth
        new_io = StringIO()
        management.call_command('dumpdata', 'auth', format='json',
                                database='other', stdout=new_io)
        command_output = new_io.getvalue().strip()
        self.assertIn('"email": "alice@example.com"', command_output)


class AntiPetRouter(object):
    # A router that only expresses an opinion on migrate,
    # passing pets to the 'other' database

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if db == 'other':
            return model_name == 'pet'
        else:
            return model_name != 'pet'


class FixtureTestCase(TestCase):
    multi_db = True
    fixtures = ['multidb-common', 'multidb']

    @override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
    def test_fixture_loading(self):
        "Multi-db fixtures are loaded correctly"
        # Check that "Pro Django" exists on the default database, but not on other database
        try:
            Book.objects.get(title="Pro Django")
            Book.objects.using('default').get(title="Pro Django")
        except Book.DoesNotExist:
            self.fail('"Pro Django" should exist on default database')

        self.assertRaises(
            Book.DoesNotExist,
            Book.objects.using('other').get,
            title="Pro Django"
        )

        # Check that "Dive into Python" exists on the other database, but not on the default database
        try:
            Book.objects.using('other').get(title="Dive into Python")
        except Book.DoesNotExist:
            self.fail('"Dive into Python" should exist on other database')

        self.assertRaises(
            Book.DoesNotExist,
            Book.objects.get,
            title="Dive into Python"
        )
        self.assertRaises(
            Book.DoesNotExist,
            Book.objects.using('default').get,
            title="Dive into Python"
        )

        # Check that "Definitive Guide" exists on the both databases
        try:
            Book.objects.get(title="The Definitive Guide to Django")
            Book.objects.using('default').get(title="The Definitive Guide to Django")
            Book.objects.using('other').get(title="The Definitive Guide to Django")
        except Book.DoesNotExist:
            self.fail('"The Definitive Guide to Django" should exist on both databases')

    @override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
    def test_pseudo_empty_fixtures(self):
        "A fixture can contain entries, but lead to nothing in the database; this shouldn't raise an error (ref #14068)"
        new_io = StringIO()
        management.call_command('loaddata', 'pets', stdout=new_io, stderr=new_io)
        command_output = new_io.getvalue().strip()
        # No objects will actually be loaded
        self.assertEqual(command_output, "Installed 0 object(s) (of 2) from 1 fixture(s)")


class PickleQuerySetTestCase(TestCase):
    multi_db = True

    def test_pickling(self):
        # A pickled queryset remembers which database it was bound to.
        for db in connections:
            Book.objects.using(db).create(title='Dive into Python',
                                          published=datetime.date(2009, 5, 4))
            qs = Book.objects.all()
            self.assertEqual(qs.db, pickle.loads(pickle.dumps(qs)).db)


class DatabaseReceiver(object):
    """
    Used in the tests for the database argument in signals (#13552)
    """
    def __call__(self, signal, sender, **kwargs):
        # Record the alias of the database the signal was sent for.
        self._database = kwargs['using']
# NOTE(review): this span was collapsed onto three physical lines; re-formatted
# with conventional indentation, tokens unchanged.
class WriteToOtherRouter(object):
    """
    A router that sends all writes to the other database.
    """
    def db_for_write(self, model, **hints):
        return "other"


class SignalTests(TestCase):
    multi_db = True

    def override_router(self):
        # Context manager that routes all writes to the 'other' database.
        return override_settings(DATABASE_ROUTERS=[WriteToOtherRouter()])

    def test_database_arg_save_and_delete(self):
        """
        Tests that the pre/post_save signal contains the correct database.
        (#13552)
        """
        # Make some signal receivers
        pre_save_receiver = DatabaseReceiver()
        post_save_receiver = DatabaseReceiver()
        pre_delete_receiver = DatabaseReceiver()
        post_delete_receiver = DatabaseReceiver()
        # Make model and connect receivers
        signals.pre_save.connect(sender=Person, receiver=pre_save_receiver)
        signals.post_save.connect(sender=Person, receiver=post_save_receiver)
        signals.pre_delete.connect(sender=Person, receiver=pre_delete_receiver)
        signals.post_delete.connect(sender=Person, receiver=post_delete_receiver)
        p = Person.objects.create(name='Darth Vader')
        # Save and test receivers got calls
        p.save()
        self.assertEqual(pre_save_receiver._database, DEFAULT_DB_ALIAS)
        self.assertEqual(post_save_receiver._database, DEFAULT_DB_ALIAS)
        # Delete, and test
        p.delete()
        self.assertEqual(pre_delete_receiver._database, DEFAULT_DB_ALIAS)
        self.assertEqual(post_delete_receiver._database, DEFAULT_DB_ALIAS)
        # Save again to a different database
        p.save(using="other")
        self.assertEqual(pre_save_receiver._database, "other")
        self.assertEqual(post_save_receiver._database, "other")
        # Delete, and test
        p.delete(using="other")
        self.assertEqual(pre_delete_receiver._database, "other")
        self.assertEqual(post_delete_receiver._database, "other")
        # Disconnect so later tests don't observe these receivers.
        signals.pre_save.disconnect(sender=Person, receiver=pre_save_receiver)
        signals.post_save.disconnect(sender=Person, receiver=post_save_receiver)
        signals.pre_delete.disconnect(sender=Person, receiver=pre_delete_receiver)
        signals.post_delete.disconnect(sender=Person, receiver=post_delete_receiver)

    def test_database_arg_m2m(self):
        """
        Test that the m2m_changed
        signal has a correct database arg (#13552)
        """
        # Make a receiver
        receiver = DatabaseReceiver()
        # Connect it
        signals.m2m_changed.connect(receiver=receiver)

        # Create the models that will be used for the tests
        b = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")

        # Create a copy of the models on the 'other' database to prevent
        # integrity errors on backends that don't defer constraints checks
        Book.objects.using('other').create(pk=b.pk, title=b.title, published=b.published)
        Person.objects.using('other').create(pk=p.pk, name=p.name)

        # Test addition
        b.authors.add(p)
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
        with self.override_router():
            b.authors.add(p)
        self.assertEqual(receiver._database, "other")

        # Test removal
        b.authors.remove(p)
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
        with self.override_router():
            b.authors.remove(p)
        self.assertEqual(receiver._database, "other")

        # Test addition in reverse
        p.book_set.add(b)
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
        with self.override_router():
            p.book_set.add(b)
        self.assertEqual(receiver._database, "other")

        # Test clearing
        b.authors.clear()
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
        with self.override_router():
            b.authors.clear()
        self.assertEqual(receiver._database, "other")


class AttributeErrorRouter(object):
    "A router to test the exception handling of ConnectionRouter"
    def db_for_read(self, model, **hints):
        raise AttributeError

    def db_for_write(self, model, **hints):
        raise AttributeError


class RouterAttributeErrorTestCase(TestCase):
    multi_db = True

    def override_router(self):
        return override_settings(DATABASE_ROUTERS=[AttributeErrorRouter()])

    def test_attribute_error_read(self):
        "Check that the AttributeError from AttributeErrorRouter bubbles up"
        b = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        with self.override_router():
            self.assertRaises(AttributeError, Book.objects.get, pk=b.pk)

    def test_attribute_error_save(self):
        "Check that the AttributeError from AttributeErrorRouter bubbles up"
        dive = Book()
        dive.title = "Dive into Python"
        dive.published = datetime.date(2009, 5, 4)
        with self.override_router():
            self.assertRaises(AttributeError, dive.save)

    def test_attribute_error_delete(self):
        "Check that the AttributeError from AttributeErrorRouter bubbles up"
        b = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        b.authors = [p]
        b.editor = p
        with self.override_router():
            self.assertRaises(AttributeError, b.delete)

    def test_attribute_error_m2m(self):
        "Check that the AttributeError from AttributeErrorRouter bubbles up"
        b = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        with self.override_router():
            self.assertRaises(AttributeError, setattr, b, 'authors', [p])


class ModelMetaRouter(object):
    "A router to ensure model arguments are real model classes"
    def db_for_write(self, model, **hints):
        if not hasattr(model, '_meta'):
            raise ValueError


@override_settings(DATABASE_ROUTERS=[ModelMetaRouter()])
class RouterModelArgumentTestCase(TestCase):
    multi_db = True

    def test_m2m_collection(self):
        b = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        # test add
        b.authors.add(p)
        # test remove
        b.authors.remove(p)
        # test clear
        b.authors.clear()
        # test setattr
        b.authors = [p]
        # test M2M collection
        b.delete()

    def test_foreignkey_collection(self):
        person = Person.objects.create(name='Bob')
        Pet.objects.create(owner=person, name='Wart')
        # test related FK collection
        person.delete()


class SyncOnlyDefaultDatabaseRouter(object):
    # Only allows migrations on the default database.
    def allow_migrate(self, db, app_label, **hints):
        return db == DEFAULT_DB_ALIAS


class MigrateTestCase(TestCase):

    available_apps = [
        'multiple_database',
        'django.contrib.auth',
        'django.contrib.contenttypes'
    ]
    multi_db = True
# NOTE(review): this span was collapsed onto five physical lines; re-formatted
# with conventional indentation, tokens unchanged. The first two methods below
# belong to the MigrateTestCase class defined just above this span.
    def test_migrate_to_other_database(self):
        """Regression test for #16039: migrate with --database option."""
        cts = ContentType.objects.using('other').filter(app_label='multiple_database')

        count = cts.count()
        self.assertGreater(count, 0)

        cts.delete()
        management.call_command('migrate', verbosity=0, interactive=False, database='other')
        self.assertEqual(cts.count(), count)

    def test_migrate_to_other_database_with_router(self):
        """Regression test for #16039: migrate with --database option."""
        cts = ContentType.objects.using('other').filter(app_label='multiple_database')

        cts.delete()
        with override_settings(DATABASE_ROUTERS=[SyncOnlyDefaultDatabaseRouter()]):
            management.call_command('migrate', verbosity=0, interactive=False, database='other')

        self.assertEqual(cts.count(), 0)


class RouterUsed(Exception):
    # Raised by WriteCheckRouter to prove the router was consulted, and to
    # carry the arguments it was consulted with.
    WRITE = 'write'

    def __init__(self, mode, model, hints):
        self.mode = mode
        self.model = model
        self.hints = hints


class RouteForWriteTestCase(TestCase):
    multi_db = True

    class WriteCheckRouter(object):
        # Aborts any write by raising RouterUsed with the call arguments.
        def db_for_write(self, model, **hints):
            raise RouterUsed(mode=RouterUsed.WRITE, model=model, hints=hints)

    def override_router(self):
        return override_settings(DATABASE_ROUTERS=[RouteForWriteTestCase.WriteCheckRouter()])

    def test_fk_delete(self):
        owner = Person.objects.create(name='Someone')
        pet = Pet.objects.create(name='fido', owner=owner)
        try:
            with self.override_router():
                pet.owner.delete()
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Person)
            self.assertEqual(e.hints, {'instance': owner})

    def test_reverse_fk_delete(self):
        owner = Person.objects.create(name='Someone')
        to_del_qs = owner.pet_set.all()
        try:
            with self.override_router():
                to_del_qs.delete()
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Pet)
            self.assertEqual(e.hints, {'instance': owner})

    def test_reverse_fk_get_or_create(self):
        owner = Person.objects.create(name='Someone')
        try:
            with self.override_router():
                owner.pet_set.get_or_create(name='fido')
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Pet)
            self.assertEqual(e.hints, {'instance': owner})

    def test_reverse_fk_update(self):
        owner = Person.objects.create(name='Someone')
        Pet.objects.create(name='fido', owner=owner)
        try:
            with self.override_router():
                owner.pet_set.update(name='max')
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Pet)
            self.assertEqual(e.hints, {'instance': owner})

    def test_m2m_add(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        try:
            with self.override_router():
                book.authors.add(auth)
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book.authors.through)
            self.assertEqual(e.hints, {'instance': book})

    def test_m2m_clear(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                book.authors.clear()
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book.authors.through)
            self.assertEqual(e.hints, {'instance': book})

    def test_m2m_delete(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                book.authors.all().delete()
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Person)
            self.assertEqual(e.hints, {'instance': book})

    def test_m2m_get_or_create(self):
        Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        try:
            with self.override_router():
                book.authors.get_or_create(name='Someone else')
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book)
            self.assertEqual(e.hints, {'instance': book})

    def test_m2m_remove(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                book.authors.remove(auth)
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book.authors.through)
            self.assertEqual(e.hints, {'instance': book})

    def test_m2m_update(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                book.authors.all().update(name='Different')
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Person)
            self.assertEqual(e.hints, {'instance': book})

    def test_reverse_m2m_add(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        try:
            with self.override_router():
                auth.book_set.add(book)
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book.authors.through)
            self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_clear(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                auth.book_set.clear()
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book.authors.through)
            self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_delete(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                auth.book_set.all().delete()
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book)
            self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_get_or_create(self):
        auth = Person.objects.create(name='Someone')
        Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        try:
            with self.override_router():
                auth.book_set.get_or_create(title="New Book", published=datetime.datetime.now())
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Person)
            self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_remove(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                auth.book_set.remove(book)
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book.authors.through)
            self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_update(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                auth.book_set.all().update(title='Different')
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book)
            self.assertEqual(e.hints, {'instance': auth})
bsd-3-clause
nmarley/electrum-dash
lib/synchronizer.py
8
7179
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# NOTE(review): this module was collapsed onto a few physical lines;
# re-formatted with conventional indentation, tokens unchanged.
# It is Python 2 code (str.decode('hex'), list-returning map()).

from threading import Lock

from bitcoin import Hash, hash_encode
from transaction import Transaction
from util import print_error, print_msg


class WalletSynchronizer():
    '''The synchronizer keeps the wallet up-to-date with its set of
    addresses and their transactions.  It subscribes over the network
    to wallet addresses, gets the wallet to generate new addresses when
    necessary, requests the transaction history of any addresses we
    don't have the full history of, and requests binary transaction
    data of any transactions the wallet doesn't have.

    External interface: __init__() and add() member functions.
    '''

    def __init__(self, wallet, network):
        self.wallet = wallet
        self.network = network
        # Addresses queued by other threads via add(); drained in main_loop().
        self.new_addresses = set()
        # Entries are (tx_hash, tx_height) tuples
        self.requested_tx = set()
        # addr -> server status string of the in-flight get_history request.
        self.requested_histories = {}
        # Addresses with an outstanding subscribe request.
        self.requested_addrs = set()
        # Guards new_addresses, which is touched from proxy/GUI threads.
        self.lock = Lock()
        self.initialize()

    def print_error(self, *msg):
        # Prefix diagnostics so they are attributable to this component.
        print_error("[Synchronizer]", *msg)

    def print_msg(self, *msg):
        print_msg("[Synchronizer]", *msg)

    def parse_response(self, response):
        # Returns (params, result), or (None, None) on a server-side error.
        if response.get('error'):
            self.print_error("response error:", response)
            return None, None
        return response['params'], response['result']

    def is_up_to_date(self):
        # Up to date == no outstanding requests of any kind.
        return (not self.requested_tx and not self.requested_histories
                and not self.requested_addrs)

    def add(self, address):
        '''This can be called from the proxy or GUI threads.'''
        with self.lock:
            self.new_addresses.add(address)

    def subscribe_to_addresses(self, addresses):
        if addresses:
            self.requested_addrs |= addresses
            msgs = map(lambda addr: ('blockchain.address.subscribe', [addr]), addresses)
            self.network.send(msgs, self.addr_subscription_response)

    def addr_subscription_response(self, response):
        params, result = self.parse_response(response)
        if not params:
            return
        addr = params[0]
        if addr in self.requested_addrs:  # Notifications won't be in
            self.requested_addrs.remove(addr)
        # If the server status differs from ours, fetch the full history
        # (unless a fetch for this address is already in flight).
        history = self.wallet.get_address_history(addr)
        if self.wallet.get_status(history) != result:
            if self.requested_histories.get(addr) is None:
                self.network.send([('blockchain.address.get_history', [addr])],
                                  self.addr_history_response)
                self.requested_histories[addr] = result

    def addr_history_response(self, response):
        params, result = self.parse_response(response)
        if not params:
            return
        addr = params[0]
        self.print_error("receiving history", addr, len(result))
        server_status = self.requested_histories.pop(addr)
        # Check that txids are unique
        hashes = set(map(lambda item: item['tx_hash'], result))
        if len(hashes) != len(result):
            self.print_error("error: server history has non-unique txids: %s"% addr)
            return
        # Check that the status corresponds to what was announced
        hist = map(lambda item: (item['tx_hash'], item['height']), result)
        if self.wallet.get_status(hist) != server_status:
            self.print_error("error: status mismatch: %s" % addr)
            return
        # Store received history
        self.wallet.receive_history_callback(addr, hist)
        # Request transactions we don't have
        self.request_missing_txs(hist)

    def tx_response(self, response):
        params, result = self.parse_response(response)
        if not params:
            return
        tx_hash, tx_height = params
        # Verify that the returned raw tx actually hashes to the txid we asked
        # for (Python 2 hex decoding).
        assert tx_hash == hash_encode(Hash(result.decode('hex')))
        tx = Transaction(result)
        try:
            tx.deserialize()
        except Exception:
            self.print_msg("cannot deserialize transaction, skipping", tx_hash)
            return
        self.wallet.receive_tx_callback(tx_hash, tx, tx_height)
        self.requested_tx.remove((tx_hash, tx_height))
        self.print_error("received tx:", tx_hash, len(tx.raw))
        if not self.requested_tx:
            self.network.trigger_callback('updated')
            # Updated gets called too many times from other places as
            # well; if we used that signal we get the notification
            # three times
            self.network.trigger_callback("new_transaction")

    def request_missing_txs(self, hist):
        # "hist" is a list of [tx_hash, tx_height] lists
        missing = set()
        for tx_hash, tx_height in hist:
            if self.wallet.transactions.get(tx_hash) is None:
                missing.add((tx_hash, tx_height))
        # Don't re-request transactions already in flight.
        missing -= self.requested_tx
        if missing:
            requests = [('blockchain.transaction.get', tx) for tx in missing]
            self.network.send(requests, self.tx_response)
            self.requested_tx |= missing

    def initialize(self):
        '''Check the initial state of the wallet.  Subscribe to all its
        addresses, and request any transactions in its address history
        we don't have.
        '''
        for history in self.wallet.history.values():
            # Old electrum servers returned ['*'] when all history for
            # the address was pruned.  This no longer happens but may
            # remain in old wallets.
            if history == ['*']:
                continue
            self.request_missing_txs(history)

        if self.requested_tx:
            self.print_error("missing tx", self.requested_tx)
        self.subscribe_to_addresses(set(self.wallet.addresses(True)))

    def main_loop(self):
        '''Called from the network proxy thread main loop.'''
        # 1. Create new addresses
        self.wallet.synchronize()

        # 2. Subscribe to new addresses
        with self.lock:
            addresses = self.new_addresses
            self.new_addresses = set()
        self.subscribe_to_addresses(addresses)

        # 3. Detect if situation has changed
        up_to_date = self.is_up_to_date()
        if up_to_date != self.wallet.is_up_to_date():
            self.wallet.set_up_to_date(up_to_date)
            if up_to_date:
                self.wallet.save_transactions()
            self.network.trigger_callback('updated')
gpl-3.0