content stringlengths 0 1.55M |
|---|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions common to native, java and host-driven test runners."""<import_stmt>collections<import_stmt>logging<import_from_stmt>devil.utils logging_common<line_sep>CustomFormatter=logging_common.CustomFormatter<line_sep>_WrappedLoggingArgs=collections.namedtuple('_WrappedLoggingArgs' ['verbose' 'quiet'])<def_stmt>SetLogLevel verbose_count add_handler=<true><block_start>"""Sets log level as |verbose_count|.
Args:
verbose_count: Verbosity level.
add_handler: If true, adds a handler with |CustomFormatter|.
"""<line_sep>logging_common.InitializeLogging(_WrappedLoggingArgs(verbose_count 0) handler=<none><if>add_handler<else>logging.NullHandler())<block_end> |
default_app_config='oscar.apps.customer.apps.CustomerConfig'<line_sep> |
'''初始化'''<import_from_stmt>.server gobangSever<import_from_stmt>.client gobangClient<import_from_stmt>.playOnline playOnlineUI<line_sep> |
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019-2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" utilities for fused batchnorm op """<import_from_stmt>typing Union<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.contrib graph_editor<as>ge<import_from_stmt>aimet_common.utils AimetLogger<import_from_stmt>aimet_tensorflow.utils constants<line_sep>logger=AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)<line_sep>_BN_STRUCTURE_ERROR_MSG="BN op doesn't have the expected structure"<class_stmt>BNUtils<block_start>""" Batch Norm/ fused Batch Norm op related utils"""<line_sep>@staticmethod<def_stmt>skip_bn_op sess:tf.compat.v1.Session bn_op:tf.Operation in_tensor:tf.Tensor out_tensor:tf.Tensor<block_start>"""
Skip given bn op specified (fused batch norm op).
Note: supports only Fused bn op types.
:param sess: Tensorflow session
:param bn_op: Batchnorm op to be skipped
:param in_tensor: Input tensor to the batchnorm op
:param out_tensor: Output tensor of the batchnorm op
"""<if_stmt>in_tensor<is><none><or>out_tensor<is><none><block_start>logger.error("Error, input and output tensors must be provided for skipping the op")<assert_stmt><false><block_end><else_stmt><block_start><with_stmt>sess.graph.as_default()<block_start><if_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm']<block_start>ge.detach_outputs(in_tensor.op)<line_sep>ge.reroute_ts(in_tensor out_tensor)<line_sep>BNUtils.remove_bn_op_from_update_ops(sess bn_op)<block_end><else_stmt><block_start>logger.error("Error, Unknown BN op")<assert_stmt><false><block_end><block_end><block_end><block_end>@staticmethod<def_stmt>_get_tensor_read_var_op_trainable_bn_op input_tensor:tf.Tensor<arrow>tf.Tensor<block_start>"""
Generic helper to find a read op tensor associated with input tensor that can be evaluated, when the bn op is
marked trainable.
:param input_tensor: Input tensor to find corresponding read op tensor that can be evaluated
:return: read var op type tensor as tf.Tensor type.
"""<line_sep>logger.debug('Fetching params from trainable BN op type')<assert_stmt>input_tensor.op.inputs[0].op.inputs<is><not><none><line_sep># inputs of 0 is beta tensor , get readVarOp associated with it
var_tensor=input_tensor.op.inputs[0].op.inputs[0]<assert_stmt>var_tensor.op.outputs<is><not><none><assert_stmt>len(var_tensor.consumers())<ge>3<line_sep>tensor_consumers=var_tensor.consumers()<line_sep>var_read_tensor=<none><line_sep># get read variable op tensor from these consumers
# do not pick the one with _1 , it is not fetch-able
<for_stmt>consumer tensor_consumers<block_start><if_stmt>consumer.type<eq>'ReadVariableOp'<and>'ReadVariableOp_1'<not><in>consumer.name<block_start><assert_stmt>consumer.outputs<is><not><none><line_sep>var_read_tensor=consumer.outputs[0]<line_sep><break><block_end><block_end><assert_stmt>var_read_tensor<is><not><none><line_sep><return>var_read_tensor<block_end>@staticmethod<def_stmt>get_beta_read_op bn_op:tf.Operation<arrow>tf.Operation<block_start>"""
Get beta read op from BN op specified.
:param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
:return: beta read op
"""<if_stmt>bn_op.type<in>['Mul']# For regular BN
# mul_1 -> add_1 <-- sub <-- beta_read
<block_start><assert_stmt>len(bn_op.outputs)<ge>1 _BN_STRUCTURE_ERROR_MSG<line_sep>add_1=bn_op.outputs[0].consumers()[0]<assert_stmt>len(add_1.inputs)<ge>2 _BN_STRUCTURE_ERROR_MSG<line_sep>sub=add_1.inputs[1].op<assert_stmt>len(sub.inputs)<ge>1 _BN_STRUCTURE_ERROR_MSG<line_sep>beta_read=sub.inputs[0].op<block_end><elif_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm']<block_start><assert_stmt>len(bn_op.inputs)<eq>5<line_sep>beta_read=bn_op.inputs[constants.BN_OP_PARAM_INDICES['beta']].op<if_stmt>beta_read.type<eq>'Switch'# tf slim bn using training tensor form
<block_start>beta_read=beta_read.inputs[0].op<assert_stmt>'read'<in>beta_read.name<block_end><block_end><else_stmt><block_start>logger.error("Error, unknown BN op")<assert_stmt><false><block_end><assert_stmt>beta_read.type<in>['ReadVariableOp' 'Identity']# Will be identity for tf slim BNs
<return>beta_read<block_end>@staticmethod<def_stmt>_get_beta_read_var_op_tensor_using_structure bn_op:tf.Operation<arrow>tf.Tensor<block_start>"""
Get beta readVariableOp tensor from BN op specified.
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op beta readVariableOp type, as tf.Tensor
"""<assert_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm' 'Mul']<line_sep>beta_read_tensor=BNUtils.get_beta_read_op(bn_op).outputs[0]<assert_stmt>beta_read_tensor<is><not><none><if_stmt>beta_read_tensor.op.inputs[0].op.type<eq>'Switch'<block_start>logger.debug('Fetching params from trainable BN op type')<line_sep>beta_read_tensor=BNUtils._get_tensor_read_var_op_trainable_bn_op(beta_read_tensor)<block_end><return>beta_read_tensor<block_end>@staticmethod<def_stmt>get_beta_read_var_op_tensor graph:tf.Graph bn_op:tf.Operation<arrow>tf.Tensor<block_start>"""
Get beta readVariableOp tensor from BN op specified.
:param graph: TensorFlow graph
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op beta readVariableOp type, as tf.Tensor
"""<try_stmt># try name based tensor look up for Keras layers
<block_start>beta_read_tensor=BNUtils._get_bn_param_tensor_using_name(graph bn_op constants.BNOpParamType.beta)<block_end><except_stmt>KeyError# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
<block_start>beta_read_tensor=BNUtils._get_beta_read_var_op_tensor_using_structure(bn_op)<block_end><return>beta_read_tensor<block_end>@staticmethod<def_stmt>get_beta_as_numpy_data sess:tf.compat.v1.Session bn_op:tf.Operation<arrow>np.ndarray<block_start>"""
Get beta param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op as tf.Operation
:return: beta tensor as numpy data
"""<line_sep>beta_tensor=BNUtils.get_beta_read_var_op_tensor(sess.graph bn_op)<with_stmt>sess.graph.as_default()<block_start>numpy_data=sess.run(beta_tensor)<block_end><return>numpy_data<block_end>@staticmethod<def_stmt>get_gamma_as_read_op bn_op:tf.Operation<arrow>tf.Operation<block_start>"""
Get gamma read op from BN op specified.
:param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
:return: gamma read op
"""<if_stmt>bn_op.type<in>['Mul']# For regular BN
# mul_1 <-- mul <-- gamma_read <-- gamma_tensor
<block_start><assert_stmt>len(bn_op.inputs)<ge>2 _BN_STRUCTURE_ERROR_MSG<line_sep>mul=bn_op.inputs[1].op<assert_stmt>len(mul.inputs)<ge>2 _BN_STRUCTURE_ERROR_MSG<line_sep>gamma_read=mul.inputs[1].op<block_end><elif_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm']<block_start><assert_stmt>len(bn_op.inputs)<eq>5<line_sep>gamma_read=bn_op.inputs[constants.BN_OP_PARAM_INDICES['gamma']].op<if_stmt>gamma_read.type<eq>'Switch'# tf slim bn using training tensor form
<block_start>gamma_read=gamma_read.inputs[0].op<assert_stmt>'read'<in>gamma_read.name<or>gamma_read.type<eq>'Const'<block_end><block_end><else_stmt><block_start>logger.error("Error, unknown BN op")<assert_stmt><false><block_end><assert_stmt>gamma_read.type<in>['ReadVariableOp' 'Identity' 'Const']# Will be identity for tf slim BNs
<return>gamma_read<block_end>@staticmethod<def_stmt>_get_gamma_read_var_op_tensor_using_structure bn_op:tf.Operation<arrow>tf.Tensor<block_start>"""
Get the gamma read var op tensor associated with the batchnorm op.
:param bn_op: Batchnorm op to get gamma read var op tensor from
:return: Gamma read var op tensor associated with bn_op
"""<assert_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm' 'Mul']<line_sep>gamma_read_tensor=BNUtils.get_gamma_as_read_op(bn_op).outputs[0]<assert_stmt>gamma_read_tensor<is><not><none><if_stmt>gamma_read_tensor.op.inputs<and>gamma_read_tensor.op.inputs[0].op.type<eq>'Switch'<block_start>logger.debug('Fetching params from trainable BN op type')<line_sep>gamma_read_tensor=BNUtils._get_tensor_read_var_op_trainable_bn_op(gamma_read_tensor)<block_end><return>gamma_read_tensor<block_end>@staticmethod<def_stmt>get_gamma_read_var_op_tensor graph:tf.Graph bn_op:tf.Operation<arrow>tf.Tensor<block_start>"""
Get the gamma read var op tensor associated with the batchnorm op.
:param graph: TensorFlow graph
:param bn_op: Batchnorm op to get gamma read var op tensor from
:return: Gamma read var op tensor associated with bn_op
"""<try_stmt># try name based tensor look up for Keras layers
<block_start>gamma_read_tensor=BNUtils._get_bn_param_tensor_using_name(graph bn_op constants.BNOpParamType.gamma)<block_end><except_stmt>KeyError# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
<block_start>gamma_read_tensor=BNUtils._get_gamma_read_var_op_tensor_using_structure(bn_op)<block_end><return>gamma_read_tensor<block_end>@staticmethod<def_stmt>get_gamma_as_numpy_data sess:tf.compat.v1.Session bn_op:tf.Operation<arrow>np.ndarray<block_start>"""
Get gamma param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
:return: gamma as numpy data
"""<line_sep>gamma_tensor=BNUtils.get_gamma_read_var_op_tensor(sess.graph bn_op)<with_stmt>sess.graph.as_default()<block_start>numpy_data=sess.run(gamma_tensor)<block_end><return>numpy_data<block_end>@staticmethod<def_stmt>_bn_op_var_struct_1 bn_op:tf.Operation<arrow>Union[tf.Operation <none>]<block_start>"""
Return moving_variance op corresponding to batchnorm with training tensor.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_variance
"""<try_stmt><block_start>mul_op=bn_op.inputs[1].op<assert_stmt>mul_op.type<eq>'Mul'<line_sep>rsqrt_op=mul_op.inputs[0].op<assert_stmt>rsqrt_op.type<eq>'Rsqrt'<line_sep>add_op=rsqrt_op.inputs[0].op<assert_stmt>add_op.type<eq>'AddV2'<line_sep>merge_op=add_op.inputs[0].op<assert_stmt>merge_op.type<eq>'Merge'<line_sep>read_op=merge_op.inputs[0].op<assert_stmt>read_op.type<in>['ReadVariableOp']<line_sep><return>read_op<block_end><except_stmt># pylint: disable=bare-except
<block_start><return><none><block_end><block_end>@staticmethod<def_stmt>_bn_op_var_struct_2 bn_op:tf.Operation<arrow>Union[tf.Operation <none>]<block_start>"""
Return moving_variance op corresponding to batchnorm with training=True.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_variance
"""<try_stmt><block_start>mul_op=bn_op.inputs[1].op<assert_stmt>mul_op.type<eq>'Mul'<line_sep>rsqrt_op=mul_op.inputs[0].op<assert_stmt>rsqrt_op.type<eq>'Rsqrt'<line_sep>add_op=rsqrt_op.inputs[0].op<assert_stmt>add_op.type<eq>'AddV2'<line_sep>squeeze_1_op=add_op.inputs[0].op<assert_stmt>squeeze_1_op.type<eq>'Squeeze'<line_sep>sub_op=squeeze_1_op.outputs[0].consumers()[0]<assert_stmt>sub_op.type<eq>'Sub'<line_sep>read_op=sub_op.inputs[0].op<assert_stmt>read_op.type<in>['ReadVariableOp']<line_sep><return>read_op<block_end><except_stmt># pylint: disable=bare-except
<block_start><return><none><block_end><block_end>@staticmethod<def_stmt>_bn_op_var_struct_3 bn_op:tf.Operation<arrow>Union[tf.Operation <none>]<block_start>"""
Return moving_variance op corresponding to batchnorm with training=False.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_variance
"""<try_stmt><block_start>mul_op=bn_op.inputs[1].op<assert_stmt>mul_op.type<eq>'Mul'<line_sep>rsqrt_op=mul_op.inputs[0].op<assert_stmt>rsqrt_op.type<eq>'Rsqrt'<line_sep>add_op=rsqrt_op.inputs[0].op<assert_stmt>add_op.type<eq>'AddV2'<line_sep>read_op=add_op.inputs[0].op<assert_stmt>read_op.type<in>['ReadVariableOp']<line_sep><return>read_op<block_end><except_stmt># pylint: disable=bare-except
<block_start><return><none><block_end><block_end>@staticmethod<def_stmt>get_moving_variance_as_read_op bn_op:tf.Operation<arrow>Union[tf.Operation <none>]<block_start>"""
Get moving variance read op from BN op specified.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: moving variance as read op
"""<line_sep># register handlers for different structures
bn_op_struct_for_variance_handlers=[BNUtils._bn_op_var_struct_1 BNUtils._bn_op_var_struct_2 BNUtils._bn_op_var_struct_3]<if_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm']<block_start><assert_stmt>len(bn_op.inputs)<eq>5<line_sep>moving_var_read=bn_op.inputs[constants.BN_OP_PARAM_INDICES['movingvariance']].op<if_stmt>moving_var_read.type<eq>'Switch'# tf slim bn using training tensor form
<block_start>moving_var_read=moving_var_read.inputs[0].op<assert_stmt>'read'<in>moving_var_read.name<block_end><block_end><elif_stmt>bn_op.type<in>['Mul']# For regular BN
<block_start>moving_var_read=<none><line_sep># try all handlers available
<for_stmt>handler bn_op_struct_for_variance_handlers<block_start><if_stmt>moving_var_read<is><none><block_start>moving_var_read=handler(bn_op)<block_end><else_stmt><block_start><break><block_end><block_end><assert_stmt>moving_var_read<is><not><none> _BN_STRUCTURE_ERROR_MSG<block_end><else_stmt><block_start>logger.error("Error, unknown BN op")<assert_stmt><false><block_end><if_stmt>moving_var_read.type<eq>'Identity'<block_start><assert_stmt>len(moving_var_read.inputs)<eq>1 _BN_STRUCTURE_ERROR_MSG<block_end><assert_stmt>moving_var_read.type<in>['ReadVariableOp' 'Const' 'Identity']<line_sep><return>moving_var_read<block_end>@staticmethod<def_stmt>_get_moving_variance_read_var_op_tensor_using_structure bn_op:tf.Operation<arrow>tf.Tensor<block_start>"""
Get moving variance readVariableOp tensor from BN op specified.
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving variance readVariableOp type, as tf.Tensor
"""<line_sep># only support fused BN
<assert_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm' 'Mul']<line_sep>moving_var_read_tensor=BNUtils.get_moving_variance_as_read_op(bn_op).outputs[0]<assert_stmt>moving_var_read_tensor<is><not><none><if_stmt>moving_var_read_tensor.op.type<eq>'Const'<block_start>logger.debug("BN op has const type op for moving variance")<line_sep># get the sub_1 op associated with moving variance read op
<assert_stmt>len(bn_op.outputs)<ge>2<line_sep>moving_avg_1_sub_1=bn_op.outputs[2].consumers()[0]<line_sep>all_inputs=moving_avg_1_sub_1.inputs<line_sep># among inputs figure out the read var op type that can be "evaluated"
<for_stmt>input_t all_inputs<block_start><if_stmt>input_t.op.type<eq>'ReadVariableOp'<block_start>moving_var_read_tensor=input_t<block_end><elif_stmt>input_t.op.type<eq>'Identity'<and>'read:0'<in>input_t.name# tf slim form
<block_start>moving_var_read_tensor=input_t<block_end><block_end><block_end><elif_stmt>moving_var_read_tensor.op.inputs[0].op.type<eq>'Switch'<block_start>logger.debug("Fetch moving var from a trainable BN op structure")<line_sep>moving_var_read_tensor=BNUtils._get_tensor_read_var_op_trainable_bn_op(moving_var_read_tensor)<block_end><return>moving_var_read_tensor<block_end>@staticmethod<def_stmt>get_moving_variance_read_var_op_tensor graph:tf.Graph bn_op:tf.Operation<arrow>tf.Tensor<block_start>"""
Get moving variance readVariableOp tensor from BN op specified.
:param graph: TensorFlow graph
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving variance readVariableOp type, as tf.Tensor
"""<try_stmt># try name based tensor look up for Keras layers
<block_start>moving_var_read_tensor=BNUtils._get_bn_param_tensor_using_name(graph bn_op constants.BNOpParamType.moving_variance)<block_end><except_stmt>KeyError# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
<block_start>moving_var_read_tensor=BNUtils._get_moving_variance_read_var_op_tensor_using_structure(bn_op)<block_end><return>moving_var_read_tensor<block_end>@staticmethod<def_stmt>get_moving_variance_as_numpy_data sess:tf.compat.v1.Session bn_op:tf.Operation<arrow>np.ndarray<block_start>"""
Get moving variance param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: moving variance as numpy data
"""<line_sep>moving_var_tensor=BNUtils.get_moving_variance_read_var_op_tensor(sess.graph bn_op)<with_stmt>sess.graph.as_default()<block_start>numpy_data=sess.run(moving_var_tensor)<block_end><return>numpy_data<block_end>@staticmethod<def_stmt>_bn_op_mean_struct_1 bn_op:tf.Operation<arrow>Union[tf.Operation <none>]<block_start>"""
Return moving_mean op corresponding to batchnorm with training tensor.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_mean
"""<try_stmt><block_start>mul_op=bn_op.inputs[1].op<assert_stmt>mul_op.type<eq>'Mul'<line_sep>mul_2_op=mul_op.outputs[0].consumers()[1]<assert_stmt>mul_2_op.type<eq>'Mul'<line_sep>merge_op=mul_2_op.inputs[0].op<assert_stmt>merge_op.type<eq>'Merge'<line_sep>read_op=merge_op.inputs[0].op<assert_stmt>read_op.type<in>['ReadVariableOp']<line_sep><return>read_op<block_end><except_stmt># pylint: disable=bare-except
<block_start><return><none><block_end><block_end>@staticmethod<def_stmt>_bn_op_mean_struct_2 bn_op:tf.Operation<arrow>Union[tf.Operation <none>]<block_start>"""
Return moving_mean op corresponding to batchnorm with training=True.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_mean
"""<try_stmt><block_start>mul_op=bn_op.inputs[1].op<assert_stmt>mul_op.type<eq>'Mul'<line_sep>mul_2_op=mul_op.outputs[0].consumers()[1]<assert_stmt>mul_2_op.type<eq>'Mul'<line_sep>squeeze_op=mul_2_op.inputs[0].op<assert_stmt>squeeze_op.type<eq>'Squeeze'<line_sep>sub_op=squeeze_op.outputs[0].consumers()[0]<assert_stmt>sub_op.type<eq>'Sub'<line_sep>read_op=sub_op.inputs[0].op<assert_stmt>read_op.type<in>['ReadVariableOp']<line_sep><return>read_op<block_end><except_stmt># pylint: disable=bare-except
<block_start><return><none><block_end><block_end>@staticmethod<def_stmt>_bn_op_mean_struct_3 bn_op:tf.Operation<arrow>Union[tf.Operation <none>]<block_start>"""
Return moving_mean op corresponding to batchnorm with training=False.
:param bn_op: bn_op obtained from connected graph using get_modules
a mul_1 op inside BN scope.
:return: Read operation for moving_mean
"""<try_stmt><block_start>mul_op=bn_op.inputs[1].op<assert_stmt>mul_op.type<eq>'Mul'<line_sep>mul_2_op=mul_op.outputs[0].consumers()[1]<assert_stmt>mul_2_op.type<eq>'Mul'<line_sep>read_op=mul_2_op.inputs[0].op<assert_stmt>read_op.type<in>['ReadVariableOp']<line_sep><return>read_op<block_end><except_stmt># pylint: disable=bare-except
<block_start><return><none><block_end><block_end>@staticmethod<def_stmt>get_moving_mean_as_read_op bn_op:tf.Operation<arrow>Union[tf.Operation <none>]<block_start>"""
Get moving mean read op from BN op specified.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: moving mean read op
"""<if_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm']<block_start><assert_stmt>len(bn_op.inputs)<eq>5<line_sep>moving_mean_read=bn_op.inputs[constants.BN_OP_PARAM_INDICES['movingmean']].op<if_stmt>moving_mean_read.type<eq>'Switch'# tf slim bn using training tensor form
<block_start>moving_mean_read=moving_mean_read.inputs[0].op<assert_stmt>'read'<in>moving_mean_read.name<block_end><block_end><elif_stmt>bn_op.type<in>['Mul']# For regular BN
# mul_1 << - mul --> mul_2 <-- cond/merge <-- switch2 <-- moving mean read < moving mean tensor
# inputs[1] is mul .op.inputs[1] is gamma:read op whose input is gamma tensor as variable v2
# register handlers for different structures
<block_start>bn_op_struct_for_mean_handlers=[BNUtils._bn_op_mean_struct_1 BNUtils._bn_op_mean_struct_2 BNUtils._bn_op_mean_struct_3]<line_sep>moving_mean_read=<none><line_sep># try all handlers available
<for_stmt>handler bn_op_struct_for_mean_handlers<block_start><if_stmt>moving_mean_read<is><none><block_start>moving_mean_read=handler(bn_op)<block_end><else_stmt><block_start><break><block_end><block_end><assert_stmt>moving_mean_read<is><not><none> _BN_STRUCTURE_ERROR_MSG<block_end><else_stmt><block_start>logger.error("Error, unknown BN op")<assert_stmt><false><block_end><if_stmt>moving_mean_read.type<eq>'Identity'<block_start><assert_stmt>len(moving_mean_read.inputs)<eq>1 _BN_STRUCTURE_ERROR_MSG<block_end><assert_stmt>moving_mean_read.type<in>['ReadVariableOp' 'Const' 'Identity']<line_sep><return>moving_mean_read<block_end>@staticmethod<def_stmt>_get_moving_mean_read_var_op_tensor_using_structure bn_op:tf.Operation<arrow>tf.Tensor<block_start>"""
Get moving mean readVariableOp tensor from BN op specified.
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving mean readVariableOp type, as tf.Tensor
"""<line_sep># only support fused BN
<assert_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm' 'Mul']<line_sep>moving_mean_read_tensor=BNUtils.get_moving_mean_as_read_op(bn_op).outputs[0]<assert_stmt>moving_mean_read_tensor<is><not><none><if_stmt>moving_mean_read_tensor.op.type<eq>'Const'<block_start>logger.debug("BN op has const type op for moving variance")<line_sep># get the read var type from bn op
# get the sub_1 op associated with moving mean read op
<assert_stmt>len(bn_op.outputs)<g>1<line_sep>moving_avg_sub_1=bn_op.outputs[1].consumers()[0]<line_sep>all_inputs=moving_avg_sub_1.inputs<line_sep># among inputs figure out the read var op type that can be "evaluated"
<for_stmt>input_t all_inputs<block_start><if_stmt>input_t.op.type<eq>'ReadVariableOp'<block_start>moving_mean_read_tensor=input_t<block_end><elif_stmt>input_t.op.type<eq>'Identity'<and>'read:0'<in>input_t.name# tf slim form
<block_start>moving_mean_read_tensor=input_t<block_end><block_end><block_end><elif_stmt>moving_mean_read_tensor.op.inputs[0].op.type<eq>'Switch'<block_start>logger.debug("Fetch moving var from a trainable BN op structure")<line_sep>moving_mean_read_tensor=BNUtils._get_tensor_read_var_op_trainable_bn_op(moving_mean_read_tensor)<block_end><return>moving_mean_read_tensor<block_end>@staticmethod<def_stmt>get_moving_mean_read_var_op_tensor graph:tf.Graph bn_op:tf.Operation<arrow>tf.Tensor<block_start>"""
Get moving mean readVariableOp tensor from BN op specified.
:param graph: TensorFlow graph
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving mean readVariableOp type, as tf.Tensor
"""<try_stmt># try name based tensor look up for Keras layers
<block_start>moving_mean_read_tensor=BNUtils._get_bn_param_tensor_using_name(graph bn_op constants.BNOpParamType.moving_mean)<block_end><except_stmt>KeyError# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
<block_start>moving_mean_read_tensor=BNUtils._get_moving_mean_read_var_op_tensor_using_structure(bn_op)<block_end><return>moving_mean_read_tensor<block_end>@staticmethod<def_stmt>get_moving_mean_as_numpy_data sess:tf.compat.v1.Session bn_op:tf.Operation<arrow>np.ndarray<block_start>"""
Get moving mean param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: moving mean as numpy data
"""<line_sep>moving_mean_tensor=BNUtils.get_moving_mean_read_var_op_tensor(sess.graph bn_op)<with_stmt>sess.graph.as_default()<block_start>numpy_data=sess.run(moving_mean_tensor)<block_end><return>numpy_data<block_end>@staticmethod<def_stmt>get_epsilon bn_op:tf.Operation<arrow>float<block_start>"""
Returns epsilon extracted from given bn op.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: epsilon value
"""<if_stmt>bn_op.type<in>['Mul']<block_start><assert_stmt>len(bn_op.inputs)<ge>2 _BN_STRUCTURE_ERROR_MSG<line_sep>mul=bn_op.inputs[1].op<assert_stmt>len(mul.inputs)<ge>1 _BN_STRUCTURE_ERROR_MSG<line_sep>rsqrt=mul.inputs[0].op<assert_stmt>len(rsqrt.inputs)<ge>1 _BN_STRUCTURE_ERROR_MSG<line_sep>add=rsqrt.inputs[0].op<assert_stmt>len(add.inputs)<ge>2 _BN_STRUCTURE_ERROR_MSG<line_sep>epsilon=add.inputs[1].op<line_sep>numpy_epsilon=epsilon.get_attr('value').float_val[0]<block_end><elif_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm']# epsilon can be derived as attribute value
<block_start>numpy_epsilon=bn_op.get_attr("epsilon")<block_end><else_stmt><block_start>logger.error("Error, unknown BN op")<assert_stmt><false><block_end><return>numpy_epsilon<block_end>@staticmethod<def_stmt>get_assign_moving_avg_op bn_op:tf.Operation<arrow>Union[tf.Operation <none>]<block_start>"""
Get assign_moving_avg op corresponding with the bn_op, if it exists.
:param bn_op: Batchnorm op to search for corresponding assign_moving_avg op
:return: assign_moving_op corresponding with the bn op, or None if it does not exist.
"""<assert_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm']<assert_stmt>len(bn_op.outputs)<eq>6<or>len(bn_op.outputs)<eq>5<if_stmt>bn_op.outputs[1].consumers()<block_start>child_op=bn_op.outputs[1].consumers()[0]<if_stmt>child_op.type<eq>'Merge'<block_start>sub_op=child_op.outputs[0].consumers()[0]<block_end><else_stmt><block_start>sub_op=child_op<block_end><assert_stmt>sub_op.type<eq>'Sub'<line_sep>mul_op=sub_op.outputs[0].consumers()[0]<assert_stmt>mul_op.type<eq>'Mul'<line_sep>assign_moving_avg_op=mul_op.outputs[0].consumers()[0]<assert_stmt>assign_moving_avg_op.type<in>['AssignSub' 'AssignSubVariableOp']<line_sep><return>assign_moving_avg_op<block_end><return><none><block_end>@staticmethod<def_stmt>get_assign_moving_avg_1_op bn_op:tf.Operation<arrow>Union[tf.Operation <none>]<block_start>"""
Get assign_moving_avg_1 op corresponding with the bn_op, if it exists.
:param bn_op: Batchnorm op to search for corresponding assign_moving_avg_1 op
:return: assign_moving_avg_1 corresponding with the bn op, or None if it does not exist.
"""<assert_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm']<assert_stmt>len(bn_op.outputs)<eq>6<or>len(bn_op.outputs)<eq>5<if_stmt>bn_op.outputs[2].consumers()<block_start>child_op=bn_op.outputs[2].consumers()[0]<if_stmt>child_op.type<eq>'Merge'<block_start>sub_op=child_op.outputs[0].consumers()[0]<block_end><else_stmt><block_start>sub_op=child_op<block_end><assert_stmt>sub_op.type<eq>'Sub'<line_sep>mul_op=sub_op.outputs[0].consumers()[0]<assert_stmt>mul_op.type<eq>'Mul'<line_sep>assign_moving_avg_op=mul_op.outputs[0].consumers()[0]<assert_stmt>assign_moving_avg_op.type<in>['AssignSub' 'AssignSubVariableOp']<line_sep><return>assign_moving_avg_op<block_end><return><none><block_end>@staticmethod<def_stmt>remove_bn_op_from_update_ops sess:tf.compat.v1.Session bn_op:tf.Operation<block_start>"""
Remove batchnorm assign_moving_avg and assign_moving_avg_1 ops from update ops.
:param sess: tf.compat.v1.Session
:param bn_op: BatchNorm operation whose assign_moving_avg and assign_moving_avg_1 ops should be removed.
"""<with_stmt>sess.graph.as_default()<block_start>update_ops=tf.compat.v1.get_collection_ref(tf.compat.v1.GraphKeys.UPDATE_OPS)<line_sep>assign_moving_avg_op=BNUtils.get_assign_moving_avg_op(bn_op)<line_sep>assign_moving_avg_op_1=BNUtils.get_assign_moving_avg_1_op(bn_op)<if_stmt>assign_moving_avg_op<and>assign_moving_avg_op<in>update_ops<block_start>update_ops.remove(assign_moving_avg_op)<line_sep>logger.debug('Removed %s from update ops' assign_moving_avg_op.name)<block_end><if_stmt>assign_moving_avg_op_1<and>assign_moving_avg_op_1<in>update_ops<block_start>update_ops.remove(assign_moving_avg_op_1)<line_sep>logger.debug('Removed %s from update ops' assign_moving_avg_op_1.name)<block_end><block_end><block_end>@staticmethod<def_stmt>_get_bn_param_tensor_using_name graph:tf.Graph bn_op:tf.Operation param_type:constants.BNOpParamType<block_start>"""
Helper to get BN op param read tensor.
:param graph: TensorFlow graph
:param bn_op: BN op from which param read tensor is to be extracted
:param param_type: param type for which param tensor is to be extracted, as constants.BNOpParamType (supported
types are beta, gamma, moving_mean or moving_variance)
:return: param read tensor
"""<if_stmt>param_type<not><in>vars(constants.BNOpParamType).values()<block_start><assert_stmt>0 'Error, get_bn_param_using_name() invalid param type requested'<block_end># name of the fused bn contains bn_name/FusedBatchNormV3 or
# bn_name/cond/FusedBatchNormV3_1
# we need only the bn_name to make param tensor names
op_name=bn_op.name.split('/')[0]<line_sep>param_tensor_name=op_name+constants.BN_OP_PARAM_NAME_SUFFIX[param_type]<line_sep>param_tensor=graph.get_tensor_by_name(param_tensor_name)<line_sep><return>param_tensor<block_end>@staticmethod<def_stmt>_bn_op_momentum_struct_1 bn_op:tf.Operation<arrow>Union[float <none>]<block_start>"""
Return momentum value corresponding to batchnorm with training tensor.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: momentum value
"""<try_stmt><block_start>mul_op=bn_op.inputs[1].op<assert_stmt>mul_op.type<eq>'Mul'<line_sep>mul_2_op=mul_op.outputs[0].consumers()[1]<assert_stmt>mul_2_op.type<eq>'Mul'<line_sep>merge_op=mul_2_op.inputs[0].op<assert_stmt>merge_op.type<eq>'Merge'<line_sep>switch_1_op=merge_op.outputs[0].consumers()[0]<assert_stmt>switch_1_op.type<eq>'Switch'<line_sep>sub_op=switch_1_op.outputs[1].consumers()[0]<assert_stmt>sub_op.type<eq>'Sub'<line_sep>assign_moving_avg_mul_op=sub_op.outputs[0].consumers()[0]<assert_stmt>assign_moving_avg_mul_op.type<eq>'Mul'<line_sep>decay_op=assign_moving_avg_mul_op.inputs[1].op<assert_stmt>decay_op.type<eq>'Const'<line_sep>decay=decay_op.get_attr('value').float_val[0]<line_sep><return>1-decay<block_end><except_stmt># pylint: disable=bare-except
<block_start><return><none><block_end><block_end>@staticmethod<def_stmt>_bn_op_momentum_struct_2 bn_op:tf.Operation<arrow>Union[float <none>]<block_start>"""
Return momentum value corresponding to batchnorm with training=True.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: momentum value
"""<try_stmt><block_start>mul_op=bn_op.inputs[1].op<assert_stmt>mul_op.type<eq>'Mul'<line_sep>mul_2_op=mul_op.outputs[0].consumers()[1]<assert_stmt>mul_2_op.type<eq>'Mul'<line_sep>squeeze_op=mul_2_op.inputs[0].op<assert_stmt>squeeze_op.type<eq>'Squeeze'<line_sep>sub_op=squeeze_op.outputs[0].consumers()[0]<assert_stmt>sub_op.type<eq>'Sub'<line_sep>assign_moving_avg_mul_op=sub_op.outputs[0].consumers()[0]<assert_stmt>assign_moving_avg_mul_op.type<eq>'Mul'<line_sep>decay_op=assign_moving_avg_mul_op.inputs[1].op<assert_stmt>decay_op.type<eq>'Const'<line_sep>decay=decay_op.get_attr('value').float_val[0]<line_sep><return>1-decay<block_end><except_stmt># pylint: disable=bare-except
<block_start><return><none><block_end><block_end>@staticmethod<def_stmt>_fused_bn_op_momentum_struct_1 bn_op:tf.Operation<arrow>Union[float <none>]<block_start>"""
Return momentum value corresponding to fused batchnorm with training tensor.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: momentum value
"""<try_stmt><block_start>merge_1_op=bn_op.outputs[1].consumers()[0]<assert_stmt>merge_1_op.type<eq>'Merge'<line_sep>sub_op=merge_1_op.outputs[0].consumers()[0]<assert_stmt>sub_op.type<eq>'Sub'<line_sep>mul_op=sub_op.outputs[0].consumers()[0]<assert_stmt>mul_op.type<eq>'Mul'<line_sep>sub_2_op=mul_op.inputs[1].op<assert_stmt>sub_2_op.type<eq>'Sub'<line_sep>merge_op=sub_2_op.inputs[1].op<assert_stmt>merge_op.type<eq>'Merge'<line_sep>decay_op=merge_op.inputs[1].op<assert_stmt>decay_op.type<eq>'Const'<line_sep>decay=decay_op.get_attr('value').float_val[0]<line_sep><return>decay<block_end><except_stmt># pylint: disable=bare-except
<block_start><return><none><block_end><block_end>@staticmethod<def_stmt>_fused_bn_op_momentum_struct_2 bn_op:tf.Operation<arrow>Union[float <none>]<block_start>"""
Return momentum value corresponding to fused batchnorm with training=True.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: momentum value
"""<try_stmt><block_start>sub_op=bn_op.outputs[1].consumers()[0]<assert_stmt>sub_op.type<eq>'Sub'<line_sep>mul_op=sub_op.outputs[0].consumers()[0]<assert_stmt>mul_op.type<eq>'Mul'<line_sep>sub_2_op=mul_op.inputs[1].op<assert_stmt>sub_2_op.type<eq>'Sub'<line_sep>decay_op=sub_2_op.inputs[1].op<assert_stmt>decay_op.type<eq>'Const'<line_sep>decay=decay_op.get_attr('value').float_val[0]<line_sep><return>decay<block_end><except_stmt># pylint: disable=bare-except
<block_start><return><none><block_end><block_end>@staticmethod<def_stmt>get_momentum bn_op:tf.Operation<arrow>float<block_start>"""
Returns momentum extracted from given bn op. If bn op is training=False mode, momentum will be none.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: momentum value
"""<line_sep># register handlers for different structures
bn_op_struct_for_momentum_handlers=[BNUtils._bn_op_momentum_struct_1 BNUtils._bn_op_momentum_struct_2]<line_sep>fused_bn_op_struct_for_momentum_handlers=[BNUtils._fused_bn_op_momentum_struct_1 BNUtils._fused_bn_op_momentum_struct_2]<line_sep>decay=<none><if_stmt>bn_op.type<in>['Mul']# try all handlers available
<block_start><for_stmt>handler bn_op_struct_for_momentum_handlers<block_start><if_stmt>decay<is><none><block_start>decay=handler(bn_op)<block_end><else_stmt><block_start><break><block_end><block_end><block_end><elif_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm']# try all handlers available
<block_start><for_stmt>handler fused_bn_op_struct_for_momentum_handlers<block_start><if_stmt>decay<is><none><block_start>decay=handler(bn_op)<block_end><else_stmt><block_start><break><block_end><block_end><block_end><else_stmt><block_start>logger.error("Error, unknown BN op")<assert_stmt><false><block_end><return>decay<block_end>@staticmethod<def_stmt>get_training bn_op:tf.Operation<arrow>Union[<none> bool tf.Tensor]<block_start>"""
Returns either a boolean of whether the BN op training mode is True or False, or the is_training tensor
feeding into the BN op if it is using a tensor to determine the mode dynamically.
:param bn_op: bn_op obtained in the connected graph
:return: True or False for training mode, or tf.Tensor that determines the mode dynamically.
"""<assert_stmt>bn_op.type<in>['FusedBatchNormV3' 'FusedBatchNorm' 'Mul']<if_stmt>bn_op.type<eq>'FusedBatchNormV3'<or>bn_op.type<eq>'FusedBatchNorm'<block_start><if_stmt>'FusedBatchNormV3_1'<in>bn_op.name<block_start>switch_op=bn_op.inputs[0].op<line_sep>pred_id_op=switch_op.inputs[1].op<line_sep>training=pred_id_op.inputs[0]<block_end><else_stmt><block_start>training=bn_op.get_attr('is_training')<block_end><return>training<block_end># Non fused batchnorm case
mul_op=bn_op.inputs[1].op<assert_stmt>mul_op.type<eq>'Mul'<line_sep>rsqrt_op=mul_op.inputs[0].op<assert_stmt>rsqrt_op.type<eq>'Rsqrt'<line_sep>add_op=rsqrt_op.inputs[0].op<assert_stmt>add_op.type<eq>'AddV2'<line_sep>add_input_op=add_op.inputs[0].op<if_stmt>add_input_op.type<eq>'Squeeze'<block_start><return><true><block_end><if_stmt>add_input_op.type<eq>'ReadVariableOp'<block_start><return><false><block_end><if_stmt>add_input_op.type<eq>'Merge'<block_start>switch_op=add_input_op.inputs[1].op<assert_stmt>switch_op.type<eq>'Switch'<line_sep>pred_id_op=switch_op.inputs[1].op<assert_stmt>pred_id_op.type<eq>'Identity'<line_sep><return>pred_id_op.inputs[0]<block_end>logger.error('Error, unknown BN structure')<line_sep><return><none><block_end><block_end> |
"""Scan release-test results in the RDS store and post alerts/stats to Slack."""
import argparse
from collections import defaultdict, Counter
from typing import Any, List, Tuple, Mapping, Optional
import datetime
import hashlib
import json
import logging
import os
import requests
import sys

import boto3

from e2e import GLOBAL_CONFIG

from alerts.default import handle_result as default_handle_result
from alerts.rllib_tests import handle_result as rllib_tests_handle_result
from alerts.long_running_tests import handle_result as long_running_tests_handle_result
from alerts.tune_tests import handle_result as tune_tests_handle_result
from alerts.xgboost_tests import handle_result as xgboost_tests_handle_result

# Per-suite alert handlers; anything not listed falls back to the default.
SUITE_TO_FN = {
    "long_running_tests": long_running_tests_handle_result,
    "rllib_tests": rllib_tests_handle_result,
    "tune_tests": tune_tests_handle_result,
    "xgboost_tests": xgboost_tests_handle_result,
}

GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"] = "alert_state"
GLOBAL_CONFIG["SLACK_WEBHOOK"] = os.environ.get("SLACK_WEBHOOK", "")
GLOBAL_CONFIG["SLACK_CHANNEL"] = os.environ.get("SLACK_CHANNEL", "#oss-test-cop")

# Maximum number of rows fetched per query.
RESULTS_LIMIT = 120

logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
                                  "%(filename)s: %(lineno)d "
                                  "%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)


def maybe_fetch_slack_webhook():
    """Populate the Slack webhook from AWS Secrets Manager if not set in the env."""
    if GLOBAL_CONFIG["SLACK_WEBHOOK"] in [None, ""]:
        print("Missing SLACK_WEBHOOK, retrieving from AWS secrets store")
        GLOBAL_CONFIG["SLACK_WEBHOOK"] = boto3.client(
            "secretsmanager", region_name="us-west-2"
        ).get_secret_value(
            SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
                     "release-automation/"
                     "slack-webhook-Na0CFP")["SecretString"]


def _obj_hash(obj: Any) -> str:
    """Return a stable sha256 hex digest of *obj* via canonical JSON."""
    json_str = json.dumps(obj, sort_keys=True, ensure_ascii=True)
    sha = hashlib.sha256()
    sha.update(json_str.encode())
    return sha.hexdigest()


def fetch_latest_alerts(rds_data_client):
    """Yield the most recent alert-state row per (category, suite, test).

    Yields tuples of (category, test_suite, test_name, last_result_hash,
    last_notification_dt).
    """
    schema = GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"]

    sql = (f"""
        SELECT DISTINCT ON (category, test_suite, test_name)
               category, test_suite, test_name, last_result_hash,
               last_notification_dt
        FROM   {schema}
        ORDER BY category, test_suite, test_name, last_notification_dt DESC
        LIMIT {RESULTS_LIMIT}
        """)

    result = rds_data_client.execute_statement(
        database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
        secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
        resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
        schema=schema,
        sql=sql,
    )
    for row in result["records"]:
        (category, test_suite, test_name, last_result_hash,
         last_notification_dt) = (
             r["stringValue"] if "stringValue" in r else None for r in row)
        last_notification_dt = datetime.datetime.strptime(
            last_notification_dt, "%Y-%m-%d %H:%M:%S")
        yield (category, test_suite, test_name, last_result_hash,
               last_notification_dt)


def fetch_latest_results(rds_data_client,
                         fetch_since: Optional[datetime.datetime] = None):
    """Yield the most recent result row per (category, suite, test).

    :param fetch_since: if given, only rows created at or after this timestamp.

    Yields (result_hash, created_on, category, test_suite, test_name, status,
    results, artifacts, last_logs).
    """
    schema = GLOBAL_CONFIG["RELEASE_AWS_DB_TABLE"]

    sql = (f"""
        SELECT DISTINCT ON (category, test_suite, test_name)
               created_on, category, test_suite, test_name, status, results,
               artifacts, last_logs
        FROM   {schema} """)

    parameters = []
    if fetch_since is not None:
        sql += "WHERE created_on >= :created_on "
        parameters = [
            {
                "name": "created_on",
                "typeHint": "TIMESTAMP",
                "value": {
                    "stringValue": fetch_since.strftime("%Y-%m-%d %H:%M:%S")
                },
            },
        ]

    sql += "ORDER BY category, test_suite, test_name, created_on DESC "
    sql += f"LIMIT {RESULTS_LIMIT}"

    result = rds_data_client.execute_statement(
        database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
        secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
        resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
        schema=schema,
        sql=sql,
        parameters=parameters,
    )
    for row in result["records"]:
        (created_on, category, test_suite, test_name, status, results,
         artifacts, last_logs) = (
             r["stringValue"] if "stringValue" in r else None for r in row)

        # Calculate hash before converting strings to objects, so that the
        # hash reflects the raw stored row (stable across runs).
        result_obj = (created_on, category, test_suite, test_name, status,
                      results, artifacts, last_logs)
        result_json = json.dumps(result_obj)
        result_hash = _obj_hash(result_json)

        # Convert some strings to python objects
        created_on = datetime.datetime.strptime(created_on,
                                                "%Y-%m-%d %H:%M:%S")
        results = json.loads(results)
        artifacts = json.loads(artifacts)

        yield (result_hash, created_on, category, test_suite, test_name,
               status, results, artifacts, last_logs)


def mark_as_handled(rds_data_client, update: bool, category: str,
                    test_suite: str, test_name: str, result_hash: str,
                    last_notification_dt: datetime.datetime):
    """Insert or update the alert-state row for the given test.

    :param update: True to UPDATE an existing row, False to INSERT a new one.
    """
    schema = GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"]

    if not update:
        sql = (f"""
            INSERT INTO {schema}
            (category, test_suite, test_name,
             last_result_hash, last_notification_dt)
            VALUES (:category, :test_suite, :test_name,
                    :last_result_hash, :last_notification_dt)
            """)
    else:
        sql = (f"""
            UPDATE {schema}
            SET last_result_hash=:last_result_hash,
                last_notification_dt=:last_notification_dt
            WHERE category=:category AND test_suite=:test_suite
            AND test_name=:test_name
            """)

    rds_data_client.execute_statement(
        database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
        parameters=[
            {"name": "category", "value": {"stringValue": category}},
            {"name": "test_suite", "value": {"stringValue": test_suite or ""}},
            {"name": "test_name", "value": {"stringValue": test_name}},
            {"name": "last_result_hash",
             "value": {"stringValue": result_hash}},
            {
                "name": "last_notification_dt",
                "typeHint": "TIMESTAMP",
                "value": {
                    "stringValue":
                        last_notification_dt.strftime("%Y-%m-%d %H:%M:%S")
                },
            },
        ],
        secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
        resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
        schema=schema,
        sql=sql,
    )


def post_alerts_to_slack(channel: str, alerts: List[Tuple[str, str, str, str]],
                         non_alerts: Mapping[str, int]):
    """Post one message listing every new failure, grouped by category."""
    if len(alerts) == 0:
        logger.info("No alerts to post to slack.")
        return

    markdown_lines = [
        f"* {len(alerts)} new release test failures found!*",
        "",
    ]

    category_alerts = defaultdict(list)
    for (category, test_suite, test_name, alert) in alerts:
        category_alerts[category].append(
            f"   *{test_suite}/{test_name}* failed: {alert}")

    for category, alert_list in category_alerts.items():
        markdown_lines.append(f"Branch: *{category}*")
        markdown_lines.extend(alert_list)
        markdown_lines.append("")

    total_non_alerts = sum(n for n in non_alerts.values())
    non_alert_detail = [f"{n} on {c}" for c, n in non_alerts.items()]

    markdown_lines += [
        f"Additionally, {total_non_alerts} tests passed successfully "
        f"({', '.join(non_alert_detail)})."
    ]

    slack_url = GLOBAL_CONFIG["SLACK_WEBHOOK"]

    resp = requests.post(
        slack_url,
        json={
            "text": "\n".join(markdown_lines),
            "channel": channel,
            "username": "Fail Bot",
            "icon_emoji": ":red_circle:",
        },
    )
    print(resp.status_code)
    print(resp.text)


def post_statistics_to_slack(channel: str,
                             alerts: List[Tuple[str, str, str, str]],
                             non_alerts: Mapping[str, int]):
    """Post a pass/fail summary of the last 24 hours, regardless of newness."""
    total_alerts = len(alerts)

    category_alerts = defaultdict(list)
    for (category, test_suite, test_name, alert) in alerts:
        category_alerts[category].append(f"`{test_suite}/{test_name}`")

    alert_detail = [f"{len(a)} on {c}" for c, a in category_alerts.items()]

    total_non_alerts = sum(n for n in non_alerts.values())
    non_alert_detail = [f"{n} on {c}" for c, n in non_alerts.items()]

    markdown_lines = [
        "*Periodic release test report*", "",
        f"In the past 24 hours, "
        f"*{total_non_alerts}* release tests finished successfully, and "
        f"*{total_alerts}* release tests failed."
    ]

    markdown_lines.append("")

    if total_alerts:
        markdown_lines.append(f"*Failing:* {', '.join(alert_detail)}")
        for c, a in category_alerts.items():
            markdown_lines.append(f"  *{c}*: {', '.join(sorted(a))}")
    else:
        markdown_lines.append("*Failing:* None")

    markdown_lines.append("")

    if total_non_alerts:
        markdown_lines.append(f"*Passing:* {', '.join(non_alert_detail)}")
    else:
        markdown_lines.append("*Passing:* None")

    slack_url = GLOBAL_CONFIG["SLACK_WEBHOOK"]

    resp = requests.post(
        slack_url,
        json={
            "text": "\n".join(markdown_lines),
            "channel": channel,
            "username": "Fail Bot",
            "icon_emoji": ":red_circle:",
        },
    )
    print(resp.status_code)
    print(resp.text)


def handle_results_and_get_alerts(
        rds_data_client,
        fetch_since: Optional[datetime.datetime] = None,
        always_try_alert: bool = False,
        no_status_update: bool = False):
    """Run every new result through its suite handler and collect alerts.

    :param fetch_since: restrict to results created after this timestamp.
    :param always_try_alert: evaluate every result, even unchanged ones.
    :param no_status_update: skip writing back to the alert-state table.
    :return: (alerts, non_alerts) where alerts is a list of
        (category, test_suite, test_name, alert) and non_alerts counts
        passing tests per category.
    """
    # First build a map of last notifications
    last_notifications_map = {}
    for (category, test_suite, test_name, last_result_hash,
         last_notification_dt) in fetch_latest_alerts(rds_data_client):
        last_notifications_map[(category, test_suite, test_name)] = (
            last_result_hash, last_notification_dt)

    alerts = []
    non_alerts = Counter()

    # Then fetch latest results
    for (result_hash, created_on, category, test_suite, test_name, status,
         results, artifacts, last_logs) in fetch_latest_results(
             rds_data_client, fetch_since=fetch_since):
        key = (category, test_suite, test_name)

        try_alert = always_try_alert
        if key in last_notifications_map:
            # If we have an alert for this key, fetch info
            last_result_hash, last_notification_dt = last_notifications_map[
                key]

            if last_result_hash != result_hash:
                # If we got a new result, handle new result
                try_alert = True
            # Todo: maybe alert again after some time?
        else:
            try_alert = True

        if try_alert:
            handle_fn = SUITE_TO_FN.get(test_suite, None)
            if not handle_fn:
                logger.warning(f"No handle for suite {test_suite}")
                alert = default_handle_result(created_on, category, test_suite,
                                              test_name, status, results,
                                              artifacts, last_logs)
            else:
                alert = handle_fn(created_on, category, test_suite, test_name,
                                  status, results, artifacts, last_logs)

            if alert:
                logger.warning(
                    f"Alert raised for test {test_suite}/{test_name} "
                    f"({category}): {alert}")

                alerts.append((category, test_suite, test_name, alert))
            else:
                logger.debug(
                    f"No alert raised for test {test_suite}/{test_name} "
                    f"({category})")
                non_alerts[category] += 1

            if not no_status_update:
                mark_as_handled(rds_data_client, key in last_notifications_map,
                                category, test_suite, test_name, result_hash,
                                datetime.datetime.now())

    return alerts, non_alerts


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--stats",
        action="store_true",
        default=False,
        # Fixed: the previous help text ("Finish quickly for training.") was a
        # copy-paste from an unrelated flag and did not describe this option.
        help="Post 24-hour pass/fail statistics instead of new alerts.")
    args = parser.parse_args()

    maybe_fetch_slack_webhook()

    rds_data_client = boto3.client("rds-data", region_name="us-west-2")

    if args.stats:
        # Only update last 24 hour stats
        fetch_since = datetime.datetime.now() - datetime.timedelta(days=1)
        alerts, non_alerts = handle_results_and_get_alerts(
            rds_data_client,
            fetch_since=fetch_since,
            always_try_alert=True,
            no_status_update=True)
        post_statistics_to_slack(GLOBAL_CONFIG["SLACK_CHANNEL"], alerts,
                                 non_alerts)
    else:
        alerts, non_alerts = handle_results_and_get_alerts(rds_data_client)
        post_alerts_to_slack(GLOBAL_CONFIG["SLACK_CHANNEL"], alerts,
                             non_alerts)
from typing import Optional, Sequence

import torch

from ...gpu import Device
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch
from .ddpg_impl import DDPGImpl


class TD3Impl(DDPGImpl):
    """DDPG variant with clipped-noise target smoothing (TD3)."""

    _target_smoothing_sigma: float
    _target_smoothing_clip: float

    def __init__(
        self,
        observation_shape: Sequence[int],
        action_size: int,
        actor_learning_rate: float,
        critic_learning_rate: float,
        actor_optim_factory: OptimizerFactory,
        critic_optim_factory: OptimizerFactory,
        actor_encoder_factory: EncoderFactory,
        critic_encoder_factory: EncoderFactory,
        q_func_factory: QFunctionFactory,
        gamma: float,
        tau: float,
        n_critics: int,
        target_reduction_type: str,
        target_smoothing_sigma: float,
        target_smoothing_clip: float,
        use_gpu: Optional[Device],
        scaler: Optional[Scaler],
        action_scaler: Optional[ActionScaler],
        reward_scaler: Optional[RewardScaler],
    ):
        # Everything except the smoothing parameters is handled by DDPG.
        super().__init__(
            observation_shape=observation_shape,
            action_size=action_size,
            actor_learning_rate=actor_learning_rate,
            critic_learning_rate=critic_learning_rate,
            actor_optim_factory=actor_optim_factory,
            critic_optim_factory=critic_optim_factory,
            actor_encoder_factory=actor_encoder_factory,
            critic_encoder_factory=critic_encoder_factory,
            q_func_factory=q_func_factory,
            gamma=gamma,
            tau=tau,
            n_critics=n_critics,
            target_reduction_type=target_reduction_type,
            use_gpu=use_gpu,
            scaler=scaler,
            action_scaler=action_scaler,
            reward_scaler=reward_scaler,
        )
        self._target_smoothing_sigma = target_smoothing_sigma
        self._target_smoothing_clip = target_smoothing_clip

    def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
        """Compute the TD target Q-value with clipped Gaussian smoothing noise."""
        assert self._targ_policy is not None
        assert self._targ_q_func is not None
        with torch.no_grad():
            action = self._targ_policy(batch.next_observations)
            # Smooth the target action: add clipped Gaussian noise, then keep
            # the result inside the valid [-1, 1] action range.
            noise = torch.randn(action.shape, device=batch.device)
            clipped_noise = (self._target_smoothing_sigma * noise).clamp(
                -self._target_smoothing_clip, self._target_smoothing_clip)
            clipped_action = (action + clipped_noise).clamp(-1.0, 1.0)
            return self._targ_q_func.compute_target(
                batch.next_observations,
                clipped_action,
                reduction=self._target_reduction_type,
            )
"""Send a JSON payload to a prediction endpoint in fixed-size chunks."""
import sys
import json
import time

import requests

# Number of instances per request; the comment and the slice below must agree.
CHUNK_SIZE = 15

if len(sys.argv) < 3:
    # Both the endpoint URL and the Host header value are required.
    raise Exception("Usage: predict.py <endpoint> <host-header>")

endpoint = sys.argv[1]
headers = {'Host': sys.argv[2]}

with open('input.json') as file:
    sample_file = json.load(file)
inputs = sample_file["instances"]

# Split inputs into chunks of size CHUNK_SIZE and send them to the predict
# server. Fixed two defects: the loop previously sliced [x:x+20] while
# stepping by 15 (overlapping chunks re-sent 5 instances each iteration),
# and an extra request sent the ENTIRE payload once before the loop.
print("Sending prediction requests...")
time_before = time.time()
res = None
for x in range(0, len(inputs), CHUNK_SIZE):
    payload = {"instances": inputs[x:x + CHUNK_SIZE]}
    res = requests.post(endpoint, json=payload, headers=headers)
    print(res)
    if not res.ok:
        res.raise_for_status()

print("TIME TAKEN: ", time.time() - time_before)
# Guard: with an empty "instances" list no request is made at all.
if res is not None:
    print("Last response: ", res.json())
import io
import zlib

import numpy as np


def maybe_compress(data: bytes, compress: bool) -> bytes:
    """Return *data* zlib-compressed when *compress* is true, else unchanged."""
    # Renamed the parameter from `str`, which shadowed the builtin.
    return zlib.compress(data) if compress else data


def maybe_decompress(data: bytes, decompress: bool) -> bytes:
    """Return *data* zlib-decompressed when *decompress* is true, else unchanged."""
    return zlib.decompress(data) if decompress else data


def serialize_numpy(arr: np.ndarray, compress: bool = False) -> bytes:
    """Serializes numpy array to bytes with optional zlib compression.

    Args:
        arr (np.ndarray): Numpy array to serialize.
        compress (bool, optional): Whether to compress the resulting bytes with
            zlib or not. Defaults to False.

    Returns:
        bytes: serialized byte string (np.save format, optionally compressed).
            The previous ``str`` annotation was wrong: np.save and zlib both
            produce ``bytes``.
    """
    buf = io.BytesIO()
    assert isinstance(arr, np.ndarray)
    np.save(buf, arr)
    return maybe_compress(buf.getvalue(), compress)


def deserialize_numpy(serialized_string: bytes, decompress: bool = False) -> np.ndarray:
    """Deserializes a numpy array from (optionally compressed) bytes.

    Args:
        serialized_string (bytes): Serialized numpy array, as produced by
            :func:`serialize_numpy`.
        decompress (bool, optional): Whether to decompress with zlib before
            loading. Defaults to False.

    Returns:
        np.ndarray: deserialized numpy array
    """
    raw = maybe_decompress(serialized_string, decompress)
    # np.load with default allow_pickle=False: object arrays are rejected.
    return np.load(io.BytesIO(raw))
<import_stmt>hubspot.crm.extensions.videoconferencing<as>api_client<import_from_stmt>....discovery_base DiscoveryBase<class_stmt>Discovery(DiscoveryBase)<block_start>@property<def_stmt>settings_api self<arrow>api_client.SettingsApi<block_start><return>self._configure_api_client(api_client "SettingsApi")<block_end><block_end> |
# Test cases that are expected to fail, e.g. unimplemented features or
# bug-fixes. Remove from list when fixed.
xfail = {
    "namespace_keywords",  # 70
    "googletypes_struct",  # 9
    "googletypes_value",  # 9
    "import_capitalized_package",
    "example",  # This is the example in the readme. Not a test.
}

# Scenarios that exercise gRPC service definitions rather than plain messages.
services = {
    "googletypes_response",
    "googletypes_response_embedded",
    "service",
    "service_separate_packages",
    "import_service_input_message",
    "googletypes_service_returns_empty",
    "googletypes_service_returns_googletype",
    "example_service",
    "empty_service",
}

# Indicate json sample messages to skip when testing that json (de)serialization
# is symmetrical becuase some cases legitimately are not symmetrical.
# Each key references the name of the test scenario and the values in the tuple
# are the names of the json files.
non_symmetrical_json = {"empty_repeated": ("empty_repeated",)}
import numpy as np

from matplotlib import _api
from .axes_divider import make_axes_locatable, Size
from .mpl_axes import Axes


@_api.delete_parameter("3.3", "add_all")
def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True, **kwargs):
    """
    Split *ax* into an RGB composite plus three channel axes (R, G, B).

    Parameters
    ----------
    pad : float
        Fraction of the axes height used as spacing.
    """
    divider = make_axes_locatable(ax)

    pad_size = pad * Size.AxesY(ax)
    # Each channel panel takes a third of the remaining height/width.
    xsize = ((1 - 2 * pad) / 3) * Size.AxesX(ax)
    ysize = ((1 - 2 * pad) / 3) * Size.AxesY(ax)

    divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
    divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])

    ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))

    if axes_class is None:
        try:
            axes_class = ax._axes_class
        except AttributeError:
            axes_class = type(ax)

    ax_rgb = []
    # Top-to-bottom channel panels: ny 4 (R), 2 (G), 0 (B).
    for ny in [4, 2, 0]:
        channel_ax = axes_class(ax.get_figure(), ax.get_position(original=True),
                                sharex=ax, sharey=ax, **kwargs)
        channel_ax.set_axes_locator(divider.new_locator(nx=2, ny=ny))

        for tick in (channel_ax.yaxis.get_ticklabels()
                     + channel_ax.xaxis.get_ticklabels()):
            tick.set_visible(False)
        try:
            for axis in channel_ax.axis.values():
                axis.major_ticklabels.set_visible(False)
        except AttributeError:
            pass

        ax_rgb.append(channel_ax)

    if add_all:
        fig = ax.get_figure()
        for channel_ax in ax_rgb:
            fig.add_axes(channel_ax)

    return ax_rgb


@_api.deprecated("3.3", alternative="ax.imshow(np.dstack([r, g, b]))")
def imshow_rgb(ax, r, g, b, **kwargs):
    return ax.imshow(np.dstack([r, g, b]), **kwargs)


class RGBAxes:
    """
    4-panel imshow (RGB, R, G, B).

    Layout:

        +---------------+-----+
        |               |  R  |
        +               +-----+
        |      RGB      |  G  |
        +               +-----+
        |               |  B  |
        +---------------+-----+

    Subclasses can override the ``_defaultAxesClass`` attribute.

    Attributes
    ----------
    RGB : ``_defaultAxesClass``
        The axes object for the three-channel imshow.
    R : ``_defaultAxesClass``
        The axes object for the red channel imshow.
    G : ``_defaultAxesClass``
        The axes object for the green channel imshow.
    B : ``_defaultAxesClass``
        The axes object for the blue channel imshow.
    """

    _defaultAxesClass = Axes

    @_api.delete_parameter("3.3", "add_all")
    def __init__(self, *args, pad=0, add_all=True, **kwargs):
        """
        Parameters
        ----------
        pad : float, default: 0
            Fraction of the axes height to put as padding.
        add_all : bool, default: True
            Whether to add the {rgb, r, g, b} axes to the figure.
            This parameter is deprecated.
        axes_class : matplotlib.axes.Axes
        *args
            Unpacked into axes_class() init for RGB
        **kwargs
            Unpacked into axes_class() init for RGB, R, G, B axes
        """
        axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
        self.RGB = ax = axes_class(*args, **kwargs)
        if add_all:
            ax.get_figure().add_axes(ax)
        else:
            kwargs["add_all"] = add_all  # only show deprecation in that case
        self.R, self.G, self.B = make_rgb_axes(
            ax, pad=pad, axes_class=axes_class, **kwargs)
        # Set the line color and ticks for the axes.
        for ax1 in [self.RGB, self.R, self.G, self.B]:
            ax1.axis[:].line.set_color("w")
            ax1.axis[:].major_ticks.set_markeredgecolor("w")

    @_api.deprecated("3.3")
    def add_RGB_to_figure(self):
        """Add red, green and blue axes to the RGB composite's axes figure."""
        fig = self.RGB.get_figure()
        fig.add_axes(self.R)
        fig.add_axes(self.G)
        fig.add_axes(self.B)

    def imshow_rgb(self, r, g, b, **kwargs):
        """
        Create the four images {rgb, r, g, b}.

        Parameters
        ----------
        r, g, b : array-like
            The red, green, and blue arrays.
        kwargs : imshow kwargs
            kwargs get unpacked into the imshow calls for the four images.

        Returns
        -------
        rgb : matplotlib.image.AxesImage
        r : matplotlib.image.AxesImage
        g : matplotlib.image.AxesImage
        b : matplotlib.image.AxesImage
        """
        if not (r.shape == g.shape == b.shape):
            raise ValueError(
                f'Input shapes ({r.shape}, {g.shape}, {b.shape}) do not match')
        RGB = np.dstack([r, g, b])
        # Single-channel versions keep the other two planes at zero.
        R = np.zeros_like(RGB)
        R[:, :, 0] = r
        G = np.zeros_like(RGB)
        G[:, :, 1] = g
        B = np.zeros_like(RGB)
        B[:, :, 2] = b
        im_rgb = self.RGB.imshow(RGB, **kwargs)
        im_r = self.R.imshow(R, **kwargs)
        im_g = self.G.imshow(G, **kwargs)
        im_b = self.B.imshow(B, **kwargs)
        return im_rgb, im_r, im_g, im_b


@_api.deprecated("3.3", alternative="RGBAxes")
class RGBAxesBase(RGBAxes):
    pass
<import_stmt>pytest<import_from_stmt>weasyl http<line_sep>@pytest.mark.parametrize(('wsgi_env' 'expected') [({} {}) ({'PATH_INFO':'/search' 'QUERY_STRING':'q=example'} {}) ({'HTTP_ACCEPT':'*/*'} {'Accept':'*/*'}) ({'CONTENT_LENGTH':'' 'HTTP_ACCEPT_ENCODING':'gzip' 'HTTP_UPGRADE_INSECURE_REQUESTS':'1'} {'Accept-Encoding':'gzip' 'Upgrade-Insecure-Requests':'1'} ) ])<def_stmt>test_get_headers wsgi_env expected<block_start><assert_stmt>http.get_headers(wsgi_env)<eq>expected<block_end> |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import abc
import ast
import io
import tokenize
from pathlib import Path
from typing import Iterable, Optional

from fixit.common.report import BaseLintRuleReport


class PseudoContext:
    """
    Holds the file information that `PseudoLintRule.lint_file` evaluates.

    Tokens and the AST are computed lazily on first access and cached.
    """

    def __init__(
        self,
        file_path: Path,
        source: bytes,
        tokens: Optional[Iterable[tokenize.TokenInfo]] = None,
        ast_tree: Optional[ast.Module] = None,
    ) -> None:
        self.file_path: Path = file_path
        self.source: bytes = source
        self._tokens: Optional[Iterable[tokenize.TokenInfo]] = tokens
        self._ast_tree: Optional[ast.Module] = ast_tree

    @property
    def tokens(self) -> Iterable[tokenize.TokenInfo]:
        # Tokenize once, cache as an immutable tuple.
        if self._tokens is None:
            self._tokens = tuple(
                tokenize.tokenize(io.BytesIO(self.source).readline))
        return self._tokens

    @property
    def ast_tree(self) -> ast.Module:
        # Parse once, cache the module node.
        if self._ast_tree is None:
            self._ast_tree = ast.parse(self.source)
        return self._ast_tree


class PseudoLintRule(abc.ABC):
    """
    Represents a lint rule (or a group of lint rules) that can't be represented
    by a normal lint rule. These "pseudo" lint rules receive information about
    the file from the `PseudoContext`.

    This API is much more flexible than the normal lint rule API, but that
    comes at a (potentially large) performance cost. Because the lint framework
    does not control traversal of the syntax tree, it cannot batch the
    execution of these rules alongside other lint rules.

    This API is used for compatibility with Flake8 rules.
    """

    def __init__(self, context: PseudoContext) -> None:
        self.context: PseudoContext = context

    @abc.abstractmethod
    def lint_file(self) -> Iterable[BaseLintRuleReport]:
        ...
<import_from_stmt>homeschool.referrals.tests.factories ReferralFactory<import_from_stmt>homeschool.test TestCase<class_stmt>TestReferral(TestCase)<block_start><def_stmt>test_factory self<block_start>referral=ReferralFactory()<assert_stmt>referral.referring_user<is><not><none><assert_stmt>referral.created_at<is><not><none><assert_stmt>referral.status<eq>referral.Status.PENDING<block_end><block_end> |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Test update of link labels."""<import_from_stmt>uuid uuid4<import_from_stmt>aiida.common timezone<import_from_stmt>aiida.storage.psql_dos.migrator PsqlDostoreMigrator<def_stmt>test_legacy_jobcalc_attrs perform_migrations:PsqlDostoreMigrator<block_start>"""Test update of link labels."""<line_sep># starting revision
perform_migrations.migrate_up('django@django_0042')<line_sep># setup the database
user_model=perform_migrations.get_current_table('db_dbuser')<line_sep>node_model=perform_migrations.get_current_table('db_dbnode')<line_sep>link_model=perform_migrations.get_current_table('db_dblink')<with_stmt>perform_migrations.session()<as>session<block_start>user=user_model(email='<EMAIL>' first_name='John' last_name='Doe' institution='EPFL' )<line_sep>session.add(user)<line_sep>session.commit()<line_sep>node_process=node_model(uuid=str(uuid4()) node_type='process.calculation.calcjob.CalcJobNode.' label='test' description='' user_id=user.id ctime=timezone.now() mtime=timezone.now() )<line_sep>node_data=node_model(uuid=str(uuid4()) node_type='data.core.dict.Dict.' label='test' description='' user_id=user.id ctime=timezone.now() mtime=timezone.now() )<line_sep>session.add(node_process)<line_sep>session.add(node_data)<line_sep>session.commit()<line_sep>link=link_model(input_id=node_data.id output_id=node_process.id type='input' label='_return' )<line_sep>session.add(link)<line_sep>session.commit()<line_sep>link_id=link.id<block_end># final revision
perform_migrations.migrate_up('django@django_0043')<line_sep>link_model=perform_migrations.get_current_table('db_dblink')<with_stmt>perform_migrations.session()<as>session<block_start>link=session.get(link_model link_id)<assert_stmt>link.label<eq>'result'<block_end><block_end> |
<def_stmt>bubblesort L<block_start>keepgoing=<true><while_stmt>keepgoing<block_start>keepgoing=<false><for_stmt>i range(len(L)-1)<block_start><if_stmt>L[i]<g>L[i+1]<block_start>L[i],L[i+1]=L[i+1] L[i]<line_sep>keepgoing=<true><block_end><block_end><block_end><block_end> |
'''
Function(s) for the postanalysis toolkit
'''<import_stmt>logging<line_sep>log=logging.getLogger(__name__)<import_from_stmt>. _reweight<import_from_stmt>._reweight stats_process reweight_for_c <import_from_stmt>.matrix FluxMatrix<line_sep> |
# Copyright (C) <NAME> 2019. All rights reserved.
<import_stmt>argparse<import_from_stmt>kitti_odometry KittiEvalOdom<line_sep>parser=argparse.ArgumentParser(description='KITTI evaluation')<line_sep>parser.add_argument('--result' type=str required=<true> help="Result directory")<line_sep>parser.add_argument('--align' type=str choices=['scale' 'scale_7dof' '7dof' '6dof'] default=<none> help="alignment type")<line_sep>parser.add_argument('--seqs' nargs="+" type=int help="sequences to be evaluated" default=<none>)<line_sep>args=parser.parse_args()<line_sep>eval_tool=KittiEvalOdom()<line_sep>gt_dir="dataset/kitti_odom/gt_poses/"<line_sep>result_dir=args.result<line_sep>continue_flag=input("Evaluate result in {}? [y/n]".format(result_dir))<if_stmt>continue_flag<eq>"y"<block_start>eval_tool.eval(gt_dir result_dir alignment=args.align seqs=args.seqs )<block_end><else_stmt><block_start>print("Double check the path!")<block_end> |
#original from https://github.com/csinva/hierarchical-dnn-interpretations/blob/master/acd/scores/cd.py
<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_from_stmt>copy deepcopy<import_from_stmt>torch sigmoid<import_from_stmt>torch tanh<import_stmt>numpy<as>np<line_sep>stabilizing_constant=10e-20<def_stmt>propagate_three a b c activation<block_start>a_contrib=0.5<times>(activation(a+c)-activation(c)+activation(a+b+c)-activation(b+c))<line_sep>b_contrib=0.5<times>(activation(b+c)-activation(c)+activation(a+b+c)-activation(a+c))<line_sep><return>a_contrib b_contrib activation(c)<block_end># propagate tanh nonlinearity
<def_stmt>propagate_tanh_two a b<block_start><return>0.5<times>(tanh(a)+(tanh(a+b)-tanh(b))) 0.5<times>(tanh(b)+(tanh(a+b)-tanh(a)))<block_end># propagate convolutional or linear layer
<def_stmt>propagate_conv_linear relevant irrelevant module device='cuda'<block_start>bias=module(torch.zeros(irrelevant.size()).to(device))<line_sep>rel=module(relevant)-bias<line_sep>irrel=module(irrelevant)-bias<line_sep># elementwise proportional
prop_rel=torch.abs(rel)<line_sep>prop_irrel=torch.abs(irrel)<line_sep>prop_sum=prop_rel+prop_irrel+stabilizing_constant<line_sep>prop_rel=torch.div(prop_rel prop_sum)<line_sep>prop_irrel=torch.div(prop_irrel prop_sum)<line_sep><return>rel+torch.mul(prop_rel bias) irrel+torch.mul(prop_irrel bias)<block_end><def_stmt>propagate_AdaptiveAvgPool2d relevant irrelevant module device='cuda'<block_start>rel=module(relevant)<line_sep>irrel=module(irrelevant)<line_sep><return>rel irrel<block_end># propagate ReLu nonlinearity
<def_stmt>propagate_relu relevant irrelevant activation device='cuda'<block_start>swap_inplace=<false><try_stmt># handles inplace
<block_start><if_stmt>activation.inplace<block_start>swap_inplace=<true><line_sep>activation.inplace=<false><block_end><block_end><except_stmt><block_start><pass><block_end>zeros=torch.zeros(relevant.size()).to(device)<line_sep>rel_score=activation(relevant)<line_sep>irrel_score=activation(relevant+irrelevant)-activation(relevant)<if_stmt>swap_inplace<block_start>activation.inplace=<true><block_end><return>rel_score irrel_score<block_end># propagate maxpooling operation
<def_stmt>propagate_pooling relevant irrelevant pooler model_type='mnist'<block_start><if_stmt>model_type<eq>'mnist'<block_start>unpool=torch.nn.MaxUnpool2d(kernel_size=2 stride=2)<line_sep>avg_pooler=torch.nn.AvgPool2d(kernel_size=2 stride=2)<line_sep>window_size=4<block_end><elif_stmt>model_type<eq>'vgg'<block_start>unpool=torch.nn.MaxUnpool2d(kernel_size=pooler.kernel_size stride=pooler.stride)<line_sep>avg_pooler=torch.nn.AvgPool2d(kernel_size=(pooler.kernel_size pooler.kernel_size) stride=(pooler.stride pooler.stride) count_include_pad=<false>)<line_sep>window_size=4<block_end># get both indices
p=deepcopy(pooler)<line_sep>p.return_indices=<true><line_sep>both,both_ind=p(relevant+irrelevant)<line_sep>ones_out=torch.ones_like(both)<line_sep>size1=relevant.size()<line_sep>mask_both=unpool(ones_out both_ind output_size=size1)<line_sep># relevant
rel=mask_both<times>relevant<line_sep>rel=avg_pooler(rel)<times>window_size<line_sep># irrelevant
irrel=mask_both<times>irrelevant<line_sep>irrel=avg_pooler(irrel)<times>window_size<line_sep><return>rel irrel<block_end># propagate dropout operation
<def_stmt>propagate_dropout relevant irrelevant dropout<block_start><return>dropout(relevant) dropout(irrelevant)<block_end># get contextual decomposition scores for blob
<def_stmt>cd blob im_torch model model_type='mnist' device='cuda'# set up model
<block_start>model.eval()<line_sep>im_torch=im_torch.to(device)<line_sep># set up blobs
blob=torch.FloatTensor(blob).to(device)<line_sep>relevant=blob<times>im_torch<line_sep>irrelevant=(1-blob)<times>im_torch<if_stmt>model_type<eq>'mnist'<block_start>scores=[]<line_sep>mods=list(model.modules())[1:]<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[0])<line_sep>relevant,irrelevant=propagate_pooling(relevant irrelevant <lambda>x:F.max_pool2d(x 2 return_indices=<true>) model_type='mnist')<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant F.relu)<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[1])<line_sep>relevant,irrelevant=propagate_pooling(relevant irrelevant <lambda>x:F.max_pool2d(x 2 return_indices=<true>) model_type='mnist')<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant F.relu)<line_sep>relevant=relevant.view(-1 800)<line_sep>irrelevant=irrelevant.view(-1 800)<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[2])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant F.relu)<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[3])<block_end><else_stmt><block_start>mods=list(model.modules())<for_stmt>i,mod enumerate(mods)<block_start>t=str(type(mod))<if_stmt>'Conv2d'<in>t<or>'Linear'<in>t<block_start><if_stmt>'Linear'<in>t<block_start>relevant=relevant.view(relevant.size(0) -1)<line_sep>irrelevant=irrelevant.view(irrelevant.size(0) -1)<block_end>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mod)<block_end><elif_stmt>'ReLU'<in>t<block_start>relevant,irrelevant=propagate_relu(relevant irrelevant mod)<block_end><elif_stmt>'MaxPool2d'<in>t<block_start>relevant,irrelevant=propagate_pooling(relevant irrelevant mod model_type=model_type)<block_end><elif_stmt>'Dropout'<in>t<block_start>relevant,irrelevant=propagate_dropout(relevant irrelevant mod)<block_end><block_end><block_end><return>relevant irrelevant<block_end># batch of [start, stop) with unigrams working
<def_stmt>cd_batch_text batch model start stop my_device=0# rework for
<block_start>weights=model.lstm<line_sep># Index one = word vector (i) or hidden state (h), index two = gate
W_ii,W_if,W_ig,W_io=torch.chunk(weights.weight_ih_l0 4 0)<line_sep>W_hi,W_hf,W_hg,W_ho=torch.chunk(weights.weight_hh_l0 4 0)<line_sep>b_i,b_f,b_g,b_o=torch.chunk(weights.bias_ih_l0+weights.bias_hh_l0 4)<line_sep>word_vecs=torch.transpose(model.embed(batch.text).data 1 2)#change: we take all
T=word_vecs.shape[0]<line_sep>batch_size=word_vecs.shape[2]<line_sep>relevant_h=torch.zeros((model.hidden_dim batch_size) device=torch.device(my_device) requires_grad=<false>)<line_sep>irrelevant_h=torch.zeros((model.hidden_dim batch_size) device=torch.device(my_device) requires_grad=<false>)<line_sep>prev_rel=torch.zeros((model.hidden_dim batch_size) device=torch.device(my_device) requires_grad=<false>)<line_sep>prev_irrel=torch.zeros((model.hidden_dim batch_size) device=torch.device(my_device) requires_grad=<false>)<for_stmt>i range(T)<block_start>prev_rel_h=relevant_h<line_sep>prev_irrel_h=irrelevant_h<line_sep>rel_i=torch.matmul(W_hi prev_rel_h)<line_sep>rel_g=torch.matmul(W_hg prev_rel_h)<line_sep>rel_f=torch.matmul(W_hf prev_rel_h)<line_sep>rel_o=torch.matmul(W_ho prev_rel_h)<line_sep>irrel_i=torch.matmul(W_hi prev_irrel_h)<line_sep>irrel_g=torch.matmul(W_hg prev_irrel_h)<line_sep>irrel_f=torch.matmul(W_hf prev_irrel_h)<line_sep>irrel_o=torch.matmul(W_ho prev_irrel_h)<if_stmt>i<ge>start<and>i<le>stop<block_start>rel_i=rel_i+torch.matmul(W_ii word_vecs[i])<line_sep>rel_g=rel_g+torch.matmul(W_ig word_vecs[i])<line_sep>rel_f=rel_f+torch.matmul(W_if word_vecs[i])<line_sep>rel_o=rel_o+torch.matmul(W_io word_vecs[i])<block_end><else_stmt><block_start>irrel_i=irrel_i+torch.matmul(W_ii word_vecs[i])<line_sep>irrel_g=irrel_g+torch.matmul(W_ig word_vecs[i])<line_sep>irrel_f=irrel_f+torch.matmul(W_if word_vecs[i])<line_sep>irrel_o=irrel_o+torch.matmul(W_io word_vecs[i])<block_end>rel_contrib_i,irrel_contrib_i,bias_contrib_i=propagate_three(rel_i irrel_i b_i[: <none>] sigmoid)<line_sep>rel_contrib_g,irrel_contrib_g,bias_contrib_g=propagate_three(rel_g irrel_g b_g[: <none>] 
tanh)<line_sep>relevant=rel_contrib_i<times>(rel_contrib_g+bias_contrib_g)+bias_contrib_i<times>rel_contrib_g<line_sep>irrelevant=irrel_contrib_i<times>(rel_contrib_g+irrel_contrib_g+bias_contrib_g)+(rel_contrib_i+bias_contrib_i)<times>irrel_contrib_g<if_stmt>i<ge>start<and>i<l>stop<block_start>relevant=relevant+bias_contrib_i<times>bias_contrib_g<block_end><else_stmt><block_start>irrelevant=irrelevant+bias_contrib_i<times>bias_contrib_g<block_end><if_stmt>i<g>0<block_start>rel_contrib_f,irrel_contrib_f,bias_contrib_f=propagate_three(rel_f irrel_f b_f[: <none>] sigmoid)<line_sep>relevant=relevant+(rel_contrib_f+bias_contrib_f)<times>prev_rel<line_sep>irrelevant=irrelevant+(rel_contrib_f+irrel_contrib_f+bias_contrib_f)<times>prev_irrel+irrel_contrib_f<times>prev_rel<block_end>o=sigmoid(torch.matmul(W_io word_vecs[i])+torch.matmul(W_ho prev_rel_h+prev_irrel_h)+b_o[: <none>])<line_sep>new_rel_h,new_irrel_h=propagate_tanh_two(relevant irrelevant)<line_sep>relevant_h=o<times>new_rel_h<line_sep>irrelevant_h=o<times>new_irrel_h<line_sep>prev_rel=relevant<line_sep>prev_irrel=irrelevant<block_end>W_out=model.hidden_to_label.weight<line_sep># Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
scores=torch.matmul(W_out relevant_h)<line_sep>irrel_scores=torch.matmul(W_out irrelevant_h)<line_sep>#tolerance = 0.001
#assert torch.sum(torch.abs((model.forward(batch) -model.hidden_to_label.bias.data) - (scores+irrel_scores))).cpu().detach().numpy() < tolerance
<return>scores irrel_scores<block_end><def_stmt>cd_text_irreg_scores batch_text model start stop my_device=0<block_start>weights=model.lstm<line_sep># Index one = word vector (i) or hidden state (h), index two = gate
W_ii,W_if,W_ig,W_io=torch.chunk(weights.weight_ih_l0 4 0)<line_sep>W_hi,W_hf,W_hg,W_ho=torch.chunk(weights.weight_hh_l0 4 0)<line_sep>b_i,b_f,b_g,b_o=torch.chunk(weights.bias_ih_l0+weights.bias_hh_l0 4)<line_sep>word_vecs=torch.transpose(model.embed(batch_text).data 1 2)#change: we take all
T=word_vecs.shape[0]<line_sep>batch_size=word_vecs.shape[2]<line_sep>relevant_h=torch.zeros((model.hidden_dim batch_size) device=torch.device(my_device) requires_grad=<false>)<line_sep>irrelevant_h=torch.zeros((model.hidden_dim batch_size) device=torch.device(my_device) requires_grad=<false>)<line_sep>prev_rel=torch.zeros((model.hidden_dim batch_size) device=torch.device(my_device) requires_grad=<false>)<line_sep>prev_irrel=torch.zeros((model.hidden_dim batch_size) device=torch.device(my_device) requires_grad=<false>)<for_stmt>i range(T)<block_start>prev_rel_h=relevant_h<line_sep>prev_irrel_h=irrelevant_h<line_sep>rel_i=torch.matmul(W_hi prev_rel_h)<line_sep>rel_g=torch.matmul(W_hg prev_rel_h)<line_sep>rel_f=torch.matmul(W_hf prev_rel_h)<line_sep>rel_o=torch.matmul(W_ho prev_rel_h)<line_sep>irrel_i=torch.matmul(W_hi prev_irrel_h)<line_sep>irrel_g=torch.matmul(W_hg prev_irrel_h)<line_sep>irrel_f=torch.matmul(W_hf prev_irrel_h)<line_sep>irrel_o=torch.matmul(W_ho prev_irrel_h)<line_sep>w_ii_contrib=torch.matmul(W_ii word_vecs[i])<line_sep>w_ig_contrib=torch.matmul(W_ig word_vecs[i])<line_sep>w_if_contrib=torch.matmul(W_if word_vecs[i])<line_sep>w_io_contrib=torch.matmul(W_io word_vecs[i])<line_sep>is_in_relevant=((start<le>i)<times>(i<le>stop)).cuda().float()<line_sep>is_not_in_relevant=1-is_in_relevant<line_sep>rel_i=rel_i+is_in_relevant<times>w_ii_contrib<line_sep>rel_g=rel_g+is_in_relevant<times>w_ig_contrib<line_sep>rel_f=rel_f+is_in_relevant<times>w_if_contrib<line_sep>rel_o=rel_o+is_in_relevant<times>w_io_contrib<line_sep>irrel_i=irrel_i+is_not_in_relevant<times>w_ii_contrib<line_sep>irrel_g=irrel_g+is_not_in_relevant<times>w_ig_contrib<line_sep>irrel_f=irrel_f+is_not_in_relevant<times>w_if_contrib<line_sep>irrel_o=irrel_o+is_not_in_relevant<times>w_io_contrib<line_sep>rel_contrib_i,irrel_contrib_i,bias_contrib_i=propagate_three(rel_i irrel_i b_i[: <none>] sigmoid)<line_sep>rel_contrib_g,irrel_contrib_g,bias_contrib_g=propagate_three(rel_g irrel_g b_g[: <none>] 
tanh)<line_sep>relevant=rel_contrib_i<times>(rel_contrib_g+bias_contrib_g)+bias_contrib_i<times>rel_contrib_g<line_sep>irrelevant=irrel_contrib_i<times>(rel_contrib_g+irrel_contrib_g+bias_contrib_g)+(rel_contrib_i+bias_contrib_i)<times>irrel_contrib_g<line_sep>bias_contrib=bias_contrib_i<times>bias_contrib_g<line_sep>is_in_relevant_bias=((start<le>i)<times>(i<l>stop)).cuda().float()<line_sep>is_not_in_relevant_bias=1-is_in_relevant_bias<line_sep>relevant=relevant+is_in_relevant_bias<times>bias_contrib<line_sep>irrelevant=irrelevant+is_not_in_relevant_bias<times>bias_contrib<if_stmt>i<g>0<block_start>rel_contrib_f,irrel_contrib_f,bias_contrib_f=propagate_three(rel_f irrel_f b_f[: <none>] sigmoid)<line_sep>relevant=relevant+(rel_contrib_f+bias_contrib_f)<times>prev_rel<line_sep>irrelevant=irrelevant+(rel_contrib_f+irrel_contrib_f+bias_contrib_f)<times>prev_irrel+irrel_contrib_f<times>prev_rel<block_end>o=sigmoid(torch.matmul(W_io word_vecs[i])+torch.matmul(W_ho prev_rel_h+prev_irrel_h)+b_o[: <none>])<line_sep>new_rel_h,new_irrel_h=propagate_tanh_two(relevant irrelevant)<line_sep>relevant_h=o<times>new_rel_h<line_sep>irrelevant_h=o<times>new_irrel_h<line_sep>prev_rel=relevant<line_sep>prev_irrel=irrelevant<block_end>W_out=model.hidden_to_label.weight<line_sep># Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
scores=torch.matmul(W_out relevant_h)<line_sep>irrel_scores=torch.matmul(W_out irrelevant_h)<line_sep><return>scores irrel_scores<block_end><def_stmt>cd_text batch model start stop batch_id=0 my_device=0# rework for
<block_start>weights=model.lstm.state_dict()<line_sep># Index one = word vector (i) or hidden state (h), index two = gate
W_ii,W_if,W_ig,W_io=torch.chunk(weights['weight_ih_l0'] 4 0)<line_sep>W_hi,W_hf,W_hg,W_ho=torch.chunk(weights['weight_hh_l0'] 4 0)<line_sep>b_i,b_f,b_g,b_o=torch.chunk(weights['bias_ih_l0']+weights['bias_hh_l0'] 4)<line_sep>word_vecs=model.embed(batch.text)[: batch_id].data<line_sep>T=word_vecs.shape[0]<line_sep>relevant=torch.zeros((T model.hidden_dim) device=torch.device(my_device))<line_sep>irrelevant=torch.zeros((T model.hidden_dim) device=torch.device(my_device))<line_sep>relevant_h=torch.zeros((T model.hidden_dim) device=torch.device(my_device))<line_sep>irrelevant_h=torch.zeros((T model.hidden_dim) device=torch.device(my_device))<for_stmt>i range(T)<block_start><if_stmt>i<g>0<block_start>prev_rel_h=relevant_h[i-1]<line_sep>prev_irrel_h=irrelevant_h[i-1]<block_end><else_stmt><block_start>prev_rel_h=torch.zeros(model.hidden_dim device=torch.device(my_device))<line_sep>prev_irrel_h=torch.zeros(model.hidden_dim device=torch.device(my_device))<block_end>rel_i=torch.matmul(W_hi prev_rel_h)<line_sep>rel_g=torch.matmul(W_hg prev_rel_h)<line_sep>rel_f=torch.matmul(W_hf prev_rel_h)<line_sep>rel_o=torch.matmul(W_ho prev_rel_h)<line_sep>irrel_i=torch.matmul(W_hi prev_irrel_h)<line_sep>irrel_g=torch.matmul(W_hg prev_irrel_h)<line_sep>irrel_f=torch.matmul(W_hf prev_irrel_h)<line_sep>irrel_o=torch.matmul(W_ho prev_irrel_h)<if_stmt>start<le>i<le>stop<block_start>rel_i=rel_i+torch.matmul(W_ii word_vecs[i])<line_sep>rel_g=rel_g+torch.matmul(W_ig word_vecs[i])<line_sep>rel_f=rel_f+torch.matmul(W_if word_vecs[i])<line_sep>rel_o=rel_o+torch.matmul(W_io word_vecs[i])<block_end><else_stmt><block_start>irrel_i=irrel_i+torch.matmul(W_ii word_vecs[i])<line_sep>irrel_g=irrel_g+torch.matmul(W_ig word_vecs[i])<line_sep>irrel_f=irrel_f+torch.matmul(W_if word_vecs[i])<line_sep>irrel_o=irrel_o+torch.matmul(W_io word_vecs[i])<block_end>rel_contrib_i,irrel_contrib_i,bias_contrib_i=propagate_three(rel_i irrel_i b_i 
sigmoid)<line_sep>rel_contrib_g,irrel_contrib_g,bias_contrib_g=propagate_three(rel_g irrel_g b_g tanh)<line_sep>relevant[i]=rel_contrib_i<times>(rel_contrib_g+bias_contrib_g)+bias_contrib_i<times>rel_contrib_g<line_sep>irrelevant[i]=irrel_contrib_i<times>(rel_contrib_g+irrel_contrib_g+bias_contrib_g)+(rel_contrib_i+bias_contrib_i)<times>irrel_contrib_g<if_stmt>start<le>i<le>stop<block_start>relevant[i]<augadd>bias_contrib_i<times>bias_contrib_g<block_end><else_stmt><block_start>irrelevant[i]<augadd>bias_contrib_i<times>bias_contrib_g<block_end><if_stmt>i<g>0<block_start>rel_contrib_f,irrel_contrib_f,bias_contrib_f=propagate_three(rel_f irrel_f b_f sigmoid)<line_sep>relevant[i]<augadd>(rel_contrib_f+bias_contrib_f)<times>relevant[i-1]<line_sep>irrelevant[i]<augadd>(rel_contrib_f+irrel_contrib_f+bias_contrib_f)<times>irrelevant[i-1]+irrel_contrib_f<times>relevant[i-1]<block_end>o=sigmoid(torch.matmul(W_io word_vecs[i])+torch.matmul(W_ho prev_rel_h+prev_irrel_h)+b_o)<line_sep>#rel_contrib_o, irrel_contrib_o, bias_contrib_o = propagate_three(rel_o, irrel_o, b_o, sigmoid)
new_rel_h,new_irrel_h=propagate_tanh_two(relevant[i] irrelevant[i])<line_sep>relevant_h[i]=o<times>new_rel_h<line_sep>irrelevant_h[i]=o<times>new_irrel_h<block_end>W_out=model.hidden_to_label.weight.data<line_sep># Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
scores=torch.matmul(W_out relevant_h[T-1])<line_sep>irrel_scores=torch.matmul(W_out irrelevant_h[T-1])<line_sep>tolerance=0.001<assert_stmt>torch.sum(torch.abs((model.forward(batch)-model.hidden_to_label.bias.data)-(scores+irrel_scores))).cpu().detach().numpy()<l>tolerance<line_sep><return>scores<block_end><def_stmt>softmax_out output<block_start><return>torch.nn.functional.softmax(torch.stack((output[0].reshape(-1) output[1].reshape(-1)) 1) dim=1)<block_end><def_stmt>is_in_relevant_toy batch start stop class_rules#XXX only for current model where relevant bigger five
<block_start>rel_digits=((batch.label<eq>0)[<none> :]<times>(batch.text<eq>class_rules[0]))+(batch.label<eq>1)[<none> :]<times>(batch.text<eq>class_rules[1])<line_sep>relevant=rel_digits[start:stop].sum(dim=0)<line_sep>irrelevant=rel_digits.sum(dim=0)-relevant<line_sep>test_out=torch.cat((relevant[: <none>] irrelevant[: <none>]) 1)<line_sep><return>test_out<block_end><def_stmt>cd_penalty_for_one_toy batch model1 start stop class_rules# get output
<block_start>model1_output=cd_batch_text(batch model1 start stop)<line_sep># only use the correct class
correct_idx=(batch.label torch.arange(batch.label.shape[0]))<line_sep>model1_softmax=softmax_out((model1_output[0][correct_idx] model1_output[1][correct_idx]))<line_sep>model2_softmax=is_in_relevant_toy(batch start stop class_rules).cuda().float()<line_sep>output=-(torch.log(model1_softmax)<times>model2_softmax).mean()<line_sep><return>output<block_end><def_stmt>is_in_relevant_decoy batch start stop class_rules<block_start>is_decoy=((batch.label<eq>0)<times>(batch.text[start:stop]<eq>class_rules[0])+(batch.label<eq>1)<times>(batch.text[start:stop]<eq>class_rules[1]))<line_sep><return>is_decoy.sum(dim=0)<block_end><def_stmt>cd_penalty_for_one_decoy batch model1 start stop class_rules<block_start>model1_output=cd_batch_text(batch model1 start stop)<line_sep>correct_idx=(batch.label torch.arange(batch.label.shape[0]))# only use the correct class
model1_softmax=softmax_out((model1_output[0][correct_idx] model1_output[1][correct_idx]))<line_sep>mask_decoy_in_relevant=is_in_relevant_decoy(batch start stop class_rules).cuda()<if_stmt>mask_decoy_in_relevant.byte().any()<block_start>masked_relevant=model1_softmax[: 1].masked_select(mask_decoy_in_relevant.byte())<line_sep>output=-(torch.log(masked_relevant)).mean()<line_sep><return>output<block_end><else_stmt><block_start><return>torch.zeros(1).cuda()<block_end><block_end><def_stmt>cd_penalty_annotated batch model1 start stop scores# get index where annotation present:
<block_start>idx_nonzero=(start<ne>-1).nonzero()[: 0]# find the ones where annotation exists
model_output=cd_text_irreg_scores(batch.text[: idx_nonzero] model1 start[idx_nonzero] stop[idx_nonzero])[0]#get the output and focus on relevant scores for class 0 vs 1
model_softmax=torch.nn.functional.softmax(model_output dim=0)[batch.label[idx_nonzero] np.arange(len(idx_nonzero))]#take softmax of class 0 vs 1 and take the correct digit
output=-(torch.log(model_softmax)<times>scores[idx_nonzero].float()).mean()#-(torch.log(1-model_softmax)*(1- scores[ idx_nonzero]).float() ).mean() #if it agrees, maximize - if it dis, min
<return>output<block_end># def cd_penalty_annotated(batch, model1, start, stop, scores):
# # get index where annotation present:
# idx_nonzero = (start != -1).nonzero()[:,0]
# model_output = cd_text_irreg_scores(batch.text[:, idx_nonzero], model1, start[ idx_nonzero], stop[idx_nonzero])[0]
# correct_idx = (batch.label[ idx_nonzero], torch.arange(batch.label[ idx_nonzero].shape[0]) )
# model_softmax = torch.nn.functional.softmax(model_output, dim =0)[correct_idx]
# output = -(torch.log(model_softmax)*scores[ idx_nonzero].float()).mean() -(torch.log(model_softmax)*(1- scores[ idx_nonzero]).float() ).mean() #next thing to try
# print(output, torch.log(model_softmax).mean())
# return output
# def cd_penalty_annotated(batch, model1, start, stop, agrees):
# model1_output = cd_text_irreg_scores(batch.text, model1, start, stop)
# correct_idx = (batch.label, torch.arange(batch.label.shape[0])) # only use the correct class
# model1_softmax = softmax_out((model1_output[0][0],model1_output[0][1]))[correct_idx]
# output = -(torch.log(model1_softmax) * agrees.float()).mean() #+ (torch.log(model1_softmax) * (1-agrees).float()).mean()
# return output
<def_stmt>cd_penalty_for_one_decoy_all batch model1 start stop<block_start>mask_exists=(start<ne>-1).byte().cuda()<if_stmt>mask_exists.any()<block_start>model1_output=cd_text_irreg_scores(batch.text model1 start stop)<line_sep>correct_idx=(batch.label torch.arange(batch.label.shape[0]))# only use the correct class
wrong_idx=(1-batch.label torch.arange(batch.label.shape[0]))<line_sep>model1_softmax=softmax_out((model1_output[0][correct_idx] model1_output[1][correct_idx]))#+ softmax_out((model1_output[0][wrong_idx],model1_output[1][wrong_idx]))
output=(torch.log(model1_softmax[: 1])).masked_select(mask_exists)<line_sep><return>-output.mean()<block_end><else_stmt><block_start><return>torch.zeros(1).cuda()<block_end><block_end><def_stmt>cd_penalty batch model1 model2 start stop<block_start>model1_output=cd_batch_text(batch model1 start stop)<line_sep>model2_output=cd_batch_text(batch model2 start stop)<line_sep>model1_softmax=softmax_out(model1_output)<line_sep>model2_softmax=softmax_out(model2_output)<line_sep><return>((model1_softmax-model2_softmax)<times>(torch.log(model1_softmax)-torch.log(model2_softmax))).sum(dim=1).reshape((2 -1)).sum(dim=0)<block_end># this implementation of cd is very long so that we can view CD at intermediate layers
# in reality, this should be a loop which uses the above functions
<def_stmt>cd_vgg_features blob im_torch model model_type='vgg'# set up model
<block_start>model.eval()<line_sep># set up blobs
blob=torch.cuda.FloatTensor(blob)<line_sep>relevant=blob<times>im_torch<line_sep>irrelevant=(1-blob)<times>im_torch<line_sep>mods=list(model.modules())[2:]<line_sep># (0): Conv2d (3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (1): ReLU(inplace)
# (2): Conv2d (64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (3): ReLU(inplace)
# (4): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[0])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[1])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[2])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[3])<line_sep>relevant,irrelevant=propagate_pooling(relevant irrelevant mods[4] model_type=model_type)<line_sep># (5): Conv2d (64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (6): ReLU(inplace)
# (7): Conv2d (128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (8): ReLU(inplace)
# (9): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[5])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[6])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[7])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[8])<line_sep>relevant,irrelevant=propagate_pooling(relevant irrelevant mods[9] model_type=model_type)<line_sep># (10): Conv2d (128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (11): ReLU(inplace)
# (12): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (13): ReLU(inplace)
# (14): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (15): ReLU(inplace)
# (16): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[10])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[11])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[12])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[13])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[14])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[15])<line_sep>relevant,irrelevant=propagate_pooling(relevant irrelevant mods[16] model_type=model_type)<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[17])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[18])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[19])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[20])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[21])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[22])<line_sep>relevant,irrelevant=propagate_pooling(relevant irrelevant mods[23] model_type=model_type)<line_sep># scores.append((relevant.clone(), irrelevant.clone()))
# (24): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (25): ReLU(inplace)
# (26): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (27): ReLU(inplace)
# (28): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (29): ReLU(inplace)
# (30): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[24])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[25])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[26])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[27])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[28])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[29])<line_sep>relevant,irrelevant=propagate_pooling(relevant irrelevant mods[30] model_type=model_type)<line_sep>relevant,irrelevant=propagate_AdaptiveAvgPool2d(relevant irrelevant mods[31])<line_sep># scores.append((relevant.clone(), irrelevant.clone()))
# return relevant, irrelevant
relevant=relevant.view(relevant.size(0) -1)<line_sep>irrelevant=irrelevant.view(irrelevant.size(0) -1)<line_sep><return>relevant irrelevant<block_end><def_stmt>cd_vgg_classifier relevant irrelevant im_torch model model_type='vgg'# set up model
<block_start>model.eval()<line_sep>mods=list(model.modules())[1:]<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[0])<line_sep># print(relevant.shape)
relevant,irrelevant=propagate_relu(relevant irrelevant mods[1])<line_sep>relevant,irrelevant=propagate_dropout(relevant irrelevant mods[2])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[3])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[4])<line_sep>relevant,irrelevant=propagate_dropout(relevant irrelevant mods[5])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[6])<line_sep># only interested in not cancer, which is class 0
#model.train()
<return>relevant irrelevant<block_end><def_stmt>cd_track_vgg blob im_torch model model_type='vgg'# set up model
<block_start>model.eval()<line_sep># set up blobs
blob=torch.cuda.FloatTensor(blob)<line_sep>relevant=blob<times>im_torch<line_sep>irrelevant=(1-blob)<times>im_torch<line_sep>mods=list(model.modules())[2:]<line_sep># (0): Conv2d (3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (1): ReLU(inplace)
# (2): Conv2d (64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (3): ReLU(inplace)
# (4): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[0])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[1])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[2])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[3])<line_sep>relevant,irrelevant=propagate_pooling(relevant irrelevant mods[4] model_type=model_type)<line_sep># (5): Conv2d (64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (6): ReLU(inplace)
# (7): Conv2d (128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (8): ReLU(inplace)
# (9): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[5])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[6])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[7])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[8])<line_sep>relevant,irrelevant=propagate_pooling(relevant irrelevant mods[9] model_type=model_type)<line_sep># (10): Conv2d (128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (11): ReLU(inplace)
# (12): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (13): ReLU(inplace)
# (14): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (15): ReLU(inplace)
# (16): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[10])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[11])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[12])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[13])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[14])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[15])<line_sep>relevant,irrelevant=propagate_pooling(relevant irrelevant mods[16] model_type=model_type)<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[17])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[18])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[19])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[20])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[21])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[22])<line_sep>relevant,irrelevant=propagate_pooling(relevant irrelevant mods[23] model_type=model_type)<line_sep># scores.append((relevant.clone(), irrelevant.clone()))
# (24): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (25): ReLU(inplace)
# (26): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (27): ReLU(inplace)
# (28): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (29): ReLU(inplace)
# (30): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[24])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[25])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[26])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[27])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[28])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[29])<line_sep>relevant,irrelevant=propagate_pooling(relevant irrelevant mods[30] model_type=model_type)<line_sep>relevant,irrelevant=propagate_AdaptiveAvgPool2d(relevant irrelevant mods[31])<line_sep># scores.append((relevant.clone(), irrelevant.clone()))
# return relevant, irrelevant
relevant=relevant.view(relevant.size(0) -1)<line_sep>irrelevant=irrelevant.view(irrelevant.size(0) -1)<line_sep># (classifier): Sequential(
# (0): Linear(in_features=25088, out_features=4096)
# (1): ReLU(inplace)
# (2): Dropout(p=0.5)
# (3): Linear(in_features=4096, out_features=4096)
# (4): ReLU(inplace)
# (5): Dropout(p=0.5)
# (6): Linear(in_features=4096, out_features=1000)
relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[33])<line_sep># print(relevant.shape)
relevant,irrelevant=propagate_relu(relevant irrelevant mods[34])<line_sep>relevant,irrelevant=propagate_dropout(relevant irrelevant mods[35])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[36])<line_sep>relevant,irrelevant=propagate_relu(relevant irrelevant mods[37])<line_sep>relevant,irrelevant=propagate_dropout(relevant irrelevant mods[38])<line_sep>relevant,irrelevant=propagate_conv_linear(relevant irrelevant mods[39])<line_sep><return>relevant irrelevant<block_end> |
<import_from_stmt>..utils AnalysisException<import_from_stmt>.expressions Expression<class_stmt>Literal(Expression)<block_start><def_stmt>__init__ self value<block_start>super().__init__()<line_sep>self.value=value<block_end><def_stmt>eval self row schema<block_start><return>self.value<block_end><def_stmt>__str__ self<block_start><if_stmt>self.value<is><true><block_start><return>"true"<block_end><if_stmt>self.value<is><false><block_start><return>"false"<block_end><if_stmt>self.value<is><none><block_start><return>"NULL"<block_end><return>str(self.value)<block_end><def_stmt>get_literal_value self<block_start><if_stmt>hasattr(self.value "expr")<or>isinstance(self.value Expression)<block_start><raise>AnalysisException("Value should not be a Column or an Expression,"<concat>f" but got {type(self)}: {self}")<block_end><return>self.value<block_end><def_stmt>args self<block_start><return>(self.value )<block_end><block_end>__all__=["Literal"]<line_sep> |
<import_stmt>numpy<as>np<line_sep>golden=np.array([[100.0 100.0] [206.226840616 179.610387213] [1190.25124092 1197.15702025] [1250.76639667 1250.3933971] [1261.76760093 1250.17718583] [1237.4846285 1237.56490579] [1273.56730356 1266.82141705] [1272.899992 1259.92589118] [1.17000308922e-06 1.21115462165e-06] [4.69048419035e-08 5.61093645301e-08] [1.50244060584e-09 2.44292250731e-09] [8.47391624349e-11 1.15593790738e-10] [5.10649970307e-12 4.80114236959e-12] [8.34326950279e-13 4.1368839091e-13] [3.66142109259e-14 4.95319932219e-14] [8.20801944862e-15 4.94154683061e-14]])<line_sep> |
<import_from_stmt>functools partial<import_from_stmt>typing Callable<import_from_stmt>typing TYPE_CHECKING<import_from_stmt>...config Conf<import_from_stmt>.menu Menu MenuEntry MenuSeparator<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>...ui.views.disassembly_view DisassemblyView<block_end><class_stmt>DisasmInsnContextMenu(Menu)<block_start>"""
Dissembly Instruction's Context Menu Items and callback funcion.
It provides context menu for dissembly instructions in the Dissembly View.
For adding items in plugins, use `Workspace.add_disasm_insn_ctx_menu_entry`
and `Workspace.remove_disasm_insn_ctx_menu_entry`.
"""<def_stmt>__init__ self disasm_view:'DisassemblyView'<block_start>super().__init__("" parent=disasm_view)<line_sep>self.insn_addr=<none><line_sep>self.entries.extend([MenuEntry('T&oggle selection' self._toggle_instruction_selection) MenuSeparator() MenuEntry('&XRefs...' self._popup_xrefs) MenuSeparator() ])<if_stmt>Conf.has_operation_mango<block_start>self.entries.extend([MenuEntry("&Depends on..." self._popup_dependson_dialog) MenuSeparator() ])<block_end>self.entries.extend([MenuEntry('E&xecute symbolically...' self._popup_newstate_dialog) MenuEntry('&Avoid in execution...' self._avoid_in_execution) MenuEntry('&Find in execution...' self._find_in_execution) MenuEntry('Add &hook...' self._add_hook) MenuEntry('View function &documentation...' self._view_docs)])<block_end>@property<def_stmt>_disasm_view self<arrow>'DisassemblyView'<block_start><return>self.parent<block_end><def_stmt>_popup_newstate_dialog self<block_start>self._disasm_view.popup_newstate_dialog(async_=<true>)<block_end><def_stmt>_popup_dependson_dialog self<block_start>self._disasm_view.popup_dependson_dialog(use_operand=<true>)<block_end><def_stmt>_toggle_instruction_selection self<block_start>self._disasm_view.infodock.toggle_instruction_selection(self.insn_addr)<block_end><def_stmt>_avoid_in_execution self<block_start>self._disasm_view.avoid_addr_in_exec(self.insn_addr)<line_sep>self._disasm_view.refresh()<block_end><def_stmt>_find_in_execution self<block_start>self._disasm_view.find_addr_in_exec(self.insn_addr)<line_sep>self._disasm_view.refresh()<block_end><def_stmt>_add_hook self<block_start>self._disasm_view.popup_hook_dialog(async_=<true>)<block_end><def_stmt>_view_docs self<block_start><if_stmt>self._disasm_view<is><none><block_start><return><block_end>addr=self._disasm_view._address_in_selection()<if_stmt>addr<is><not><none><block_start>self._disasm_view.popup_func_doc_dialog(addr)<block_end><block_end><def_stmt>_popup_xrefs 
self<block_start><if_stmt>self._disasm_view<is><none><or>self._disasm_view._flow_graph<is><none><block_start><return><block_end>r=self._disasm_view._flow_graph.get_selected_operand_info()<if_stmt>r<is><not><none><block_start>_,ins_addr,operand=r<line_sep>self._disasm_view.parse_operand_and_popup_xref_dialog(ins_addr operand async_=<true>)<block_end><block_end>#
# Public Methods
#
<def_stmt>add_menu_entry self text callback:Callable[['DisasmInsnContextMenu'] <none>] add_separator_first=<true><block_start><if_stmt>add_separator_first<block_start>self.entries.append(MenuSeparator())<block_end>self.entries.append(MenuEntry(text partial(callback self)))<block_end><def_stmt>remove_menu_entry self text remove_preceding_separator=<true><block_start><for_stmt>idx,m enumerate(self.entries)<block_start><if_stmt><not>isinstance(m MenuEntry)<block_start><continue><block_end><if_stmt>m.caption<eq>text<block_start>self.entries.remove(m)<if_stmt>remove_preceding_separator<block_start>self.entries.pop(idx-1)<block_end><block_end><block_end><block_end><block_end> |
<class_stmt>AnalyticalModelStick(AnalyticalModel IDisposable)<block_start>"""
An element that represents a stick in the structural analytical model.
Could be one of beam,brace or column type.
"""<def_stmt>Dispose self<block_start>""" Dispose(self: Element,A_0: bool) """<line_sep><pass><block_end><def_stmt>GetAlignmentMethod self selector<block_start>"""
GetAlignmentMethod(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> AnalyticalAlignmentMethod
Gets the alignment method for a given selector.
selector: End of the analytical model.
Returns: The alignment method at a given end.
"""<line_sep><pass><block_end><def_stmt>getBoundingBox self *args<block_start>""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """<line_sep><pass><block_end><def_stmt>GetLocalCoordinateSystem self *__args<block_start>"""
GetLocalCoordinateSystem(self: AnalyticalModelStick,point: XYZ) -> Transform
Gets the local coordinate system (LCS) reflects analytical model orientation at
the specified point.
point: The point on the analytical model stick element.
Returns: Transformation matrix.
x - longitudinal axis,y - transversal,section -
horizontal,strong axis,z - transversal,section - vertical,weak axis,origin
- base point of LCS.
GetLocalCoordinateSystem(self: AnalyticalModelStick,parameter: float) -> Transform
Gets the local coordinate system (LCS) reflects analytical model orientation at
the specified parameter value along a curve.
parameter: The parameter value along a curve that should be in the range [0,1],where 0
represents start and 1 represents end of the element.
Returns: Transformation matrix.
x - longitudinal axis,y - transversal,section -
horizontal,strong axis,z - transversal,section - vertical,weak axis,origin
- base point of LCS.
"""<line_sep><pass><block_end><def_stmt>GetMemberForces self<block_start>"""
GetMemberForces(self: AnalyticalModelStick) -> IList[MemberForces]
Gets the member forces associated with this element.
Returns: Returns a collection of Member Forces associated with this element. Empty
collection will be returned if element doesn't have any Member Forces.
To
find out with which end member forces are associated use
Autodesk::Revit::DB::Structure::MemberForces::Position
property to obtain a
position of Member Forces on element.
"""<line_sep><pass><block_end><def_stmt>GetProjectionPlaneY self selector<block_start>"""
GetProjectionPlaneY(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> ElementId
Retrieves analytical model projection information for Y direction.
selector: End of the analytical model.
Returns: Plane on to which analytical model is projected,or invalidElementId if
not
projected to a Plane.
"""<line_sep><pass><block_end><def_stmt>GetProjectionPlaneZ self selector<block_start>"""
GetProjectionPlaneZ(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> ElementId
Retrieves analytical model projection information for Z direction.
selector: End of the analytical model.
Returns: Plane on to which analytical model is projected,or invalidElementId if
not
projected to a Plane.
"""<line_sep><pass><block_end><def_stmt>GetProjectionY self selector<block_start>"""
GetProjectionY(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> StickElementProjectionY
Retrieves analytical model projection information for Y direction.
selector: End of the analytical model.
Returns: Indicates if the projection is a preset value,or refers to a Plane.
"""<line_sep><pass><block_end><def_stmt>GetProjectionZ self selector<block_start>"""
GetProjectionZ(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> StickElementProjectionZ
Retrieves analytical model projection information for Z direction.
selector: End of the analytical model.
Returns: Indicates if the projection is a preset value,or refers to a Plane.
"""<line_sep><pass><block_end><def_stmt>GetReleases self start fx fy fz mx my mz<block_start>"""
GetReleases(self: AnalyticalModelStick,start: bool) -> (bool,bool,bool,bool,bool,bool)
Gets the releases of element.
start: The position on analytical model stick element. True for start,false for end.
"""<line_sep><pass><block_end><def_stmt>GetReleaseType self start<block_start>"""
GetReleaseType(self: AnalyticalModelStick,start: bool) -> ReleaseType
Gets the release type.
start: The position on analytical model stick element. True for start,false for end.
Returns: The type of release.
"""<line_sep><pass><block_end><def_stmt>ReleaseUnmanagedResources self *args<block_start>""" ReleaseUnmanagedResources(self: Element,disposing: bool) """<line_sep><pass><block_end><def_stmt>RemoveAllMemberForces self<block_start>"""
RemoveAllMemberForces(self: AnalyticalModelStick) -> bool
Removes all member forces associated with element.
Returns: True if any member forces were removed,false otherwise.
"""<line_sep><pass><block_end><def_stmt>RemoveMemberForces self start<block_start>"""
RemoveMemberForces(self: AnalyticalModelStick,start: bool) -> bool
Removes member forces defined for given position.
start: Member Forces position on analytical model stick element. True for start,false
for end.
Returns: True if member forces for provided position were removed,false otherwise.
"""<line_sep><pass><block_end><def_stmt>SetAlignmentMethod self selector method<block_start>"""
SetAlignmentMethod(self: AnalyticalModelStick,selector: AnalyticalElementSelector,method: AnalyticalAlignmentMethod)
Sets the alignment method for a given selector.
selector: End of the analytical model.
method: The alignment method at a given end.
"""<line_sep><pass><block_end><def_stmt>setElementType self *args<block_start>""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """<line_sep><pass><block_end><def_stmt>SetMemberForces self *__args<block_start>"""
SetMemberForces(self: AnalyticalModelStick,start: bool,force: XYZ,moment: XYZ)
Adds Member Forces to element.
start: Member Forces position on analytical model stick element. True for start,false
for end.
force: The translational forces at specified position of the element.
The x value
of XYZ object represents force along x-axis of the analytical model coordinate
system,y along y-axis,z along z-axis respectively.
moment: The rotational forces at specified position of the element.
The x value of
XYZ object represents moment about x-axis of the analytical model coordinate
system,y about y-axis,z about z-axis respectively.
SetMemberForces(self: AnalyticalModelStick,memberForces: MemberForces)
Sets Member Forces to element.
memberForces: End to which member forces will be added is defined by setting
Autodesk::Revit::DB::Structure::MemberForces::Position
property in provided
Member Forces object.
"""<line_sep><pass><block_end><def_stmt>SetProjection self selector *__args<block_start>"""
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,planeIdY: ElementId,projectionZ: StickElementProjectionZ)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
planeIdY: Plane on to which analytical model may be projected in Y direction.
Plane
identifies a Level,a Grid,or a Ref Plane.
projectionZ: Preset value for Analytical Model Stick projection Z.
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,projectionY: StickElementProjectionY,projectionZ: StickElementProjectionZ)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
projectionY: Preset value for Analytical Model Stick projection Y.
projectionZ: Preset value for Analytical Model Stick projection Z.
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,planeIdY: ElementId,planeIdZ: ElementId)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
planeIdY: Plane on to which analytical model may be projected in Y direction.
Plane
identifies a Level,a Grid,or a Ref Plane.
planeIdZ: Plane on to which analytical model may be projected in Z direction.
Plane
identifies a Level,a Grid,or a Ref Plane.
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,projectionY: StickElementProjectionY,planeIdZ: ElementId)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
projectionY: Preset value for Analytical Model Stick projection Y.
planeIdZ: Plane on to which analytical model may be projected in Z direction.
Plane
identifies a Level,a Grid,or a Ref Plane.
"""<line_sep><pass><block_end><def_stmt>SetReleases self start fx fy fz mx my mz<block_start>"""
SetReleases(self: AnalyticalModelStick,start: bool,fx: bool,fy: bool,fz: bool,mx: bool,my: bool,mz: bool)
Sets the releases of element.
start: The position on analytical model stick element. True for start,false for end.
"""<line_sep><pass><block_end><def_stmt>SetReleaseType self start releaseType<block_start>"""
SetReleaseType(self: AnalyticalModelStick,start: bool,releaseType: ReleaseType)
Sets the release type.
start: The position on analytical model stick element. True for start,false for end.
releaseType: The type of release.
"""<line_sep><pass><block_end><def_stmt>__enter__ self *args<block_start>""" __enter__(self: IDisposable) -> object """<line_sep><pass><block_end><def_stmt>__exit__ self *args<block_start>""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """<line_sep><pass><block_end><def_stmt>__init__ self *args<block_start>""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """<line_sep><pass><block_end><block_end> |
<import_stmt>doctest<import_from_stmt>nose.tools assert_equal assert_true<import_from_stmt>corehq.apps.fixtures.models FieldList FixtureDataItem FixtureItemField <import_from_stmt>custom.abt.reports fixture_utils<import_from_stmt>custom.abt.reports.fixture_utils dict_values_in fixture_data_item_to_dict <def_stmt>test_dict_values_in_param_none <block_start>swallow={'permutation':'unladen'}<line_sep>result=dict_values_in(swallow <none>)<line_sep>assert_true(result)<block_end><def_stmt>test_dict_values_in_param_empty <block_start>swallow={'permutation':'unladen'}<line_sep>result=dict_values_in(swallow {})<line_sep>assert_true(result)<block_end><def_stmt>test_dict_values_in_value_none <block_start>swallow={'permutation':'unladen'}<line_sep>result=dict_values_in(swallow {'permutation':<none>})<line_sep>assert_true(result)<block_end><def_stmt>test_fixture_data_item_to_dict <block_start>data_item=FixtureDataItem(domain='test-domain' data_type_id='123456' fields={'id':FieldList(doc_type='FieldList' field_list=[FixtureItemField(doc_type='FixtureItemField' field_value='789abc' properties={})]) 'name':FieldList(doc_type='FieldList' field_list=[FixtureItemField(doc_type='FixtureItemField' field_value='John' properties={'lang':'en'}) FixtureItemField(doc_type='FixtureItemField' field_value='Jan' properties={'lang':'nld'}) FixtureItemField(doc_type='FixtureItemField' field_value='Jean' properties={'lang':'fra'}) ])})<line_sep>dict_=fixture_data_item_to_dict(data_item)<line_sep>assert_equal(dict_ {'id':'789abc' 'name':'John'})<block_end><def_stmt>test_empty_fixture_data_item_to_dict <block_start>data_item=FixtureDataItem(domain='test-domain' data_type_id='123456' fields={'id':FieldList(doc_type='FieldList' field_list=[]) 'name':FieldList(doc_type='FieldList' field_list=[])})<line_sep>dict_=fixture_data_item_to_dict(data_item)<line_sep>assert_equal(dict_ {'id':<none> 'name':<none> })<block_end><def_stmt>test_doctests 
<block_start>results=doctest.testmod(fixture_utils)<assert_stmt>results.failed<eq>0<block_end> |
<import_from_stmt>.body Body<import_from_stmt>.camera Camera<import_from_stmt>.base_scene BaseScene<import_from_stmt>.caching BodyCache TextureCache<import_from_stmt>.textures apply_random_textures<line_sep> |
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2016-2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""<line_sep>__author__='VMware, Inc.'<import_from_stmt>com.vmware.vcenter_client Datacenter Folder <def_stmt>folder_list_datacenter_folder context<block_start><return>context.client.vcenter.Folder.list(Folder.FilterSpec(type=Folder.Type.DATACENTER))<block_end><def_stmt>detect_datacenter context datacenter_name<block_start>"""Find the datacenter with the given name"""<line_sep>names=set([datacenter_name])<line_sep>datacenter_summaries=context.client.vcenter.Datacenter.list(Datacenter.FilterSpec(names=names))<if_stmt>len(datacenter_summaries)<g>0<block_start>datacenter=datacenter_summaries[0].datacenter<line_sep>print("Detected Datacenter '{}' as {}".format(datacenter_name datacenter))<line_sep>context.testbed.entities['DATACENTER_IDS'][datacenter_name]=datacenter<line_sep><return><true><block_end><else_stmt><block_start>print("Datacenter '{}' missing".format(datacenter_name))<line_sep><return><false><block_end><block_end><def_stmt>detect_datacenters context<block_start>"""Find datacenters to run the vcenter samples"""<line_sep>context.testbed.entities['DATACENTER_IDS']={}<line_sep># Look for the two datacenters
datacenter1_name=context.testbed.config['DATACENTER1_NAME']<line_sep>datacenter2_name=context.testbed.config['DATACENTER2_NAME']<line_sep><return>(detect_datacenter(context datacenter1_name)<and>detect_datacenter(context datacenter2_name))<block_end><def_stmt>cleanup_datacenters context<block_start>"""Cleanup datacenters after sample run"""<line_sep># Look for the two datacenters
datacenter1_name=context.testbed.config['DATACENTER1_NAME']<line_sep>datacenter2_name=context.testbed.config['DATACENTER2_NAME']<line_sep>names=set([datacenter1_name datacenter2_name])<line_sep>datacenter_summaries=context.client.vcenter.Datacenter.list(Datacenter.FilterSpec(names=names))<line_sep>print("Found {} Datacenters matching names {}".format(len(datacenter_summaries) ", ".join(["'{}'".format(n)<for>n names])))<for_stmt>datacenter_summary datacenter_summaries<block_start>datacenter=datacenter_summary.datacenter<line_sep>print("Deleting Datacenter '{}' ({})".format(datacenter datacenter_summary.name))<line_sep>context.client.vcenter.Datacenter.delete(datacenter force=<true>)<block_end><block_end><def_stmt>setup_datacenters context<block_start>"""Create datacenters for running vcenter samples"""<line_sep># Find a Folder in which to put the Datacenters
folder_summaries=folder_list_datacenter_folder(context)<line_sep>folder=folder_summaries[0].folder<line_sep>print("Creating datacenters in Folder '{}' ({})".format(folder folder_summaries[0].name))<line_sep># Create first datacenter
datacenter1_name=context.testbed.config['DATACENTER1_NAME']<line_sep>datacenter1=context.client.vcenter.Datacenter.create(Datacenter.CreateSpec(name=datacenter1_name folder=folder))<line_sep>print("Created Datacenter '{}' ({})".format(datacenter1 datacenter1_name))<line_sep># Create second datacenter
datacenter2_name=context.testbed.config['DATACENTER2_NAME']<line_sep>datacenter2=context.client.vcenter.Datacenter.create(Datacenter.CreateSpec(name=datacenter2_name folder=folder))<line_sep>print("Created Datacenter '{}' ({})".format(datacenter2 datacenter2_name))<line_sep># Save datacenter name to identifier mappings for later use
context.testbed.entities['DATACENTER_IDS']={datacenter1_name:datacenter1 datacenter2_name:datacenter2}<block_end><def_stmt>cleanup context<block_start>cleanup_datacenters(context)<block_end><def_stmt>setup context<block_start>setup_datacenters(context)<block_end><def_stmt>validate context<block_start><return>detect_datacenters(context)<block_end> |
# coding: utf-8
<import_stmt>datetime<import_stmt>unittest<import_stmt>jpholiday<class_stmt>TestYear2018(unittest.TestCase)<block_start><def_stmt>test_holiday self<block_start>"""
2018年祝日
"""<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 1 1)) '元日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 1 8)) '成人の日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 2 11)) '建国記念の日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 2 12)) '建国記念の日 振替休日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 3 21)) '春分の日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 4 29)) '昭和の日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 4 30)) '昭和の日 振替休日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 5 3)) '憲法記念日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 5 4)) 'みどりの日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 5 5)) 'こどもの日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 7 16)) '海の日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 8 11)) '山の日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 9 17)) '敬老の日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 9 23)) '秋分の日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 9 24)) '秋分の日 振替休日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 10 8)) '体育の日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 11 3)) '文化の日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 11 23)) '勤労感謝の日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 12 23)) '天皇誕生日')<line_sep>self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018 12 24)) '天皇誕生日 振替休日')<block_end><def_stmt>test_count_month self<block_start>"""
2018年月祝日数
"""<line_sep>self.assertEqual(len(jpholiday.month_holidays(2018 1)) 2)<line_sep>self.assertEqual(len(jpholiday.month_holidays(2018 2)) 2)<line_sep>self.assertEqual(len(jpholiday.month_holidays(2018 3)) 1)<line_sep>self.assertEqual(len(jpholiday.month_holidays(2018 4)) 2)<line_sep>self.assertEqual(len(jpholiday.month_holidays(2018 5)) 3)<line_sep>self.assertEqual(len(jpholiday.month_holidays(2018 6)) 0)<line_sep>self.assertEqual(len(jpholiday.month_holidays(2018 7)) 1)<line_sep>self.assertEqual(len(jpholiday.month_holidays(2018 8)) 1)<line_sep>self.assertEqual(len(jpholiday.month_holidays(2018 9)) 3)<line_sep>self.assertEqual(len(jpholiday.month_holidays(2018 10)) 1)<line_sep>self.assertEqual(len(jpholiday.month_holidays(2018 11)) 2)<line_sep>self.assertEqual(len(jpholiday.month_holidays(2018 12)) 2)<block_end><def_stmt>test_count_year self<block_start>"""
2018年祝日数
"""<line_sep>self.assertEqual(len(jpholiday.year_holidays(2018)) 20)<block_end><block_end> |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
<import_from_stmt>pathlib Path<import_stmt>pytest<import_from_stmt>pants.option.custom_types DictValueComponent ListValueComponent UnsetBool dict_with_files_option dir_option file_option <import_from_stmt>pants.option.options_fingerprinter OptionsFingerprinter<import_from_stmt>pants.testutil.rule_runner RuleRunner<line_sep>@pytest.fixture<def_stmt>rule_runner <arrow>RuleRunner<block_start><return>RuleRunner()<block_end><def_stmt>test_fingerprint_dict <arrow><none><block_start>d1={"b":1 "a":2}<line_sep>d2={"a":2 "b":1}<line_sep>d3={"a":1 "b":2}<line_sep>fp1,fp2,fp3=(OptionsFingerprinter().fingerprint(DictValueComponent.create d)<for>d (d1 d2 d3))<assert_stmt>fp1<eq>fp2<assert_stmt>fp1<ne>fp3<block_end><def_stmt>test_fingerprint_dict_with_non_string_keys <arrow><none><block_start>d={("a" 2):(3 4)}<line_sep>fp=OptionsFingerprinter().fingerprint(DictValueComponent.create d)<assert_stmt>fp<eq>"3852a094612ce1c22c08ee2ddcdc03d09e87ad97"<block_end><def_stmt>test_fingerprint_list <arrow><none><block_start>l1=[1 2 3]<line_sep>l2=[1 3 2]<line_sep>fp1,fp2=(OptionsFingerprinter().fingerprint(ListValueComponent.create l)<for>l (l1 l2))<assert_stmt>fp1<ne>fp2<block_end><def_stmt>test_fingerprint_file rule_runner:RuleRunner<arrow><none><block_start>fp1,fp2,fp3=(OptionsFingerprinter().fingerprint(file_option rule_runner.write_files({f:c})[0])<for>(f c) (("foo/bar.config" "blah blah blah") ("foo/bar.config" "meow meow meow") ("spam/egg.config" "blah blah blah") ))<assert_stmt>fp1<ne>fp2<assert_stmt>fp1<ne>fp3<assert_stmt>fp2<ne>fp3<block_end><def_stmt>test_fingerprint_file_outside_buildroot tmp_path:Path rule_runner:RuleRunner<arrow><none><block_start>outside_buildroot=rule_runner.write_files({(tmp_path/"foobar").as_posix():"foobar"})[0]<with_stmt>pytest.raises(ValueError)<block_start>OptionsFingerprinter().fingerprint(file_option outside_buildroot)<block_end><block_end><def_stmt>test_fingerprint_file_list 
rule_runner:RuleRunner<arrow><none><block_start>f1,f2,f3=(rule_runner.write_files({f:c})[0]<for>(f c) (("foo/bar.config" "blah blah blah") ("foo/bar.config" "meow meow meow") ("spam/egg.config" "blah blah blah") ))<line_sep>fp1=OptionsFingerprinter().fingerprint(file_option [f1 f2])<line_sep>fp2=OptionsFingerprinter().fingerprint(file_option [f2 f1])<line_sep>fp3=OptionsFingerprinter().fingerprint(file_option [f1 f3])<assert_stmt>fp1<eq>fp2<assert_stmt>fp1<ne>fp3<block_end><def_stmt>test_fingerprint_primitive <arrow><none><block_start>fp1,fp2=(OptionsFingerprinter().fingerprint("" v)<for>v ("foo" 5))<assert_stmt>fp1<ne>fp2<block_end><def_stmt>test_fingerprint_unset_bool <arrow><none><block_start>fp1=OptionsFingerprinter().fingerprint(UnsetBool UnsetBool)<line_sep>fp2=OptionsFingerprinter().fingerprint(UnsetBool UnsetBool)<assert_stmt>fp1<eq>fp2<block_end><def_stmt>test_fingerprint_dir rule_runner:RuleRunner<arrow><none><block_start>d1=rule_runner.create_dir("a")<line_sep>d2=rule_runner.create_dir("b")<line_sep>d3=rule_runner.create_dir("c")<line_sep>rule_runner.write_files({"a/bar/bar.config":"blah blah blah" "a/foo/foo.config":"meow meow meow" "b/foo/foo.config":"meow meow meow" "b/bar/bar.config":"blah blah blah" "c/bar/bar.config":"blah meow blah" })<line_sep>dp1=OptionsFingerprinter().fingerprint(dir_option [d1])<line_sep>dp2=OptionsFingerprinter().fingerprint(dir_option [d1 d2])<line_sep>dp3=OptionsFingerprinter().fingerprint(dir_option [d2 d1])<line_sep>dp4=OptionsFingerprinter().fingerprint(dir_option [d3])<assert_stmt>dp1<eq>dp1<assert_stmt>dp2<eq>dp2<assert_stmt>dp1<ne>dp3<assert_stmt>dp1<ne>dp4<assert_stmt>dp2<ne>dp3<block_end><def_stmt>test_fingerprint_dict_with_files_order rule_runner:RuleRunner<arrow><none><block_start>f1,f2=(rule_runner.write_files({f:c})[0]<for>(f c) (("foo/bar.config" "blah blah blah") ("foo/bar.config" "meow meow meow") ))<line_sep>fp1=OptionsFingerprinter().fingerprint(dict_with_files_option 
{"properties":f"{f1},{f2}"})<line_sep>fp2=OptionsFingerprinter().fingerprint(dict_with_files_option {"properties":f"{f2},{f1}"})<assert_stmt>fp1<eq>fp2<block_end><def_stmt>test_fingerprint_dict_with_file_content_change rule_runner:RuleRunner<arrow><none><block_start>f1,f2=(rule_runner.write_files({f:c})[0]<for>(f c) (("foo/bar.config" "blah blah blah") ("foo/bar.config" "meow meow meow") ))<line_sep>fp1=OptionsFingerprinter().fingerprint(dict_with_files_option {"properties":f"{f1},{f2}"})<with_stmt>open(f1 "w")<as>f<block_start>f.write("123")<block_end>fp2=OptionsFingerprinter().fingerprint(dict_with_files_option {"properties":f"{f1},{f2}"})<assert_stmt>fp1<ne>fp2<block_end> |
"""
scaffoldgraph.analysis.enrichment
Module contains an implementation of Compound Set Enrichment from the papers:
- Compound Set Enrichment: A Novel Approach to Analysis of Primary HTS Data.
- Mining for bioactive scaffolds with scaffold networks: Improved compound set enrichment from primary screening data.
"""<import_from_stmt>networkx set_node_attributes<import_from_stmt>scipy.stats ks_2samp binom_test<import_from_stmt>loguru logger<def_stmt>_btp scaffoldgraph activity_key alternative pd<block_start>"""CSE - binomial test (used in cse functions)."""<line_sep>result,active,total={} 0 0<for_stmt>m,a scaffoldgraph.get_molecule_nodes(activity_key)<block_start><if_stmt>int(a)<eq>1<block_start>active<augadd>1<block_end>total<augadd>1<block_end><if_stmt>pd<is><none><block_start>pd=active/total<block_end>logger.debug(f'(BTP) Total: {total}, Active: {active}, pd: {pd}')<for_stmt>scaffold scaffoldgraph.get_scaffold_nodes()<block_start>mols,acts=zip(*scaffoldgraph.get_molecules_for_scaffold(scaffold activity_key))<line_sep>N,K=len(mols) acts.count(1)<line_sep>pval=binom_test(K N pd alternative=alternative)<line_sep>logger.debug(f'(BTP) {scaffold}, {K}, {N}, {pval}')<line_sep>result[scaffold]={'pval':pval '_active':K '_total':N}<block_end><return>result<block_end><def_stmt>_ksp scaffoldgraph activity_key alternative<block_start>"""CSE - Kolmogorov-Smirnov test (used in cse functions)."""<line_sep>result,background={} []<for_stmt>_,activity scaffoldgraph.get_molecule_nodes(activity_key)<block_start>background.append(activity)<block_end><for_stmt>scaffold scaffoldgraph.get_scaffold_nodes()<block_start>mols,acts=zip(*scaffoldgraph.get_molecules_for_scaffold(scaffold activity_key))<line_sep>N=len(mols)<line_sep>dmax,pval=ks_2samp(acts background alternative 'auto')<line_sep>logger.debug(f'(KSP) {scaffold}, {N}, {dmax}, {pval}')<line_sep>result[scaffold]={'pval':pval 'dmax':dmax '_total':N}<block_end><return>result<block_end><def_stmt>bonferroni_correction scaffoldgraph crit<block_start>"""Returns bonferroni corrected significance level for each hierarchy.
Parameters
----------
scaffoldgraph : ScaffoldGraph
A ScaffoldGraph object to query.
crit : float
The critical significance value to apply bonferroni correction at
each scaffold hierarchy.
Returns
-------
dict
A dictionary containing the corrected critical significance value
at each scaffold hierarchy {hierarchy: crit}.
"""<line_sep>hier=scaffoldgraph.get_hierarchy_sizes()<line_sep><return>{k:crit/v<for>k,v hier.items()}<block_end><def_stmt>calc_scaffold_enrichment scaffoldgraph activity mode='ks' alternative='greater' p=<none><block_start>"""
Calculate scaffold enrichment using the Kolmogorov-Smirnov or binomal test.
Parameters
----------
scaffoldgraph : ScaffoldGraph
A ScaffoldGraph object to query.
activity : str
A scaffold node attribute key corresponding to an activity value.
If the test is binomial this value should be a binary attribute
(0 or 1 / True or False).
mode : {'ks', 'b'}, optional
A string specifying the statistical test to perform. 'ks' specifies a
Kolmogorov-Smirnov test and 'b' or 'binomial' specifies a binomial test.
The default is 'ks'.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available:
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
The default is 'greater'.
p : float, None, optional
The hypothesized probability of success. 0 <= p <= 1. Used in binomial mode.
If not specified p is set automatically (number of active / total compounds).
The default is None.
Returns
-------
dict
A dict of dicts in the format {scaffold: {results}} where results is the set
of results returned by the statistical test and scaffold is a scaffold node
key corresponding to a scaffold in the ScaffoldGraph object.
See Also
--------
scaffoldgraph.analysis.enrichment.compound_set_enrichment
References
----------
.. [1] <NAME>., <NAME>., <NAME>., and <NAME>. (2011). Mining for bioactive scaffolds
with scaffold networks: Improved compound set enrichment from primary screening data.
Journal of Chemical Information and Modeling, 51(7), 1528–1538.
.. [2] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>. (2010)
Compound Set Enrichment: A Novel Approach to Analysis of Primary HTS Data.
Journal of Chemical Information and Modeling, 50(12), 2067-2078.
"""<if_stmt>mode<eq>'binomial'<or>mode<eq>'b'<block_start><return>_btp(scaffoldgraph activity alternative p)<block_end><elif_stmt>mode<eq>'ks'<or>mode<eq>'k'<block_start><return>_ksp(scaffoldgraph activity alternative)<block_end><else_stmt><block_start><raise>ValueError(f'scaffold enrichment mode: {mode}, not implemented')<block_end><block_end><def_stmt>compound_set_enrichment scaffoldgraph activity mode='ks' alternative='greater' crit=0.01 p=<none><block_start>"""
Perform compound set enrichment (CSE), calculating scaffolds enriched for bioactivity.
Parameters
----------
scaffoldgraph : ScaffoldGraph
A ScaffoldGraph object to query.
activity : str
A scaffold node attribute key corresponding to an activity value.
If the test is binomial this value should be a binary attribute
(0 or 1 / True or False).
mode : {'ks', 'b'}, optional
A string specifying the statistical test to perform. 'ks' specifies a
Kolmogorov-Smirnov test and 'b' or 'binomial' specifies a binomial test.
The default is 'ks'.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available:
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
The default is 'greater'.
crit : float, optional
The critical significance level. The default is 0.01
p : float, None, optional
The hypothesized probability of success. 0 <= p <= 1. Used in binomial mode.
If not specified p is set automatically (number of active / total compounds).
The default is None.
Returns
-------
A tuple of 'enriched' scaffold classes in the format: (scaffold, {data}) where data
is the corresponding node attributes for the returned scaffold.
Notes
-----
P-values are added as node attributes with the key 'pval'.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., and <NAME>. (2011). Mining for bioactive scaffolds
with scaffold networks: Improved compound set enrichment from primary screening data.
Journal of Chemical Information and Modeling, 51(7), 1528–1538.
.. [2] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>. (2010)
Compound Set Enrichment: A Novel Approach to Analysis of Primary HTS Data.
Journal of Chemical Information and Modeling, 50(12), 2067-2078.
"""<line_sep>set_node_attributes(scaffoldgraph calc_scaffold_enrichment(scaffoldgraph activity mode alternative p))<line_sep>bonferroni=bonferroni_correction(scaffoldgraph crit)<line_sep>result=[]<for_stmt>scaffold,data scaffoldgraph.get_scaffold_nodes(<true>)<block_start><if_stmt>data['pval']<l>bonferroni[data['hierarchy']]<block_start>result.append((scaffold data))<block_end><block_end><return>tuple(sorted(result key=<lambda>x:x[1]['pval']))<block_end> |
# Copyright (c) 2015 IBM Corporation and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>IPython.core.magic Magics magics_class line_magic cell_magic line_cell_magic<import_stmt>pandas<as>pd<import_stmt>brunel.brunel_main<as>brunel<line_sep>ipy=get_ipython()<line_sep>@magics_class<class_stmt>BrunelMagics(Magics)<block_start>@line_cell_magic<def_stmt>brunel self line cell=<none><block_start>"Magic that works both as %brunel and as %%brunel"<line_sep>datas=self.find_dataframes()<line_sep># print("Found dataframes", list(datas.keys()))
<if_stmt>cell<is><not><none><block_start>line=line+' '+cell.replace('\n' ' ')<block_end># print ("Command =", line)
data=<none><line_sep>height=400<line_sep>width=500<line_sep>output='d3'<line_sep>online_js=<false><line_sep>parts=line.split('::')<line_sep>action=parts[0].strip()<line_sep>datasets_in_brunel=brunel.get_dataset_names(action)<line_sep>self.cache_data(datasets_in_brunel datas)<if_stmt>len(parts)<g>2<block_start><raise>ValueError("Only one ':' allowed in brunel magic. Format is 'ACTION : key=value, ...'")<block_end><if_stmt>len(parts)<g>1<block_start>extras=parts[1].strip()<line_sep>dataName=self.find_term('data' extras)<if_stmt>dataName<is><not><none><block_start><try_stmt><block_start>data=datas[dataName]<block_end><except_stmt><block_start><raise>ValueError("Could not find pandas DataFrame named '"+dataName+"'")<block_end><block_end>width=self.find_term('width' extras width)<line_sep>height=self.find_term('height' extras height)<line_sep>online_js=self.find_term('online_js' extras online_js)<block_end><if_stmt>data<is><none><and>len(datasets_in_brunel)<eq>0<block_start>data=self.best_match(self.get_vars(action) list(datas.values()))<block_end><return>brunel.display(action data width height online_js)<block_end><def_stmt>cache_data self datasets_in_brunel dataframes<block_start><for_stmt>data_name datasets_in_brunel<block_start><try_stmt><block_start>data=dataframes[data_name]<line_sep>brunel.cacheData(data_name brunel.to_csv(data))<block_end><except_stmt><block_start><pass><block_end><block_end><block_end><def_stmt>find_term self key string default=<none><block_start><for_stmt>expr string.split(',')<block_start>terms=expr.split('=')<if_stmt>len(terms)<ne>2<block_start><raise>ValueError("Bad format for key=value pair: "+expr)<block_end><if_stmt>key<eq>terms[0].strip().lower()<block_start><return>terms[1].strip()<block_end><block_end><return>default<block_end><def_stmt>find_dataframes self<block_start>result={}<for_stmt>name list(self.shell.user_ns.keys())<block_start>v=self.shell.user_ns[name]<if_stmt>name[0]<ne>'_'<and>isinstance(v 
pd.DataFrame)<block_start>result[name]=v<block_end><block_end><return>result<block_end><def_stmt>get_vars self line<block_start>"Search for the internal bits of 'x(a,b)' and return as ['a','b']"<line_sep>result=[]<for_stmt>part line.split('(')<block_start>p=part.find(')')<if_stmt>p<g>0<block_start>inner=part[:p].split(',')<for_stmt>term inner<block_start>result.append(term.strip())<block_end><block_end><block_end><return>result<block_end><def_stmt>best_match self variables datas# print("Searching for", variables, "in", len(datas), "dataframes")
<block_start>all=[[self.match(variables v.columns.values) v]<for>v datas]<line_sep>all.sort(key=<lambda>x:x[0])<line_sep><return>all[0][1]<block_end><def_stmt>match self names1 names2<block_start>n=0<for_stmt>i names1<block_start><for_stmt>j names2<block_start><if_stmt>str(i).lower()<eq>str(j).lower()<block_start>n<augadd>1<block_end><block_end><block_end><return>-n<block_end><block_end># Register with IPython
ipy.register_magics(BrunelMagics)<line_sep> |
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""<import_stmt>unittest<import_from_stmt>magma.enodebd.tr069.models DeviceIdStruct<import_from_stmt>spyne ComplexModelBase<class_stmt>DeviceIdStructTests(unittest.TestCase)<block_start><def_stmt>test_as_dict_memory_leak self<block_start>"""
Test to ensure as_dict() doesn't leak model instances
"""<line_sep>thing=DeviceIdStruct(Manufacturer='abc' OUI='def' ProductClass='ghi' SerialNumber='jkl' )<line_sep>res=thing.as_dict()<line_sep>self.assertEqual({'Manufacturer':'abc' 'OUI':'def' 'ProductClass':'ghi' 'SerialNumber':'jkl' } res )<line_sep># inspect the spyne.util.memoize object that wraps the staticmethod
self.assertEqual(1 len(ComplexModelBase.get_flat_type_info.memo))<line_sep># should produce a different result and not grow the size of memo
thing.OUI='aaaa'<line_sep>res=thing.as_dict()<line_sep>self.assertEqual({'Manufacturer':'abc' 'OUI':'aaaa' 'ProductClass':'ghi' 'SerialNumber':'jkl' } res )<line_sep>self.assertEqual(1 len(ComplexModelBase.get_flat_type_info.memo))<line_sep># use a different object this time. Again should not grow memo
thing=DeviceIdStruct(Manufacturer='abc' OUI='def' ProductClass='ghi' SerialNumber='jkl' )<line_sep>res=thing.as_dict()<line_sep>self.assertEqual({'Manufacturer':'abc' 'OUI':'def' 'ProductClass':'ghi' 'SerialNumber':'jkl' } res )<line_sep>self.assertEqual(1 len(ComplexModelBase.get_flat_type_info.memo))<block_end><block_end> |
<import_from_stmt>standard HDB NatureStd<if_stmt>__name__<eq>"__main__"<block_start>hb=HDB('hbba')<line_sep>db=HDB('dbba')<line_sep>data=db.search('政务云工程评价指标体系及方法')<line_sep>print(data)<line_sep># first_record = data["records"][0]
# name = f'{first_record["code"]}({first_record["chName"]}'
# db.download(pk=first_record['pk'], name=name)
# std = NatureStd()
# std.search("")
# std.download("http://www.nrsis.org.cn/portal/stdDetail/211166", "乡(镇)土地利用总体规划制图规范.pdf") # 行标
<block_end> |
d={'key1':1 'key2':2 'key3':3}<for_stmt>k d<block_start>print(k)<block_end># key1
# key2
# key3
<for_stmt>k d.keys()<block_start>print(k)<block_end># key1
# key2
# key3
keys=d.keys()<line_sep>print(keys)<line_sep>print(type(keys))<line_sep># dict_keys(['key1', 'key2', 'key3'])
# <class 'dict_keys'>
k_list=list(d.keys())<line_sep>print(k_list)<line_sep>print(type(k_list))<line_sep># ['key1', 'key2', 'key3']
# <class 'list'>
<for_stmt>v d.values()<block_start>print(v)<block_end># 1
# 2
# 3
values=d.values()<line_sep>print(values)<line_sep>print(type(values))<line_sep># dict_values([1, 2, 3])
# <class 'dict_values'>
v_list=list(d.values())<line_sep>print(v_list)<line_sep>print(type(v_list))<line_sep># [1, 2, 3]
# <class 'list'>
<for_stmt>k,v d.items()<block_start>print(k v)<block_end># key1 1
# key2 2
# key3 3
<for_stmt>t d.items()<block_start>print(t)<line_sep>print(type(t))<line_sep>print(t[0])<line_sep>print(t[1])<line_sep>print('---')<block_end># ('key1', 1)
# <class 'tuple'>
# key1
# 1
# ---
# ('key2', 2)
# <class 'tuple'>
# key2
# 2
# ---
# ('key3', 3)
# <class 'tuple'>
# key3
# 3
# ---
items=d.items()<line_sep>print(items)<line_sep>print(type(items))<line_sep># dict_items([('key1', 1), ('key2', 2), ('key3', 3)])
# <class 'dict_items'>
i_list=list(d.items())<line_sep>print(i_list)<line_sep>print(type(i_list))<line_sep># [('key1', 1), ('key2', 2), ('key3', 3)]
# <class 'list'>
print(i_list[0])<line_sep>print(type(i_list[0]))<line_sep># ('key1', 1)
# <class 'tuple'>
|
<import_stmt>django<line_sep>DATABASES={'default':{'ENGINE':'django.db.backends.sqlite3' 'NAME':':memory:' }}<line_sep>INSTALLED_APPS=['flows' 'flows.statestore.tests' 'django_nose']<line_sep>SECRET_KEY='flow_tests'<if_stmt>django.VERSION<l>(1 6)<block_start>TEST_RUNNER='django.test.simple.DjangoTestSuiteRunner'<block_end>TEST_RUNNER='django_nose.NoseTestSuiteRunner'<line_sep>MIDDLEWARE_CLASSES=[]<line_sep>ROOT_URLCONF=''<if_stmt>django.VERSION<l>(1 7)<block_start><try_stmt><block_start>__import__('south')<block_end><except_stmt>ImportError<block_start><pass><block_end><else_stmt><block_start>INSTALLED_APPS.append('south')<block_end><block_end> |
<if_stmt>__name__<eq>"__main__"<block_start><import_stmt>argparse<import_stmt>pickle<import_stmt>os<import_from_stmt>tqdm tqdm<import_from_stmt>build_scene *<import_from_stmt>block_data COLOR_BID_MAP<line_sep>BLOCK_DATA=pickle.load(open("/private/home/aszlam/minecraft_specs/block_images/block_data" "rb"))<line_sep>allowed_blocktypes=[]<line_sep>count=0<for_stmt>c,l COLOR_BID_MAP.items()<block_start><for_stmt>idm l<block_start>allowed_blocktypes.append(BLOCK_DATA["bid_to_name"][idm])<line_sep>count<augadd>1<block_end><block_end>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--target" default="/checkpoint/aszlam/minecraft/inverse_model/flat_ads/")<line_sep>parser.add_argument("--N" type=int default=10000000)<line_sep># parser.add_argument("--num_per_chunk", type=int, default=10000000)
args=parser.parse_args()<line_sep>template_attributes={"count":range(1 5)}<line_sep>template_attributes["step"]=range(1 10)<line_sep>template_attributes["non_shape_names"]=["triangle" "circle" "disk" "rectangle"]<line_sep>template_attributes["mob_names"]=["pig" "sheep" "cow" "chicken"]<line_sep>template_attributes["allowed_blocktypes"]=allowed_blocktypes<line_sep>template_attributes["distribution"]={"MOVE":1.0 "BUILD":1.0 "DESTROY":1.0 "DIG":0.8 "COPY":0.8 "FILL":0.8 "SPAWN":0.1 "DANCE":0.8 }<line_sep>scenes=[]<for_stmt>i tqdm(range(args.N))<block_start>S=build_scene(template_attributes sl=16 flat=<true>)<line_sep>scenes.append(S)<block_end>f=open(os.path.join(args.target "flat_scenes_dump.pk") "wb")<line_sep>pickle.dump(scenes f)<line_sep>f.close()<block_end> |
<import_from_stmt>stix_shifter_utils.modules.base.stix_transmission.base_delete_connector BaseDeleteConnector<class_stmt>DeleteConnector(BaseDeleteConnector)<block_start><def_stmt>__init__ self api_client<block_start>self.api_client=api_client<block_end><def_stmt>delete_query_connection self search_id<block_start><return>{"success":<true>}<block_end><block_end> |
<import_stmt>pytest<import_from_stmt>tests.test_crud.main app<line_sep>@pytest.fixture(scope='session' autouse=<true>)<def_stmt>startup <block_start><import_stmt>asyncio<line_sep># asyncio.run(app.router.startup())
loop=asyncio.get_event_loop()<line_sep>loop.run_until_complete(app.router.startup())<block_end> |
"""
This module contains the general settings used across modules.
"""<line_sep>FPS=60<line_sep>WINDOW_WIDTH=1100<line_sep>WINDOW_HEIGHT=600<line_sep>TIME_MULTIPLIER=1.0<line_sep> |
<import_from_future_stmt> print_function<import_from_future_stmt> absolute_import<import_from_future_stmt> division<try_stmt><block_start>basestring<block_end><except_stmt>NameError<block_start>basestring=str<block_end><import_stmt>os<import_stmt>sys<import_stmt>ast<import_from_stmt>compas_rhino.forms TextForm<import_from_stmt>compas_rhino.forms ImageForm<import_stmt>System<import_stmt>rhinoscriptsyntax<as>rs<import_stmt>Rhino<import_stmt>clr<line_sep>clr.AddReference('Rhino.UI')<import_stmt>Rhino.UI# noqa: E402
<import_from_stmt>Rhino.UI.Dialogs ShowMessageBox# noqa: E402
<try_stmt><block_start><import_from_stmt>compas_rhino.forms PropertyListForm<block_end><except_stmt>ImportError<block_start><import_from_stmt>Rhino.UI.Dialogs ShowPropertyListBox<block_end>__all__=['wait' 'get_tolerance' 'toggle_toolbargroup' 'pick_point' 'browse_for_folder' 'browse_for_file' 'print_display_on' 'display_message' 'display_text' 'display_image' 'display_html' 'update_settings' 'update_named_values' 'screenshot_current_view' 'select_folder' 'select_file' 'unload_modules' ]<line_sep># ==============================================================================
# Truly miscellaneous :)
# ==============================================================================
<def_stmt>screenshot_current_view path width=1920 height=1080 scale=1 draw_grid=<false> draw_world_axes=<false> draw_cplane_axes=<false> background=<false><block_start>"""Take a screenshot of the current view.
Parameters
----------
path : str
The filepath for saving the screenshot.
Other Parameters
----------------
width : int, optional
height : int, optional
scale : float, optional
draw_grid : bool, optional
draw_world_axes : bool, optional
draw_cplane_axes : bool, optional
background : bool, optional
Returns
-------
bool
True if the command was successful.
False otherwise.
"""<line_sep>properties=[draw_grid draw_world_axes draw_cplane_axes background]<line_sep>properties=["Yes"<if>item<else>"No"<for>item properties]<line_sep>scale=max(1 scale)# the rhino command requires a scale > 1
rs.EnableRedraw(<true>)<line_sep>rs.Sleep(0)<line_sep>result=rs.Command("-_ViewCaptureToFile \""+os.path.abspath(path)+"\""<concat>" Width="+str(width)+" Height="+str(height)+" Scale="+str(scale)+" DrawGrid="+properties[0]+" DrawWorldAxes="+properties[1]+" DrawCPlaneAxes="+properties[2]+" TransparentBackground="+properties[3]+" _enter" <false>)<line_sep>rs.EnableRedraw(<false>)<line_sep><return>result<block_end><def_stmt>wait <block_start><return>Rhino.RhinoApp.Wait()<block_end><def_stmt>get_tolerance <block_start>"""Get the absolute tolerance.
Returns
-------
float
The tolerance.
"""<line_sep><return>rs.UnitAbsoluteTolerance()<block_end><def_stmt>toggle_toolbargroup rui group<block_start><if_stmt><not>os.path.exists(rui)<or><not>os.path.isfile(rui)<block_start><return><block_end>collection=rs.IsToolbarCollection(rui)<if_stmt><not>collection<block_start>collection=rs.OpenToolbarCollection(rui)<if_stmt>rs.IsToolbar(collection group <true>)<block_start>rs.ShowToolbar(collection group)<block_end><block_end><else_stmt><block_start><if_stmt>rs.IsToolbar(collection group <true>)<block_start><if_stmt>rs.IsToolbarVisible(collection group)<block_start>rs.HideToolbar(collection group)<block_end><else_stmt><block_start>rs.ShowToolbar(collection group)<block_end><block_end><block_end><block_end><def_stmt>pick_point message='Pick a point.'<block_start>point=rs.GetPoint(message)<if_stmt>point<block_start><return>list(point)<block_end><return><none><block_end># ==============================================================================
# File system
# ==============================================================================
<def_stmt>browse_for_folder message=<none> default=<none><block_start><return>rs.BrowseForFolder(folder=default message=message title='compas')<block_end>select_folder=browse_for_folder<def_stmt>browse_for_file title=<none> folder=<none> filter=<none><block_start><if_stmt>filter<eq>'json'<block_start>filter='JSON files (*.json)|*.json||'<block_end><elif_stmt>filter<eq>'obj'<block_start>filter='OBJ files (*.obj)|*.obj||'<block_end><elif_stmt>filter<eq>'fofin'<block_start>filter='FOFIN session files (*.fofin)|*.fofin||'<block_end><else_stmt><block_start><pass><block_end><return>rs.OpenFileName(title filter=filter folder=folder)<block_end>select_file=browse_for_file<line_sep># ==============================================================================
# Display
# ==============================================================================
<def_stmt>print_display_on on=<true><block_start><if_stmt>on<block_start>rs.Command('_PrintDisplay State On Color Display Thickness 1 _Enter')<block_end><else_stmt><block_start>rs.Command('_PrintDisplay State Off _Enter')<block_end><block_end><def_stmt>display_message message<block_start><return>ShowMessageBox(message 'Message')<block_end><def_stmt>display_text text title='Text' width=800 height=600<block_start><if_stmt>isinstance(text (list tuple))<block_start>text='{0}'.format(System.Environment.NewLine).join(text)<block_end>form=TextForm(text title width height)<line_sep><return>form.show()<block_end><def_stmt>display_image image title='Image' width=800 height=600<block_start>form=ImageForm(image title width height)<line_sep><return>form.show()<block_end><def_stmt>display_html <block_start><raise>NotImplementedError<block_end># ==============================================================================
# Settings and attributes
# ==============================================================================
<def_stmt>update_named_values names values message='' title='Update named values' evaluate=<false><block_start><try_stmt><block_start>dialog=PropertyListForm(names values)<block_end><except_stmt>Exception<block_start>values=ShowPropertyListBox(message title names values)<block_end><else_stmt><block_start><if_stmt>dialog.ShowModal(Rhino.UI.RhinoEtoApp.MainWindow)<block_start>values=dialog.values<block_end><else_stmt><block_start>values=<none><block_end><block_end><if_stmt>evaluate<block_start><if_stmt>values<block_start>values=list(values)<for_stmt>i range(len(values))<block_start>value=values[i]<try_stmt><block_start>value=ast.literal_eval(value)<block_end><except_stmt>(TypeError ValueError SyntaxError)<block_start><pass><block_end>values[i]=value<block_end><block_end><block_end><return>values<block_end><def_stmt>update_settings settings message='' title='Update settings'<block_start>names=sorted(settings.keys())<line_sep>values=[str(settings[name])<for>name names]<line_sep>values=update_named_values(names values message=message title=title)<if_stmt>values<block_start>values=list(values)<for_stmt>name,value zip(names values)<block_start><try_stmt><block_start>settings[name]=ast.literal_eval(value)<block_end><except_stmt>(TypeError ValueError SyntaxError)<block_start>settings[name]=value<block_end><block_end><return><true><block_end><return><false><block_end><def_stmt>unload_modules top_level_module_name<block_start>"""Unloads all modules named starting with the specified string.
This function eases the development workflow when editing a library that is
used from Rhino/Grasshopper.
Parameters
----------
top_level_module_name : :obj:`str`
Name of the top-level module to unload.
Returns
-------
list
List of unloaded module names.
"""<line_sep>modules=filter(<lambda>m:m.startswith(top_level_module_name) sys.modules)<for_stmt>module modules<block_start>sys.modules.pop(module)<block_end><return>modules<block_end> |
<import_stmt>unittest<import_from_stmt>unittest TestCase<import_from_stmt>e2cnn.nn *<import_from_stmt>e2cnn.gspaces *<import_stmt>random<class_stmt>TestNonLinearitiesFlipRotations(TestCase)<block_start><def_stmt>test_dihedral_norm_relu self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<line_sep>r=FieldType(g list(g.representations.values())<times>4)<line_sep>nnl=NormNonLinearity(r function='n_relu')<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_dihedral_norm_sigmoid self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<line_sep>r=FieldType(g list(g.representations.values())<times>4)<line_sep>nnl=NormNonLinearity(r function='n_sigmoid')<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_dihedral_pointwise_relu self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<line_sep>reprs=[r<for>r g.representations.values()<if>'pointwise'<in>r.supported_nonlinearities]<line_sep>r=FieldType(g reprs)<line_sep>nnl=PointwiseNonLinearity(r function='p_relu')<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_dihedral_pointwise_sigmoid self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<line_sep>reprs=[r<for>r g.representations.values()<if>'pointwise'<in>r.supported_nonlinearities]<line_sep>r=FieldType(g reprs)<line_sep>nnl=PointwiseNonLinearity(r function='p_sigmoid')<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_dihedral_gated_one_input_shuffled_gated self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<line_sep>reprs=[r<for>r g.representations.values()<if>'gated'<in>r.supported_nonlinearities]<times>3<line_sep>ngates=len(reprs)<line_sep>reprs<augadd>[g.trivial_repr]<times>ngates<line_sep>gates=['gated']<times>ngates+['gate']<times>ngates<line_sep>r=FieldType(g reprs)<line_sep>nnl=GatedNonLinearity1(r gates=gates)<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_dihedral_gated_one_input_sorted_gated self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<line_sep>reprs=[r<for>r 
g.representations.values()<if>'gated'<in>r.supported_nonlinearities]<times>3<line_sep>r=FieldType(g reprs).sorted()<line_sep>ngates=len(r)<line_sep>reprs=[g.trivial_repr]<times>ngates<line_sep>gates=['gated']<times>ngates+['gate']<times>ngates<line_sep>r=r+FieldType(g reprs)<line_sep>nnl=GatedNonLinearity1(r gates=gates)<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_dihedral_gated_one_input_all_shuffled self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<line_sep>reprs=[r<for>r g.representations.values()<if>'gated'<in>r.supported_nonlinearities]<times>2<line_sep>ngates=len(reprs)<line_sep>reprs<augadd>[g.trivial_repr]<times>ngates<line_sep>gates=['gated']<times>ngates+['gate']<times>ngates<line_sep>t=list(zip(reprs gates))<line_sep>random.shuffle(t)<line_sep>reprs,gates=zip(*t)<line_sep>r=FieldType(g reprs)<line_sep>nnl=GatedNonLinearity1(r gates=gates)<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_dihedral_gated_two_inputs_shuffled_gated self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<line_sep>gated=[r<for>r g.representations.values()<if>'gated'<in>r.supported_nonlinearities]<times>3<line_sep>ngates=len(gated)<line_sep>gates=[g.trivial_repr]<times>ngates<line_sep>gates=FieldType(g gates)<line_sep>gated=FieldType(g gated)<line_sep>nnl=GatedNonLinearity2((gates gated))<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_dihedral_gated_two_inputs_sorted_gated self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<line_sep>gated=[r<for>r g.representations.values()<if>'gated'<in>r.supported_nonlinearities]<times>2<line_sep>ngates=len(gated)<line_sep>gates=[g.trivial_repr]<times>ngates<line_sep>gates=FieldType(g gates)<line_sep>gated=FieldType(g gated).sorted()<line_sep>nnl=GatedNonLinearity2((gates gated))<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_dihedral_concat_relu self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<line_sep>reprs=[r<for>r g.representations.values()<if>'concatenated'<in>r.supported_nonlinearities]<for_stmt>rep 
reprs<block_start>r=FieldType(g [rep])<line_sep>nnl=ConcatenatedNonLinearity(r function='c_relu')<line_sep>nnl.check_equivariance()<block_end><block_end><def_stmt>test_dihedral_induced_norm_relu self<block_start>N=9<line_sep>g=FlipRot2dOnR2(N)<line_sep>sg_id=(<none> N)<line_sep>so2,_,_=g.fibergroup.subgroup(sg_id)<line_sep>r=FieldType(g [g.induced_repr(sg_id so2.irrep(k))<for>k range(1 int(N<floordiv>2))]<times>4).sorted()<line_sep>nnl=InducedNormNonLinearity(r function='n_relu')<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_o2_induced_norm_relu self<block_start>g=FlipRot2dOnR2(-1 10)<line_sep>sg_id=(<none> -1)<line_sep>so2,_,_=g.fibergroup.subgroup(sg_id)<line_sep>r=FieldType(g [g.induced_repr(sg_id so2.irrep(k))<for>k range(1 7)]<times>4).sorted()<line_sep>nnl=InducedNormNonLinearity(r function='n_relu')<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_o2_induced_gated self<block_start>g=FlipRot2dOnR2(-1 10)<line_sep>sg_id=(<none> -1)<line_sep>so2,_,_=g.fibergroup.subgroup(sg_id)<line_sep>reprs=[g.induced_repr(sg_id so2.irrep(k))<for>k range(1 3)]<times>5<line_sep>ngates=len(reprs)<line_sep>reprs<augadd>[g.induced_repr(sg_id so2.trivial_representation)]<times>ngates<line_sep>gates=['gated']<times>ngates+['gate']<times>ngates<line_sep>r=FieldType(g reprs)<line_sep>nnl=InducedGatedNonLinearity1(r gates=gates)<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_o2_norm_relu self<block_start>g=FlipRot2dOnR2(-1 10)<line_sep>r=FieldType(g list(g.representations.values())<times>4)<line_sep>nnl=NormNonLinearity(r function='n_relu')<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_o2_norm_sigmoid self<block_start>g=FlipRot2dOnR2(-1 10)<line_sep>r=FieldType(g list(g.representations.values())<times>4)<line_sep>nnl=NormNonLinearity(r function='n_sigmoid')<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_o2_pointwise_relu self<block_start>g=FlipRot2dOnR2(-1 10)<line_sep>reprs=[r<for>r 
g.representations.values()<if>'pointwise'<in>r.supported_nonlinearities]<line_sep>r=FieldType(g reprs)<line_sep>nnl=PointwiseNonLinearity(r function='p_relu')<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_o2_pointwise_sigmoid self<block_start>g=FlipRot2dOnR2(-1 10)<line_sep>reprs=[r<for>r g.representations.values()<if>'pointwise'<in>r.supported_nonlinearities]<line_sep>r=FieldType(g reprs)<line_sep>nnl=PointwiseNonLinearity(r function='p_sigmoid')<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_o2_gated_one_input_shuffled_gated self<block_start>g=FlipRot2dOnR2(-1 10)<line_sep>reprs=[r<for>r g.representations.values()<if>'gated'<in>r.supported_nonlinearities]<times>3<line_sep>ngates=len(reprs)<line_sep>reprs<augadd>[g.trivial_repr]<times>ngates<line_sep>gates=['gated']<times>ngates+['gate']<times>ngates<line_sep>r=FieldType(g reprs)<line_sep>nnl=GatedNonLinearity1(r gates=gates)<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_o2_gated_one_input_sorted_gated self<block_start>g=FlipRot2dOnR2(-1 10)<line_sep>reprs=[r<for>r g.representations.values()<if>'gated'<in>r.supported_nonlinearities]<times>2<line_sep>r=FieldType(g reprs).sorted()<line_sep>ngates=len(r)<line_sep>reprs=[g.trivial_repr]<times>ngates<line_sep>gates=['gated']<times>ngates+['gate']<times>ngates<line_sep>r=r+FieldType(g reprs)<line_sep>nnl=GatedNonLinearity1(r gates=gates)<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_o2_gated_one_input_all_shuffled self<block_start>g=FlipRot2dOnR2(-1 10)<line_sep>reprs=[r<for>r g.representations.values()<if>'gated'<in>r.supported_nonlinearities]<times>3<line_sep>ngates=len(reprs)<line_sep>reprs<augadd>[g.trivial_repr]<times>ngates<line_sep>gates=['gated']<times>ngates+['gate']<times>ngates<line_sep>t=list(zip(reprs gates))<line_sep>random.shuffle(t)<line_sep>reprs,gates=zip(*t)<line_sep>r=FieldType(g reprs)<line_sep>nnl=GatedNonLinearity1(r 
gates=gates)<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_o2_gated_two_inputs_shuffled_gated self<block_start>g=FlipRot2dOnR2(-1 10)<line_sep>gated=[r<for>r g.representations.values()<if>'gated'<in>r.supported_nonlinearities]<times>3<line_sep>ngates=len(gated)<line_sep>gates=[g.trivial_repr]<times>ngates<line_sep>gates=FieldType(g gates)<line_sep>gated=FieldType(g gated)<line_sep>nnl=GatedNonLinearity2((gates gated))<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_o2_gated_two_inputs_sorted_gated self<block_start>g=FlipRot2dOnR2(-1 10)<line_sep>gated=[r<for>r g.representations.values()<if>'gated'<in>r.supported_nonlinearities]<times>2<line_sep>ngates=len(gated)<line_sep>gates=[g.trivial_repr]<times>ngates<line_sep>gated=FieldType(g gated).sorted()<line_sep>gates=FieldType(g gates)<line_sep>nnl=GatedNonLinearity2((gates gated))<line_sep>nnl.check_equivariance()<block_end><def_stmt>test_dihedral_gated1_error self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<for_stmt>r g.representations.values()<block_start><if_stmt>'gated'<not><in>r.supported_nonlinearities<block_start>r1=FieldType(g [r g.trivial_repr])<line_sep>gates=['gated' 'gate']<line_sep>self.assertRaises(AssertionError GatedNonLinearity1 r1 gates=gates)<block_end><block_end><for_stmt>r g.representations.values()<block_start><if_stmt>'gate'<not><in>r.supported_nonlinearities<block_start>r1=FieldType(g [g.trivial_repr r])<line_sep>gates=['gated' 'gate']<line_sep>self.assertRaises(AssertionError GatedNonLinearity1 r1 gates=gates)<block_end><block_end><block_end><def_stmt>test_dihedral_gated2_error self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<for_stmt>r g.representations.values()<block_start><if_stmt>'gated'<not><in>r.supported_nonlinearities<block_start>gates=FieldType(g [g.trivial_repr])<line_sep>gated=FieldType(g [r])<line_sep>self.assertRaises(AssertionError GatedNonLinearity2 (gates gated))<block_end><block_end><for_stmt>r 
g.representations.values()<block_start><if_stmt>'gate'<not><in>r.supported_nonlinearities<block_start>gates=FieldType(g [r])<line_sep>gated=FieldType(g [g.trivial_repr])<line_sep>self.assertRaises(AssertionError GatedNonLinearity2 (gates gated))<block_end><block_end><block_end><def_stmt>test_dihedral_norm_error self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<for_stmt>r g.representations.values()<block_start><if_stmt>'norm'<not><in>r.supported_nonlinearities<block_start>r1=FieldType(g [r])<line_sep>self.assertRaises(AssertionError NormNonLinearity r1)<block_end><block_end><block_end><def_stmt>test_dihedral_pointwise_error self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<for_stmt>r g.representations.values()<block_start><if_stmt>'pointwise'<not><in>r.supported_nonlinearities<block_start>r1=FieldType(g [r])<line_sep>self.assertRaises(AssertionError PointwiseNonLinearity r1)<block_end><block_end><block_end><def_stmt>test_dihedral_concat_error self<block_start>N=8<line_sep>g=FlipRot2dOnR2(N)<for_stmt>r g.representations.values()<block_start><if_stmt>'concatenated'<not><in>r.supported_nonlinearities<block_start>r1=FieldType(g [r])<line_sep>self.assertRaises(AssertionError ConcatenatedNonLinearity r1)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> absolute_import unicode_literals print_function<import_from_stmt>datetime datetime<import_stmt>os<import_from_stmt>os.path dirname join<import_stmt>sys<import_stmt>time<import_stmt>unittest<import_stmt>uuid<import_stmt>logging<line_sep>LOGGING_FORMAT='\n%(levelname)s %(asctime)s %(message)s'<line_sep>logging.basicConfig(level=logging.INFO format=LOGGING_FORMAT)<line_sep>logger=logging.getLogger(__name__)<import_stmt>six<import_stmt>django<import_from_stmt>requests.exceptions ConnectionError<import_from_stmt>qiniu BucketManager<import_from_stmt>.utils retry<line_sep># Add repo/demo_site to sys.path
DEMO_SITE_DIR=join(dirname(dirname(__file__)) 'demo_site')<line_sep>sys.path.append(DEMO_SITE_DIR)<line_sep>os.environ.setdefault("DJANGO_SETTINGS_MODULE" "demo_site.settings")<try_stmt><block_start>django.setup()<block_end><except_stmt>AttributeError# Setup isn't necessary in Django < 1.7
<block_start><pass><block_end><import_from_stmt>django.conf settings<import_from_stmt>qiniustorage.backends QiniuPrivateStorage QiniuFile get_qiniu_config<import_from_stmt>qiniustorage.utils QiniuError<line_sep>USING_TRAVIS=os.environ.get('USING_TRAVIS' <none>)<is><none><line_sep>UNIQUE_PATH=str(uuid.uuid4())<class_stmt>QiniuStorageTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.storage=QiniuPrivateStorage(bucket_name=get_qiniu_config('QINIU_PRIVATE_BUCKET_NAME') bucket_domain=get_qiniu_config('QINIU_PRIVATE_BUCKET_DOMAIN') )<block_end><def_stmt>test_read_file self<block_start>ASSET_FILE_NAMES=[u'Read.txt' u'读.txt']<for_stmt>assert_file_name ASSET_FILE_NAMES<block_start>REMOTE_PATH=join(UNIQUE_PATH assert_file_name)<line_sep>test_file=six.BytesIO()<line_sep>test_file.write(u"你好世界 Hello World".encode('utf-8'))<line_sep>test_file.seek(0)<line_sep>self.storage.save(REMOTE_PATH test_file)<line_sep>fil=self.storage.open(REMOTE_PATH 'r')<assert_stmt>fil._is_read<eq><false><line_sep>content=fil.read()<assert_stmt>content.startswith(u"你好")<assert_stmt>fil._is_read<eq><true><line_sep># Test open mode
fil=self.storage.open(REMOTE_PATH 'rb')<line_sep>bin_content=fil.read()<assert_stmt>bin_content.startswith(u"你好".encode('utf-8'))<block_end><block_end>@classmethod<def_stmt>teardown_class cls<block_start>"""Delete all files in the test bucket.
"""<line_sep>storage=QiniuPrivateStorage(bucket_name=get_qiniu_config('QINIU_PRIVATE_BUCKET_NAME') bucket_domain=get_qiniu_config('QINIU_PRIVATE_BUCKET_DOMAIN') )<line_sep>auth=storage.auth<line_sep>bucket=BucketManager(auth)<while_stmt><true><block_start>ret,eof,info=bucket.list(storage.bucket_name limit=100)<if_stmt>ret<is><none><block_start>print(info)<line_sep><break><block_end><for_stmt>item ret['items']<block_start>name=item['key']<if_stmt>six.PY2<block_start>name=name.encode('utf-8')<block_end>ret,info=bucket.delete(storage.bucket_name name)<if_stmt>ret<is><none><block_start>print(info)<block_end><block_end><if_stmt>eof<block_start><break><block_end><block_end><block_end><block_end> |
"""!
@brief Neural Network: Self-Organized Feature Map
@details Implementation based on paper @cite article::nnet::som::1, @cite article::nnet::som::2.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""<import_stmt>math<import_stmt>random<import_stmt>matplotlib.pyplot<as>plt<import_stmt>pyclustering.core.som_wrapper<as>wrapper<import_from_stmt>pyclustering.core.wrapper ccore_library<import_from_stmt>pyclustering.utils euclidean_distance_square<import_from_stmt>pyclustering.utils.dimension dimension_info<import_from_stmt>enum IntEnum<class_stmt>type_conn(IntEnum)<block_start>"""!
@brief Enumeration of connection types for SOM.
@see som
"""<line_sep>## Grid type of connections when each oscillator has connections with left, upper, right, lower neighbors.
grid_four=0<line_sep>## Grid type of connections when each oscillator has connections with left, upper-left, upper, upper-right, right, right-lower, lower, lower-left neighbors.
grid_eight=1<line_sep>## Grid type of connections when each oscillator has connections with left, upper-left, upper-right, right, right-lower, lower-left neighbors.
honeycomb=2<line_sep>## Grid type of connections when existance of each connection is defined by the SOM rule on each step of simulation.
func_neighbor=3<block_end><class_stmt>type_init(IntEnum)<block_start>"""!
@brief Enumeration of initialization types for SOM.
@see som
"""<line_sep>## Weights are randomly distributed using Gaussian distribution (0, 1).
random=0<line_sep>## Weights are randomly distributed using Gaussian distribution (input data centroid, 1).
random_centroid=1<line_sep>## Weights are randomly distrbiuted using Gaussian distribution (input data centroid, surface of input data).
random_surface=2<line_sep>## Weights are distributed as a uniform grid that covers whole surface of the input data.
uniform_grid=3<block_end><class_stmt>som_parameters<block_start>"""!
@brief Represents SOM parameters.
"""<def_stmt>__init__ self<block_start>"""!
@brief Creates SOM parameters.
"""<line_sep>## Defines an initialization way for neuron weights (random, random in center of the input data, random distributed in data, ditributed in line with uniform grid).
self.init_type=type_init.uniform_grid<line_sep>## Initial radius. If the initial radius is not specified (equals to `None`) then it will be calculated by SOM.
self.init_radius=<none><line_sep>## Rate of learning.
self.init_learn_rate=0.1<line_sep>## Condition that defines when the learining process should be stopped. It is used when the autostop mode is on.
self.adaptation_threshold=0.001<line_sep>## Seed for random state (by default is `None`, current system time is used).
self.random_state=<none><block_end><block_end><class_stmt>som<block_start>"""!
@brief Represents self-organized feature map (SOM).
@details The self-organizing feature map (SOM) method is a powerful tool for the visualization of
of high-dimensional data. It converts complex, nonlinear statistical relationships between
high-dimensional data into simple geometric relationships on a low-dimensional display.
@details `ccore` option can be specified in order to control using C++ implementation of pyclustering library. By
default C++ implementation is on. C++ implementation improves performance of the self-organized feature
map.
Example:
@code
import random
from pyclustering.utils import read_sample
from pyclustering.nnet.som import som, type_conn, type_init, som_parameters
from pyclustering.samples.definitions import FCPS_SAMPLES
# read sample 'Lsun' from file
sample = read_sample(FCPS_SAMPLES.SAMPLE_LSUN)
# create SOM parameters
parameters = som_parameters()
# create self-organized feature map with size 7x7
rows = 10 # five rows
cols = 10 # five columns
structure = type_conn.grid_four; # each neuron has max. four neighbors.
network = som(rows, cols, structure, parameters)
# train network on 'Lsun' sample during 100 epouchs.
network.train(sample, 100)
# simulate trained network using randomly modified point from input dataset.
index_point = random.randint(0, len(sample) - 1)
point = sample[index_point] # obtain randomly point from data
point[0] += random.random() * 0.2 # change randomly X-coordinate
point[1] += random.random() * 0.2 # change randomly Y-coordinate
index_winner = network.simulate(point)
# check what are objects from input data are much close to randomly modified.
index_similar_objects = network.capture_objects[index_winner]
# neuron contains information of encoded objects
print("Point '%s' is similar to objects with indexes '%s'." % (str(point), str(index_similar_objects)))
print("Coordinates of similar objects:")
for index in index_similar_objects: print("\tPoint:", sample[index])
# result visualization:
# show distance matrix (U-matrix).
network.show_distance_matrix()
# show density matrix (P-matrix).
network.show_density_matrix()
# show winner matrix.
network.show_winner_matrix()
# show self-organized map.
network.show_network()
@endcode
There is a visualization of 'Target' sample that was done by the self-organized feature map:
@image html target_som_processing.png
"""<line_sep>@property<def_stmt>size self<block_start>"""!
@brief Return size of self-organized map that is defined by total number of neurons.
@return (uint) Size of self-organized map (number of neurons).
"""<if_stmt>self.__ccore_som_pointer<is><not><none><block_start>self._size=wrapper.som_get_size(self.__ccore_som_pointer)<block_end><return>self._size<block_end>@property<def_stmt>weights self<block_start>"""!
@brief Return weight of each neuron.
@return (list) Weights of each neuron.
"""<if_stmt>self.__ccore_som_pointer<is><not><none><block_start>self._weights=wrapper.som_get_weights(self.__ccore_som_pointer)<block_end><return>self._weights<block_end>@property<def_stmt>awards self<block_start>"""!
@brief Return amount of captured objects by each neuron after training.
@return (list) Amount of captured objects by each neuron.
@see train()
"""<if_stmt>self.__ccore_som_pointer<is><not><none><block_start>self._award=wrapper.som_get_awards(self.__ccore_som_pointer)<block_end><return>self._award<block_end>@property<def_stmt>capture_objects self<block_start>"""!
@brief Returns indexes of captured objects by each neuron.
@details For example, a network with size 2x2 has been trained on a sample with five objects. Suppose neuron #1
won an object with index `1`, neuron #2 won objects `0`, `3`, `4`, neuron #3 did not won anything and
finally neuron #4 won an object with index `2`. Thus, for this example we will have the following
output `[[1], [0, 3, 4], [], [2]]`.
@return (list) Indexes of captured objects by each neuron.
"""<if_stmt>self.__ccore_som_pointer<is><not><none><block_start>self._capture_objects=wrapper.som_get_capture_objects(self.__ccore_som_pointer)<block_end><return>self._capture_objects<block_end><def_stmt>__init__ self rows cols conn_type=type_conn.grid_eight parameters=<none> ccore=<true><block_start>"""!
@brief Constructor of self-organized map.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
@param[in] parameters (som_parameters): Other specific parameters.
@param[in] ccore (bool): If True simulation is performed by CCORE library (C++ implementation of pyclustering).
"""<line_sep># some of these parameters are required despite core implementation, for example, for network visualization.
self._cols=cols<line_sep>self._rows=rows<line_sep>self._size=cols<times>rows<line_sep>self._conn_type=conn_type<line_sep>self._data=<none><line_sep>self._neighbors=<none><line_sep>self._local_radius=0.0<line_sep>self._learn_rate=0.0<line_sep>self.__ccore_som_pointer=<none><line_sep>self._params=parameters<or>som_parameters()<if_stmt>self._params.init_radius<is><none><block_start>self._params.init_radius=self.__initialize_initial_radius(rows cols)<block_end><if_stmt>(ccore<is><true>)<and>ccore_library.workable()<block_start>self.__ccore_som_pointer=wrapper.som_create(rows cols conn_type self._params)<block_end><else_stmt># location
<block_start>self._location=self.__initialize_locations(rows cols)<line_sep># default weights
self._weights=[[0.0]]<times>self._size<line_sep># awards
self._award=[0]<times>self._size<line_sep># captured objects
self._capture_objects=[[]<for>i range(self._size)]<line_sep># distances - calculate and store them only during training
self._sqrt_distances=<none><line_sep># connections
<if_stmt>conn_type<ne>type_conn.func_neighbor<block_start>self._create_connections(conn_type)<block_end><block_end><block_end><def_stmt>__del__ self<block_start>"""!
@brief Destructor of the self-organized feature map.
"""<if_stmt>self.__ccore_som_pointer<is><not><none><block_start>wrapper.som_destroy(self.__ccore_som_pointer)<block_end><block_end><def_stmt>__len__ self<block_start>"""!
@brief Returns size of the network that defines by amount of neuron in it.
@return (uint) Size of self-organized map (amount of neurons).
"""<line_sep><return>self._size<block_end><def_stmt>__getstate__ self<block_start>"""
@brief Returns state of SOM network that can be used to store network.
"""<if_stmt>self.__ccore_som_pointer<is><not><none><block_start>self.__download_dump_from_ccore()<line_sep><return>self.__get_dump_from_python(<true>)<block_end><return>self.__get_dump_from_python(<false>)<block_end><def_stmt>__setstate__ self som_state<block_start>"""
@brief Set state of SOM network that can be used to load network.
"""<if_stmt>som_state['ccore']<is><true><and>ccore_library.workable()<block_start>self.__upload_dump_to_ccore(som_state['state'])<block_end><else_stmt><block_start>self.__upload_dump_to_python(som_state['state'])<block_end><block_end><def_stmt>__initialize_initial_radius self rows cols<block_start>"""!
@brief Initialize initial radius using map sizes.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@return (list) Value of initial radius.
"""<if_stmt>(cols+rows)/4.0<g>1.0<block_start><return>2.0<block_end><elif_stmt>(cols<g>1)<and>(rows<g>1)<block_start><return>1.5<block_end><else_stmt><block_start><return>1.0<block_end><block_end><def_stmt>__initialize_locations self rows cols<block_start>"""!
@brief Initialize locations (coordinates in SOM grid) of each neurons in the map.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@return (list) List of coordinates of each neuron in map.
"""<line_sep>location=list()<for_stmt>i range(rows)<block_start><for_stmt>j range(cols)<block_start>location.append([float(i) float(j)])<block_end><block_end><return>location<block_end><def_stmt>__initialize_distances self size location<block_start>"""!
@brief Initialize distance matrix in SOM grid.
@param[in] size (uint): Amount of neurons in the network.
@param[in] location (list): List of coordinates of each neuron in the network.
@return (list) Distance matrix between neurons in the network.
"""<line_sep>sqrt_distances=[[[]<for>i range(size)]<for>j range(size)]<for_stmt>i range(size)<block_start><for_stmt>j range(i size 1)<block_start>dist=euclidean_distance_square(location[i] location[j])<line_sep>sqrt_distances[i][j]=dist<line_sep>sqrt_distances[j][i]=dist<block_end><block_end><return>sqrt_distances<block_end><def_stmt>_create_initial_weights self init_type<block_start>"""!
@brief Creates initial weights for neurons in line with the specified initialization.
@param[in] init_type (type_init): Type of initialization of initial neuron weights (random, random in center of the input data, random distributed in data, ditributed in line with uniform grid).
"""<line_sep>dim_info=dimension_info(self._data)<line_sep>step_x=dim_info.get_center()[0]<if_stmt>self._rows<g>1<block_start>step_x=dim_info.get_width()[0]/(self._rows-1)<block_end>step_y=0.0<if_stmt>dim_info.get_dimensions()<g>1<block_start>step_y=dim_info.get_center()[1]<if_stmt>self._cols<g>1<block_start>step_y=dim_info.get_width()[1]/(self._cols-1)<block_end><block_end># generate weights (topological coordinates)
random.seed(self._params.random_state)<line_sep># Uniform grid.
<if_stmt>init_type<eq>type_init.uniform_grid# Predefined weights in line with input data.
<block_start>self._weights=[[[]<for>i range(dim_info.get_dimensions())]<for>j range(self._size)]<for_stmt>i range(self._size)<block_start>location=self._location[i]<for_stmt>dim range(dim_info.get_dimensions())<block_start><if_stmt>dim<eq>0<block_start><if_stmt>self._rows<g>1<block_start>self._weights[i][dim]=dim_info.get_minimum_coordinate()[dim]+step_x<times>location[dim]<block_end><else_stmt><block_start>self._weights[i][dim]=dim_info.get_center()[dim]<block_end><block_end><elif_stmt>dim<eq>1<block_start><if_stmt>self._cols<g>1<block_start>self._weights[i][dim]=dim_info.get_minimum_coordinate()[dim]+step_y<times>location[dim]<block_end><else_stmt><block_start>self._weights[i][dim]=dim_info.get_center()[dim]<block_end><block_end><else_stmt><block_start>self._weights[i][dim]=dim_info.get_center()[dim]<block_end><block_end><block_end><block_end><elif_stmt>init_type<eq>type_init.random_surface# Random weights at the full surface.
<block_start>self._weights=[[random.uniform(dim_info.get_minimum_coordinate()[i] dim_info.get_maximum_coordinate()[i])<for>i range(dim_info.get_dimensions())]<for>_ range(self._size)]<block_end><elif_stmt>init_type<eq>type_init.random_centroid# Random weights at the center of input data.
<block_start>self._weights=[[(random.random()+dim_info.get_center()[i])<for>i range(dim_info.get_dimensions())]<for>_ range(self._size)]<block_end><else_stmt># Random weights of input data.
<block_start>self._weights=[[random.random()<for>i range(dim_info.get_dimensions())]<for>_ range(self._size)]<block_end><block_end><def_stmt>_create_connections self conn_type<block_start>"""!
@brief Create connections in line with input rule (grid four, grid eight, honeycomb, function neighbour).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network.
"""<line_sep>self._neighbors=[[]<for>index range(self._size)]<for_stmt>index range(0 self._size 1)<block_start>upper_index=index-self._cols<line_sep>upper_left_index=index-self._cols-1<line_sep>upper_right_index=index-self._cols+1<line_sep>lower_index=index+self._cols<line_sep>lower_left_index=index+self._cols-1<line_sep>lower_right_index=index+self._cols+1<line_sep>left_index=index-1<line_sep>right_index=index+1<line_sep>node_row_index=math.floor(index/self._cols)<line_sep>upper_row_index=node_row_index-1<line_sep>lower_row_index=node_row_index+1<if_stmt>(conn_type<eq>type_conn.grid_eight)<or>(conn_type<eq>type_conn.grid_four)<block_start><if_stmt>upper_index<ge>0<block_start>self._neighbors[index].append(upper_index)<block_end><if_stmt>lower_index<l>self._size<block_start>self._neighbors[index].append(lower_index)<block_end><block_end><if_stmt>(conn_type<eq>type_conn.grid_eight)<or>(conn_type<eq>type_conn.grid_four)<or>(conn_type<eq>type_conn.honeycomb)<block_start><if_stmt>(left_index<ge>0)<and>(math.floor(left_index/self._cols)<eq>node_row_index)<block_start>self._neighbors[index].append(left_index)<block_end><if_stmt>(right_index<l>self._size)<and>(math.floor(right_index/self._cols)<eq>node_row_index)<block_start>self._neighbors[index].append(right_index)<block_end><block_end><if_stmt>conn_type<eq>type_conn.grid_eight<block_start><if_stmt>(upper_left_index<ge>0)<and>(math.floor(upper_left_index/self._cols)<eq>upper_row_index)<block_start>self._neighbors[index].append(upper_left_index)<block_end><if_stmt>(upper_right_index<ge>0)<and>(math.floor(upper_right_index/self._cols)<eq>upper_row_index)<block_start>self._neighbors[index].append(upper_right_index)<block_end><if_stmt>(lower_left_index<l>self._size)<and>(math.floor(lower_left_index/self._cols)<eq>lower_row_index)<block_start>self._neighbors[index].append(lower_left_index)<block_end><if_stmt>(lower_right_index<l>self._size)<and>(math.floor(lower_right_index/self._cols)<eq>lower_row_index)<block_start>self._ne
ighbors[index].append(lower_right_index)<block_end><block_end><if_stmt>conn_type<eq>type_conn.honeycomb<block_start><if_stmt>(node_row_index%2)<eq>0<block_start>upper_left_index=index-self._cols<line_sep>upper_right_index=index-self._cols+1<line_sep>lower_left_index=index+self._cols<line_sep>lower_right_index=index+self._cols+1<block_end><else_stmt><block_start>upper_left_index=index-self._cols-1<line_sep>upper_right_index=index-self._cols<line_sep>lower_left_index=index+self._cols-1<line_sep>lower_right_index=index+self._cols<block_end><if_stmt>(upper_left_index<ge>0)<and>(math.floor(upper_left_index/self._cols)<eq>upper_row_index)<block_start>self._neighbors[index].append(upper_left_index)<block_end><if_stmt>(upper_right_index<ge>0)<and>(math.floor(upper_right_index/self._cols)<eq>upper_row_index)<block_start>self._neighbors[index].append(upper_right_index)<block_end><if_stmt>(lower_left_index<l>self._size)<and>(math.floor(lower_left_index/self._cols)<eq>lower_row_index)<block_start>self._neighbors[index].append(lower_left_index)<block_end><if_stmt>(lower_right_index<l>self._size)<and>(math.floor(lower_right_index/self._cols)<eq>lower_row_index)<block_start>self._neighbors[index].append(lower_right_index)<block_end><block_end><block_end><block_end><def_stmt>_competition self x<block_start>"""!
@brief Calculates neuron winner (distance, neuron index).
@param[in] x (list): Input pattern from the input data set, for example it can be coordinates of point.
@return (uint) Returns index of neuron that is winner.
"""<line_sep>index=0<line_sep>minimum=euclidean_distance_square(self._weights[0] x)<for_stmt>i range(1 self._size 1)<block_start>candidate=euclidean_distance_square(self._weights[i] x)<if_stmt>candidate<l>minimum<block_start>index=i<line_sep>minimum=candidate<block_end><block_end><return>index<block_end><def_stmt>_adaptation self index x<block_start>"""!
@brief Change weight of neurons in line with won neuron.
@param[in] index (uint): Index of neuron-winner.
@param[in] x (list): Input pattern from the input data set.
"""<line_sep>dimension=len(self._weights[0])<if_stmt>self._conn_type<eq>type_conn.func_neighbor<block_start><for_stmt>neuron_index range(self._size)<block_start>distance=self._sqrt_distances[index][neuron_index]<if_stmt>distance<l>self._local_radius<block_start>influence=math.exp(-(distance/(2.0<times>self._local_radius)))<for_stmt>i range(dimension)<block_start>self._weights[neuron_index][i]=self._weights[neuron_index][i]+self._learn_rate<times>influence<times>(x[i]-self._weights[neuron_index][i])<block_end><block_end><block_end><block_end><else_stmt><block_start><for_stmt>i range(dimension)<block_start>self._weights[index][i]=self._weights[index][i]+self._learn_rate<times>(x[i]-self._weights[index][i])<block_end><for_stmt>neighbor_index self._neighbors[index]<block_start>distance=self._sqrt_distances[index][neighbor_index]<if_stmt>distance<l>self._local_radius<block_start>influence=math.exp(-(distance/(2.0<times>self._local_radius)))<for_stmt>i range(dimension)<block_start>self._weights[neighbor_index][i]=self._weights[neighbor_index][i]+self._learn_rate<times>influence<times>(x[i]-self._weights[neighbor_index][i])<block_end><block_end><block_end><block_end><block_end><def_stmt>train self data epochs autostop=<false><block_start>"""!
@brief Trains self-organized feature map (SOM).
@param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates.
@param[in] epochs (uint): Number of epochs for training.
@param[in] autostop (bool): Automatic termination of learning process when adaptation is not occurred.
@return (uint) Number of learning iterations.
"""<line_sep>self._data=data<if_stmt>self.__ccore_som_pointer<is><not><none><block_start><return>wrapper.som_train(self.__ccore_som_pointer data epochs autostop)<block_end>self._sqrt_distances=self.__initialize_distances(self._size self._location)<for_stmt>i range(self._size)<block_start>self._award[i]=0<line_sep>self._capture_objects[i].clear()<block_end># weights
self._create_initial_weights(self._params.init_type)<line_sep>previous_weights=<none><for_stmt>epoch range(1 epochs+1)# Depression term of coupling
<block_start>self._local_radius=(self._params.init_radius<times>math.exp(-(epoch/epochs)))<power>2<line_sep>self._learn_rate=self._params.init_learn_rate<times>math.exp(-(epoch/epochs))<line_sep># Clear statistics
<if_stmt>autostop<block_start><for_stmt>i range(self._size)<block_start>self._award[i]=0<line_sep>self._capture_objects[i].clear()<block_end><block_end><for_stmt>i range(len(self._data))# Step 1: Competition:
<block_start>index=self._competition(self._data[i])<line_sep># Step 2: Adaptation:
self._adaptation(index self._data[i])<line_sep># Update statistics
<if_stmt>(autostop<is><true>)<or>(epoch<eq>epochs)<block_start>self._award[index]<augadd>1<line_sep>self._capture_objects[index].append(i)<block_end><block_end># Check requirement of stopping
<if_stmt>autostop<block_start><if_stmt>previous_weights<is><not><none><block_start>maximal_adaptation=self._get_maximal_adaptation(previous_weights)<if_stmt>maximal_adaptation<l>self._params.adaptation_threshold<block_start><return>epoch<block_end><block_end>previous_weights=[item[:]<for>item self._weights]<block_end><block_end><return>epochs<block_end><def_stmt>simulate self input_pattern<block_start>"""!
@brief Processes input pattern (no learining) and returns index of neuron-winner.
Using index of neuron winner catched object can be obtained using property capture_objects.
@param[in] input_pattern (list): Input pattern.
@return (uint) Returns index of neuron-winner.
@see capture_objects
"""<if_stmt>self.__ccore_som_pointer<is><not><none><block_start><return>wrapper.som_simulate(self.__ccore_som_pointer input_pattern)<block_end><return>self._competition(input_pattern)<block_end><def_stmt>_get_maximal_adaptation self previous_weights<block_start>"""!
@brief Calculates maximum changes of weight in line with comparison between previous weights and current weights.
@param[in] previous_weights (list): Weights from the previous step of learning process.
@return (double) Value that represents maximum changes of weight after adaptation process.
"""<line_sep>dimension=len(self._data[0])<line_sep>maximal_adaptation=0.0<for_stmt>neuron_index range(self._size)<block_start><for_stmt>dim range(dimension)<block_start>current_adaptation=previous_weights[neuron_index][dim]-self._weights[neuron_index][dim]<if_stmt>current_adaptation<l>0<block_start>current_adaptation=-current_adaptation<block_end><if_stmt>maximal_adaptation<l>current_adaptation<block_start>maximal_adaptation=current_adaptation<block_end><block_end><block_end><return>maximal_adaptation<block_end><def_stmt>get_winner_number self<block_start>"""!
@brief Calculates number of winner at the last step of learning process.
@return (uint) Number of winner.
"""<if_stmt>self.__ccore_som_pointer<is><not><none><block_start>self._award=wrapper.som_get_awards(self.__ccore_som_pointer)<block_end>winner_number=0<for_stmt>i range(self._size)<block_start><if_stmt>self._award[i]<g>0<block_start>winner_number<augadd>1<block_end><block_end><return>winner_number<block_end><def_stmt>show_distance_matrix self<block_start>"""!
@brief Shows gray visualization of U-matrix (distance matrix).
@see get_distance_matrix()
"""<line_sep>distance_matrix=self.get_distance_matrix()<line_sep>plt.imshow(distance_matrix cmap=plt.get_cmap('hot') interpolation='kaiser')<line_sep>plt.title("U-Matrix")<line_sep>plt.colorbar()<line_sep>plt.show()<block_end><def_stmt>get_distance_matrix self<block_start>"""!
@brief Calculates distance matrix (U-matrix).
@details The U-Matrix visualizes based on the distance in input space between a weight vector and its neighbors on map.
@return (list) Distance matrix (U-matrix).
@see show_distance_matrix()
@see get_density_matrix()
"""<if_stmt>self.__ccore_som_pointer<is><not><none><block_start>self._weights=wrapper.som_get_weights(self.__ccore_som_pointer)<if_stmt>self._conn_type<ne>type_conn.func_neighbor<block_start>self._neighbors=wrapper.som_get_neighbors(self.__ccore_som_pointer)<block_end><block_end>distance_matrix=[[0.0]<times>self._cols<for>i range(self._rows)]<for_stmt>i range(self._rows)<block_start><for_stmt>j range(self._cols)<block_start>neuron_index=i<times>self._cols+j<if_stmt>self._conn_type<eq>type_conn.func_neighbor<block_start>self._create_connections(type_conn.grid_eight)<block_end><for_stmt>neighbor_index self._neighbors[neuron_index]<block_start>distance_matrix[i][j]<augadd>euclidean_distance_square(self._weights[neuron_index] self._weights[neighbor_index])<block_end>distance_matrix[i][j]<augdiv>len(self._neighbors[neuron_index])<block_end><block_end><return>distance_matrix<block_end><def_stmt>show_density_matrix self surface_divider=20.0<block_start>"""!
@brief Show density matrix (P-matrix) using kernel density estimation.
@param[in] surface_divider (double): Divider in each dimension that affect radius for density measurement.
@see show_distance_matrix()
"""<line_sep>density_matrix=self.get_density_matrix(surface_divider)<line_sep>plt.imshow(density_matrix cmap=plt.get_cmap('hot') interpolation='kaiser')<line_sep>plt.title("P-Matrix")<line_sep>plt.colorbar()<line_sep>plt.show()<block_end><def_stmt>get_density_matrix self surface_divider=20.0<block_start>"""!
@brief Calculates density matrix (P-Matrix).
@param[in] surface_divider (double): Divider in each dimension that affect radius for density measurement.
@return (list) Density matrix (P-Matrix).
@see get_distance_matrix()
"""<if_stmt>self.__ccore_som_pointer<is><not><none><block_start>self._weights=wrapper.som_get_weights(self.__ccore_som_pointer)<block_end>density_matrix=[[0]<times>self._cols<for>i range(self._rows)]<line_sep>dimension=len(self._weights[0])<line_sep>dim_max=[float('-Inf')]<times>dimension<line_sep>dim_min=[float('Inf')]<times>dimension<for_stmt>weight self._weights<block_start><for_stmt>index_dim range(dimension)<block_start><if_stmt>weight[index_dim]<g>dim_max[index_dim]<block_start>dim_max[index_dim]=weight[index_dim]<block_end><if_stmt>weight[index_dim]<l>dim_min[index_dim]<block_start>dim_min[index_dim]=weight[index_dim]<block_end><block_end><block_end>radius=[0.0]<times>len(self._weights[0])<for_stmt>index_dim range(dimension)<block_start>radius[index_dim]=(dim_max[index_dim]-dim_min[index_dim])/surface_divider<block_end>## TODO: do not use data
<for_stmt>point self._data<block_start><for_stmt>index_neuron range(len(self))<block_start>point_covered=<true><for_stmt>index_dim range(dimension)<block_start><if_stmt>abs(point[index_dim]-self._weights[index_neuron][index_dim])<g>radius[index_dim]<block_start>point_covered=<false><line_sep><break><block_end><block_end>row=int(math.floor(index_neuron/self._cols))<line_sep>col=index_neuron-row<times>self._cols<if_stmt>point_covered<is><true><block_start>density_matrix[row][col]<augadd>1<block_end><block_end><block_end><return>density_matrix<block_end><def_stmt>show_winner_matrix self<block_start>"""!
@brief Show a winner matrix where each element corresponds to neuron and value represents
amount of won objects from input data-space at the last training iteration.
@see show_distance_matrix()
"""<if_stmt>self.__ccore_som_pointer<is><not><none><block_start>self._award=wrapper.som_get_awards(self.__ccore_som_pointer)<block_end>(fig ax)=plt.subplots()<line_sep>winner_matrix=[[0]<times>self._cols<for>_ range(self._rows)]<for_stmt>i range(self._rows)<block_start><for_stmt>j range(self._cols)<block_start>neuron_index=i<times>self._cols+j<line_sep>winner_matrix[i][j]=self._award[neuron_index]<line_sep>ax.text(i j str(winner_matrix[i][j]) va='center' ha='center')<block_end><block_end>ax.imshow(winner_matrix cmap=plt.get_cmap('cool') interpolation='none')<line_sep>ax.grid(<true>)<line_sep>plt.title("Winner Matrix")<line_sep>plt.show()<line_sep>plt.close(fig)<block_end><def_stmt>show_network self awards=<false> belongs=<false> coupling=<true> dataset=<true> marker_type='o'<block_start>"""!
@brief Shows neurons in the dimension of data.
@param[in] awards (bool): If True - displays how many objects won each neuron.
@param[in] belongs (bool): If True - marks each won object by according index of neuron-winner (only when
dataset is displayed too).
@param[in] coupling (bool): If True - displays connections between neurons (except case when function neighbor
is used).
@param[in] dataset (bool): If True - displays inputs data set.
@param[in] marker_type (string): Defines marker that is used to denote neurons on the plot.
"""<if_stmt>self.__ccore_som_pointer<is><not><none><block_start>self._size=wrapper.som_get_size(self.__ccore_som_pointer)<line_sep>self._weights=wrapper.som_get_weights(self.__ccore_som_pointer)<line_sep>self._neighbors=wrapper.som_get_neighbors(self.__ccore_som_pointer)<line_sep>self._award=wrapper.som_get_awards(self.__ccore_som_pointer)<block_end>dimension=len(self._weights[0])<line_sep>fig=plt.figure()<line_sep># Check for dimensions
<if_stmt>(dimension<eq>1)<or>(dimension<eq>2)<block_start>axes=fig.add_subplot(111)<block_end><elif_stmt>dimension<eq>3<block_start>axes=fig.gca(projection='3d')<block_end><else_stmt><block_start><raise>NotImplementedError('Impossible to show network in data-space that is differ from 1D, 2D or 3D.')<block_end><if_stmt>(self._data<is><not><none>)<and>(dataset<is><true>)<block_start><for_stmt>x self._data<block_start><if_stmt>dimension<eq>1<block_start>axes.plot(x[0] 0.0 'b|' ms=30)<block_end><elif_stmt>dimension<eq>2<block_start>axes.plot(x[0] x[1] 'b.')<block_end><elif_stmt>dimension<eq>3<block_start>axes.scatter(x[0] x[1] x[2] c='b' marker='.')<block_end><block_end><block_end># Show neurons
<for_stmt>index range(self._size)<block_start>color='g'<if_stmt>self._award[index]<eq>0<block_start>color='y'<block_end><if_stmt>dimension<eq>1<block_start>axes.plot(self._weights[index][0] 0.0 color+marker_type)<if_stmt>awards<block_start>location='{0}'.format(self._award[index])<line_sep>axes.text(self._weights[index][0] 0.0 location color='black' fontsize=10)<block_end><if_stmt>belongs<and>self._data<is><not><none><block_start>location='{0}'.format(index)<line_sep>axes.text(self._weights[index][0] 0.0 location color='black' fontsize=12)<for_stmt>k range(len(self._capture_objects[index]))<block_start>point=self._data[self._capture_objects[index][k]]<line_sep>axes.text(point[0] 0.0 location color='blue' fontsize=10)<block_end><block_end><block_end><if_stmt>dimension<eq>2<block_start>axes.plot(self._weights[index][0] self._weights[index][1] color+marker_type)<if_stmt>awards<block_start>location='{0}'.format(self._award[index])<line_sep>axes.text(self._weights[index][0] self._weights[index][1] location color='black' fontsize=10)<block_end><if_stmt>belongs<and>self._data<is><not><none><block_start>location='{0}'.format(index)<line_sep>axes.text(self._weights[index][0] self._weights[index][1] location color='black' fontsize=12)<for_stmt>k range(len(self._capture_objects[index]))<block_start>point=self._data[self._capture_objects[index][k]]<line_sep>axes.text(point[0] point[1] location color='blue' fontsize=10)<block_end><block_end><if_stmt>(self._conn_type<ne>type_conn.func_neighbor)<and>(coupling<is><true>)<block_start><for_stmt>neighbor self._neighbors[index]<block_start><if_stmt>neighbor<g>index<block_start>axes.plot([self._weights[index][0] self._weights[neighbor][0]] [self._weights[index][1] self._weights[neighbor][1]] 'g' linewidth=0.5)<block_end><block_end><block_end><block_end><elif_stmt>dimension<eq>3<block_start>axes.scatter(self._weights[index][0] self._weights[index][1] self._weights[index][2] c=color 
marker=marker_type)<if_stmt>(self._conn_type<ne>type_conn.func_neighbor)<and>(coupling<ne><false>)<block_start><for_stmt>neighbor self._neighbors[index]<block_start><if_stmt>neighbor<g>index<block_start>axes.plot([self._weights[index][0] self._weights[neighbor][0]] [self._weights[index][1] self._weights[neighbor][1]] [self._weights[index][2] self._weights[neighbor][2]] 'g-' linewidth=0.5)<block_end><block_end><block_end><block_end><block_end>plt.title("Network Structure")<line_sep>plt.grid()<line_sep>plt.show()<line_sep>plt.close(fig)<block_end><def_stmt>__get_dump_from_python self ccore_usage<block_start><return>{'ccore':ccore_usage 'state':{'cols':self._cols 'rows':self._rows 'size':self._size 'conn_type':self._conn_type 'neighbors':self._neighbors 'local_radius':self._local_radius 'learn_rate':self._learn_rate 'params':self._params 'location':self._location 'weights':self._weights 'award':self._award 'capture_objects':self._capture_objects}}<block_end><def_stmt>__download_dump_from_ccore self<block_start>self._location=self.__initialize_locations(self._rows self._cols)<line_sep>self._weights=wrapper.som_get_weights(self.__ccore_som_pointer)<line_sep>self._award=wrapper.som_get_awards(self.__ccore_som_pointer)<line_sep>self._capture_objects=wrapper.som_get_capture_objects(self.__ccore_som_pointer)<block_end><def_stmt>__upload_common_part self state_dump<block_start>self._cols=state_dump['cols']<line_sep>self._rows=state_dump['rows']<line_sep>self._size=state_dump['size']<line_sep>self._conn_type=state_dump['conn_type']<line_sep>self._neighbors=state_dump['neighbors']<line_sep>self._local_radius=state_dump['local_radius']<line_sep>self._learn_rate=state_dump['learn_rate']<line_sep>self._params=state_dump['params']<line_sep>self._neighbors=<none><block_end><def_stmt>__upload_dump_to_python self 
state_dump<block_start>self.__ccore_som_pointer=<none><line_sep>self.__upload_common_part(state_dump)<line_sep>self._location=state_dump['location']<line_sep>self._weights=state_dump['weights']<line_sep>self._award=state_dump['award']<line_sep>self._capture_objects=state_dump['capture_objects']<line_sep>self._location=self.__initialize_locations(self._rows self._cols)<line_sep>self._create_connections(self._conn_type)<block_end><def_stmt>__upload_dump_to_ccore self state_dump<block_start>self.__upload_common_part(state_dump)<line_sep>self.__ccore_som_pointer=wrapper.som_create(self._rows self._cols self._conn_type self._params)<line_sep>wrapper.som_load(self.__ccore_som_pointer state_dump['weights'] state_dump['award'] state_dump['capture_objects'])<block_end><block_end> |
# -*- coding: utf-8 -*-
"""Cluster plotting tools"""

__author__ = ["<NAME>", "<NAME>"]
__all__ = ["plot_cluster_algorithm"]

import pandas as pd

from sktime.clustering.base._typing import NumpyOrDF
from sktime.clustering.base.base import BaseClusterer
from sktime.clustering.partitioning._lloyds_partitioning import (
    TimeSeriesLloydsPartitioning,
)
from sktime.datatypes._panel._convert import from_nested_to_2d_array
from sktime.utils.validation._dependencies import _check_soft_dependencies


def _plot(cluster_values, center, axes):
    """Draw every series of one cluster in blue and the cluster center in red on *axes*."""
    for cluster_series in cluster_values:
        axes.plot(cluster_series, color="b")
    axes.plot(center, color="r")
def plot_cluster_algorithm(model: BaseClusterer, predict_series: NumpyOrDF, k: int):
    """
    Plot the output of a clustering algorithm.

    Parameters
    ----------
    model: BaseClusterer
        Clustering model to plot
    predict_series: Numpy or Dataframe
        The series to predict the values for
    k: int
        Number of centers
    """
    _check_soft_dependencies("matplotlib")
    import matplotlib.patches as mpatches
    import matplotlib.pyplot as plt

    if isinstance(predict_series, pd.DataFrame):
        predict_series = from_nested_to_2d_array(predict_series, return_numpy=True)

    plt.figure(figsize=(5, 10))
    plt.rcParams["figure.dpi"] = 100

    indexes = model.predict(predict_series)
    centers = model.get_centers()
    series_values = TimeSeriesLloydsPartitioning.get_cluster_values(
        indexes, predict_series, k
    )

    fig, axes = plt.subplots(nrows=k, ncols=1)
    for cluster_index in range(k):
        _plot(series_values[cluster_index], centers[cluster_index], axes[cluster_index])

    blue_patch = mpatches.Patch(color="blue", label="Series that belong to the cluster")
    red_patch = mpatches.Patch(color="red", label="Cluster centers")
    plt.legend(
        handles=[red_patch, blue_patch],
        loc="upper center",
        bbox_to_anchor=(0.5, -0.40),
        fancybox=True,
        shadow=True,
        ncol=5,
    )
    plt.tight_layout()
    plt.show()
<import_from_stmt>pyramda.function.curry curry<import_from_stmt>pyramda.function.always always<import_from_stmt>pyramda.iterable.reduce reduce<import_from_stmt>.either either<line_sep>@curry<def_stmt>any_pass ps v<block_start><return>reduce(either always(<false>) ps)(v)<block_end> |
# Copyright 2017--2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
<import_stmt>itertools<import_stmt>glob<import_stmt>os.path<import_stmt>tempfile<import_stmt>mxnet<as>mx<import_stmt>pytest<import_stmt>sockeye.encoder<import_stmt>sockeye.model<import_stmt>sockeye.training<import_stmt>sockeye.constants<as>C<def_stmt>test_cleanup_param_files <block_start><with_stmt>tempfile.TemporaryDirectory()<as>tmp_dir<block_start><for_stmt>n itertools.chain(range(1 20 2) range(21 41))# Create empty files
def test_cleanup_param_files():
    with tempfile.TemporaryDirectory() as tmp_dir:
        for n in itertools.chain(range(1, 20, 2), range(21, 41)):
            # Create empty files
            open(os.path.join(tmp_dir, C.PARAMS_NAME % n), "w").close()
        sockeye.training.cleanup_params_files(tmp_dir, 5, 40, 17, False, 8, "perplexity", "best")

        expected_surviving = {os.path.join(tmp_dir, C.PARAMS_NAME % n)
                              for n in (17, 36, 37, 38, 39, 40)}
        # 17 must survive because it is the best one
        assert set(glob.glob(os.path.join(tmp_dir, C.PARAMS_PREFIX + "*"))) == expected_surviving


def test_cleanup_param_files_keep_first():
    with tempfile.TemporaryDirectory() as tmp_dir:
        for n in itertools.chain(range(0, 20, 2), range(21, 41)):
            # Create empty files
            open(os.path.join(tmp_dir, C.PARAMS_NAME % n), "w").close()
        sockeye.training.cleanup_params_files(tmp_dir, 5, 40, 16, True, 8, "perplexity", "best")

        expected_surviving = {os.path.join(tmp_dir, C.PARAMS_NAME % n)
                              for n in (0, 16, 36, 37, 38, 39, 40)}
        # 16 must survive because it is the best one
        # 0 should also survive because we set keep_first to True
        assert set(glob.glob(os.path.join(tmp_dir, C.PARAMS_PREFIX + "*"))) == expected_surviving


def mock_model():
    """Build a tiny SockeyeModel suitable for the parameter-handling tests below."""
    config_embed = sockeye.encoder.EmbeddingConfig(vocab_size=20, num_embed=4, dropout=0.0)
    config_encoder = sockeye.encoder.EncoderConfig(model_size=4,
                                                   attention_heads=1,
                                                   feed_forward_num_hidden=4,
                                                   act_type='relu',
                                                   num_layers=1,
                                                   dropout_attention=0.0,
                                                   dropout_act=0.0,
                                                   dropout_prepost=0.0,
                                                   positional_embedding_type='fixed',
                                                   preprocess_sequence='none',
                                                   postprocess_sequence='none',
                                                   max_seq_len_source=30,
                                                   max_seq_len_target=30)
    config = sockeye.model.ModelConfig(config_data=None,
                                       vocab_source_size=20,
                                       vocab_target_size=20,
                                       config_embed_source=config_embed,
                                       config_embed_target=config_embed,
                                       config_encoder=config_encoder,
                                       config_decoder=config_encoder)
    return sockeye.model.SockeyeModel(config=config)


def test_set_parameters():
    model = mock_model()
    model.initialize(init='xavier', ctx=mx.cpu(0))
    p = mx.gluon.Parameter('source_target_embed_weight', shape=(20, 4))
    p.initialize(init='xavier', ctx=mx.cpu(0))
    model.set_parameters({'source_target_embed_weight': p})
    assert mx.test_utils.same(model.params['source_target_embed_weight'].data(), p.data())


def test_set_parameters_allow_missing():
    model = mock_model()
    model.initialize(init='xavier', ctx=mx.cpu(0))
    model.set_parameters({}, allow_missing=True)
    assert 'source_target_embed_weight' in model.params
    with pytest.raises(AssertionError) as e:
        model.set_parameters({}, allow_missing=False)
    assert str(e.value) == "Parameter 'source_target_embed_weight' is missing in new_params dictionary. " \
                           "Set allow_missing=True to ignore missing parameters."


def test_set_parameters_ignore_extra():
    model = mock_model()
    model.initialize(init='xavier', ctx=mx.cpu(0))
    p = mx.gluon.Parameter('source_target_embed_weight', shape=(20, 4))
    p.initialize(init='xavier', ctx=mx.cpu(0))
    q = mx.gluon.Parameter('q', shape=(1, 1))
    q.initialize(init='xavier', ctx=mx.cpu(0))
    params = {'source_target_embed_weight': p, 'q': q}
    model.set_parameters(params, ignore_extra=True)
    assert 'source_target_embed_weight' in model.params
    assert 'q' not in model.params
    with pytest.raises(ValueError) as e:
        model.set_parameters(params, ignore_extra=False)
    assert str(e.value) == "Parameter 'q' in new_params dictionary is not preset in ParameterDict. " \
                           "Set ignore_extra=True to ignore."


def test_set_parameters_context():
    model = mock_model()
    model.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])
    p = mx.gluon.Parameter('source_target_embed_weight', shape=(20, 4))
    p.initialize(init='xavier', ctx=mx.cpu(2))
    model.set_parameters({'source_target_embed_weight': p})
    for i in range(2):
        assert mx.test_utils.same(model.params['source_target_embed_weight'].data(mx.cpu(i)),
                                  p.data(mx.cpu(2)))


def test_set_parameters_shape():
    model = mock_model()
    model.initialize(init='xavier', ctx=mx.cpu(0))
    p = mx.gluon.Parameter('source_target_embed_weight', shape=(10, 10))
    p.initialize(init='xavier', ctx=mx.cpu(0))
    with pytest.raises(AssertionError) as e:
        model.set_parameters({'source_target_embed_weight': p})
    assert str(e.value) == "Parameter 'source_target_embed_weight' has shape '(20, 4)' in the model but shape " \
                           "'(10, 10)' in the new_params dictionary."


def test_set_parameters_uninitialized():
    model = mock_model()
    model.initialize(init='xavier', ctx=mx.cpu(0))
    p = mx.gluon.Parameter('source_target_embed_weight', shape=(20, 4))
    with pytest.raises(AssertionError) as e:
        model.set_parameters({'source_target_embed_weight': p})
    assert str(e.value) == "Parameter 'source_target_embed_weight' is not initialized in new_params dictionary."
    p.initialize(init='xavier', ctx=mx.cpu(0))
    model = mock_model()
    with pytest.raises(AssertionError) as e:
        model.set_parameters({'source_target_embed_weight': p})
    assert str(e.value) == "Parameter 'source_target_embed_weight' must be initialized before it can be reset using " \
                           "set_parameters."
<import_stmt>random<import_stmt>requests<import_stmt>time<line_sep>HOSTS=['us-east-1' 'us-west-1' 'eu-west-1' ]<line_sep>VEHICLES=['bike' 'scooter' 'car' ]<if_stmt>__name__<eq>"__main__"<block_start>print(f"starting load generator")<line_sep>time.sleep(15)<line_sep>print('done sleeping')<while_stmt><true><block_start>host=HOSTS[random.randint(0 len(HOSTS)-1)]<line_sep>vehicle=VEHICLES[random.randint(0 len(VEHICLES)-1)]<line_sep>print(f"requesting {vehicle} from {host}")<line_sep>resp=requests.get(f'http://web:8000/{vehicle}')<line_sep>print(f"received {resp}")<line_sep>time.sleep(random.uniform(0.2 0.4))<block_end><block_end> |
<import_from_stmt>slugify slugify<import_from_stmt>app.db.errors EntityDoesNotExist<import_from_stmt>app.db.repositories.articles ArticlesRepository<import_from_stmt>app.models.domain.articles Article<import_from_stmt>app.models.domain.users User<async_keyword><def_stmt>check_article_exists articles_repo:ArticlesRepository slug:str<arrow>bool<block_start><try_stmt><block_start><await>articles_repo.get_article_by_slug(slug=slug)<block_end><except_stmt>EntityDoesNotExist<block_start><return><false><block_end><return><true><block_end><def_stmt>get_slug_for_article title:str<arrow>str<block_start><return>slugify(title)<block_end><def_stmt>check_user_can_modify_article article:Article user:User<arrow>bool<block_start><return>article.author.username<eq>user.username<block_end> |
from unittest import TestCase

from schemer import Schema, Array, ValidationException

from dusty.schemas.base_schema_class import DustySchema, DustySpecs
from ...testcases import DustyTestCase


class TestDustySchemaClass(TestCase):
    def setUp(self):
        # A flat schema and a nested one that embeds it, to exercise defaults.
        self.base_schema = Schema({'street': {'type': basestring},
                                   'house_number': {'type': int, 'default': 1}})
        self.bigger_schema = Schema({'address': {'type': self.base_schema, 'default': {}},
                                     'first_name': {'type': basestring, 'required': True},
                                     'last_name': {'type': basestring, 'default': 'johnson'}})

    def test_init_invalid_doc(self):
        doc = {'street': 'dogstoon', 'house_number': '1'}
        with self.assertRaises(ValidationException):
            DustySchema(self.base_schema, doc)

    def test_valid_doc(self):
        doc = {'street': 'dogstoon', 'house_number': 1}
        dusty_schema = DustySchema(self.base_schema, doc)
        self.assertEquals(dusty_schema['street'], 'dogstoon')
        self.assertEquals(dusty_schema['house_number'], 1)

    def test_setting_defaults(self):
        dusty_schema = DustySchema(self.base_schema, {'street': 'dogstoon'})
        self.assertEquals(dusty_schema['street'], 'dogstoon')
        self.assertEquals(dusty_schema['house_number'], 1)

    def test_setting_defaults_more_complicated_1(self):
        dusty_schema = DustySchema(self.bigger_schema, {'first_name': 'dusty'})
        self.assertEquals(dusty_schema['first_name'], 'dusty')
        self.assertEquals(dusty_schema['last_name'], 'johnson')
        self.assertEquals(dusty_schema['address'], {'house_number': 1})

    def test_setting_defaults_more_complicated_2(self):
        doc = {'first_name': 'dusty', 'address': {'street': 'dogstoon'}}
        dusty_schema = DustySchema(self.bigger_schema, doc)
        self.assertEquals(dusty_schema['address']['street'], 'dogstoon')
        self.assertEquals(dusty_schema['address']['house_number'], 1)

    def test_in_1(self):
        doc = {'first_name': 'dusty', 'address': {'street': 'dogstoon'}}
        dusty_schema = DustySchema(self.bigger_schema, doc)
        self.assertTrue('first_name' in dusty_schema)

    def test_in_2(self):
        doc = {'first_name': 'dusty', 'address': {'street': 'dogstoon'}}
        dusty_schema = DustySchema(self.bigger_schema, doc)
        self.assertFalse('first_names' in dusty_schema)

    def test_keys(self):
        dusty_schema = DustySchema(self.base_schema, {'street': 'dogstoon', 'house_number': 1})
        self.assertEquals(set(['street', 'house_number']), set(dusty_schema.keys()))

    def test_values(self):
        dusty_schema = DustySchema(self.base_schema, {'street': 'dogstoon', 'house_number': 1})
        self.assertEquals(set(['dogstoon', 1]), set(dusty_schema.values()))


class TestDustySpecsClass(DustyTestCase):
    def test_finds_app_or_lib(self):
        specs = DustySpecs(self.temp_specs_path)
        self.assertEquals(specs.get_app_or_lib('app-a'), specs['apps']['app-a'])
        self.assertEquals(specs.get_app_or_lib('lib-a'), specs['libs']['lib-a'])

    def test_raises_without_app_or_lib(self):
        specs = DustySpecs(self.temp_specs_path)
        with self.assertRaises(KeyError):
            specs.get_app_or_lib('non-existant-thingy')

    def test_get_app_or_service(self):
        specs = DustySpecs(self.temp_specs_path)
        self.assertEquals(specs.get_app_or_service('app-a'), specs['apps']['app-a'])
        self.assertEquals(specs.get_app_or_service('service-a'), specs['services']['service-a'])
<import_stmt>torch<import_from_stmt>torch.autograd Function<class_stmt>Identity(Function)<block_start>@staticmethod<def_stmt>forward ctx x name<block_start>ctx.name=name<line_sep><return>x.clone()<block_end><def_stmt>backward ctx grad<block_start><import_stmt>pydevd<line_sep>pydevd.settrace(suspend=<false> trace_only_current_thread=<true>)<line_sep>grad_temp=grad.clone()<line_sep><return>grad_temp <none><block_end><block_end> |
import FWCore.ParameterSet.Config as cms

# This modifier replaces the default pattern recognition with mkFit for tobTecStep
trackingMkFitTobTecStep = cms.Modifier()
from plugin.scrobbler.core import SessionEngine, SessionHandler


@SessionEngine.register
class PlayingHandler(SessionHandler):
    """Session handler reacting to 'playing' events."""

    __event__ = 'playing'

    __src__ = ['create', 'pause', 'stop', 'start']
    __dst__ = ['start', 'stop']

    @classmethod
    def process(cls, session, payload):
        # Handle media change
        if cls.has_media_changed(session, payload) and session.state in ['start', 'pause']:
            yield 'stop', session.payload

        # Handle current media
        if cls.has_finished(session, payload):
            if session.state in ['start', 'pause']:
                yield 'stop', payload
        elif session.state in ['create', 'pause', 'stop']:
            yield 'start', payload
        elif session.state == 'start':
            yield None, payload
# Placeholder for adding logic specific to application
# and backend key store.
#
<import_stmt>os<import_stmt>json<import_stmt>sys<import_from_stmt>azure.identity DefaultAzureCredential<import_from_stmt>azure.keyvault.keys KeyClient<import_from_stmt>azure.keyvault.keys.crypto CryptographyClient EncryptionAlgorithm<line_sep># Append the current application path to sys path to be able to resolve local modules.
#
sys.path.append('.')<line_sep>sys.path.append('./model')<import_from_stmt>constants ConfigurationConstants Operations CryptoConstants<import_stmt>utils<import_from_stmt>json_objects EncryptDecryptRequest JsonWebKeyResponse EncryptDecryptResponse<def_stmt>decrypt request json_key_attributes_dict pin version<block_start>"""
def decrypt(request, json_key_attributes_dict, pin, version):
    """
    Called by the application entry point to decrypt the payload.

    request.value holds the ciphertext payload.
    request.alg contains the padding algorithm for decryption.
    """
    set_env(json_key_attributes_dict, pin)
    credential = DefaultAzureCredential()

    key_vault_key = get_akv_key(json_key_attributes_dict, credential)
    crypto_client = CryptographyClient(key_vault_key, credential=credential)

    decrypted_payload = crypto_client.decrypt(EncryptionAlgorithm.rsa_oaep, request.value)
    return EncryptDecryptResponse(decrypted_payload.plaintext)
def encrypt(request, json_key_attributes_dict, pin, version):
    """
    Called by the application entry point to encrypt the payload.

    request.value holds the plaintext payload.
    request.alg contains the padding algorithm for encryption.
    """
    set_env(json_key_attributes_dict, pin)
    credential = DefaultAzureCredential()

    key_vault_key = get_akv_key(json_key_attributes_dict, credential)
    crypto_client = CryptographyClient(key_vault_key, credential=credential)

    encrypted_payload = crypto_client.encrypt(EncryptionAlgorithm.rsa_oaep, request.value)
    return EncryptDecryptResponse(encrypted_payload.ciphertext)
# However AKV SDK already provides byte arrays for Exponent and Modulus.
# We will instantiate the object with a dummy value and then overwrite the
# exponent and module value.
#
dummy_val=1<line_sep>key_response=JsonWebKeyResponse(1 1)<line_sep>key_response.e=utils.urlsafe_b64encode_as_str(key_vault_key.key.e)<line_sep>key_response.n=utils.urlsafe_b64encode_as_str(key_vault_key.key.n)<line_sep><return>key_response<block_end><def_stmt>get_akv_key json_key_attributes_dict credential<block_start>"""
def get_akv_key(json_key_attributes_dict, credential):
    """
    Gets the AKV key object.
    """
    # Validate all required parameters up front; messages match the originals.
    for parameter in ('vault_url', 'keyname', 'keyversion'):
        if parameter not in json_key_attributes_dict:
            raise KeyError('%s was expected in the parameters but not found' % parameter)

    vault_url = json_key_attributes_dict['vault_url']
    key_name = json_key_attributes_dict['keyname']
    key_version = json_key_attributes_dict['keyversion']

    key_client = KeyClient(vault_url=vault_url, credential=credential)
    return key_client.get_key(key_name, key_version)
def set_env(json_key_attributes_dict, pin):
    """
    Sets the environment variables needed for the MS identity credential lookup.

    Raises:
        KeyError: when 'azure_client_id' or 'azure_tenant_id' is missing from
            json_key_attributes_dict.
    """
    # Fix: the original assigned both lookups to the same misnamed, unused local
    # ('key_version'); validate presence directly instead.
    for required in ('azure_client_id', 'azure_tenant_id'):
        if required not in json_key_attributes_dict:
            raise KeyError('%s was expected in the parameters but not found' % required)

    os.environ["AZURE_CLIENT_ID"] = json_key_attributes_dict["azure_client_id"]
    os.environ["AZURE_TENANT_ID"] = json_key_attributes_dict["azure_tenant_id"]
    os.environ["AZURE_CLIENT_SECRET"] = pin
# Standard Library imports
# Core Django imports
# Third-party imports
<import_from_stmt>rest_framework permissions<import_from_stmt>rest_framework.throttling UserRateThrottle AnonRateThrottle<line_sep># App imports
class BurstRateThrottle(UserRateThrottle):
    # Short-window limit; the rate comes from the 'burst' scope in settings.
    scope = 'burst'


class SustainedRateThrottle(UserRateThrottle):
    # Long-window limit; the rate comes from the 'sustained' scope in settings.
    scope = 'sustained'


class HighAnonThrottle(AnonRateThrottle):
    # Effectively unthrottled anonymous access.
    rate = '5000000/day'


class AccountCreation(permissions.BasePermission):
    """ A user should be able to create an account without being authenticated, but only the
    owner of an account should be able to access that account's data in a GET method.
    """

    def has_permission(self, request, view):
        # Anyone may POST (create an account); every other method requires auth.
        # (fix: collapsed the if/return-True/return-False into a direct boolean return)
        return request.method == "POST" or bool(request.user.is_authenticated)
# Generated by Django 2.2.4 on 2019-09-02 11:59
<import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[migrations.swappable_dependency(settings.AUTH_USER_MODEL) ('communications' '0001_initial') ]<line_sep>operations=[migrations.AddField(model_name='message' name='author' field=models.ForeignKey(default=1 on_delete=django.db.models.deletion.CASCADE related_name='author_messages' to=settings.AUTH_USER_MODEL) preserve_default=<false> ) migrations.AddField(model_name='message' name='friend' field=models.ForeignKey(default=1 on_delete=django.db.models.deletion.CASCADE related_name='friend_messages' to=settings.AUTH_USER_MODEL) preserve_default=<false> ) ]<block_end> |
<import_stmt>os<import_from_stmt>sys platform<def_stmt>say_beep n:int<block_start><for_stmt>i range(0 n)<block_start><if_stmt>platform<eq>"darwin"<block_start>os.system("say beep")<block_end><block_end><block_end> |
<import_stmt>json<import_from_stmt>argparse ArgumentParser ArgumentDefaultsHelpFormatter FileType<import_from_stmt>pathlib Path<def_stmt>main <block_start>parser=ArgumentParser(description='Collect markdown files, and write JSON.' formatter_class=ArgumentDefaultsHelpFormatter)<line_sep>project_path=Path(__file__).parent.parent.parent.parent<line_sep>parser.add_argument('--source' type=Path default=project_path/'html'/'tutorials')<line_sep>parser.add_argument('--target' type=FileType('w') default=str(project_path/'html'/'src'/'tutorials.json'))<line_sep>args=parser.parse_args()<line_sep>tutorials={}<line_sep># source_file: Path
<for_stmt>source_file args.source.rglob('*.md')<block_start>name=str(source_file.relative_to(args.source).with_suffix(''))<if_stmt>name<eq>'README'<block_start><continue><block_end>source=source_file.read_text()<line_sep>tutorials[name]=source<block_end>json.dump(tutorials args.target)<block_end>main()<line_sep> |
# Alias to SDK PyTorch utils
<import_from_stmt>aim.sdk.adapters.pytorch track_params_dists track_gradients_dists# noqa
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
<import_from_stmt>. _AlibabaCloud<class_stmt>_Analytics(_AlibabaCloud)<block_start>_type="analytics"<line_sep>_icon_dir="resources/alibabacloud/analytics"<block_end><class_stmt>AnalyticDb(_Analytics)<block_start>_icon="analytic-db.png"<block_end><class_stmt>ClickHouse(_Analytics)<block_start>_icon="click-house.png"<block_end><class_stmt>DataLakeAnalytics(_Analytics)<block_start>_icon="data-lake-analytics.png"<block_end><class_stmt>ElaticMapReduce(_Analytics)<block_start>_icon="elatic-map-reduce.png"<block_end><class_stmt>OpenSearch(_Analytics)<block_start>_icon="open-search.png"<block_end># Aliases
|
print('hi')<line_sep><raise>Exception('fibble-fah')<line_sep> |
<import_stmt>sys<import_stmt>time<import_stmt>digitalocean<import_stmt>subprocess<def_stmt>test_ssh drop<block_start>droplet_ip_address=drop.ip_address<line_sep>result=subprocess.call(f"ssh -o StrictHostKeyChecking=no root@{droplet_ip_address} ls" shell=<true>)<if_stmt>result<eq>0<block_start><return><true><block_end><return><false><block_end>TOKEN_FILE="/srv/secrets-newsblur/keys/digital_ocean.token"<line_sep>droplet_name=sys.argv[1]<with_stmt>open(TOKEN_FILE)<as>f<block_start>token=f.read().strip()<block_end>manager=digitalocean.Manager(token=token)<line_sep>timeout=180<line_sep>timer=0<line_sep>ssh_works=<false><while_stmt><not>ssh_works<block_start><if_stmt>timer<g>timeout<block_start><raise>Exception(f"The {droplet_name} droplet was not created.")<block_end>droplets=[drop<for>drop manager.get_all_droplets()<if>drop.name<eq>droplet_name]<if_stmt>droplets<block_start>droplet=droplets[0]<line_sep>print(f"Found the {droplet_name} droplet. IP address is {droplet.ip_address}. Testing ssh...")<line_sep>ssh_works=test_ssh(droplet)<block_end>time.sleep(3)<line_sep>timer<augadd>3<block_end>print("Success!")<line_sep> |
<import_from_future_stmt> absolute_import<import_from_future_stmt> print_function<import_stmt>os<import_stmt>sys<import_from_stmt>conversion_imagenet TestModels<import_from_stmt>conversion_imagenet is_paddle_supported<def_stmt>get_test_table <block_start><return>{'paddle':{'resnet50':[TestModels.onnx_emit #TestModels.caffe_emit,
#TestModels.cntk_emit,
TestModels.coreml_emit TestModels.keras_emit TestModels.mxnet_emit TestModels.pytorch_emit TestModels.tensorflow_emit] 'resnet101':[#TestModels.onnx_emit,
#TestModels.caffe_emit,
#TestModels.cntk_emit,
TestModels.coreml_emit TestModels.keras_emit TestModels.mxnet_emit TestModels.pytorch_emit TestModels.tensorflow_emit] 'vgg16':[TestModels.onnx_emit #TestModels.caffe_emit,
#TestModels.cntk_emit,
#TestModels.coreml_emit,
#TestModels.keras_emit,
#TestModels.mxnet_emit,
#TestModels.pytorch_emit,
#TestModels.tensorflow_emit
] }}<block_end><def_stmt>test_paddle <block_start><if_stmt><not>is_paddle_supported()<block_start><return><block_end># omit tensorflow lead to crash
<import_stmt>tensorflow<as>tf<line_sep>test_table=get_test_table()<line_sep>tester=TestModels(test_table)<line_sep>tester._test_function('paddle' tester.paddle_parse)<block_end><if_stmt>__name__<eq>'__main__'<block_start>test_paddle()<block_end> |
<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>brancher.variables ProbabilisticModel<import_from_stmt>brancher.standard_variables NormalVariable DeterministicVariable LogNormalVariable<import_stmt>brancher.functions<as>BF<import_from_stmt>brancher.visualizations plot_density<import_from_stmt>brancher.transformations PlanarFlow<import_from_stmt>brancher inference<import_from_stmt>brancher.visualizations plot_posterior<line_sep># Model
M=8<line_sep>y=NormalVariable(torch.zeros((M )) 1.<times>torch.ones((M )) "y")<line_sep>y0=DeterministicVariable(y[1] "y0")<line_sep>d=NormalVariable(y torch.ones((M )) "d")<line_sep>model=ProbabilisticModel([d y y0])<line_sep># get samples
d.observe(d.get_sample(55 input_values={y:1.<times>torch.ones((M ))}))<line_sep># Variational distribution
u1=DeterministicVariable(torch.normal(0. 1. (M 1)) "u1" learnable=<true>)<line_sep>w1=DeterministicVariable(torch.normal(0. 1. (M 1)) "w1" learnable=<true>)<line_sep>b1=DeterministicVariable(torch.normal(0. 1. (1 1)) "b1" learnable=<true>)<line_sep>u2=DeterministicVariable(torch.normal(0. 1. (M 1)) "u2" learnable=<true>)<line_sep>w2=DeterministicVariable(torch.normal(0. 1. (M 1)) "w2" learnable=<true>)<line_sep>b2=DeterministicVariable(torch.normal(0. 1. (1 1)) "b2" learnable=<true>)<line_sep>z=NormalVariable(torch.zeros((M 1)) torch.ones((M 1)) "z" learnable=<true>)<line_sep>Qy=PlanarFlow(w2 u2 b2)(PlanarFlow(w1 u1 b1)(z))<line_sep>Qy.name="y"<line_sep>Qy0=DeterministicVariable(Qy[1] "y0")<line_sep>#Qy._get_sample(4)[Qy].shape
variational_model=ProbabilisticModel([Qy Qy0])<line_sep>model.set_posterior_model(variational_model)<line_sep># Inference #
inference.perform_inference(model number_iterations=400 number_samples=100 optimizer="Adam" lr=0.5)<line_sep>loss_list1=model.diagnostics["loss curve"]<line_sep>#Plot posterior
plot_posterior(model variables=["y0"])<line_sep>plt.show()<line_sep># Variational distribution
Qy=NormalVariable(torch.zeros((M )) 0.5<times>torch.ones((M )) "y" learnable=<true>)<line_sep>Qy0=DeterministicVariable(Qy[1] "y0")<line_sep>variational_model=ProbabilisticModel([Qy Qy0])<line_sep>model.set_posterior_model(variational_model)<line_sep># Inference #
inference.perform_inference(model number_iterations=400 number_samples=100 optimizer="Adam" lr=0.01)<line_sep>loss_list2=model.diagnostics["loss curve"]<line_sep>#Plot posterior
plot_posterior(model variables=["y0"])<line_sep>plt.show()<line_sep>plt.plot(loss_list1)<line_sep>plt.plot(loss_list2)<line_sep>plt.show()<line_sep> |
<import_stmt>heterocl<as>hcl<line_sep>hcl.init()<line_sep>target=hcl.Platform.xilinx_zc706<line_sep>initiation_interval=4<line_sep>a=hcl.placeholder((10 20) name="a")<line_sep>b=hcl.placeholder((10 20) name="b")<line_sep>c=hcl.placeholder((10 20) name="c")<line_sep>d=hcl.placeholder((10 20) name="d")<line_sep>e=hcl.placeholder((10 20) name="e")<def_stmt>add_mul a b c d e<block_start>@hcl.def_([a.shape b.shape c.shape])<def_stmt>ret_add a b c<block_start><with_stmt>hcl.for_(0 a.shape[0])<as>i<block_start><with_stmt>hcl.for_(0 a.shape[1])<as>j<block_start>c[i j]=a[i j]+b[i j]<block_end><block_end><block_end>@hcl.def_([c.shape d.shape e.shape])<def_stmt>ret_mul c d e# hcl.update(c, lambda x, y: a[x, y] * b[x, y], 'c_mul')
<block_start><with_stmt>hcl.for_(0 c.shape[0])<as>i<block_start><with_stmt>hcl.for_(0 c.shape[1])<as>j<block_start>e[i j]=c[i j]<times>d[i j]<block_end><block_end><block_end>ret_add(a b c)<line_sep>ret_mul(c d e)<block_end># compute customization
s=hcl.create_schedule([a b c d e] add_mul)<line_sep># op1 = add_mul.ret_add.c
# op2 = add_mul.ret_mul.c
# s[op1].pipeline(op1.axis[0], initiation_interval)
# stream into modules / device
a0,b0=s.to([a b] target.xcel)<line_sep>d0=s.to(d target.xcel)<line_sep>#s.partition(b0, dim=2, factor=2)
s.to([a0 b0] s[add_mul.ret_add])<line_sep>s.to(d0 s[add_mul.ret_mul])<line_sep># within device move producer to consumer
s.to(c s[add_mul.ret_mul] s[add_mul.ret_add] depth=10)<line_sep># return tensor for inter-device move
# e0 = s.stream_to(e, hcl.CPU('riscv'))
# print(add_mul.ret_mul._buf, c._buf)
print(hcl.lower(s))<line_sep>code=hcl.build(s target)<line_sep>print(code)<line_sep>#
# with open("example.cl", "w") as f:
# f.write(code)
# f.close()
|
<import_stmt>pytest<import_from_stmt>qcodes.instrument.parameter Parameter<import_from_stmt>qcodes.instrument.sweep_values SweepValues<import_from_stmt>qcodes.utils.validators Numbers<line_sep>@pytest.fixture(name='c0')<def_stmt>_make_c0 <block_start>c0=Parameter('c0' vals=Numbers(-10 10) get_cmd=<none> set_cmd=<none>)<line_sep><yield>c0<block_end>@pytest.fixture(name='c1')<def_stmt>_make_c1 <block_start>c1=Parameter('c1' get_cmd=<none> set_cmd=<none>)<line_sep><yield>c1<block_end>@pytest.fixture(name='c2')<def_stmt>_make_c2 <block_start>c2=Parameter('c2' get_cmd=<lambda>:42)<line_sep><yield>c2<block_end><def_stmt>test_errors c0 c1 c2# only complete 3-part slices are valid
<block_start><with_stmt>pytest.raises(TypeError)<block_start>c0[1:2]# For Int params this could be defined as step=1
<block_end><with_stmt>pytest.raises(TypeError)<block_start>c0[:2:3]<block_end><with_stmt>pytest.raises(TypeError)<block_start>c0[1::3]<block_end><with_stmt>pytest.raises(TypeError)<block_start>c0[:]<block_end># For Enum params we *could* define this one too...
# fails if the parameter has no setter
<with_stmt>pytest.raises(TypeError)<block_start>c2[0:0.1:0.01]<block_end># validates every step value against the parameter's Validator
<with_stmt>pytest.raises(ValueError)<block_start>c0[5:15:1]<block_end><with_stmt>pytest.raises(ValueError)<block_start>c0[5.0:15.0:1.0]<block_end><with_stmt>pytest.raises(ValueError)<block_start>c0[-12]<block_end><with_stmt>pytest.raises(ValueError)<block_start>c0[-5 12 5]<block_end><with_stmt>pytest.raises(ValueError)<block_start>c0[-5 12:8:1 5]<block_end># cannot combine SweepValues for different parameters
<with_stmt>pytest.raises(TypeError)<block_start>c0[0.1]+c1[0.2]<block_end># improper use of extend
<with_stmt>pytest.raises(TypeError)<block_start>c0[0.1].extend(5)<block_end># SweepValue object has no getter, even if the parameter does
<with_stmt>pytest.raises(AttributeError)<block_start>c0[0.1].get<block_end><block_end><def_stmt>test_valid c0<block_start>c0_sv=c0[1]<line_sep># setter gets mapped
<assert_stmt>c0_sv.set<eq>c0.set<line_sep># normal sequence operations access values
<assert_stmt>list(c0_sv)<eq>[1]<assert_stmt>c0_sv[0]<eq>1<assert_stmt>1<in>c0_sv<assert_stmt><not>(2<in>c0_sv)<line_sep># in-place and copying addition
c0_sv<augadd>c0[1.5:1.8:0.1]<line_sep>c0_sv2=c0_sv+c0[2]<assert_stmt>list(c0_sv)<eq>[1 1.5 1.6 1.7]<assert_stmt>list(c0_sv2)<eq>[1 1.5 1.6 1.7 2]<line_sep># append and extend
c0_sv3=c0[2]<line_sep># append only works with straight values
c0_sv3.append(2.1)<line_sep># extend can use another SweepValue, (even if it only has one value)
c0_sv3.extend(c0[2.2])<line_sep># extend can also take a sequence
c0_sv3.extend([2.3])<line_sep># as can addition
c0_sv3<augadd>[2.4]<line_sep>c0_sv4=c0_sv3+[2.5 2.6]<assert_stmt>list(c0_sv3)<eq>[2 2.1 2.2 2.3 2.4]<assert_stmt>list(c0_sv4)<eq>[2 2.1 2.2 2.3 2.4 2.5 2.6]<line_sep># len
<assert_stmt>len(c0_sv3)<eq>5<line_sep># in-place and copying reverse
c0_sv.reverse()<line_sep>c0_sv5=reversed(c0_sv)<assert_stmt>list(c0_sv)<eq>[1.7 1.6 1.5 1]<assert_stmt>list(c0_sv5)<eq>[1 1.5 1.6 1.7]<line_sep># multi-key init, where first key is itself a list
c0_sv6=c0[[1 3] 4]<line_sep># copying
c0_sv7=c0_sv6.copy()<assert_stmt>list(c0_sv6)<eq>[1 3 4]<assert_stmt>list(c0_sv7)<eq>[1 3 4]<assert_stmt><not>(c0_sv6<is>c0_sv7)<block_end><def_stmt>test_base <block_start>p=Parameter('p' get_cmd=<none> set_cmd=<none>)<with_stmt>pytest.raises(NotImplementedError)<block_start>iter(SweepValues(p))<block_end><block_end><def_stmt>test_snapshot c0<block_start><assert_stmt>c0[0].snapshot()<eq>{'parameter':c0.snapshot() 'values':[{'item':0}]}<assert_stmt>c0[0:5:0.3].snapshot()['values']<eq>[{'first':0 'last':4.8 'num':17 'type':'linear'}]<line_sep>sv=c0.sweep(start=2 stop=4 num=5)<assert_stmt>sv.snapshot()['values']<eq>[{'first':2 'last':4 'num':5 'type':'linear'}]<line_sep># mixture of bare items, nested lists, and slices
sv=c0[1 7 3.2 [1 2 3] 6:9:1 -4.5 5.3]<assert_stmt>sv.snapshot()['values']<eq>[{'first':1 'last':5.3 'min':-4.5 'max':8 'num':11 'type':'sequence'}]<assert_stmt>(c0[0]+c0[1]).snapshot()['values']<eq>[{'item':0} {'item':1}]<assert_stmt>(c0[0:3:1]+c0[4 6 9]).snapshot()['values']<eq>[{'first':0 'last':2 'num':3 'type':'linear'} {'first':4 'last':9 'min':4 'max':9 'num':3 'type':'sequence'}]<block_end><def_stmt>test_repr c0<block_start>sv=c0[0]<assert_stmt>repr(sv)<eq>(f'<qcodes.instrument.sweep_values.SweepFixedValues: c0 at {id(sv)}>')<block_end> |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""<import_stmt>platform<line_sep>__is_windows_system=platform.platform().lower().startswith('window')<line_sep>__is_linux_system=platform.platform().lower().startswith('linux')<if_stmt>__is_windows_system<block_start><import_from_stmt>.demo_windows.PlatformWeTest PlatformWeTest<import_from_stmt>.demo_windows.common.AdbTool AdbTool<block_end><elif_stmt>__is_linux_system<block_start><import_from_stmt>.demo_ubuntu16.PlatformWeTest PlatformWeTest<import_from_stmt>.demo_ubuntu16.common.AdbTool AdbTool<block_end><else_stmt><block_start><raise>Exception('system is not support!')<block_end><def_stmt>GetInstance <block_start><return>PlatformWeTest()<block_end> |
<import_stmt>pickle<import_from_stmt>io BytesIO StringIO<import_from_stmt>kombu.utils.div emergency_dump_state<class_stmt>MyStringIO(StringIO)<block_start><def_stmt>close self<block_start><pass><block_end><block_end><class_stmt>MyBytesIO(BytesIO)<block_start><def_stmt>close self<block_start><pass><block_end><block_end><class_stmt>test_emergency_dump_state<block_start><def_stmt>test_dump self stdouts<block_start>fh=MyBytesIO()<line_sep>stderr=StringIO()<line_sep>emergency_dump_state({'foo':'bar'} open_file=<lambda>n m:fh stderr=stderr)<assert_stmt>pickle.loads(fh.getvalue())<eq>{'foo':'bar'}<assert_stmt>stderr.getvalue()<assert_stmt><not>stdouts.stdout.getvalue()<block_end><def_stmt>test_dump_second_strategy self stdouts<block_start>fh=MyStringIO()<line_sep>stderr=StringIO()<def_stmt>raise_something *args **kwargs<block_start><raise>KeyError('foo')<block_end>emergency_dump_state({'foo':'bar'} open_file=<lambda>n m:fh dump=raise_something stderr=stderr )<assert_stmt>'foo'<in>fh.getvalue()<assert_stmt>'bar'<in>fh.getvalue()<assert_stmt>stderr.getvalue()<assert_stmt><not>stdouts.stdout.getvalue()<block_end><block_end> |
<class_stmt>TransportContext(object)<block_start>""" The System.Net.TransportContext class provides additional context about the underlying transport layer. """<def_stmt>GetChannelBinding self kind<block_start>"""
GetChannelBinding(self: TransportContext,kind: ChannelBindingKind) -> ChannelBinding
Retrieves the requested channel binding.
kind: The type of channel binding to retrieve.
Returns: The requested System.Security.Authentication.ExtendedProtection.ChannelBinding,or null if the
channel binding is not supported by the current transport or by the operating system.
"""<line_sep><pass><block_end><def_stmt>GetTlsTokenBindings self<block_start>""" GetTlsTokenBindings(self: TransportContext) -> IEnumerable[TokenBinding] """<line_sep><pass><block_end><block_end> |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet input pipeline.
"""<import_stmt>os<import_stmt>pickle<import_stmt>jax<import_stmt>numpy<as>np<import_stmt>tensorflow.compat.v1<as>tf<import_stmt>tensorflow_datasets<as>tfds<line_sep>TRAIN_IMAGES=1281167<line_sep>TEST_IMAGES=50000<line_sep>MEAN_RGB=[0.485<times>255 0.456<times>255 0.406<times>255]<line_sep>STDDEV_RGB=[0.229<times>255 0.224<times>255 0.225<times>255]<def_stmt>normalize_image image<block_start>image<augsub>tf.constant(MEAN_RGB shape=[1 1 3] dtype=image.dtype)<line_sep>image<augdiv>tf.constant(STDDEV_RGB shape=[1 1 3] dtype=image.dtype)<line_sep><return>image<block_end><def_stmt>random_crop image min_object_covered=0.1 aspect_ratio_range=(0.75 1.33) area_range=(0.05 1.0) max_attempts=100 <block_start>"""Randomly crop an input image.
Args:
image: The image to be cropped.
min_object_covered: The minimal percentage of the target object that should
be in the final crop.
aspect_ratio_range: The cropped area of the image must have an aspect
ratio = width / height within this range.
area_range: The cropped area of the image must contain a fraction of the
input image within this range.
max_attempts: Number of attempts at generating a cropped region of the image
of the specified constraints. After max_attempts failures,
the original image is returned.
Returns:
A random crop of the supplied image.
"""<line_sep>bbox=tf.constant([0.0 0.0 1.0 1.0] dtype=tf.float32 shape=[1 1 4])<line_sep>sample_distorted_bounding_box=tf.image.sample_distorted_bounding_box(tf.shape(image) bounding_boxes=bbox min_object_covered=min_object_covered aspect_ratio_range=aspect_ratio_range area_range=area_range max_attempts=max_attempts use_image_if_no_bounding_boxes=<true>)<line_sep>bbox_begin,bbox_size,_=sample_distorted_bounding_box<line_sep>offset_y,offset_x,_=tf.unstack(bbox_begin)<line_sep>target_height,target_width,_=tf.unstack(bbox_size)<line_sep>crop=tf.image.crop_to_bounding_box(image offset_y offset_x target_height target_width)<line_sep><return>crop<block_end><def_stmt>center_crop image image_size crop_padding=32<block_start>"""Crop an image in the center while preserving aspect ratio.
Args:
image: The image to be cropped.
image_size: the desired crop size.
crop_padding: minimal distance of the crop from the edge of the image.
Returns:
The center crop of the provided image.
"""<line_sep>shape=tf.shape(image)<line_sep>image_height=shape[0]<line_sep>image_width=shape[1]<line_sep>padded_center_crop_size=tf.cast(((image_size/(image_size+crop_padding))<times>tf.cast(tf.minimum(image_height image_width) tf.float32)) tf.int32)<line_sep>offset_height=((image_height-padded_center_crop_size)+1)<floordiv>2<line_sep>offset_width=((image_width-padded_center_crop_size)+1)<floordiv>2<line_sep>crop=tf.image.crop_to_bounding_box(image offset_height offset_width padded_center_crop_size padded_center_crop_size)<line_sep><return>crop<block_end><def_stmt>colour_jitter image greyscale_prob=0.0<block_start>"""Colour jitter augmentation.
Args:
image: The image to be augmented
greyscale_prob: probability of greyscale conversion
Returns:
Augmented image
"""<line_sep># Make sure it has 3 channels so random_saturation and random_hue don't
# fail on greyscale images
image=image<times>tf.ones([1 1 3] dtype=image.dtype)<if_stmt>greyscale_prob<g>0.0<block_start><def_stmt>f_grey <block_start><return>tf.image.rgb_to_grayscale(image)<block_end><def_stmt>f_colour <block_start>image_col=tf.image.random_saturation(image 0.7 1.4)<line_sep>image_col=tf.image.random_hue(image_col 0.1)<line_sep><return>image_col<block_end>p=tf.random.uniform([1])<line_sep>image=tf.cond(tf.less(p[0] greyscale_prob) f_grey f_colour)<block_end><else_stmt><block_start>image=tf.image.random_saturation(image 0.7 1.4)<line_sep>image=tf.image.random_hue(image 0.1)<block_end>image=tf.image.random_contrast(image 0.7 1.4)<line_sep>image=tf.image.random_brightness(image 0.4)<line_sep><return>image<block_end><def_stmt>preprocess_train_image image apply_colour_jitter=<false> greyscale_prob=0.0 image_size=224<block_start>"""Preprocess a raw ImageNet image for training or evaluation.
Args:
image: The image to be preprocessed.
apply_colour_jitter: If True, apply colour jitterring.
greyscale_prob: Probability of converting image to greyscale.
image_size: The target size of the image.
Returns:
The pre-processed image.
"""<line_sep>image=random_crop(image)<line_sep>image=tf.image.resize([image] [image_size image_size] method=tf.image.ResizeMethod.BICUBIC)[0]<line_sep># Randomly flip the image horizontally.
image=tf.image.random_flip_left_right(image)<if_stmt>apply_colour_jitter<block_start>image=colour_jitter(image greyscale_prob=greyscale_prob)<block_end>image=normalize_image(image)<line_sep><return>image<block_end><def_stmt>preprocess_eval_image image image_size=224<block_start>"""Preprocess a raw ImageNet image for training or evaluation.
Args:
image: The image to be preprocessed.
image_size: The target size of the image.
Returns:
The pre-processed image.
"""<line_sep>image=center_crop(image image_size)<line_sep>image=tf.image.resize([image] [image_size image_size] method=tf.image.ResizeMethod.BICUBIC)[0]<line_sep>image=normalize_image(image)<line_sep><return>image<block_end>_JPEG_ENCODED_FEATURE_DESCRIPTION={'label':tf.io.FixedLenFeature([] tf.int64 default_value=0) 'image':tf.io.FixedLenFeature([] tf.string) 'file_name':tf.io.FixedLenFeature([] tf.string) }<def_stmt>_filter_tfds_by_file_name in_ds subset_filenames<block_start>kv_init=tf.lookup.KeyValueTensorInitializer(np.array(subset_filenames) np.ones((len(subset_filenames) ) dtype=int) key_dtype=tf.string value_dtype=tf.int64)<line_sep>ht=tf.lookup.StaticHashTable(kv_init 0)<def_stmt>pred_fn x<block_start><return>tf.equal(ht.lookup(x['file_name']) 1)<block_end><return>in_ds.filter(pred_fn)<block_end><def_stmt>_deserialize_and_decode_jpeg serialized_sample<block_start>sample=tf.io.parse_single_example(serialized_sample _JPEG_ENCODED_FEATURE_DESCRIPTION)<line_sep>sample['image']=tf.io.decode_jpeg(sample['image'])<line_sep><return>sample<block_end><def_stmt>_deserialize_sample serialized_sample<block_start><return>tf.io.parse_example(serialized_sample _JPEG_ENCODED_FEATURE_DESCRIPTION)<block_end><def_stmt>_decode_jpeg sample<block_start>image=tf.io.decode_jpeg(sample['image'])<line_sep><return>dict(label=sample['label'] file_name=sample['file_name'] image=image)<block_end><def_stmt>deserialize_and_decode_image_dataset ds batch_size<block_start><if_stmt>batch_size<is><not><none><and>batch_size<g>1<block_start><return>ds.batch(batch_size).map(_deserialize_sample num_parallel_calls=tf.data.experimental.AUTOTUNE).unbatch().map(_decode_jpeg num_parallel_calls=tf.data.experimental.AUTOTUNE)<block_end><else_stmt><block_start><return>ds.map(_deserialize_and_decode_jpeg num_parallel_calls=tf.data.experimental.AUTOTUNE)<block_end><block_end><def_stmt>_load_tfds_imagenet split_name n_total<block_start>"""Load ImageNet from 
TFDS."""<line_sep>split_size=float(n_total)<floordiv>jax.host_count()<line_sep>start=split_size<times>jax.host_id()<line_sep>end=start+split_size<line_sep>start_index=int(round(start))<line_sep>end_index=int(round(end))<line_sep>split='{}[{}:{}]'.format(split_name start_index end_index)<line_sep><return>tfds.load('imagenet2012:5.*.*' split=split)<block_end><def_stmt>_load_custom_imagenet_split split_path<block_start>"""Load a custom split of the ImageNet dataset."""<if_stmt><not>tf.io.gfile.exists(split_path)<block_start><raise>RuntimeError('Cannot find {}'.format(split_path))<block_end>shard_filenames=tf.io.gfile.listdir(split_path)<line_sep>shard_filenames.sort()<if_stmt>jax.host_count()<g>1<block_start>n_hosts=jax.host_count()<line_sep>host_id=jax.host_id()<line_sep>shard_filenames=[f<for>i,f enumerate(shard_filenames)<if>(i%n_hosts)<eq>host_id]<block_end>files_in_split=[os.path.join(split_path f)<for>f shard_filenames]<line_sep>ds=tf.data.TFRecordDataset(files_in_split buffer_size=128<times>1024<times>1024 num_parallel_reads=len(files_in_split))<line_sep># ds = deserialize_and_decode_image_dataset(ds, batch_size=256)
ds=deserialize_and_decode_image_dataset(ds batch_size=1)<line_sep><return>ds<block_end>_SUP_PATH_PAT=r'{imagenet_subset_dir}/imagenet_{n_sup}_seed{subset_seed}'<line_sep>_VAL_TVSPLIT_PATH_PAT=r'{imagenet_subset_dir}/imagenet_tv{n_val}s{val_seed}_split.pkl'<line_sep>_VAL_PATH_PAT=r'{imagenet_subset_dir}/imagenet_tv{n_val}s{val_seed}_val'<line_sep>_VAL_SUP_PATH_PAT=r'{imagenet_subset_dir}/imagenet_tv{n_val}s{val_seed}_{n_sup}_seed{subset_seed}'<class_stmt>ImageNetDataSource(object)<block_start>"""ImageNet data source.
Attributes:
n_train: number of training samples
n_sup: number of supervised samples
n_val: number of validation samples
n_test: number of test samples
train_semisup_ds: Semi-supervised training dataset
train_unsup_ds: Unsupervised training dataset
train_sup_ds: Supervised training dataset
val_ds: Validation dataset
test_ds: Test dataset
n_classes: Number of classes
"""<def_stmt>__init__ self imagenet_subset_dir n_val n_sup train_batch_size eval_batch_size augment_twice apply_colour_jitter=<false> greyscale_prob=0.0 load_test_set=<true> image_size=224 subset_seed=12345 val_seed=131<block_start><if_stmt>n_val<eq>0# We are using the complete ImageNet training set for traininig
# No samples are being held out for validation
# Draw unsupervised samples from complete training set
<block_start>train_unsup_ds=_load_tfds_imagenet('train' TRAIN_IMAGES)<line_sep>self.n_train=TRAIN_IMAGES<if_stmt>n_sup<eq>-1<or>n_sup<eq>TRAIN_IMAGES# All training samples are supervised
<block_start>train_sup_ds=train_unsup_ds<line_sep>self.n_sup=TRAIN_IMAGES<block_end><else_stmt><block_start>sup_path=_SUP_PATH_PAT.format(imagenet_subset_dir=imagenet_subset_dir n_sup=n_sup subset_seed=subset_seed)<line_sep>train_sup_ds=_load_custom_imagenet_split(sup_path)<line_sep>self.n_sup=n_sup<block_end>val_ds=<none><line_sep>self.n_val=0<block_end><else_stmt># A validation set has been requested
# Load the pickle file that tells us which file names are train / val
<block_start>tvsplit_path=_VAL_TVSPLIT_PATH_PAT.format(imagenet_subset_dir=imagenet_subset_dir n_val=n_val val_seed=val_seed)<with_stmt>tf.io.gfile.GFile(tvsplit_path 'rb')<as>f_tvsplit<block_start>tvsplit=pickle.load(f_tvsplit)<block_end>train_fn=tvsplit['train_fn']<line_sep># Filter the dataset to select samples in the training set
trainval_ds=_load_tfds_imagenet('train' TRAIN_IMAGES)<line_sep>train_unsup_ds=_filter_tfds_by_file_name(trainval_ds train_fn)<line_sep>self.n_train=len(train_fn)<line_sep># Load the validation set from a custom dataset
val_path=_VAL_PATH_PAT.format(imagenet_subset_dir=imagenet_subset_dir n_val=n_val val_seed=val_seed)<line_sep>val_ds=_load_custom_imagenet_split(val_path)<line_sep>self.n_val=n_val<if_stmt>n_sup<eq>-1<or>n_sup<eq>len(train_fn)# All training samples are supervised
<block_start>train_sup_ds=train_unsup_ds<line_sep>self.n_sup=len(train_fn)<block_end><else_stmt><block_start>sup_path=_VAL_SUP_PATH_PAT.format(imagenet_subset_dir=imagenet_subset_dir n_val=n_val val_seed=val_seed n_sup=n_sup subset_seed=subset_seed)<line_sep>train_sup_ds=_load_custom_imagenet_split(sup_path)<line_sep>self.n_sup=n_sup<block_end><block_end>train_sup_ds=train_sup_ds.repeat()<line_sep>train_sup_ds=train_sup_ds.shuffle(8<times>train_batch_size)<line_sep>train_unsup_ds=train_unsup_ds.repeat()<line_sep>train_unsup_ds=train_unsup_ds.shuffle(8<times>train_batch_size)<line_sep>train_semisup_ds=tf.data.Dataset.zip((train_sup_ds train_unsup_ds))<def_stmt>_augment_sup sup_sample<block_start>"""Augment supervised sample."""<line_sep>sample={'sup_image':preprocess_train_image(sup_sample['image'] apply_colour_jitter=apply_colour_jitter greyscale_prob=greyscale_prob image_size=image_size) 'sup_label':sup_sample['label'] }<line_sep><return>sample<block_end><def_stmt>_augment_unsup_once unsup_sample<block_start>"""Augment unsupervised sample, single augmentation."""<line_sep>unsup_x0=preprocess_train_image(unsup_sample['image'] apply_colour_jitter=apply_colour_jitter greyscale_prob=greyscale_prob image_size=image_size)<line_sep>sample={'unsup_image0':unsup_x0 'unsup_image1':unsup_x0 }<line_sep><return>sample<block_end><def_stmt>_augment_unsup_twice unsup_sample<block_start>"""Augment unsupervised sample, two augmentations."""<line_sep>sample={'unsup_image0':preprocess_train_image(unsup_sample['image'] apply_colour_jitter=apply_colour_jitter greyscale_prob=greyscale_prob image_size=image_size) 'unsup_image1':preprocess_train_image(unsup_sample['image'] apply_colour_jitter=apply_colour_jitter greyscale_prob=greyscale_prob image_size=image_size) }<line_sep><return>sample<block_end><def_stmt>_augment_semisup_once sup_sample unsup_sample<block_start>"""Augment semi-supervised sample, single augmentation."""<line_sep>unsup_x0=preprocess_train_image(unsup_sample['image'] 
apply_colour_jitter=apply_colour_jitter greyscale_prob=greyscale_prob image_size=image_size)<line_sep>semisup_sample={'sup_image':preprocess_train_image(sup_sample['image'] apply_colour_jitter=apply_colour_jitter greyscale_prob=greyscale_prob image_size=image_size) 'sup_label':sup_sample['label'] 'unsup_image0':unsup_x0 'unsup_image1':unsup_x0 }<line_sep><return>semisup_sample<block_end><def_stmt>_augment_semisup_twice sup_sample unsup_sample<block_start>"""Augment semi-supervised sample, two augmentations."""<line_sep>semisup_sample={'sup_image':preprocess_train_image(sup_sample['image'] apply_colour_jitter=apply_colour_jitter greyscale_prob=greyscale_prob image_size=image_size) 'sup_label':sup_sample['label'] 'unsup_image0':preprocess_train_image(unsup_sample['image'] apply_colour_jitter=apply_colour_jitter greyscale_prob=greyscale_prob image_size=image_size) 'unsup_image1':preprocess_train_image(unsup_sample['image'] apply_colour_jitter=apply_colour_jitter greyscale_prob=greyscale_prob image_size=image_size) }<line_sep><return>semisup_sample<block_end><def_stmt>_process_eval_sample x<block_start>"""Pre-process evaluation sample."""<line_sep>image=preprocess_eval_image(x['image'] image_size=image_size)<line_sep>batch={'image':image 'label':x['label']}<line_sep><return>batch<block_end><if_stmt>augment_twice<block_start>train_semisup_ds=train_semisup_ds.map(_augment_semisup_twice num_parallel_calls=128)<line_sep>train_unsup_only_ds=train_unsup_ds.map(_augment_unsup_twice num_parallel_calls=128)<block_end><else_stmt><block_start>train_semisup_ds=train_semisup_ds.map(_augment_semisup_once num_parallel_calls=128)<line_sep>train_unsup_only_ds=train_unsup_ds.map(_augment_unsup_once num_parallel_calls=128)<block_end>train_sup_only_ds=train_sup_ds.map(_augment_sup num_parallel_calls=128)<line_sep>train_semisup_ds=train_semisup_ds.batch(train_batch_size drop_remainder=<true>)<line_sep>train_unsup_only_ds=train_unsup_only_ds.batch(train_batch_size 
drop_remainder=<true>)<line_sep>train_sup_only_ds=train_sup_only_ds.batch(train_batch_size drop_remainder=<true>)<line_sep>train_semisup_ds=train_semisup_ds.prefetch(10)<line_sep>train_unsup_only_ds=train_unsup_only_ds.prefetch(10)<line_sep>train_sup_only_ds=train_sup_only_ds.prefetch(10)<line_sep>self.train_semisup_ds=train_semisup_ds<line_sep>self.train_unsup_ds=train_unsup_only_ds<line_sep>self.train_sup_ds=train_sup_only_ds<line_sep>#
# Validation set
#
<if_stmt>n_val<g>0<block_start>val_ds=val_ds.cache()<line_sep>val_ds=val_ds.map(_process_eval_sample num_parallel_calls=128)<line_sep>val_ds=val_ds.batch(eval_batch_size)<line_sep>val_ds=val_ds.repeat()<line_sep>val_ds=val_ds.prefetch(10)<line_sep>self.val_ds=val_ds<block_end><else_stmt><block_start>self.val_ds=<none><block_end><if_stmt>load_test_set#
# Test set
#
<block_start>test_ds=_load_tfds_imagenet('validation' TEST_IMAGES)<line_sep>test_ds=test_ds.cache()<line_sep>test_ds=test_ds.map(_process_eval_sample num_parallel_calls=128)<line_sep>test_ds=test_ds.batch(eval_batch_size)<line_sep>test_ds=test_ds.repeat()<line_sep>test_ds=test_ds.prefetch(10)<line_sep>self.test_ds=test_ds<line_sep>self.n_test=TEST_IMAGES<block_end><else_stmt><block_start>self.test_ds=<none><line_sep>self.n_test=0<block_end>self.n_classes=1000<block_end><block_end> |
<import_stmt>time<import_stmt>urllib<import_stmt>random<import_stmt>logging<import_stmt>requests<import_stmt>datetime<import_from_stmt>os sys path makedirs<import_from_stmt>PyQt5.QtCore Qt QTimer QDateTime<import_from_stmt>PyQt5.QtWidgets QWidget QPushButton QVBoxLayout QLabel QComboBox QDateTimeEdit QCheckBox QLineEdit QRadioButton<line_sep>root_path=path.dirname(path.dirname(path.abspath(__file__)))<import_from_stmt>StockAnalysisSystem.core.Utility.common *<import_from_stmt>StockAnalysisSystem.core.Utility.ui_utility *<import_from_stmt>StockAnalysisSystem.core.Utility.task_queue *<import_from_stmt>StockAnalysisSystem.core.Utility.time_utility *<import_from_stmt>StockAnalysisSystem.ui.Utility.ui_context UiContext<import_from_stmt>StockAnalysisSystem.interface.interface SasInterface<as>sasIF<import_from_stmt>StockAnalysisSystem.core.Utility.securities_selector SecuritiesSelector<line_sep># 20200217: It doesn't work anymore - Move to recycled
# -------------------------------------------- class AnnouncementDownloader --------------------------------------------
# -----------------------------------------------------------
# Get code from : https://github.com/gaodechen/cninfo_process
# -----------------------------------------------------------
User_Agent=["Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)" "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)" "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)" "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)" "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1" "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0"]<line_sep>headers={'Accept':'application/json, text/javascript, */*; q=0.01' "Content-Type":"application/x-www-form-urlencoded; charset=UTF-8" "Accept-Encoding":"gzip, deflate" "Accept-Language":"zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-HK;q=0.6,zh-TW;q=0.5" 'Host':'www.cninfo.com.cn' 'Origin':'http://www.cninfo.com.cn' 'Referer':'http://www.cninfo.com.cn/new/commonUrl?url=disclosure/list/notice' 'X-Requested-With':'XMLHttpRequest'}<class_stmt>AnnouncementDownloader<block_start><def_stmt>__init__ self<block_start><pass><block_end>@staticmethod<def_stmt>format_query_time_range time_range:any<arrow>str<block_start><if_stmt>time_range<is><none><block_start><return>AnnouncementDownloader.format_query_time_range((years_ago(3) now()))<block_end><if_stmt>isinstance(time_range str)<block_start><return>time_range<block_end><if_stmt>isinstance(time_range datetime.datetime)<block_start><return>AnnouncementDownloader.format_query_time_range((time_range time_range))<block_end><if_stmt><not>isinstance(time_range (tuple 
list))<block_start><return>AnnouncementDownloader.format_query_time_range(<none>)<block_end><if_stmt>len(time_range)<eq>0<block_start><return>AnnouncementDownloader.format_query_time_range(<none>)<block_end><if_stmt>len(time_range)<eq>1<block_start><return>AnnouncementDownloader.format_query_time_range((time_range[0] time_range[0]))<block_end>since=time_range[0]<line_sep>until=time_range[1]<line_sep><return>'%s+~+%s'%(since.strftime('%Y-%m-%d') until.strftime('%Y-%m-%d'))<block_end>@staticmethod<def_stmt>get_szse_annual_report_pages page:int stock:str time_range:any=<none><block_start>query_path='http://www.cninfo.com.cn/new/hisAnnouncement/query'<line_sep>headers['User-Agent']=random.choice(User_Agent)# 定义User_Agent
time_range=AnnouncementDownloader.format_query_time_range(time_range)<line_sep>query={'pageNum':page # 页码
'pageSize':30 'tabName':'fulltext' 'column':'szse' # 深交所
'stock':stock 'searchkey':'' 'secid':'' 'plate':'sz' 'category':'category_ndbg_szsh;' # 年度报告
'trade':'' 'seDate':time_range }<line_sep>namelist=requests.post(query_path headers=headers data=query)<line_sep><return>namelist.json()['announcements']<block_end>@staticmethod<def_stmt>get_sse_annual_report_pages page:int stock:str time_range:any=<none><block_start>query_path='http://www.cninfo.com.cn/new/hisAnnouncement/query'<line_sep>headers['User-Agent']=random.choice(User_Agent)# 定义User_Agent
time_range=AnnouncementDownloader.format_query_time_range(time_range)<line_sep>query={'pageNum':page # 页码
'pageSize':30 'tabName':'fulltext' 'column':'sse' 'stock':stock 'searchkey':'' 'secid':'' 'plate':'sh' 'category':'category_ndbg_szsh;' # 年度报告
'trade':'' 'seDate':time_range}<line_sep>namelist=requests.post(query_path headers=headers data=query)<line_sep><return>namelist.json()['announcements']<block_end># json中的年度报告信息
@staticmethod<def_stmt>execute_download report_pages include_filter:[str]<or><none>=<none> exclude_filter:[str]<or><none>=<none> quit_flag:[bool]=<none><block_start><if_stmt>report_pages<is><none><block_start><return><block_end># download_headers = {
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-HK;q=0.6,zh-TW;q=0.5',
# 'Host': 'www.cninfo.com.cn',
# 'Origin': 'http://www.cninfo.com.cn'
# }
# download_headers['User-Agent'] = random.choice(User_Agent)
download_path='http://static.cninfo.com.cn/'<for_stmt>page report_pages<block_start><if_stmt>quit_flag<is><not><none><and>quit_flag[0]<block_start><break><block_end>title=page['announcementTitle']<line_sep>allowed=AnnouncementDownloader.check_filter_allowed(title include_filter exclude_filter)<if_stmt><not>allowed<block_start>print(' %s -> Ignore'%title)<line_sep><continue><block_end>print(' %s -> Download'%title)<line_sep>download=download_path+page["adjunctUrl"]<line_sep>file_name=AnnouncementDownloader.format_download_path(page)<if_stmt>'*'<in>file_name<block_start>file_name=file_name.replace('*' '')<block_end>time.sleep(random.random()<times>5)<line_sep>r=requests.get(download)<line_sep>f=open(file_name "wb")<line_sep>f.write(r.content)<line_sep>f.close()<block_end><block_end>@staticmethod<def_stmt>format_download_path page<arrow>str<block_start>file_name=page['secName']+'_'+page['announcementTitle']+'.pdf'<line_sep>file_path=path.join(root_path 'Download' 'report' page['secCode'])<line_sep>makedirs(file_path exist_ok=<true>)<line_sep><return>path.join(file_path file_name)<block_end>@staticmethod<def_stmt>check_filter_allowed text:str include_filter:[str]<or><none> exclude_filter:[str]<or><none><arrow>bool<block_start>allowed=<false><if_stmt>include_filter<is><not><none><and>len(include_filter)<g>0<block_start><for_stmt>inc include_filter<block_start><if_stmt>inc<in>text<block_start>allowed=<true><line_sep><break><block_end><block_end><block_end><else_stmt><block_start>allowed=<true><block_end><if_stmt>exclude_filter<is><not><none><and>len(exclude_filter)<g>0<block_start><for_stmt>exc exclude_filter<block_start><if_stmt>exc<in>text<block_start>allowed=<false><line_sep><break><block_end><block_end><block_end><return>allowed<block_end># ----------------------------------------- Interface -----------------------------------------
@staticmethod<def_stmt>download_annual_report stock_identity:str<or>list time_range:any=<none> quit_flag:[bool]=<none><block_start><if_stmt><not>isinstance(stock_identity (list tuple))<block_start>stock_identity=[stock_identity]<block_end><for_stmt>identity stock_identity<block_start>s,f=AnnouncementDownloader.__detect_stock_code_and_page_entry(identity)<line_sep>AnnouncementDownloader.__download_report_for_securities(s f time_range quit_flag)<block_end><block_end>@staticmethod<def_stmt>__detect_stock_code_and_page_entry stock_identity:str<arrow>tuple<block_start><if_stmt>stock_identity.endswith('.SSE')<block_start>s=stock_identity[:-4]<line_sep>f=AnnouncementDownloader.get_sse_annual_report_pages<block_end><elif_stmt>stock_identity.endswith('.SZSE')<block_start>s=stock_identity[:-5]<line_sep>f=AnnouncementDownloader.get_szse_annual_report_pages<block_end><else_stmt><block_start>s=stock_identity<line_sep>exchange=get_stock_exchange(stock_identity)<if_stmt>exchange<eq>'SSE'<block_start>f=AnnouncementDownloader.get_sse_annual_report_pages<block_end><elif_stmt>exchange<eq>'SZSE'<block_start>f=AnnouncementDownloader.get_szse_annual_report_pages<block_end><else_stmt><block_start>f=AnnouncementDownloader.get_sse_annual_report_pages<block_end><block_end><return>s f<block_end>@staticmethod<def_stmt>__download_report_for_securities s f time_range quit_flag<block_start>page=1<while_stmt>page<l>1000# Max limit
<block_start><if_stmt>quit_flag<is><not><none><and>quit_flag[0]<block_start><break><block_end><try_stmt><block_start>print('Downloading report for %s, page %s'%(s page))<line_sep>page_data=f(page s time_range)<if_stmt>len(page_data)<eq>0<block_start><break><block_end>AnnouncementDownloader.execute_download(page_data include_filter=['年年度报告'] exclude_filter=['确认意见' '摘要' '已取消'] quit_flag=quit_flag)<if_stmt>len(page_data)<ne>30<block_start><break><block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep>print('Maybe page reaches end.')<line_sep><break><block_end><finally_stmt><block_start>page<augadd>1<block_end><block_end><block_end><block_end># ----------------------------------------------------------------------------------------------------------------------
ALL_STOCK_TEXT='所有'<line_sep>DEFAULT_INFO='''
本扩展程序功能:从巨朝网下载上市公司公开报告
1.下载代码来自:https://github.com/gaodechen/cninfo_process
2.如果选择“自定义”,请自行设置关键字以根据报告标题进行过滤
3.默认下载路径为当前目录下Download/report/
4.下载任务会占用系统工作队列,和数据更新功能共享资源
- 请在“View->任务管理”中管理下载任务
- 在前一个任务没完成时,也可以添加下一个任务
5.如果选择时间范围过大或股票过多,可能会被网站BAN,切勿贪多
'''<line_sep>DOWNLOAD_ALL_TIPS='''
接下来的操作会为所有股票下载年报
这会花费很长的时间以及占用很大的磁盘空间
********并存在被网站BAN的可能性********
如非特别需要,建议选择个别股票分别下载
-------------是否继续此操作-------------
'''<line_sep># ----------------------------------- UpdateTask -----------------------------------
<class_stmt>AnnouncementDownloadTask(TaskQueue.Task)<block_start>REPORT_TYPE_NONE=0<line_sep>REPORT_TYPE_ANNUAL=1<def_stmt>__init__ self<block_start>super(AnnouncementDownloadTask self).__init__('AnnouncementDownloadTask')<line_sep>self.__quit_flag=[<false>]<line_sep># Modules
self.sas_if:sasIF=<none><line_sep>self.task_manager:TaskQueue=<none><line_sep># self.data_utility = None
# Parameters
self.securities=''<line_sep>self.period_since=<none><line_sep>self.period_until=<none><line_sep>self.filter_include=[]<line_sep>self.filter_exclude=[]<line_sep>self.report_type=AnnouncementDownloadTask.REPORT_TYPE_ANNUAL<block_end><def_stmt>run self<block_start><try_stmt><block_start>self.__execute_update()<block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep>print('Continue...')<block_end><finally_stmt><block_start>print('Finished')<block_end><block_end><def_stmt>quit self<block_start>self.__quit_flag[0]=<true><block_end><def_stmt>identity self<arrow>str<block_start><return>'Download Report: '+self.securities<block_end><def_stmt>__execute_update self<block_start><if_stmt>self.securities<eq>ALL_STOCK_TEXT<block_start>stock_list=self.sas_if.sas_get_stock_info_list()<for_stmt>stock_identity,stock_name stock_list<block_start><if_stmt>self.__quit_flag<is><not><none><and>self.__quit_flag[0]<block_start><break><block_end># self.__build_sub_update(stock_identity)
AnnouncementDownloader.download_annual_report(stock_identity (self.period_since self.period_until) self.__quit_flag)<block_end><block_end><elif_stmt>self.report_type<eq>AnnouncementDownloadTask.REPORT_TYPE_ANNUAL<block_start>AnnouncementDownloader.download_annual_report(self.securities (self.period_since self.period_until) self.__quit_flag)<block_end><else_stmt><block_start><pass><block_end><block_end># def __build_sub_update(self, securities: str):
# task = AnnouncementDownloadTask()
# task.securities = securities
# task.period_since = self.period_since
# task.period_until = self.period_until
# task.filter_include = self.filter_include
# task.filter_exclude = self.filter_exclude
# task.report_type = self.report_type
# task.task_manager = self.task_manager
# self.task_manager.append_task(task)
<block_end># ----------------------------- AnnouncementDownloaderUi -----------------------------
<class_stmt>AnnouncementDownloaderUi(QWidget)<block_start><def_stmt>__init__ self sas_if:sasIF task_manager<block_start>super(AnnouncementDownloaderUi self).__init__()<line_sep># ---------------- ext var ----------------
self.__sas_if=sas_if<line_sep># self.__data_center = self.__data_hub.get_data_center() if self.__data_hub is not None else None
# self.__data_utility = self.__data_hub.get_data_utility() if self.__data_hub is not None else None
self.__task_manager=task_manager<line_sep>self.__translate=QtCore.QCoreApplication.translate<line_sep># Timer for update stock list
self.__timer=QTimer()<line_sep>self.__timer.setInterval(1000)<line_sep>self.__timer.timeout.connect(self.on_timer)<line_sep>self.__timer.start()<line_sep># Ui component
self.__combo_name=SecuritiesSelector(self.__sas_if self)<line_sep>self.__radio_annual_report=QRadioButton('年报')<line_sep>self.__radio_customize_filter=QRadioButton('自定义')<line_sep>self.__line_filter_include=QLineEdit()<line_sep>self.__line_filter_exclude=QLineEdit()<line_sep>self.__button_download=QPushButton('确定')<line_sep>self.__datetime_since=QDateTimeEdit(QDateTime.currentDateTime().addYears(-3))<line_sep>self.__datetime_until=QDateTimeEdit(QDateTime.currentDateTime())<line_sep>self.init_ui()<block_end># ---------------------------------------------------- UI Init -----------------------------------------------------
<def_stmt>init_ui self<block_start>self.__layout_control()<line_sep>self.__config_control()<block_end><def_stmt>__layout_control self<block_start>main_layout=QVBoxLayout()<line_sep>self.setLayout(main_layout)<line_sep>main_layout.addLayout(horizon_layout([QLabel('股票代码') self.__combo_name] [1 10]))<line_sep>main_layout.addLayout(horizon_layout([QLabel('报告起始') self.__datetime_since] [1 10]))<line_sep>main_layout.addLayout(horizon_layout([QLabel('报告截止') self.__datetime_until] [1 10]))<line_sep>main_layout.addLayout(horizon_layout([QLabel('报告类型') self.__radio_annual_report self.__radio_customize_filter] [1 5 5]))<line_sep>main_layout.addLayout(horizon_layout([QLabel('包含词条(以,分隔)') self.__line_filter_include] [1 10]))<line_sep>main_layout.addLayout(horizon_layout([QLabel('排除词条(以,分隔)') self.__line_filter_exclude] [1 10]))<line_sep>main_layout.addWidget(QLabel(DEFAULT_INFO))<line_sep>main_layout.addWidget(self.__button_download)<block_end><def_stmt>__config_control self# self.__combo_name.setEditable(True)
# self.__combo_name.addItem('所有')
# self.__combo_name.addItem('股票列表载入中')
<block_start>self.__radio_annual_report.setChecked(<true>)<line_sep>self.__line_filter_include.setEnabled(<false>)<line_sep>self.__line_filter_exclude.setEnabled(<false>)<line_sep>self.__radio_customize_filter.setEnabled(<false>)<line_sep>self.__radio_annual_report.clicked.connect(self.on_radio_report_type)<line_sep>self.__radio_customize_filter.clicked.connect(self.on_radio_report_type)<line_sep>self.__button_download.clicked.connect(self.on_button_download)<block_end><def_stmt>on_timer self<block_start><if_stmt>self.__combo_name.count()<g>1<block_start>self.__combo_name.insertItem(0 ALL_STOCK_TEXT)<line_sep>self.__combo_name.setCurrentIndex(0)<line_sep>self.__timer.stop()<block_end># # Check stock list ready and update combobox
# if self.__data_utility is not None:
# if self.__data_utility.stock_cache_ready():
# self.__combo_name.clear()
# self.__combo_name.addItem(ALL_STOCK_TEXT)
# stock_list = self.__data_utility.get_stock_list()
# for stock_identity, stock_name in stock_list:
# self.__combo_name.addItem(stock_identity + ' | ' + stock_name, stock_identity)
<block_end><def_stmt>on_radio_report_type self<block_start><if_stmt>self.__radio_annual_report.isChecked()<block_start>self.__line_filter_include.setEnabled(<false>)<line_sep>self.__line_filter_exclude.setEnabled(<false>)<block_end><else_stmt><block_start>self.__line_filter_include.setEnabled(<true>)<line_sep>self.__line_filter_exclude.setEnabled(<true>)<block_end><block_end><def_stmt>on_button_download self# input_securities = self.__combo_name.currentText()
# if '|' in input_securities:
# input_securities = input_securities.split('|')[0].strip()
<block_start>input_securities=self.__combo_name.get_input_securities()<if_stmt>input_securities<eq>ALL_STOCK_TEXT<block_start><if_stmt>self.__sas_if<is><none><block_start>QMessageBox.information(self QtCore.QCoreApplication.translate('main' '提示') QtCore.QCoreApplication.translate('main' '无法获取股票列表') QMessageBox.Yes QMessageBox.No)<line_sep><return><block_end>reply=QMessageBox.question(self QtCore.QCoreApplication.translate('main' '操作确认') QtCore.QCoreApplication.translate('main' DOWNLOAD_ALL_TIPS) QMessageBox.Yes|QMessageBox.No QMessageBox.No)<if_stmt>reply<ne>QMessageBox.Yes<block_start><return><block_end><block_end>self.__build_download_task(input_securities)<block_end><def_stmt>__build_download_task self securities:str<block_start>task=AnnouncementDownloadTask()<line_sep>task.securities=securities<line_sep>task.period_since=self.__datetime_since.dateTime().toPyDateTime()<line_sep>task.period_until=self.__datetime_until.dateTime().toPyDateTime()<line_sep>task.filter_include=self.__line_filter_include.text().split(',')<line_sep>task.filter_exclude=self.__line_filter_exclude.text().split(',')<line_sep>task.report_type=AnnouncementDownloadTask.REPORT_TYPE_ANNUAL<if>self.__radio_annual_report.isChecked()<else>AnnouncementDownloadTask.REPORT_TYPE_NONE<line_sep>task.task_manager=self.__task_manager<line_sep>task.sas_if=self.__sas_if<line_sep># task.data_utility = self.__data_utility
<if_stmt>self.__task_manager<is><not><none><block_start>self.__task_manager.append_task(task)<block_end><else_stmt><block_start>task.run()<block_end><block_end><block_end># ----------------------------------------------------------------------------------------------------------------------
<def_stmt>plugin_prob <arrow>dict<block_start><return>{'plugin_id':'efa60977-65e9-4ecf-9271-7c6e629da399' 'plugin_name':'ReportDownloader' 'plugin_version':'0.0.0.1' 'tags':['Announcement' 'Report' 'Finance Report' 'Annual Report' 'Sleepy'] }<block_end><def_stmt>plugin_adapt method:str<arrow>bool<block_start><return>method<in>['widget']<block_end><def_stmt>plugin_capacities <arrow>list<block_start><return>['widget']<block_end># ----------------------------------------------------------------------------------------------------------------------
sasInterface=<none><def_stmt>init sas_if<arrow>bool<block_start><try_stmt><block_start><global>sasInterface<line_sep>sasInterface=sas_if<block_end><except_stmt>Exception<as>e<block_start><pass><block_end><finally_stmt><block_start><pass><block_end><return><true><block_end><def_stmt>widget parent:QWidget **kwargs<arrow>(QWidget dict)<block_start>ui_context:UiContext=kwargs.get('ui_context' <none>)<line_sep>task_manager=<none><if>ui_context<is><none><else>ui_context.get_task_queue()<line_sep><return>AnnouncementDownloaderUi(sasInterface task_manager) {'name':'年报下载' 'show':<false>}<block_end># ----------------------------------------------------------------------------------------------------------------------
<def_stmt>main <block_start>app=QApplication(sys.argv)<line_sep>dlg=WrapperQDialog(AnnouncementDownloaderUi(<none> <none>))<line_sep>dlg.exec()<block_end># ----------------------------------------------------------------------------------------------------------------------
<def_stmt>exception_hook type value tback# log the exception here
<block_start>print('Exception hook triggered.')<line_sep>print(type)<line_sep>print(value)<line_sep>print(tback)<line_sep># then call the default handler
sys.__excepthook__(type value tback)<block_end><if_stmt>__name__<eq>"__main__"<block_start>sys.excepthook=exception_hook<try_stmt><block_start>main()<block_end><except_stmt>Exception<as>e<block_start>print('Error =>' e)<line_sep>print('Error =>' traceback.format_exc())<line_sep>exit()<block_end><finally_stmt><block_start><pass><block_end><block_end> |
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
<import_from_stmt>.base_query_builder BaseQueryBuilder<import_from_stmt>.aggregation_query AggregationQuery<import_from_stmt>..base.type_conversions StringArrayType<class_stmt>AggregationQueryBuilder(BaseQueryBuilder)<block_start>"""
A builder for creating aggregation queries. This class should not be used directly. Instead, use one of the derived
classes such as `pygw.query.vector.VectorAggregationQueryBuilder`.
"""<def_stmt>__init__ self java_ref<block_start>super().__init__(java_ref)<block_end><def_stmt>count self *type_names<block_start>"""
This is a convenience method to set the count aggregation if no type names are given it is
assumed to count every type.
Args:
type_names (str): The type names to count results.
Returns:
This query builder.
"""<if_stmt>type_names<is><none><block_start>self._java_ref.count()<block_end><else_stmt><block_start>self._java_ref.count(StringArrayType().to_java(type_names))<block_end><return>self<block_end><def_stmt>aggregate self type_name j_aggregation<block_start>"""
Provide the Java Aggregation function and the type name to apply the aggregation on.
Args:
type_name (str): The type name to aggregate.
j_aggregation (Aggregation): The Java aggregation function to
Returns:
This query builder.
"""<line_sep><return>self._java_ref.aggregate(type_name j_aggregation)<block_end><def_stmt>build self<block_start>"""
Builds the configured aggregation query.
Returns:
The final constructed `pygw.query.AggregationQuery`.
"""<line_sep><return>AggregationQuery(self._java_ref.build() self._java_transformer)<block_end><block_end> |
<import_stmt>unittest<import_from_stmt>laika.helpers get_constellation get_prn_from_nmea_id get_nmea_id_from_prn NMEA_ID_RANGES<line_sep>SBAS_DATA=[['S01' 33] ['S02' 34] ['S10' 42] ['S22' 54] ['S23' 55] ['S32' 64] ['S33' 120] ['S64' 151] ['S65' 152] ['S71' 158]]<line_sep>MAIN_CONSTELLATIONS=[['G01' 1] ['G10' 10] ['G32' 32] ['R01' 65] ['R10' 74] ['R23' 87] ['R24' 88] ['R25' 89] ['R32' 96] ['E01' 301] ['E02' 302] ['E36' 336] ['C01' 201] ['C02' 202] ['C29' 229] ['J01' 193] ['J04' 196]]<class_stmt>TestConstellationPRN(unittest.TestCase)<block_start><def_stmt>test_constellation_from_valid_prn self<block_start>data=[['G01' 'GPS'] ['G10' 'GPS'] ['G32' 'GPS'] ['R01' 'GLONASS'] ['R10' 'GLONASS'] ['R23' 'GLONASS'] ['R24' 'GLONASS'] ['R25' 'GLONASS'] ['R32' 'GLONASS'] ['E01' 'GALILEO'] ['E02' 'GALILEO'] ['E36' 'GALILEO'] ['C01' 'BEIDOU'] ['C02' 'BEIDOU'] ['C29' 'BEIDOU'] ['J01' 'QZNSS'] ['J04' 'QZNSS'] ['S01' 'SBAS'] ['I01' 'IRNSS']]<for_stmt>prn,expected_constellation data<block_start>constellation=get_constellation(prn)<line_sep>self.assertEqual(constellation expected_constellation)<block_end><block_end><def_stmt>test_constellation_from_prn_with_invalid_identifier self<block_start>prn='?01'<line_sep>self.assertWarns(UserWarning get_constellation prn)<line_sep>self.assertIsNone(get_constellation(prn))<block_end><def_stmt>test_constellation_from_prn_outside_range self<block_start>prn='G99'<line_sep>constellation=get_constellation(prn)<line_sep>self.assertEqual(constellation 'GPS')<block_end><def_stmt>test_prn_from_nmea_id_for_main_constellations self<block_start>data=MAIN_CONSTELLATIONS<for_stmt>expected_prn,nmea_id data<block_start>prn=get_prn_from_nmea_id(nmea_id)<line_sep>self.assertEqual(prn expected_prn)<block_end><block_end><def_stmt>test_prn_from_nmea_id_for_SBAS self<block_start>'''Probably numbering SBAS as single constellation doesn't make
sense, but programmatically it works the same as for others
constellations.'''<line_sep>data=SBAS_DATA<for_stmt>expected_prn,nmea_id data<block_start>prn=get_prn_from_nmea_id(nmea_id)<line_sep>self.assertEqual(prn expected_prn)<block_end><block_end><def_stmt>test_prn_from_invalid_nmea_id self<block_start>data=[(-1 "?-1") (0 "?0") (100 "?100") (160 "?160") (190 "?190") (300 "?300")]<for_stmt>nmea_id,expected_prn data<block_start>self.assertWarns(UserWarning get_prn_from_nmea_id nmea_id)<line_sep>self.assertEqual(get_prn_from_nmea_id(nmea_id) expected_prn)<block_end>self.assertRaises(TypeError get_prn_from_nmea_id <none>)<line_sep>self.assertRaises(TypeError get_prn_from_nmea_id '1')<block_end><def_stmt>test_nmea_id_from_prn_for_main_constellations self<block_start>data=MAIN_CONSTELLATIONS<for_stmt>prn,expected_nmea_id data<block_start>nmea_id=get_nmea_id_from_prn(prn)<line_sep>self.assertEqual(nmea_id expected_nmea_id)<block_end><block_end><def_stmt>test_nmea_id_from_prn_for_SBAS self<block_start>'''Probably numbering SBAS as single constellation doesn't make
sense, but programmatically it works the same as for others
constellations.'''<line_sep>data=SBAS_DATA<for_stmt>prn,expected_nmea_id data<block_start>nmea_id=get_nmea_id_from_prn(prn)<line_sep>self.assertEqual(nmea_id expected_nmea_id)<block_end><block_end><def_stmt>test_nmea_id_from_invalid_prn self# Special unknown constellation - valid number
<block_start>self.assertEqual(1 get_nmea_id_from_prn('?01'))<line_sep>self.assertEqual(-1 get_nmea_id_from_prn('?-1'))<line_sep># Special unknown constellation - invalid number
self.assertRaises(ValueError get_nmea_id_from_prn '???')<line_sep># Constellation with unknwown identifier
self.assertRaises(NotImplementedError get_nmea_id_from_prn 'X01')<line_sep># Valid constellation - invalid number
self.assertRaises(ValueError get_nmea_id_from_prn 'G00')<line_sep>self.assertRaises(ValueError get_nmea_id_from_prn 'GAA')<line_sep>self.assertRaises(NotImplementedError get_nmea_id_from_prn 'G33')<line_sep>self.assertRaises(NotImplementedError get_nmea_id_from_prn 'C99')<line_sep>self.assertRaises(NotImplementedError get_nmea_id_from_prn 'R99')<line_sep>self.assertRaises(NotImplementedError get_nmea_id_from_prn 'J99')<line_sep># None
self.assertRaises(TypeError get_nmea_id_from_prn <none>)<block_end><def_stmt>test_nmea_ranges_are_valid self<block_start>last_end=0<for_stmt>entry NMEA_ID_RANGES<block_start>self.assertIn('range' entry)<line_sep>self.assertIn('constellation' entry)<line_sep>range_=entry['range']<line_sep>self.assertEqual(len(range_) 2)<line_sep>start,end=range_<line_sep>self.assertLessEqual(start end)<line_sep>self.assertLess(last_end start)<line_sep>last_end=end<block_end><block_end><block_end> |
"""
Fragments of project version mutations
"""<line_sep>PROJECT_VERSION_FRAGMENT='''
content
id
name
projectId
'''<line_sep> |
# Copyright 2020 Toyota Research Institute. All rights reserved.
# Adapted from Pytorch-Lightning
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/loggers/wandb.py
<import_from_stmt>argparse Namespace<import_from_stmt>collections OrderedDict<import_stmt>numpy<as>np<import_stmt>torch.nn<as>nn<import_stmt>wandb<import_from_stmt>wandb.wandb_run Run<import_from_stmt>packnet_sfm.utils.depth viz_inv_depth<import_from_stmt>packnet_sfm.utils.logging prepare_dataset_prefix<import_from_stmt>packnet_sfm.utils.types is_dict is_tensor<class_stmt>WandbLogger<block_start>"""
Wandb logger class to monitor training.
Parameters
----------
name : str
Run name (if empty, uses a fancy Wandb name, highly recommended)
dir : str
Folder where wandb information is stored
id : str
ID for the run
anonymous : bool
Anonymous mode
version : str
Run version
project : str
Wandb project where the run will live
tags : list of str
List of tags to append to the run
log_model : bool
Log the model to wandb or not
experiment : wandb
Wandb experiment
entity : str
Wandb entity
"""<def_stmt>__init__ self name=<none> dir=<none> id=<none> anonymous=<false> version=<none> project=<none> entity=<none> tags=<none> log_model=<false> experiment=<none><block_start>super().__init__()<line_sep>self._name=name<line_sep>self._dir=dir<line_sep>self._anonymous='allow'<if>anonymous<else><none><line_sep>self._id=version<or>id<line_sep>self._tags=tags<line_sep>self._project=project<line_sep>self._entity=entity<line_sep>self._log_model=log_model<line_sep>self._experiment=experiment<if>experiment<else>self.create_experiment()<line_sep>self._metrics=OrderedDict()<block_end><def_stmt>__getstate__ self<block_start>"""Get the current logger state"""<line_sep>state=self.__dict__.copy()<line_sep>state['_id']=self._experiment.id<if>self._experiment<is><not><none><else><none><line_sep>state['_experiment']=<none><line_sep><return>state<block_end><def_stmt>create_experiment self<block_start>"""Creates and returns a new experiment"""<line_sep>experiment=wandb.init(name=self._name dir=self._dir project=self._project anonymous=self._anonymous reinit=<true> id=self._id resume='allow' tags=self._tags entity=self._entity)<line_sep>wandb.run.save()<line_sep><return>experiment<block_end><def_stmt>watch self model:nn.Module log:str='gradients' log_freq:int=100<block_start>"""Watch training parameters."""<line_sep>self.experiment.watch(model log=log log_freq=log_freq)<block_end>@property<def_stmt>experiment self<arrow>Run<block_start>"""Returns the experiment (creates a new if it doesn't exist)."""<if_stmt>self._experiment<is><none><block_start>self._experiment=self.create_experiment()<block_end><return>self._experiment<block_end>@property<def_stmt>version self<arrow>str<block_start>"""Returns experiment version."""<line_sep><return>self._experiment.id<if>self._experiment<else><none><block_end>@property<def_stmt>name self<arrow>str<block_start>"""Returns experiment 
name."""<line_sep>name=self._experiment.project_name()<if>self._experiment<else><none><line_sep><return>name<block_end>@property<def_stmt>run_name self<arrow>str<block_start>"""Returns run name."""<line_sep><return>wandb.run.name<if>self._experiment<else><none><block_end>@property<def_stmt>run_url self<arrow>str<block_start>"""Returns run URL."""<line_sep><return>'https://app.wandb.ai/{}/{}/runs/{}'.format(wandb.run.entity wandb.run.project wandb.run.id)<if>self._experiment<else><none><block_end>@staticmethod<def_stmt>_convert_params params<block_start><if_stmt>isinstance(params Namespace)<block_start>params=vars(params)<block_end><if_stmt>params<is><none><block_start>params={}<block_end><return>params<block_end><def_stmt>log_config self params<block_start>"""Logs model configuration."""<line_sep>params=self._convert_params(params)<line_sep>self.experiment.config.update(params allow_val_change=<true>)<block_end><def_stmt>log_metrics self metrics<block_start>"""Logs training metrics."""<line_sep>self._metrics.update(metrics)<if_stmt>'global_step'<in>metrics<block_start>self.experiment.log(self._metrics)<line_sep>self._metrics.clear()<block_end><block_end><def_stmt>log_images self func mode batch output args dataset world_size config<block_start>"""
Adds images to metrics for later logging.
Parameters
----------
func : Function
Function used to process the image before logging
mode : str {"train", "val"}
Training stage where the images come from (serve as prefix for logging)
batch : dict
Data batch
output : dict
Model output
args : tuple
Step arguments
dataset : CfgNode
Dataset configuration
world_size : int
Number of GPUs, used to get logging samples at consistent intervals
config : CfgNode
Model configuration
"""<line_sep>dataset_idx=0<if>len(args)<eq>1<else>args[1]<line_sep>prefix=prepare_dataset_prefix(config dataset_idx)<line_sep>interval=len(dataset[dataset_idx])<floordiv>world_size<floordiv>config.num_logs<if_stmt>args[0]%interval<eq>0<block_start>prefix_idx='{}-{}-{}'.format(mode prefix batch['idx'][0].item())<line_sep>func(prefix_idx batch output)<block_end><block_end># Log depth images
<def_stmt>log_depth self *args **kwargs<block_start>"""Helper function used to log images relevant for depth estimation"""<def_stmt>log prefix_idx batch output<block_start>self._metrics.update(log_rgb('rgb' prefix_idx batch))<line_sep>self._metrics.update(log_inv_depth('inv_depth' prefix_idx output))<if_stmt>'depth'<in>batch<block_start>self._metrics.update(log_depth('depth' prefix_idx batch))<block_end><block_end>self.log_images(log *args **kwargs)<block_end><block_end><def_stmt>log_rgb key prefix batch i=0<block_start>"""
Converts an RGB image from a batch for logging
Parameters
----------
key : str
Key from data containing the image
prefix : str
Prefix added to the key for logging
batch : dict
Dictionary containing the key
i : int
Batch index from which to get the image
Returns
-------
image : wandb.Image
Wandb image ready for logging
"""<line_sep>rgb=batch[key]<if>is_dict(batch)<else>batch<line_sep><return>prep_image(prefix key rgb[i])<block_end><def_stmt>log_depth key prefix batch i=0<block_start>"""
Converts a depth map from a batch for logging
Parameters
----------
key : str
Key from data containing the depth map
prefix : str
Prefix added to the key for logging
batch : dict
Dictionary containing the key
i : int
Batch index from which to get the depth map
Returns
-------
image : wandb.Image
Wandb image ready for logging
"""<line_sep>depth=batch[key]<if>is_dict(batch)<else>batch<line_sep>inv_depth=1./depth[i]<line_sep>inv_depth[depth[i]<eq>0]=0<line_sep><return>prep_image(prefix key viz_inv_depth(inv_depth filter_zeros=<true>))<block_end><def_stmt>log_inv_depth key prefix batch i=0<block_start>"""
Converts an inverse depth map from a batch for logging
Parameters
----------
key : str
Key from data containing the inverse depth map
prefix : str
Prefix added to the key for logging
batch : dict
Dictionary containing the key
i : int
Batch index from which to get the inverse depth map
Returns
-------
image : wandb.Image
Wandb image ready for logging
"""<line_sep>inv_depth=batch[key]<if>is_dict(batch)<else>batch<line_sep><return>prep_image(prefix key viz_inv_depth(inv_depth[i]))<block_end><def_stmt>prep_image prefix key image<block_start>"""
Prepare image for wandb logging
Parameters
----------
prefix : str
Prefix added to the key for logging
key : str
Key from data containing the inverse depth map
image : torch.Tensor [3,H,W]
Image to be logged
Returns
-------
output : dict
Dictionary with key and value for logging
"""<if_stmt>is_tensor(image)<block_start>image=image.detach().permute(1 2 0).cpu().numpy()<block_end>prefix_key='{}-{}'.format(prefix key)<line_sep><return>{prefix_key:wandb.Image(image caption=key)}<block_end> |
<import_from_future_stmt> with_statement<import_from_stmt>contextlib contextmanager<import_from_stmt>test TemplateTest eq_ raises template_base mock<import_stmt>os<import_from_stmt>mako.cmd cmdline<class_stmt>CmdTest(TemplateTest)<block_start>@contextmanager<def_stmt>_capture_output_fixture self stream="stdout"<block_start><with_stmt>mock.patch("sys.%s"%stream)<as>stdout<block_start><yield>stdout<block_end><block_end><def_stmt>test_stdin_success self<block_start><with_stmt>self._capture_output_fixture()<as>stdout<block_start><with_stmt>mock.patch("sys.stdin" mock.Mock(read=mock.Mock(return_value="hello world ${x}")))<block_start>cmdline(["--var" "x=5" "-"])<block_end><block_end>eq_(stdout.write.mock_calls[0][1][0] "hello world 5")<block_end><def_stmt>test_stdin_syntax_err self<block_start><with_stmt>mock.patch("sys.stdin" mock.Mock(read=mock.Mock(return_value="${x")))<block_start><with_stmt>self._capture_output_fixture("stderr")<as>stderr<block_start><with_stmt>raises(SystemExit)<block_start>cmdline(["--var" "x=5" "-"])<block_end><block_end><assert_stmt>"SyntaxException: Expected"<in>stderr.write.mock_calls[0][1][0]<assert_stmt>"Traceback"<in>stderr.write.mock_calls[0][1][0]<block_end><block_end><def_stmt>test_stdin_rt_err self<block_start><with_stmt>mock.patch("sys.stdin" mock.Mock(read=mock.Mock(return_value="${q}")))<block_start><with_stmt>self._capture_output_fixture("stderr")<as>stderr<block_start><with_stmt>raises(SystemExit)<block_start>cmdline(["--var" "x=5" "-"])<block_end><block_end><assert_stmt>"NameError: Undefined"<in>stderr.write.mock_calls[0][1][0]<assert_stmt>"Traceback"<in>stderr.write.mock_calls[0][1][0]<block_end><block_end><def_stmt>test_file_success self<block_start><with_stmt>self._capture_output_fixture()<as>stdout<block_start>cmdline(["--var" "x=5" os.path.join(template_base "cmd_good.mako")])<block_end>eq_(stdout.write.mock_calls[0][1][0] "hello world 5")<block_end><def_stmt>test_file_syntax_err 
self<block_start><with_stmt>self._capture_output_fixture("stderr")<as>stderr<block_start><with_stmt>raises(SystemExit)<block_start>cmdline(["--var" "x=5" os.path.join(template_base "cmd_syntax.mako")])<block_end><block_end><assert_stmt>"SyntaxException: Expected"<in>stderr.write.mock_calls[0][1][0]<assert_stmt>"Traceback"<in>stderr.write.mock_calls[0][1][0]<block_end><def_stmt>test_file_rt_err self<block_start><with_stmt>self._capture_output_fixture("stderr")<as>stderr<block_start><with_stmt>raises(SystemExit)<block_start>cmdline(["--var" "x=5" os.path.join(template_base "cmd_runtime.mako")])<block_end><block_end><assert_stmt>"NameError: Undefined"<in>stderr.write.mock_calls[0][1][0]<assert_stmt>"Traceback"<in>stderr.write.mock_calls[0][1][0]<block_end><def_stmt>test_file_notfound self<block_start><with_stmt>raises(SystemExit "error: can't find fake.lalala")<block_start>cmdline(["--var" "x=5" "fake.lalala"])<block_end><block_end><block_end> |
<import_from_stmt>LightPipes *<import_stmt>matplotlib.pyplot<as>plt<def_stmt>TheExample N<block_start>fig=plt.figure(figsize=(11 9.5))<line_sep>ax1=fig.add_subplot(221)<line_sep>ax2=fig.add_subplot(222)<line_sep>ax3=fig.add_subplot(223)<line_sep>ax4=fig.add_subplot(224)<line_sep>labda=1000<times>nm<line_sep>size=10<times>mm<line_sep>f1=10<times>m<line_sep>f2=1.11111111<times>m<line_sep>z=1.0<times>m<line_sep>w=5<times>mm<line_sep>F=Begin(size labda N)<line_sep>F=RectAperture(w w 0 0 0 F)<line_sep>#1) Using Lens and Fresnel:
F1=Lens(z 0 0 F)<line_sep>F1=Fresnel(z F1)<line_sep>phi1=Phase(F1)<line_sep>phi1=PhaseUnwrap(phi1)<line_sep>I1=Intensity(0 F1)<line_sep>x1=[]<for_stmt>i range(N)<block_start>x1.append((-size/2+i<times>size/N)/mm)<block_end>#2) Using Lens + LensFresnel and Convert:
F2=Lens(f1 0 0 F)<line_sep>F2=LensFresnel(f2 z F2)<line_sep>F2=Convert(F2)<line_sep>phi2=Phase(F2)<line_sep>phi2=PhaseUnwrap(phi2)<line_sep>I2=Intensity(0 F2)<line_sep>x2=[]<line_sep>newsize=size/10<for_stmt>i range(N)<block_start>x2.append((-newsize/2+i<times>newsize/N)/mm)<block_end>ax1.plot(x1 phi1[int(N/2)] 'k--' label='Lens + Fresnel')<line_sep>ax1.plot(x2 phi2[int(N/2)] 'k' label='LensFresnel + Convert')<line_sep>ax1.set_xlim(-newsize/2/mm newsize/2/mm)<line_sep>ax1.set_ylim(-2 4)<line_sep>ax1.set_xlabel('x [mm]')<line_sep>ax1.set_ylabel('phase [rad]')<line_sep>ax1.set_title('phase, N = %d'%N)<line_sep>legend=ax1.legend(loc='upper center' shadow=<true>)<line_sep>ax2.plot(x1 I1[int(N/2)] 'k--' label='Lens+Fresnel')<line_sep>ax2.plot(x2 I2[int(N/2)] 'k' label='LensFresnel + Convert')<line_sep>ax2.set_xlim(-newsize/2/mm newsize/2/mm)<line_sep>ax2.set_ylim(0 1000)<line_sep>ax2.set_xlabel('x [mm]')<line_sep>ax2.set_ylabel('Intensity [a.u.]')<line_sep>ax2.set_title('intensity, N = %d'%N)<line_sep>legend=ax2.legend(loc='upper center' shadow=<true>)<line_sep>ax3.imshow(I1)<line_sep>ax3.axis('off')<line_sep>ax3.set_title('Intensity, Lens + Fresnel, N = %d'%N)<line_sep>ax3.set_xlim(int(N/2)-N/20 int(N/2)+N/20)<line_sep>ax3.set_ylim(int(N/2)-N/20 int(N/2)+N/20)<line_sep>ax4.imshow(I2)<line_sep>ax4.axis('off')<line_sep>ax4.set_title('Intensity, LensFresnel + Convert, N = %d'%N)<line_sep>plt.show()<block_end>TheExample(100)#100 x 100 grid
TheExample(1000)#1000 x 1000 grid
|
<import_stmt>numpy<as>np<import_stmt>itertools<as>it<import_from_stmt>funkyyak grad<import_from_stmt>copy copy<def_stmt>nd f *args<block_start>unary_f=<lambda>x:f(*x)<line_sep><return>unary_nd(unary_f args)<block_end><def_stmt>unary_nd f x<block_start>eps=1e-4<if_stmt>isinstance(x np.ndarray)<block_start>nd_grad=np.zeros(x.shape)<for_stmt>dims it.product(*map(range x.shape))<block_start>nd_grad[dims]=unary_nd(indexed_function(f x dims) x[dims])<block_end><return>nd_grad<block_end><elif_stmt>isinstance(x tuple)<block_start><return>tuple([unary_nd(indexed_function(f list(x) i) x[i])<for>i range(len(x))])<block_end><elif_stmt>isinstance(x dict)<block_start><return>{k:unary_nd(indexed_function(f x k) v)<for>k,v x.iteritems()}<block_end><elif_stmt>isinstance(x list)<block_start><return>[unary_nd(indexed_function(f x i) v)<for>i,v enumerate(x)]<block_end><else_stmt><block_start><return>(f(x+eps/2)-f(x-eps/2))/eps<block_end><block_end><def_stmt>indexed_function fun arg index<block_start>local_arg=copy(arg)<def_stmt>partial_function x<block_start>local_arg[index]=x<line_sep><return>fun(local_arg)<block_end><return>partial_function<block_end><def_stmt>eq_class dtype<block_start><return>float<if>dtype<eq>np.float64<else>dtype<block_end><def_stmt>check_equivalent A B<block_start><assert_stmt>eq_class(type(A))<eq>eq_class(type(B)) "Types are: {0} and {1}".format(eq_class(type(A)) eq_class(type(B)))<if_stmt>isinstance(A (tuple list))<block_start><for_stmt>a,b zip(A B)<block_start>check_equivalent(a b)<block_end><block_end><elif_stmt>isinstance(A dict)<block_start><assert_stmt>len(A)<eq>len(B)<for_stmt>k A<block_start>check_equivalent(A[k] B[k])<block_end><block_end><else_stmt><block_start><if_stmt>isinstance(A np.ndarray)<block_start><assert_stmt>A.shape<eq>B.shape "Shapes are {0} and {1}".format(A.shape B.shape)<block_end><assert_stmt>np.allclose(A B rtol=1e-4 atol=1e-6) "Diffs are: {0}".format(A-B)<block_end><block_end><def_stmt>check_grads fun *args<block_start>A=nd(fun 
*args)<line_sep>B=tuple([grad(fun i)(*args)<for>i range(len(args))])<line_sep>check_equivalent(A B)<block_end><def_stmt>to_scalar x<block_start><return>np.sum(np.sin(x))<block_end> |
__version__="2.9.10"<line_sep> |
<import_stmt>numpy<as>np<import_stmt>numpy.random<as>npr<import_stmt>cv2<import_from_stmt>core.config cfg<import_stmt>utils.blob<as>blob_utils<def_stmt>get_minibatch_blob_names is_training=<true><block_start>"""Return blob names in the order in which they are read by the data loader.
"""<line_sep># data blob: holds a batch of N images, each with 3 channels
blob_names=['data' 'rois' 'labels']<line_sep><return>blob_names<block_end><def_stmt>get_minibatch roidb num_classes<block_start>"""Given a roidb, construct a minibatch sampled from it."""<line_sep># We collect blobs from each image onto a list and then concat them into a
# single tensor, hence we initialize each blob to an empty list
blobs={k:[]<for>k get_minibatch_blob_names()}<line_sep># Get the input image blob
im_blob,im_scales=_get_image_blob(roidb)<assert_stmt>len(im_scales)<eq>1 "Single batch only"<assert_stmt>len(roidb)<eq>1 "Single batch only"<line_sep>blobs['data']=im_blob<line_sep>rois_blob=np.zeros((0 5) dtype=np.float32)<line_sep>labels_blob=np.zeros((0 num_classes) dtype=np.float32)<line_sep>num_images=len(roidb)<for_stmt>im_i range(num_images)<block_start>labels,im_rois=_sample_rois(roidb[im_i] num_classes)<line_sep># Add to RoIs blob
rois=_project_im_rois(im_rois im_scales[im_i])<line_sep>batch_ind=im_i<times>np.ones((rois.shape[0] 1))<line_sep>rois_blob_this_image=np.hstack((batch_ind rois))<if_stmt>cfg.DEDUP_BOXES<g>0<block_start>v=np.array([1 1e3 1e6 1e9 1e12])<line_sep>hashes=np.round(rois_blob_this_image<times>cfg.DEDUP_BOXES).dot(v)<line_sep>_,index,inv_index=np.unique(hashes return_index=<true> return_inverse=<true>)<line_sep>rois_blob_this_image=rois_blob_this_image[index :]<block_end>rois_blob=np.vstack((rois_blob rois_blob_this_image))<line_sep># Add to labels blob
labels_blob=np.vstack((labels_blob labels))<block_end>blobs['rois']=rois_blob<line_sep>blobs['labels']=labels_blob<line_sep><return>blobs <true><block_end><def_stmt>_sample_rois roidb num_classes<block_start>"""Generate a random sample of RoIs"""<line_sep>labels=roidb['gt_classes']<line_sep>rois=roidb['boxes']<if_stmt>cfg.TRAIN.BATCH_SIZE_PER_IM<g>0<block_start>batch_size=cfg.TRAIN.BATCH_SIZE_PER_IM<block_end><else_stmt><block_start>batch_size=np.inf<block_end><if_stmt>batch_size<l>rois.shape[0]<block_start>rois_inds=npr.permutation(rois.shape[0])[:batch_size]<line_sep>rois=rois[rois_inds :]<block_end><return>labels.reshape(1 -1) rois<block_end><def_stmt>_get_image_blob roidb<block_start>"""Builds an input blob from the images in the roidb at the specified
scales.
"""<line_sep>num_images=len(roidb)<line_sep># Sample random scales to use for each image in this batch
scale_inds=np.random.randint(0 high=len(cfg.TRAIN.SCALES) size=num_images)<line_sep>processed_ims=[]<line_sep>im_scales=[]<for_stmt>i range(num_images)<block_start>im=cv2.imread(roidb[i]['image'])<assert_stmt>im<is><not><none> 'Failed to read image \'{}\''.format(roidb[i]['image'])<line_sep># If NOT using opencv to read in images, uncomment following lines
# if len(im.shape) == 2:
# im = im[:, :, np.newaxis]
# im = np.concatenate((im, im, im), axis=2)
# # flip the channel, since the original one using cv2
# # rgb -> bgr
# im = im[:, :, ::-1]
<if_stmt>roidb[i]['flipped']<block_start>im=im[: ::-1 :]<block_end>target_size=cfg.TRAIN.SCALES[scale_inds[i]]<line_sep>im,im_scale=blob_utils.prep_im_for_blob(im cfg.PIXEL_MEANS [target_size] cfg.TRAIN.MAX_SIZE)<line_sep>im_scales.append(im_scale[0])<line_sep>processed_ims.append(im[0])<block_end># Create a blob to hold the input images [n, c, h, w]
blob=blob_utils.im_list_to_blob(processed_ims)<line_sep><return>blob im_scales<block_end><def_stmt>_project_im_rois im_rois im_scale_factor<block_start>"""Project image RoIs into the rescaled training image."""<line_sep>rois=im_rois<times>im_scale_factor<line_sep><return>rois<block_end> |
# Copyright 2021 TUNiB inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>transformers.models.gptj.modeling_gptj GPTJBlock<import_from_stmt>parallelformers.policies.base Layer Policy<import_from_stmt>parallelformers.utils AllReduceLinear<class_stmt>GPTJPolicy(Policy)<block_start>@staticmethod<def_stmt>replace_arguments config world_size<block_start><return>{# 1. reduce hidden size
"attn.embed_dim":config.hidden_size<floordiv>world_size # 2. reduce number of heads
"attn.num_attention_heads":config.n_head<floordiv>world_size }<block_end>@staticmethod<def_stmt>attn_qkv <block_start><return>[Layer(weight="attn.q_proj.weight") Layer(weight="attn.k_proj.weight") Layer(weight="attn.v_proj.weight") ]<block_end>@staticmethod<def_stmt>attn_out <block_start><return>[Layer(weight="attn.out_proj.weight" replace=AllReduceLinear ) ]<block_end>@staticmethod<def_stmt>mlp_in <block_start><return>[Layer(weight="mlp.fc_in.weight" bias="mlp.fc_in.bias" ) ]<block_end>@staticmethod<def_stmt>mlp_out <block_start><return>[Layer(weight="mlp.fc_out.weight" bias="mlp.fc_out.bias" replace=AllReduceLinear ) ]<block_end>@staticmethod<def_stmt>original_layer_class <block_start><return>GPTJBlock<block_end><block_end> |
'''OpenGL extension ATI.text_fragment_shader
This module customises the behaviour of the
OpenGL.raw.GL.ATI.text_fragment_shader to provide a more
Python-friendly API
Overview (from the spec)
The ATI_fragment_shader extension exposes a powerful fragment
processing model that provides a very general means of expressing
fragment color blending and dependent texture address modification.
The processing is termed a fragment shader or fragment program and
is specifed using a register-based model in which there are fixed
numbers of instructions, texture lookups, read/write registers, and
constants.
ATI_fragment_shader provides a unified instruction set
for operating on address or color data and eliminates the
distinction between the two. That extension provides all the
interfaces necessary to fully expose this programmable fragment
processor in GL.
ATI_text_fragment_shader is a redefinition of the
ATI_fragment_shader functionality, using a slightly different
interface. The intent of creating ATI_text_fragment_shader is to
take a step towards treating fragment programs similar to other
programmable parts of the GL rendering pipeline, specifically
vertex programs. This new interface is intended to appear
similar to the ARB_vertex_program API, within the limits of the
feature set exposed by the original ATI_fragment_shader extension.
The most significant differences between the two extensions are:
(1) ATI_fragment_shader provides a procedural function call
interface to specify the fragment program, whereas
ATI_text_fragment_shader uses a textual string to specify
the program. The fundamental syntax and constructs of the
program "language" remain the same.
(2) The program object managment portions of the interface,
namely the routines used to create, bind, and delete program
objects and set program constants are managed
using the framework defined by ARB_vertex_program.
(3) ATI_fragment_shader refers to the description of the
programmable fragment processing as a "fragment shader".
In keeping with the desire to treat all programmable parts
of the pipeline consistently, ATI_text_fragment_shader refers
to these as "fragment programs". The name of the extension is
left as ATI_text_fragment_shader instead of
ATI_text_fragment_program in order to indicate the underlying
similarity between the API's of the two extensions, and to
differentiate it from any other potential extensions that
may be able to move even further in the direction of treating
fragment programs as just another programmable area of the
GL pipeline.
Although ATI_fragment_shader was originally conceived as a
device-independent extension that would expose the capabilities of
future generations of hardware, changing trends in programmable
hardware have affected the lifespan of this extension. For this
reason you will now find a fixed set of features and resources
exposed, and the queries to determine this set have been deprecated
in ATI_fragment_shader. Further, in ATI_text_fragment_shader,
most of these resource limits are fixed by the text grammar and
the queries have been removed altogether.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ATI/text_fragment_shader.txt
'''<import_from_stmt>OpenGL platform constant arrays<import_from_stmt>OpenGL extensions wrapper<import_stmt>ctypes<import_from_stmt>OpenGL.raw.GL _types _glgets<import_from_stmt>OpenGL.raw.GL.ATI.text_fragment_shader *<import_from_stmt>OpenGL.raw.GL.ATI.text_fragment_shader _EXTENSION_NAME<def_stmt>glInitTextFragmentShaderATI <block_start>'''Return boolean indicating whether this extension is available'''<import_from_stmt>OpenGL extensions<line_sep><return>extensions.hasGLExtension(_EXTENSION_NAME)<block_end>### END AUTOGENERATED SECTION
|
# vim:fileencoding=utf-8:noet
<import_from_future_stmt> unicode_literals division absolute_import print_function <import_stmt>os<import_from_stmt>threading RLock<import_from_stmt>powerline.lib.path realpath<class_stmt>StatFileWatcher(object)<block_start><def_stmt>__init__ self<block_start>self.watches={}<line_sep>self.lock=RLock()<block_end><def_stmt>watch self path<block_start>path=realpath(path)<with_stmt>self.lock<block_start>self.watches[path]=os.path.getmtime(path)<block_end><block_end><def_stmt>unwatch self path<block_start>path=realpath(path)<with_stmt>self.lock<block_start>self.watches.pop(path <none>)<block_end><block_end><def_stmt>is_watching self path<block_start><with_stmt>self.lock<block_start><return>realpath(path)<in>self.watches<block_end><block_end><def_stmt>__call__ self path<block_start>path=realpath(path)<with_stmt>self.lock<block_start><if_stmt>path<not><in>self.watches<block_start>self.watches[path]=os.path.getmtime(path)<line_sep><return><true><block_end>mtime=os.path.getmtime(path)<if_stmt>mtime<ne>self.watches[path]<block_start>self.watches[path]=mtime<line_sep><return><true><block_end><return><false><block_end><block_end><def_stmt>close self<block_start><with_stmt>self.lock<block_start>self.watches.clear()<block_end><block_end><block_end> |
<import_stmt>braintree<import_from_stmt>braintree.resource Resource<class_stmt>ApplePayCard(Resource)<block_start>"""
A class representing Braintree Apple Pay card objects.
"""<class_stmt>CardType(object)<block_start>"""
Contants representing the type of the credit card. Available types are:
* Braintree.ApplePayCard.AmEx
* Braintree.ApplePayCard.MasterCard
* Braintree.ApplePayCard.Visa
"""<line_sep>AmEx="Apple Pay - American Express"<line_sep>MasterCard="Apple Pay - MasterCard"<line_sep>Visa="Apple Pay - Visa"<block_end><def_stmt>__init__ self gateway attributes<block_start>Resource.__init__(self gateway attributes)<if_stmt>hasattr(self 'expired')<block_start>self.is_expired=self.expired<block_end><if_stmt>"subscriptions"<in>attributes<block_start>self.subscriptions=[braintree.subscription.Subscription(gateway subscription)<for>subscription self.subscriptions]<block_end><block_end>@property<def_stmt>expiration_date self<block_start><return>self.expiration_month+"/"+self.expiration_year<block_end><block_end> |
# Generated by Django 3.0.7 on 2020-10-14 07:46
<import_stmt>django.utils.timezone<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("posthog" "0086_team_session_recording_opt_in") ]<line_sep>operations=[migrations.AlterField(model_name="annotation" name="created_at" field=models.DateTimeField(default=django.utils.timezone.now null=<true>) ) ]<block_end> |
"""Run control side-effect handler."""<import_stmt>pytest<import_from_stmt>decoy Decoy<import_from_stmt>opentrons.protocol_engine.state StateStore<import_from_stmt>opentrons.protocol_engine.actions ActionDispatcher PauseAction<import_from_stmt>opentrons.protocol_engine.execution.run_control RunControlHandler<import_from_stmt>opentrons.protocol_engine.state EngineConfigs<line_sep>@pytest.fixture<def_stmt>state_store decoy:Decoy<arrow>StateStore<block_start>"""Get a mocked out StateStore."""<line_sep><return>decoy.mock(cls=StateStore)<block_end>@pytest.fixture<def_stmt>action_dispatcher decoy:Decoy<arrow>ActionDispatcher<block_start>"""Get a mocked out ActionDispatcher."""<line_sep><return>decoy.mock(cls=ActionDispatcher)<block_end>@pytest.fixture<def_stmt>subject state_store:StateStore action_dispatcher:ActionDispatcher <arrow>RunControlHandler<block_start>"""Create a RunControlHandler with its dependencies mocked out."""<line_sep><return>RunControlHandler(state_store=state_store action_dispatcher=action_dispatcher )<block_end><async_keyword><def_stmt>test_pause decoy:Decoy state_store:StateStore action_dispatcher:ActionDispatcher subject:RunControlHandler <arrow><none><block_start>"""It should be able to execute a pause."""<line_sep>decoy.when(state_store.get_configs()).then_return(EngineConfigs(ignore_pause=<false>))<line_sep><await>subject.pause()<line_sep>decoy.verify(action_dispatcher.dispatch(PauseAction()) <await>state_store.wait_for(condition=state_store.commands.get_is_running) )<block_end><async_keyword><def_stmt>test_pause_analysis decoy:Decoy state_store:StateStore action_dispatcher:ActionDispatcher subject:RunControlHandler <arrow><none><block_start>"""It should no op during a protocol analysis."""<line_sep>decoy.when(state_store.get_configs()).then_return(EngineConfigs(ignore_pause=<true>))<line_sep><await>subject.pause()<line_sep>decoy.verify(action_dispatcher.dispatch(PauseAction()) times=0)<block_end> |
<import_stmt>unittest<import_stmt>shutil<import_stmt>tempfile<import_stmt>numpy<as>np<line_sep># import pandas as pd
# import pymc3 as pm
# from pymc3 import summary
# from sklearn.mixture import BayesianGaussianMixture as skBayesianGaussianMixture
<import_from_stmt>sklearn.model_selection train_test_split<import_from_stmt>pmlearn.exceptions NotFittedError<import_from_stmt>pmlearn.mixture DirichletProcessMixture<class_stmt>DirichletProcessMixtureTestCase(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.num_truncate=3<line_sep>self.num_components=3<line_sep>self.num_pred=1<line_sep>self.num_training_samples=100<line_sep>self.pi=np.array([0.35 0.4 0.25])<line_sep>self.means=np.array([0 5 10])<line_sep>self.sigmas=np.array([0.5 0.5 1.0])<line_sep>self.components=np.random.randint(0 self.num_components self.num_training_samples)<line_sep>X=np.random.normal(loc=self.means[self.components] scale=self.sigmas[self.components])<line_sep>X.shape=(self.num_training_samples 1)<line_sep>self.X_train,self.X_test=train_test_split(X test_size=0.3)<line_sep>self.test_DPMM=DirichletProcessMixture()<line_sep>self.test_nuts_DPMM=DirichletProcessMixture()<line_sep>self.test_dir=tempfile.mkdtemp()<block_end><def_stmt>tearDown self<block_start>shutil.rmtree(self.test_dir)<block_end><block_end># class DirichletProcessMixtureFitTestCase(DirichletProcessMixtureTestCase):
# def test_advi_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_DPMM.fit(self.X_train)
#
# self.assertEqual(self.num_pred, self.test_DPMM.num_pred)
# self.assertEqual(self.num_components, self.test_DPMM.num_components)
# self.assertEqual(self.num_truncate, self.test_DPMM.num_truncate)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_DPMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_DPMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_DPMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_DPMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_DPMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_DPMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_DPMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_DPMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_DPMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
# def test_nuts_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_nuts_DPMM.fit(self.X_train,
# inference_type='nuts',
# inference_args={'draws': 1000,
# 'chains': 2})
#
# self.assertEqual(self.num_pred, self.test_nuts_DPMM.num_pred)
# self.assertEqual(self.num_components, self.test_nuts_DPMM.num_components)
# self.assertEqual(self.num_components, self.test_nuts_DPMM.num_truncate)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_nuts_DPMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_nuts_DPMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_nuts_DPMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_nuts_DPMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_nuts_DPMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_nuts_DPMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
#
<class_stmt>DirichletProcessMixturePredictTestCase(DirichletProcessMixtureTestCase)# def test_predict_returns_predictions(self):
# print('')
# self.test_DPMM.fit(self.X_train, self.y_train)
# preds = self.test_DPMM.predict(self.X_test)
# self.assertEqual(self.y_test.shape, preds.shape)
# def test_predict_returns_mean_predictions_and_std(self):
# print('')
# self.test_DPMM.fit(self.X_train, self.y_train)
# preds, stds = self.test_DPMM.predict(self.X_test, return_std=True)
# self.assertEqual(self.y_test.shape, preds.shape)
# self.assertEqual(self.y_test.shape, stds.shape)
<block_start><def_stmt>test_predict_raises_error_if_not_fit self<block_start>print('')<with_stmt>self.assertRaises(NotFittedError)<as>no_fit_error<block_start>test_DPMM=DirichletProcessMixture()<line_sep>test_DPMM.predict(self.X_train)<block_end>expected='Run fit on the model before predict.'<line_sep>self.assertEqual(str(no_fit_error.exception) expected)<block_end><block_end># class DirichletProcessMixtureScoreTestCase(DirichletProcessMixtureTestCase):
# def test_score_matches_sklearn_performance(self):
# print('')
# skDPMM = skBayesianGaussianMixture(n_components=3)
# skDPMM.fit(self.X_train)
# skDPMM_score = skDPMM.score(self.X_test)
#
# self.test_DPMM.fit(self.X_train)
# test_DPMM_score = self.test_DPMM.score(self.X_test)
#
# self.assertAlmostEqual(skDPMM_score, test_DPMM_score, 0)
#
#
# class DirichletProcessMixtureSaveAndLoadTestCase(DirichletProcessMixtureTestCase):
# def test_save_and_load_work_correctly(self):
# print('')
# self.test_DPMM.fit(self.X_train)
# score1 = self.test_DPMM.score(self.X_test)
# self.test_DPMM.save(self.test_dir)
#
# DPMM2 = DirichletProcessMixture()
# DPMM2.load(self.test_dir)
#
# self.assertEqual(self.test_DPMM.inference_type, DPMM2.inference_type)
# self.assertEqual(self.test_DPMM.num_pred, DPMM2.num_pred)
# self.assertEqual(self.test_DPMM.num_training_samples,
# DPMM2.num_training_samples)
# self.assertEqual(self.test_DPMM.num_truncate, DPMM2.num_truncate)
#
# pd.testing.assert_frame_equal(summary(self.test_DPMM.trace),
# summary(DPMM2.trace))
#
# score2 = DPMM2.score(self.X_test)
# self.assertAlmostEqual(score1, score2, 0)
|
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<def_stmt>conv3x3x3 in_planes out_planes stride# 3x3x3 convolution with padding
<block_start><return>nn.Conv3d(in_planes out_planes kernel_size=3 stride=stride padding=1)<block_end><def_stmt>upconv3x3x3 in_planes out_planes stride<block_start><return>nn.ConvTranspose3d(in_planes out_planes kernel_size=3 stride=1 padding=1 output_padding=1)<block_end><def_stmt>conv_block_3d in_dim out_dim activation<block_start><return>nn.Sequential(nn.Conv3d(in_dim out_dim kernel_size=3 stride=1 padding=1) nn.BatchNorm3d(out_dim) activation )<block_end><def_stmt>conv_trans_block_3d in_dim out_dim activation stride=2<block_start><return>nn.Sequential(nn.ConvTranspose3d(in_dim out_dim kernel_size=3 stride=stride padding=1 output_padding=1) nn.BatchNorm3d(out_dim) activation )<block_end><def_stmt>max_pooling_3d <block_start><return>nn.MaxPool3d(kernel_size=2 stride=2 padding=0)<block_end><def_stmt>conv_block_2_3d in_dim out_dim activation stride=1<block_start><return>nn.Sequential(conv_block_3d(in_dim out_dim activation) nn.Conv3d(out_dim out_dim kernel_size=3 stride=stride padding=1) nn.BatchNorm3d(out_dim) )<block_end> |
<import_stmt>numpy<as>np<import_from_stmt>tests.test_utils run_track_tests<import_from_stmt>mirdata annotations<import_from_stmt>mirdata.datasets tonas<line_sep>TEST_DATA_HOME="tests/resources/mir_datasets/tonas"<def_stmt>test_track <block_start>default_trackid="01-D_AMairena"<line_sep>dataset=tonas.Dataset(TEST_DATA_HOME)<line_sep>track=dataset.track(default_trackid)<line_sep>expected_attributes={"singer":"<NAME>" "style":"Debla" "title":"<NAME>" "tuning_frequency":451.0654725341684 "f0_path":"tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena.f0.Corrected" "notes_path":"tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena.notes.Corrected" "audio_path":"tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena.wav" "track_id":"01-D_AMairena" }<line_sep>expected_property_types={"f0":annotations.F0Data "f0_automatic":annotations.F0Data "f0_corrected":annotations.F0Data "notes":annotations.NoteData "audio":tuple "singer":str "style":str "title":str "tuning_frequency":float }<line_sep>run_track_tests(track expected_attributes expected_property_types)<block_end><def_stmt>test_to_jams <block_start>default_trackid="01-D_AMairena"<line_sep>dataset=tonas.Dataset(TEST_DATA_HOME)<line_sep>track=dataset.track(default_trackid)<line_sep>jam=track.to_jams()<line_sep># Validate cante100 jam schema
<assert_stmt>jam.validate()<line_sep># Validate melody
f0=jam.search(namespace="pitch_contour")[0]["data"]<assert_stmt>[note.time<for>note f0]<eq>[0.197 0.209 0.221 0.232]<assert_stmt>[note.duration<for>note f0]<eq>[0.0 0.0 0.0 0.0]<assert_stmt>[note.value<for>note f0]<eq>[{"index":0 "frequency":0.0 "voiced":<false>} {"index":0 "frequency":379.299 "voiced":<true>} {"index":0 "frequency":379.299 "voiced":<true>} {"index":0 "frequency":379.299 "voiced":<true>} ]<line_sep>print([note.confidence<for>note f0])<assert_stmt>[note.confidence<for>note f0]<eq>[3.09e-06 2.86e-06 7.15e-06 1.545e-05]<line_sep># Validate note transciption
notes=jam.search(namespace="note_hz")[0]["data"]<assert_stmt>[note.time<for>note notes]<eq>[0.216667 0.65 2.183333 2.566667 ]<assert_stmt>[note.duration<for>note notes]<eq>[0.433333 1.016667 0.3833329999999999 0.3333330000000001 ]<assert_stmt>[note.value<for>note notes]<eq>[388.8382625732775 411.9597888711769 388.8382625732775 411.9597888711769 ]<assert_stmt>[note.confidence<for>note notes]<eq>[<none> <none> <none> <none>]<block_end><def_stmt>test_load_melody <block_start>default_trackid="01-D_AMairena"<line_sep>dataset=tonas.Dataset(TEST_DATA_HOME)<line_sep>track=dataset.track(default_trackid)<line_sep>f0_path=track.f0_path<line_sep>f0_data_corrected=tonas.load_f0(f0_path <true>)<line_sep>f0_data_automatic=tonas.load_f0(f0_path <false>)<line_sep># check types
<assert_stmt>type(f0_data_corrected)<eq>annotations.F0Data<assert_stmt>type(f0_data_corrected.times)<is>np.ndarray<assert_stmt>type(f0_data_corrected.frequencies)<is>np.ndarray<assert_stmt>type(f0_data_corrected.voicing)<is>np.ndarray<assert_stmt>type(f0_data_corrected._confidence)<is>np.ndarray<assert_stmt>type(f0_data_automatic)<eq>annotations.F0Data<assert_stmt>type(f0_data_automatic.times)<is>np.ndarray<assert_stmt>type(f0_data_automatic.frequencies)<is>np.ndarray<assert_stmt>type(f0_data_corrected.voicing)<is>np.ndarray<assert_stmt>type(f0_data_automatic._confidence)<is>np.ndarray<line_sep># check values
<assert_stmt>np.array_equal(f0_data_corrected.times np.array([0.197 0.209 0.221 0.232]) )<assert_stmt>np.array_equal(f0_data_corrected.frequencies np.array([0.000 379.299 379.299 379.299]))<assert_stmt>np.array_equal(f0_data_corrected.voicing np.array([0.0 1.0 1.0 1.0]) )<assert_stmt>np.array_equal(f0_data_corrected._confidence np.array([3.090e-06 0.00000286 0.00000715 0.00001545]) )<line_sep># check values
<assert_stmt>np.array_equal(f0_data_automatic.times np.array([0.197 0.209 0.221 0.232]) )<assert_stmt>np.array_equal(f0_data_automatic.frequencies np.array([0.000 0.000 143.918 143.918 ]) )<assert_stmt>np.array_equal(f0_data_automatic.voicing np.array([0.0 0.0 1.0 1.0]) )<assert_stmt>np.array_equal(f0_data_automatic._confidence np.array([3.090e-06 2.860e-06 0.00000715 0.00001545]) )<block_end><def_stmt>test_load_notes <block_start>default_trackid="01-D_AMairena"<line_sep>dataset=tonas.Dataset(TEST_DATA_HOME)<line_sep>track=dataset.track(default_trackid)<line_sep>notes_path=track.notes_path<line_sep>notes_data=tonas.load_notes(notes_path)<line_sep>tuning_frequency=tonas._load_tuning_frequency(notes_path)<line_sep># check types
<assert_stmt>type(notes_data)<eq>annotations.NoteData<assert_stmt>type(notes_data.intervals)<is>np.ndarray<assert_stmt>type(notes_data.pitches)<is>np.ndarray<assert_stmt>type(notes_data.confidence)<is>np.ndarray<assert_stmt>type(tuning_frequency)<is>float<line_sep># check tuning frequency
<assert_stmt>tuning_frequency<eq>451.0654725341684<line_sep># check values
<assert_stmt>np.array_equal(notes_data.intervals[: 0] np.array([0.216667 0.65 2.183333 2.566667]))<assert_stmt>np.array_equal(notes_data.intervals[: 1] np.array([0.65 1.666667 2.566666 2.9]))<assert_stmt>np.array_equal(notes_data.pitches np.array([388.8382625732775 411.9597888711769 388.8382625732775 411.9597888711769]) )<assert_stmt>np.array_equal(notes_data.confidence np.array([0.018007 0.010794 0.00698 0.03265 ]) )<block_end><def_stmt>test_load_audio <block_start>default_trackid="01-D_AMairena"<line_sep>dataset=tonas.Dataset(TEST_DATA_HOME)<line_sep>track=dataset.track(default_trackid)<line_sep>audio_path=track.audio_path<line_sep>audio,sr=tonas.load_audio(audio_path)<assert_stmt>sr<eq>44100<assert_stmt>type(audio)<is>np.ndarray<block_end><def_stmt>test_metadata <block_start>default_trackid="01-D_AMairena"<line_sep>dataset=tonas.Dataset(TEST_DATA_HOME)<line_sep>metadata=dataset._metadata<assert_stmt>metadata[default_trackid]<eq>{"title":"En el barrio de Triana" "style":"Debla" "singer":"<NAME>" }<block_end> |
"""Configuration defaults and loading functions.
Pyleus will look for configuration files in the following file paths in order
of increasing precedence. The latter configuration overrides the previous one.
#. /etc/pyleus.conf
#. ~/.config/pyleus.conf
#. ~/.pyleus.conf
You can always specify a configuration file when running any pyleus CLI command
as following:
``$ pyleus -c /path/to/config_file CMD``
This will override previous configurations.
Configuration file example
--------------------------
The following file contains all options you can configure for all pyleus
invocations.
.. code-block:: ini
[storm]
# path to Storm executable (pyleus will automatically look in PATH)
storm_cmd_path: /usr/share/storm/bin/storm
# optional: use -n option of pyleus CLI instead
nimbus_host: 10.11.12.13
# optional: use -p option of pyleus CLI instead
nimbus_port: 6628
# java options to pass to Storm CLI
jvm_opts: -Djava.io.tmpdir=/home/myuser/tmp
[build]
# PyPI server to use during the build of your topologies
pypi_index_url: http://pypi.ninjacorp.com/simple/
# always use system-site-packages for pyleus virtualenvs (default: false)
system_site_packages: true
# list of packages to always include in your topologies
include_packages: foo bar<4.0 baz==0.1
"""<import_from_future_stmt> absolute_import<import_stmt>collections<import_stmt>os<import_from_stmt>pyleus BASE_JAR_PATH<import_from_stmt>pyleus.utils expand_path<import_from_stmt>pyleus.exception ConfigurationError<import_from_stmt>pyleus.compat configparser<line_sep># Configuration files paths in order of increasing precedence
# Please keep in sync with module docstring
CONFIG_FILES_PATH=["/etc/pyleus.conf" "~/.config/pyleus.conf" "~/.pyleus.conf"]<line_sep>Configuration=collections.namedtuple("Configuration" "base_jar config_file debug func include_packages output_jar \
pypi_index_url nimbus_host nimbus_port storm_cmd_path \
system_site_packages topology_path topology_jar topology_name verbose \
wait_time jvm_opts")<line_sep>"""Namedtuple containing all pyleus configuration values."""<line_sep>DEFAULTS=Configuration(base_jar=BASE_JAR_PATH config_file=<none> debug=<false> func=<none> include_packages=<none> output_jar=<none> pypi_index_url=<none> nimbus_host=<none> nimbus_port=<none> storm_cmd_path=<none> system_site_packages=<false> topology_path="pyleus_topology.yaml" topology_jar=<none> topology_name=<none> verbose=<false> wait_time=<none> jvm_opts=<none> )<def_stmt>_validate_config_file config_file<block_start>"""Ensure that config_file exists and is a file."""<if_stmt><not>os.path.exists(config_file)<block_start><raise>ConfigurationError("Specified configuration file not"<concat>" found: {0}".format(config_file))<block_end><if_stmt><not>os.path.isfile(config_file)<block_start><raise>ConfigurationError("Specified configuration file is not"<concat>" a file: {0}".format(config_file))<block_end><block_end><def_stmt>update_configuration config update_dict<block_start>"""Update configuration with new values passed as dictionary.
:return: new configuration ``namedtuple``
"""<line_sep>tmp=config._asdict()<line_sep>tmp.update(update_dict)<line_sep><return>Configuration(**tmp)<block_end><def_stmt>load_configuration cmd_line_file<block_start>"""Load configurations from the more generic to the
more specific configuration file. The latter configurations
override the previous one.
If a file is specified from command line, it is considered
the most specific.
:return: configuration ``namedtuple``
"""<line_sep>config_files_hierarchy=[expand_path(c)<for>c CONFIG_FILES_PATH]<if_stmt>cmd_line_file<is><not><none><block_start>_validate_config_file(cmd_line_file)<line_sep>config_files_hierarchy.append(cmd_line_file)<block_end>config=configparser.SafeConfigParser()<line_sep>config.read(config_files_hierarchy)<line_sep>configs=update_configuration(DEFAULTS dict((config_name config_value)<for>section config.sections()<for>config_name,config_value config.items(section)))<line_sep><return>configs<block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> print_function division<import_stmt>torch<import_stmt>json<import_stmt>math<import_stmt>random<import_stmt>numpy<as>np<import_from_stmt>scipy ndimage<import_from_stmt>pymic.transform.abstract_transform AbstractTransform<import_from_stmt>pymic.util.image_process *<class_stmt>ChannelWiseThreshold(AbstractTransform)<block_start>"""Threshold the image (shape [C, D, H, W] or [C, H, W]) for each channel
"""<def_stmt>__init__ self params<block_start>"""
channels (tuple/list/None): the list of specified channels for thresholding. Default value
is all the channels.
threshold_lower (tuple/list/None): The lower threshold values for specified channels.
threshold_upper (tuple/list/None): The uppoer threshold values for specified channels.
replace_lower (tuple/list/None): new values for pixels with intensity smaller than
threshold_lower. Default value is
replace_upper (tuple/list/None): new values for pixels with intensity larger than threshold_upper.
"""<line_sep>super(ChannelWiseThreshold self).__init__(params)<line_sep>self.channlels=params['ChannelWiseThreshold_channels'.lower()]<line_sep>self.threshold_lower=params['ChannelWiseThreshold_threshold_lower'.lower()]<line_sep>self.threshold_upper=params['ChannelWiseThreshold_threshold_upper'.lower()]<line_sep>self.replace_lower=params['ChannelWiseThreshold_replace_lower'.lower()]<line_sep>self.replace_upper=params['ChannelWiseThreshold_replace_upper'.lower()]<line_sep>self.inverse=params.get('ChannelWiseThreshold_inverse'.lower() <false>)<block_end><def_stmt>__call__ self sample<block_start>image=sample['image']<line_sep>channels=range(image.shape[0])<if>self.channlels<is><none><else>self.channlels<for_stmt>i range(len(channels))<block_start>chn=channels[i]<if_stmt>((self.threshold_lower<is><not><none>)<and>(self.threshold_lower[i]<is><not><none>))<block_start>t_lower=self.threshold_lower[i]<line_sep>r_lower=self.threshold_lower[i]<if_stmt>((self.replace_lower<is><not><none>)<and>(self.replace_lower[i]<is><not><none>))<block_start>r_lower=self.replace_lower[i]<block_end>image[chn][image[chn]<l>t_lower]=r_lower<block_end><if_stmt>((self.threshold_upper<is><not><none>)<and>(self.threshold_upper[i]<is><not><none>))<block_start>t_upper=self.threshold_upper[i]<line_sep>r_upper=self.threshold_upper[i]<if_stmt>((self.replace_upper<is><not><none>)<and>(self.replace_upper[i]<is><not><none>))<block_start>r_upper=self.replace_upper[i]<block_end>image[chn][image[chn]<g>t_upper]=r_upper<block_end><block_end>sample['image']=image<line_sep><return>sample<block_end><block_end><class_stmt>ChannelWiseThresholdWithNormalize(AbstractTransform)<block_start>"""
Note that this can be replaced by ChannelWiseThreshold + NormalizeWithMinMax
Threshold the image (shape [C, D, H, W] or [C, H, W]) for each channel
and then normalize the image based on remaining pixels
"""<def_stmt>__init__ self params<block_start>"""
:param threshold_lower: (tuple/list/None) The lower threshold value along each channel.
:param threshold_upper: (typle/list/None) The upper threshold value along each channel.
:param mean_std_mode: (bool) If true, nomalize the image based on mean and std values,
and pixels values outside the threshold value are replaced random number.
If false, use the min and max values for normalization.
"""<line_sep>super(ChannelWiseThresholdWithNormalize self).__init__(params)<line_sep>self.threshold_lower=params['ChannelWiseThresholdWithNormalize_threshold_lower'.lower()]<line_sep>self.threshold_upper=params['ChannelWiseThresholdWithNormalize_threshold_upper'.lower()]<line_sep>self.mean_std_mode=params['ChannelWiseThresholdWithNormalize_mean_std_mode'.lower()]<line_sep>self.inverse=params.get('ChannelWiseThresholdWithNormalize_inverse'.lower() <false>)<block_end><def_stmt>__call__ self sample<block_start>image=sample['image']<for_stmt>chn range(image.shape[0])<block_start>v0=self.threshold_lower[chn]<line_sep>v1=self.threshold_upper[chn]<if_stmt>(self.mean_std_mode<eq><true>)<block_start>mask=np.ones_like(image[chn])<if_stmt>(v0<is><not><none>)<block_start>mask=mask<times>np.asarray(image[chn]<g>v0)<block_end><if_stmt>(v1<is><not><none>)<block_start>mask=mask<times>np.asarray(image[chn]<l>v1)<block_end>pixels=image[chn][mask<g>0]<line_sep>chn_mean=pixels.mean()<line_sep>chn_std=pixels.std()<line_sep>chn_norm=(image[chn]-chn_mean)/chn_std<line_sep>chn_random=np.random.normal(0 1 size=chn_norm.shape)<line_sep>chn_norm[mask<eq>0]=chn_random[mask<eq>0]<line_sep>image[chn]=chn_norm<block_end><else_stmt><block_start>img_chn=image[chn]<if_stmt>(v0<is><not><none>)<block_start>img_chn[img_chn<l>v0]=v0<line_sep>min_value=v0<block_end><else_stmt><block_start>min_value=img_chn.min()<block_end><if_stmt>(v1<is><not><none>)<block_start>img_chn[img_chn<g>v1]=v1<line_sep>max_value=img_chn.max()<block_end><else_stmt><block_start>max_value=img_chn.max()<block_end>img_chn=(img_chn-min_value)/(max_value-min_value)<line_sep>image[chn]=img_chn<block_end><block_end>sample['image']=image<line_sep><return>sample<block_end><block_end> |
<import_stmt>tensorflow<as>tf<import_stmt>face_decoder<import_stmt>networks<import_stmt>losses<import_from_stmt>utils *<line_sep>###############################################################################################
# model for single image face reconstruction
###############################################################################################
<class_stmt>Reconstruction_model()# initialization
<block_start><def_stmt>__init__ self opt<block_start>self.Face3D=face_decoder.Face3D()#analytic 3D face object
self.opt=opt# training options
self.Optimizer=tf.train.AdamOptimizer(learning_rate=opt.lr)<block_end># optimizer
# load input data from queue
<def_stmt>set_input self input_iterator<block_start>self.imgs,self.lm_labels,self.attention_masks=input_iterator.get_next()<block_end># forward process of the model
<def_stmt>forward self is_train=<true><block_start><with_stmt>tf.variable_scope(tf.get_variable_scope() reuse=tf.AUTO_REUSE)<block_start>self.coeff=networks.R_Net(self.imgs is_training=is_train)<line_sep>self.Face3D.Reconstruction_Block(self.coeff self.opt)<line_sep>self.id_labels=networks.Perceptual_Net(self.imgs)<line_sep>self.id_features=networks.Perceptual_Net(self.Face3D.render_imgs)<line_sep>self.photo_loss=losses.Photo_loss(self.imgs self.Face3D.render_imgs self.Face3D.img_mask_crop<times>self.attention_masks)<line_sep>self.landmark_loss=losses.Landmark_loss(self.Face3D.landmark_p self.lm_labels)<line_sep>self.perceptual_loss=losses.Perceptual_loss(self.id_features self.id_labels)<line_sep>self.reg_loss=losses.Regulation_loss(self.Face3D.id_coeff self.Face3D.ex_coeff self.Face3D.tex_coeff self.opt)<line_sep>self.reflect_loss=losses.Reflectance_loss(self.Face3D.face_texture self.Face3D.facemodel)<line_sep>self.gamma_loss=losses.Gamma_loss(self.Face3D.gamma)<line_sep>self.loss=self.opt.w_photo<times>self.photo_loss+self.opt.w_lm<times>self.landmark_loss+self.opt.w_id<times>self.perceptual_loss+self.opt.w_reg<times>self.reg_loss+self.opt.w_ref<times>self.reflect_loss+self.opt.w_gamma<times>self.gamma_loss<block_end><block_end># backward process
<def_stmt>backward self is_train=<true><block_start><if_stmt>is_train<block_start>update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)<line_sep>var_list=tf.trainable_variables()<line_sep>update_var_list=[v<for>v var_list<if>'resnet_v1_50'<in>v.name<or>'fc-'<in>v.name]<line_sep>grads=tf.gradients(self.loss update_var_list)<line_sep># get train_op with update_ops to ensure updating for bn parameters
<with_stmt>tf.control_dependencies(update_ops)<block_start>self.train_op=self.Optimizer.apply_gradients(zip(grads update_var_list) global_step=self.opt.global_step)<block_end><block_end># if not training stage, avoid updating variables
<else_stmt><block_start><pass><block_end><block_end># forward and backward
<def_stmt>step self is_train=<true><block_start><with_stmt>tf.variable_scope(tf.get_variable_scope())<as>scope<block_start>self.forward(is_train=is_train)<block_end>self.backward(is_train=is_train)<block_end># statistics summarization
<def_stmt>summarize self# scalar and histogram stats
<block_start>stat=[tf.summary.scalar('reflect_error' self.reflect_loss) tf.summary.scalar('gamma_error' self.gamma_loss) tf.summary.scalar('id_sim_error' self.perceptual_loss) tf.summary.scalar('lm_error' tf.sqrt(self.landmark_loss)) tf.summary.scalar('photo_error' self.photo_loss) tf.summary.scalar('train_error' self.loss) tf.summary.histogram('id_coeff' self.Face3D.id_coeff) tf.summary.histogram('ex_coeff' self.Face3D.ex_coeff) tf.summary.histogram('tex_coeff' self.Face3D.tex_coeff)]<line_sep>self.summary_stat=tf.summary.merge(stat)<line_sep># combine face region of reconstruction images with input images
render_imgs=self.Face3D.render_imgs[: : : ::-1]<times>self.Face3D.img_mask+tf.cast(self.imgs[: : : ::-1] tf.float32)<times>(1-self.Face3D.img_mask)<line_sep>render_imgs=tf.clip_by_value(render_imgs 0 255)<line_sep>render_imgs=tf.cast(render_imgs tf.uint8)<line_sep># image stats
img_stat=[tf.summary.image('imgs' tf.concat([tf.cast(self.imgs[: : : ::-1] tf.uint8) render_imgs] axis=2) max_outputs=8)]<line_sep>self.summary_img=tf.summary.merge(img_stat)<block_end><block_end> |
'''
To have a error free way of accessing and updating private variables, we create specific methods for this.
Those methods which are meant to set a value to a private variable are called setter methods and methods
meant to access private variable values are called getter methods.
The below code is an example of getter and setter methods:
'''<class_stmt>Customer<block_start><def_stmt>__init__ self id name age wallet_balance<block_start>self.id=id<line_sep>self.name=name<line_sep>self.age=age<line_sep>self.__wallet_balance=wallet_balance<block_end><def_stmt>set_wallet_balance self amount<block_start><if_stmt>amount<l>1000<and>amount<g>0<block_start>self.__wallet_balance=amount<block_end><block_end><def_stmt>get_wallet_balance self<block_start><return>self.__wallet_balance<block_end><block_end>c1=Customer(100 "Gopal" 24 1000)<line_sep>c1.set_wallet_balance(120)<line_sep>print(c1.get_wallet_balance())<line_sep> |
<import_stmt>json<class_stmt>TestListRepo<block_start><def_stmt>test_invalid self host<block_start>result=host.run('stack list repo test')<assert_stmt>result.rc<eq>255<assert_stmt>result.stderr.startswith('error - ')<block_end><def_stmt>test_args self host add_repo# Add a second repo so we can make sure it is skipped
<block_start>add_repo('test2' 'test2url')<line_sep># Run list repo with just the test box
result=host.run('stack list repo test output-format=json')<assert_stmt>result.rc<eq>0<line_sep># Make sure we got data only for the test box
repo_data=json.loads(result.stdout)<assert_stmt>len(repo_data)<eq>1<assert_stmt>repo_data[0]['name']<eq>'test'<line_sep># now get all of them
# assert both repos are in the list data
result=host.run('stack list repo output-format=json')<line_sep>repo_data=json.loads(result.stdout)<assert_stmt>len(repo_data)<eq>2<assert_stmt>{'test' 'test2'}<eq>{repo['name']<for>repo repo_data}<line_sep># now get all of them, by explicitly asking for them
# assert both repos are in the list data
result=host.run('stack list repo test test2 output-format=json')<line_sep>new_repo_data=json.loads(result.stdout)<assert_stmt>len(new_repo_data)<eq>2<assert_stmt>{'test' 'test2'}<eq>{repo['name']<for>repo new_repo_data}<block_end><def_stmt>test_removed_not_listed self host add_repo revert_etc# Run list repo with just the test box
<block_start>result=host.run('stack list repo test output-format=json')<assert_stmt>result.rc<eq>0<line_sep># Make sure we got data only for the test box
repo_data=json.loads(result.stdout)<assert_stmt>len(repo_data)<eq>1<assert_stmt>repo_data[0]['name']<eq>'test'<line_sep>result=host.run('stack remove repo test')<assert_stmt>result.rc<eq>0<line_sep># Run list repo again
result=host.run('stack list repo test output-format=json')<assert_stmt>result.rc<eq>255<assert_stmt>result.stderr.startswith('error - ')<block_end><def_stmt>test_expanded_columns self host host_os add_repo# Run list repo with just the test box
<block_start>result=host.run('stack list repo test expanded=true output-format=json')<assert_stmt>result.rc<eq>0<assert_stmt>json.loads(result.stdout)<eq>[{"name":"test" "alias":"test" "url":"test_url" "autorefresh":<false> "assumeyes":<false> "type":"rpm-md" "is_mirrorlist":<false> "gpgcheck":<false> "gpgkey":<none> "os":host_os "pallet name":<none>}]<block_end><def_stmt>test_add_repo_with_pallet self host host_os add_repo create_pallet_isos revert_export_stack_pallets revert_pallet_hooks revert_etc<block_start>result=host.run(f'stack add pallet {create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso')<line_sep>#result = host.run(f'stack add pallet /root/minimal-1.0-sles12.x86_64.disk1.iso')
<assert_stmt>result.rc<eq>0<line_sep>result=host.run('stack list pallet minimal output-format=json')<assert_stmt>result.rc<eq>0<line_sep>pallet_data=json.loads(result.stdout)<assert_stmt>len(pallet_data)<eq>1<line_sep># get pallet id, as well as the -'d name in the correct order
<import_from_stmt>stack.commands DatabaseConnection get_mysql_connection Command<import_from_stmt>stack.argument_processors.pallet PalletArgProcessor<import_from_stmt>operator attrgetter<line_sep>p=PalletArgProcessor()<line_sep>p.db=DatabaseConnection(get_mysql_connection())<line_sep>minimal_pallet=p.get_pallets(args=['minimal'] params=pallet_data[0])[0]<line_sep>pallet_name='-'.join(attrgetter('name' 'version' 'rel' 'os' 'arch')(minimal_pallet))<line_sep># now attach the test repo to the pallet
result=host.run(f'stack set repo test pallet={minimal_pallet.id}')<assert_stmt>result.rc<eq>0<line_sep># now verify it is attached to that pallet
result=host.run('stack list repo test expanded=true output-format=json')<assert_stmt>result.rc<eq>0<assert_stmt>json.loads(result.stdout)<eq>[{"name":"test" "alias":"test" "url":"test_url" "autorefresh":<false> "assumeyes":<false> "type":"rpm-md" "is_mirrorlist":<false> "gpgcheck":<false> "gpgkey":<none> "os":host_os "pallet name":pallet_name}]<line_sep># now verify that removing that pallet removes the repo as well
result=host.run('stack remove pallet minimal')<assert_stmt>result.rc<eq>0<line_sep>result=host.run('stack list repo')<assert_stmt>result.rc<eq>0<assert_stmt>result.stdout<eq>''<block_end><block_end> |
<import_from_stmt>ralph.api.serializers RalphAPISerializer<import_from_stmt>ralph.api.viewsets RalphAPIViewSet RalphReadOnlyAPIViewSet<import_from_stmt>ralph.api.routers router<line_sep>__all__=['RalphAPISerializer' 'RalphAPIViewSet' 'RalphReadOnlyAPIViewSet' 'router' ]<line_sep> |
'''
Classes
-----
GroupXData
Data object for holding a dense matrix X of real 64-bit floats,
organized contiguously based on provided group structure.
'''<import_stmt>numpy<as>np<import_from_stmt>collections namedtuple<import_from_stmt>bnpy.data.XData XData<import_from_stmt>bnpy.util as1D as2D as3D toCArray<import_from_stmt>bnpy.util numpyToSharedMemArray sharedMemToNumpyArray<class_stmt>GroupXData(XData)<block_start>""" Dataset object for dense real vectors organized in groups.
GroupXData can represent situations like:
* obseved image patches, across many images
group=image, observation=patch
* observed test results for patients, across many hospitals
group=hospital, obsevation=patient test result
Attributes
------
X : 2D array, size N x D
each row is a single dense observation vector
Xprev : 2D array, size N x D, optional
"previous" observations for auto-regressive likelihoods
dim : int
the dimension of each observation
nObs : int
the number of in-memory observations for this instance
TrueParams : dict
key/value pairs represent names and arrays of true parameters
doc_range : 1D array, size nDoc+1
the number of in-memory observations for this instance
nDoc : int
the number of in-memory documents for this instance
nDocTotal : int
total number of documents in entire dataset
Example
--------
# Create 1000 observations, each one a 3D vector
>>> X = np.random.randn(1000, 3)
# Assign items 0-499 to doc 1, 500-1000 to doc 2
>>> doc_range = [0, 500, 1000]
>>> myData = GroupXData(X, doc_range)
>>> print (myData.nObs)
1000
>>> print (myData.X.shape)
(1000, 3)
>>> print (myData.nDoc)
2
"""<line_sep>@classmethod<def_stmt>LoadFromFile cls filepath nDocTotal=<none> **kwargs<block_start>''' Constructor for loading data from disk into XData instance
'''<if_stmt>filepath.endswith('.mat')<block_start><return>cls.read_mat(filepath nDocTotal=nDocTotal **kwargs)<block_end><raise>NotImplemented('Only .mat file supported.')<block_end><def_stmt>save_to_mat self matfilepath<block_start>''' Save contents of current object to disk
'''<import_stmt>scipy.io<line_sep>SaveVars=dict(X=self.X nDoc=self.nDoc doc_range=self.doc_range)<if_stmt>hasattr(self 'Xprev')<block_start>SaveVars['Xprev']=self.Xprev<block_end><if_stmt>hasattr(self 'TrueParams')<and>'Z'<in>self.TrueParams<block_start>SaveVars['TrueZ']=self.TrueParams['Z']<block_end>scipy.io.savemat(matfilepath SaveVars oned_as='row')<block_end>@classmethod<def_stmt>read_npz cls npzfilepath nDocTotal=<none> **kwargs<block_start>''' Constructor for building an instance of GroupXData from npz
'''<line_sep>var_dict=dict(**np.load(npzfilepath allow_pickle=<true>))<if_stmt>'X'<not><in>var_dict<block_start><raise>KeyError('Stored npz file needs to have data in field named X')<block_end><if_stmt>'doc_range'<not><in>var_dict<block_start><raise>KeyError('Stored npz file needs to have field named doc_range')<block_end><if_stmt>nDocTotal<is><not><none><block_start>var_dict['nDocTotal']=nDocTotal<block_end><return>cls(**var_dict)<block_end>@classmethod<def_stmt>read_mat cls matfilepath nDocTotal=<none> **kwargs<block_start>''' Constructor for building an instance of GroupXData from disk
'''<import_stmt>scipy.io<line_sep>InDict=scipy.io.loadmat(matfilepath)<if_stmt>'X'<not><in>InDict<block_start><raise>KeyError('Stored matfile needs to have data in field named X')<block_end><if_stmt>'doc_range'<not><in>InDict<block_start><raise>KeyError('Stored matfile needs to have field named doc_range')<block_end><if_stmt>nDocTotal<is><not><none><block_start>InDict['nDocTotal']=nDocTotal<block_end><return>cls(**InDict)<block_end><def_stmt>__init__ self X=<none> doc_range=<none> nDocTotal=<none> Xprev=<none> TrueZ=<none> TrueParams=<none> fileNames=<none> summary=<none> **kwargs<block_start>''' Create an instance of GroupXData for provided array X
Post Condition
---------
self.X : 2D array, size N x D
with standardized dtype, alignment, byteorder.
self.Xprev : 2D array, size N x D
with standardized dtype, alignment, byteorder.
self.doc_range : 1D array, size nDoc+1
'''<line_sep>self.X=as2D(toCArray(X dtype=np.float64))<line_sep>self.doc_range=as1D(toCArray(doc_range dtype=np.int32))<if_stmt>summary<is><not><none><block_start>self.summary=summary<block_end><if_stmt>Xprev<is><not><none><block_start>self.Xprev=as2D(toCArray(Xprev dtype=np.float64))<block_end># Verify attributes are consistent
self._set_dependent_params(doc_range nDocTotal)<line_sep>self._check_dims()<line_sep># Add optional true parameters / true hard labels
<if_stmt>TrueParams<is><not><none><block_start>self.TrueParams=dict()<for_stmt>key,arr list(TrueParams.items())<block_start>self.TrueParams[key]=toCArray(arr)<block_end><block_end><if_stmt>TrueZ<is><not><none><block_start><if_stmt><not>hasattr(self 'TrueParams')<block_start>self.TrueParams=dict()<block_end>self.TrueParams['Z']=as1D(toCArray(TrueZ))<line_sep>self.TrueParams['K']=np.unique(self.TrueParams['Z']).size<block_end># Add optional source files for each group/sequence
<if_stmt>fileNames<is><not><none><block_start><if_stmt>hasattr(fileNames 'shape')<and>fileNames.shape<eq>(1 1)<block_start>fileNames=fileNames[0 0]<block_end><if_stmt>len(fileNames)<g>1<block_start>self.fileNames=[str(x).strip()<for>x np.squeeze(fileNames)]<block_end><else_stmt><block_start>self.fileNames=[str(fileNames[0])]<block_end><block_end># Add extra data attributes custom for the dataset
<for_stmt>key kwargs<block_start><if_stmt>hasattr(self key)<block_start><continue><block_end><if_stmt><not>key.startswith("__")<block_start>arr=np.squeeze(as1D(kwargs[key]))<if_stmt>arr.shape<eq>()<block_start><try_stmt><block_start>arr=float(arr)<block_end><except_stmt>TypeError<block_start><continue><block_end><block_end>setattr(self key arr)<block_end><block_end><block_end><def_stmt>_set_dependent_params self doc_range nDocTotal=<none><block_start>self.nObs=self.X.shape[0]<line_sep>self.dim=self.X.shape[1]<line_sep>self.nDoc=self.doc_range.size-1<if_stmt>nDocTotal<is><none><block_start>self.nDocTotal=self.nDoc<block_end><else_stmt><block_start>self.nDocTotal=int(nDocTotal)<block_end><block_end><def_stmt>_check_dims self<block_start><assert_stmt>self.X.ndim<eq>2<assert_stmt>self.X.flags.c_contiguous<assert_stmt>self.X.flags.owndata<assert_stmt>self.X.flags.aligned<assert_stmt>self.X.flags.writeable<assert_stmt>self.doc_range.ndim<eq>1<assert_stmt>self.doc_range.size<eq>self.nDoc+1<assert_stmt>self.doc_range[0]<eq>0<assert_stmt>self.doc_range[-1]<eq>self.nObs<assert_stmt>np.all(self.doc_range[1:]-self.doc_range[:-1]<ge>0)<block_end><def_stmt>get_size self<block_start><return>self.nDoc<block_end><def_stmt>get_total_size self<block_start><return>self.nDocTotal<block_end><def_stmt>get_dim self<block_start><return>self.dim<block_end><def_stmt>get_text_summary self<block_start>''' Returns human-readable description of this dataset
'''<if_stmt>hasattr(self 'summary')<block_start>s=self.summary<block_end><else_stmt><block_start>s='GroupXData'<block_end><return>s<block_end><def_stmt>get_stats_summary self<block_start>''' Returns human-readable summary of this dataset's basic properties
'''<line_sep>s=' size: %d units (documents)\n'%(self.get_size())<line_sep>s<augadd>' dimension: %d'%(self.get_dim())<line_sep><return>s<block_end><def_stmt>toXData self<block_start>''' Return simplified XData instance, losing group structure
'''<if_stmt>hasattr(self 'TrueParams')<block_start>TParams=self.TrueParams<block_end><else_stmt><block_start>TParams=<none><block_end><if_stmt>hasattr(self 'Xprev')<block_start><return>XData(self.X Xprev=self.Xprev TrueParams=TParams)<block_end><else_stmt><block_start><return>XData(self.X TrueParams=TParams)<block_end><block_end># Create Subset
#########################################################
<def_stmt>make_subset self docMask=<none> atomMask=<none> doTrackTruth=<false> doTrackFullSize=<true><block_start>""" Get subset of this dataset identified by provided unit IDs.
Parameters
-------
docMask : 1D array_like of ints
Identifies units (documents) to use to build subset.
doTrackFullSize : boolean, optional
default=True
If True, return DataObj with same nDocTotal value as this
dataset. If False, returned DataObj has smaller size.
atomMask : 1D array_like of ints, optional
default=None
If present, identifies rows of X to return as XData
Returns
-------
Dchunk : bnpy.data.GroupXData instance
"""<if_stmt>atomMask<is><not><none><block_start><return>self.toXData().select_subset_by_mask(atomMask)<block_end><if_stmt>len(docMask)<l>1<block_start><raise>ValueError('Cannot select empty subset')<block_end>newXList=list()<line_sep>newXPrevList=list()<line_sep>newDocRange=np.zeros(len(docMask)+1)<line_sep>newPos=1<for_stmt>d range(len(docMask))<block_start>start=self.doc_range[docMask[d]]<line_sep>stop=self.doc_range[docMask[d]+1]<line_sep>newXList.append(self.X[start:stop])<if_stmt>hasattr(self 'Xprev')<block_start>newXPrevList.append(self.Xprev[start:stop])<block_end>newDocRange[newPos]=newDocRange[newPos-1]+stop-start<line_sep>newPos<augadd>1<block_end>newX=np.vstack(newXList)<if_stmt>hasattr(self 'Xprev')<block_start>newXprev=np.vstack(newXPrevList)<block_end><else_stmt><block_start>newXprev=<none><block_end><if_stmt>doTrackFullSize<block_start>nDocTotal=self.nDocTotal<block_end><else_stmt><block_start>nDocTotal=<none><block_end><if_stmt>hasattr(self 'alwaysTrackTruth')<block_start>doTrackTruth=doTrackTruth<or>self.alwaysTrackTruth<block_end>hasTrueZ=hasattr(self 'TrueParams')<and>'Z'<in>self.TrueParams<if_stmt>doTrackTruth<and>hasTrueZ<block_start>TrueZ=self.TrueParams['Z']<line_sep>newTrueZList=list()<for_stmt>d range(len(docMask))<block_start>start=self.doc_range[docMask[d]]<line_sep>stop=self.doc_range[docMask[d]+1]<line_sep>newTrueZList.append(TrueZ[start:stop])<block_end>newTrueZ=np.hstack(newTrueZList)<assert_stmt>newTrueZ.size<eq>newDocRange[-1]<block_end><else_stmt><block_start>newTrueZ=<none><block_end><return>GroupXData(newX newDocRange Xprev=newXprev nDocTotal=nDocTotal TrueZ=newTrueZ)<block_end><def_stmt>add_data self XDataObj<block_start>""" Appends (in-place) provided dataset to this dataset.
Post Condition
-------
self.Data grows by adding all units from provided DataObj.
"""<if_stmt><not>self.dim<eq>XDataObj.dim<block_start><raise>ValueError("Dimensions must match!")<block_end>self.nObs<augadd>XDataObj.nObs<line_sep>self.nDocTotal<augadd>XDataObj.nDocTotal<line_sep>self.nDoc<augadd>XDataObj.nDoc<line_sep>self.X=np.vstack([self.X XDataObj.X])<if_stmt>hasattr(self 'Xprev')<block_start>self.Xprev=np.vstack([self.Xprev XDataObj.Xprev])<block_end>new_doc_range=XDataObj.doc_range[1:]+self.doc_range[-1]<line_sep>self.doc_range=np.hstack([self.doc_range new_doc_range])<line_sep>self._check_dims()<block_end><def_stmt>get_random_sample self nDoc randstate=np.random<block_start>nDoc=np.minimum(nDoc self.nDoc)<line_sep>mask=randstate.permutation(self.nDoc)[:nDoc]<line_sep>Data=self.select_subset_by_mask(mask doTrackFullSize=<false>)<line_sep><return>Data<block_end><def_stmt>__str__ self<block_start><return>self.X.__str__()<block_end><def_stmt>getRawDataAsSharedMemDict self<block_start>''' Create dict with copies of raw data as shared memory arrays
'''<line_sep>dataShMemDict=dict()<line_sep>dataShMemDict['X']=numpyToSharedMemArray(self.X)<line_sep>dataShMemDict['doc_range']=numpyToSharedMemArray(self.doc_range)<line_sep>dataShMemDict['nDocTotal']=self.nDocTotal<if_stmt>hasattr(self 'Xprev')<block_start>dataShMemDict['Xprev']=numpyToSharedMemArray(self.Xprev)<block_end><return>dataShMemDict<block_end><def_stmt>getDataSliceFunctionHandle self<block_start>""" Return function handle that can make data slice objects.
Useful with parallelized algorithms,
when we need to use shared memory.
Returns
-------
f : function handle
"""<line_sep><return>makeDataSliceFromSharedMem<block_end><block_end><def_stmt>makeDataSliceFromSharedMem dataShMemDict cslice=(0 <none>) batchID=<none><block_start>""" Create data slice from provided raw arrays and slice indicators.
Returns
-------
Dslice : namedtuple with same fields as GroupXData object
* X
* nObs
* nObsTotal
* dim
Represents subset of documents identified by cslice tuple.
Example
-------
>>> Data = GroupXData(np.random.rand(25,2), doc_range=[0,4,12,25])
>>> shMemDict = Data.getRawDataAsSharedMemDict()
>>> Dslice = makeDataSliceFromSharedMem(shMemDict)
>>> np.allclose(Data.X, Dslice.X)
True
>>> np.allclose(Data.nObs, Dslice.nObs)
True
>>> Data.dim == Dslice.dim
True
>>> Aslice = makeDataSliceFromSharedMem(shMemDict, (0, 2))
>>> Aslice.nDoc
2
>>> np.allclose(Aslice.doc_range, Dslice.doc_range[0:(2+1)])
True
"""<if_stmt>batchID<is><not><none><and>batchID<in>dataShMemDict<block_start>dataShMemDict=dataShMemDict[batchID]<block_end># Make local views (NOT copies) to shared mem arrays
doc_range=sharedMemToNumpyArray(dataShMemDict['doc_range'])<line_sep>X=sharedMemToNumpyArray(dataShMemDict['X'])<line_sep>nDocTotal=int(dataShMemDict['nDocTotal'])<line_sep>dim=X.shape[1]<if_stmt>cslice<is><none><block_start>cslice=(0 doc_range.size-1)<block_end><elif_stmt>cslice[1]<is><none><block_start>cslice=(0 doc_range.size-1)<block_end>tstart=doc_range[cslice[0]]<line_sep>tstop=doc_range[cslice[1]]<line_sep>keys=['X' 'Xprev' 'doc_range' 'nDoc' 'nObs' 'dim' 'nDocTotal']<if_stmt>'Xprev'<in>dataShMemDict<block_start>Xprev=sharedMemToNumpyArray(dataShMemDict['Xprev'])[tstart:tstop]<block_end><else_stmt><block_start>Xprev=<none><block_end>Dslice=namedtuple("GroupXDataTuple" keys)(X=X[tstart:tstop] Xprev=Xprev doc_range=doc_range[cslice[0]:cslice[1]+1]-doc_range[cslice[0]] nDoc=cslice[1]-cslice[0] nObs=tstop-tstart dim=dim nDocTotal=nDocTotal )<line_sep><return>Dslice<block_end> |
<class_stmt>CudaSupportError(RuntimeError)<block_start><pass><block_end> |
from configs import cfg
from src.utils.record_log import _logger
import numpy as np
import tensorflow as tf
import scipy.stats as stats


class Evaluator(object):
    """Evaluates a model over a dataset (loss, Pearson, Spearman, MSE)
    and logs the metrics as TensorBoard summaries."""

    def __init__(self, model):
        self.model = model
        self.global_step = model.global_step
        # ---- summary ----
        self.build_summary()
        self.writer = tf.summary.FileWriter(cfg.summary_dir)

    def get_evaluation(self, sess, dataset_obj, global_step=None):
        """Run the model over every batch of dataset_obj.

        Returns
        -------
        (loss, (pearson, spearman, mse)) averaged/computed over the whole set.
        Also writes a TensorBoard summary when global_step is given.
        """
        _logger.add()
        _logger.add('getting evaluation result for %s' % dataset_obj.data_type)
        logits_list, loss_list = [], []
        target_score_list, predicted_score_list = [], []
        for sample_batch, _, _, _ in dataset_obj.generate_batch_sample_iter():
            feed_dict = self.model.get_feed_dict(sample_batch, 'dev')
            logits, loss, predicted_score = sess.run(
                [self.model.logits, self.model.loss,
                 self.model.predicted_score],
                feed_dict)
            logits_list.append(np.argmax(logits, -1))
            loss_list.append(loss)
            predicted_score_list.append(predicted_score)
            for sample in sample_batch:
                target_score_list.append(sample['relatedness_score'])

        # NOTE(review): logits_array is computed but never used downstream
        # (kept for parity with the original analysis hooks).
        logits_array = np.concatenate(logits_list, 0)
        loss_value = np.mean(loss_list)
        target_scores = np.array(target_score_list)
        predicted_scores = np.concatenate(predicted_score_list, 0)

        # pearson, spearman, mse
        pearson_value = stats.pearsonr(target_scores, predicted_scores)[0]
        spearman_value = stats.spearmanr(target_scores, predicted_scores)[0]
        mse_value = np.mean((target_scores - predicted_scores) ** 2)

        if global_step is not None:
            # Pick the placeholder group matching this dataset split.
            if dataset_obj.data_type in ('train', 'dev'):
                prefix = dataset_obj.data_type
            else:
                prefix = 'test'
            summary_feed_dict = {
                getattr(self, prefix + '_loss'): loss_value,
                getattr(self, prefix + '_pearson'): pearson_value,
                getattr(self, prefix + '_spearman'): spearman_value,
                getattr(self, prefix + '_mse'): mse_value,
            }
            summary = sess.run(getattr(self, prefix + '_summaries'),
                               summary_feed_dict)
            self.writer.add_summary(summary, global_step)
        return loss_value, (pearson_value, spearman_value, mse_value)

    # --- internal use ------
    def build_summary(self):
        """Create loss/pearson/spearman/mse placeholders and scalar
        summaries for each of the train/dev/test splits."""
        for prefix in ('train', 'dev', 'test'):
            collection = '%s_summaries_collection' % prefix
            with tf.name_scope('%s_summaries' % prefix):
                for metric in ('loss', 'pearson', 'spearman', 'mse'):
                    name = '%s_%s' % (prefix, metric)
                    placeholder = tf.placeholder(tf.float32, [], name)
                    setattr(self, name, placeholder)
                    tf.add_to_collection(
                        collection, tf.summary.scalar(name, placeholder))
                setattr(self, prefix + '_summaries',
                        tf.summary.merge_all(collection))
import theano.tensor as T
import keras.backend as K
from keras.layers.core import LambdaMerge
from keras import initializations


class MemN2N(LambdaMerge):
    """End-to-end memory network layer with multiple hops.

    Merges a (facts, question) pair of embedded inputs into a single
    output vector by repeated attention over the memory.
    """

    def __init__(self, layers, output_dim, input_dim, input_length,
                 memory_length, hops=3, bow_mode="bow", mode="adjacent",
                 emb_init="uniform", init="glorot_uniform", **kwargs):
        self.output_dim = output_dim
        self.input_dim = input_dim
        self.input_length = input_length
        self.memory_length = memory_length
        self.hops = hops
        self.bow_mode = bow_mode
        self.mode = mode
        self.init = initializations.get(init)
        self.emb_init = initializations.get(emb_init)
        output_shape = (self.output_dim,)
        super(MemN2N, self).__init__(layers, lambda x: x, output_shape)

    def build(self):
        # One (A, C) embedding pair per hop; "adjacent" mode shares
        # A_{k+1} = C_k as in the MemN2N paper.
        self.outputs = []
        self.memory = []
        self.trainable_weights = []
        for i in range(self.hops):
            # memory embedding - A
            if self.mode == "adjacent" and i > 0:
                A = self.outputs[-1]
            else:
                A = self.emb_init((self.input_dim, self.output_dim),
                                  name="{}_A_{}".format(self.name, i))
                self.trainable_weights += [A]
            self.memory.append(A)
            # outputs embedding - C
            C = self.emb_init((self.input_dim, self.output_dim),
                              name="{}_C_{}".format(self.name, i))
            self.trainable_weights += [C]
            self.outputs.append(C)
        if self.mode == "adjacent":
            # Final projection reuses the last output embedding (transposed);
            # W and b are deliberately left out of trainable_weights.
            self.W = self.outputs[-1].T
            self.b = K.zeros((self.input_dim,), name="{}_b".format(self.name))
        # question embedding - B
        self.B = self.emb_init((self.input_dim, self.output_dim),
                               name="{}_B".format(self.name))
        self.trainable_weights += [self.B]
        # Temporal embedding
        self.Te = self.emb_init((self.input_length, self.output_dim))
        self.trainable_weights += [self.Te]

    def get_output(self, train=False):
        # WARN make sure input layers are Embedding layers with identity init
        facts, question = [layer.get_output(train) for layer in self.layers]
        u, mask_q = self.lookup(question, self.B, 1)  # just 1 question
        for A, C in zip(self.memory, self.outputs):
            m, mask_m = self.lookup(facts, A, self.memory_length)
            c, mask_c = self.lookup(facts, C, self.memory_length)
            p = self.attention(m, u, mask_m)   # attention weights
            o = self.calc_output(c, p)         # weighted readout
            u = o + u
        return u[:, 0, :]

    def lookup(self, x, W, memory_length):
        """Embed token ids x with table W; returns (embedded, pad mask)."""
        # shape: (batch*memory_length, input_length)
        x = K.cast(K.reshape(x, (-1, self.input_length)), 'int32')
        mask = K.expand_dims(K.not_equal(x, 0.), dim=-1)
        # shape: (batch*memory_length, input_length, output_dim)
        X = K.gather(W, x)
        if self.bow_mode == "bow":
            # bag-of-words + temporal encoding
            # shape: (batch*memory_length, output_dim)
            X = K.sum(X + K.expand_dims(self.Te, 0), axis=1)
        # shape: (batch, memory_length, output_dim)
        X = K.reshape(X, (-1, memory_length, self.output_dim))
        return X, mask

    def attention(self, m, q, mask):
        """Softmax attention of question q over memory m."""
        # mask original shape is (batch*memory_length, input_length, 1);
        # reshape to (batch, memory) -- currently unused in the softmax.
        mask = K.reshape(mask[:, 0], (-1, self.memory_length))
        # shape: (batch, memory_length, 1)
        p = T.batched_tensordot(m, q, (2, 2))
        # shape: (batch, memory_length)
        p = K.softmax(p[:, :, 0])
        # shape: (batch, 1, memory_length)
        return K.expand_dims(p, dim=1)

    def calc_output(self, c, p):
        """Probability-weighted sum of output embeddings c."""
        # shape: (batch, memory_length, 1)
        p = K.permute_dimensions(p, (0, 2, 1))
        # shape: (batch, output_dim)
        o = K.sum(c * p, axis=1)
        # shape: (batch, 1, output_dim)
        return K.expand_dims(o, dim=1)
# coding=utf-8
import matplotlib
matplotlib.use("Agg")  # headless backend; must be set before importing pyplot
import tensorflow as tf
import argparse
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization, Reshape, Permute, Activation
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.preprocessing import LabelEncoder
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import random
import os
from tqdm import tqdm

seed = 7
np.random.seed(seed)

# Image tile size
img_w = 32
img_h = 32

# Number of classes, and the gray values that encode them in label images
n_label = 6
classes = [0.0, 17.0, 34.0, 51.0, 68.0, 255.0]
labelencoder = LabelEncoder()
labelencoder.fit(classes)

# Number of training epochs and batch size
EPOCHS = 5
BS = 32

# Maximum pixel value (used for normalization)
divisor = 255.0

# Root directory of the dataset
filepath = 'C:\\Users\Administrator\Desktop\Project\src\\'

# Read an image
<def_stmt>load_img path grayscale=<false><block_start><if_stmt>grayscale<block_start>img=cv2.imread(path cv2.IMREAD_GRAYSCALE)<block_end><else_stmt><block_start>img=cv2.imread(path)<line_sep>img=np.array(img dtype="float")/divisor<block_end><return>img<block_end>#获取训练数据和测试数据地址
<def_stmt>get_train_val val_rate=0.25<block_start>train_url=[]<line_sep>train_set=[]<line_sep>val_set=[]<for_stmt>pic os.listdir(filepath+'train')<block_start>train_url.append(pic)<block_end>random.shuffle(train_url)<line_sep>total_num=len(train_url)<line_sep>val_num=int(val_rate<times>total_num)<for_stmt>i range(len(train_url))<block_start><if_stmt>i<l>val_num<block_start>val_set.append(train_url[i])<block_end><else_stmt><block_start>train_set.append(train_url[i])<block_end><block_end><return>train_set val_set<block_end># 生成训练数据
def generateData(batch_size, data=[]):
    """Endless generator of (images, one-hot masks) training batches.

    Images come from <filepath>/train and masks from <filepath>/label;
    mask gray values are label-encoded then one-hot encoded per pixel.
    """
    while True:
        batch_imgs = []
        batch_masks = []
        count = 0
        for url in data:
            count += 1
            img = load_img(filepath + 'train/' + url)
            batch_imgs.append(img_to_array(img))
            mask = load_img(filepath + 'label/' + url, grayscale=True)
            batch_masks.append(img_to_array(mask).reshape((img_w * img_h,)))
            if count % batch_size == 0:
                x = np.array(batch_imgs)
                y = np.array(batch_masks).flatten()  # flatten pixels
                y = labelencoder.transform(y)
                y = to_categorical(y, num_classes=n_label)  # one-hot labels
                y = y.reshape((batch_size, img_w, img_h, n_label))
                yield (x, y)
                batch_imgs, batch_masks, count = [], [], 0

# Generate validation data
<def_stmt>generateValidData batch_size data=[]<block_start><while_stmt><true><block_start>valid_data=[]<line_sep>valid_label=[]<line_sep>batch=0<for_stmt>i (range(len(data)))<block_start>url=data[i]<line_sep>batch<augadd>1<line_sep>img=load_img(filepath+'train/'+url)<line_sep>img=img_to_array(img)<line_sep>valid_data.append(img)<line_sep>label=load_img(filepath+'label/'+url grayscale=<true>)<line_sep>label=img_to_array(label).reshape((img_w<times>img_h ))<line_sep>valid_label.append(label)<if_stmt>batch%batch_size<eq>0<block_start>valid_data=np.array(valid_data)<line_sep>valid_label=np.array(valid_label).flatten()<line_sep>valid_label=labelencoder.transform(valid_label)<line_sep>valid_label=to_categorical(valid_label num_classes=n_label)<line_sep>valid_label=valid_label.reshape((batch_size img_w img_h n_label))<line_sep><yield>(valid_data valid_label)<line_sep>valid_data=[]<line_sep>valid_label=[]<line_sep>batch=0<block_end><block_end><block_end><block_end>#定义模型-网络模型
def SegNet():
    """Build and compile the SegNet-style encoder/decoder model.

    Returns a compiled Sequential model mapping (img_w, img_h, 3) images
    to per-pixel softmax over n_label classes.
    """
    model = Sequential()

    def conv_bn(filters, **conv_kwargs):
        # One 3x3 same-padding ReLU convolution followed by batch norm.
        model.add(Conv2D(filters, (3, 3), strides=(1, 1), padding='same',
                         activation='relu', **conv_kwargs))
        model.add(BatchNormalization())

    # ---- encoder ----
    conv_bn(64, input_shape=(img_w, img_h, 3), data_format='channels_last')
    conv_bn(64)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    conv_bn(128)
    conv_bn(128)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    for _ in range(3):
        conv_bn(256)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    for _ in range(3):
        conv_bn(512)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    for _ in range(3):
        conv_bn(512)
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # ---- decoder ----
    model.add(UpSampling2D(size=(2, 2)))
    for _ in range(3):
        conv_bn(512)
    model.add(UpSampling2D(size=(2, 2)))
    for _ in range(3):
        conv_bn(512)
    model.add(UpSampling2D(size=(2, 2)))
    for _ in range(3):
        conv_bn(256)
    model.add(UpSampling2D(size=(2, 2)))
    conv_bn(128)
    conv_bn(128)
    model.add(UpSampling2D(size=(2, 2)))
    # NOTE(review): the original repeats input_shape/data_format on this
    # non-first layer; kept verbatim for behavioral equivalence.
    conv_bn(64, input_shape=(img_w, img_h, 3), data_format='channels_last')
    conv_bn(64)
    model.add(Conv2D(n_label, (1, 1), strides=(1, 1), padding='same'))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])
    model.summary()
    return model

# Start training
def train(args):
    """Train SegNet, checkpoint the best model, and save a metrics plot.

    Parameters
    ----------
    args : dict with keys 'model' (checkpoint path) and 'plot' (figure path).
    """
    model = SegNet()
    # BUG FIX: tf.keras names this metric 'val_accuracy', not 'val_acc';
    # with the old name ModelCheckpoint could never find the monitored
    # metric and never saved an improved model.
    modelcheck = ModelCheckpoint(args['model'], monitor='val_accuracy',
                                 save_best_only=True, mode='max')
    # Renamed from `callable` -- that shadowed the builtin.
    callbacks = [modelcheck, tf.keras.callbacks.TensorBoard(log_dir='.')]
    train_set, val_set = get_train_val()
    train_numb = len(train_set)
    valid_numb = len(val_set)
    print("the number of train data is", train_numb)
    print("the number of val data is", valid_numb)
    H = model.fit(x=generateData(BS, train_set),
                  steps_per_epoch=(train_numb // BS),
                  epochs=EPOCHS,
                  verbose=2,
                  validation_data=generateValidData(BS, val_set),
                  validation_steps=(valid_numb // BS),
                  callbacks=callbacks)

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    # BUG FIX: tf.keras History keys are 'accuracy'/'val_accuracy';
    # the original 'acc'/'val_acc' keys raise KeyError here.
    plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
    plt.title("Training Loss and Accuracy on SegNet Satellite Seg")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig(args["plot"])

# Parse command-line arguments
def args_parse():
    """Parse command-line options for training.

    Returns
    -------
    dict with keys: 'augment' (bool), 'model' (str), 'plot' (str).
    """
    # construct the argument parse and parse the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--augment",
                        help="using data augment or not",
                        action="store_true", default=False)
    parser.add_argument("-m", "--model", required=False,
                        default="segnet.h5",
                        help="path to output model")
    parser.add_argument("-p", "--plot", type=str, default="plot.png",
                        help="path to output accuracy/loss plot")
    return vars(parser.parse_args())

# Run the program
# Script entry point: parse CLI options, then run training.
if __name__ == '__main__':
    cli_args = args_parse()
    train(cli_args)
    print("完成")  # "done"
"""
Problem:
--------
Design a data structure that supports the following two operations:
- `void addNum(int num)`: Add a integer number from the data stream to the data structure.
- `double findMedian()`: Return the median of all elements so far.
"""<class_stmt>MedianFinder<block_start><def_stmt>__init__ self<block_start>"""
Initialize your data structure here.
"""<line_sep>self.list=[]<block_end><def_stmt>addNum self num:int<arrow><none># Traverse through the list and check if `num` > ith element
# If yes, insert `num` in that index
# This keeps the list sorted at all times
<block_start><for_stmt>i range(len(self.list))<block_start><if_stmt>num<g>self.list[i]<block_start>self.list.insert(i num)<line_sep><return><block_end><block_end># If `num` is the largest element or is the first one to be added
self.list.append(num)<block_end><def_stmt>findMedian self<arrow>float# Find index of the middle element (floor division by 2)
<block_start>mid_index=len(self.list)<floordiv>2<if_stmt>len(self.list)%2<eq>0# If number of elements = EVEN
# Return average of the middle 2 elements
<block_start><return>(self.list[mid_index-1]+self.list[mid_index])/2<block_end><else_stmt># If number of elements = ODD
# Return the middle element
<block_start><return>self.list[mid_index]<block_end><block_end><block_end># Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
|
from . import utils
import os
import scanpy as sc
import scprep
import tempfile

URL = "https://ndownloader.figshare.com/files/25555751"


@utils.loader
def load_human_blood_nestorowa2016(test=False):
    """Download the Nestorowa 2016 human blood dataset from Figshare.

    With test=True, the full dataset is loaded first (cached if available)
    and then cut down to the first 500 genes and 500 subsampled cells for
    fast testing.
    """
    if not test:
        with tempfile.TemporaryDirectory() as tempdir:
            h5ad_path = os.path.join(tempdir, "human_blood_nestorowa2016.h5ad")
            scprep.io.download.download_url(URL, h5ad_path)
            adata = sc.read(h5ad_path)
            # Ensure there are no cells or genes with 0 counts
            utils.filter_genes_cells(adata)
        return adata

    # load full data first, cached if available
    adata = load_human_blood_nestorowa2016(test=False)
    # Subsample data: first 500 genes, then 500 random cells.
    adata = adata[:, :500].copy()
    utils.filter_genes_cells(adata)
    sc.pp.subsample(adata, n_obs=500)
    # Note: could also use 200-500 HVGs rather than 200 random genes
    # Ensure there are no cells or genes with 0 counts
    utils.filter_genes_cells(adata)
    return adata
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.