content stringlengths 0 1.55M |
|---|
# MIT License
#
# Copyright (c) 2021 <NAME> and <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
<import_stmt>torch.nn<as>nn<import_from_stmt>openspeech.utils CTCDECODE_IMPORT_ERROR<class_stmt>BeamSearchCTC(nn.Module)<block_start>r"""
Decodes probability output using ctcdecode package.
Args:
labels (list): the tokens you used to train your model
lm_path (str): the path to your external kenlm language model(LM).
alpha (int): weighting associated with the LMs probabilities.
beta (int): weight associated with the number of words within our beam
cutoff_top_n (int): cutoff number in pruning. Only the top cutoff_top_n characters with the highest probability
in the vocab will be used in beam search.
cutoff_prob (float): cutoff probability in pruning. 1.0 means no pruning.
beam_size (int): this controls how broad the beam search is.
num_processes (int): parallelize the batch using num_processes workers.
blank_id (int): this should be the index of the CTC blank token
Inputs: logits, sizes
- logits: Tensor of character probabilities, where probs[c,t] is the probability of character c at time t
- sizes: Size of each sequence in the mini-batch
Returns:
- outputs: sequences of the model's best prediction
"""<def_stmt>__init__ self labels:list lm_path:str=<none> alpha:int=0 beta:int=0 cutoff_top_n:int=40 cutoff_prob:float=1.0 beam_size:int=3 num_processes:int=4 blank_id:int=0 <arrow><none><block_start>super(BeamSearchCTC self).__init__()<try_stmt><block_start><import_from_stmt>ctcdecode CTCBeamDecoder<block_end><except_stmt>ImportError<block_start><raise>ImportError(CTCDECODE_IMPORT_ERROR)<block_end><assert_stmt>isinstance(labels list) "labels must instance of list"<line_sep>self.decoder=CTCBeamDecoder(labels lm_path alpha beta cutoff_top_n cutoff_prob beam_size num_processes blank_id)<block_end><def_stmt>forward self logits sizes=<none><block_start>r"""
Decodes probability output using ctcdecode package.
Inputs: logits, sizes
logits: Tensor of character probabilities, where probs[c,t] is the probability of character c at time t
sizes: Size of each sequence in the mini-batch
Returns:
outputs: sequences of the model's best prediction
"""<line_sep>logits=logits.cpu()<line_sep>outputs,scores,offsets,seq_lens=self.decoder.decode(logits sizes)<line_sep><return>outputs<block_end><block_end> |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for inference_gym.targets.eight_schools."""<import_stmt>tensorflow.compat.v2<as>tf<import_from_stmt>inference_gym.internal test_util<import_from_stmt>inference_gym.targets eight_schools<line_sep>@test_util.multi_backend_test(globals() 'targets.eight_schools_test')<class_stmt>EightSchoolsTest(test_util.InferenceGymTestCase)<block_start><def_stmt>testEightSchools self<block_start>"""Checks that unconstrained parameters yield finite joint densities."""<line_sep>model=eight_schools.EightSchools()<line_sep>self.validate_log_prob_and_transforms(model sample_transformation_shapes=dict(identity={'avg_effect':[] 'log_stddev':[] 'school_effects':[8] }) check_ground_truth_mean_standard_error=<true> check_ground_truth_mean=<true> check_ground_truth_standard_deviation=<true>)<block_end>@test_util.numpy_disable_gradient_test<def_stmt>testEightSchoolsHMC self<block_start>"""Checks approximate samples from the model against the ground truth."""<line_sep>model=eight_schools.EightSchools()<line_sep>self.validate_ground_truth_using_hmc(model num_chains=4 num_steps=4000 num_leapfrog_steps=10 step_size=0.4 )<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end> |
"""This module implements the MessageService
The MessageService enables sending and receiving messages
"""<import_stmt>param<class_stmt>MessageService(param.Parameterized)<block_start>"""The MessageService enables sending and receiving messages"""<block_end> |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
<import_stmt>logging<import_stmt>unittest<import_from_stmt>collections namedtuple<import_from_stmt>classy_vision.generic.distributed_util set_cpu_device<import_from_stmt>parameterized parameterized<import_from_stmt>utils ROOT_LOSS_CONFIGS SSLHydraConfig<import_from_stmt>vissl.trainer.train_task SelfSupervisionTask<import_from_stmt>vissl.utils.hydra_config convert_to_attrdict<line_sep>logger=logging.getLogger("__name__")<line_sep>set_cpu_device()<line_sep>BATCH_SIZE=2048<line_sep>EMBEDDING_DIM=128<line_sep>NUM_CROPS=2<line_sep>BUFFER_PARAMS_STRUCT=namedtuple("BUFFER_PARAMS_STRUCT" ["effective_batch_size" "world_size" "embedding_dim"])<line_sep>BUFFER_PARAMS=BUFFER_PARAMS_STRUCT(BATCH_SIZE 1 EMBEDDING_DIM)<class_stmt>TestRootConfigsLossesBuild(unittest.TestCase)<block_start>@parameterized.expand(ROOT_LOSS_CONFIGS)<def_stmt>test_loss_build self filepath<block_start>logger.info(f"Loading {filepath}")<line_sep>cfg=SSLHydraConfig.from_configs([filepath "config.DATA.TRAIN.DATA_SOURCES=[synthetic]" "config.DATA.TEST.DATA_SOURCES=[synthetic]" ])<line_sep>_,config=convert_to_attrdict(cfg.default_cfg)<line_sep>task=SelfSupervisionTask.from_config(config)<line_sep>task.datasets,_=task.build_datasets()<line_sep>self.assertTrue(task._build_loss() "failed to build loss")<block_end><def_stmt>test_pytorch_loss self<block_start>cfg=SSLHydraConfig.from_configs(["config=test/integration_test/quick_simclr" "config.LOSS.name=CosineEmbeddingLoss" "+config.LOSS.CosineEmbeddingLoss.margin=1.0" "config.DATA.TRAIN.DATA_SOURCES=[synthetic]" "config.DATA.TEST.DATA_SOURCES=[synthetic]" ])<line_sep>_,config=convert_to_attrdict(cfg.default_cfg)<line_sep>task=SelfSupervisionTask.from_config(config)<line_sep>task.datasets,_=task.build_datasets()<line_sep>self.assertTrue(task._build_loss() "failed to build loss")<block_end><block_end> |
"""Defines the application configuration for the source application"""<import_from_future_stmt> unicode_literals<import_from_stmt>django.apps AppConfig<class_stmt>SourceConfig(AppConfig)<block_start>"""Configuration for the source app
"""<line_sep>name='source'<line_sep>label='source'<line_sep>verbose_name='Source'<def_stmt>ready self<block_start>"""
Override this method in subclasses to run code when Django starts.
"""<line_sep># Register source file parse saver
<import_from_stmt>job.configuration.data.data_file DATA_FILE_PARSE_SAVER<import_from_stmt>source.configuration.source_data_file SourceDataFileParseSaver<line_sep>DATA_FILE_PARSE_SAVER['DATA_FILE_PARSE_SAVER']=SourceDataFileParseSaver()<line_sep># Register source message types
<import_from_stmt>messaging.messages.factory add_message_type<import_from_stmt>source.messages.purge_source_file PurgeSourceFile<line_sep>add_message_type(PurgeSourceFile)<block_end><block_end> |
"""
Name : c14_11_rainbow_callMaxOn2_viaSimulation.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""<import_stmt>scipy<as>sp<import_from_stmt>scipy zeros sqrt shape<line_sep>#
sp.random.seed(123)# fix our random numbers
s1=100.# stock price 1
s2=95.# stock price 2
k=102.0# exercise price
T=8./12.# maturity in years
r=0.08# risk-free rate
rho=0.75# correlation between 2
sigma1=0.15# volatility for stock 1
sigma2=0.20# volatility for stock 1
nSteps=100.# number of steps
nSimulation=1000# number of simulations
#
# step 1: generate correlated random number
dt=T/nSteps<line_sep>call=sp.zeros([nSimulation] dtype=float)<line_sep>x=range(0 int(nSteps) 1)<line_sep>#
# step 2: call call prices
<for_stmt>j range(0 nSimulation)<block_start>x1=sp.random.normal(size=nSimulation)<line_sep>x2=sp.random.normal(size=nSimulation)<line_sep>y1=x1<line_sep>y2=rho<times>x1+sp.sqrt(1-rho<power>2)<times>x2<line_sep>sT1=s1<line_sep>sT2=s2<for_stmt>i x[:-1]<block_start>e1=y1[i]<line_sep>e2=y2[i]<line_sep>sT1<augmul>sp.exp((r-0.5<times>sigma1<power>2)<times>dt+sigma1<times>e1<times>sqrt(dt))<line_sep>sT2<augmul>sp.exp((r-0.5<times>sigma2<power>2)<times>dt+sigma2<times>e2<times>sqrt(dt))<line_sep>minOf2=min(sT1 sT2)<line_sep>call[j]=max(minOf2-k 0)<block_end><block_end>#
# Step 3: summation and discount back
call=sp.mean(call)<times>sp.exp(-r<times>T)<line_sep>print('Rainbow call on minimum of 2 assets = ' round(call 3))<line_sep> |
##-*****************************************************************************
##
## Copyright (c) 2009-2011,
## <NAME> Imageworks, Inc. and
## Industrial Light & Magic, a division of Lucasfilm Entertainment Company Ltd.
##
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following disclaimer
## in the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Sony Pictures Imageworks, nor
## Industrial Light & Magic nor the names of their contributors may be used
## to endorse or promote products derived from this software without specific
## prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##-*****************************************************************************
<import_from_stmt>maya cmds<as>MayaCmds<import_stmt>maya.OpenMaya<as>OpenMaya<import_stmt>os<import_stmt>subprocess<import_stmt>unittest<import_stmt>util<def_stmt>createJoints <block_start>name=MayaCmds.joint(position=(0 0 0))<line_sep>MayaCmds.rotate(33.694356 4.000428 61.426019 r=<true> ws=<true>)<line_sep>MayaCmds.joint(position=(0 4 0) orientation=(0.0 45.0 90.0))<line_sep>MayaCmds.rotate(62.153171 0.0 0.0 r=<true> os=<true>)<line_sep>MayaCmds.joint(position=(0 8 -1) orientation=(90.0 0.0 0.0))<line_sep>MayaCmds.rotate(70.245162 -33.242019 41.673097 r=<true> os=<true>)<line_sep>MayaCmds.joint(position=(0 12 3))<line_sep>MayaCmds.rotate(0.0 0.0 -58.973851 r=<true> os=<true>)<line_sep><return>name<block_end><class_stmt>JointTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>MayaCmds.file(new=<true> force=<true>)<line_sep>self.__files=[]<line_sep>self.__abcStitcher=[os.environ['AbcStitcher']]<block_end><def_stmt>tearDown self<block_start><for_stmt>f self.__files<block_start>os.remove(f)<block_end><block_end><def_stmt>testStaticJointRW self<block_start>name=createJoints()<line_sep># write to file
self.__files.append(util.expandFileName('testStaticJoints.abc'))<line_sep>MayaCmds.AbcExport(j='-root %s -file %s'%(name self.__files[-1]))<line_sep>MayaCmds.select(name)<line_sep>MayaCmds.group(name='original')<line_sep># read from file
MayaCmds.AbcImport(self.__files[-1] mode='import')<line_sep># make sure the translate and rotation are the same
nodes1=["|original|joint1" "|original|joint1|joint2" "|original|joint1|joint2|joint3" "|original|joint1|joint2|joint3|joint4"]<line_sep>nodes2=["|joint1" "|joint1|joint2" "|joint1|joint2|joint3" "|joint1|joint2|joint3|joint4"]<for_stmt>i range(0 4)<block_start>self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tx') MayaCmds.getAttr(nodes2[i]+'.tx') 4)<line_sep>self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.ty') MayaCmds.getAttr(nodes2[i]+'.ty') 4)<line_sep>self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tz') MayaCmds.getAttr(nodes2[i]+'.tz') 4)<block_end><block_end><def_stmt>testStaticIKRW self<block_start>name=createJoints()<line_sep>MayaCmds.ikHandle(sj=name ee='joint4')<line_sep>MayaCmds.move(-1.040057 -7.278225 6.498725 r=<true>)<line_sep># write to file
self.__files.append(util.expandFileName('testStaticIK.abc'))<line_sep>MayaCmds.AbcExport(j='-root %s -f %s'%(name self.__files[-1]))<line_sep>MayaCmds.select(name)<line_sep>MayaCmds.group(name='original')<line_sep># read from file
MayaCmds.AbcImport(self.__files[-1] mode='import')<line_sep># make sure the translate and rotation are the same
nodes1=["|original|joint1" "|original|joint1|joint2" "|original|joint1|joint2|joint3" "|original|joint1|joint2|joint3|joint4"]<line_sep>nodes2=["|joint1" "|joint1|joint2" "|joint1|joint2|joint3" "|joint1|joint2|joint3|joint4"]<for_stmt>i range(0 4)<block_start>self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tx') MayaCmds.getAttr(nodes2[i]+'.tx') 4)<line_sep>self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.ty') MayaCmds.getAttr(nodes2[i]+'.ty') 4)<line_sep>self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tz') MayaCmds.getAttr(nodes2[i]+'.tz') 4)<block_end><block_end><def_stmt>testAnimIKRW self<block_start>name=createJoints()<line_sep>handleName=MayaCmds.ikHandle(sj=name ee='joint4')[0]<line_sep>MayaCmds.currentTime(1 update=<true>)<line_sep>MayaCmds.setKeyframe(handleName breakdown=0 hierarchy='none' controlPoints=<false> shape=<false>)<line_sep>MayaCmds.currentTime(16 update=<true>)<line_sep>MayaCmds.move(-1.040057 -7.278225 6.498725 r=<true>)<line_sep>MayaCmds.setKeyframe(handleName breakdown=0 hierarchy='none' controlPoints=<false> shape=<false>)<line_sep>self.__files.append(util.expandFileName('testAnimIKRW.abc'))<line_sep>self.__files.append(util.expandFileName('testAnimIKRW01_08.abc'))<line_sep>self.__files.append(util.expandFileName('testAnimIKRW09-16.abc'))<line_sep># write to files
MayaCmds.AbcExport(j='-fr 1 8 -root %s -f %s'%(name self.__files[-2]))<line_sep>MayaCmds.AbcExport(j='-fr 9 16 -root %s -f %s'%(name self.__files[-1]))<line_sep>MayaCmds.select(name)<line_sep>MayaCmds.group(name='original')<line_sep>subprocess.call(self.__abcStitcher+self.__files[-3:])<line_sep># read from file
MayaCmds.AbcImport(self.__files[-3] mode='import')<line_sep># make sure the translate and rotation are the same
nodes1=["|original|joint1" "|original|joint1|joint2" "|original|joint1|joint2|joint3" "|original|joint1|joint2|joint3|joint4"]<line_sep>nodes2=["|joint1" "|joint1|joint2" "|joint1|joint2|joint3" "|joint1|joint2|joint3|joint4"]<for_stmt>t range(1 25)<block_start>MayaCmds.currentTime(t update=<true>)<for_stmt>i range(0 4)<block_start>self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tx') MayaCmds.getAttr(nodes2[i]+'.tx') 4)<line_sep>self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.ty') MayaCmds.getAttr(nodes2[i]+'.ty') 4)<line_sep>self.failUnlessAlmostEqual(MayaCmds.getAttr(nodes1[i]+'.tz') MayaCmds.getAttr(nodes2[i]+'.tz') 4)<block_end><block_end><block_end><block_end> |
# -*- coding: utf-8 -*-
"""补充 8105 中汉字的拼音数据"""<import_from_stmt>collections namedtuple<import_stmt>re<import_stmt>sys<import_from_stmt>pyquery PyQuery<import_stmt>requests<line_sep>re_pinyin=re.compile(r'拼音:(?P<pinyin>\S+) ')<line_sep>re_code=re.compile(r'统一码\w?:(?P<code>\S+) ')<line_sep>re_alternate=re.compile(r'异体字:\s+?(?P<alternate>\S+)')<line_sep>HanziInfo=namedtuple('HanziInfo' 'pinyin code alternate')<def_stmt>fetch_html url params<block_start>response=requests.get(url params=params)<line_sep><return>response.content<block_end><def_stmt>fetch_info hanzi<block_start>url='http://www.guoxuedashi.com/zidian/so.php'<line_sep>params={'sokeyzi':hanzi 'kz':1 'submit':'' }<line_sep>html=fetch_html(url params)<line_sep>pq=PyQuery(html)<line_sep>pq=PyQuery(pq('table.zui td')[1])<line_sep>text=pq('tr').text()<line_sep>text_alternate=pq(html)('.info_txt2')('em').text()<line_sep>pinyin=''<line_sep>pinyin_match=re_pinyin.search(text)<if_stmt>pinyin_match<is><not><none><block_start>pinyin=pinyin_match.group('pinyin')<block_end>code=re_code.search(text).group('code')<line_sep>alternate=''<line_sep>alternate_match=re_alternate.search(text_alternate)<if_stmt>alternate_match<is><not><none><block_start>alternate=alternate_match.group('alternate')<block_end><return>HanziInfo(pinyin code alternate)<block_end><def_stmt>parse_hanzi hanzi<block_start>info=fetch_info(hanzi)<if_stmt>(<not>info.pinyin)<and>info.alternate<block_start>alternate=fetch_info(info.alternate)<block_end><else_stmt><block_start>alternate=''<block_end><return>HanziInfo(info.pinyin info.code alternate)<block_end><def_stmt>main lines<block_start><for_stmt>line lines<block_start><if_stmt>line.startswith('# U+')<and>'<-'<in>line# # U+xxx ... -> U+xxx
<block_start>code=line.split(':')[0].strip('# ')<line_sep># U+xxx -> xxx
code=code[2:]<line_sep>info=parse_hanzi(code)<line_sep>pinyin=info.pinyin<line_sep>extra=''<if_stmt>(<not>pinyin)<and>info.alternate<block_start>alternate=info.alternate<line_sep>pinyin=alternate.pinyin<line_sep>extra=' => U+{0}'.format(alternate.code)<if_stmt>','<in>pinyin<block_start>first_pinyin,extra_pinyin=pinyin.split(',' 1)<line_sep>pinyin=first_pinyin<line_sep>extra<augadd>' ?-> '+extra_pinyin<block_end><block_end><if_stmt>pinyin<block_start>line=line.strip()<line_sep># # U+xxx -> U+xxx
line=line[2:]<line_sep>line=line.replace('<-' pinyin)<if_stmt>extra<block_start>line<augadd>extra<block_end><block_end><block_end><yield>line.strip()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>args=sys.argv[1:]<line_sep>input_file=args[0]<with_stmt>open(input_file)<as>fp<block_start><for_stmt>line main(fp)<block_start>print(line)<block_end><block_end><block_end> |
<import_from_stmt>rest_framework status<import_from_stmt>rest_framework.exceptions APIException<class_stmt>FeatureStateVersionError(APIException)<block_start>status_code=status.HTTP_400_BAD_REQUEST<block_end><class_stmt>FeatureStateVersionAlreadyExistsError(FeatureStateVersionError)<block_start>status_code=status.HTTP_400_BAD_REQUEST<def_stmt>__init__ self version:int<block_start>super(FeatureStateVersionAlreadyExistsError self).__init__(f"Version {version} already exists for FeatureState.")<block_end><block_end> |
# Allow tests/ directory to see faster_than_requests/ package on PYTHONPATH
<import_stmt>sys<import_from_stmt>pathlib Path<line_sep>sys.path.append(str(Path(__file__).parent.parent))<line_sep> |
<import_stmt>time<def_stmt>main request response<block_start>delay=float(request.GET.first(b"ms" 500))<line_sep>time.sleep(delay/1E3)<line_sep><return>[(b"Content-type" b"text/javascript")] u"export let delayedLoaded = true;"<block_end> |
<import_from_stmt>src.platform.jboss.interfaces JINTERFACES<import_from_stmt>cprint FingerPrint<class_stmt>FPrint(FingerPrint)<block_start><def_stmt>__init__ self<block_start>self.platform="jboss"<line_sep>self.version="7.1"<line_sep>self.title=JINTERFACES.MM<line_sep>self.uri="/console/app/gwt/chrome/chrome_rtl.css"<line_sep>self.port=9990<line_sep>self.hash="14755bd918908c2703c57bd1a52046b6"<block_end><block_end> |
#
# author: <NAME>
# Project Description: This repository contains source code for semantically segmenting WSIs; however, it could be easily
# adapted for other domains such as natural image segmentation
# File Description: This file contains the CNN models
# ==============================================================================
<import_stmt>torch<import_stmt>torch.nn<as>nn<class_stmt>CBR(nn.Module)<block_start><def_stmt>__init__ self nIn nOut kSize stride=1<block_start>super().__init__()<line_sep>padding=int((kSize-1)/2)<line_sep>self.conv=nn.Conv2d(nIn nOut kSize stride=stride padding=padding bias=<false>)<line_sep>self.bn=nn.BatchNorm2d(nOut momentum=0.95 eps=1e-03)<line_sep>self.act=nn.ReLU(<true>)<block_end><def_stmt>forward self input<block_start>output=self.conv(input)<line_sep>output=self.bn(output)<line_sep>output=self.act(output)<line_sep><return>output<block_end><block_end><class_stmt>CB(nn.Module)<block_start><def_stmt>__init__ self nIn nOut kSize stride=1<block_start>super().__init__()<line_sep>padding=int((kSize-1)/2)<line_sep>self.conv=nn.Conv2d(nIn nOut kSize stride=stride padding=padding bias=<false>)<line_sep>self.bn=nn.BatchNorm2d(nOut momentum=0.95 eps=1e-03)<block_end><def_stmt>forward self input<block_start>output=self.conv(input)<line_sep>output=self.bn(output)<line_sep><return>output<block_end><block_end><class_stmt>C(nn.Module)<block_start><def_stmt>__init__ self nIn nOut kSize stride=1<block_start>super().__init__()<line_sep>padding=int((kSize-1)/2)<line_sep>self.conv=nn.Conv2d(nIn nOut kSize stride=stride padding=padding bias=<false>)<block_end><def_stmt>forward self input<block_start>output=self.conv(input)<line_sep><return>output<block_end><block_end><class_stmt>DownSampler(nn.Module)<block_start><def_stmt>__init__ self nIn nOut<block_start>super().__init__()<line_sep>self.conv=nn.Conv2d(nIn nOut-nIn 3 stride=2 padding=1 bias=<false>)<line_sep>self.pool=nn.AvgPool2d(3 stride=2 padding=1)<line_sep>self.bn=nn.BatchNorm2d(nOut momentum=0.95 eps=1e-3)<line_sep>self.act=nn.ReLU(<true>)<block_end># nn.PReLU(nOut)
<def_stmt>forward self input<block_start>output=torch.cat([self.conv(input) self.pool(input)] 1)<line_sep>output=self.bn(output)<line_sep>output=self.act(output)<line_sep><return>output<block_end><block_end><class_stmt>BasicResidualBlock(nn.Module)<block_start><def_stmt>__init__ self nIn nOut prob=0.03<block_start>super().__init__()<line_sep>self.c1=CBR(nIn nOut 3 1)<line_sep>self.c2=CB(nOut nOut 3 1)<line_sep>self.act=nn.ReLU(<true>)<block_end># nn.PReLU(nOut)
# self.drop = nn.Dropout2d(p=prob)
<def_stmt>forward self input<block_start>output=self.c1(input)<line_sep>output=self.c2(output)<line_sep>output=input+output<line_sep># output = self.drop(output)
output=self.act(output)<line_sep><return>output<block_end><block_end><class_stmt>DownSamplerA(nn.Module)<block_start><def_stmt>__init__ self nIn nOut<block_start>super().__init__()<line_sep>self.conv=CBR(nIn nOut 3 2)<block_end><def_stmt>forward self input<block_start>output=self.conv(input)<line_sep><return>output<block_end><block_end><class_stmt>BR(nn.Module)<block_start><def_stmt>__init__ self nOut<block_start>super().__init__()<line_sep>self.bn=nn.BatchNorm2d(nOut momentum=0.95 eps=1e-03)<line_sep>self.act=nn.ReLU(<true>)<block_end># nn.PReLU(nOut)
<def_stmt>forward self input<block_start>output=self.bn(input)<line_sep>output=self.act(output)<line_sep><return>output<block_end><block_end><class_stmt>CDilated(nn.Module)<block_start><def_stmt>__init__ self nIn nOut kSize stride=1 d=1<block_start>super().__init__()<line_sep>padding=int((kSize-1)/2)<times>d<line_sep>self.conv=nn.Conv2d(nIn nOut (kSize kSize) stride=stride padding=(padding padding) bias=<false> dilation=d)<block_end><def_stmt>forward self input<block_start>output=self.conv(input)<line_sep><return>output<block_end><block_end><class_stmt>CDilated1(nn.Module)<block_start><def_stmt>__init__ self nIn nOut kSize stride=1 d=1<block_start>super().__init__()<line_sep>padding=int((kSize-1)/2)<times>d<line_sep>self.conv=nn.Conv2d(nIn nOut (kSize kSize) stride=stride padding=(padding padding) bias=<false> dilation=d)<line_sep>self.br=BR(nOut)<block_end><def_stmt>forward self input<block_start>output=self.conv(input)<line_sep><return>self.br(output)<block_end><block_end><class_stmt>DilatedParllelResidualBlockB(nn.Module)<block_start><def_stmt>__init__ self nIn nOut prob=0.03<block_start>super().__init__()<line_sep>n=int(nOut/5)<line_sep>n1=nOut-4<times>n<line_sep>self.c1=C(nIn n 1 1)<line_sep>self.d1=CDilated(n n1 3 1 1)<line_sep>self.d2=CDilated(n n 3 1 2)<line_sep>self.d4=CDilated(n n 3 1 4)<line_sep>self.d8=CDilated(n n 3 1 8)<line_sep>self.d16=CDilated(n n 3 1 16)<line_sep>self.bn=nn.BatchNorm2d(nOut momentum=0.95 eps=1e-3)<line_sep>self.act=nn.ReLU(<true>)<block_end># nn.PReLU(nOut)
# self.drop = nn.Dropout2d(p=prob)
<def_stmt>forward self input<block_start>output1=self.c1(input)<line_sep>d1=self.d1(output1)<line_sep>d2=self.d2(output1)<line_sep>d4=self.d4(output1)<line_sep>d8=self.d8(output1)<line_sep>d16=self.d16(output1)<line_sep>add1=d2<line_sep>add2=add1+d4<line_sep>add3=add2+d8<line_sep>add4=add3+d16<line_sep>combine=torch.cat([d1 add1 add2 add3 add4] 1)<line_sep>combine_in_out=input+combine<line_sep>output=self.bn(combine_in_out)<line_sep># output = self.drop(output)
output=self.act(output)<line_sep><return>output<block_end><block_end><class_stmt>DilatedParllelResidualBlockB1(nn.Module)<block_start><def_stmt>__init__ self nIn nOut prob=0.03<block_start>super().__init__()<line_sep>n=int(nOut/4)<line_sep>n1=nOut-3<times>n<line_sep>self.c1=C(nIn n 3 1)<line_sep>self.d1=CDilated(n n1 3 1 1)<line_sep>self.d2=CDilated(n n 3 1 2)<line_sep>self.d4=CDilated(n n 3 1 4)<line_sep>self.d8=CDilated(n n 3 1 8)<line_sep>self.d16=CDilated(n n 3 1 16)<line_sep>self.bn=nn.BatchNorm2d(nOut momentum=0.95 eps=1e-3)<line_sep>self.act=nn.ReLU(<true>)<block_end># nn.PReLU(nOut)
# self.drop = nn.Dropout2d(p=prob)
<def_stmt>forward self input<block_start>output1=self.c1(input)<line_sep>d1=self.d1(output1)<line_sep>d2=self.d2(output1)<line_sep>d4=self.d4(output1)<line_sep>d8=self.d8(output1)<line_sep># d16 = self.d16(output1)
add1=d2<line_sep>add2=add1+d4<line_sep>add3=add2+d8<line_sep># add4 = add3 + d16
combine=torch.cat([d1 add1 add2 add3] 1)<line_sep>combine_in_out=input+combine<line_sep>output=self.bn(combine_in_out)<line_sep># output = self.drop(output)
output=self.act(output)<line_sep><return>output<block_end><block_end><class_stmt>PSPDec(nn.Module)<block_start><def_stmt>__init__ self nIn nOut downSize upSize=48<block_start>super().__init__()<line_sep>self.features=nn.Sequential(nn.AdaptiveAvgPool2d(downSize) nn.Conv2d(nIn nOut 1 bias=<false>) nn.BatchNorm2d(nOut momentum=0.95 eps=1e-3) nn.ReLU(<true>) # nn.PReLU(nOut),
nn.Upsample(size=upSize mode='bilinear'))<block_end><def_stmt>forward self x<block_start><return>self.features(x)<block_end><block_end><class_stmt>ResNetC1(nn.Module)<block_start>'''
Segmentation model with ESP as the encoding block.
This is the same as in stage 1
'''<def_stmt>__init__ self classes<block_start>super().__init__()<line_sep>self.level1=CBR(3 16 7 2)# 384 x 384
self.p01=PSPDec(16+classes classes 160 192)<line_sep>self.p02=PSPDec(16+classes classes 128 192)<line_sep>self.p03=PSPDec(16+classes classes 96 192)<line_sep>self.p04=PSPDec(16+classes classes 72 192)<line_sep>self.class_0=nn.Sequential(nn.Conv2d(16+5<times>classes classes 3 padding=1 bias=<false>) nn.BatchNorm2d(classes momentum=0.95 eps=1e-3) nn.ReLU(<true>) # nn.PReLU(classes),
# nn.Dropout2d(.1),
nn.Conv2d(classes classes 7 padding=3 bias=<false>))<line_sep>self.level2=DownSamplerA(16 128)<line_sep>self.level2_0=DilatedParllelResidualBlockB1(128 128)<line_sep>self.level2_1=DilatedParllelResidualBlockB1(128 128)# 512 x 256
self.p10=PSPDec(8+256 64 80 96)<line_sep>self.p20=PSPDec(8+256 64 64 96)<line_sep>self.p30=PSPDec(8+256 64 48 96)<line_sep>self.p40=PSPDec(8+256 64 36 96)<line_sep>self.class_1=nn.Sequential(nn.Conv2d(8+256+64<times>4 classes 3 padding=1 bias=<false>) nn.BatchNorm2d(classes momentum=0.95 eps=1e-3) nn.ReLU(<true>) # nn.PReLU(classes),
# nn.Dropout2d(.1),
nn.Conv2d(classes classes 1 bias=<false>) nn.BatchNorm2d(classes momentum=0.95 eps=1e-3) nn.ReLU(<true>))<line_sep>self.br_2=BR(256)<line_sep>self.level3_0=DownSamplerA(256 256)<line_sep>self.level3_1=DilatedParllelResidualBlockB1(256 256 0.3)<line_sep>self.level3_2=DilatedParllelResidualBlockB1(256 256 0.3)# 256 x 128
self.level4_1=DilatedParllelResidualBlockB1(256 256 0.3)<line_sep>self.level4_2=DilatedParllelResidualBlockB1(256 256 0.3)<line_sep>self.level4_3=DilatedParllelResidualBlockB1(256 256 0.3)# 128 x 64
self.p1=PSPDec(512 128 40)<line_sep>self.p2=PSPDec(512 128 32)<line_sep>self.p3=PSPDec(512 128 24)<line_sep>self.p4=PSPDec(512 128 18)<line_sep>self.br_4=BR(512)<line_sep>self.classifier=nn.Sequential(nn.Conv2d(512+4<times>128 128 1 padding=0 bias=<false>) nn.BatchNorm2d(128 momentum=0.95 eps=1e-3) nn.ReLU(<true>) # nn.PReLU(classes),
# nn.Dropout2d(.1),
nn.Conv2d(128 classes 3 padding=1 bias=<false>) nn.BatchNorm2d(classes momentum=0.95 eps=1e-3) nn.ReLU(<true>) nn.Conv2d(classes classes 1 bias=<false>) nn.BatchNorm2d(classes momentum=0.95 eps=1e-3) nn.ReLU(<true>))<line_sep># C(320, classes, 7, 1)
self.upsample_1=nn.Upsample(scale_factor=2 mode='bilinear')<line_sep>self.upsample_2=nn.Upsample(scale_factor=2 mode='bilinear')<line_sep>self.upsample_3=nn.Upsample(scale_factor=2 mode='bilinear')<block_end><def_stmt>forward self input1# input1 = self.cmlrn(input)
<block_start>output0=self.level1(input1)<line_sep>output1_0=self.level2(output0)<line_sep>output1=self.level2_0(output1_0)<line_sep>output1=self.level2_1(output1)<line_sep>output1=self.br_2(torch.cat([output1_0 output1] 1))<line_sep>output2_0=self.level3_0(output1)<line_sep>output2=self.level3_1(output2_0)<line_sep>output2=self.level3_2(output2)<line_sep>output3=self.level4_1(output2)<line_sep>output3=self.level4_2(output3)<line_sep>output3=self.level4_3(output3)<line_sep>output3=self.br_4(torch.cat([output2_0 output3] 1))<line_sep>output3=self.classifier(torch.cat([output3 self.p1(output3) self.p2(output3) self.p3(output3) self.p4(output3)] 1))<line_sep>output3=self.upsample_3(output3)<line_sep>combine_up_23=torch.cat([output3 output1] 1)<line_sep>output23_hook=self.class_1(torch.cat([combine_up_23 self.p10(combine_up_23) self.p20(combine_up_23) self.p30(combine_up_23) self.p40(combine_up_23)] 1))<line_sep>output23_hook=self.upsample_2(output23_hook)<line_sep>combine_up=torch.cat([output0 output23_hook] 1)<line_sep>output0_hook=self.class_0(torch.cat([combine_up self.p01(combine_up) self.p02(combine_up) self.p03(combine_up) self.p04(combine_up)] 1))<line_sep># output3 = output2_0 + output3
# classifier = self.classifier(output3)
classifier=self.upsample_1(output0_hook)<line_sep><return>classifier<block_end><block_end><class_stmt>ResNetC1_YNet(nn.Module)<block_start>'''
Jointly learning the segmentation and classification with ESP as encoding blocks
'''<def_stmt>__init__ self classes diagClasses segNetFile=<none><block_start>super().__init__()<line_sep>self.level4_0=DownSamplerA(512 128)<line_sep>self.level4_1=DilatedParllelResidualBlockB1(128 128 0.3)<line_sep>self.level4_2=DilatedParllelResidualBlockB1(128 128 0.3)<line_sep>self.br_con_4=BR(256)<line_sep>self.level5_0=DownSamplerA(256 64)<line_sep>self.level5_1=DilatedParllelResidualBlockB1(64 64 0.3)<line_sep>self.level5_2=DilatedParllelResidualBlockB1(64 64 0.3)<line_sep>self.br_con_5=BR(128)<line_sep>self.global_Avg=nn.AdaptiveAvgPool2d(1)<line_sep>self.fc1=nn.Linear(128 64)<line_sep>self.fc2=nn.Linear(64 diagClasses)<line_sep># segmentation model
self.segNet=ResNetC1(classes)<if_stmt>segNetFile<is><not><none><block_start>print('Loading pre-trained segmentation model')<line_sep>self.segNet.load_state_dict(torch.load(segNetFile))<block_end>self.modules=[]<for_stmt>i,m enumerate(self.segNet.children())<block_start>self.modules.append(m)<block_end><block_end><def_stmt>forward self input1<block_start>output0=self.modules[0](input1)<line_sep>output1_0=self.modules[6](output0)# downsample
output1=self.modules[7](output1_0)<line_sep>output1=self.modules[8](output1)<line_sep>output1=self.modules[14](torch.cat([output1_0 output1] 1))<line_sep>output2_0=self.modules[15](output1)# downsample
output2=self.modules[16](output2_0)<line_sep>output2=self.modules[17](output2)<line_sep>output3=self.modules[18](output2)<line_sep>output3=self.modules[19](output3)<line_sep>output3=self.modules[20](output3)<line_sep>output3_hook=self.modules[25](torch.cat([output2_0 output3] 1))<line_sep>output3=self.modules[26](torch.cat([output3_hook self.modules[21](output3_hook) self.modules[22](output3_hook) self.modules[23](output3_hook) self.modules[24](output3_hook)] 1))<line_sep>output3=self.modules[29](output3)<line_sep>combine_up_23=torch.cat([output3 output1] 1)<line_sep>output23_hook=self.modules[13](torch.cat([combine_up_23 self.modules[9](combine_up_23) self.modules[10](combine_up_23) self.modules[11](combine_up_23) self.modules[12](combine_up_23)] 1))<line_sep>output23_hook=self.modules[28](output23_hook)<line_sep>combine_up=torch.cat([output0 output23_hook] 1)<line_sep>output0_hook=self.modules[5](torch.cat([combine_up self.modules[1](combine_up) self.modules[2](combine_up) self.modules[3](combine_up) self.modules[4](combine_up)] 1))<line_sep># segmentation classsifier
classifier=self.modules[27](output0_hook)<line_sep># diagnostic branch
l5_0=self.level4_0(output3_hook)<line_sep>l5_1=self.level4_1(l5_0)<line_sep>l5_2=self.level4_2(l5_1)<line_sep>l5_con=self.br_con_4(torch.cat([l5_0 l5_2] 1))<line_sep>l6_0=self.level5_0(l5_con)<line_sep>l6_1=self.level5_1(l6_0)<line_sep>l6_2=self.level5_2(l6_1)<line_sep>l6_con=self.br_con_5(torch.cat([l6_0 l6_2] 1))<line_sep>glbAvg=self.global_Avg(l6_con)<line_sep>flatten=glbAvg.view(glbAvg.size(0) -1)<line_sep>fc1=self.fc1(flatten)<line_sep>diagClass=self.fc2(fc1)<line_sep><return>classifier diagClass<block_end><block_end><class_stmt>ResNetD1(nn.Module)<block_start>'''
Segmentation model with RCB as encoding blocks.
This is the same as in Stage 1
'''<def_stmt>__init__ self classes<block_start>super().__init__()<line_sep>self.level1=CBR(3 16 7 2)# 384 x 384
self.p01=PSPDec(16+classes classes 160 192)<line_sep>self.p02=PSPDec(16+classes classes 128 192)<line_sep>self.p03=PSPDec(16+classes classes 96 192)<line_sep>self.p04=PSPDec(16+classes classes 72 192)<line_sep>self.class_0=nn.Sequential(nn.Conv2d(16+5<times>classes classes 3 padding=1 bias=<false>) nn.BatchNorm2d(classes momentum=0.95 eps=1e-3) nn.ReLU(<true>) nn.Conv2d(classes classes 7 padding=3 bias=<false>))<line_sep>self.level2=DownSamplerA(16 128)<line_sep>self.level2_0=BasicResidualBlock(128 128)<line_sep>self.level2_1=BasicResidualBlock(128 128)# 512 x 256
self.p10=PSPDec(8+256 64 80 96)<line_sep>self.p20=PSPDec(8+256 64 64 96)<line_sep>self.p30=PSPDec(8+256 64 48 96)<line_sep>self.p40=PSPDec(8+256 64 36 96)<line_sep>self.class_1=nn.Sequential(nn.Conv2d(8+256+64<times>4 classes 3 padding=1 bias=<false>) nn.BatchNorm2d(classes momentum=0.95 eps=1e-3) nn.ReLU(<true>) nn.Conv2d(classes classes 1 bias=<false>) nn.BatchNorm2d(classes momentum=0.95 eps=1e-3) nn.ReLU(<true>))<line_sep>self.br_2=BR(256)<line_sep>self.level3_0=DownSamplerA(256 256)<line_sep>self.level3_1=BasicResidualBlock(256 256 0.3)<line_sep>self.level3_2=BasicResidualBlock(256 256 0.3)<line_sep>self.level4_1=BasicResidualBlock(256 256 0.3)<line_sep>self.level4_2=BasicResidualBlock(256 256 0.3)<line_sep>self.level4_3=BasicResidualBlock(256 256 0.3)<line_sep>self.p1=PSPDec(512 128 40)<line_sep>self.p2=PSPDec(512 128 32)<line_sep>self.p3=PSPDec(512 128 24)<line_sep>self.p4=PSPDec(512 128 18)<line_sep>self.br_4=BR(512)<line_sep>self.classifier=nn.Sequential(nn.Conv2d(512+128<times>4 128 1 padding=0 bias=<false>) nn.BatchNorm2d(128 momentum=0.95 eps=1e-3) nn.ReLU(<true>) nn.Conv2d(128 classes 3 padding=1 bias=<false>) nn.BatchNorm2d(classes momentum=0.95 eps=1e-3) nn.ReLU(<true>) nn.Conv2d(classes classes 1 bias=<false>) nn.BatchNorm2d(classes momentum=0.95 eps=1e-3) nn.ReLU(<true>))<line_sep>self.upsample_1=nn.Upsample(scale_factor=2 mode='bilinear')<line_sep>self.upsample_2=nn.Upsample(scale_factor=2 mode='bilinear')<line_sep>self.upsample_3=nn.Upsample(scale_factor=2 mode='bilinear')<block_end><def_stmt>forward self input1# input1 = self.cmlrn(input)
<block_start>output0=self.level1(input1)<line_sep>output1_0=self.level2(output0)<line_sep>output1=self.level2_0(output1_0)<line_sep>output1=self.level2_1(output1)<line_sep>output1=self.br_2(torch.cat([output1_0 output1] 1))<line_sep>output2_0=self.level3_0(output1)<line_sep>output2=self.level3_1(output2_0)<line_sep>output2=self.level3_2(output2)<line_sep>output3=self.level4_1(output2)<line_sep>output3=self.level4_2(output3)<line_sep>output3=self.level4_3(output3)<line_sep>output3=self.br_4(torch.cat([output2_0 output3] 1))<line_sep>output3=self.classifier(torch.cat([output3 self.p1(output3) self.p2(output3) self.p3(output3) self.p4(output3)] 1))<line_sep>output3=self.upsample_3(output3)<line_sep>combine_up_23=torch.cat([output3 output1] 1)<line_sep>output23_hook=self.class_1(torch.cat([combine_up_23 self.p10(combine_up_23) self.p20(combine_up_23) self.p30(combine_up_23) self.p40(combine_up_23)] 1))<line_sep>output23_hook=self.upsample_2(output23_hook)<line_sep>combine_up=torch.cat([output23_hook output0] 1)<line_sep>output0_hook=self.class_0(torch.cat([combine_up self.p01(combine_up) self.p02(combine_up) self.p03(combine_up) self.p04(combine_up)] 1))<line_sep>classifier=self.upsample_1(output0_hook)<line_sep><return>classifier<block_end><block_end><class_stmt>ResNetD1_YNet(nn.Module)<block_start>'''
Jointly learning the segmentation and classification with RCB as encoding blocks
'''<def_stmt>__init__ self classes diagClasses segNetFile=<none><block_start>super().__init__()<line_sep>self.level4_0=DownSamplerA(512 128)# 24x24
self.level4_1=BasicResidualBlock(128 128 0.3)<line_sep>self.level4_2=BasicResidualBlock(128 128 0.3)<line_sep>self.br_con_4=BR(256)<line_sep>self.level5_0=DownSamplerA(256 64)# 12x12
self.level5_1=BasicResidualBlock(64 64 0.3)<line_sep>self.level5_2=BasicResidualBlock(64 64 0.3)<line_sep>self.br_con_5=BR(128)<line_sep>self.global_Avg=nn.AdaptiveAvgPool2d(1)<line_sep>self.fc1=nn.Linear(128 64)<line_sep>self.fc2=nn.Linear(64 diagClasses)<line_sep>self.segNet=ResNetD1(classes)# 384 x 384
<if_stmt>segNetFile<is><not><none><block_start>print('Loading segmentation pre-trained model')<line_sep>self.segNet.load_state_dict(torch.load(segNetFile))<block_end>self.modules=[]<for_stmt>i,m enumerate(self.segNet.children())<block_start>self.modules.append(m)<line_sep># print(i, m)
<block_end><block_end><def_stmt>forward self input1<block_start>output0=self.modules[0](input1)<line_sep>output1_0=self.modules[6](output0)# downsample
output1=self.modules[7](output1_0)<line_sep>output1=self.modules[8](output1)<line_sep>output1=self.modules[14](torch.cat([output1_0 output1] 1))<line_sep>output2_0=self.modules[15](output1)# downsample
output2=self.modules[16](output2_0)<line_sep>output2=self.modules[17](output2)<line_sep>output3=self.modules[18](output2)<line_sep>output3=self.modules[19](output3)<line_sep>output3=self.modules[20](output3)<line_sep>output3_hook=self.modules[25](torch.cat([output2_0 output3] 1))<line_sep>output3=self.modules[26](torch.cat([output3_hook self.modules[21](output3_hook) self.modules[22](output3_hook) self.modules[23](output3_hook) self.modules[24](output3_hook)] 1))<line_sep>output3=self.modules[29](output3)<line_sep>combine_up_23=torch.cat([output3 output1] 1)<line_sep>output23_hook=self.modules[13](torch.cat([combine_up_23 self.modules[9](combine_up_23) self.modules[10](combine_up_23) self.modules[11](combine_up_23) self.modules[12](combine_up_23)] 1))<line_sep>output23_hook=self.modules[28](output23_hook)<line_sep>combine_up=torch.cat([output0 output23_hook] 1)<line_sep>output0_hook=self.modules[5](torch.cat([combine_up self.modules[1](combine_up) self.modules[2](combine_up) self.modules[3](combine_up) self.modules[4](combine_up)] 1))<line_sep># segmentation classsifier
classifier=self.modules[27](output0_hook)<line_sep># diagnostic branch
l5_0=self.level4_0(output3_hook)<line_sep>l5_1=self.level4_1(l5_0)<line_sep>l5_2=self.level4_2(l5_1)<line_sep>l5_con=self.br_con_4(torch.cat([l5_0 l5_2] 1))<line_sep>l6_0=self.level5_0(l5_con)<line_sep>l6_1=self.level5_1(l6_0)<line_sep>l6_2=self.level5_2(l6_1)<line_sep>l6_con=self.br_con_5(torch.cat([l6_0 l6_2] 1))<line_sep>glbAvg=self.global_Avg(l6_con)<line_sep>flatten=glbAvg.view(glbAvg.size(0) -1)<line_sep>fc1=self.fc1(flatten)<line_sep>diagClass=self.fc2(fc1)<line_sep><return>classifier diagClass<block_end><block_end> |
#
# genmap_support.py: Multibyte Codec Map Generator
#
# Original Author: <NAME> <<EMAIL>>
# Modified Author: <NAME> <<EMAIL>>
#
<class_stmt>BufferedFiller<block_start><def_stmt>__init__ self column=78<block_start>self.column=column<line_sep>self.buffered=[]<line_sep>self.cline=[]<line_sep>self.clen=0<line_sep>self.count=0<block_end><def_stmt>write self *data<block_start><for_stmt>s data<block_start><if_stmt>len(s)<g>self.column<block_start><raise>ValueError("token is too long")<block_end><if_stmt>len(s)+self.clen<g>self.column<block_start>self.flush()<block_end>self.clen<augadd>len(s)<line_sep>self.cline.append(s)<line_sep>self.count<augadd>1<block_end><block_end><def_stmt>flush self<block_start><if_stmt><not>self.cline<block_start><return><block_end>self.buffered.append(''.join(self.cline))<line_sep>self.clen=0<del_stmt>self.cline[:]<block_end><def_stmt>printout self fp<block_start>self.flush()<for_stmt>l self.buffered<block_start>fp.write(f'{l}\n')<block_end><del_stmt>self.buffered[:]<block_end><def_stmt>__len__ self<block_start><return>self.count<block_end><block_end><class_stmt>DecodeMapWriter<block_start>filler_class=BufferedFiller<def_stmt>__init__ self fp prefix decode_map<block_start>self.fp=fp<line_sep>self.prefix=prefix<line_sep>self.decode_map=decode_map<line_sep>self.filler=self.filler_class()<block_end><def_stmt>update_decode_map self c1range c2range onlymask=() wide=0<block_start>c2values=range(c2range[0] c2range[1]+1)<for_stmt>c1 range(c1range[0] c1range[1]+1)<block_start><if_stmt>c1<not><in>self.decode_map<or>(onlymask<and>c1<not><in>onlymask)<block_start><continue><block_end>c2map=self.decode_map[c1]<line_sep>rc2values=[n<for>n c2values<if>n<in>c2map]<if_stmt><not>rc2values<block_start><continue><block_end>c2map[self.prefix]=<true><line_sep>c2map['min']=rc2values[0]<line_sep>c2map['max']=rc2values[-1]<line_sep>c2map['midx']=len(self.filler)<for_stmt>v range(rc2values[0] 
rc2values[-1]+1)<block_start><if_stmt>v<in>c2map<block_start>self.filler.write('%d,'%c2map[v])<block_end><else_stmt><block_start>self.filler.write('U,')<block_end><block_end><block_end><block_end><def_stmt>generate self wide=<false><block_start><if_stmt><not>wide<block_start>self.fp.write(f"static const ucs2_t __{self.prefix}_decmap[{len(self.filler)}] = {{\n")<block_end><else_stmt><block_start>self.fp.write(f"static const Py_UCS4 __{self.prefix}_decmap[{len(self.filler)}] = {{\n")<block_end>self.filler.printout(self.fp)<line_sep>self.fp.write("};\n\n")<if_stmt><not>wide<block_start>self.fp.write(f"static const struct dbcs_index {self.prefix}_decmap[256] = {{\n")<block_end><else_stmt><block_start>self.fp.write(f"static const struct widedbcs_index {self.prefix}_decmap[256] = {{\n")<block_end><for_stmt>i range(256)<block_start><if_stmt>i<in>self.decode_map<and>self.prefix<in>self.decode_map[i]<block_start>m=self.decode_map<line_sep>prefix=self.prefix<block_end><else_stmt><block_start>self.filler.write("{" "0," "0," "0" "},")<line_sep><continue><block_end>self.filler.write("{" "__%s_decmap"%prefix "+" "%d"%m[i]['midx'] "," "%d,"%m[i]['min'] "%d"%m[i]['max'] "},")<block_end>self.filler.printout(self.fp)<line_sep>self.fp.write("};\n\n")<block_end><block_end><class_stmt>EncodeMapWriter<block_start>filler_class=BufferedFiller<line_sep>elemtype='DBCHAR'<line_sep>indextype='struct unim_index'<def_stmt>__init__ self fp prefix encode_map<block_start>self.fp=fp<line_sep>self.prefix=prefix<line_sep>self.encode_map=encode_map<line_sep>self.filler=self.filler_class()<block_end><def_stmt>generate self<block_start>self.buildmap()<line_sep>self.printmap()<block_end><def_stmt>buildmap self<block_start><for_stmt>c1 range(0 256)<block_start><if_stmt>c1<not><in>self.encode_map<block_start><continue><block_end>c2map=self.encode_map[c1]<line_sep>rc2values=[k<for>k 
c2map.keys()]<line_sep>rc2values.sort()<if_stmt><not>rc2values<block_start><continue><block_end>c2map[self.prefix]=<true><line_sep>c2map['min']=rc2values[0]<line_sep>c2map['max']=rc2values[-1]<line_sep>c2map['midx']=len(self.filler)<for_stmt>v range(rc2values[0] rc2values[-1]+1)<block_start><if_stmt>v<not><in>c2map<block_start>self.write_nochar()<block_end><elif_stmt>isinstance(c2map[v] int)<block_start>self.write_char(c2map[v])<block_end><elif_stmt>isinstance(c2map[v] tuple)<block_start>self.write_multic(c2map[v])<block_end><else_stmt><block_start><raise>ValueError<block_end><block_end><block_end><block_end><def_stmt>write_nochar self<block_start>self.filler.write('N,')<block_end><def_stmt>write_multic self point<block_start>self.filler.write('M,')<block_end><def_stmt>write_char self point<block_start>self.filler.write(str(point)+',')<block_end><def_stmt>printmap self<block_start>self.fp.write(f"static const {self.elemtype} __{self.prefix}_encmap[{len(self.filler)}] = {{\n")<line_sep>self.filler.printout(self.fp)<line_sep>self.fp.write("};\n\n")<line_sep>self.fp.write(f"static const {self.indextype} {self.prefix}_encmap[256] = {{\n")<for_stmt>i range(256)<block_start><if_stmt>i<in>self.encode_map<and>self.prefix<in>self.encode_map[i]<block_start>self.filler.write("{" "__%s_encmap"%self.prefix "+" "%d"%self.encode_map[i]['midx'] "," "%d,"%self.encode_map[i]['min'] "%d"%self.encode_map[i]['max'] "},")<block_end><else_stmt><block_start>self.filler.write("{" "0," "0," "0" "},")<line_sep><continue><block_end><block_end>self.filler.printout(self.fp)<line_sep>self.fp.write("};\n\n")<block_end><block_end><def_stmt>open_mapping_file path source<block_start><try_stmt><block_start>f=open(path)<block_end><except_stmt>IOError<block_start><raise>SystemExit(f'{source} is needed')<block_end><return>f<block_end><def_stmt>print_autogen fo source<block_start>fo.write(f'// AUTO-GENERATED FILE FROM {source}: DO NOT EDIT\n')<block_end><def_stmt>loadmap fo natcol=0 unicol=1 
sbcs=0<block_start>print("Loading from" fo)<line_sep>fo.seek(0 0)<line_sep>decmap={}<for_stmt>line fo<block_start>line=line.split('#' 1)[0].strip()<if_stmt><not>line<or>len(line.split())<l>2<block_start><continue><block_end>row=[eval(e)<for>e line.split()]<line_sep>loc,uni=row[natcol] row[unicol]<if_stmt>loc<ge>0x100<or>sbcs<block_start>decmap.setdefault((loc<rshift>8) {})<line_sep>decmap[(loc<rshift>8)][(loc&0xff)]=uni<block_end><block_end><return>decmap<block_end> |
# Copyright 2019 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test layers from qlayers.py."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>numpy<as>np<import_from_stmt>numpy.testing assert_allclose<import_stmt>pytest<import_stmt>logging<import_from_stmt>tensorflow.keras backend<as>K<import_from_stmt>tensorflow.keras.layers Activation<import_from_stmt>tensorflow.keras.layers Flatten<import_from_stmt>tensorflow.keras.layers Input<import_from_stmt>tensorflow.keras.models Model<import_from_stmt>tensorflow.keras.backend clear_session<import_from_stmt>qkeras QActivation<import_from_stmt>qkeras QDense<import_from_stmt>qkeras quantized_bits<import_from_stmt>qkeras.utils model_save_quantized_weights<import_from_stmt>qkeras.utils quantized_model_from_json<def_stmt>qdense_util layer_cls kwargs=<none> input_data=<none> weight_data=<none> expected_output=<none><block_start>"""qlayer test utility."""<line_sep>input_shape=input_data.shape<line_sep>input_dtype=input_data.dtype<line_sep>layer=layer_cls(**kwargs)<line_sep>x=Input(shape=input_shape[1:] dtype=input_dtype)<line_sep>y=layer(x)<line_sep>layer.set_weights(weight_data)<line_sep>model=Model(x y)<line_sep>actual_output=model.predict(input_data)<if_stmt>expected_output<is><not><none><block_start>assert_allclose(actual_output expected_output rtol=1e-4)<block_end><block_end>@pytest.mark.parametrize('layer_kwargs, input_data, weight_data, bias_data, expected_output' [({'units':2 'use_bias':<true> 'kernel_initializer':'glorot_uniform' 'bias_initializer':'zeros'} np.array([[1 1 1 1]] dtype=K.floatx()) np.array([[10 20] [10 20] [10 20] [10 20]] dtype=K.floatx()) # weight_data
np.array([0 0] dtype=K.floatx()) # bias
np.array([[40 80]] dtype=K.floatx())) # expected_output
({'units':2 'use_bias':<true> 'kernel_initializer':'glorot_uniform' 'bias_initializer':'zeros' 'kernel_quantizer':'quantized_bits(2,0,alpha=1.0)' 'bias_quantizer':'quantized_bits(2,0)' } np.array([[1 1 1 1]] dtype=K.floatx()) np.array([[10 20] [10 20] [10 20] [10 20]] dtype=K.floatx()) # weight_data
np.array([0 0] dtype=K.floatx()) # bias
np.array([[2 2]] dtype=K.floatx())) #expected_output
])<def_stmt>test_qdense layer_kwargs input_data weight_data bias_data expected_output<block_start>qdense_util(layer_cls=QDense kwargs=layer_kwargs input_data=input_data weight_data=[weight_data bias_data] expected_output=expected_output)<block_end><if_stmt>__name__<eq>'__main__'<block_start>pytest.main([__file__])<block_end> |
<import_stmt>datetime<import_from_stmt>flask_ldap3_login LDAP3LoginManager AuthenticationResponseStatus<import_from_stmt>lost.settings LOST_CONFIG FLASK_DEBUG<import_from_stmt>flask_jwt_extended create_access_token create_refresh_token<import_from_stmt>lost.db.model User<as>DBUser Group<import_from_stmt>lost.db roles<class_stmt>LoginManager()<block_start><def_stmt>__init__ self dbm user_name password<block_start>self.dbm=dbm<line_sep>self.user_name=user_name<line_sep>self.password=password<block_end><def_stmt>login self<block_start><if_stmt>LOST_CONFIG.ldap_config['LDAP_ACTIVE']<block_start>access_token,refresh_token=self.__authenticate_ldap()<block_end><else_stmt><block_start>access_token,refresh_token=self.__authenticate_flask()<block_end><if_stmt>access_token<and>refresh_token<block_start><return>{'token':access_token 'refresh_token':refresh_token} 200<block_end><return>{'message':'Invalid credentials'} 401<block_end><def_stmt>__get_token self user_id<block_start>expires=datetime.timedelta(minutes=LOST_CONFIG.session_timeout)<line_sep>expires_refresh=datetime.timedelta(minutes=LOST_CONFIG.session_timeout+2)<if_stmt>FLASK_DEBUG<block_start>expires=datetime.timedelta(days=365)<line_sep>expires_refresh=datetime.timedelta(days=366)<block_end>access_token=create_access_token(identity=user_id fresh=<true> expires_delta=expires)<line_sep>refresh_token=create_refresh_token(user_id expires_delta=expires_refresh)<line_sep><return>access_token refresh_token<block_end><def_stmt>__authenticate_flask self<block_start><if_stmt>self.user_name<block_start>user=self.dbm.find_user_by_user_name(self.user_name)<block_end><if_stmt>user<and>user.check_password(self.password)<block_start><return>self.__get_token(user.idx)<block_end><return><none> <none><block_end><def_stmt>__authenticate_ldap self# auth with ldap
<block_start>ldap_manager=LDAP3LoginManager()<line_sep>ldap_manager.init_config(LOST_CONFIG.ldap_config)<line_sep># Check if the credentials are correct
response=ldap_manager.authenticate(self.user_name self.password)<if_stmt>response.status<ne>AuthenticationResponseStatus.success# no user found in ldap, try it with db user:
<block_start><return>self.__authenticate_flask()<block_end>user_info=response.user_info<line_sep>user=self.dbm.find_user_by_user_name(self.user_name)<line_sep># user not in db:
<if_stmt><not>user<block_start>user=self.__create_db_user(user_info)<block_end><else_stmt># user in db -> synch with ldap
<block_start>user=self.__update_db_user(user_info user)<block_end><return>self.__get_token(user.idx)<block_end><def_stmt>__create_db_user self user_info<block_start>user=DBUser(user_name=user_info['uid'] email=user_info['mail'] email_confirmed_at=datetime.datetime.now() first_name=user_info['givenName'] last_name=user_info['sn'] is_external=<true>)<line_sep>anno_role=self.dbm.get_role_by_name(roles.ANNOTATOR)<line_sep>user.roles.append(anno_role)<line_sep>user.groups.append(Group(name=user.user_name is_user_default=<true>))<line_sep>self.dbm.save_obj(user)<line_sep><return>user<block_end><def_stmt>__update_db_user self user_info user<block_start>user.email=user_info['mail']<line_sep>user.first_name=user_info['givenName']<line_sep>user.last_name=user_info['sn']<line_sep>self.dbm.save_obj(user)<line_sep><return>user<block_end><block_end> |
<import_stmt>textwrap<import_from_stmt>sqlalchemy.orm relationship<import_from_stmt>sqlalchemy Column Integer ForeignKey String Boolean<import_from_stmt>.base_model Base<import_from_stmt>.port_model Port<import_from_stmt>.ip_address_model IPAddress<import_from_stmt>.nse_model nse_result_association_table<class_stmt>NmapResult(Base)<block_start>""" Database model that describes the TARGET.nmap scan results.
Represents nmap data.
Relationships:
``target``: many to one -> :class:`pipeline.models.target_model.Target`
``ip_address``: one to one -> :class:`pipeline.models.ip_address_model.IPAddress`
``port``: one to one -> :class:`pipeline.models.port_model.Port`
``nse_results``: one to many -> :class:`pipeline.models.nse_model.NSEResult`
"""<def_stmt>__str__ self<block_start><return>self.pretty()<block_end><def_stmt>pretty self commandline=<false> nse_results=<none><block_start>pad=" "<line_sep>ip_address=self.ip_address.ipv4_address<or>self.ip_address.ipv6_address<line_sep>msg=f"{ip_address} - {self.service}\n"<line_sep>msg<augadd>f"{'='<times>(len(ip_address)+len(self.service)+3)}\n\n"<line_sep>msg<augadd>f"{self.port.protocol} port: {self.port.port_number} - {'open'<if>self.open<else>'closed'} - {self.reason}\n"<line_sep>msg<augadd>f"product: {self.product} :: {self.product_version}\n"<line_sep>msg<augadd>"nse script(s) output:\n"<if_stmt>nse_results<is><none># add all nse scripts
<block_start><for_stmt>nse_result self.nse_results<block_start>msg<augadd>f"{pad}{nse_result.script_id}\n"<line_sep>msg<augadd>textwrap.indent(nse_result.script_output pad<times>2)<line_sep>msg<augadd>"\n"<block_end><block_end><else_stmt># filter used, only return those specified
<block_start><for_stmt>nse_result nse_results<block_start><if_stmt>nse_result<in>self.nse_results<block_start>msg<augadd>f"{pad}{nse_result.script_id}\n"<line_sep>msg<augadd>textwrap.indent(nse_result.script_output pad<times>2)<line_sep>msg<augadd>"\n"<block_end><block_end><block_end><if_stmt>commandline<block_start>msg<augadd>"command used:\n"<line_sep>msg<augadd>f"{pad}{self.commandline}\n"<block_end><return>msg<block_end>__tablename__="nmap_result"<line_sep>id=Column(Integer primary_key=<true>)<line_sep>open=Column(Boolean)<line_sep>reason=Column(String)<line_sep>service=Column(String)<line_sep>product=Column(String)<line_sep>commandline=Column(String)<line_sep>product_version=Column(String)<line_sep>port=relationship(Port)<line_sep>port_id=Column(Integer ForeignKey("port.id"))<line_sep>ip_address=relationship(IPAddress)<line_sep>ip_address_id=Column(Integer ForeignKey("ip_address.id"))<line_sep>target_id=Column(Integer ForeignKey("target.id"))<line_sep>target=relationship("Target" back_populates="nmap_results")<line_sep>nse_results=relationship("NSEResult" secondary=nse_result_association_table back_populates="nmap_results")<block_end> |
<def_stmt>main <block_start>a=["a" 1 "5" 2.3 1.2j]<line_sep>some_condition=<true><for_stmt>x a# If it's all isinstance, we can use a type switch
<block_start><if_stmt>isinstance(x (str float))<block_start>print("String or float!")<block_end><elif_stmt>isinstance(x int)<block_start>print("Integer!")<block_end><else_stmt><block_start>print("Dunno!")<line_sep>print(":)")<block_end># If it's got mixed expressions, we will inline a switch for the isinstance expression
<if_stmt>isinstance(x str)<and>some_condition<block_start>print("String")<block_end><elif_stmt>isinstance(x int)<block_start>print("Integer!")<block_end><else_stmt><block_start>print("Dunno!!")<line_sep>print(":O")<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
<import_from_stmt>django.conf.urls url<import_from_stmt>. views<line_sep>urlpatterns=[url(r'^search/$' views.ip_search name='crits-ips-views-ip_search') url(r'^search/(?P<ip_str>\S+)/$' views.ip_search name='crits-ips-views-ip_search') url(r'^details/(?P<ip>\S+)/$' views.ip_detail name='crits-ips-views-ip_detail') url(r'^remove/$' views.remove_ip name='crits-ips-views-remove_ip') url(r'^list/$' views.ips_listing name='crits-ips-views-ips_listing') url(r'^list/(?P<option>\S+)/$' views.ips_listing name='crits-ips-views-ips_listing') url(r'^bulkadd/$' views.bulk_add_ip name='crits-ips-views-bulk_add_ip') url(r'^(?P<method>\S+)/$' views.add_update_ip name='crits-ips-views-add_update_ip') ]<line_sep> |
<import_stmt>unittest<import_from_stmt>mock Mock<import_from_stmt>foundations_spec *<import_from_stmt>foundations_aws.aws_bucket AWSBucket<class_stmt>TestAWSBucket(Spec)<block_start><class_stmt>MockListing(object)<block_start><def_stmt>__init__ self bucket files<block_start>self._bucket=bucket<line_sep>self._files=files<block_end><def_stmt>__call__ self Bucket Prefix Delimiter<block_start><if_stmt>Bucket<ne>self._bucket<block_start><return>{}<block_end><return>{'Contents':[{'Key':Prefix+key}<for>key self._grouped_and_prefixed_files(Prefix Delimiter)] 'CommonPrefixes':[{'Prefix':Prefix+new_prefix}<for>new_prefix self._unique_delimited_prefixes(Prefix Delimiter)]}<block_end><def_stmt>_unique_delimited_prefixes self prefix delimiter<block_start>items=set()<line_sep># below is done to preserve order
<for_stmt>key self._prefixes(prefix delimiter)<block_start><if_stmt><not>key<in>items<block_start>items.add(key)<line_sep><yield>key<block_end><block_end><block_end><def_stmt>_prefixes self prefix delimiter<block_start><for_stmt>key self._prefixed_files(prefix)<block_start><if_stmt>delimiter<in>key<block_start><yield>key.split(delimiter)[0]<block_end><block_end><block_end><def_stmt>_grouped_and_prefixed_files self prefix delimiter<block_start><for_stmt>key self._prefixed_files(prefix)<block_start><if_stmt><not>delimiter<in>key<block_start><yield>key<block_end><block_end><block_end><def_stmt>_prefixed_files self prefix<block_start>prefix_length=len(prefix)<for_stmt>key self._files<block_start><if_stmt>key.startswith(prefix)<block_start><yield>key[prefix_length:]<block_end><block_end><block_end><block_end>connection_manager=let_patch_mock('foundations_aws.global_state.connection_manager')<line_sep>connection=let_mock()<line_sep>mock_file=let_mock()<line_sep>@let<def_stmt>file_name self<block_start><return>self.faker.name()<block_end>@let<def_stmt>data self<block_start><return>self.faker.sha256()<block_end>@let<def_stmt>data_body self<block_start>mock=Mock()<line_sep>mock.read.return_value=self.data<line_sep>mock.iter_chunks.return_value=[self.data]<line_sep><return>mock<block_end>@let<def_stmt>bucket_prefix self<block_start><return>self.faker.name()<block_end>@let<def_stmt>bucket_postfix self<block_start><return>self.faker.uri_path()<block_end>@let<def_stmt>bucket_name_with_slashes self<block_start><return>self.bucket_prefix+'/'+self.bucket_postfix<block_end>@let<def_stmt>upload_file_name_with_slashes self<block_start><return>self.bucket_postfix+'/'+self.file_name<block_end>@let<def_stmt>bucket self<block_start><return>AWSBucket(self.bucket_path)<block_end>@let<def_stmt>bucket_with_slashes self<block_start><return>AWSBucket(self.bucket_name_with_slashes)<block_end>@let<def_stmt>bucket_path self<block_start><return>'testing-bucket'<block_end>@let<def_stmt>source_path 
self<block_start><return>self.faker.name()<block_end>@let<def_stmt>source_path_with_slashes self<block_start><return>self.bucket_postfix+'/'+self.source_path<block_end>@set_up<def_stmt>set_up self<block_start>self.connection_manager.bucket_connection.return_value=self.connection<block_end><def_stmt>test_upload_from_string_uploads_data_to_bucket_with_prefix self<block_start>self.bucket_with_slashes.upload_from_string(self.file_name self.data)<line_sep>self.connection.put_object.assert_called_with(Bucket=self.bucket_prefix Key=self.upload_file_name_with_slashes Body=self.data)<block_end><def_stmt>test_exists_returns_true_when_file_exists_with_prefix self<block_start>self.bucket_with_slashes.exists(self.file_name)<line_sep>self.connection.head_object.assert_called_with(Bucket=self.bucket_prefix Key=self.upload_file_name_with_slashes)<block_end><def_stmt>test_download_as_string_uploads_data_to_bucket_with_prefix self<block_start>self.connection.get_object=ConditionalReturn()<line_sep>self.connection.get_object.return_when({'Body':self.data_body} Bucket=self.bucket_prefix Key=self.upload_file_name_with_slashes)<line_sep>result=self.bucket_with_slashes.download_as_string(self.file_name)<line_sep>self.assertEqual(self.data result)<block_end><def_stmt>test_download_to_file_uploads_data_to_bucket_with_prefix self<block_start>self.connection.get_object=ConditionalReturn()<line_sep>self.connection.get_object.return_when({'Body':self.data_body} Bucket=self.bucket_prefix Key=self.upload_file_name_with_slashes)<line_sep>result=self.bucket_with_slashes.download_to_file(self.file_name self.mock_file)<line_sep>self.mock_file.write.assert_called_with(self.data)<block_end><def_stmt>test_remove_removes_prefixed_files self<block_start>self.bucket_with_slashes.remove(self.file_name)<line_sep>self.connection.delete_object.assert_called_with(Bucket=self.bucket_prefix Key=self.upload_file_name_with_slashes)<block_end><def_stmt>test_move_moves_prefixed_files 
self<block_start>self.bucket_with_slashes.move(self.source_path self.file_name)<line_sep>source_info={'Bucket':self.bucket_prefix 'Key':self.source_path_with_slashes}<line_sep>self.connection.copy_object.assert_called_with(Bucket=self.bucket_prefix CopySource=source_info Key=self.upload_file_name_with_slashes)<block_end><def_stmt>test_list_files_returns_empty self<block_start>self.connection.list_objects_v2.side_effect=self.MockListing(self.bucket_path [])<line_sep>self.assertEqual([] self._fetch_listing('*'))<block_end><def_stmt>test_list_files_returns_all_results self<block_start>self.connection.list_objects_v2.side_effect=self.MockListing(self.bucket_path ['my.txt' 'scheduler.log'])<line_sep>self.assertEqual(['my.txt' 'scheduler.log'] self._fetch_listing('*'))<block_end><def_stmt>test_list_files_returns_file_type_filter self<block_start>self.connection.list_objects_v2.side_effect=self.MockListing(self.bucket_path ['my.txt' 'scheduler.log'])<line_sep>self.assertEqual(['my.txt'] self._fetch_listing('*.txt'))<block_end><def_stmt>test_list_files_returns_all_results_dot_directory self<block_start>self.connection.list_objects_v2.side_effect=self.MockListing(self.bucket_path ['my.txt' 'scheduler.log'])<line_sep>self.assertEqual(['my.txt' 'scheduler.log'] self._fetch_listing('./*'))<block_end><def_stmt>test_list_files_returns_file_type_filter_dot_directory self<block_start>self.connection.list_objects_v2.side_effect=self.MockListing(self.bucket_path ['my.txt' 'scheduler.log'])<line_sep>self.assertEqual(['my.txt'] self._fetch_listing('./*.txt'))<block_end><def_stmt>test_list_files_returns_only_local_directory self<block_start>self.connection.list_objects_v2.side_effect=self.MockListing(self.bucket_path ['my.txt' 'scheduler.log' 'path/to/some/other/files'])<line_sep>self.assertEqual(['my.txt' 'scheduler.log' 'path'] self._fetch_listing('*'))<block_end><def_stmt>test_list_files_returns_only_sub_directory 
self<block_start>self.connection.list_objects_v2.side_effect=self.MockListing(self.bucket_path ['my.txt' 'scheduler.log' 'path/to/some/other/files'])<line_sep>self.assertEqual(['path/to/some/other/files'] self._fetch_listing('path/to/some/other/*'))<block_end><def_stmt>test_list_files_returns_folder_within_sub_directory self<block_start>self.connection.list_objects_v2.side_effect=self.MockListing(self.bucket_path ['path/to/some/other/files'])<line_sep>self.assertEqual(['path/to'] self._fetch_listing('path/*'))<block_end><def_stmt>test_list_files_returns_arbitrary_filter self<block_start>self.connection.list_objects_v2.side_effect=self.MockListing(self.bucket_path ['some_stuff_here' 'no_stuff_there' 'some_more_stuff_here'])<line_sep>self.assertEqual(['some_stuff_here' 'some_more_stuff_here'] self._fetch_listing('some_*_here'))<block_end><def_stmt>test_list_files_supports_prefixes self<block_start>self.connection.list_objects_v2.side_effect=self.MockListing(self.bucket_prefix [self.upload_file_name_with_slashes])<line_sep>result=list(self.bucket_with_slashes.list_files('*'))<line_sep>self.assertEqual([self.file_name] result)<block_end><def_stmt>_fetch_listing self pathname<block_start>generator=self.bucket.list_files(pathname)<line_sep><return>list(generator)<block_end><block_end> |
<import_stmt>time<import_stmt>os<import_from_stmt>pykafka.test.kafka_instance KafkaInstance KafkaConnection<def_stmt>get_cluster <block_start>"""Gets a Kafka cluster for testing, using one already running is possible.
An already-running cluster is determined by environment variables:
BROKERS, ZOOKEEPER, KAFKA_BIN. This is used primarily to speed up tests
in our Travis-CI environment.
"""<if_stmt>os.environ.get('BROKERS' <none>)<and>os.environ.get('ZOOKEEPER' <none>)<and>os.environ.get('KAFKA_BIN' <none>)# Broker is already running. Use that.
<block_start><return>KafkaConnection(os.environ['KAFKA_BIN'] os.environ['BROKERS'] os.environ['ZOOKEEPER'] os.environ.get('BROKERS_SSL' <none>))<block_end><else_stmt><block_start><return>KafkaInstance(num_instances=3)<block_end><block_end><def_stmt>stop_cluster cluster<block_start>"""Stop a created cluster, or merely flush a pre-existing one."""<if_stmt>isinstance(cluster KafkaInstance)<block_start>cluster.terminate()<block_end><else_stmt><block_start>cluster.flush()<block_end><block_end><def_stmt>retry assertion_callable retry_time=10 wait_between_tries=0.1 exception_to_retry=AssertionError<block_start>"""Retry assertion callable in a loop"""<line_sep>start=time.time()<while_stmt><true><block_start><try_stmt><block_start><return>assertion_callable()<block_end><except_stmt>exception_to_retry<as>e<block_start><if_stmt>time.time()-start<ge>retry_time<block_start><raise>e<block_end>time.sleep(wait_between_tries)<block_end><block_end><block_end> |
<import_from_future_stmt> print_function<import_stmt>os<import_stmt>time<import_stmt>sys<import_from_stmt>functools wraps<import_from_stmt>pytest mark<import_from_stmt>zmq.tests BaseZMQTestCase<import_from_stmt>zmq.utils.win32 allow_interrupt<def_stmt>count_calls f<block_start>@wraps(f)<def_stmt>_ *args **kwds<block_start><try_stmt><block_start><return>f(*args **kwds)<block_end><finally_stmt><block_start>_.__calls__<augadd>1<block_end><block_end>_.__calls__=0<line_sep><return>_<block_end>@mark.new_console<class_stmt>TestWindowsConsoleControlHandler(BaseZMQTestCase)<block_start>@mark.new_console@mark.skipif(<not>sys.platform.startswith('win') reason='Windows only test')<def_stmt>test_handler self<block_start>@count_calls<def_stmt>interrupt_polling <block_start>print('Caught CTRL-C!')<block_end><import_from_stmt>ctypes windll<import_from_stmt>ctypes.wintypes BOOL DWORD<line_sep>kernel32=windll.LoadLibrary('kernel32')<line_sep># <http://msdn.microsoft.com/en-us/library/ms683155.aspx>
GenerateConsoleCtrlEvent=kernel32.GenerateConsoleCtrlEvent<line_sep>GenerateConsoleCtrlEvent.argtypes=(DWORD DWORD)<line_sep>GenerateConsoleCtrlEvent.restype=BOOL<line_sep># Simulate CTRL-C event while handler is active.
<try_stmt><block_start><with_stmt>allow_interrupt(interrupt_polling)<as>context<block_start>result=GenerateConsoleCtrlEvent(0 0)<line_sep># Sleep so that we give time to the handler to
# capture the Ctrl-C event.
time.sleep(0.5)<block_end><block_end><except_stmt>KeyboardInterrupt<block_start><pass><block_end><else_stmt><block_start><if_stmt>result<eq>0<block_start><raise>WindowsError()<block_end><else_stmt><block_start>self.fail('Expecting `KeyboardInterrupt` exception!')<block_end><block_end># Make sure our handler was called.
self.assertEqual(interrupt_polling.__calls__ 1)<block_end><block_end> |
<import_stmt>traceback<import_stmt>copy<import_stmt>gc<import_from_stmt>ctypes c_void_p<import_stmt>itertools<import_stmt>array<import_stmt>math<import_stmt>numpy<as>np<import_from_stmt>OpenGL.GL *<import_from_stmt>PyEngine3D.Common logger<import_from_stmt>PyEngine3D.Utilities Singleton GetClassName Attributes Profiler<import_from_stmt>PyEngine3D.OpenGLContext OpenGLContext<def_stmt>get_numpy_dtype data_type<block_start><if_stmt>GL_BYTE<eq>data_type<block_start><return>np.int8<block_end><elif_stmt>GL_UNSIGNED_BYTE<eq>data_type<block_start><return>np.uint8<block_end><elif_stmt>GL_UNSIGNED_BYTE<eq>data_type<block_start><return>np.uint8<block_end><elif_stmt>GL_SHORT<eq>data_type<block_start><return>np.int16<block_end><elif_stmt>GL_UNSIGNED_SHORT<eq>data_type<block_start><return>np.uint16<block_end><elif_stmt>GL_INT<eq>data_type<block_start><return>np.int32<block_end><elif_stmt>GL_UNSIGNED_INT<eq>data_type<block_start><return>np.uint32<block_end><elif_stmt>GL_UNSIGNED_INT64<eq>data_type<block_start><return>np.uint64<block_end><elif_stmt>GL_FLOAT<eq>data_type<block_start><return>np.float32<block_end><elif_stmt>GL_DOUBLE<eq>data_type<block_start><return>np.float64<block_end>logger.error('Cannot convert to numpy dtype. UNKOWN DATA TYPE(%s)' data_type)<line_sep><return>np.uint8<block_end><def_stmt>get_internal_format str_image_mode<block_start><if_stmt>str_image_mode<eq>"RGBA"<block_start><return>GL_RGBA8<block_end><elif_stmt>str_image_mode<eq>"RGB"<block_start><return>GL_RGB8<block_end><elif_stmt>str_image_mode<eq>"L"<or>str_image_mode<eq>"P"<or>str_image_mode<eq>"R"<block_start><return>GL_R8<block_end><else_stmt><block_start>logger.error("get_internal_format::unknown image mode ( %s )"%str_image_mode)<block_end><return>GL_RGBA8<block_end><def_stmt>get_texture_format str_image_mode<block_start><if_stmt>str_image_mode<eq>"RGBA"# R,G,B,A order. GL_BGRA is faster than GL_RGBA
<block_start><return>GL_RGBA# GL_BGRA
<block_end><elif_stmt>str_image_mode<eq>"RGB"<block_start><return>GL_RGB<block_end><elif_stmt>str_image_mode<eq>"L"<or>str_image_mode<eq>"P"<or>str_image_mode<eq>"R"<block_start><return>GL_RED<block_end><else_stmt><block_start>logger.error("get_texture_format::unknown image mode ( %s )"%str_image_mode)<block_end><return>GL_RGBA<block_end><def_stmt>get_image_mode texture_internal_format<block_start><if_stmt>texture_internal_format<in>(GL_RGBA GL_BGRA)<block_start><return>"RGBA"<block_end><elif_stmt>texture_internal_format<in>(GL_RGB GL_BGR)<block_start><return>"RGB"<block_end><elif_stmt>texture_internal_format<eq>GL_RG<block_start><return>"RG"<block_end><elif_stmt>texture_internal_format<in>(GL_R8 GL_R16F GL_RED GL_DEPTH_STENCIL GL_DEPTH_COMPONENT)<block_start><return>"R"<block_end><elif_stmt>texture_internal_format<eq>GL_LUMINANCE<block_start><return>"L"<block_end><else_stmt><block_start>logger.error("get_image_mode::unknown image format ( %s )"%texture_internal_format)<block_end><return>"RGBA"<block_end><def_stmt>CreateTexture **texture_datas<block_start>texture_class=texture_datas.get('texture_type' Texture2D)<if_stmt>texture_class<is><not><none><block_start><if_stmt>type(texture_class)<is>str<block_start>texture_class=eval(texture_class)<block_end><return>texture_class(**texture_datas)<block_end><return><none><block_end><class_stmt>Texture<block_start>target=GL_TEXTURE_2D<line_sep>default_wrap=GL_REPEAT<line_sep>use_glTexStorage=<false><def_stmt>__init__ self 
**texture_data<block_start>self.name=texture_data.get('name')<line_sep>self.attachment=<false><line_sep>self.image_mode="RGBA"<line_sep>self.internal_format=GL_RGBA8<line_sep>self.texture_format=GL_RGBA<line_sep>self.sRGB=<false><line_sep>self.clear_color=<none><line_sep>self.multisample_count=0<line_sep>self.width=0<line_sep>self.height=0<line_sep>self.depth=1<line_sep>self.data_type=GL_UNSIGNED_BYTE<line_sep>self.min_filter=GL_LINEAR_MIPMAP_LINEAR<line_sep>self.mag_filter=GL_LINEAR<line_sep>self.enable_mipmap=<false><line_sep>self.wrap=self.default_wrap<line_sep>self.wrap_s=self.default_wrap<line_sep>self.wrap_t=self.default_wrap<line_sep>self.wrap_r=self.default_wrap<line_sep>self.buffer=-1<line_sep>self.sampler_handle=-1<line_sep>self.attribute=Attributes()<line_sep>self.create_texture(**texture_data)<block_end><def_stmt>create_texture self **texture_data<block_start><if_stmt>self.buffer<ne>-1<block_start>self.delete()<block_end>self.attachment=<false><line_sep>self.image_mode=texture_data.get('image_mode')<line_sep>self.internal_format=texture_data.get('internal_format')<line_sep>self.texture_format=texture_data.get('texture_format')<line_sep>self.sRGB=texture_data.get('sRGB' <false>)<line_sep>self.clear_color=texture_data.get('clear_color')<line_sep>self.multisample_count=0<if_stmt>self.internal_format<is><none><and>self.image_mode<block_start>self.internal_format=get_internal_format(self.image_mode)<block_end><if_stmt>self.texture_format<is><none><and>self.image_mode<block_start>self.texture_format=get_texture_format(self.image_mode)<block_end><if_stmt>self.image_mode<is><none><and>self.texture_format<block_start>self.image_mode=get_image_mode(self.texture_format)<block_end># Convert to sRGB
<if_stmt>self.sRGB<block_start><if_stmt>self.internal_format<eq>GL_RGB<block_start>self.internal_format=GL_SRGB8<block_end><elif_stmt>self.internal_format<eq>GL_RGBA<block_start>self.internal_format=GL_SRGB8_ALPHA8<block_end><block_end><if_stmt>GL_RGBA<eq>self.internal_format<block_start>self.internal_format=GL_RGBA8<block_end><if_stmt>GL_RGB<eq>self.internal_format<block_start>self.internal_format=GL_RGB8<block_end>self.width=int(texture_data.get('width' 0))<line_sep>self.height=int(texture_data.get('height' 0))<line_sep>self.depth=int(max(1 texture_data.get('depth' 1)))<line_sep>self.data_type=texture_data.get('data_type' GL_UNSIGNED_BYTE)<line_sep>self.min_filter=texture_data.get('min_filter' GL_LINEAR_MIPMAP_LINEAR)<line_sep>self.mag_filter=texture_data.get('mag_filter' GL_LINEAR)# GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR, GL_NEAREST
mipmap_filters=(GL_LINEAR_MIPMAP_LINEAR GL_LINEAR_MIPMAP_NEAREST GL_NEAREST_MIPMAP_LINEAR GL_NEAREST_MIPMAP_NEAREST)<line_sep>self.enable_mipmap=self.min_filter<in>mipmap_filters<if_stmt>self.target<eq>GL_TEXTURE_2D_MULTISAMPLE<block_start>self.enable_mipmap=<false><block_end>self.wrap=texture_data.get('wrap' self.default_wrap)# GL_REPEAT, GL_CLAMP
self.wrap_s=texture_data.get('wrap_s')<line_sep>self.wrap_t=texture_data.get('wrap_t')<line_sep>self.wrap_r=texture_data.get('wrap_r')<line_sep>self.buffer=-1<line_sep>self.sampler_handle=-1<line_sep># texture parameter overwrite
# self.sampler_handle = glGenSamplers(1)
# glSamplerParameteri(self.sampler_handle, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
# glBindSampler(0, self.sampler_handle)
logger.info("Create %s : %s %dx%dx%d %s mipmap(%s)."%(GetClassName(self) self.name self.width self.height self.depth str(self.internal_format) 'Enable'<if>self.enable_mipmap<else>'Disable'))<line_sep>self.attribute=Attributes()<block_end><def_stmt>__del__ self<block_start><pass><block_end><def_stmt>delete self<block_start>logger.info("Delete %s : %s"%(GetClassName(self) self.name))<line_sep>glDeleteTextures([self.buffer ])<line_sep>self.buffer=-1<block_end><def_stmt>get_texture_info self<block_start><return>dict(texture_type=self.__class__.__name__ width=self.width height=self.height depth=self.depth image_mode=self.image_mode internal_format=self.internal_format texture_format=self.texture_format data_type=self.data_type min_filter=self.min_filter mag_filter=self.mag_filter wrap=self.wrap wrap_s=self.wrap_s wrap_t=self.wrap_t wrap_r=self.wrap_r )<block_end><def_stmt>get_save_data self<block_start>save_data=self.get_texture_info()<line_sep>data=self.get_image_data()<if_stmt>data<is><not><none><block_start>save_data['data']=data<block_end><return>save_data<block_end><def_stmt>get_mipmap_size self level=0<block_start><if_stmt>0<l>level<block_start>divider=2.0<power>level<line_sep>width=max(1 int(self.width/divider))<line_sep>height=max(1 int(self.height/divider))<line_sep><return>width height<block_end><return>self.width self.height<block_end><def_stmt>get_image_data self level=0<block_start><if_stmt>self.target<not><in>(GL_TEXTURE_2D GL_TEXTURE_2D_ARRAY GL_TEXTURE_3D)<block_start><return><none><block_end>level=min(level self.get_mipmap_count())<line_sep>dtype=get_numpy_dtype(self.data_type)<try_stmt><block_start>glBindTexture(self.target self.buffer)<line_sep>data=OpenGLContext.glGetTexImage(self.target level self.texture_format self.data_type)<line_sep># convert to numpy array
<if_stmt>type(data)<is>bytes<block_start>data=np.fromstring(data dtype=dtype)<block_end><else_stmt><block_start>data=np.array(data dtype=dtype)<block_end>glBindTexture(self.target 0)<line_sep><return>data<block_end><except_stmt><block_start>logger.error(traceback.format_exc())<line_sep>logger.error('%s failed to get image data.'%self.name)<line_sep>logger.info('Try to glReadPixels.')<block_end>glBindTexture(self.target self.buffer)<line_sep>fb=glGenFramebuffers(1)<line_sep>glBindFramebuffer(GL_FRAMEBUFFER fb)<line_sep>data=[]<for_stmt>layer range(self.depth)<block_start><if_stmt>GL_TEXTURE_2D<eq>self.target<block_start>glFramebufferTexture2D(GL_FRAMEBUFFER GL_COLOR_ATTACHMENT0 GL_TEXTURE_2D self.buffer level)<block_end><elif_stmt>GL_TEXTURE_3D<eq>self.target<block_start>glFramebufferTexture3D(GL_FRAMEBUFFER GL_COLOR_ATTACHMENT0 GL_TEXTURE_3D self.buffer level layer)<block_end><elif_stmt>GL_TEXTURE_2D_ARRAY<eq>self.target<block_start>glFramebufferTextureLayer(GL_FRAMEBUFFER GL_COLOR_ATTACHMENT0 self.buffer level layer)<block_end>glReadBuffer(GL_COLOR_ATTACHMENT0)<line_sep>width,height=self.get_mipmap_size(level)<line_sep>pixels=glReadPixels(0 0 width height self.texture_format self.data_type)<line_sep># convert to numpy array
<if_stmt>type(pixels)<is>bytes<block_start>pixels=np.fromstring(pixels dtype=dtype)<block_end>data.append(pixels)<block_end>data=np.array(data dtype=dtype)<line_sep>glBindTexture(self.target 0)<line_sep>glBindFramebuffer(GL_FRAMEBUFFER 0)<line_sep>glDeleteFramebuffers(1 [fb ])<line_sep><return>data<block_end><def_stmt>get_mipmap_count self<block_start>factor=max(max(self.width self.height) self.depth)<line_sep><return>math.floor(math.log2(factor))+1<block_end><def_stmt>generate_mipmap self<block_start><if_stmt>self.enable_mipmap<block_start>glBindTexture(self.target self.buffer)<line_sep>glGenerateMipmap(self.target)<block_end><else_stmt><block_start>logger.warn('%s disable to generate mipmap.'%self.name)<block_end><block_end><def_stmt>texure_wrap self wrap<block_start>glTexParameteri(self.target GL_TEXTURE_WRAP_S wrap)<line_sep>glTexParameteri(self.target GL_TEXTURE_WRAP_T wrap)<line_sep>glTexParameteri(self.target GL_TEXTURE_WRAP_R wrap)<block_end><def_stmt>bind_texture self wrap=<none><block_start><if_stmt>self.buffer<eq>-1<block_start>logger.warn("%s texture is invalid."%self.name)<line_sep><return><block_end>glBindTexture(self.target self.buffer)<if_stmt>wrap<is><not><none><block_start>self.texure_wrap(wrap)<block_end><block_end><def_stmt>bind_image self image_unit level=0 access=GL_READ_WRITE<block_start><if_stmt>self.buffer<eq>-1<block_start>logger.warn("%s texture is invalid."%self.name)<line_sep><return><block_end># flag : GL_READ_WRITE, GL_WRITE_ONLY, GL_READ_ONLY
glBindImageTexture(image_unit self.buffer level GL_FALSE 0 access self.internal_format)<block_end><def_stmt>is_attached self<block_start><return>self.attachment<block_end><def_stmt>set_attachment self attachment<block_start>self.attachment=attachment<block_end><def_stmt>get_attribute self<block_start>self.attribute.set_attribute("name" self.name)<line_sep>self.attribute.set_attribute("target" self.target)<line_sep>self.attribute.set_attribute("width" self.width)<line_sep>self.attribute.set_attribute("height" self.height)<line_sep>self.attribute.set_attribute("depth" self.depth)<line_sep>self.attribute.set_attribute("image_mode" self.image_mode)<line_sep>self.attribute.set_attribute("internal_format" self.internal_format)<line_sep>self.attribute.set_attribute("texture_format" self.texture_format)<line_sep>self.attribute.set_attribute("data_type" self.data_type)<line_sep>self.attribute.set_attribute("min_filter" self.min_filter)<line_sep>self.attribute.set_attribute("mag_filter" self.mag_filter)<line_sep>self.attribute.set_attribute("multisample_count" self.multisample_count)<line_sep>self.attribute.set_attribute("wrap" self.wrap)<line_sep>self.attribute.set_attribute("wrap_s" self.wrap_s)<line_sep>self.attribute.set_attribute("wrap_t" self.wrap_t)<line_sep>self.attribute.set_attribute("wrap_r" self.wrap_r)<line_sep><return>self.attribute<block_end><def_stmt>set_attribute self attribute_name attribute_value item_info_history attribute_index<block_start><if_stmt>hasattr(self attribute_name)<and>""<ne>attribute_value<block_start>setattr(self attribute_name eval(attribute_value))<block_end><if_stmt>'wrap'<in>attribute_name<block_start>glBindTexture(self.target self.buffer)<line_sep>glTexParameteri(self.target GL_TEXTURE_WRAP_S self.wrap_s<or>self.wrap)<line_sep>glTexParameteri(self.target GL_TEXTURE_WRAP_T self.wrap_t<or>self.wrap)<line_sep>glTexParameteri(self.target GL_TEXTURE_WRAP_R self.wrap_r<or>self.wrap)<line_sep>glBindTexture(self.target 
0)<block_end><return>self.attribute<block_end><block_end><class_stmt>Texture2D(Texture)<block_start>target=GL_TEXTURE_2D<def_stmt>create_texture self **texture_data<block_start>Texture.create_texture(self **texture_data)<line_sep>data=texture_data.get('data')<line_sep>self.buffer=glGenTextures(1)<line_sep>glBindTexture(GL_TEXTURE_2D self.buffer)<if_stmt>self.use_glTexStorage<block_start>glTexStorage2D(GL_TEXTURE_2D self.get_mipmap_count() self.internal_format self.width self.height)<if_stmt>data<is><not><none><block_start>glTexSubImage2D(GL_TEXTURE_2D 0 0 0 self.width self.height self.texture_format self.data_type data)<block_end><block_end><else_stmt><block_start>glTexImage2D(GL_TEXTURE_2D 0 self.internal_format self.width self.height 0 self.texture_format self.data_type data)<block_end><if_stmt>self.enable_mipmap<block_start>glGenerateMipmap(GL_TEXTURE_2D)<block_end>glTexParameteri(GL_TEXTURE_2D GL_TEXTURE_WRAP_S self.wrap_s<or>self.wrap)<line_sep>glTexParameteri(GL_TEXTURE_2D GL_TEXTURE_WRAP_T self.wrap_t<or>self.wrap)<line_sep>glTexParameteri(GL_TEXTURE_2D GL_TEXTURE_MIN_FILTER self.min_filter)<line_sep>glTexParameteri(GL_TEXTURE_2D GL_TEXTURE_MAG_FILTER self.mag_filter)<if_stmt>self.clear_color<is><not><none><block_start>glClearTexImage(self.buffer 0 self.texture_format self.data_type self.clear_color)<block_end>glBindTexture(GL_TEXTURE_2D 0)<block_end><block_end><class_stmt>Texture2DArray(Texture)<block_start>target=GL_TEXTURE_2D_ARRAY<def_stmt>create_texture self **texture_data<block_start>Texture.create_texture(self **texture_data)<line_sep>data=texture_data.get('data')<line_sep>self.buffer=glGenTextures(1)<line_sep>glBindTexture(GL_TEXTURE_2D_ARRAY self.buffer)<if_stmt>self.use_glTexStorage<block_start>glTexStorage3D(GL_TEXTURE_2D_ARRAY self.get_mipmap_count() self.internal_format self.width self.height self.depth)<if_stmt>data<is><not><none><block_start>glTexSubImage3D(GL_TEXTURE_2D_ARRAY 0 0 0 0 self.width self.height self.depth self.texture_format 
self.data_type data)<block_end><block_end><else_stmt><block_start>glTexImage3D(GL_TEXTURE_2D_ARRAY 0 self.internal_format self.width self.height self.depth 0 self.texture_format self.data_type data)<block_end><if_stmt>self.enable_mipmap<block_start>glGenerateMipmap(GL_TEXTURE_2D_ARRAY)<block_end>glTexParameteri(GL_TEXTURE_2D_ARRAY GL_TEXTURE_WRAP_S self.wrap_s<or>self.wrap)<line_sep>glTexParameteri(GL_TEXTURE_2D_ARRAY GL_TEXTURE_WRAP_T self.wrap_t<or>self.wrap)<line_sep>glTexParameteri(GL_TEXTURE_2D_ARRAY GL_TEXTURE_MIN_FILTER self.min_filter)<line_sep>glTexParameteri(GL_TEXTURE_2D_ARRAY GL_TEXTURE_MAG_FILTER self.mag_filter)<line_sep>glBindTexture(GL_TEXTURE_2D_ARRAY 0)<block_end><block_end><class_stmt>Texture3D(Texture)<block_start>target=GL_TEXTURE_3D<def_stmt>create_texture self **texture_data<block_start>Texture.create_texture(self **texture_data)<line_sep>data=texture_data.get('data')<line_sep>self.buffer=glGenTextures(1)<line_sep>glBindTexture(GL_TEXTURE_3D self.buffer)<if_stmt>self.use_glTexStorage<block_start>glTexStorage3D(GL_TEXTURE_3D self.get_mipmap_count() self.internal_format self.width self.height self.depth)<if_stmt>data<is><not><none><block_start>glTexSubImage3D(GL_TEXTURE_3D 0 0 0 0 self.width self.height self.depth self.texture_format self.data_type data)<block_end><block_end><else_stmt><block_start>glTexImage3D(GL_TEXTURE_3D 0 self.internal_format self.width self.height self.depth 0 self.texture_format self.data_type data)<block_end><if_stmt>self.enable_mipmap<block_start>glGenerateMipmap(GL_TEXTURE_3D)<block_end>glTexParameteri(GL_TEXTURE_3D GL_TEXTURE_WRAP_S self.wrap_s<or>self.wrap)<line_sep>glTexParameteri(GL_TEXTURE_3D GL_TEXTURE_WRAP_T self.wrap_t<or>self.wrap)<line_sep>glTexParameteri(GL_TEXTURE_3D GL_TEXTURE_WRAP_R self.wrap_r<or>self.wrap)<line_sep>glTexParameteri(GL_TEXTURE_3D GL_TEXTURE_MIN_FILTER self.min_filter)<line_sep>glTexParameteri(GL_TEXTURE_3D GL_TEXTURE_MAG_FILTER self.mag_filter)<line_sep>glBindTexture(GL_TEXTURE_3D 
0)<block_end><block_end><class_stmt>Texture2DMultiSample(Texture)<block_start>target=GL_TEXTURE_2D_MULTISAMPLE<def_stmt>create_texture self **texture_data<block_start>Texture.create_texture(self **texture_data)<line_sep>multisample_count=texture_data.get('multisample_count' 4)<line_sep>self.multisample_count=multisample_count-(multisample_count%4)<line_sep>self.buffer=glGenTextures(1)<line_sep>glBindTexture(GL_TEXTURE_2D_MULTISAMPLE self.buffer)<if_stmt>self.use_glTexStorage<block_start>glTexStorage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE self.multisample_count self.internal_format self.width self.height GL_TRUE)<block_end><else_stmt><block_start>glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE self.multisample_count self.internal_format self.width self.height GL_TRUE)<block_end>glBindTexture(GL_TEXTURE_2D_MULTISAMPLE 0)<block_end><block_end><class_stmt>TextureCube(Texture)<block_start>target=GL_TEXTURE_CUBE_MAP<line_sep>default_wrap=GL_REPEAT<def_stmt>__init__ self **texture_data<block_start>self.texture_positive_x=<none><line_sep>self.texture_negative_x=<none><line_sep>self.texture_positive_y=<none><line_sep>self.texture_negative_y=<none><line_sep>self.texture_positive_z=<none><line_sep>self.texture_negative_z=<none><line_sep>Texture.__init__(self **texture_data)<block_end><def_stmt>create_texture self **texture_data<block_start>Texture.create_texture(self **texture_data)<line_sep># If texture2d is None then create render target.
face_texture_datas=copy.copy(texture_data)<line_sep>face_texture_datas.pop('name')<line_sep>face_texture_datas['texture_type']=Texture2D<line_sep>self.texture_positive_x=texture_data.get('texture_positive_x' CreateTexture(name=self.name+"_right" **face_texture_datas))<line_sep>self.texture_negative_x=texture_data.get('texture_negative_x' CreateTexture(name=self.name+"_left" **face_texture_datas))<line_sep>self.texture_positive_y=texture_data.get('texture_positive_y' CreateTexture(name=self.name+"_top" **face_texture_datas))<line_sep>self.texture_negative_y=texture_data.get('texture_negative_y' CreateTexture(name=self.name+"_bottom" **face_texture_datas))<line_sep>self.texture_positive_z=texture_data.get('texture_positive_z' CreateTexture(name=self.name+"_front" **face_texture_datas))<line_sep>self.texture_negative_z=texture_data.get('texture_negative_z' CreateTexture(name=self.name+"_back" **face_texture_datas))<line_sep>self.buffer=glGenTextures(1)<line_sep>glBindTexture(GL_TEXTURE_CUBE_MAP self.buffer)<if_stmt>self.use_glTexStorage<block_start>glTexStorage2D(GL_TEXTURE_CUBE_MAP self.get_mipmap_count() self.internal_format self.width self.height)<line_sep>self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X self.texture_positive_x)# Right
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X self.texture_negative_x)# Left
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y self.texture_positive_y)# Top
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y self.texture_negative_y)# Bottom
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z self.texture_positive_z)# Front
self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z self.texture_negative_z)# Back
<block_end><else_stmt><block_start>self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X self.texture_positive_x)# Right
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X self.texture_negative_x)# Left
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y self.texture_positive_y)# Top
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y self.texture_negative_y)# Bottom
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z self.texture_positive_z)# Front
self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z self.texture_negative_z)<block_end># Back
<if_stmt>self.enable_mipmap<block_start>glGenerateMipmap(GL_TEXTURE_CUBE_MAP)<block_end>glTexParameteri(GL_TEXTURE_CUBE_MAP GL_TEXTURE_WRAP_S self.wrap_s<or>self.wrap)<line_sep>glTexParameteri(GL_TEXTURE_CUBE_MAP GL_TEXTURE_WRAP_T self.wrap_t<or>self.wrap)<line_sep>glTexParameteri(GL_TEXTURE_CUBE_MAP GL_TEXTURE_WRAP_R self.wrap_r<or>self.wrap)<line_sep>glTexParameteri(GL_TEXTURE_CUBE_MAP GL_TEXTURE_MIN_FILTER self.min_filter)<line_sep>glTexParameteri(GL_TEXTURE_CUBE_MAP GL_TEXTURE_MAG_FILTER self.mag_filter)<line_sep>glBindTexture(GL_TEXTURE_CUBE_MAP 0)<block_end>@staticmethod<def_stmt>createTexImage2D target_face texture<block_start>glTexImage2D(target_face 0 texture.internal_format texture.width texture.height 0 texture.texture_format texture.data_type texture.get_image_data())<block_end>@staticmethod<def_stmt>createTexSubImage2D target_face texture<block_start>glTexSubImage2D(target_face 0 0 0 texture.width texture.height texture.texture_format texture.data_type texture.get_image_data())<block_end><def_stmt>delete self<block_start>super(TextureCube self).delete()<line_sep>self.texture_positive_x.delete()<line_sep>self.texture_negative_x.delete()<line_sep>self.texture_positive_y.delete()<line_sep>self.texture_negative_y.delete()<line_sep>self.texture_positive_z.delete()<line_sep>self.texture_negative_z.delete()<block_end><def_stmt>get_save_data self get_image_data=<true><block_start>save_data=Texture.get_save_data(self)<line_sep>save_data['texture_positive_x']=self.texture_positive_x.name<line_sep>save_data['texture_negative_x']=self.texture_negative_x.name<line_sep>save_data['texture_positive_y']=self.texture_positive_y.name<line_sep>save_data['texture_negative_y']=self.texture_negative_y.name<line_sep>save_data['texture_positive_z']=self.texture_positive_z.name<line_sep>save_data['texture_negative_z']=self.texture_negative_z.name<line_sep><return>save_data<block_end><def_stmt>get_attribute 
self<block_start>Texture.get_attribute(self)<line_sep>self.attribute.set_attribute("texture_positive_x" self.texture_positive_x.name)<line_sep>self.attribute.set_attribute("texture_negative_x" self.texture_negative_x.name)<line_sep>self.attribute.set_attribute("texture_positive_y" self.texture_positive_y.name)<line_sep>self.attribute.set_attribute("texture_negative_y" self.texture_negative_y.name)<line_sep>self.attribute.set_attribute("texture_positive_z" self.texture_positive_z.name)<line_sep>self.attribute.set_attribute("texture_negative_z" self.texture_negative_z.name)<line_sep><return>self.attribute<block_end><block_end> |
# -*- coding: UTF-8 -*-
"""
@<NAME> 2020_09_08
"""<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>models.blocks.SE_block SE<import_from_stmt>models.blocks.conv_bn BN_Conv2d<class_stmt>ResNeXt_Block(nn.Module)<block_start>"""
ResNeXt block with group convolutions
"""<def_stmt>__init__ self in_chnls cardinality group_depth stride is_se=<false><block_start>super(ResNeXt_Block self).__init__()<line_sep>self.is_se=is_se<line_sep>self.group_chnls=cardinality<times>group_depth<line_sep>self.conv1=BN_Conv2d(in_chnls self.group_chnls 1 stride=1 padding=0)<line_sep>self.conv2=BN_Conv2d(self.group_chnls self.group_chnls 3 stride=stride padding=1 groups=cardinality)<line_sep>self.conv3=nn.Conv2d(self.group_chnls self.group_chnls<times>2 1 stride=1 padding=0)<line_sep>self.bn=nn.BatchNorm2d(self.group_chnls<times>2)<if_stmt>self.is_se<block_start>self.se=SE(self.group_chnls<times>2 16)<block_end>self.short_cut=nn.Sequential(nn.Conv2d(in_chnls self.group_chnls<times>2 1 stride 0 bias=<false>) nn.BatchNorm2d(self.group_chnls<times>2))<block_end><def_stmt>forward self x<block_start>out=self.conv1(x)<line_sep>out=self.conv2(out)<line_sep>out=self.bn(self.conv3(out))<if_stmt>self.is_se<block_start>coefficient=self.se(out)<line_sep>out<augmul>coefficient<block_end>out<augadd>self.short_cut(x)<line_sep><return>F.relu(out)<block_end><block_end> |
# Get a collection object for 'my_collection'
myColl=db.get_collection('my_collection')<line_sep> |
<import_stmt>warnings<import_from_stmt>dataclasses dataclass<import_from_stmt>typing List Optional<import_stmt>torch<import_from_stmt>falkon.utils.stream_utils sync_current_stream<import_from_stmt>falkon.mmv_ops.utils _get_gpu_info create_output_mat _start_wait_processes<import_from_stmt>falkon.options FalkonOptions BaseOptions<import_from_stmt>falkon.utils decide_cuda<import_from_stmt>falkon.utils.helpers sizeof_dtype calc_gpu_block_sizes<import_from_stmt>pykeops.torch Genred<line_sep>@dataclass(frozen=<true>)<class_stmt>ArgsFmmv<block_start>X1:torch.Tensor<line_sep>X2:torch.Tensor<line_sep>v:torch.Tensor<line_sep>other_vars:List[torch.Tensor]<line_sep>out:torch.Tensor<line_sep>gpu_ram:float<line_sep>backend:str<line_sep>function:callable<block_end><def_stmt>_keops_dtype dtype:torch.dtype<arrow>str<block_start>"""Returns a string which represents the given data type.
The string representation is necessary for KeOps which doesn't
like type objects.
"""<if_stmt>dtype<eq>torch.float64<block_start><return>'float64'<block_end><elif_stmt>dtype<eq>torch.float32<block_start><return>'float32'<block_end><else_stmt><block_start><raise>NotImplementedError("Data type %s not recognized."%(dtype))<block_end><block_end><def_stmt>_decide_backend opt:BaseOptions num_dim:int<arrow>str<block_start>"""Switch between CPU and GPU backend for KeOps
"""<if_stmt><not>decide_cuda(opt)<block_start><return>'CPU'<block_end><else_stmt><block_start><return>'GPU_1D'<block_end><block_end><def_stmt>_estimate_split N M D T R ds<block_start>"""Estimate the splits along dimensions N and M for a MVM to fit in memory
The operations consist of computing the product between a kernel
matrix (from a N*D and a M*D matrix) and a 'vector' of shape M*T
This typically requires storage of the input and output matrices,
which occupies (M + N)*(D + T) memory locations plus some intermediate
buffers to perform computations.
TODO: It is not clear how much intermediate memory KeOps requires;
the only thing that is certain is that it is quadratic in D.
For now we sidestep this issue by using a smaller R than what is
actually available in GPU memory.
This function calculates the split along N and M into blocks of size n*m
so that we can compute the kernel-vector product between such blocks
and still fit in GPU memory.
Parameters
-----------
- N : int
The first dimension of the kernel matrix
- M : int
The second dimension of the kernel matrix
- D : int
The data dimensionality
- T : int
The number of output columns
- R : float
The amount of memory available (in bytes)
- ds : int
The size in bytes of each element in the data matrices
(e.g. 4 if the data is in single precision).
Returns
--------
- n : int
The block size to be used along the first dimension
- m : int
The block size along the second dimension of the kernel
matrix
Raises
-------
RuntimeError
If the available memory `R` is insufficient to store even the smallest
possible input matrices. This may happen if `D` is very large since we
do not perform any splitting along `D`.
Notes
------
We find 'good' values of M, N such that
N*(D+T) + M*(D+T) <= R/ds
"""<line_sep>R=R/ds<line_sep># We have a linear equation in two variables (N, M)
slope=-1<line_sep>intercept=R/(D+T)<line_sep>slack_points=10<line_sep># We try to pick a point at the edges such that only one kind of split
# is necessary
<if_stmt>N<l>intercept-1<block_start>M=min(M intercept+slope<times>N)<block_end><elif_stmt>M<l>intercept-1<block_start>N=min(N intercept+slope<times>M)<block_end><else_stmt># All points on the slope such that N, M > 0 are possible
<block_start>N=intercept-slack_points-1<line_sep>M=intercept+slope<times>N<block_end><if_stmt>N<le>0<or>M<le>0<block_start><raise>RuntimeError("Insufficient available GPU "<concat>"memory (available %.2fGB)"%(R<times>ds/2<power>30))<block_end><return>int(N) int(M)<block_end><def_stmt>_single_gpu_method proc_idx queue device_id<block_start>a:ArgsFmmv=queue.get()<line_sep>backend=a.backend<line_sep>X1=a.X1<line_sep>X2=a.X2<line_sep>v=a.v<line_sep>oout=a.out<line_sep>other_vars=a.other_vars<line_sep>fn=a.function<line_sep>R=a.gpu_ram<line_sep>N,D=X1.shape<line_sep>M=X2.shape[0]<line_sep>T=v.shape[1]<line_sep>device=torch.device(f"cuda:{device_id}")<line_sep># Second round of subdivision (only if necessary due to RAM constraints)
n,m=_estimate_split(N M D T R sizeof_dtype(X1.dtype))<line_sep>other_vars_dev=[ov.to(device copy=<false>)<for>ov other_vars]<line_sep>out_ic=oout.device.index<eq>device_id<line_sep># Process the two rounds of splitting with a nested loop.
<with_stmt>torch.cuda.device(device_id)<block_start><for_stmt>mi range(0 M m)<block_start>ml=min(m M-mi)<if_stmt>ml<ne>M<and>mi<g>0# Then we must create a temporary output array
<block_start>out=torch.empty_like(oout)<block_end><else_stmt><block_start>out=oout<block_end>cX2=X2[mi:mi+ml :].to(device copy=<false>)<line_sep>cv=v[mi:mi+ml :].to(device copy=<false>)<for_stmt>ni range(0 N n)<block_start>nl=min(n N-ni)<line_sep>cX1=X1[ni:ni+nl :].to(device copy=<false>)<line_sep>cout=out[ni:ni+nl :].to(device copy=<false>)<line_sep>variables=[cX1 cX2 cv]+other_vars_dev<line_sep>fn(*variables out=cout device_id=device_id backend=backend)<if_stmt><not>out_ic<block_start>out[ni:ni+nl :].copy_(cout)<block_end><block_end><if_stmt>ml<ne>M<and>mi<g>0<block_start>oout.add_(out)<block_end><block_end><block_end><return>oout<block_end><def_stmt>run_keops_mmv X1:torch.Tensor X2:torch.Tensor v:torch.Tensor other_vars:List[torch.Tensor] out:Optional[torch.Tensor] formula:str aliases:List[str] axis:int reduction:str='Sum' opt:Optional[FalkonOptions]=<none><arrow>torch.Tensor<block_start><if_stmt>opt<is><none><block_start>opt=FalkonOptions()<block_end># Choose backend
N,D=X1.shape<line_sep>T=v.shape[1]<line_sep>backend=_decide_backend(opt D)<line_sep>dtype=_keops_dtype(X1.dtype)<line_sep>data_devs=[X1.device X2.device v.device]<if_stmt>any([ddev.type<eq>'cuda'<for>ddev data_devs])<and>(<not>backend.startswith("GPU"))<block_start>warnings.warn("KeOps backend was chosen to be CPU, but GPU input tensors found. "<concat>"Defaulting to 'GPU_1D' backend. To force usage of the CPU backend, "<concat>"please pass CPU tensors; to avoid this warning if the GPU backend is "<concat>"desired, check your options (i.e. set 'use_cpu=False').")<line_sep>backend="GPU_1D"<block_end>differentiable=any([X1.requires_grad X2.requires_grad v.requires_grad]+[o.requires_grad<for>o other_vars])<if_stmt>differentiable<block_start><import_from_stmt>falkon.kernels.tiling_red TilingGenred<line_sep>fn=TilingGenred(formula aliases reduction_op='Sum' axis=1 dtype=dtype dtype_acc="auto" sum_scheme="auto" opt=opt)<line_sep><return>fn(X1 X2 v *other_vars out=out backend=backend)<block_end># Define formula wrapper
fn=Genred(formula aliases reduction_op=reduction axis=axis dtype=dtype dtype_acc=opt.keops_acc_dtype sum_scheme=opt.keops_sum_scheme)<line_sep>comp_dev_type=backend[:3].lower().replace('gpu' 'cuda')# 'cpu' or 'cuda'
out=create_output_mat(out data_devs is_sparse=<false> shape=(N T) dtype=X1.dtype comp_dev_type=comp_dev_type other_mat=X1 output_stride="C")<if_stmt>comp_dev_type<eq>'cpu'<and>all([ddev.type<eq>'cpu'<for>ddev data_devs])# incore CPU
<block_start>variables=[X1 X2 v]+other_vars<line_sep>out=fn(*variables out=out backend=backend)<block_end><elif_stmt>comp_dev_type<eq>'cuda'<and>all([ddev.type<eq>'cuda'<for>ddev data_devs])# incore CUDA
<block_start>variables=[X1 X2 v]+other_vars<line_sep>device=data_devs[0]<with_stmt>torch.cuda.device(device)<block_start>sync_current_stream(device)<line_sep>out=fn(*variables out=out backend=backend)<block_end><block_end><else_stmt># Out of core
# slack is high due to imprecise memory usage estimates for keops
<block_start>gpu_info=_get_gpu_info(opt slack=opt.keops_memory_slack)<line_sep>block_sizes=calc_gpu_block_sizes(gpu_info N)<line_sep># Create queues
args=[]# Arguments passed to each subprocess
<for_stmt>i,g enumerate(gpu_info)# First round of subdivision
<block_start>bwidth=block_sizes[i+1]-block_sizes[i]<if_stmt>bwidth<le>0<block_start><continue><block_end>args.append((ArgsFmmv(X1=X1.narrow(0 block_sizes[i] bwidth) X2=X2 v=v out=out.narrow(0 block_sizes[i] bwidth) other_vars=other_vars function=fn backend=backend gpu_ram=g.usable_memory) g.Id))<block_end>_start_wait_processes(_single_gpu_method args)<block_end><return>out<block_end> |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_from_stmt>octavia.api.drivers utils<as>provider_utils<import_from_stmt>octavia.controller.worker.v2.flows amphora_flows<import_from_stmt>octavia.controller.worker.v2.flows health_monitor_flows<import_from_stmt>octavia.controller.worker.v2.flows l7policy_flows<import_from_stmt>octavia.controller.worker.v2.flows l7rule_flows<import_from_stmt>octavia.controller.worker.v2.flows listener_flows<import_from_stmt>octavia.controller.worker.v2.flows load_balancer_flows<import_from_stmt>octavia.controller.worker.v2.flows member_flows<import_from_stmt>octavia.controller.worker.v2.flows pool_flows<line_sep>LB_FLOWS=load_balancer_flows.LoadBalancerFlows()<line_sep>AMP_FLOWS=amphora_flows.AmphoraFlows()<line_sep>HM_FLOWS=health_monitor_flows.HealthMonitorFlows()<line_sep>L7_POLICY_FLOWS=l7policy_flows.L7PolicyFlows()<line_sep>L7_RULES_FLOWS=l7rule_flows.L7RuleFlows()<line_sep>LISTENER_FLOWS=listener_flows.ListenerFlows()<line_sep>M_FLOWS=member_flows.MemberFlows()<line_sep>P_FLOWS=pool_flows.PoolFlows()<def_stmt>get_create_load_balancer_flow topology listeners=<none><block_start><return>LB_FLOWS.get_create_load_balancer_flow(topology listeners=listeners)<block_end><def_stmt>get_delete_load_balancer_flow lb<block_start><return>LB_FLOWS.get_delete_load_balancer_flow(lb)<block_end><def_stmt>get_listeners_on_lb db_lb<block_start>"""Get a list of the listeners on a load balancer.
:param db_lb: A load balancer database model object.
:returns: A list of provider dict format listeners.
"""<line_sep>listener_dicts=[]<for_stmt>listener db_lb.listeners<block_start>prov_listener=provider_utils.db_listener_to_provider_listener(listener)<line_sep>listener_dicts.append(prov_listener.to_dict())<block_end><return>listener_dicts<block_end><def_stmt>get_pools_on_lb db_lb<block_start>"""Get a list of the pools on a load balancer.
:param db_lb: A load balancer database model object.
:returns: A list of provider dict format pools.
"""<line_sep>pool_dicts=[]<for_stmt>pool db_lb.pools<block_start>prov_pool=provider_utils.db_pool_to_provider_pool(pool)<line_sep>pool_dicts.append(prov_pool.to_dict())<block_end><return>pool_dicts<block_end><def_stmt>get_cascade_delete_load_balancer_flow lb listeners=() pools=()<block_start><return>LB_FLOWS.get_cascade_delete_load_balancer_flow(lb listeners pools)<block_end><def_stmt>get_update_load_balancer_flow <block_start><return>LB_FLOWS.get_update_load_balancer_flow()<block_end><def_stmt>get_create_amphora_flow <block_start><return>AMP_FLOWS.get_create_amphora_flow()<block_end><def_stmt>get_delete_amphora_flow amphora retry_attempts=<none> retry_interval=<none><block_start><return>AMP_FLOWS.get_delete_amphora_flow(amphora retry_attempts retry_interval)<block_end><def_stmt>get_failover_LB_flow amps lb<block_start><return>LB_FLOWS.get_failover_LB_flow(amps lb)<block_end><def_stmt>get_failover_amphora_flow amphora_dict lb_amp_count<block_start><return>AMP_FLOWS.get_failover_amphora_flow(amphora_dict lb_amp_count)<block_end><def_stmt>cert_rotate_amphora_flow <block_start><return>AMP_FLOWS.cert_rotate_amphora_flow()<block_end><def_stmt>update_amphora_config_flow <block_start><return>AMP_FLOWS.update_amphora_config_flow()<block_end><def_stmt>get_create_health_monitor_flow <block_start><return>HM_FLOWS.get_create_health_monitor_flow()<block_end><def_stmt>get_delete_health_monitor_flow <block_start><return>HM_FLOWS.get_delete_health_monitor_flow()<block_end><def_stmt>get_update_health_monitor_flow <block_start><return>HM_FLOWS.get_update_health_monitor_flow()<block_end><def_stmt>get_create_l7policy_flow <block_start><return>L7_POLICY_FLOWS.get_create_l7policy_flow()<block_end><def_stmt>get_delete_l7policy_flow <block_start><return>L7_POLICY_FLOWS.get_delete_l7policy_flow()<block_end><def_stmt>get_update_l7policy_flow <block_start><return>L7_POLICY_FLOWS.get_update_l7policy_flow()<block_end><def_stmt>get_create_l7rule_flow 
<block_start><return>L7_RULES_FLOWS.get_create_l7rule_flow()<block_end><def_stmt>get_delete_l7rule_flow <block_start><return>L7_RULES_FLOWS.get_delete_l7rule_flow()<block_end><def_stmt>get_update_l7rule_flow <block_start><return>L7_RULES_FLOWS.get_update_l7rule_flow()<block_end><def_stmt>get_create_listener_flow <block_start><return>LISTENER_FLOWS.get_create_listener_flow()<block_end><def_stmt>get_create_all_listeners_flow <block_start><return>LISTENER_FLOWS.get_create_all_listeners_flow()<block_end><def_stmt>get_delete_listener_flow <block_start><return>LISTENER_FLOWS.get_delete_listener_flow()<block_end><def_stmt>get_update_listener_flow <block_start><return>LISTENER_FLOWS.get_update_listener_flow()<block_end><def_stmt>get_create_member_flow <block_start><return>M_FLOWS.get_create_member_flow()<block_end><def_stmt>get_delete_member_flow <block_start><return>M_FLOWS.get_delete_member_flow()<block_end><def_stmt>get_update_member_flow <block_start><return>M_FLOWS.get_update_member_flow()<block_end><def_stmt>get_batch_update_members_flow old_members new_members updated_members<block_start><return>M_FLOWS.get_batch_update_members_flow(old_members new_members updated_members)<block_end><def_stmt>get_create_pool_flow <block_start><return>P_FLOWS.get_create_pool_flow()<block_end><def_stmt>get_delete_pool_flow <block_start><return>P_FLOWS.get_delete_pool_flow()<block_end><def_stmt>get_update_pool_flow <block_start><return>P_FLOWS.get_update_pool_flow()<block_end> |
<import_from_stmt>django.contrib messages<import_from_stmt>django.http HttpResponseRedirect<import_from_stmt>django.utils.functional cached_property<import_from_stmt>django.utils.translation ugettext<as>_<import_from_stmt>django.utils.translation ugettext_noop ugettext_lazy<import_from_stmt>couchdbkit ResourceNotFound<import_from_stmt>memoized memoized<import_from_stmt>corehq.apps.fixtures.dispatcher FixtureInterfaceDispatcher<import_from_stmt>corehq.apps.fixtures.models FixtureDataType _id_from_doc<import_from_stmt>corehq.apps.fixtures.views FixtureViewMixIn fixtures_home<import_from_stmt>corehq.apps.reports.filters.base BaseSingleOptionFilter<import_from_stmt>corehq.apps.reports.generic GenericReportView GenericTabularReport<class_stmt>FixtureInterface(FixtureViewMixIn GenericReportView)<block_start>base_template='fixtures/fixtures_base.html'<line_sep>asynchronous=<false><line_sep>dispatcher=FixtureInterfaceDispatcher<line_sep>exportable=<false><line_sep>needs_filters=<false><block_end><class_stmt>FixtureSelectFilter(BaseSingleOptionFilter)<block_start>slug="table_id"<line_sep>label=""<line_sep>placeholder="place"<line_sep>default_text=ugettext_lazy("Select a Table")<line_sep>@property<def_stmt>selected self# ko won't display default selected-value as it should, display default_text instead
<block_start><return>""<block_end>@property@memoized<def_stmt>fixtures self<block_start><return>sorted(FixtureDataType.by_domain(self.domain) key=<lambda>t:t.tag.lower())<block_end>@property@memoized<def_stmt>options self<block_start><return>[(_id_from_doc(f) f.tag)<for>f self.fixtures]<block_end><block_end><class_stmt>FixtureViewInterface(GenericTabularReport FixtureInterface)<block_start>name=ugettext_noop("View Tables")<line_sep>slug="view_lookup_tables"<line_sep>report_template_path='fixtures/view_table.html'<line_sep>fields=['corehq.apps.fixtures.interface.FixtureSelectFilter']<line_sep>@property<def_stmt>view_response self<block_start><if_stmt><not>self.has_tables()<block_start>messages.info(self.request _("You don't have any tables defined yet - create tables to view them."))<line_sep><return>HttpResponseRedirect(fixtures_home(self.domain))<block_end><else_stmt><block_start><return>super(FixtureViewInterface self).view_response<block_end><block_end>@property<def_stmt>report_context self<block_start><assert_stmt>self.has_tables()<if_stmt><not>self.request.GET.get("table_id" <none>)<block_start><return>{"table_not_selected":<true>}<block_end><try_stmt><block_start>context=super(FixtureViewInterface self).report_context<block_end><except_stmt>ResourceNotFound<block_start><return>{"table_not_selected":<true>}<block_end># Build javascript options for DataTables
report_table=context['report_table']<line_sep>headers=report_table.get('headers')<line_sep>data_tables_options={'slug':self.context['report']['slug'] 'defaultRows':report_table.get('default_rows' 10) 'startAtRowNum':report_table.get('start_at_row' 0) 'showAllRowsOption':report_table.get('show_all_rows') 'autoWidth':headers.auto_width }<if_stmt>headers.render_aoColumns<block_start>data_tables_options.update({'aoColumns':headers.render_aoColumns })<block_end><if_stmt>headers.custom_sort<block_start>data_tables_options.update({'customSort':headers.custom_sort })<block_end>pagination=context['report_table'].get('pagination' {})<if_stmt>pagination.get('is_on')<block_start>data_tables_options.update({'ajaxSource':pagination.get('source') 'ajaxParams':pagination.get('params') })<block_end>left_col=context['report_table'].get('left_col' {})<if_stmt>left_col.get('is_fixed')<block_start>data_tables_options.update({'fixColumns':<true> 'fixColsNumLeft':left_col['fixed'].get('num') 'fixColsWidth':left_col['fixed'].get('width') })<block_end>context.update({"selected_table":self.table.get("table_id" "") 'data_tables_options':data_tables_options })<if_stmt>self.lookup_table<block_start>context.update({"table_description":self.lookup_table.description })<block_end><return>context<block_end>@memoized<def_stmt>has_tables self<block_start><return><true><if>list(FixtureDataType.by_domain(self.domain))<else><false><block_end>@property@memoized<def_stmt>table self<block_start><import_from_stmt>corehq.apps.fixtures.views data_table<if_stmt>self.has_tables()<and>self.request.GET.get("table_id" <none>)<block_start><return>data_table(self.request self.domain)<block_end><else_stmt><block_start><return>{"headers":<none> "rows":<none>}<block_end><block_end>@cached_property<def_stmt>lookup_table self<block_start><if_stmt>self.has_tables()<and>self.request.GET.get("table_id" 
<none>)<block_start><return>FixtureDataType.get(self.request.GET['table_id'])<block_end><return><none><block_end>@property<def_stmt>headers self<block_start><return>self.table["headers"]<block_end>@property<def_stmt>rows self<block_start><return>self.table["rows"]<block_end><block_end><class_stmt>FixtureEditInterface(FixtureInterface)<block_start>name=ugettext_noop("Manage Tables")<line_sep>slug="edit_lookup_tables"<line_sep>report_template_path='fixtures/manage_tables.html'<line_sep>@property<def_stmt>report_context self<block_start>context=super(FixtureEditInterface self).report_context<line_sep>context.update(types=self.data_types)<line_sep><return>context<block_end>@property@memoized<def_stmt>data_types self<block_start><return>list(FixtureDataType.by_domain(self.domain))<block_end><block_end> |
# -*- coding: utf-8 -*-
#
# Copyright (C) tkornuta, IBM Corporation 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__="<NAME>"<import_stmt>unittest<import_from_stmt>unittest.mock MagicMock patch<import_from_stmt>os path<import_from_stmt>ptp.components.mixins.io check_file_existence<import_from_stmt>ptp.components.tasks.image_text_to_class.gqa GQA<import_from_stmt>ptp.configuration.config_interface ConfigInterface<class_stmt>TestGQA(unittest.TestCase)<block_start><def_stmt>test_training_0_split self<block_start>"""
Tests the training_0 split.
..note:
Test on real data is performed only if adequate json source file is found.
"""<line_sep># Empty config.
config=ConfigInterface()<line_sep>config.add_config_params({"gqa_training_0":{"split":"training_0" "globals":{"image_height":"gqa_image_height" "image_width":"gqa_image_width"}}})<line_sep># Check the existence of test set.
<if_stmt><false>#check_file_existence(path.expanduser('~/data/gqa/questions1.2/train_all_questions'),'train_all_questions_0.json'):
# Create object.
<block_start>task=GQA("gqa_training_0" config["gqa_training_0"])<line_sep># Check dataset size.
self.assertEqual(len(task) 1430536)<line_sep># Get sample.
sample=task[0]<block_end><else_stmt><block_start>processed_dataset_content=[{'sample_ids':'07333408' 'image_ids':'2375429' 'questions':'What is on the white wall?' 'answers':'pipe' 'full_answers':'The pipe is on the wall.'}]<line_sep># Mock up the load_dataset method.
<with_stmt>patch("ptp.components.tasks.image_text_to_class.gqa.GQA.load_dataset" MagicMock(side_effect=[processed_dataset_content]))<block_start>task=GQA("gqa_training_0" config["gqa_training_0"])<block_end># Mock up the get_image method.
<with_stmt>patch("ptp.components.tasks.image_text_to_class.gqa.GQA.get_image" MagicMock(side_effect=["0"]))<block_start>sample=task[0]<block_end><block_end># Check sample.
self.assertEqual(sample['indices'] 0)<line_sep>self.assertEqual(sample['sample_ids'] '07333408')<line_sep>self.assertEqual(sample['image_ids'] '2375429')<line_sep>self.assertEqual(sample['questions'] 'What is on the white wall?')<line_sep>self.assertEqual(sample['answers'] 'pipe')<line_sep>self.assertEqual(sample['full_answers'] 'The pipe is on the wall.')<block_end><def_stmt>test_validation_split self<block_start>"""
Tests the validation split.
..note:
Test on real data is performed only if adequate json source file is found.
"""<line_sep># Empty config.
config=ConfigInterface()<line_sep>config.add_config_params({"gqa_validation":{"split":"validation" "globals":{"image_height":"gqa_image_height" "image_width":"gqa_image_width"}}})<line_sep># Check the existence of test set.
<if_stmt><false>#check_file_existence(path.expanduser('~/data/gqa/questions1.2'),'val_all_questions.json'):
# Create object.
<block_start>task=GQA("gqa_validation" config["gqa_validation"])<line_sep># Check dataset size.
self.assertEqual(len(task) 2011853)<line_sep># Get sample.
sample=task[0]<block_end><else_stmt><block_start>processed_dataset_content=[{'sample_ids':'05451384' 'image_ids':'2382986' 'questions':'Are there blankets under the brown cat?' 'answers':'no' 'full_answers':'No, there is a towel under the cat.'}]<line_sep># Mock up the load_dataset method.
<with_stmt>patch("ptp.components.tasks.image_text_to_class.gqa.GQA.load_dataset" MagicMock(side_effect=[processed_dataset_content]))<block_start>task=GQA("gqa_validation" config["gqa_validation"])<block_end># Mock up the get_image method.
<with_stmt>patch("ptp.components.tasks.image_text_to_class.gqa.GQA.get_image" MagicMock(side_effect=["0"]))<block_start>sample=task[0]<block_end><block_end># Check sample.
self.assertEqual(sample['indices'] 0)<line_sep>self.assertEqual(sample['sample_ids'] '05451384')<line_sep>self.assertEqual(sample['image_ids'] '2382986')<line_sep>self.assertEqual(sample['questions'] 'Are there blankets under the brown cat?')<line_sep>self.assertEqual(sample['answers'] 'no')<line_sep>self.assertEqual(sample['full_answers'] 'No, there is a towel under the cat.')<block_end><def_stmt>test_test_dev_split self<block_start>"""
Tests the test_dev split.
..note:
Test on real data is performed only if adequate json source file is found.
"""<line_sep># Empty config.
config=ConfigInterface()<line_sep>config.add_config_params({"gqa_testdev":{"split":"test_dev" "globals":{"image_height":"gqa_image_height" "image_width":"gqa_image_width"}}})<line_sep># Check the existence of test set.
<if_stmt><false>#check_file_existence(path.expanduser('~/data/gqa/questions1.2'),'testdev_all_questions.json'):
# Create object.
<block_start>task=GQA("gqa_testdev" config["gqa_testdev"])<line_sep># Check dataset size.
self.assertEqual(len(task) 172174)<line_sep># Get sample.
sample=task[0]<block_end><else_stmt><block_start>processed_dataset_content=[{'sample_ids':'20968379' 'image_ids':'n288870' 'questions':'Do the shorts have dark color?' 'answers':'yes' 'full_answers':'Yes, the shorts are dark.'}]<line_sep># Mock up the load_dataset method.
<with_stmt>patch("ptp.components.tasks.image_text_to_class.gqa.GQA.load_dataset" MagicMock(side_effect=[processed_dataset_content]))<block_start>task=GQA("gqa_testdev" config["gqa_testdev"])<block_end># Mock up the get_image method.
<with_stmt>patch("ptp.components.tasks.image_text_to_class.gqa.GQA.get_image" MagicMock(side_effect=["0"]))<block_start>sample=task[0]<block_end><block_end># Check sample.
self.assertEqual(sample['indices'] 0)<line_sep>self.assertEqual(sample['sample_ids'] '20968379')<line_sep>self.assertEqual(sample['image_ids'] 'n288870')<line_sep>self.assertEqual(sample['questions'] 'Do the shorts have dark color?')<line_sep>self.assertEqual(sample['answers'] 'yes')<line_sep>self.assertEqual(sample['full_answers'] 'Yes, the shorts are dark.')<block_end><def_stmt>test_test_split self<block_start>"""
Tests the test split.
..note:
Test on real data is performed only if adequate json source file is found.
"""<line_sep># Empty config.
config=ConfigInterface()<line_sep>config.add_config_params({"gqa_test":{"split":"test" "globals":{"image_height":"gqa_image_height" "image_width":"gqa_image_width"}}})<line_sep># Check the existence of test set.
<if_stmt><false>#check_file_existence(path.expanduser('~/data/gqa/questions1.2'),'test_all_questions.json'):
# Create object.
<block_start>task=GQA("gqa_test" config["gqa_test"])<line_sep># Check dataset size.
self.assertEqual(len(task) 1340048)<line_sep># Get sample.
sample=task[0]<block_end><else_stmt><block_start>processed_dataset_content=[{'sample_ids':'201971873' 'image_ids':'n15740' 'questions':'Is the blanket to the right of a pillow?' 'answers':'<UNK>' 'full_answers':'<UNK>'}]<line_sep># Mock up the load_dataset method.
<with_stmt>patch("ptp.components.tasks.image_text_to_class.gqa.GQA.load_dataset" MagicMock(side_effect=[processed_dataset_content]))<block_start>task=GQA("gqa_test" config["gqa_test"])<block_end># Mock up the get_image method.
<with_stmt>patch("ptp.components.tasks.image_text_to_class.gqa.GQA.get_image" MagicMock(side_effect=["0"]))<block_start>sample=task[0]<block_end><block_end># Check sample.
self.assertEqual(sample['indices'] 0)<line_sep>self.assertEqual(sample['sample_ids'] '201971873')<line_sep>self.assertEqual(sample['image_ids'] 'n15740')<line_sep>self.assertEqual(sample['questions'] 'Is the blanket to the right of a pillow?')<line_sep>self.assertEqual(sample['answers'] '<UNK>')<line_sep>self.assertEqual(sample['full_answers'] '<UNK>')<block_end><block_end>#if __name__ == "__main__":
# unittest.main()
|
#
# entry for gunicorn
#
<import_from_stmt>nntpchan.app app<import_from_stmt>nntpchan viewsp<line_sep> |
<def_stmt>spiral_matrix size<block_start><pass><block_end> |
<import_stmt>numpy<as>np<import_from_stmt>pyquil Program<import_from_stmt>pyquil.api QuantumComputer get_qc<import_from_stmt>grove.alpha.jordan_gradient.gradient_utils binary_float_to_decimal_float measurements_to_bf <import_from_stmt>grove.alpha.phaseestimation.phase_estimation phase_estimation<def_stmt>gradient_program f_h:float precision:int<arrow>Program<block_start>"""
Gradient estimation via Jordan's algorithm (10.1103/PhysRevLett.95.050501).
:param f_h: Oracle output at perturbation h.
:param precision: Bit precision of gradient.
:return: Quil program to estimate gradient of f.
"""<line_sep># encode oracle values into phase
phase_factor=np.exp(1.0j<times>2<times>np.pi<times>abs(f_h))<line_sep>U=np.array([[phase_factor 0] [0 phase_factor]])<line_sep>p_gradient=phase_estimation(U precision)<line_sep><return>p_gradient<block_end><def_stmt>estimate_gradient f_h:float precision:int gradient_max:int=1 n_measurements:int=50 qc:QuantumComputer=<none><arrow>float<block_start>"""
Estimate the gradient using function evaluation at perturbation, h.
:param f_h: Oracle output at perturbation h.
:param precision: Bit precision of gradient.
:param gradient_max: OOM estimate of largest gradient value.
:param n_measurements: Number of times to measure system.
:param qc: The QuantumComputer object.
:return: Decimal estimate of gradient.
"""<line_sep># scale f_h by range of values gradient can take on
f_h<augmul>1./gradient_max<line_sep># generate gradient program
perturbation_sign=np.sign(f_h)<line_sep>p_gradient=gradient_program(f_h precision)<line_sep># run gradient program
<if_stmt>qc<is><none><block_start>qc=get_qc(f"{len(p_gradient.get_qubits())}q-qvm")<block_end>p_gradient.wrap_in_numshots_loop(n_measurements)<line_sep>executable=qc.compiler.native_quil_to_executable(p_gradient)<line_sep>measurements=qc.run(executable)<line_sep># summarize measurements
bf_estimate=perturbation_sign<times>measurements_to_bf(measurements)<line_sep>bf_explicit='{0:.16f}'.format(bf_estimate)<line_sep>deci_estimate=binary_float_to_decimal_float(bf_explicit)<line_sep># rescale gradient
deci_estimate<augmul>gradient_max<line_sep><return>deci_estimate<block_end> |
<import_from_stmt>torch randn<import_from_stmt>torch.nn Conv2d<import_from_stmt>backpack extend<def_stmt>data_conv2d device="cpu"<block_start>N,Cin,Hin,Win=100 10 32 32<line_sep>Cout,KernelH,KernelW=25 5 5<line_sep>X=randn(N Cin Hin Win requires_grad=<true> device=device)<line_sep>module=extend(Conv2d(Cin Cout (KernelH KernelW))).to(device=device)<line_sep>out=module(X)<line_sep>Hout=Hin-(KernelH-1)<line_sep>Wout=Win-(KernelW-1)<line_sep>vin=randn(N Cout Hout Wout device=device)<line_sep>vout=randn(N Cin Hin Win device=device)<line_sep><return>{"X":X "module":module "output":out "vout_ag":vout "vout_bp":vout.view(N -1 1) "vin_ag":vin "vin_bp":vin.view(N -1 1) }<block_end> |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>os<import_stmt>subprocess<import_stmt>unittest<class_stmt>TestApp(unittest.TestCase)<block_start>"""
This is a Python test that allows to do testing of arbitrary applications
The main purpose of using this approach is to provide an ability to run tests on Windows
(which doesn't support sh_test).
The command is passed to this test using `CMD` environment variable.
"""<def_stmt>test_app self<block_start>self.assertEquals(0 subprocess.call(os.environ["CMD"].split(" ")))<block_end><block_end> |
# Copyright 2018-current ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""block builder for version 0.5 block"""<import_from_stmt>loopchain.blockchain.blocks BlockProverType<import_from_stmt>loopchain.blockchain.blocks.v0_4 BlockBuilder<import_from_stmt>loopchain.blockchain.blocks.v0_5 BlockHeader BlockBody BlockProver<import_from_stmt>loopchain.blockchain.types Hash32<class_stmt>BlockBuilder(BlockBuilder)<block_start>version=BlockHeader.version<line_sep>BlockHeaderClass=BlockHeader<line_sep>BlockBodyClass=BlockBody<def_stmt>_build_transactions_hash self<block_start><if_stmt><not>self.transactions<block_start><return>Hash32.empty()<block_end>block_prover=BlockProver(self.transactions.keys() BlockProverType.Transaction)<line_sep><return>block_prover.get_proof_root()<block_end><block_end> |
"""
At infinite membrane resistance, the Neuron does not leak any current out,
and hence it starts firing with the slightest input current,
This shifts the transfer function towards 0, similar to ReLU activation (centered at 0).
Also, when there is minimal refractory time, the neuron can keep firing
at a high input current which avoids the saturation.
"""<line_sep> |
"""Matplotlib separation plot."""<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_from_stmt>...plot_utils _scale_fig_size<import_from_stmt>. backend_kwarg_defaults backend_show create_axes_grid<def_stmt>plot_separation y y_hat y_hat_line label_y_hat expected_events figsize textsize color legend locs width ax plot_kwargs y_hat_line_kwargs exp_events_kwargs backend_kwargs show <block_start>"""Matplotlib separation plot."""<if_stmt>backend_kwargs<is><none><block_start>backend_kwargs={}<block_end><if_stmt>plot_kwargs<is><none><block_start>plot_kwargs={}<block_end># plot_kwargs.setdefault("color", "C0")
# if color:
plot_kwargs["color"]=color<if_stmt>y_hat_line_kwargs<is><none><block_start>y_hat_line_kwargs={}<block_end>y_hat_line_kwargs.setdefault("color" "k")<if_stmt>exp_events_kwargs<is><none><block_start>exp_events_kwargs={}<block_end>exp_events_kwargs.setdefault("color" "k")<line_sep>exp_events_kwargs.setdefault("marker" "^")<line_sep>exp_events_kwargs.setdefault("s" 100)<line_sep>exp_events_kwargs.setdefault("zorder" 2)<line_sep>backend_kwargs={**backend_kwarg_defaults() **backend_kwargs }<line_sep>(figsize *_)=_scale_fig_size(figsize textsize 1 1)<line_sep>backend_kwargs.setdefault("figsize" figsize)<line_sep>backend_kwargs["squeeze"]=<true><if_stmt>ax<is><none><block_start>_,ax=create_axes_grid(1 backend_kwargs=backend_kwargs)<block_end>idx=np.argsort(y_hat)<for_stmt>i,loc enumerate(locs)<block_start>positive=<not>y[idx][i]<eq>0<line_sep>alpha=1<if>positive<else>0.3<line_sep>ax.bar(loc 1 width=width alpha=alpha **plot_kwargs)<block_end><if_stmt>y_hat_line<block_start>ax.plot(np.linspace(0 1 len(y_hat)) y_hat[idx] label=label_y_hat **y_hat_line_kwargs)<block_end><if_stmt>expected_events<block_start>expected_events=int(np.round(np.sum(y_hat)))<line_sep>ax.scatter(y_hat[idx][len(y_hat)-expected_events-1] 0 label="Expected events" **exp_events_kwargs)<block_end><if_stmt>legend<and>(expected_events<or>y_hat_line)<block_start>handles,labels=ax.get_legend_handles_labels()<line_sep>labels_dict=dict(zip(labels handles))<line_sep>ax.legend(labels_dict.values() labels_dict.keys())<block_end>ax.set_xticks([])<line_sep>ax.set_yticks([])<line_sep>ax.set_xlim(0 1)<line_sep>ax.set_ylim(0 1)<if_stmt>backend_show(show)<block_start>plt.show()<block_end><return>ax<block_end> |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
<import_from_future_stmt> print_function<import_stmt>os importlib sys<import_from_stmt>cntk_helpers imWidthHeight nnPredict applyNonMaximaSuppression makeDirectory visualizeResults imshow<import_stmt>PARAMETERS<line_sep>####################################
# Parameters
####################################
image_set='test'# 'train', 'test'
<def_stmt>visualize_output_rois testing=<false><block_start>p=PARAMETERS.get_parameters_for_dataset()<line_sep># no need to change these parameters
boUseNonMaximaSurpression=<true><line_sep>visualizationDir=os.path.join(p.resultsDir "visualizations")<line_sep>cntkParsedOutputDir=os.path.join(p.cntkFilesDir image_set+"_parsed")<line_sep>makeDirectory(p.resultsDir)<line_sep>makeDirectory(visualizationDir)<line_sep># loop over all images and visualize
imdb=p.imdbs[image_set]<for_stmt>imgIndex range(0 imdb.num_images)<block_start>imgPath=imdb.image_path_at(imgIndex)<line_sep>imgWidth,imgHeight=imWidthHeight(imgPath)<line_sep># evaluate classifier for all rois
labels,scores=nnPredict(imgIndex cntkParsedOutputDir p.cntk_nrRois len(p.classes) <none>)<line_sep># remove the zero-padded rois
scores=scores[:len(imdb.roidb[imgIndex]['boxes'])]<line_sep>labels=labels[:len(imdb.roidb[imgIndex]['boxes'])]<line_sep># perform non-maxima surpression. note that the detected classes in the image is not affected by this.
nmsKeepIndices=[]<if_stmt>boUseNonMaximaSurpression<block_start>nmsKeepIndices=applyNonMaximaSuppression(p.nmsThreshold labels scores imdb.roidb[imgIndex]['boxes'])<line_sep>print("Non-maxima surpression kept {:4} of {:4} rois (nmsThreshold={})".format(len(nmsKeepIndices) len(labels) p.nmsThreshold))<block_end># visualize results
imgDebug=visualizeResults(imgPath labels scores imdb.roidb[imgIndex]['boxes'] p.cntk_padWidth p.cntk_padHeight p.classes nmsKeepIndices boDrawNegativeRois=<true>)<if_stmt><not>testing<block_start>imshow(imgDebug waitDuration=0 maxDim=800)<line_sep># imwrite(imgDebug, visualizationDir + "/" + str(imgIndex) + os.path.basename(imgPath))
<block_end><block_end>print("DONE.")<line_sep><return><true><block_end><if_stmt>__name__<eq>'__main__'<block_start>visualize_output_rois()<block_end> |
<import_from_stmt>.standard *<import_from_stmt>.few_shot *<line_sep> |
""" Python 3 native (desktop/mobile) OAuth 2.0 example.
This example can be run from the command line and will show you how the
OAuth 2.0 flow should be handled if you are a web based application.
Prerequisites:
* Create an SSO application at developers.eveonline.com with the scope
"esi-characters.read_blueprints.v1" and the callback URL
"https://localhost/callback/". Note: never use localhost as a callback
in released applications.
* Have a Python 3 environment available to you (possibly by using a
virtual environment: https://virtualenv.pypa.io/en/stable/).
* Run pip install -r requirements.txt with this directory as your root.
To run this example, make sure you have completed the prerequisites and then
run the following command from this directory as the root:
>>> python esi_oauth_native.py
then follow the prompts.
"""<import_stmt>base64<import_stmt>hashlib<import_stmt>secrets<import_from_stmt>shared_flow print_auth_url<import_from_stmt>shared_flow send_token_request<import_from_stmt>shared_flow handle_sso_token_response<def_stmt>main <block_start>""" Takes you through a local example of the OAuth 2.0 native flow."""<line_sep>print("This program will take you through an example OAuth 2.0 flow "<concat>"that you should be using if you are building a desktop or mobile "<concat>"application. Follow the prompts and enter the info asked for.")<line_sep># Generate the PKCE code challenge
random=base64.urlsafe_b64encode(secrets.token_bytes(32))<line_sep>m=hashlib.sha256()<line_sep>m.update(random)<line_sep>d=m.digest()<line_sep>code_challenge=base64.urlsafe_b64encode(d).decode().replace("=" "")<line_sep>client_id=input("Copy your SSO application's client ID and enter it "<concat>"here: ")<line_sep>print("\nBecause this is a desktop/mobile application, you should use "<concat>"the PKCE protocol when contacting the EVE SSO. In this case, that "<concat>"means sending a base 64 encoded sha256 hashed 32 byte string "<concat>"called a code challenge. This 32 byte string should be ephemeral "<concat>"and never stored anywhere. The code challenge string generated for "<concat>"this program is {} and the hashed code challenge is {}. \nNotice "<concat>"that the query parameter of the following URL will contain this "<concat>"code challenge.".format(random code_challenge))<line_sep>input("\nPress any key to continue:")<line_sep>print_auth_url(client_id code_challenge=code_challenge)<line_sep>auth_code=input("Copy the \"code\" query parameter and enter it here: ")<line_sep>code_verifier=random<line_sep>form_values={"grant_type":"authorization_code" "client_id":client_id "code":auth_code "code_verifier":code_verifier}<line_sep>print("\nBecause this is using PCKE protocol, your application never has "<concat>"to share its secret key with the SSO. Instead, this next request "<concat>"will send the base 64 encoded unhashed value of the code "<concat>"challenge, called the code verifier, in the request body so EVE's "<concat>"SSO knows your application was not tampered with since the start "<concat>"of this process. The code verifier generated for this program is "<concat>"{} derived from the raw string {}".format(code_verifier random))<line_sep>input("\nPress any key to continue:")<line_sep>res=send_token_request(form_values)<line_sep>handle_sso_token_response(res)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
<import_from_future_stmt> print_function<import_stmt>sys<import_stmt>random<import_stmt>Pyro4<if_stmt>sys.version_info<l>(3 0)<block_start>input=raw_input<block_end>uri=input("Enter the URI of the server object: ")<with_stmt>Pyro4.Proxy(uri)<as>proxy<block_start>print("currently allocated resources:" proxy.list())<line_sep>name1=hex(random.randint(0 999999))[-4:]<line_sep>name2=hex(random.randint(0 999999))[-4:]<line_sep>print("allocating resource..." name1)<line_sep>proxy.allocate(name1)<line_sep>print("allocating resource..." name2)<line_sep>proxy.allocate(name2)<line_sep>input("\nhit Enter now to continue normally or ^C/break to abort the connection forcefully:")<line_sep>print("free resources normally...")<line_sep>proxy.free(name1)<line_sep>proxy.free(name2)<line_sep>print("allocated resources:" proxy.list())<block_end>print("done.")<line_sep> |
<import_stmt>brownie<def_stmt>test_set_minter_admin_only accounts token<block_start><with_stmt>brownie.reverts("dev: admin only")<block_start>token.set_minter(accounts[2] {"from":accounts[1]})<block_end><block_end><def_stmt>test_set_admin_admin_only accounts token<block_start><with_stmt>brownie.reverts("dev: admin only")<block_start>token.set_admin(accounts[2] {"from":accounts[1]})<block_end><block_end><def_stmt>test_set_name_admin_only accounts token<block_start><with_stmt>brownie.reverts("Only admin is allowed to change name")<block_start>token.set_name("Foo Token" "FOO" {"from":accounts[1]})<block_end><block_end><def_stmt>test_set_minter accounts token<block_start>token.set_minter(accounts[1] {"from":accounts[0]})<assert_stmt>token.minter()<eq>accounts[1]<block_end><def_stmt>test_set_admin accounts token<block_start>token.set_admin(accounts[1] {"from":accounts[0]})<assert_stmt>token.admin()<eq>accounts[1]<block_end><def_stmt>test_set_name accounts token<block_start>token.set_name("Foo Token" "FOO" {"from":accounts[0]})<assert_stmt>token.name()<eq>"Foo Token"<assert_stmt>token.symbol()<eq>"FOO"<block_end> |
# coding: utf-8
<class_stmt>RurouniException(Exception)<block_start><pass><block_end><class_stmt>ConfigException(RurouniException)<block_start><pass><block_end><class_stmt>TokenBucketFull(RurouniException)<block_start><pass><block_end><class_stmt>UnexpectedMetric(RurouniException)<block_start><pass><block_end> |
<class_stmt>ControlStyles(Enum IComparable IFormattable IConvertible)<block_start>"""
Specifies the style and behavior of a control.
enum (flags) ControlStyles,values: AllPaintingInWmPaint (8192),CacheText (16384),ContainerControl (1),DoubleBuffer (65536),EnableNotifyMessage (32768),FixedHeight (64),FixedWidth (32),Opaque (4),OptimizedDoubleBuffer (131072),ResizeRedraw (16),Selectable (512),StandardClick (256),StandardDoubleClick (4096),SupportsTransparentBackColor (2048),UserMouse (1024),UserPaint (2),UseTextForAccessibility (262144)
"""<def_stmt>__eq__ self *args<block_start>""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """<line_sep><pass><block_end><def_stmt>__format__ self *args<block_start>""" __format__(formattable: IFormattable,format: str) -> str """<line_sep><pass><block_end><def_stmt>__ge__ self *args<block_start><pass><block_end><def_stmt>__gt__ self *args<block_start><pass><block_end><def_stmt>__init__ self *args<block_start>""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """<line_sep><pass><block_end><def_stmt>__le__ self *args<block_start><pass><block_end><def_stmt>__lt__ self *args<block_start><pass><block_end><def_stmt>__ne__ self *args<block_start><pass><block_end><def_stmt>__reduce_ex__ self *args<block_start><pass><block_end><def_stmt>__str__ self *args<block_start><pass><block_end>AllPaintingInWmPaint=<none><line_sep>CacheText=<none><line_sep>ContainerControl=<none><line_sep>DoubleBuffer=<none><line_sep>EnableNotifyMessage=<none><line_sep>FixedHeight=<none><line_sep>FixedWidth=<none><line_sep>Opaque=<none><line_sep>OptimizedDoubleBuffer=<none><line_sep>ResizeRedraw=<none><line_sep>Selectable=<none><line_sep>StandardClick=<none><line_sep>StandardDoubleClick=<none><line_sep>SupportsTransparentBackColor=<none><line_sep>UserMouse=<none><line_sep>UserPaint=<none><line_sep>UseTextForAccessibility=<none><line_sep>value__=<none><block_end> |
# encode = utf-8
<import_stmt>os<import_stmt>sys<import_stmt>re<line_sep>ta_name='SA-ctf_scoreboard'<line_sep>ta_lib_name='sa_ctf_scoreboard'<line_sep>pattern=re.compile(r"[\\/]etc[\\/]apps[\\/][^\\/]+[\\/]bin[\\/]?$")<line_sep>new_paths=[path<for>path sys.path<if><not>pattern.search(path)<or>ta_name<in>path]<line_sep>new_paths.insert(0 os.path.sep.join([os.path.dirname(__file__) ta_lib_name]))<line_sep>sys.path=new_paths<line_sep> |
<import_stmt>mimetypes<import_stmt>re<import_from_stmt>urllib.parse quote<import_from_stmt>seleniumwire.thirdparty.mitmproxy.net.http headers<def_stmt>encode head l<block_start>k=head.get("content-type")<if_stmt>k<block_start>k=headers.parse_content_type(k)<if_stmt>k<is><not><none><block_start><try_stmt><block_start>boundary=k[2]["boundary"].encode("ascii")<line_sep>boundary=quote(boundary)<block_end><except_stmt>(KeyError UnicodeError)<block_start><return>b""<block_end>hdrs=[]<for_stmt>key,value l<block_start>file_type=mimetypes.guess_type(str(key))[0]<or>"text/plain; charset=utf-8"<if_stmt>key<block_start>hdrs.append(b"--%b"%boundary.encode('utf-8'))<line_sep>disposition=b'form-data; name="%b"'%key<line_sep>hdrs.append(b"Content-Disposition: %b"%disposition)<line_sep>hdrs.append(b"Content-Type: %b"%file_type.encode('utf-8'))<line_sep>hdrs.append(b'')<line_sep>hdrs.append(value)<block_end>hdrs.append(b'')<if_stmt>value<is><not><none># If boundary is found in value then raise ValueError
<block_start><if_stmt>re.search(rb"^--%b$"%re.escape(boundary.encode('utf-8')) value)<block_start><raise>ValueError(b"boundary found in encoded string")<block_end><block_end><block_end>hdrs.append(b"--%b--\r\n"%boundary.encode('utf-8'))<line_sep>temp=b"\r\n".join(hdrs)<line_sep><return>temp<block_end><block_end><block_end><def_stmt>decode hdrs content<block_start>"""
Takes a multipart boundary encoded string and returns list of (key, value) tuples.
"""<line_sep>v=hdrs.get("content-type")<if_stmt>v<block_start>v=headers.parse_content_type(v)<if_stmt><not>v<block_start><return>[]<block_end><try_stmt><block_start>boundary=v[2]["boundary"].encode("ascii")<block_end><except_stmt>(KeyError UnicodeError)<block_start><return>[]<block_end>rx=re.compile(br'\bname="([^"]+)"')<line_sep>r=[]<if_stmt>content<is><not><none><block_start><for_stmt>i content.split(b"--"+boundary)<block_start>parts=i.splitlines()<if_stmt>len(parts)<g>1<and>parts[0][0:2]<ne>b"--"<block_start>match=rx.search(parts[1])<if_stmt>match<block_start>key=match.group(1)<line_sep>value=b"".join(parts[3+parts[2:].index(b""):])<line_sep>r.append((key value))<block_end><block_end><block_end><block_end><return>r<block_end><return>[]<block_end> |
<import_stmt>numpy<as>np<import_from_stmt>nlpaug.model.audio Audio<class_stmt>Normalization(Audio)<block_start><def_stmt>manipulate self data method start_pos end_pos<block_start>aug_data=data.copy()<if_stmt>method<eq>'minmax'<block_start>new_data=self._min_max(aug_data[start_pos:end_pos])<block_end><elif_stmt>method<eq>'max'<block_start>new_data=self._max(aug_data[start_pos:end_pos])<block_end><elif_stmt>method<eq>'standard'<block_start>new_data=self._standard(aug_data[start_pos:end_pos])<block_end>aug_data[start_pos:end_pos]=new_data<line_sep><return>aug_data<block_end><def_stmt>get_support_methods self<block_start><return>['minmax' 'max' 'standard']<block_end><def_stmt>_standard self data<block_start><return>(data-np.mean(data))/np.std(data)<block_end><def_stmt>_max self data<block_start><return>data/np.amax(np.abs(data))<block_end><def_stmt>_min_max self data<block_start>lower=np.amin(np.abs(data))<line_sep><return>(data-lower)/(np.amax(np.abs(data))-lower)<block_end><block_end> |
<import_stmt>numpy<as>np<import_from_stmt>numpy.testing assert_allclose<import_from_stmt>robogym.envs.rearrange.common.utils get_mesh_bounding_box make_block make_blocks_and_targets <import_from_stmt>robogym.envs.rearrange.simulation.composer RandomMeshComposer<import_from_stmt>robogym.mujoco.mujoco_xml MujocoXML<def_stmt>_get_default_xml <block_start>xml_source="""
<mujoco>
<asset>
<material name="block_mat" specular="0" shininess="0.5" reflectance="0" rgba="1 0 0 1"></material>
</asset>
</mujoco>
"""<line_sep>xml=MujocoXML.from_string(xml_source)<line_sep><return>xml<block_end><def_stmt>test_mesh_composer <block_start><for_stmt>path [<none> RandomMeshComposer.GEOM_ASSET_PATH RandomMeshComposer.GEOM_ASSET_PATH ]<block_start>composer=RandomMeshComposer(mesh_path=path)<for_stmt>num_geoms range(1 6)<block_start>xml=_get_default_xml()<line_sep>composer.reset()<line_sep>xml.append(composer.sample("object0" num_geoms object_size=0.05))<line_sep>sim=xml.build()<assert_stmt>len(sim.model.geom_names)<eq>num_geoms<line_sep>pos,size=get_mesh_bounding_box(sim "object0")<assert_stmt>np.isclose(np.max(size) 0.05)<line_sep>pos2,size2=composer.get_bounding_box(sim "object0")<assert_stmt>np.allclose(pos pos2)<assert_stmt>np.allclose(size size2)<block_end><block_end><block_end><def_stmt>test_block_object <block_start>xml=_get_default_xml()<line_sep>xml.append(make_block("object0" object_size=np.ones(3)<times>0.05))<line_sep>sim=xml.build()<assert_stmt>len(sim.model.geom_size)<eq>1<line_sep>assert_allclose(sim.model.geom_size 0.05)<block_end><def_stmt>test_blocks_and_targets <block_start>xml=_get_default_xml()<for_stmt>obj_xml,target_xml make_blocks_and_targets(num_objects=5 block_size=0.05)<block_start>xml.append(obj_xml)<line_sep>xml.append(target_xml)<block_end>sim=xml.build()<assert_stmt>len(sim.model.geom_size)<eq>10<line_sep>assert_allclose(sim.model.geom_size 0.05)<block_end> |
<import_stmt>functools<import_from_stmt>unittest TestCase<import_from_stmt>common.base_game_spec BaseGameSpec<import_from_stmt>common.network_helpers create_network<import_from_stmt>games.tic_tac_toe TicTacToeGameSpec<import_from_stmt>games.tic_tac_toe_x TicTacToeXGameSpec<import_from_stmt>techniques.train_policy_gradient train_policy_gradients<class_stmt>_VerySimpleGameSpec(BaseGameSpec)<block_start><def_stmt>new_board self<block_start><return>[0 0]<block_end><def_stmt>apply_move self board_state move side<block_start>board_state[move]=side<line_sep><return>board_state<block_end><def_stmt>has_winner self board_state<block_start><return>board_state[0]<block_end><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>available_moves self board_state<block_start><return>[i<for>i,x enumerate(board_state)<if>x<eq>0]<block_end><def_stmt>board_dimensions self<block_start><return>2 <block_end><block_end><class_stmt>TestTrainPolicyGradient(TestCase)<block_start><def_stmt>test_learn_simple_game self<block_start>game_spec=_VerySimpleGameSpec()<line_sep>create_model_func=functools.partial(create_network 2 (4 ))<line_sep>variables,win_rate=train_policy_gradients(game_spec create_model_func <none> learn_rate=0.1 number_of_games=1000 print_results_every=100 batch_size=20 randomize_first_player=<false>)<line_sep>self.assertGreater(win_rate 0.9)<block_end><def_stmt>test_tic_tac_toe self<block_start>game_spec=TicTacToeGameSpec()<line_sep>create_model_func=functools.partial(create_network game_spec.board_squares() (100 100 100 ))<line_sep>variables,win_rate=train_policy_gradients(game_spec create_model_func <none> learn_rate=1e-4 number_of_games=60000 print_results_every=1000 batch_size=100 randomize_first_player=<false>)<line_sep>self.assertGreater(win_rate 0.4)<block_end><block_end> |
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""
test_sanity_bundle_augmentation.py
Unittest for bundle augmentation.
"""<import_from_future_stmt> absolute_import<import_stmt>sys<import_stmt>unittest<import_from_stmt>ydk.services CRUDService<import_from_stmt>ydk.providers NetconfServiceProvider<import_from_stmt>ydk.models.augmentation ietf_aug_base_1<import_from_stmt>ydk.models.augmentation ietf_aug_base_2<import_from_stmt>test_utils assert_with_error<import_from_stmt>test_utils ParametrizedTestCase<import_from_stmt>test_utils get_device_info<class_stmt>SanityYang(unittest.TestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.ncc=NetconfServiceProvider(cls.hostname cls.username cls.password cls.port cls.protocol cls.on_demand cls.common_cache cls.timeout)<line_sep>cls.crud=CRUDService()<block_end><def_stmt>setUp self<block_start>self.crud.delete(self.ncc ietf_aug_base_1.Cpython())<line_sep>self.crud.delete(self.ncc ietf_aug_base_2.Cpython())<block_end><def_stmt>tearDown self<block_start>self.crud.delete(self.ncc ietf_aug_base_1.Cpython())<line_sep>self.crud.delete(self.ncc ietf_aug_base_2.Cpython())<block_end><def_stmt>test_aug_base_1 self<block_start>cpython=ietf_aug_base_1.Cpython()<line_sep>cpython.doc.ydktest_aug_1.aug_one='aug one'<line_sep>cpython.doc.ydktest_aug_2.aug_two='aug two'<line_sep>cpython.doc.ydktest_aug_4.aug_four='aug four'<line_sep>cpython.lib.ydktest_aug_1.ydktest_aug_nested_1.aug_one='aug one'<line_sep>cpython.lib.ydktest_aug_2.ydktest_aug_nested_2.aug_two='aug two'<line_sep>cpython.lib.ydktest_aug_4.ydktest_aug_nested_4.aug_four='aug four'<line_sep>cpython.doc.disutils.four_aug_list.enabled=<true><line_sep>item1=cpython.doc.disutils.four_aug_list.Ldata()<line_sep>item2=cpython.doc.disutils.four_aug_list.Ldata()<line_sep>item1.name,item1.number='one' 1<line_sep>item2.name,item1.number='two' 2<line_sep>self.crud.create(self.ncc cpython)<line_sep>cpython_read=self.crud.read(self.ncc ietf_aug_base_1.Cpython())<line_sep>self.assertEqual(cpython cpython_read)<block_end><def_stmt>test_aug_base_2 
self<block_start>cpython=ietf_aug_base_2.Cpython()<line_sep>cpython.tools.aug_four='aug four'<line_sep>self.crud.create(self.ncc cpython)<line_sep>cpython_read=self.crud.read(self.ncc ietf_aug_base_2.Cpython())<line_sep>self.assertEqual(cpython cpython_read)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>device,non_demand,common_cache,timeout=get_device_info()<line_sep>suite=unittest.TestSuite()<line_sep>suite.addTest(ParametrizedTestCase.parametrize(SanityYang device=device non_demand=non_demand common_cache=common_cache timeout=timeout))<line_sep>ret=<not>unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()<line_sep>sys.exit(ret)<block_end> |
<import_stmt>vaex<import_stmt>os<line_sep># Open the main data
taxi_path='s3://vaex/taxi/yellow_taxi_2012_zones.hdf5?anon=true'<line_sep># override the path, e.g. $ export TAXI_PATH=/data/taxi/yellow_taxi_2012_zones.hdf5
taxi_path=os.environ.get('TAXI_PATH' taxi_path)<line_sep>df_original=vaex.open(taxi_path)<line_sep># Make sure the data is cached locally
used_columns=['pickup_longitude' 'pickup_latitude' 'dropoff_longitude' 'dropoff_latitude' 'total_amount' 'trip_duration_min' 'trip_speed_mph' 'pickup_hour' 'pickup_day' 'dropoff_borough' 'dropoff_zone' 'pickup_borough' 'pickup_zone']<for_stmt>col used_columns<block_start>print(f'Making sure column "{col}" is cached...')<line_sep>df_original.nop(col progress=<true>)<block_end> |
<import_stmt>re<import_from_stmt>.utils validator<line_sep>regex=(r'^[A-Z]{2}[0-9]{2}[A-Z0-9]{13,30}$')<line_sep>pattern=re.compile(regex)<def_stmt>char_value char<block_start>"""A=10, B=11, ..., Z=35
"""<if_stmt>char.isdigit()<block_start><return>int(char)<block_end><else_stmt><block_start><return>10+ord(char)-ord('A')<block_end><block_end><def_stmt>modcheck value<block_start>"""Check if the value string passes the mod97-test.
"""<line_sep># move country code and check numbers to end
rearranged=value[4:]+value[:4]<line_sep># convert letters to numbers
converted=[char_value(char)<for>char rearranged]<line_sep># interpret as integer
integerized=int(''.join([str(i)<for>i converted]))<line_sep><return>(integerized%97<eq>1)<block_end>@validator<def_stmt>iban value<block_start>"""
Return whether or not given value is a valid IBAN code.
If the value is a valid IBAN this function returns ``True``, otherwise
:class:`~validators.utils.ValidationFailure`.
Examples::
>>> iban('DE29100500001061045672')
True
>>> iban('123456')
ValidationFailure(func=iban, ...)
.. versionadded:: 0.8
:param value: IBAN string to validate
"""<line_sep><return>pattern.match(value)<and>modcheck(value)<block_end> |
<import_from_stmt>subproject d<line_sep>print("c")<line_sep> |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the behaviour classes of the registration_aw1 skill."""<import_stmt>logging<import_from_stmt>pathlib Path<import_from_stmt>unittest.mock patch<import_from_stmt>aea.helpers.transaction.base RawMessage Terms<import_from_stmt>packages.fetchai.protocols.register.message RegisterMessage<import_from_stmt>packages.fetchai.protocols.signing.message SigningMessage<import_from_stmt>tests.conftest ROOT_DIR<import_from_stmt>tests.test_packages.test_skills.test_registration_aw1.intermediate_class RegiatrationAW1TestCase <class_stmt>TestAW1Registration(RegiatrationAW1TestCase)<block_start>"""Test registration behaviour of registration_aw1."""<line_sep>path_to_skill=Path(ROOT_DIR "packages" "fetchai" "skills" "registration_aw1")<line_sep>@classmethod<def_stmt>setup cls<block_start>"""Setup the test class."""<line_sep>super().setup()<block_end><def_stmt>test_setup_i self<block_start>"""Test the setup method of the registration behaviour NOT developer_handle_mode and announce_termination_key is None."""<line_sep># setup
self.strategy.announce_termination_key=<none><line_sep>self.strategy.developer_handle_mode=<false><line_sep># operation
<with_stmt>patch.object(self.logger "log")<as>mock_logger<block_start>self.register_behaviour.setup()<block_end># after
self.assert_quantity_in_decision_making_queue(1)<line_sep>message=self.get_message_from_decision_maker_inbox()<line_sep>has_attributes,error_str=self.message_has_attributes(actual_message=message message_type=SigningMessage performative=SigningMessage.Performative.SIGN_MESSAGE to=self.skill.skill_context.decision_maker_address sender=str(self.skill.skill_context.skill_id) raw_message=RawMessage(self.strategy.ledger_id self.strategy.ethereum_address.encode("utf-8")) terms=Terms(ledger_id=self.strategy.ledger_id sender_address="" counterparty_address="" amount_by_currency_id={} quantities_by_good_id={} nonce="" ) )<assert_stmt>has_attributes error_str<line_sep>mock_logger.assert_any_call(logging.INFO "sending signing_msg to decision maker...")<block_end><def_stmt>test_setup_ii self<block_start>"""Test the setup method of the registration behaviour IN developer_handle_mode and announce_termination_key is NOT None."""<line_sep># setup
key="some_key"<line_sep>self.strategy.announce_termination_key=key<line_sep>self.strategy.developer_handle_only=<true><line_sep># operation
self.register_behaviour.setup()<line_sep># after
self.assert_quantity_in_decision_making_queue(0)<assert_stmt>self.skill.skill_context.shared_state[key]<is><false><block_end><def_stmt>test_act_i self<block_start>"""Test the act method of the registration behaviour where is_ready_to_register is False."""<line_sep># setup
self.strategy.is_ready_to_register=<false><line_sep># operation
self.register_behaviour.act()<line_sep># after
self.assert_quantity_in_outbox(0)<block_end><def_stmt>test_act_ii self<block_start>"""Test the act method of the registration behaviour where aw1_registration_aeas is None."""<line_sep># setup
self.strategy.is_ready_to_register=<true><line_sep># operation
self.register_behaviour.act()<line_sep># after
self.assert_quantity_in_outbox(0)<block_end><def_stmt>test_act_iii self<block_start>"""Test the act method of the registration behaviour where is_registered is True."""<line_sep># setup
self.strategy.is_ready_to_register=<true><line_sep>self.skill.skill_context.shared_state[self.shared_storage_key]=self.aw1_registration_aeas<line_sep>self.strategy.is_registered=<true><line_sep># operation
self.register_behaviour.act()<line_sep># after
self.assert_quantity_in_outbox(0)<block_end><def_stmt>test_act_iv self<block_start>"""Test the act method of the registration behaviour where is_registration_pending is True."""<line_sep># setup
self.strategy.is_ready_to_register=<true><line_sep>self.skill.skill_context.shared_state[self.shared_storage_key]=self.aw1_registration_aeas<line_sep>self.strategy.is_registered=<false><line_sep>self.strategy.is_registration_pending=<true><line_sep># operation
self.register_behaviour.act()<line_sep># after
self.assert_quantity_in_outbox(0)<block_end><def_stmt>test_act_v self<block_start>"""Test the act method of the registration behaviour where _register_for_aw1 is called."""<line_sep># setup
self.strategy.is_ready_to_register=<true><line_sep>self.skill.skill_context.shared_state[self.shared_storage_key]=self.aw1_registration_aeas<line_sep>self.strategy.is_registered=<false><line_sep>self.strategy.is_registration_pending=<false><line_sep># operation
<with_stmt>patch.object(self.logger "log")<as>mock_logger<block_start>self.register_behaviour.act()<block_end># after
self.assert_quantity_in_outbox(len(self.aw1_registration_aeas))<assert_stmt>self.strategy.is_registration_pending<is><true><line_sep># _register_for_aw1
info=self.strategy.registration_info<line_sep>message=self.get_message_from_outbox()<line_sep>has_attributes,error_str=self.message_has_attributes(actual_message=message message_type=RegisterMessage performative=RegisterMessage.Performative.REGISTER to=self.aw1_registration_aea sender=self.skill.skill_context.agent_address info=info )<assert_stmt>has_attributes error_str<line_sep>mock_logger.assert_any_call(logging.INFO f"sending registration info: {info}" )<block_end><def_stmt>test_act_vi self<block_start>"""Test the act method of the registration behaviour where aw1 agent is NOT in the whitelist."""<line_sep># setup
self.strategy.is_ready_to_register=<true><line_sep>self.skill.skill_context.shared_state[self.shared_storage_key]=self.aw1_registration_aeas<line_sep>self.strategy.is_registered=<false><line_sep>self.strategy.is_registration_pending=<false><line_sep>self.strategy._whitelist=[]<line_sep># operation
<with_stmt>patch.object(self.logger "log")<as>mock_logger<block_start>self.register_behaviour.act()<block_end># after
self.assert_quantity_in_outbox(0)<assert_stmt>self.strategy.is_registration_pending<is><true><line_sep>mock_logger.assert_any_call(logging.INFO f"agent={self.aw1_registration_aea} not in whitelist={self.strategy._whitelist}" )<block_end><def_stmt>test_teardown self<block_start>"""Test the teardown method of the registration behaviour."""<assert_stmt>self.register_behaviour.teardown()<is><none><line_sep>self.assert_quantity_in_outbox(0)<block_end><block_end> |
<import_from_stmt>mitmproxy contentviews<import_from_stmt>. base<class_stmt>ViewAuto(base.View)<block_start>name="Auto"<def_stmt>__call__ self data **metadata# TODO: The auto view has little justification now that views implement render_priority,
# but we keep it around for now to not touch more parts.
<block_start>priority,view=max((v.render_priority(data **metadata) v)<for>v contentviews.views)<if_stmt>priority<eq>0<and><not>data<block_start><return>"No content" []<block_end><return>view(data **metadata)<block_end><def_stmt>render_priority self data:bytes **metadata<arrow>float<block_start><return>-1<block_end><block_end># don't recurse.
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for the Keras implementations of models."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>time<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.python.eager profiler<class_stmt>BatchTimestamp(object)<block_start>"""A structure to store batch time stamp."""<def_stmt>__init__ self batch_index timestamp<block_start>self.batch_index=batch_index<line_sep>self.timestamp=timestamp<block_end><def_stmt>__repr__ self<block_start><return>"'BatchTimestamp<batch_index: {}, timestamp: {}>'".format(self.batch_index self.timestamp)<block_end><block_end><class_stmt>TimeHistory(tf.keras.callbacks.Callback)<block_start>"""Callback for Keras models."""<def_stmt>__init__ self batch_size log_steps<block_start>"""Callback for logging performance (# examples/second).
Args:
batch_size: Total batch size.
log_steps: Interval of time history logs.
"""<line_sep>self.batch_size=batch_size<line_sep>super(TimeHistory self).__init__()<line_sep>self.log_steps=log_steps<line_sep># Logs start of step 0 then end of each step based on log_steps interval.
self.timestamp_log=[]<block_end><def_stmt>on_train_begin self logs=<none><block_start>self.record_batch=<true><block_end><def_stmt>on_train_end self logs=<none><block_start>self.train_finish_time=time.time()<block_end><def_stmt>on_batch_begin self batch logs=<none><block_start><if_stmt>self.record_batch<block_start>timestamp=time.time()<line_sep>self.start_time=timestamp<line_sep>self.record_batch=<false><if_stmt>batch<eq>0<block_start>self.timestamp_log.append(BatchTimestamp(batch timestamp))<block_end><block_end><block_end><def_stmt>on_batch_end self batch logs=<none><block_start><if_stmt>batch%self.log_steps<eq>0<block_start>timestamp=time.time()<line_sep>elapsed_time=timestamp-self.start_time<line_sep>examples_per_second=(self.batch_size<times>self.log_steps)/elapsed_time<if_stmt>batch<ne>0<block_start>self.record_batch=<true><line_sep>self.timestamp_log.append(BatchTimestamp(batch timestamp))<line_sep>tf.compat.v1.logging.info("BenchmarkMetric: {'num_batches':%d, 'time_taken': %f,"<concat>"'examples_per_second': %f}"%(batch elapsed_time examples_per_second))<block_end><block_end><block_end><block_end><def_stmt>get_profiler_callback model_dir profile_steps enable_tensorboard<block_start>"""Validate profile_steps flag value and return profiler callback."""<line_sep>profile_steps_error_message=('profile_steps must be a comma separated pair of positive integers, '<concat>'specifying the first and last steps to be profiled.')<try_stmt><block_start>profile_steps=[int(i)<for>i profile_steps.split(',')]<block_end><except_stmt>ValueError<block_start><raise>ValueError(profile_steps_error_message)<block_end><if_stmt>len(profile_steps)<ne>2<block_start><raise>ValueError(profile_steps_error_message)<block_end>start_step,stop_step=profile_steps<if_stmt>start_step<l>0<or>start_step<g>stop_step<block_start><raise>ValueError(profile_steps_error_message)<block_end><if_stmt>enable_tensorboard<block_start>tf.compat.v1.logging.warn('Both TensorBoard and profiler callbacks are 
used. Note that the '<concat>'TensorBoard callback profiles the 2nd step (unless otherwise '<concat>'specified). Please make sure the steps profiled by the two callbacks '<concat>'do not overlap.')<block_end><return>ProfilerCallback(model_dir start_step stop_step)<block_end><class_stmt>ProfilerCallback(tf.keras.callbacks.Callback)<block_start>"""Save profiles in specified step range to log directory."""<def_stmt>__init__ self log_dir start_step stop_step<block_start>super(ProfilerCallback self).__init__()<line_sep>self.log_dir=log_dir<line_sep>self.start_step=start_step<line_sep>self.stop_step=stop_step<block_end><def_stmt>on_batch_begin self batch logs=<none><block_start><if_stmt>batch<eq>self.start_step<block_start>profiler.start()<line_sep>tf.compat.v1.logging.info('Profiler started at Step %s' self.start_step)<block_end><block_end><def_stmt>on_batch_end self batch logs=<none><block_start><if_stmt>batch<eq>self.stop_step<block_start>results=profiler.stop()<line_sep>profiler.save(self.log_dir results)<line_sep>tf.compat.v1.logging.info('Profiler saved profiles for steps between %s and %s to %s' self.start_step self.stop_step self.log_dir)<block_end><block_end><block_end> |
# -*- coding: utf-8 -*-
"""
Highcharts Demos
Donut chart: http://www.highcharts.com/demo/pie-donut
"""<import_from_stmt>highcharts Highchart<line_sep>H=Highchart(width=850 height=400)<line_sep>data=[{'y':55.11 'color':'Highcharts.getOptions().colors[0]' 'drilldown':{'name':'MSIE versions' 'categories':['MSIE 6.0' 'MSIE 7.0' 'MSIE 8.0' 'MSIE 9.0'] 'data':[10.85 7.35 33.06 2.81] 'color':'Highcharts.getOptions().colors[0]'}} {'y':21.63 'color':'Highcharts.getOptions().colors[1]' 'drilldown':{'name':'Firefox versions' 'categories':['Firefox 2.0' 'Firefox 3.0' 'Firefox 3.5' 'Firefox 3.6' 'Firefox 4.0'] 'data':[0.20 0.83 1.58 13.12 5.43] 'color':'Highcharts.getOptions().colors[1]'}} {'y':11.94 'color':'Highcharts.getOptions().colors[2]' 'drilldown':{'name':'Chrome versions' 'categories':['Chrome 5.0' 'Chrome 6.0' 'Chrome 7.0' 'Chrome 8.0' 'Chrome 9.0' 'Chrome 10.0' 'Chrome 11.0' 'Chrome 12.0'] 'data':[0.12 0.19 0.12 0.36 0.32 9.91 0.50 0.22] 'color':'Highcharts.getOptions().colors[2]'}} {'y':7.15 'color':'Highcharts.getOptions().colors[3]' 'drilldown':{'name':'Safari versions' 'categories':['Safari 5.0' 'Safari 4.0' 'Safari Win 5.0' 'Safari 4.1' 'Safari/Maxthon' 'Safari 3.1' 'Safari 4.1'] 'data':[4.55 1.42 0.23 0.21 0.20 0.19 0.14] 'color':'Highcharts.getOptions().colors[3]'}} {'y':2.14 'color':'Highcharts.getOptions().colors[4]' 'drilldown':{'name':'Opera versions' 'categories':['Opera 9.x' 'Opera 10.x' 'Opera 11.x'] 'data':[0.12 0.37 1.65] 'color':'Highcharts.getOptions().colors[4]'}}]<line_sep>options={'chart':{'type':'pie'} 'title':{'text':'Browser market share, April, 2011'} 'yAxis':{'title':{'text':'Total percent market share'}} 'plotOptions':{'pie':{'shadow':<false> 'center':['50%' '50%']}} 'tooltip':{'valueSuffix':'%'} }<line_sep>categories=['MSIE' 'Firefox' 'Chrome' 'Safari' 'Opera']<line_sep>browserData=[]<line_sep>versionsData=[]<for_stmt>i range(len(data))<block_start>browserData.append({'name':categories[i] 'y':data[i]['y'] 'color':data[i]['color']})<line_sep>drillDataLen=len(data[i]['drilldown']['data'])<for_stmt>j 
range(drillDataLen)<block_start>brightness=0.2-(j/drillDataLen)/5<line_sep>versionsData.append({'name':data[i]['drilldown']['categories'][j] 'y':data[i]['drilldown']['data'][j] 'color':'Highcharts.Color('+data[i]['color']+').brighten('+str(brightness)+').get()'})<block_end><block_end>H.set_dict_options(options)<line_sep>H.add_data_set(browserData 'pie' 'Browsers' size='60%' dataLabels={'formatter':'function () { \
return this.y > 5 ? this.point.name : null;\
}' 'color':'white' 'distance':-30})<line_sep>H.add_data_set(versionsData 'pie' 'Versions' size='80%' innerSize='60%' dataLabels={'formatter':"function () {\
return this.y > 1 ? '<b>' + this.point.name + ':</b> ' + this.y + '%' : null;\
}"})<line_sep>H.htmlcontent<line_sep> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>run2_GEM_2017=cms.Modifier()<line_sep> |
# !/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
# Modified by <NAME> on 20 April 2020
<import_stmt>argparse<import_stmt>datetime<import_stmt>glob<import_stmt>os<import_stmt>random<import_stmt>sys<import_stmt>time<import_from_stmt>PIL Image<import_from_stmt>PIL.PngImagePlugin PngInfo<try_stmt><block_start>sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg'%(sys.version_info.major sys.version_info.minor 'win-amd64'<if>os.name<eq>'nt'<else>'linux-x86_64'))[0])<block_end><except_stmt>IndexError<block_start><pass><block_end><import_stmt>carla<import_stmt>math<import_from_stmt>dotmap DotMap<try_stmt><block_start><import_stmt>pygame<block_end><except_stmt>ImportError<block_start><raise>RuntimeError('cannot import pygame, make sure pygame package is installed')<block_end><try_stmt><block_start><import_stmt>numpy<as>np<block_end><except_stmt>ImportError<block_start><raise>RuntimeError('cannot import numpy, make sure numpy package is installed')<block_end><try_stmt><block_start><import_stmt>queue<block_end><except_stmt>ImportError<block_start><import_stmt>Queue<as>queue<block_end><import_from_stmt>agents.navigation.agent Agent AgentState<import_from_stmt>agents.navigation.local_planner LocalPlanner<import_from_stmt>agents.navigation.global_route_planner GlobalRoutePlanner<import_from_stmt>agents.tools.misc is_within_distance_ahead compute_magnitude_angle<import_from_stmt>agents.navigation.global_route_planner_dao GlobalRoutePlannerDAO<def_stmt>is_within_distance target_location current_location orientation max_distance d_angle_th_up d_angle_th_low=0<block_start>"""
Check if a target object is within a certain distance from a reference object.
A vehicle in front would be something around 0 deg, while one behind around 180 deg.
:param target_location: location of the target object
:param current_location: location of the reference object
:param orientation: orientation of the reference object
:param max_distance: maximum allowed distance
:param d_angle_th_up: upper thereshold for angle
:param d_angle_th_low: low thereshold for angle (optional, default is 0)
:return: True if target object is within max_distance ahead of the reference object
"""<line_sep>target_vector=np.array([target_location.x-current_location.x target_location.y-current_location.y])<line_sep>norm_target=np.linalg.norm(target_vector)<line_sep># If the vector is too short, we can simply stop here
<if_stmt>norm_target<l>0.001<block_start><return><true><block_end><if_stmt>norm_target<g>max_distance<block_start><return><false><block_end>forward_vector=np.array([math.cos(math.radians(orientation)) math.sin(math.radians(orientation))])<line_sep>d_angle=math.degrees(math.acos(np.clip(np.dot(forward_vector target_vector)/norm_target -1. 1.)))<line_sep><return>d_angle_th_low<l>d_angle<l>d_angle_th_up<block_end><def_stmt>compute_distance location_1 location_2<block_start>"""
Euclidean distance between 3D points
:param location_1, location_2: 3D points
"""<line_sep>x=location_2.x-location_1.x<line_sep>y=location_2.y-location_1.y<line_sep>z=location_2.z-location_1.z<line_sep>norm=np.linalg.norm([x y z])+np.finfo(float).eps<line_sep><return>norm<block_end><class_stmt>CarlaSyncMode(object)<block_start>"""
Context manager to synchronize output from different sensors. Synchronous
mode is enabled as long as we are inside this context
with CarlaSyncMode(world, sensors) as sync_mode:
while True:
data = sync_mode.tick(timeout=1.0)
"""<def_stmt>__init__ self world *sensors **kwargs<block_start>self.world=world<line_sep>self.sensors=sensors<line_sep>self.frame=<none><line_sep>self.delta_seconds=1.0/kwargs.get('fps' 20)<line_sep>self._queues=[]<line_sep>self._settings=<none><line_sep>self.start()<block_end><def_stmt>start self<block_start>self._settings=self.world.get_settings()<line_sep>self.frame=self.world.apply_settings(carla.WorldSettings(no_rendering_mode=<false> synchronous_mode=<true> fixed_delta_seconds=self.delta_seconds))<def_stmt>make_queue register_event<block_start>q=queue.Queue()<line_sep>register_event(q.put)<line_sep>self._queues.append(q)<block_end>make_queue(self.world.on_tick)<for_stmt>sensor self.sensors<block_start>make_queue(sensor.listen)<block_end><block_end><def_stmt>tick self timeout<block_start>self.frame=self.world.tick()<line_sep>data=[self._retrieve_data(q timeout)<for>q self._queues]<assert_stmt>all(x.frame<eq>self.frame<for>x data)<line_sep><return>data<block_end><def_stmt>__exit__ self *args **kwargs<block_start>self.world.apply_settings(self._settings)<block_end><def_stmt>_retrieve_data self sensor_queue timeout<block_start><while_stmt><true><block_start>data=sensor_queue.get(timeout=timeout)<if_stmt>data.frame<eq>self.frame<block_start><return>data<block_end><block_end><block_end><block_end><def_stmt>draw_image surface image blend=<false><block_start>array=np.frombuffer(image.raw_data dtype=np.dtype("uint8"))<line_sep>array=np.reshape(array (image.height image.width 4))<line_sep>array=array[: : :3]<line_sep>array=array[: : ::-1]<line_sep>image_surface=pygame.surfarray.make_surface(array.swapaxes(0 1))<if_stmt>blend<block_start>image_surface.set_alpha(100)<block_end>surface.blit(image_surface (0 0))<block_end><def_stmt>get_font <block_start>fonts=[x<for>x pygame.font.get_fonts()]<line_sep>default_font='ubuntumono'<line_sep>font=default_font<if>default_font<in>fonts<else>fonts[0]<line_sep>font=pygame.font.match_font(font)<line_sep><return>pygame.font.Font(font 
14)<block_end><def_stmt>should_quit <block_start><for_stmt>event pygame.event.get()<block_start><if_stmt>event.type<eq>pygame.QUIT<block_start><return><true><block_end><elif_stmt>event.type<eq>pygame.KEYUP<block_start><if_stmt>event.key<eq>pygame.K_ESCAPE<block_start><return><true><block_end><block_end><block_end><return><false><block_end><def_stmt>clamp value minimum=0.0 maximum=100.0<block_start><return>max(minimum min(value maximum))<block_end><class_stmt>Sun(object)<block_start><def_stmt>__init__ self azimuth altitude<block_start>self.azimuth=azimuth<line_sep>self.altitude=altitude<line_sep>self._t=0.0<block_end><def_stmt>tick self delta_seconds<block_start>self._t<augadd>0.008<times>delta_seconds<line_sep>self._t<augmod>2.0<times>math.pi<line_sep>self.azimuth<augadd>0.25<times>delta_seconds<line_sep>self.azimuth<augmod>360.0<line_sep>min_alt,max_alt=[20 90]<line_sep>self.altitude=0.5<times>(max_alt+min_alt)+0.5<times>(max_alt-min_alt)<times>math.cos(self._t)<block_end><def_stmt>__str__ self<block_start><return>'Sun(alt: %.2f, azm: %.2f)'%(self.altitude self.azimuth)<block_end><block_end><class_stmt>Storm(object)<block_start><def_stmt>__init__ self precipitation<block_start>self._t=precipitation<if>precipitation<g>0.0<else>-50.0<line_sep>self._increasing=<true><line_sep>self.clouds=0.0<line_sep>self.rain=0.0<line_sep>self.wetness=0.0<line_sep>self.puddles=0.0<line_sep>self.wind=0.0<line_sep>self.fog=0.0<block_end><def_stmt>tick self delta_seconds<block_start>delta=(1.3<if>self._increasing<else>-1.3)<times>delta_seconds<line_sep>self._t=clamp(delta+self._t -250.0 100.0)<line_sep>self.clouds=clamp(self._t+40.0 0.0 90.0)<line_sep>self.clouds=clamp(self._t+40.0 0.0 60.0)<line_sep>self.rain=clamp(self._t 0.0 80.0)<line_sep>delay=-10.0<if>self._increasing<else>90.0<line_sep>self.puddles=clamp(self._t+delay 0.0 85.0)<line_sep>self.wetness=clamp(self._t<times>5 0.0 
100.0)<line_sep>self.wind=5.0<if>self.clouds<le>20<else>90<if>self.clouds<ge>70<else>40<line_sep>self.fog=clamp(self._t-10 0.0 30.0)<if_stmt>self._t<eq>-250.0<block_start>self._increasing=<true><block_end><if_stmt>self._t<eq>100.0<block_start>self._increasing=<false><block_end><block_end><def_stmt>__str__ self<block_start><return>'Storm(clouds=%d%%, rain=%d%%, wind=%d%%)'%(self.clouds self.rain self.wind)<block_end><block_end><class_stmt>Weather(object)<block_start><def_stmt>__init__ self world changing_weather_speed<block_start>self.world=world<line_sep>self.reset()<line_sep>self.weather=world.get_weather()<line_sep>self.changing_weather_speed=changing_weather_speed<line_sep>self._sun=Sun(self.weather.sun_azimuth_angle self.weather.sun_altitude_angle)<line_sep>self._storm=Storm(self.weather.precipitation)<block_end><def_stmt>reset self<block_start>weather_params=carla.WeatherParameters(sun_altitude_angle=90.)<line_sep>self.world.set_weather(weather_params)<block_end><def_stmt>tick self<block_start>self._sun.tick(self.changing_weather_speed)<line_sep>self._storm.tick(self.changing_weather_speed)<line_sep>self.weather.cloudiness=self._storm.clouds<line_sep>self.weather.precipitation=self._storm.rain<line_sep>self.weather.precipitation_deposits=self._storm.puddles<line_sep>self.weather.wind_intensity=self._storm.wind<line_sep>self.weather.fog_density=self._storm.fog<line_sep>self.weather.wetness=self._storm.wetness<line_sep>self.weather.sun_azimuth_angle=self._sun.azimuth<line_sep>self.weather.sun_altitude_angle=self._sun.altitude<line_sep>self.world.set_weather(self.weather)<block_end><def_stmt>__str__ self<block_start><return>'%s %s'%(self._sun self._storm)<block_end><block_end><def_stmt>parse_args <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--vision_size' type=int default=84)<line_sep>parser.add_argument('--vision_fov' type=int default=90)<line_sep>parser.add_argument('--weather' default=<false> 
action='store_true')<line_sep>parser.add_argument('--frame_skip' type=int default=1) <line_sep>parser.add_argument('--steps' type=int default=100000)<line_sep>parser.add_argument('--multiagent' default=<false> action='store_true') <line_sep>parser.add_argument('--lane' type=int default=0)<line_sep>parser.add_argument('--lights' default=<false> action='store_true')<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><class_stmt>LocalPlannerModified(LocalPlanner)<block_start><def_stmt>__del__ self<block_start><pass><block_end># otherwise it deletes our vehicle object
<def_stmt>run_step self<block_start><return>super().run_step(debug=<false>)<block_end><block_end># otherwise by default shows waypoints, that interfere with our camera
<class_stmt>RoamingAgent(Agent)<block_start>"""
RoamingAgent implements a basic agent that navigates scenes making random
choices when facing an intersection.
This agent respects traffic lights and other vehicles.
NOTE: need to re-create after each env reset
"""<def_stmt>__init__ self env<block_start>"""
:param vehicle: actor to apply to local planner logic onto
"""<line_sep>vehicle=env.vehicle<line_sep>follow_traffic_lights=env.follow_traffic_lights<line_sep>super(RoamingAgent self).__init__(vehicle)<line_sep>self._proximity_threshold=10.0# meters
self._state=AgentState.NAVIGATING<line_sep>self._local_planner=LocalPlannerModified(self._vehicle)<line_sep>self._follow_traffic_lights=follow_traffic_lights<block_end><def_stmt>compute_action self<block_start>action,traffic_light=self.run_step()<line_sep>throttle=action.throttle<line_sep>brake=action.brake<line_sep>steer=action.steer<line_sep>#print('tbsl:', throttle, brake, steer, traffic_light)
<if_stmt>brake<eq>0.0<block_start><return>np.array([throttle steer])<block_end><else_stmt><block_start><return>np.array([-brake steer])<block_end><block_end><def_stmt>run_step self<block_start>"""
Execute one step of navigation.
:return: carla.VehicleControl
"""<line_sep># is there an obstacle in front of us?
hazard_detected=<false><line_sep># retrieve relevant elements for safe navigation, i.e.: traffic lights and other vehicles
actor_list=self._world.get_actors()<line_sep>vehicle_list=actor_list.filter("*vehicle*")<line_sep>lights_list=actor_list.filter("*traffic_light*")<line_sep># check possible obstacles
vehicle_state,vehicle=self._is_vehicle_hazard(vehicle_list)<if_stmt>vehicle_state<block_start>self._state=AgentState.BLOCKED_BY_VEHICLE<line_sep>hazard_detected=<true><block_end># check for the state of the traffic lights
traffic_light_color=self._is_light_red(lights_list)<if_stmt>traffic_light_color<eq>'RED'<and>self._follow_traffic_lights<block_start>self._state=AgentState.BLOCKED_RED_LIGHT<line_sep>hazard_detected=<true><block_end><if_stmt>hazard_detected<block_start>control=self.emergency_stop()<block_end><else_stmt><block_start>self._state=AgentState.NAVIGATING<line_sep># standard local planner behavior
control=self._local_planner.run_step()<block_end>#print ('Action chosen: ', control)
<return>control traffic_light_color<block_end># override case class
<def_stmt>_is_light_red_europe_style self lights_list<block_start>"""
This method is specialized to check European style traffic lights.
Only suitable for Towns 03 -- 07.
"""<line_sep>ego_vehicle_location=self._vehicle.get_location()<line_sep>ego_vehicle_waypoint=self._map.get_waypoint(ego_vehicle_location)<line_sep>traffic_light_color="NONE"# default, if no traffic lights are seen
<for_stmt>traffic_light lights_list<block_start>object_waypoint=self._map.get_waypoint(traffic_light.get_location())<if_stmt>object_waypoint.road_id<ne>ego_vehicle_waypoint.road_id<or>object_waypoint.lane_id<ne>ego_vehicle_waypoint.lane_id<block_start><continue><block_end><if_stmt>is_within_distance_ahead(traffic_light.get_transform() self._vehicle.get_transform() self._proximity_threshold)<block_start><if_stmt>traffic_light.state<eq>carla.TrafficLightState.Red<block_start><return>"RED"<block_end><elif_stmt>traffic_light.state<eq>carla.TrafficLightState.Yellow<block_start>traffic_light_color="YELLOW"<block_end><elif_stmt>traffic_light.state<eq>carla.TrafficLightState.Green<block_start><if_stmt>traffic_light_color<is><not>"YELLOW"# (more severe)
<block_start>traffic_light_color="GREEN"<block_end><block_end><else_stmt><block_start><import_stmt>pdb<line_sep>pdb.set_trace()<line_sep># investigate https://carla.readthedocs.io/en/latest/python_api/#carlatrafficlightstate
<block_end><block_end><block_end><return>traffic_light_color<block_end># override case class
<def_stmt>_is_light_red_us_style self lights_list debug=<false><block_start>ego_vehicle_location=self._vehicle.get_location()<line_sep>ego_vehicle_waypoint=self._map.get_waypoint(ego_vehicle_location)<line_sep>traffic_light_color="NONE"# default, if no traffic lights are seen
<if_stmt>ego_vehicle_waypoint.is_junction# It is too late. Do not block the intersection! Keep going!
<block_start><return>"JUNCTION"<block_end><if_stmt>self._local_planner.target_waypoint<is><not><none><block_start><if_stmt>self._local_planner.target_waypoint.is_junction<block_start>min_angle=180.0<line_sep>sel_magnitude=0.0<line_sep>sel_traffic_light=<none><for_stmt>traffic_light lights_list<block_start>loc=traffic_light.get_location()<line_sep>magnitude,angle=compute_magnitude_angle(loc ego_vehicle_location self._vehicle.get_transform().rotation.yaw)<if_stmt>magnitude<l>60.0<and>angle<l>min(25.0 min_angle)<block_start>sel_magnitude=magnitude<line_sep>sel_traffic_light=traffic_light<line_sep>min_angle=angle<block_end><block_end><if_stmt>sel_traffic_light<is><not><none><block_start><if_stmt>debug<block_start>print('=== Magnitude = {} | Angle = {} | ID = {}'.format(sel_magnitude min_angle sel_traffic_light.id))<block_end><if_stmt>self._last_traffic_light<is><none><block_start>self._last_traffic_light=sel_traffic_light<block_end><if_stmt>self._last_traffic_light.state<eq>carla.TrafficLightState.Red<block_start><return>"RED"<block_end><elif_stmt>self._last_traffic_light.state<eq>carla.TrafficLightState.Yellow<block_start>traffic_light_color="YELLOW"<block_end><elif_stmt>self._last_traffic_light.state<eq>carla.TrafficLightState.Green<block_start><if_stmt>traffic_light_color<is><not>"YELLOW"# (more severe)
<block_start>traffic_light_color="GREEN"<block_end><block_end><else_stmt><block_start><import_stmt>pdb<line_sep>pdb.set_trace()<line_sep># investigate https://carla.readthedocs.io/en/latest/python_api/#carlatrafficlightstate
<block_end><block_end><else_stmt><block_start>self._last_traffic_light=<none><block_end><block_end><block_end><return>traffic_light_color<block_end><block_end><if_stmt>__name__<eq>'__main__'# example call:
# ./PythonAPI/util/config.py --map Town01 --delta-seconds 0.05
# python PythonAPI/carla/agents/navigation/data_collection_agent.py --vision_size 256 --vision_fov 90 --steps 10000 --weather --lights
<block_start>args=parse_args()<line_sep>env=CarlaEnv(args)<try_stmt><block_start>done=<false><while_stmt><not>done<block_start>action,traffic_light_color=env.compute_action()<line_sep>next_obs,reward,done,info=env.step(action traffic_light_color)<line_sep>print('Reward: ' reward 'Done: ' done 'Location: ' env.vehicle.get_location())<if_stmt>done# env.reset_init()
# env.reset()
<block_start>done=<false><block_end><block_end><block_end><finally_stmt><block_start>env.finish()<block_end><block_end> |
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.python.ops math_ops<import_stmt>logging<import_from_stmt>. imagenet_processing<import_from_stmt>custom_exceptions UnsupportedFormat DimensionError<class_stmt>DataTransformer<block_start>logger=logging.getLogger('data_transformer')<line_sep>@staticmethod<def_stmt>normalization ds scale=1/255.0 img_type=tf.float32# Applying normalization before `ds.cache()` to re-use it.
# Note: Random transformations (e.g. images augmentations) should be applied
# after both `ds.cache()` (to avoid caching randomness) and `ds.batch()`
# (for vectorization https://www.tensorflow.org/guide/data_performance#vectorizing_mapping).
<block_start><if_stmt><not>isinstance(ds tf.data.Dataset)<block_start><raise>UnsupportedFormat(f'Type of ds is not the one expected (tf.data.Dataset) {type(ds)}')<block_end><if_stmt><not>hasattr(ds.element_spec '__len__')<or>len(ds.element_spec)<ne>2<block_start><raise>DimensionError(f'Data dimension is not the one supported (2) {ds.element_spec}')<block_end>multiplier=tf.cast(scale img_type)<line_sep><return>ds.map(<lambda>x y:(multiplier<times>tf.cast(x img_type) tf.cast(y tf.int32)) num_parallel_calls=tf.data.experimental.AUTOTUNE)<block_end>@staticmethod<def_stmt>cache_shuffle ds:tf.data.Dataset buffer_size:int=1 shuffle:bool=<true> seed:int=42<block_start><if_stmt><not>isinstance(ds tf.data.Dataset)<block_start><raise>UnsupportedFormat(f'Type of ds is not the one expected (tf.data.Dataset) {type(ds)}')<block_end>ds=ds.cache()<if_stmt>shuffle<block_start>ds=ds.shuffle(buffer_size seed=seed)<block_end><return>ds<block_end>@staticmethod<def_stmt>cifar_preprocess ds buffer_size img_type=tf.float32 is_training=<true> accelerator_side_preprocess=<false> pipeline_num_parallel=48 seed=42<block_start><if_stmt><not>isinstance(ds tf.data.Dataset)<block_start><raise>UnsupportedFormat(f'Type of ds is not the one expected (tf.data.Dataset) {type(ds)}')<block_end><if_stmt><not>hasattr(ds.element_spec '__len__')<or>len(ds.element_spec)<ne>2<block_start><raise>DimensionError(f'Data dimension is not the one supported (2) {ds.element_spec}')<block_end>ds=DataTransformer.cache_shuffle(ds buffer_size is_training seed)<line_sep>preprocess_fn=cifar_preprocess_training_fn<if>is_training<else>cifar_preprocess_inference_fn<if_stmt>accelerator_side_preprocess<block_start>host_side_preprocess_fn=<none><line_sep>accelerator_side_preprocess_fn=preprocess_fn<block_end><else_stmt><block_start>host_side_preprocess_fn=preprocess_fn<line_sep>accelerator_side_preprocess_fn=<none><block_end><def_stmt>cifar_preprocess_map_func x_image<block_start><assert_stmt>(x_image.shape<eq>(32 32 
3))<if_stmt>host_side_preprocess_fn<is><not><none><block_start>x_image=tf.cast(x_image tf.float32)<line_sep>x_image=host_side_preprocess_fn(x_image)<block_end>x_image=tf.cast(x_image img_type)<if_stmt>is_training<block_start>shape=x_image.get_shape().as_list()<line_sep>padding=4<line_sep>x_image=tf.pad(x_image [[padding padding] [padding padding] [0 0]] "CONSTANT")<line_sep>x_image=tf.image.random_crop(x_image shape seed=seed)<block_end><return>x_image<block_end>ds=ds.map(<lambda>x y:(cifar_preprocess_map_func(x) tf.cast(y tf.int32)) num_parallel_calls=pipeline_num_parallel)<line_sep>accelerator_side_preprocess_fn=preprocess_fn<if>accelerator_side_preprocess<is><true><else><none><line_sep><return>ds accelerator_side_preprocess_fn<block_end>@staticmethod<def_stmt>imagenet_preprocessing ds img_type is_training accelerator_side_preprocess=<true> pipeline_num_parallel=48 seed=<none><block_start>preprocessing_fn=imagenet_preprocess_training_fn<if>is_training<else>imagenet_preprocess_inference_fn<if_stmt>accelerator_side_preprocess<block_start>host_side_preprocess_fn=<none><line_sep>accelerator_side_preprocess_fn=preprocessing_fn<block_end><else_stmt><block_start>host_side_preprocess_fn=preprocessing_fn<line_sep>accelerator_side_preprocess_fn=<none><block_end><def_stmt>processing_fn raw_record<block_start><return>imagenet_processing.parse_record(raw_record is_training img_type host_side_preprocess_fn seed=seed)<block_end><return>ds.map(processing_fn num_parallel_calls=pipeline_num_parallel) accelerator_side_preprocess_fn<block_end><block_end><def_stmt>_image_normalisation image mean std scale=255<block_start>mean=tf.cast(mean dtype=image.dtype)<line_sep>std=tf.cast(std dtype=image.dtype)<line_sep>mean=tf.broadcast_to(mean tf.shape(image))<line_sep>std=tf.broadcast_to(std tf.shape(image))<line_sep><return>(image/scale-mean)/std<block_end><def_stmt>_imagenet_normalize image<block_start>IMAGENET_NORMALISATION_MEAN=[0.485 0.456 
0.406]<line_sep>IMAGENET_NORMALISATION_STD=[0.229 0.224 0.225]<line_sep><return>_image_normalisation(image IMAGENET_NORMALISATION_MEAN IMAGENET_NORMALISATION_STD)<block_end><def_stmt>_cifar_normalize image<block_start>mean=math_ops.reduce_mean(image axis=[-1 -2 -3] keepdims=<true>)<line_sep>std=math_ops.reduce_std(image axis=[-1 -2 -3] keepdims=<true>)<line_sep><return>_image_normalisation(image mean std scale=1)<block_end><def_stmt>imagenet_preprocess_training_fn image<block_start><return>_imagenet_normalize(image)<block_end><def_stmt>imagenet_preprocess_inference_fn image<block_start><return>_imagenet_normalize(image)<block_end><def_stmt>cifar_preprocess_training_fn image<block_start>image=tf.image.random_flip_left_right(image)<line_sep><return>_cifar_normalize(image)<block_end><def_stmt>cifar_preprocess_inference_fn image<block_start><return>_cifar_normalize(image)<block_end> |
<class_stmt>GraphLearner<block_start>"""Base class for causal discovery methods.
Subclasses implement different discovery methods. All discovery methods are in the package "dowhy.causal_discoverers"
"""<def_stmt>__init__ self data library_class *args **kwargs<block_start>self._data=data<line_sep>self._labels=list(self._data.columns)<line_sep>self._adjacency_matrix=<none><line_sep>self._graph_dot=<none><block_end><def_stmt>learn_graph self<block_start>'''
Discover causal graph and the graph in DOT format.
'''<line_sep><raise>NotImplementedError<block_end><block_end> |
# coding:utf-8
<import_stmt>datetime<import_stmt>json<import_stmt>random<import_stmt>requests<def_stmt>get_stock_type stock_code<block_start>"""判断股票ID对应的证券市场
匹配规则
['50', '51', '60', '90', '110'] 为 sh
['00', '13', '18', '15', '16', '18', '20', '30', '39', '115'] 为 sz
['5', '6', '9'] 开头的为 sh, 其余为 sz
:param stock_code:股票ID, 若以 'sz', 'sh' 开头直接返回对应类型,否则使用内置规则判断
:return 'sh' or 'sz'"""<line_sep>stock_code=str(stock_code)<if_stmt>stock_code.startswith(("sh" "sz"))<block_start><return>stock_code[:2]<block_end><if_stmt>stock_code.startswith(("50" "51" "60" "73" "90" "110" "113" "132" "204" "78"))<block_start><return>"sh"<block_end><if_stmt>stock_code.startswith(("00" "13" "18" "15" "16" "18" "20" "30" "39" "115" "1318"))<block_start><return>"sz"<block_end><if_stmt>stock_code.startswith(("5" "6" "9"))<block_start><return>"sh"<block_end><return>"sz"<block_end><def_stmt>get_30_date <block_start>"""
获得用于查询的默认日期, 今天的日期, 以及30天前的日期
用于查询的日期格式通常为 20160211
:return:
"""<line_sep>now=datetime.datetime.now()<line_sep>end_date=now.date()<line_sep>start_date=end_date-datetime.timedelta(days=30)<line_sep><return>start_date.strftime("%Y%m%d") end_date.strftime("%Y%m%d")<block_end><def_stmt>get_today_ipo_data <block_start>"""
查询今天可以申购的新股信息
:return: 今日可申购新股列表 apply_code申购代码 price发行价格
"""<line_sep>agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0"<line_sep>send_headers={"Host":"xueqiu.com" "User-Agent":agent "Accept":"application/json, text/javascript, */*; q=0.01" "Accept-Language":"zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3" "Accept-Encoding":"deflate" "Cache-Control":"no-cache" "X-Requested-With":"XMLHttpRequest" "Referer":"https://xueqiu.com/hq" "Connection":"keep-alive" }<line_sep>timestamp=random.randint(1000000000000 9999999999999)<line_sep>home_page_url="https://xueqiu.com"<line_sep>ipo_data_url=("https://xueqiu.com/proipo/query.json?column=symbol,name,onl_subcode,onl_subbegdate,actissqty,onl"<concat>"_actissqty,onl_submaxqty,iss_price,onl_lotwiner_stpub_date,onl_lotwinrt,onl_lotwin_amount,stock_"<concat>"income&orderBy=onl_subbegdate&order=desc&stockType=&page=1&size=30&_=%s"%(str(timestamp)))<line_sep>session=requests.session()<line_sep>session.get(home_page_url headers=send_headers)# 产生cookies
ipo_response=session.post(ipo_data_url headers=send_headers)<line_sep>json_data=json.loads(ipo_response.text)<line_sep>today_ipo=[]<for_stmt>line json_data["data"]<block_start><if_stmt>datetime.datetime.now().strftime("%a %b %d")<eq>line[3][:10]<block_start>today_ipo.append({"stock_code":line[0] "stock_name":line[1] "apply_code":line[2] "price":line[7] })<block_end><block_end><return>today_ipo<block_end> |
'''
Venn diagram plotting routines.
Two-circle venn plotter.
Copyright 2012, <NAME>.
http://kt.era.ee/
Licensed under MIT license.
'''<line_sep># Make sure we don't try to do GUI stuff when running tests
<import_stmt>sys os<if_stmt>'py.test'<in>os.path.basename(sys.argv[0])# (XXX: Ugly hack)
<block_start><import_stmt>matplotlib<line_sep>matplotlib.use('Agg')<block_end><import_stmt>numpy<as>np<import_stmt>warnings<import_from_stmt>collections Counter<import_from_stmt>matplotlib.patches Circle<import_from_stmt>matplotlib.colors ColorConverter<import_from_stmt>matplotlib.pyplot gca<import_from_stmt>matplotlib_venn._math *<import_from_stmt>matplotlib_venn._common *<import_from_stmt>matplotlib_venn._region VennCircleRegion<def_stmt>compute_venn2_areas diagram_areas normalize_to=1.0<block_start>'''
The list of venn areas is given as 3 values, corresponding to venn diagram areas in the following order:
(Ab, aB, AB) (i.e. last element corresponds to the size of intersection A&B&C).
The return value is a list of areas (A, B, AB), such that the total area is normalized
to normalize_to. If total area was 0, returns (1e-06, 1e-06, 0.0)
Assumes all input values are nonnegative (to be more precise, all areas are passed through and abs() function)
>>> compute_venn2_areas((1, 1, 0))
(0.5, 0.5, 0.0)
>>> compute_venn2_areas((0, 0, 0))
(1e-06, 1e-06, 0.0)
>>> compute_venn2_areas((1, 1, 1), normalize_to=3)
(2.0, 2.0, 1.0)
>>> compute_venn2_areas((1, 2, 3), normalize_to=6)
(4.0, 5.0, 3.0)
'''<line_sep># Normalize input values to sum to 1
areas=np.array(np.abs(diagram_areas) float)<line_sep>total_area=np.sum(areas)<if_stmt>np.abs(total_area)<l>tol<block_start>warnings.warn("Both circles have zero area")<line_sep><return>(1e-06 1e-06 0.0)<block_end><else_stmt><block_start>areas=areas/total_area<times>normalize_to<line_sep><return>(areas[0]+areas[2] areas[1]+areas[2] areas[2])<block_end><block_end><def_stmt>solve_venn2_circles venn_areas<block_start>'''
Given the list of "venn areas" (as output from compute_venn2_areas, i.e. [A, B, AB]),
finds the positions and radii of the two circles.
The return value is a tuple (coords, radii), where coords is a 2x2 array of coordinates and
radii is a 2x1 array of circle radii.
Assumes the input values to be nonnegative and not all zero.
In particular, the first two values must be positive.
>>> c, r = solve_venn2_circles((1, 1, 0))
>>> np.round(r, 3).tolist()
[0.564, 0.564]
>>> c, r = solve_venn2_circles(compute_venn2_areas((1, 2, 3)))
>>> np.round(r, 3).tolist()
[0.461, 0.515]
'''<line_sep>(A_a A_b A_ab)=list(map(float venn_areas))<line_sep>r_a,r_b=np.sqrt(A_a/np.pi) np.sqrt(A_b/np.pi)<line_sep>radii=np.array([r_a r_b])<if_stmt>A_ab<g>tol# Nonzero intersection
<block_start>coords=np.zeros((2 2))<line_sep>coords[1][0]=find_distance_by_area(radii[0] radii[1] A_ab)<block_end><else_stmt># Zero intersection
<block_start>coords=np.zeros((2 2))<line_sep>coords[1][0]=radii[0]+radii[1]+max(np.mean(radii)<times>1.1 0.2)# The max here is needed for the case r_a = r_b = 0
<block_end>coords=normalize_by_center_of_mass(coords radii)<line_sep><return>(coords radii)<block_end><def_stmt>compute_venn2_regions centers radii<block_start>'''
Returns a triple of VennRegion objects, describing the three regions of the diagram, corresponding to sets
(Ab, aB, AB)
>>> centers, radii = solve_venn2_circles((1, 1, 0.5))
>>> regions = compute_venn2_regions(centers, radii)
'''<line_sep>A=VennCircleRegion(centers[0] radii[0])<line_sep>B=VennCircleRegion(centers[1] radii[1])<line_sep>Ab,AB=A.subtract_and_intersect_circle(B.center B.radius)<line_sep>aB,_=B.subtract_and_intersect_circle(A.center A.radius)<line_sep><return>(Ab aB AB)<block_end><def_stmt>compute_venn2_colors set_colors<block_start>'''
Given two base colors, computes combinations of colors corresponding to all regions of the venn diagram.
returns a list of 3 elements, providing colors for regions (10, 01, 11).
>>> str(compute_venn2_colors(('r', 'g'))).replace(' ', '')
'(array([1.,0.,0.]),array([0.,0.5,0.]),array([0.7,0.35,0.]))'
'''<line_sep>ccv=ColorConverter()<line_sep>base_colors=[np.array(ccv.to_rgb(c))<for>c set_colors]<line_sep><return>(base_colors[0] base_colors[1] mix_colors(base_colors[0] base_colors[1]))<block_end><def_stmt>compute_venn2_subsets a b<block_start>'''
Given two set or Counter objects, computes the sizes of (a & ~b, b & ~a, a & b).
Returns the result as a tuple.
>>> compute_venn2_subsets(set([1,2,3,4]), set([2,3,4,5,6]))
(1, 2, 3)
>>> compute_venn2_subsets(Counter([1,2,3,4]), Counter([2,3,4,5,6]))
(1, 2, 3)
>>> compute_venn2_subsets(Counter([]), Counter([]))
(0, 0, 0)
>>> compute_venn2_subsets(set([]), set([]))
(0, 0, 0)
>>> compute_venn2_subsets(set([1]), set([]))
(1, 0, 0)
>>> compute_venn2_subsets(set([1]), set([1]))
(0, 0, 1)
>>> compute_venn2_subsets(Counter([1]), Counter([1]))
(0, 0, 1)
>>> compute_venn2_subsets(set([1,2]), set([1]))
(1, 0, 1)
>>> compute_venn2_subsets(Counter([1,1,2,2,2]), Counter([1,2,3,3]))
(3, 2, 2)
>>> compute_venn2_subsets(Counter([1,1,2]), Counter([1,2,2]))
(1, 1, 2)
>>> compute_venn2_subsets(Counter([1,1]), set([]))
Traceback (most recent call last):
...
ValueError: Both arguments must be of the same type
'''<if_stmt><not>(type(a)<eq>type(b))<block_start><raise>ValueError("Both arguments must be of the same type")<block_end>set_size=len<if>type(a)<ne>Counter<else><lambda>x:sum(x.values())# We cannot use len to compute the cardinality of a Counter
<return>(set_size(a-b) set_size(b-a) set_size(a&b))<block_end><def_stmt>venn2_circles subsets normalize_to=1.0 alpha=1.0 color='black' linestyle='solid' linewidth=2.0 ax=<none> **kwargs<block_start>'''
Plots only the two circles for the corresponding Venn diagram.
Useful for debugging or enhancing the basic venn diagram.
parameters ``subsets``, ``normalize_to`` and ``ax`` are the same as in venn2()
``kwargs`` are passed as-is to matplotlib.patches.Circle.
returns a list of three Circle patches.
>>> c = venn2_circles((1, 2, 3))
>>> c = venn2_circles({'10': 1, '01': 2, '11': 3}) # Same effect
>>> c = venn2_circles([set([1,2,3,4]), set([2,3,4,5,6])]) # Also same effect
'''<if_stmt>isinstance(subsets dict)<block_start>subsets=[subsets.get(t 0)<for>t ['10' '01' '11']]<block_end><elif_stmt>len(subsets)<eq>2<block_start>subsets=compute_venn2_subsets(*subsets)<block_end>areas=compute_venn2_areas(subsets normalize_to)<line_sep>centers,radii=solve_venn2_circles(areas)<if_stmt>ax<is><none><block_start>ax=gca()<block_end>prepare_venn_axes(ax centers radii)<line_sep>result=[]<for_stmt>(c r) zip(centers radii)<block_start>circle=Circle(c r alpha=alpha edgecolor=color facecolor='none' linestyle=linestyle linewidth=linewidth **kwargs)<line_sep>ax.add_patch(circle)<line_sep>result.append(circle)<block_end><return>result<block_end><def_stmt>venn2 subsets set_labels=('A' 'B') set_colors=('r' 'g') alpha=0.4 normalize_to=1.0 ax=<none> subset_label_formatter=<none><block_start>'''Plots a 2-set area-weighted Venn diagram.
The subsets parameter can be one of the following:
- A list (or a tuple) containing two set objects.
- A dict, providing sizes of three diagram regions.
The regions are identified via two-letter binary codes ('10', '01', and '11'), hence a valid set could look like:
{'10': 10, '01': 20, '11': 40}. Unmentioned codes are considered to map to 0.
- A list (or a tuple) with three numbers, denoting the sizes of the regions in the following order:
(10, 01, 11)
``set_labels`` parameter is a list of two strings - set labels. Set it to None to disable set labels.
The ``set_colors`` parameter should be a list of two elements, specifying the "base colors" of the two circles.
The color of circle intersection will be computed based on those.
The ``normalize_to`` parameter specifies the total (on-axes) area of the circles to be drawn. Sometimes tuning it (together
with the overall fiture size) may be useful to fit the text labels better.
The return value is a ``VennDiagram`` object, that keeps references to the ``Text`` and ``Patch`` objects used on the plot
and lets you know the centers and radii of the circles, if you need it.
The ``ax`` parameter specifies the axes on which the plot will be drawn (None means current axes).
The ``subset_label_formatter`` parameter is a function that can be passed to format the labels
that describe the size of each subset.
>>> from matplotlib_venn import *
>>> v = venn2(subsets={'10': 1, '01': 1, '11': 1}, set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=(1, 1, 1), linestyle='dashed')
>>> v.get_patch_by_id('10').set_alpha(1.0)
>>> v.get_patch_by_id('10').set_color('white')
>>> v.get_label_by_id('10').set_text('Unknown')
>>> v.get_label_by_id('A').set_text('Set A')
You can provide sets themselves rather than subset sizes:
>>> v = venn2(subsets=[set([1,2]), set([2,3,4,5])], set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=[set([1,2]), set([2,3,4,5])], linestyle='dashed')
>>> print("%0.2f" % (v.get_circle_radius(1)/v.get_circle_radius(0)))
1.41
'''<if_stmt>isinstance(subsets dict)<block_start>subsets=[subsets.get(t 0)<for>t ['10' '01' '11']]<block_end><elif_stmt>len(subsets)<eq>2<block_start>subsets=compute_venn2_subsets(*subsets)<block_end><if_stmt>subset_label_formatter<is><none><block_start>subset_label_formatter=str<block_end>areas=compute_venn2_areas(subsets normalize_to)<line_sep>centers,radii=solve_venn2_circles(areas)<line_sep>regions=compute_venn2_regions(centers radii)<line_sep>colors=compute_venn2_colors(set_colors)<if_stmt>ax<is><none><block_start>ax=gca()<block_end>prepare_venn_axes(ax centers radii)<line_sep># Create and add patches and subset labels
patches=[r.make_patch()<for>r regions]<for_stmt>(p c) zip(patches colors)<block_start><if_stmt>p<is><not><none><block_start>p.set_facecolor(c)<line_sep>p.set_edgecolor('none')<line_sep>p.set_alpha(alpha)<line_sep>ax.add_patch(p)<block_end><block_end>label_positions=[r.label_position()<for>r regions]<line_sep>subset_labels=[ax.text(lbl[0] lbl[1] subset_label_formatter(s) va='center' ha='center')<if>lbl<is><not><none><else><none><for>(lbl s) zip(label_positions subsets)]<line_sep># Position set labels
<if_stmt>set_labels<is><not><none><block_start>padding=np.mean([r<times>0.1<for>r radii])<line_sep>label_positions=[centers[0]+np.array([0.0 -radii[0]-padding]) centers[1]+np.array([0.0 -radii[1]-padding])]<line_sep>labels=[ax.text(pos[0] pos[1] txt size='large' ha='right' va='top')<for>(pos txt) zip(label_positions set_labels)]<line_sep>labels[1].set_ha('left')<block_end><else_stmt><block_start>labels=<none><block_end><return>VennDiagram(patches subset_labels labels centers radii)<block_end> |
# See also examples/example_track/track_meta.py for a longer, commented example
track=dict(author_username='dansbecker' course_name='Machine Learning' course_url='https://www.kaggle.com/learn/intro-to-machine-learning')<line_sep>lessons=[dict(topic='Your First BiqQuery ML Model' ) ]<line_sep>notebooks=[dict(filename='tut1.ipynb' lesson_idx=0 type='tutorial' scriptid=4076893 ) dict(filename='ex1.ipynb' lesson_idx=0 type='exercise' scriptid=4077160 ) ]<line_sep> |
<class_stmt>Solution<block_start><def_stmt>canThreePartsEqualSum self A:List[int]<arrow>bool# Since all the three parts are equal, if we sum all element of arrary it should be a multiplication of 3
# so the sum of each part must be equal to sum of all element divided by 3
<block_start>quotient,remainder=divmod(sum(A) 3)<if_stmt>remainder<ne>0<block_start><return><false><block_end>subarray=0<line_sep>partitions=0<for_stmt>num A<block_start>subarray<augadd>num<if_stmt>subarray<eq>quotient<block_start>partitions<augadd>1<line_sep>subarray=0<block_end><block_end># Check if it consist at least 3 partitions
<return>partitions<ge>3<block_end><block_end> |
<import_stmt>os<import_stmt>sys<line_sep>sys.path.append('.')<import_stmt>argparse<import_stmt>numpy<as>np<import_stmt>os.path<as>osp<import_from_stmt>multiprocessing Process Pool<import_from_stmt>glob glob<import_from_stmt>tqdm tqdm<import_stmt>tensorflow<as>tf<import_from_stmt>PIL Image<import_from_stmt>lib.core.config INSTA_DIR INSTA_IMG_DIR<def_stmt>process_single_record fname outdir split<block_start>sess=tf.Session()<line_sep>#print(fname)
record_name=fname.split('/')[-1]<for_stmt>vid_idx,serialized_ex enumerate(tf.python_io.tf_record_iterator(fname))#print(vid_idx)
<block_start>os.makedirs(osp.join(outdir split record_name str(vid_idx)) exist_ok=<true>)<line_sep>example=tf.train.Example()<line_sep>example.ParseFromString(serialized_ex)<line_sep>N=int(example.features.feature['meta/N'].int64_list.value[0])<line_sep>images_data=example.features.feature['image/encoded'].bytes_list.value<for_stmt>i range(N)<block_start>image=np.expand_dims(sess.run(tf.image.decode_jpeg(images_data[i] channels=3)) axis=0)<line_sep>#video.append(image)
image=Image.fromarray(np.squeeze(image axis=0))<line_sep>image.save(osp.join(outdir split record_name str(vid_idx) str(i)+".jpg"))<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--inp_dir' type=str help='tfrecords file path' default=INSTA_DIR)<line_sep>parser.add_argument('--n' type=int help='total num of workers')<line_sep>parser.add_argument('--i' type=int help='current index of worker (from 0 to n-1)')<line_sep>parser.add_argument('--split' type=str help='train or test')<line_sep>parser.add_argument('--out_dir' type=str help='output images path' default=INSTA_IMG_DIR)<line_sep>args=parser.parse_args()<line_sep>fpaths=glob(f'{args.inp_dir}/{args.split}/*.tfrecord')<line_sep>fpaths=sorted(fpaths)<line_sep>total=len(fpaths)<line_sep>fpaths=fpaths[args.i<times>total<floordiv>args.n:(args.i+1)<times>total<floordiv>args.n]<line_sep>#print(fpaths)
#print(len(fpaths))
os.makedirs(args.out_dir exist_ok=<true>)<for_stmt>idx,fp enumerate(fpaths)<block_start>process_single_record(fp args.out_dir args.split)<block_end><block_end> |
<import_from_stmt>conans ConanFile tools<class_stmt>HapplyConan(ConanFile)<block_start>name="happly"<line_sep>url="https://github.com/conan-io/conan-center-index"<line_sep>homepage="https://github.com/nmwsharp/happly"<line_sep>topics=("conan" "happly" "ply" "3D")<line_sep>license="MIT"<line_sep>description="A C++ header-only parser for the PLY file format. Parse .ply happily!"<line_sep>settings="compiler"<line_sep>no_copy_source=<true><line_sep>@property<def_stmt>_source_subfolder self<block_start><return>"source_subfolder"<block_end><def_stmt>validate self<block_start><if_stmt>self.settings.compiler.cppstd<block_start>tools.check_min_cppstd(self 11)<block_end><block_end><def_stmt>source self<block_start>tools.get(**self.conan_data["sources"][self.version] destination=self._source_subfolder strip_root=<true>)<block_end><def_stmt>package self<block_start>self.copy("LICENSE" src=self._source_subfolder dst="licenses")<line_sep>self.copy("happly.h" src=self._source_subfolder dst="include")<block_end><def_stmt>package_id self<block_start>self.info.header_only()<block_end><block_end> |
<import_stmt>pytest<import_stmt>stweet<as>st<import_from_stmt>tests.test_util get_temp_test_file_name get_tweets_to_tweet_output_test two_lists_assert_equal<def_stmt>test_csv_serialization <block_start>csv_filename=get_temp_test_file_name('csv')<line_sep>tweets_collector=st.CollectorTweetOutput()<line_sep>get_tweets_to_tweet_output_test([st.CsvTweetOutput(csv_filename) tweets_collector])<line_sep>tweets_from_csv=st.read_tweets_from_csv_file(csv_filename)<line_sep>two_lists_assert_equal(tweets_from_csv tweets_collector.get_raw_list())<block_end><def_stmt>test_file_json_lines_serialization <block_start>jl_filename=get_temp_test_file_name('jl')<line_sep>tweets_collector=st.CollectorTweetOutput()<line_sep>get_tweets_to_tweet_output_test([st.JsonLineFileTweetOutput(jl_filename) tweets_collector])<line_sep>tweets_from_jl=st.read_tweets_from_json_lines_file(jl_filename)<line_sep>two_lists_assert_equal(tweets_from_jl tweets_collector.get_raw_list())<block_end> |
<def_stmt>segmented_sieve n# Create an boolean array with all values True
<block_start>primes=[<true>]<times>n<for_stmt>p range(2 n)#If prime[p] is True,it is a prime and its multiples are not prime
<block_start><if_stmt>primes[p]<block_start><for_stmt>i range(2<times>p n p)# Mark every multiple of a prime as not prime
<block_start>primes[i]=<false><block_end><block_end><block_end>#If value is true it is prime and print value
<for_stmt>l range(2 n)<block_start><if_stmt>primes[l]<block_start>print(f"{l} ")<block_end><block_end><block_end>#Test
<while_stmt><true><block_start><try_stmt><block_start>input_value=int(input("Please a number: "))<line_sep>segmented_sieve(input_value)<line_sep><break><block_end><except_stmt>ValueError<block_start>print("No valid integer! Please try again ...")<block_end><block_end> |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=no-member,relative-import
"""Unit tests for blink_idl_parser.py."""<import_stmt>unittest<import_from_stmt>blink_idl_parser BlinkIDLParser<class_stmt>BlinkIDLParserTest(unittest.TestCase)<block_start><def_stmt>test_missing_semicolon_between_definitions self# No semicolon after enum definition.
<block_start>text='''enum TestEnum { "value" } dictionary TestDictionary {};'''<line_sep>parser=BlinkIDLParser()<line_sep>parser.ParseText(filename='' data=text)<line_sep>self.assertGreater(parser.GetErrors() 0)<block_end><block_end> |
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>shutil<import_stmt>sys<import_stmt>tempfile<import_from_stmt>observations.r.bcdeter bcdeter<def_stmt>test_bcdeter <block_start>"""Test module bcdeter.py by downloading
bcdeter.csv and testing shape of
extracted data has 95 rows and 3 columns
"""<line_sep>test_path=tempfile.mkdtemp()<line_sep>x_train,metadata=bcdeter(test_path)<try_stmt><block_start><assert_stmt>x_train.shape<eq>(95 3)<block_end><except_stmt><block_start>shutil.rmtree(test_path)<line_sep><raise>()<block_end><block_end> |
# this is the three dimensional configuration space for rrt
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""<import_stmt>numpy<as>np<line_sep># from utils3D import OBB2AABB
<def_stmt>R_matrix z_angle y_angle x_angle# s angle: row; y angle: pitch; z angle: yaw
# generate rotation matrix in SO3
# RzRyRx = R, ZYX intrinsic rotation
# also (r1,r2,r3) in R3*3 in {W} frame
# used in obb.O
# [[R p]
# [0T 1]] gives transformation from body to world
<block_start><return>np.array([[np.cos(z_angle) -np.sin(z_angle) 0.0] [np.sin(z_angle) np.cos(z_angle) 0.0] [0.0 0.0 1.0]])@np.array([[np.cos(y_angle) 0.0 np.sin(y_angle)] [0.0 1.0 0.0] [-np.sin(y_angle) 0.0 np.cos(y_angle)]])@np.array([[1.0 0.0 0.0] [0.0 np.cos(x_angle) -np.sin(x_angle)] [0.0 np.sin(x_angle) np.cos(x_angle)]])<block_end><def_stmt>getblocks # AABBs
<block_start>block=[[4.00e+00 1.20e+01 0.00e+00 5.00e+00 2.00e+01 5.00e+00] [5.5e+00 1.20e+01 0.00e+00 1.00e+01 1.30e+01 5.00e+00] [1.00e+01 1.20e+01 0.00e+00 1.40e+01 1.30e+01 5.00e+00] [1.00e+01 9.00e+00 0.00e+00 2.00e+01 1.00e+01 5.00e+00] [9.00e+00 6.00e+00 0.00e+00 1.00e+01 1.00e+01 5.00e+00]]<line_sep>Obstacles=[]<for_stmt>i block<block_start>i=np.array(i)<line_sep>Obstacles.append([j<for>j i])<block_end><return>np.array(Obstacles)<block_end><def_stmt>getballs <block_start>spheres=[[2.0 6.0 2.5 1.0] [14.0 14.0 2.5 2]]<line_sep>Obstacles=[]<for_stmt>i spheres<block_start>Obstacles.append([j<for>j i])<block_end><return>np.array(Obstacles)<block_end><def_stmt>getAABB blocks# used for Pyrr package for detecting collision
<block_start>AABB=[]<for_stmt>i blocks<block_start>AABB.append(np.array([np.add(i[0:3] -0) np.add(i[3:6] 0)]))# make AABBs alittle bit of larger
<block_end><return>AABB<block_end><def_stmt>getAABB2 blocks# used in lineAABB
<block_start>AABB=[]<for_stmt>i blocks<block_start>AABB.append(aabb(i))<block_end><return>AABB<block_end><def_stmt>add_block block=[1.51e+01 0.00e+00 2.10e+00 1.59e+01 5.00e+00 6.00e+00]<block_start><return>block<block_end><class_stmt>aabb(object)# make AABB out of blocks,
# P: center point
# E: extents
# O: Rotation matrix in SO(3), in {w}
<block_start><def_stmt>__init__ self AABB<block_start>self.P=[(AABB[3]+AABB[0])/2 (AABB[4]+AABB[1])/2 (AABB[5]+AABB[2])/2]# center point
self.E=[(AABB[3]-AABB[0])/2 (AABB[4]-AABB[1])/2 (AABB[5]-AABB[2])/2]# extents
self.O=[[1 0 0] [0 1 0] [0 0 1]]<block_end><block_end><class_stmt>obb(object)# P: center point
# E: extents
# O: Rotation matrix in SO(3), in {w}
<block_start><def_stmt>__init__ self P E O<block_start>self.P=P<line_sep>self.E=E<line_sep>self.O=O<line_sep>self.T=np.vstack([np.column_stack([self.O.T -self.O.T@self.P]) [0 0 0 1]])<block_end><block_end><class_stmt>env()<block_start><def_stmt>__init__ self xmin=0 ymin=0 zmin=0 xmax=20 ymax=20 zmax=5 resolution=1# def __init__(self, xmin=-5, ymin=0, zmin=-5, xmax=10, ymax=5, zmax=10, resolution=1):
<block_start>self.resolution=resolution<line_sep>self.boundary=np.array([xmin ymin zmin xmax ymax zmax])<line_sep>self.blocks=getblocks()<line_sep>self.AABB=getAABB2(self.blocks)<line_sep>self.AABB_pyrr=getAABB(self.blocks)<line_sep>self.balls=getballs()<line_sep>self.OBB=np.array([obb([5.0 7.0 2.5] [0.5 2.0 2.5] R_matrix(135 0 0)) obb([12.0 4.0 2.5] [0.5 2.0 2.5] R_matrix(45 0 0))])<line_sep>self.start=np.array([2.0 2.0 2.0])<line_sep>self.goal=np.array([6.0 16.0 0.0])<line_sep>self.t=0<block_end># time
<def_stmt>New_block self<block_start>newblock=add_block()<line_sep>self.blocks=np.vstack([self.blocks newblock])<line_sep>self.AABB=getAABB2(self.blocks)<line_sep>self.AABB_pyrr=getAABB(self.blocks)<block_end><def_stmt>move_start self x<block_start>self.start=x<block_end><def_stmt>move_block self a=[0 0 0] s=0 v=[0.1 0 0] block_to_move=0 mode='translation'# t is time , v is velocity in R3, a is acceleration in R3, s is increment ini time,
# R is an orthorgonal transform in R3*3, is the rotation matrix
# (s',t') = (s + tv, t) is uniform transformation
# (s',t') = (s + a, t + s) is a translation
<block_start><if_stmt>mode<eq>'translation'<block_start>ori=np.array(self.blocks[block_to_move])<line_sep>self.blocks[block_to_move]=np.array([ori[0]+a[0] ori[1]+a[1] ori[2]+a[2] ori[3]+a[0] ori[4]+a[1] ori[5]+a[2]])<line_sep>self.AABB[block_to_move].P=[self.AABB[block_to_move].P[0]+a[0] self.AABB[block_to_move].P[1]+a[1] self.AABB[block_to_move].P[2]+a[2]]<line_sep>self.t<augadd>s<line_sep># return a range of block that the block might moved
a=self.blocks[block_to_move]<line_sep><return>np.array([a[0]-self.resolution a[1]-self.resolution a[2]-self.resolution a[3]+self.resolution a[4]+self.resolution a[5]+self.resolution]) np.array([ori[0]-self.resolution ori[1]-self.resolution ori[2]-self.resolution ori[3]+self.resolution ori[4]+self.resolution ori[5]+self.resolution])<line_sep># return a,ori
<block_end># (s',t') = (Rx, t)
<block_end><def_stmt>move_OBB self obb_to_move=0 theta=[0 0 0] translation=[0 0 0]# theta stands for rotational angles around three principle axis in world frame
# translation stands for translation in the world frame
<block_start>ori=[self.OBB[obb_to_move]]<line_sep># move obb position
self.OBB[obb_to_move].P=[self.OBB[obb_to_move].P[0]+translation[0] self.OBB[obb_to_move].P[1]+translation[1] self.OBB[obb_to_move].P[2]+translation[2]]<line_sep># Calculate orientation
self.OBB[obb_to_move].O=R_matrix(z_angle=theta[0] y_angle=theta[1] x_angle=theta[2])<line_sep># generating transformation matrix
self.OBB[obb_to_move].T=np.vstack([np.column_stack([self.OBB[obb_to_move].O.T -self.OBB[obb_to_move].O.T@self.OBB[obb_to_move].P]) [translation[0] translation[1] translation[2] 1]])<line_sep><return>self.OBB[obb_to_move] ori[0]<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>newenv=env()<block_end> |
"""
MCP3008 analog to digital converter
"""<import_stmt>logging<import_from_stmt>typing cast<import_from_stmt>mqtt_io.types ConfigType SensorValueType<import_from_stmt>. GenericSensor<line_sep>REQUIREMENTS=("adafruit-mcp3008" )<line_sep>CONFIG_SCHEMA={"spi_port":dict(type="integer" required=<false> empty=<false> default=0) "spi_device":dict(type="integer" required=<false> empty=<false> default=0) "chip_addr":dict(type="integer" required=<false> empty=<false> default=0) }<line_sep>_LOG=logging.getLogger(__name__)<class_stmt>Sensor(GenericSensor)<block_start>"""
Implementation of MCP3008 ADC sensor.
"""<line_sep>SENSOR_SCHEMA={"channel":dict(type="integer" required=<true> min=0 max=7 )}<def_stmt>setup_module self<arrow><none><block_start>"""
Init the mcp on SPI CE0
"""<line_sep># pylint: disable=import-outside-toplevel,import-error
<import_stmt>Adafruit_GPIO.SPI<as>SPI# type: ignore
<import_stmt>Adafruit_MCP3008# type: ignore
self.mcp=Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(self.config["spi_port"] self.config["spi_device"]))<block_end><def_stmt>get_value self sens_conf:ConfigType<arrow>SensorValueType<block_start>"""
Get the analog value from the adc for the configured channel
"""<line_sep># Returns an integer from 0-1023
<return>cast(int self.mcp.read_adc(sens_conf["channel"]))<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> absolute_import<import_stmt>unittest<import_from_stmt>pyes scriptfields<class_stmt>ScriptFieldsTest(unittest.TestCase)<block_start><def_stmt>test_scriptfieldserror_imported self<block_start>self.assertTrue(hasattr(scriptfields 'ScriptFieldsError'))<block_end><def_stmt>test_ignore_failure self<block_start>fields=scriptfields.ScriptFields("a_field" "return _source.field" ignore_failure=<true>)<line_sep>serialized=fields.serialize()<line_sep>self.assertIn("ignore_failure" serialized.get("a_field" {}))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
"""Matplotlib Density Comparison plot."""<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_from_stmt>...distplot plot_dist<import_from_stmt>...plot_utils _scale_fig_size<import_from_stmt>. backend_kwarg_defaults backend_show<def_stmt>plot_dist_comparison ax nvars ngroups figsize dc_plotters legend groups textsize labeller prior_kwargs posterior_kwargs observed_kwargs backend_kwargs show <block_start>"""Matplotlib Density Comparison plot."""<if_stmt>backend_kwargs<is><none><block_start>backend_kwargs={}<block_end>backend_kwargs={**backend_kwarg_defaults() **backend_kwargs }<if_stmt>prior_kwargs<is><none><block_start>prior_kwargs={}<block_end><if_stmt>posterior_kwargs<is><none><block_start>posterior_kwargs={}<block_end><if_stmt>observed_kwargs<is><none><block_start>observed_kwargs={}<block_end><if_stmt>backend_kwargs<is><none><block_start>backend_kwargs={}<block_end>(figsize _ _ _ linewidth _)=_scale_fig_size(figsize textsize 2<times>nvars ngroups)<line_sep>backend_kwargs.setdefault("figsize" figsize)<line_sep>posterior_kwargs.setdefault("plot_kwargs" {})<line_sep>posterior_kwargs["plot_kwargs"]["color"]=posterior_kwargs["plot_kwargs"].get("color" "C0")<line_sep>posterior_kwargs["plot_kwargs"].setdefault("linewidth" linewidth)<line_sep>posterior_kwargs.setdefault("hist_kwargs" {})<line_sep>posterior_kwargs["hist_kwargs"].setdefault("alpha" 0.5)<line_sep>prior_kwargs.setdefault("plot_kwargs" {})<line_sep>prior_kwargs["plot_kwargs"]["color"]=prior_kwargs["plot_kwargs"].get("color" "C1")<line_sep>prior_kwargs["plot_kwargs"].setdefault("linewidth" linewidth)<line_sep>prior_kwargs.setdefault("hist_kwargs" {})<line_sep>prior_kwargs["hist_kwargs"].setdefault("alpha" 0.5)<line_sep>observed_kwargs.setdefault("plot_kwargs" {})<line_sep>observed_kwargs["plot_kwargs"]["color"]=observed_kwargs["plot_kwargs"].get("color" "C2")<line_sep>observed_kwargs["plot_kwargs"].setdefault("linewidth" linewidth)<line_sep>observed_kwargs.setdefault("hist_kwargs" 
{})<line_sep>observed_kwargs["hist_kwargs"].setdefault("alpha" 0.5)<if_stmt>ax<is><none><block_start>axes=np.empty((nvars ngroups+1) dtype=object)<line_sep>fig=plt.figure(**backend_kwargs)<line_sep>gs=fig.add_gridspec(ncols=ngroups nrows=nvars<times>2)<for_stmt>i range(nvars)<block_start><for_stmt>j range(ngroups)<block_start>axes[i j]=fig.add_subplot(gs[2<times>i j])<block_end>axes[i -1]=fig.add_subplot(gs[2<times>i+1 :])<block_end><block_end><else_stmt><block_start>axes=ax<if_stmt>ax.shape<ne>(nvars ngroups+1)<block_start><raise>ValueError("Found {} shape of axes, which is not equal to data shape {}.".format(axes.shape (nvars ngroups+1)))<block_end><block_end><for_stmt>idx,plotter enumerate(dc_plotters)<block_start>group=groups[idx]<line_sep>kwargs=(prior_kwargs<if>group.startswith("prior")<else>posterior_kwargs<if>group.startswith("posterior")<else>observed_kwargs)<for_stmt>idx2,(var_name sel isel data ) enumerate(plotter)<block_start>label=f"{group}"<line_sep>plot_dist(data label=label<if>legend<else><none> ax=axes[idx2 idx] **kwargs )<line_sep>plot_dist(data label=label<if>legend<else><none> ax=axes[idx2 -1] **kwargs )<if_stmt>idx<eq>0<block_start>axes[idx2 -1].set_xlabel(labeller.make_label_vert(var_name sel isel))<block_end><block_end><block_end><if_stmt>backend_show(show)<block_start>plt.show()<block_end><return>axes<block_end> |
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>shutil<import_stmt>sys<import_stmt>tempfile<import_from_stmt>observations.r.us_pop us_pop<def_stmt>test_us_pop <block_start>"""Test module us_pop.py by downloading
us_pop.csv and testing shape of
extracted data has 22 rows and 2 columns
"""<line_sep>test_path=tempfile.mkdtemp()<line_sep>x_train,metadata=us_pop(test_path)<try_stmt><block_start><assert_stmt>x_train.shape<eq>(22 2)<block_end><except_stmt><block_start>shutil.rmtree(test_path)<line_sep><raise>()<block_end><block_end> |
<import_from_stmt>app.factory create_app celery_app<line_sep>app=create_app(config_name="DEVELOPMENT")<line_sep>app.app_context().push()<if_stmt>__name__<eq>"__main__"<block_start>app.run()<block_end> |
# test unicode in identifiers
# comment
# αβγδϵφζ
# global identifiers
α=1<line_sep>αβγ=2<line_sep>bβ=3<line_sep>βb=4<line_sep>print(α αβγ bβ βb)<line_sep># function, argument, local identifiers
<def_stmt>α β γ<block_start>δ=β+γ<line_sep>print(β γ δ)<block_end>α(1 2)<line_sep># class, method identifiers
<class_stmt>φ<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>δ self ϵ<block_start>print(ϵ)<block_end><block_end>zζzζz=φ()<if_stmt>hasattr(zζzζz "δ")<block_start>zζzζz.δ(ϵ=123)<block_end> |
<import_stmt>datetime<import_stmt>pytz<import_from_stmt>tws_async *<line_sep>stocks=[Stock('TSLA') Stock('AAPL') Stock('GOOG') Stock('INTC' primaryExchange='NASDAQ')]<line_sep>forexs=[Forex('EURUSD') Forex('GBPUSD') Forex('USDJPY')]<line_sep>endDate=datetime.date.today()<line_sep>startDate=endDate-datetime.timedelta(days=7)<line_sep>histReqs=[]<for_stmt>date util.dateRange(startDate endDate)<block_start>histReqs<augadd>[HistRequest(stock date)<for>stock stocks]<line_sep>histReqs<augadd>[HistRequest(forex date whatToShow='MIDPOINT' durationStr='30 D' barSizeSetting='1 day')<for>forex forexs]<block_end>timezone=datetime.timezone.utc<line_sep># timezone = pytz.timezone('Europe/Amsterdam')
# timezone = pytz.timezone('US/Eastern')
util.logToConsole()<line_sep>tws=HistRequester()<line_sep>tws.connect('127.0.0.1' 7497 clientId=1)<line_sep>task=tws.download(histReqs rootDir='data' timezone=timezone)<line_sep>tws.run(task)<line_sep> |
"""cff2Lib_test.py -- unit test for Adobe CFF fonts."""<import_from_stmt>fontTools.ttLib TTFont<import_from_stmt>io StringIO<import_stmt>re<import_stmt>os<import_stmt>unittest<line_sep>CURR_DIR=os.path.abspath(os.path.dirname(os.path.realpath(__file__)))<line_sep>DATA_DIR=os.path.join(CURR_DIR 'data')<line_sep>CFF_TTX=os.path.join(DATA_DIR "C_F_F__2.ttx")<line_sep>CFF_BIN=os.path.join(DATA_DIR "C_F_F__2.bin")<def_stmt>strip_VariableItems string# ttlib changes with the fontTools version
<block_start>string=re.sub(' ttLibVersion=".*"' '' string)<line_sep># head table checksum and mod date changes with each save.
string=re.sub('<checkSumAdjustment value="[^"]+"/>' '' string)<line_sep>string=re.sub('<modified value="[^"]+"/>' '' string)<line_sep><return>string<block_end><class_stmt>CFFTableTest(unittest.TestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start><with_stmt>open(CFF_BIN 'rb')<as>f<block_start>font=TTFont(file=CFF_BIN)<line_sep>cffTable=font['CFF2']<line_sep>cls.cff2Data=cffTable.compile(font)<block_end><with_stmt>open(CFF_TTX 'r')<as>f<block_start>cff2XML=f.read()<line_sep>cff2XML=strip_VariableItems(cff2XML)<line_sep>cls.cff2XML=cff2XML.splitlines()<block_end><block_end><def_stmt>test_toXML self<block_start>font=TTFont(file=CFF_BIN)<line_sep>cffTable=font['CFF2']<line_sep>cffData=cffTable.compile(font)<line_sep>out=StringIO()<line_sep>font.saveXML(out)<line_sep>cff2XML=out.getvalue()<line_sep>cff2XML=strip_VariableItems(cff2XML)<line_sep>cff2XML=cff2XML.splitlines()<line_sep>self.assertEqual(cff2XML self.cff2XML)<block_end><def_stmt>test_fromXML self<block_start>font=TTFont(sfntVersion='OTTO')<line_sep>font.importXML(CFF_TTX)<line_sep>cffTable=font['CFF2']<line_sep>cff2Data=cffTable.compile(font)<line_sep>self.assertEqual(cff2Data self.cff2Data)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
### based on https://github.com/kylemcdonald/Parametric-t-SNE/blob/master/Parametric%20t-SNE%20(Keras).ipynb
<import_stmt>numpy<as>np<import_from_stmt>tensorflow.keras backend<as>K<import_from_stmt>tensorflow.keras.losses categorical_crossentropy<import_from_stmt>tqdm.autonotebook tqdm<import_stmt>tensorflow<as>tf<def_stmt>Hbeta D beta<block_start>"""Computes the Gaussian kernel values given a vector of
squared Euclidean distances, and the precision of the Gaussian kernel.
The function also computes the perplexity (P) of the distribution."""<line_sep>P=np.exp(-D<times>beta)<line_sep>sumP=np.sum(P)<line_sep>H=np.log(sumP)+beta<times>np.sum(np.multiply(D P))/sumP<line_sep>P=P/sumP<line_sep><return>H P<block_end><def_stmt>x2p X u=15 tol=1e-4 print_iter=500 max_tries=50 verbose=0<block_start>"""
% X2P Identifies appropriate sigma's to get kk NNs up to some tolerance
%
% [P, beta] = x2p(xx, kk, tol)
%
% Identifies the required precision (= 1 / variance^2) to obtain a Gaussian
% kernel with a certain uncertainty for every datapoint. The desired
% uncertainty can be specified through the perplexity u (default = 15). The
% desired perplexity is obtained up to some tolerance that can be specified
% by tol (default = 1e-4).
% The function returns the final Gaussian kernel in P, as well as the
% employed precisions per instance in beta.
%
"""<line_sep># Initialize some variables
n=X.shape[0]# number of instances
P=np.zeros((n n))# empty probability matrix
beta=np.ones(n)# empty precision vector
logU=np.log(u)# log of perplexity (= entropy)
# Compute pairwise distances
<if_stmt>verbose<g>0<block_start>print("Computing pairwise distances...")<block_end>sum_X=np.sum(np.square(X) axis=1)<line_sep># note: translating sum_X' from matlab to numpy means using reshape to add a dimension
D=sum_X+sum_X[: <none>]+-2<times>X.dot(X.T)<line_sep># Run over all datapoints
<if_stmt>verbose<g>0<block_start>print("Computing P-values...")<block_end><for_stmt>i range(n)<block_start><if_stmt>verbose<g>1<and>print_iter<and>i%print_iter<eq>0<block_start>print("Computed P-values {} of {} datapoints...".format(i n))<block_end># Set minimum and maximum values for precision
betamin=float("-inf")<line_sep>betamax=float("+inf")<line_sep># Compute the Gaussian kernel and entropy for the current precision
indices=np.concatenate((np.arange(0 i) np.arange(i+1 n)))<line_sep>Di=D[i indices]<line_sep>H,thisP=Hbeta(Di beta[i])<line_sep># Evaluate whether the perplexity is within tolerance
Hdiff=H-logU<line_sep>tries=0<while_stmt>abs(Hdiff)<g>tol<and>tries<l>max_tries# If not, increase or decrease precision
<block_start><if_stmt>Hdiff<g>0<block_start>betamin=beta[i]<if_stmt>np.isinf(betamax)<block_start>beta[i]<augmul>2<block_end><else_stmt><block_start>beta[i]=(beta[i]+betamax)/2<block_end><block_end><else_stmt><block_start>betamax=beta[i]<if_stmt>np.isinf(betamin)<block_start>beta[i]<augdiv>2<block_end><else_stmt><block_start>beta[i]=(beta[i]+betamin)/2<block_end><block_end># Recompute the values
H,thisP=Hbeta(Di beta[i])<line_sep>Hdiff=H-logU<line_sep>tries<augadd>1<block_end># Set the final row of P
P[i indices]=thisP<block_end><if_stmt>verbose<g>0<block_start>print("Mean value of sigma: {}".format(np.mean(np.sqrt(1/beta))))<line_sep>print("Minimum value of sigma: {}".format(np.min(np.sqrt(1/beta))))<line_sep>print("Maximum value of sigma: {}".format(np.max(np.sqrt(1/beta))))<block_end><return>P beta<block_end><def_stmt>compute_joint_probabilities samples batch_size=5000 d=2 perplexity=30 tol=1e-5 verbose=0<block_start>""" This function computes the probababilities in X, split up into batches
% Gaussians employed in the high-dimensional space have the specified
% perplexity (default = 30). The number of degrees of freedom of the
% Student-t distribution may be specified through v (default = d - 1).
"""<line_sep>v=d-1<line_sep># Initialize some variables
n=samples.shape[0]<line_sep>batch_size=min(batch_size n)<line_sep># Precompute joint probabilities for all batches
<if_stmt>verbose<g>0<block_start>print("Precomputing P-values...")<block_end>batch_count=int(n/batch_size)<line_sep>P=np.zeros((batch_count batch_size batch_size))<line_sep># for each batch of data
<for_stmt>i,start enumerate(tqdm(range(0 n-batch_size+1 batch_size)))# select batch
<block_start>curX=samples[start:start+batch_size]<line_sep># compute affinities using fixed perplexity
P[i],_=x2p(curX perplexity tol verbose=verbose)<line_sep># make sure we don't have NaN's
P[i][np.isnan(P[i])]=0<line_sep># make symmetric
P[i]=P[i]+P[i].T# / 2
# obtain estimation of joint probabilities
P[i]=P[i]/P[i].sum()<line_sep>P[i]=np.maximum(P[i] np.finfo(P[i].dtype).eps)<block_end><return>P<block_end><def_stmt>z2p z d n eps=10e-15<block_start>""" Computes the low dimensional probability
"""<line_sep>v=d-1<line_sep>sum_act=tf.math.reduce_sum(tf.math.square(z) axis=1)<line_sep>Q=K.reshape(sum_act [-1 1])+-2<times>tf.keras.backend.dot(z tf.transpose(z))<line_sep>Q=(sum_act+Q)/v<line_sep>Q=tf.math.pow(1+Q -(v+1)/2)<line_sep>Q<augmul>1-np.eye(n)<line_sep>Q<augdiv>tf.math.reduce_sum(Q)<line_sep>Q=tf.math.maximum(Q eps)<line_sep><return>Q<block_end><def_stmt>tsne_loss d batch_size eps=10e-15# v = d - 1.0
<block_start><def_stmt>loss P Z<block_start>""" KL divergence
P is the joint probabilities for this batch (Keras loss functions call this y_true)
Z is the low-dimensional output (Keras loss functions call this y_pred)
"""<line_sep>Q=z2p(Z d n=batch_size eps=eps)<line_sep><return>tf.math.reduce_sum(P<times>tf.math.log((P+eps)/(Q+eps)))<block_end><return>loss<block_end> |
"""
Copyright (c) 2018 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Query the koji build target, if any, to find the enabled architectures. Remove any excluded
architectures, and return the resulting list.
"""<import_from_stmt>typing List Optional<import_from_stmt>atomic_reactor.plugin Plugin<import_from_stmt>atomic_reactor.util is_scratch_build is_isolated_build map_to_user_params<import_from_stmt>atomic_reactor.constants PLUGIN_CHECK_AND_SET_PLATFORMS_KEY<import_from_stmt>atomic_reactor.config get_koji_session<class_stmt>CheckAndSetPlatformsPlugin(Plugin)<block_start>key=PLUGIN_CHECK_AND_SET_PLATFORMS_KEY<line_sep>is_allowed_to_fail=<false><line_sep>args_from_user_params=map_to_user_params("koji_target")<def_stmt>__init__ self workflow koji_target=<none><block_start>"""
constructor
:param workflow: DockerBuildWorkflow instance
:param koji_target: str, Koji build target name
"""<line_sep># call parent constructor
super(CheckAndSetPlatformsPlugin self).__init__(workflow)<line_sep>self.koji_target=koji_target<block_end><def_stmt>_limit_platforms self platforms:List[str]<arrow>List[str]<block_start>"""Limit platforms in a specific range by platforms config.
:param platforms: a list of platforms to be filtered.
:type platforms: list[str]
:return: the limited platforms.
:rtype: list[str]
"""<line_sep>final_platforms=set(platforms)<line_sep>source_config=self.workflow.source.config<line_sep>only_platforms=set(source_config.only_platforms)<line_sep>excluded_platforms=set(source_config.excluded_platforms)<if_stmt>only_platforms<block_start><if_stmt>only_platforms<eq>excluded_platforms<block_start>self.log.warning('only and not platforms are the same: %r' only_platforms)<block_end>final_platforms<augand>only_platforms<block_end><return>list(final_platforms-excluded_platforms)<block_end><def_stmt>run self<arrow>Optional[List[str]]<block_start>"""
run the plugin
"""<line_sep>user_platforms:Optional[List[str]]=self.workflow.user_params.get("platforms")<if_stmt>self.koji_target<block_start>koji_session=get_koji_session(self.workflow.conf)<line_sep>self.log.info("Checking koji target for platforms")<line_sep>event_id=koji_session.getLastEvent()['id']<line_sep>target_info=koji_session.getBuildTarget(self.koji_target event=event_id)<line_sep>build_tag=target_info['build_tag']<line_sep>koji_build_conf=koji_session.getBuildConfig(build_tag event=event_id)<line_sep>koji_platforms=koji_build_conf['arches']<if_stmt><not>koji_platforms<block_start>self.log.info("No platforms found in koji target")<line_sep><return><none><block_end>platforms=koji_platforms.split()<line_sep>self.log.info("Koji platforms are %s" sorted(platforms))<if_stmt>is_scratch_build(self.workflow)<or>is_isolated_build(self.workflow)<block_start>override_platforms=set(user_platforms<or>[])<if_stmt>override_platforms<and>override_platforms<ne>set(platforms)<block_start>sorted_platforms=sorted(override_platforms)<line_sep>self.log.info("Received user specified platforms %s" sorted_platforms)<line_sep>self.log.info("Using them instead of koji platforms")<line_sep># platforms from user params do not match platforms from koji target
# that almost certainly means they were overridden and should be used
self.workflow.build_dir.init_build_dirs(sorted_platforms self.workflow.source)<line_sep><return>sorted_platforms<block_end><block_end><block_end><else_stmt><block_start>platforms=user_platforms<line_sep>self.log.info("No koji platforms. User specified platforms are %s" sorted(platforms)<if>platforms<else><none> )<block_end><if_stmt><not>platforms<block_start><raise>RuntimeError("Cannot determine platforms; no koji target or platform list")<block_end># Filter platforms based on configured remote hosts
remote_host_pools=self.workflow.conf.remote_hosts.get("pools" {})<line_sep>enabled_platforms=[]<line_sep>defined_but_disabled=[]<def_stmt>has_enabled_hosts platform:str<arrow>bool<block_start>platform_hosts=remote_host_pools.get(platform {})<line_sep><return>any(host_info["enabled"]<for>host_info platform_hosts.values())<block_end><for_stmt>p platforms<block_start><if_stmt>has_enabled_hosts(p)<block_start>enabled_platforms.append(p)<block_end><elif_stmt>p<in>remote_host_pools<block_start>defined_but_disabled.append(p)<block_end><else_stmt><block_start>self.log.warning("No remote hosts found for platform '%s' in "<concat>"reactor config map, skipping" p)<block_end><block_end><if_stmt>defined_but_disabled<block_start>msg='Platforms specified in config map, but have all remote hosts disabled'<concat>' {}'.format(defined_but_disabled)<line_sep><raise>RuntimeError(msg)<block_end>final_platforms=self._limit_platforms(enabled_platforms)<line_sep>self.log.info("platforms in limits : %s" final_platforms)<if_stmt><not>final_platforms<block_start>self.log.error("platforms in limits are empty")<line_sep><raise>RuntimeError("No platforms to build for")<block_end>self.workflow.build_dir.init_build_dirs(final_platforms self.workflow.source)<line_sep><return>final_platforms<block_end><block_end> |
# Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_from_stmt>solo.utils.misc gather get_rank<def_stmt>simclr_loss_func z:torch.Tensor indexes:torch.Tensor temperature:float=0.1<arrow>torch.Tensor<block_start>"""Computes SimCLR's loss given batch of projected features z
from different views, a positive boolean mask of all positives and
a negative boolean mask of all negatives.
Args:
z (torch.Tensor): (N*views) x D Tensor containing projected features from the views.
indexes (torch.Tensor): unique identifiers for each crop (unsupervised)
or targets of each crop (supervised).
Return:
torch.Tensor: SimCLR loss.
"""<line_sep>z=F.normalize(z dim=-1)<line_sep>gathered_z=gather(z)<line_sep>sim=torch.exp(torch.einsum("if, jf -> ij" z gathered_z)/temperature)<line_sep>gathered_indexes=gather(indexes)<line_sep>indexes=indexes.unsqueeze(0)<line_sep>gathered_indexes=gathered_indexes.unsqueeze(0)<line_sep># positives
pos_mask=indexes.t()<eq>gathered_indexes<line_sep>pos_mask[: z.size(0)<times>get_rank():].fill_diagonal_(0)<line_sep># negatives
neg_mask=indexes.t()<ne>gathered_indexes<line_sep>pos=torch.sum(sim<times>pos_mask 1)<line_sep>neg=torch.sum(sim<times>neg_mask 1)<line_sep>loss=-(torch.mean(torch.log(pos/(pos+neg))))<line_sep><return>loss<block_end> |
<import_from_stmt>factory Faker<import_from_stmt>.network_node NetworkNodeFactory<import_from_stmt>..constants.network ACCOUNT_FILE_HASH_LENGTH BLOCK_IDENTIFIER_LENGTH MAX_POINT_VALUE MIN_POINT_VALUE<import_from_stmt>..models.network_validator NetworkValidator<class_stmt>NetworkValidatorFactory(NetworkNodeFactory)<block_start>daily_confirmation_rate=Faker('pyint' max_value=MAX_POINT_VALUE min_value=MIN_POINT_VALUE)<line_sep>root_account_file=Faker('url')<line_sep>root_account_file_hash=Faker('text' max_nb_chars=ACCOUNT_FILE_HASH_LENGTH)<line_sep>seed_block_identifier=Faker('text' max_nb_chars=BLOCK_IDENTIFIER_LENGTH)<class_stmt>Meta<block_start>model=NetworkValidator<line_sep>abstract=<true><block_end><block_end> |
# k3d.py
#
# Copyright 2020 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
<import_stmt>logging<import_stmt>os<import_stmt>ssl<import_stmt>subprocess<import_stmt>time<import_stmt>urllib.error<import_stmt>urllib.request<import_stmt>datetime<import_from_stmt>dateutil.parser parse<import_from_stmt>typing Dict Iterator List<import_from_stmt>typing Optional Tuple Callable<import_from_stmt>gi.repository GObject<import_from_stmt>.config APP_ENV_PREFIX<import_from_stmt>.config ApplicationSettings<import_from_stmt>.config DEFAULT_EXTRA_PATH DEFAULT_API_SERVER_PORT_RANGE DEFAULT_K3D_WAIT_TIME <import_from_stmt>.docker DockerController<import_from_stmt>.helm HelmChart cleanup_for_owner<import_from_stmt>.utils call_in_main_thread find_unused_port_in_range parse_or_get_address find_executable run_command_stdout <import_from_stmt>.utils_ui show_notification<line_sep># the header/footer length in the "k3d list" output
K3D_LIST_HEADER_LEN=3<line_sep>K3D_LIST_FOOTER_LEN=1<line_sep># directory in the K3s conatiner where we should put manifests for being automatically loaded
K3D_DOCKER_MANIFESTS_DIR="/var/lib/rancher/k3s/server/manifests/"<line_sep>###############################################################################
k3d_exe=find_executable("k3d" extra_paths=DEFAULT_EXTRA_PATH)<line_sep>logging.debug(f"k3d found at {k3d_exe}")<def_stmt>run_k3d_command *args **kwargs<arrow>Iterator[str]<block_start>"""
Run a k3d command
"""<line_sep>logging.debug(f"[K3D] Running k3d command: {args}")<line_sep><yield><from>run_command_stdout(k3d_exe *args **kwargs)<block_end>###############################################################################
# errors
###############################################################################
<class_stmt>K3dError(Exception)<block_start>"""Base class for other k3d exceptions"""<line_sep><pass><block_end><class_stmt>EmptyClusterNameError(K3dError)<block_start>"""No cluster name"""<line_sep><pass><block_end><class_stmt>InvalidNumWorkersError(K3dError)<block_start>"""Invalid num workers"""<line_sep><pass><block_end><class_stmt>ClusterCreationError(K3dError)<block_start>"""Cluster creation error"""<line_sep><pass><block_end><class_stmt>ClusterDestructionError(K3dError)<block_start>"""Cluster destruction error"""<line_sep><pass><block_end><class_stmt>ClusterNotFoundError(K3dError)<block_start>"""Cluster not found error"""<line_sep><pass><block_end><class_stmt>NoKubeconfigObtainedError(K3dError)<block_start>"""No kubeconfig obtained error"""<line_sep><pass><block_end><class_stmt>NoServerError(K3dError)<block_start>"""No Docker server error"""<line_sep><pass><block_end>###############################################################################
# k3d clusters
###############################################################################
<class_stmt>K3dCluster(GObject.GObject)<block_start>name:str=""<line_sep>status:str="running"<line_sep>num_workers:int=0<line_sep>use_registry:bool=<false><line_sep>registry_name:str=<none><line_sep>registry_port:str=<none><line_sep>registry_volume:str=<none><line_sep>cache_hub:bool=<false><line_sep>api_server:str=<none><line_sep>image:str=<none><line_sep>volumes:Dict[str str]={}<line_sep>charts:List[HelmChart]=[]<line_sep>server_args:str=<none><line_sep>__gsignals__={# a signal emmited when the cluster has been created
"created":(GObject.SIGNAL_RUN_LAST GObject.TYPE_NONE (str )) # a signal emmited when the cluster has been destroyed
"destroyed":(GObject.SIGNAL_RUN_LAST GObject.TYPE_NONE (str ))}<def_stmt>__init__ self settings:ApplicationSettings docker:DockerController **kwargs<block_start>super().__init__()<line_sep>self._docker=docker<line_sep>self._settings=settings<line_sep>self._kubeconfig=<none><line_sep>self._docker_created:Optional[datetime.datetime]=<none><line_sep>self._docker_server_ip=<none><line_sep>self._destroyed=<false><line_sep>self._status=kwargs.pop("status" "running")<line_sep>self.__dict__.update(kwargs)<line_sep># TODO: check the name is valid
<if_stmt>len(self.name)<eq>0<block_start><raise>InvalidNumWorkersError<block_end><if_stmt>self.num_workers<l>0<block_start><raise>InvalidNumWorkersError<block_end><block_end><def_stmt>__str__ self<arrow>str<block_start><return>f"{self.name}"<block_end><def_stmt>__eq__ self other<arrow>bool<block_start><if_stmt>other<is><none><block_start><return><false><block_end><if_stmt>isinstance(other K3dCluster)<block_start><return>self.name<eq>other.name<block_end><if_stmt>isinstance(other str)<block_start><return>self.name<eq>other<block_end>logging.warning(f"Comparing cluster {self.name} to incompatible type {other}")<line_sep><return>NotImplemented<block_end><def_stmt>__ne__ self other<arrow>bool<block_start><if_stmt>other<is><none><block_start><return><true><block_end><if_stmt>isinstance(other K3dCluster)<block_start><return>self.name<ne>other.name<block_end><if_stmt>isinstance(other str)<block_start><return>self.name<ne>other<block_end>logging.warning(f"Comparing cluster {self.name} to incompatible type {other}")<line_sep><return>NotImplemented<block_end><def_stmt>quit self<block_start><pass><block_end><def_stmt>create self wait=<true><arrow><none><block_start>"""
Create the cluster by invoking `k3d create`
"""<line_sep>args=[]<line_sep>kwargs={}<if_stmt><not>self.name<block_start><raise>EmptyClusterNameError()<block_end>args<augadd>[f"--name={self.name}"]<if_stmt>self.use_registry<block_start>args<augadd>["--enable-registry"]<if_stmt>self.cache_hub<block_start>args<augadd>["--enable-registry-cache"]<block_end><if_stmt>self.registry_volume<block_start>args<augadd>[f"--registry-volume={self.registry_volume}"]<block_end><if_stmt>self.registry_name<block_start>args<augadd>[f"--registry-name={self.registry_name}"]<block_end><if_stmt>self.registry_port<block_start>args<augadd>[f"--registry-port={self.registry_port}"]<block_end><block_end><if_stmt>wait<block_start>args<augadd>[f"--wait={DEFAULT_K3D_WAIT_TIME}"]<block_end><if_stmt>self.num_workers<g>0<block_start>args<augadd>[f"--workers={self.num_workers}"]<block_end><if_stmt>self.image<block_start>args<augadd>[f"--image={self.image}"]<block_end># create some k3s server arguments
# by default, we add a custom DNS domain with the same name as the cluster
args<augadd>[f"--server-arg=--cluster-domain={self.name}.local"]<if_stmt>self.server_args<block_start>args<augadd>[f"--server-arg={arg}"<for>arg self.server_args<if>len(arg)<g>0]<block_end># append any extra volumes
<for_stmt>vol_k,vol_v self.volumes.items()<block_start>args<augadd>[f"--volume={vol_k}:{vol_v}"]<block_end># append any extra Charts as volumes too
<for_stmt>chart self.charts<block_start>src=chart.generate(self)<line_sep>dst=f"{K3D_DOCKER_MANIFESTS_DIR}/{chart.name}.yaml"<line_sep>args<augadd>[f"--volume={src}:{dst}"]<block_end># use the given API port or find an unused one
self.api_server=parse_or_get_address(self.api_server *DEFAULT_API_SERVER_PORT_RANGE)<line_sep>logging.info(f"[K3D] Using API address {self.api_server}")<line_sep>args<augadd>[f"--api-port={self.api_server}"]<line_sep># check if we must use an env variable for the DOCKER_HOST
docker_host=self._docker.docker_host<line_sep>default_docker_host=self._docker.default_docker_host<if_stmt>docker_host<ne>self._docker.default_docker_host<block_start>logging.debug(f"[K3D] Overriding DOCKER_HOST={docker_host} (!= {default_docker_host})")<line_sep>new_env=os.environ.copy()<line_sep>new_env["DOCKER_HOST"]=docker_host<line_sep>kwargs["env"]=new_env<block_end><try_stmt><block_start>logging.info(f"[K3D] Creating cluster (with {args})")<while_stmt><true><block_start><try_stmt><block_start>line=next(run_k3d_command("create" *args **kwargs))<line_sep>logging.debug(f"[K3D] {line}")<line_sep># detect errors in the output
<if_stmt>"level=fatal"<in>line<block_start><raise>ClusterCreationError(line.strip())<block_end><block_end><except_stmt>StopIteration<block_start><break><block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>logging.error(f"Could not create cluster: {e}. Cleaning up...")<line_sep>self._cleanup()<line_sep>self._destroyed=<true><line_sep><raise>e<block_end>logging.info("[K3D] The cluster has been created")<line_sep>self._status="running"<line_sep>call_in_main_thread(<lambda>:self.emit("created" self.name))<block_end><def_stmt>destroy self<arrow><none><block_start>"""
Destroy this cluster with `k3d delete`
"""<line_sep>logging.info("[K3D] Destroying cluster")<if_stmt><not>self.name<block_start><raise>EmptyClusterNameError()<block_end><if_stmt>self._destroyed<block_start><raise>ClusterDestructionError("Trying to destroy an already destroyed cluster")<block_end>args=[]<line_sep>args<augadd>[f"--name={self.name}"]<line_sep>args<augadd>["--keep-registry-volume"]<while_stmt><true><block_start><try_stmt><block_start>line=next(run_k3d_command("delete" *args))<line_sep>logging.debug(f"[K3D] {line}")<block_end><except_stmt>StopIteration<block_start><break><block_end><block_end>self._cleanup()<line_sep>self._destroyed=<true><line_sep>call_in_main_thread(<lambda>:self.emit("destroyed" self.name))<block_end><def_stmt>_cleanup self<arrow><none><block_start>"""
Cleanup any remaining things after destroying a cluster
"""<line_sep>logging.debug(f"[K3D] Cleaning up for {self.name}")<line_sep>cleanup_for_owner(self.name)<block_end>@property<def_stmt>kubeconfig self<arrow>Optional[str]<block_start>"""
Get the kubeconfig file for this cluster, or None if no
"""<if_stmt>self._destroyed<block_start><return><none><block_end># cache the kubeconfig: once obtained, it will not change
<if_stmt><not>self._kubeconfig<block_start><for_stmt>_ range(0 20)<block_start><try_stmt><block_start>line=next(run_k3d_command("get-kubeconfig" f"--name={self.name}"))<block_end><except_stmt>StopIteration<block_start><break><block_end><except_stmt>subprocess.CalledProcessError<block_start>logging.debug(f"[K3D] ... KUBECONFIG for {self.name} not ready yet...")<line_sep>time.sleep(1)<block_end><else_stmt><block_start>logging.debug(f"[K3D] ... obtained KUBECONFIG for {self.name} at {line}")<line_sep>self._kubeconfig=line<line_sep><break><block_end><block_end><block_end><return>self._kubeconfig<block_end>@property<def_stmt>running self<arrow>bool<block_start><return>self._status<eq>"running"<block_end><def_stmt>start self<arrow><none><block_start><if_stmt><not>self.running<block_start>args=[]<line_sep>args<augadd>[f"--name={self.name}"]<line_sep>logging.debug(f"[K3D] Starting {self.name}...")<while_stmt><true><block_start><try_stmt><block_start>line=next(run_k3d_command("start" *args))<line_sep>logging.debug(f"[K3D] {line}")<block_end><except_stmt>StopIteration<block_start><break><block_end><block_end><block_end><block_end><def_stmt>stop self<arrow><none><block_start><if_stmt>self.running<block_start>args=[]<line_sep>args<augadd>[f"--name={self.name}"]<line_sep>logging.debug(f"[K3D] Stopping {self.name}...")<while_stmt><true><block_start><try_stmt><block_start>line=next(run_k3d_command("stop" *args))<line_sep>logging.debug(f"[K3D] {line}")<block_end><except_stmt>StopIteration<block_start><break><block_end><block_end><block_end><block_end>@property<def_stmt>docker_created 
self<arrow>Optional[datetime.datetime]<block_start><if_stmt>self._destroyed<block_start><return><none><block_end><if_stmt>self._docker_created<is><none><block_start>c=self._docker.get_container_by_name(self.docker_server_name)<if_stmt>c<block_start>t=self._docker.get_container_created(c)<if_stmt>t<block_start><try_stmt><block_start>self._docker_created=parse(t)<block_end><except_stmt>Exception<as>e<block_start>logging.error(f"[K3D] could not parse time string {t}: {e}")<block_end><block_end><block_end><block_end><return>self._docker_created<block_end>@property<def_stmt>docker_server_name self<arrow>Optional[str]<block_start><if_stmt>self._destroyed<block_start><return><none><block_end><return>f"k3d-{self.name}-server"<block_end>@property<def_stmt>docker_network_name self<arrow>Optional[str]<block_start><if_stmt>self._destroyed<block_start><return><none><block_end><return>f"k3d-{self.name}"<block_end>@property<def_stmt>docker_server_ip self<arrow>Optional[str]<block_start><if_stmt>self._destroyed<block_start><return><none><block_end><if_stmt><not>self._docker_server_ip<block_start>c=self._docker.get_container_by_name(self.docker_server_name)<if_stmt>c<block_start>ip=self._docker.get_container_ip(c self.docker_network_name)<if_stmt>ip<is><none><block_start><raise>NoServerError(f"could not obtain server IP for {self.docker_server_name} in network {self.docker_network_name}")<block_end>self._docker_server_ip=ip<block_end><block_end><return>self._docker_server_ip<block_end>@property<def_stmt>dashboard_url self<arrow>Optional[str]<block_start><if_stmt>self._destroyed<block_start><return><none><block_end>ip=self.docker_server_ip<if_stmt>ip<block_start><return>f"https://{self.docker_server_ip}/"<block_end><block_end><def_stmt>check_dashboard self *args<arrow>bool<block_start>"""
Check that the Dashboard is ready
"""<try_stmt><block_start>context=ssl._create_unverified_context()<line_sep><return>urllib.request.urlopen(self.dashboard_url context=context).getcode()<block_end><except_stmt>urllib.error.URLError<as>e<block_start>logging.info(f"Error when checking {self.dashboard_url}: {e}")<line_sep><return><false><block_end><block_end><def_stmt>open_dashboard self *args<arrow><none><block_start><import_stmt>webbrowser<line_sep>u=self.dashboard_url<if_stmt>u<is><not><none><block_start>logging.debug(f"[K3D] Opening '{u}' in default web browser")<line_sep>webbrowser.open(u)<block_end><else_stmt><block_start>logging.warning(f"[K3D] No URL to open")<block_end><block_end>@property<def_stmt>script_environment self<arrow>Dict[str str]<block_start>"""
Return a dictionary with env variables for running scripts for this cluster
"""<line_sep># Note: make sure we do not return any non-string value or subprocess.run will throw an exception.
env={f"{APP_ENV_PREFIX}_CLUSTER_NAME":str(self.name) }<if_stmt><not>self._destroyed<block_start>env.update({f"{APP_ENV_PREFIX}_REGISTRY_ENABLED":"1"<if>self.use_registry<else>"" f"{APP_ENV_PREFIX}_REGISTRY_NAME":str(self.registry_name)<if>self.registry_name<else>"" f"{APP_ENV_PREFIX}_REGISTRY_PORT":str(self.registry_port)<if>self.registry_port<else>"" f"{APP_ENV_PREFIX}_MASTER_IP":str(self.docker_server_ip)<if>self.docker_server_ip<is><not><none><else>"" f"{APP_ENV_PREFIX}_KUBECONFIG":self.kubeconfig<if>self.kubeconfig<is><not><none><else>"" })<block_end><return>env<block_end><block_end>GObject.type_register(K3dCluster)<line_sep> |
# This file contains the list of APIs for ZTP operations
# @author : <NAME> (<EMAIL>)
from spytest import st
import apis.system.basic as basic_obj
import utilities.utils as utils_obj
import apis.system.switch_configuration as switch_conf_obj
import apis.system.interface as intf_obj
import apis.routing.ip as ip_obj
import apis.system.reboot as reboot_obj
import apis.system.boot_up as boot_up_obj
import datetime

# Common wait intervals (seconds) used by the ZTP helpers in this module.
wait_5 = 5
wait_10 = 10
wait_60 = 60


def show_ztp_status(dut, expect_reboot=False, cli_type=""):
    """
    API to show ztp status.

    :param dut: device under test handle
    :param expect_reboot: forwarded to st.show() so the CLI read can survive an
        in-progress reboot. (BUGFIX: the previous implementation accepted this
        flag but always passed expect_reboot=False to st.show.)
    :param cli_type: CLI flavour; resolved via st.get_ui_type.
    :return: dict with scalar keys service/source/status/adminmode/timestamp,
        plus "filenames" (list holding one {filename: filestatus} dict) and
        "timestamps" (list holding one {filename: filetimestamp} dict).
        Returns an empty dict for unsupported CLI types.
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    result = dict()
    # REST flavours are served through the klish CLI.
    cli_type = "klish" if cli_type in ["rest-put", "rest-patch"] else cli_type
    if cli_type not in ["click", "klish"]:
        st.error("UNSUPPORTED CLI TYPE")
        return result
    command = "sudo ztp status" if cli_type == "click" else "show ztp-status"
    # BUGFIX: forward the caller's expect_reboot instead of hard-coding False.
    output = st.show(dut, command, expect_reboot=expect_reboot, type=cli_type)
    file_name = dict()
    timestamps = dict()
    if output:
        # BUGFIX: initialise the aggregate lists once. The old code reset them
        # on every row, so only file entries from the last row survived.
        result["filenames"] = list()
        result["timestamps"] = list()
        for row in output:
            # Scalar fields: keep the first non-empty value observed.
            if not result.get("service"):
                result["service"] = row.get("service", "")
            if not result.get("source"):
                result["source"] = row.get("source", "")
            if not result.get("status"):
                result["status"] = row.get("status", "")
            if not result.get("adminmode"):
                result["adminmode"] = row.get("adminmode", "")
            result["timestamp"] = row.get("timestamp", "")
            if row.get("filename"):
                if cli_type == "click":
                    # click output packs "name: status" into a single column.
                    values = row["filename"].split(":")
                    if len(values) > 1:  # guard against a missing separator
                        file_name[values[0].strip()] = values[1].strip()
                elif cli_type == "klish":
                    file_name[row.get("filename")] = row.get("filestatus")
                    if row.get("filetimestamp"):
                        timestamps[row.get("filename")] = row.get("filetimestamp")
        # Publish the aggregated per-file dicts (same list-of-one-dict shape
        # downstream callers index with [0]).
        if file_name:
            result["filenames"].append(file_name)
        if timestamps:
            result["timestamps"].append(timestamps)
    st.debug(result)
    return result


def verify_ztp_config_section_from_status(dut, file_names=None, status="SUCCESS", cli_type=""):
    """
    API to verify the configuration-section results reported by ztp status.

    :param dut: device under test handle
    :param file_names: list of configuration section names to check
    :param status: expected status string for every section (default "SUCCESS")
    :param cli_type: CLI flavour; resolved via st.get_ui_type.
    :return: False as soon as any section's status differs from *status*,
        True when all sections match; None when file_names is empty
        (legacy behaviour preserved).
    """
    # NOTE: mutable default argument replaced with None; the argument is only
    # read, so callers see identical behaviour.
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    if file_names:
        response = show_ztp_status(dut, cli_type=cli_type)
        for file_name in file_names:
            for names in response["filenames"]:
                # BUGFIX: fail fast on a mismatch. The old loop overwrote a
                # flag on every comparison, so only the last one counted.
                if names.get(file_name) != status:
                    return False
        return True


def _verify_ztp_status_with_retry(dut, retry_cnt, cli_type=""):
    """
    API to verify ZTP status with a bounded number of retries.

    :param dut: device under test handle
    :param retry_cnt: maximum number of status polls (3s apart on transient states)
    :param cli_type: CLI flavour; resolved via st.get_ui_type.
    :return: True when ZTP completed successfully, False otherwise.
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    not_started_retry_cnt = 0
    st.log("Verifying the ZTP status with retry method ...")
    for _ in range(1, retry_cnt + 1):
        response = show_ztp_status(dut, cli_type=cli_type)
        if response["adminmode"] != "True":
            st.log("Found that ZTP is disabled hence enabling it ..")
            return False
        st.log("Found that admin mode as {}".format(response["adminmode"]))
        if response["service"] == "Inactive":
            st.log("Found that service as {}".format(response["service"]))
            if response["status"] == "FAILED":
                st.log("Found that status as {}".format(response["status"]))
                return False
            elif response["status"] == "SUCCESS":
                st.log("Found that status as {}".format(response["status"]))
                return True
        elif response["service"] in ("Processing", "Active Discovery"):
            st.log("Found that service as {}".format(response["service"]))
            if response["status"] == "IN-PROGRESS":
                st.log("Found that status as {}".format(response["status"]))
                st.wait(3)
            elif response["status"] == "FAILED":
                st.log("Found that status as {}".format(response["status"]))
                return False
            elif response["status"] == "Not Started":
                st.log("Found that status as {}".format(response["status"]))
                not_started_retry_cnt += 1
                # Give up once "Not Started" has been seen retry_cnt times.
                if not_started_retry_cnt >= retry_cnt:
                    return False
                st.wait(3)
            else:
                # Any other status while processing is treated as success.
                return True
        elif response["service"] == "SUCCESS":
            st.log("Found that service as {}".format(response["service"]))
            return True
    return False


def poll_ztp_status(dut, status=["IN-PROGRESS", "Not Started"], iteration=40, retry=3, cli_type=""):
    """
    API to poll the ztp status until one of the expected states is observed.

    :param dut: device under test handle
    :param status: state string, or list of state strings, to wait for
    :param iteration: maximum number of polls before giving up
    :param retry: delay in seconds between polls
    :param cli_type: CLI flavour; resolved via st.get_ui_type.
    :return: True when a wanted state is observed, False on timeout.
    """
    # NOTE: the list default is safe here — *status* is rebound, never mutated.
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    i = 1
    status = [str(e) for e in status] if isinstance(status, list) else [status]
    while True:
        response = show_ztp_status(dut, cli_type=cli_type)
        if response["status"] in status:
            st.log("Observed {} during polling ...".format(status))
            return True
        if i > iteration:
            st.log("Max polling interval {} exceeded ...".format(i))
            return False
        i += 1
        st.wait(retry)


# This function should be called with running ztp run command
<def_stmt>verify_ztp_status dut retry_cnt=0 iteration=300 retry=3 expect_reboot=<false> reboot_on_success=list() cli_type=""<block_start>cli_type=st.get_ui_type(dut cli_type=cli_type)<line_sep>"""
Author: <NAME> (<EMAIL>)
API to verify ZTP status
:param dut:
:param retry_cnt:
:return:
"""<line_sep>retry_count_if_no_response=0<if_stmt>retry_cnt<block_start><return>_verify_ztp_status_with_retry(dut retry_cnt cli_type=cli_type)<block_end><else_stmt><block_start>st.log("Verifying the ZTP status with iteration method ...")<for_stmt>_ range(1 iteration+1)<block_start>response=show_ztp_status(dut expect_reboot=expect_reboot cli_type=cli_type)<if_stmt><not>response<block_start>st.log("Observed no response in ZTP status ... retrying {} .. ".format(retry_count_if_no_response))<if_stmt>retry_count_if_no_response<g>5<block_start>st.error("show ztp status returned empty data...")<line_sep><return><false><block_end>st.wait(retry)<line_sep>retry_count_if_no_response<augadd>1<line_sep><continue><block_end><if_stmt>"service"<not><in>response<or>"status"<not><in>response<or>"adminmode"<not><in>response<block_start>st.log("Values of service or status or adminmode is not populated yet, retrying ...")<line_sep>st.wait(10)<line_sep><continue><block_end><if_stmt>response["adminmode"]<eq>"True"<block_start><if_stmt>"service"<not><in>response<or>"status"<not><in>response<or>"adminmode"<not><in>response<block_start>st.log("Values of service or status or adminmode is not populated yet, retrying ...")<line_sep>st.wait(retry)<block_end><else_stmt># return verify_ztp_status(dut)
<block_start>st.log("Found that admin mode as {}".format(response["adminmode"]))<if_stmt>response["service"]<eq>"Inactive"<block_start>st.log("Found that service as {}".format(response["service"]))<if_stmt>response["status"]<eq>"FAILED"<block_start>st.log("Found that status as {}".format(response["status"]))<line_sep><return><false><block_end><elif_stmt>response["status"]<eq>"SUCCESS"<block_start>st.log("Found that status as {}".format(response["status"]))<line_sep><return><true><block_end><else_stmt><block_start>st.log("ZTP status is not in expected values , retrying...")<line_sep>st.wait(retry)<line_sep># return verify_ztp_status(dut)
<block_end><block_end><elif_stmt>response["service"]<eq>"Processing"<or>response["service"]<eq>"Active Discovery"<block_start>st.log("Found that service as {}".format(response["service"]))<if_stmt>response["status"]<eq>"IN-PROGRESS"<block_start>st.log("Found that status as {}".format(response["status"]))<line_sep>st.log("Files - {}".format(response["filenames"]))<if_stmt>reboot_on_success<and>"filenames"<in>response<and>response["filenames"]<block_start>reboot_flag=list(reboot_on_success)<if>isinstance(reboot_on_success list)<else>[reboot_on_success]<if_stmt>len(response["filenames"])<g>0<block_start>filenames=response["filenames"][0]<for_stmt>filename reboot_flag<block_start><if_stmt>filename<in>filenames<and>filenames[filename]<eq>"SUCCESS"<block_start><return><true><block_end><block_end><block_end><block_end><if_stmt>cli_type<eq>"klish"<block_start><if_stmt>len(response["filenames"])<g>0<block_start><for_stmt>key,value response["filenames"][0].items()<block_start><if_stmt>("configdb-json"<in>key<or>"graphservice"<in>key)<and>value<eq>"IN-PROGRESS"<block_start>st.wait(300)<block_end><block_end><block_end><block_end>st.wait(retry)<line_sep># return verify_ztp_status(dut)
<block_end><elif_stmt>response["status"]<eq>"FAILED"<block_start>st.log("Found that status as {}".format(response["status"]))<line_sep><return><false><block_end><elif_stmt>response["status"]<eq>"Not Started"<block_start>st.log("Found that status as {}".format(response["status"]))<line_sep>st.wait(retry)<line_sep># return verify_ztp_status(dut)
<block_end><elif_stmt>response["status"]<eq>"SUCCESS"<block_start>st.log("Found that status as {}".format(response["status"]))<line_sep>st.wait(retry)<line_sep># return verify_ztp_status(dut)
<block_end><else_stmt><block_start>st.log("ZTP status is not in expected values, retrying...")<line_sep>st.wait(retry)<block_end><block_end><elif_stmt>response["service"]<eq>"SUCCESS"<block_start>st.log("Found that service as {}".format(response["service"]))<line_sep><return><true><block_end><block_end><block_end><else_stmt><block_start>st.log("Found that ZTP is disabled hence enabling it ..")<line_sep>ztp_operations(dut "enable")<line_sep># ztp_operations(dut, "run")
# return verify_ztp_status(dut)
<block_end><block_end><return><false><block_end><block_end><def_stmt>get_ztp_timestamp_obj ztp_timestamp<block_start>"""
Author: <NAME> (<EMAIL>)
API to get ztp timestamp
:param ztp_timestamp:
:return:
"""<try_stmt><block_start><return>datetime.datetime.strptime(ztp_timestamp '%Y-%m-%d %H:%M:%S')<block_end><except_stmt>ValueError<as>e<block_start>st.error(e)<block_end><block_end><def_stmt>enable_ztp_if_disabled dut iteration=5 delay=1 cli_type=""<block_start>cli_type=st.get_ui_type(dut cli_type=cli_type)<line_sep>"""
API to enable ztp if it is disabled, added check for enable in polling mechanism
Author: <NAME> (<EMAIL>)
:param dut:
:param iteration:
:param delay:
:return:
"""<line_sep>i=1<while_stmt><true><block_start>response=show_ztp_status(dut cli_type=cli_type)<if_stmt>"adminmode"<in>response<and>response["adminmode"]<ne>"True"<block_start>st.log("Enabling ZTP ...")<line_sep>ztp_operations(dut "enable")<line_sep><break><block_end><if_stmt>i<g>iteration<block_start>st.log("ZTP admin mode not found after max iterations ...")<line_sep><break><block_end>i<augadd>1<line_sep>st.wait(delay)<block_end>i=1<while_stmt><true><block_start>response=show_ztp_status(dut cli_type=cli_type)<if_stmt>"adminmode"<in>response<and>response["adminmode"]<eq>"True"<block_start>st.log("Admin mode enabled at {} iteration".format(i))<line_sep><return><true><block_end><if_stmt>i<g>iteration<block_start>st.log("Max iteration {} count reached ".format(i))<line_sep><return><false><block_end>i<augadd>1<line_sep>st.wait(delay)<block_end><block_end><def_stmt>ztp_operations dut operation cli_type="" max_time=0<block_start>cli_type=st.get_ui_type(dut cli_type=cli_type)<line_sep>"""
Author: <NAME> (<EMAIL>)
API to do ZTP operations
:param dut:
:param operation:
:return:
"""<if_stmt>cli_type<eq>"click"<block_start>supported_opers=["run" "enable" "disable"]<if_stmt>operation<not><in>supported_opers<block_start><return><false><block_end><if_stmt>operation<in>["run" "disable"]<block_start>command="ztp {} -y".format(operation)<block_end><else_stmt><block_start>command="ztp {}".format(operation)<block_end><block_end><elif_stmt>cli_type<eq>"klish"<block_start>no_form="no"<if>operation<eq>"disable"<else>""<line_sep>command="{} ztp enable".format(no_form)<block_end>st.config(dut command type=cli_type max_time=max_time)<block_end><def_stmt>ztp_push_full_config dut cli_type=""<block_start>"""
NOT USED ANYWHERE
Author: <NAME> (<EMAIL>)
API to push full config
:param dut:
:return:
"""<line_sep>cli_type=st.get_ui_type(dut cli_type=cli_type)<line_sep>config_dbjson="config_db.json"<line_sep>config_file="ztp_data_local.json"<line_sep>plugin_file_path="/etc/sonic/ztp/{}".format(config_file)<line_sep>source="/tmp/{}".format(config_dbjson)<line_sep>plugin_json={config_dbjson:{"url":{"source":"file://{}".format(source) "timeout":300} "save-config":"true"}}<line_sep>file_path=basic_obj.write_to_json_file(plugin_json)<line_sep>st.upload_file_to_dut(dut file_path plugin_file_path)<line_sep>running_config=switch_conf_obj.get_running_config(dut)<line_sep>file_path=basic_obj.write_to_json_file(running_config)<line_sep>st.upload_file_to_dut(dut file_path source)<line_sep>st.wait(wait_5)<line_sep>ztp_operations(dut "run")<line_sep>st.wait(wait_60)<line_sep>show_ztp_status(dut cli_type=cli_type)<line_sep>st.wait(wait_10)<line_sep>show_ztp_status(dut cli_type=cli_type)<block_end><def_stmt>prepare_and_write_option_67_config_string ssh_conn_obj static_ip config_path config_file dhcp_config_file type="http"<block_start>"""
NOT USED ANYWHERE
Author: <NAME> (<EMAIL>)
Common function to write option 67 to DHCP server
:param ssh_conn_obj:
:param static_ip:
:param config_path:
:param config_file:
:param dhcp_config_file:
:param type:
:return:
"""<line_sep>option_67_config="option bootfile-name"<if_stmt>type<eq>"http"<block_start>config_json_url="http://{}{}/{}".format(static_ip config_path config_file)<block_end><elif_stmt>type<eq>"tftp"<block_start>config_json_url="tftp://{}/{}/{}".format(static_ip config_path config_file)<block_end><elif_stmt>type<eq>"ftp"<block_start>config_json_url="ftp://{}/{}/{}".format(static_ip config_path config_file)<block_end>option_67_config_string='{} "{}";'.format(option_67_config config_json_url)<if_stmt><not>basic_obj.write_update_file(ssh_conn_obj option_67_config option_67_config_string dhcp_config_file)<block_start>st.log("Written content in file {} not found".format(dhcp_config_file))<line_sep>st.report_fail("content_not_found")<block_end><block_end><def_stmt>write_option_67_to_dhcp_server ssh_conn_obj data<block_start>"""
NOT USED ANYWHERE
:param ssh_conn_obj:
:param data:
:return:
"""<line_sep>option_67_config="option bootfile-name"<if_stmt>data.type<eq>"http"<block_start>config_json_url="http://{}{}/{}".format(data.static_ip data.config_path data.config_file)<block_end><elif_stmt>data.type<eq>"tftp"<block_start>config_json_url="tftp://{}/{}/{}".format(data.static_ip data.config_path data.config_file)<block_end><elif_stmt>data.type<eq>"ftp"<block_start>config_json_url="ftp://{}/{}/{}".format(data.static_ip data.config_path data.config_file)<block_end>option_67_config_string='{} "{}";'.format(option_67_config config_json_url)<if_stmt><not>basic_obj.write_update_file(ssh_conn_obj option_67_config option_67_config_string data.dhcp_config_file)<block_start>st.log("Written content in file {} not found".format(data.dhcp_config_file))<line_sep>st.report_fail("content_not_found")<block_end>basic_obj.service_operations(ssh_conn_obj data.dhcp_service_name data.action data.device)<if_stmt><not>verify_dhcpd_service_status(ssh_conn_obj data.dhcpd_pid)<block_start>st.log("{} service not running".format(data.dhcp_service_name))<line_sep>st.report_fail("service_not_running" data.dhcp_service_name)<block_end><block_end><def_stmt>config_and_verify_dhcp_option ssh_conn_obj dut ztp_params data expect_reboot=<false> reboot_on_success=list() cli_type=""<block_start>"""
Common function to configure DHCP option along with status / logs verification
Author: <NAME> (<EMAIL>)
:param ssh_conn_obj:
:param dut:
:param ztp_params:
:param data:
:return:
"""<line_sep>cli_type=st.get_ui_type(dut cli_type=cli_type)<line_sep>cli_type="klish"<if>cli_type<in>["rest-put" "rest-patch"]<else>cli_type<line_sep>retry_count=data.retry_count<if>"retry_count"<in>data<and>data.retry_count<else>0<line_sep>iteration=data.iteration<if>"iteration"<in>data<and>data.iteration<else>300<line_sep>delay=data.delay<if>"delay"<in>data<and>data.delay<else>3<if_stmt>"func_name"<in>data<block_start>syslog_file_names=["syslog_1_{}".format(data.func_name) "syslog_{}".format(data.func_name)]<block_end># basic_obj.copy_config_db_to_temp(dut, data.config_db_path, data.config_db_temp)
<if_stmt>"config_file_type"<in>data<and>data.config_file_type<eq>"text"<block_start>file_path="/tmp/file_temp.json"<line_sep>basic_obj.write_to_file(ssh_conn_obj data.json_content file_path device="server")<block_end><elif_stmt>"config_file_type"<in>data<and>data.config_file_type<eq>"EoL"<block_start>file_path=""<block_end><else_stmt><block_start>file_path=basic_obj.write_to_json_file(data.json_content)<block_end><if_stmt>file_path<block_start>destination_path="{}{}/{}".format(ztp_params.home_path ztp_params.config_path data.config_file)<line_sep>basic_obj.copy_file_from_client_to_server(ssh_conn_obj src_path=file_path dst_path=destination_path)<block_end><if_stmt>"config_db_location"<in>data<and>data.config_db_location<eq>"json"<block_start>st.download_file_from_dut(dut data.config_db_temp file_path)<line_sep>destination_path="{}{}/{}".format(ztp_params.home_path ztp_params.config_path data.config_db_file_name)<line_sep>basic_obj.copy_file_from_client_to_server(ssh_conn_obj src_path=file_path dst_path=destination_path)<block_end><if_stmt>"scenario"<in>data<and>data.scenario<eq>"invalid-json"<block_start>st.log("Writing invalid content to make invalid json ...")<line_sep>basic_obj.write_to_file_to_line(ssh_conn_obj "," 5 destination_path "server")<block_end><if_stmt>data.option_type<eq>"67"<block_start>st.log("Creating {} file on DHCP server ...".format(data.config_file))<line_sep>data.search_pattern=r'\s*option\s+bootfile-name\s*\S*\s*"\S+";'<line_sep>data.option_string="option bootfile-name"<if_stmt>data.type<eq>"http"<block_start>data.option_url="http://{}{}/{}".format(data.static_ip data.config_path data.config_file)<block_end><elif_stmt>data.type<eq>"tftp"<block_start>data.option_url="tftp://{}/{}/{}".format(data.static_ip data.config_path data.config_file)<block_end><elif_stmt>data.type<eq>"ftp"<block_start>data.option_url="ftp://{}/{}/{}".format(data.static_ip data.config_path data.config_file)<block_end>write_option_to_dhcp_server(ssh_conn_obj 
data)<line_sep>basic_obj.service_operations(ssh_conn_obj data.dhcp_service_name data.action data.device)<if_stmt><not>verify_dhcpd_service_status(ssh_conn_obj data.dhcpd_pid)<block_start>st.log("{} service not running".format(data.dhcp_service_name))<line_sep>st.report_fail("service_not_running" data.dhcp_service_name)<block_end># write_option_67_to_dhcp_server(ssh_conn_obj, data)
<block_end>data.device_action="reboot"<if>cli_type<eq>"klish"<else>data.device_action<if_stmt>data.device_action<eq>"reboot"<block_start>reboot_type=data.reboot_type<if>"reboot_type"<in>data<and>data.reboot_type<else>"normal"<line_sep>basic_obj.remove_file(dut data.config_db_path)<line_sep>st.reboot(dut reboot_type skip_port_wait=<true>)<line_sep>st.wait_system_status(dut 500)<block_end><elif_stmt>data.device_action<eq>"run"<block_start>ztp_operations(dut data.device_action)<block_end><if_stmt>"band_type"<in>data<and>data.band_type<eq>"inband"<block_start><if_stmt><not>basic_obj.poll_for_system_status(dut)<block_start>st.log("Sytem is not ready ..")<line_sep>st.report_env_fail("system_not_ready")<block_end><if_stmt><not>basic_obj.check_interface_status(dut ztp_params.oob_port "up")<block_start>basic_obj.ifconfig_operation(dut ztp_params.oob_port "down")<block_end>interface_status=basic_obj.check_interface_status(dut ztp_params.inband_port "up")<if_stmt>interface_status<is><not><none><block_start><if_stmt><not>interface_status<block_start>intf_obj.interface_noshutdown(dut ztp_params.inband_port cli_type=cli_type)<block_end><block_end><block_end><if_stmt>"service"<in>data<and>data.service<eq>"disable"<block_start>basic_obj.service_operations_by_systemctl(dut "ztp" "stop")<if_stmt>basic_obj.verify_service_status(dut "ztp")<block_start>st.log("ZTP status is not stopped")<line_sep>st.report_fail("service_not_stopped" "ztp")<block_end>basic_obj.service_operations_by_systemctl(dut "ztp" "start")<block_end><if_stmt><not>poll_ztp_status(dut ["IN-PROGRESS" "Not Started" "SUCCESS"] cli_type=cli_type)<block_start>st.report_fail("ztp_max_polling_interval")<block_end><if_stmt>"check"<in>data<and>data.check<eq>"not"<block_start><if_stmt>verify_ztp_status(dut retry_count iteration delay cli_type=cli_type)<block_start><if_stmt>"logs_path"<in>data<and>"func_name"<in>data<block_start>capture_syslogs(dut data.logs_path syslog_file_names)<block_end>st.log("ZTP status verification 
failed")<line_sep>st.report_fail("ztp_status_verification_failed")<block_end><block_end><else_stmt><block_start>st.log("Iteration count {}".format(iteration))<line_sep>st.log("REBOOT ON SUCCESS - {}".format(reboot_on_success))<if_stmt>reboot_on_success<block_start><if_stmt>"configdb-json"<in>reboot_on_success<block_start>st.wait_system_reboot(dut)<line_sep>st.wait_system_status(dut 300)<block_end>result=verify_ztp_status(dut retry_count iteration delay expect_reboot=expect_reboot reboot_on_success=reboot_on_success cli_type=cli_type)<block_end><else_stmt><block_start>result=verify_ztp_status(dut retry_count iteration delay expect_reboot=expect_reboot cli_type=cli_type)<block_end><if_stmt><not>result<block_start><if_stmt>"logs_path"<in>data<and>"func_name"<in>data<block_start>capture_syslogs(dut data.logs_path syslog_file_names)<block_end>st.log("ZTP status verification failed")<line_sep>st.report_fail("ztp_status_verification_failed")<block_end><if_stmt>reboot_on_success<block_start>output=show_ztp_status(dut cli_type=cli_type)<if_stmt>output["status"]<ne>"SUCCESS"<block_start>st.wait(300 "Waiting for device to reboot after success...")<line_sep>st.wait_system_status(dut 300)<block_end># st.wait_system_reboot(dut)
<if_stmt><not>verify_ztp_status(dut retry_count iteration delay cli_type=cli_type)<block_start><if_stmt>"logs_path"<in>data<and>"func_name"<in>data<block_start>capture_syslogs(dut data.logs_path syslog_file_names)<block_end>st.log("ZTP status verification failed")<line_sep>st.report_fail("ztp_status_verification_failed")<block_end>st.banner(boot_up_obj.sonic_installer_list(dut))<block_end><block_end>verify_ztp_filename_logs(dut data)<if_stmt>"ztp_log_string"<in>data<and>data.ztp_log_string<block_start><if_stmt><not>basic_obj.poll_for_error_logs(dut data.ztp_log_path data.ztp_log_string)<block_start>st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path data.ztp_log_string))<if_stmt><not>basic_obj.poll_for_error_logs(dut data.ztp_log_path_1 data.ztp_log_string)<block_start>st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path_1 data.ztp_log_string))<line_sep>st.report_fail("ztp_log_verification_failed" data.ztp_log_path_1 data.ztp_log_string)<block_end><block_end><block_end><if_stmt>"result"<in>data<and>data.result<eq>"pass"<block_start>st.report_pass("test_case_passed")<block_end><block_end><def_stmt>write_option_239_to_dhcp_server ssh_conn_obj data<block_start>st.log("##################### Writing option 239 to dhcp config file ... 
##################")<line_sep>option_239='option provision-url ='<line_sep>provisioning_script_path="http://{}{}/{}".format(data["server_ip"] data["config_path"] data["provision_script"])<line_sep>option_239_config='{} "{}";'.format(option_239 provisioning_script_path)<line_sep>option_67_config="option bootfile-name"<line_sep>basic_obj.write_update_file(ssh_conn_obj option_67_config "##" data["dhcp_config_file"])<if_stmt><not>basic_obj.write_update_file(ssh_conn_obj option_239 option_239_config data["dhcp_config_file"])<block_start>st.log("Written content in file {} not found".format(data["dhcp_config_file"]))<line_sep>st.report_fail("content_not_found")<block_end><block_end><def_stmt>write_option_225_to_dhcp_server ssh_conn_obj data<block_start>option_225="option option-225 ="<line_sep>option_225_path=data["minigraph_path"]<line_sep>option_225_config='{} "{}";'.format(option_225 option_225_path)<line_sep>option_67_config="option bootfile-name"<line_sep>option_239='option provision-url ='<line_sep>basic_obj.write_update_file(ssh_conn_obj option_67_config "##" data["dhcp_config_file"])<line_sep>basic_obj.write_update_file(ssh_conn_obj option_239 "##" data["dhcp_config_file"])<if_stmt><not>basic_obj.write_update_file(ssh_conn_obj option_225 option_225_config data["dhcp_config_file"])<block_start>st.log("Written content in file {} not found".format(data["dhcp_config_file"]))<line_sep>st.report_fail("content_not_found")<block_end><block_end><def_stmt>config_and_verify_option_225 ssh_conn_obj dut ztp_params data cli_type=""<block_start>cli_type=st.get_ui_type(dut cli_type=cli_type)<if_stmt>data.option_type<eq>"225"<block_start><if_stmt>"func_name"<in>data<block_start>syslog_file_names=["syslog_1_{}".format(data.func_name) "syslog_{}".format(data.func_name)]<block_end>data.search_pattern=r'\s*option option-225\s*\S*\s*"\S+";'<line_sep>data.option_string="option option-225 "# "option dhcp6.boot-file-url "
data.option_url=data.minigraph_path<line_sep>data.option_type="option_67"<line_sep>clear_options_from_dhcp_server(ssh_conn_obj data)<line_sep>data.option_type="option_239"<line_sep>clear_options_from_dhcp_server(ssh_conn_obj data)<line_sep>write_option_to_dhcp_server(ssh_conn_obj data)<line_sep># write_option_225_to_dhcp_server(ssh_conn_obj, data)
basic_obj.service_operations(ssh_conn_obj data.dhcp_service_name data.action data.device)<if_stmt><not>verify_dhcpd_service_status(ssh_conn_obj data.dhcpd_pid)<block_start>st.log("{} service not running".format(data.dhcp_service_name))<line_sep>st.report_fail("service_not_running" data.dhcp_service_name)<block_end>data.device_action="reboot"<if>cli_type<eq>"klish"<else>data.device_action<if_stmt>data.device_action<eq>"reboot"<block_start>reboot_type=data.reboot_type<if>"reboot_type"<in>data<and>data.reboot_type<else>"normal"<line_sep>basic_obj.remove_file(dut data.config_db_path)<line_sep>st.reboot(dut reboot_type skip_port_wait=<true>)<line_sep>st.wait_system_status(dut 400)<block_end><elif_stmt>data.device_action<eq>"run"<block_start>ztp_operations(dut data.device_action)<block_end><if_stmt><not>verify_ztp_status(dut cli_type=cli_type)<block_start><if_stmt>"logs_path"<in>data<and>"func_name"<in>data<block_start>capture_syslogs(dut data.logs_path syslog_file_names)<block_end>st.log("ZTP status verification failed")<line_sep>st.report_fail("ztp_status_verification_failed")<block_end>verify_ztp_filename_logs(dut data)<if_stmt>"ztp_log_string"<in>data<and>data.ztp_log_string<block_start><if_stmt><not>basic_obj.poll_for_error_logs(dut data.ztp_log_path data.ztp_log_string)<block_start>st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path data.ztp_log_string))<if_stmt><not>basic_obj.poll_for_error_logs(dut data.ztp_log_path_1 data.ztp_log_string)<block_start>st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path_1 data.ztp_log_string))<line_sep>st.report_fail("ztp_log_verification_failed" data.ztp_log_path_1 data.ztp_log_string)<block_end><block_end><block_end><block_end><block_end><def_stmt>verify_ztp_attributes dut property value cli_type=""<block_start>cli_type=st.get_ui_type(dut cli_type=cli_type)<line_sep>"""
This is to verify the ztp attributes with the provided value
Author: <NAME> (<EMAIL>)
:param dut: dut object
:param property: status, service, adminmode, filenames, timestamp, source
:param value: This is string except filenames, for file names {'03-test-plugin': 'Not Started', '02-test-plugin':
'Not Started', 'configdb-json': 'Not Started'}
:return: boolean
"""<line_sep>response=show_ztp_status(dut cli_type=cli_type)<if_stmt><not>response<block_start><return><false><block_end><if_stmt>property<in>response<block_start><if_stmt>property<eq>"filenames"<block_start>filenames=response["filenames"][0]<for_stmt>filename,status filenames<block_start><if_stmt>value[filename]<ne>status<block_start><return><false><block_end><block_end><block_end><else_stmt><block_start><if_stmt>response[property]<ne>value<block_start><return><false><block_end><block_end><block_end><else_stmt><block_start><return><false><block_end><return><true><block_end><def_stmt>verify_ztp_filename_logs dut data status="SUCCESS" condition="positive"<block_start>"""
Author: <NAME> (<EMAIL>)
API to verify logs
:param dut:
:param data:
:param status:
:return:
"""<line_sep>filenames=list([str(e)<for>e data.file_names])<if>isinstance(data.file_names list)<else>[data.file_names]<line_sep>log_msg=data.log_msg<if>"log_msg"<in>data<and>data.log_msg<else>"Checking configuration section {} result: {}"<line_sep>match=data.match<if>"match"<in>data<else>""<for_stmt>file_name filenames<block_start>log_string_1=log_msg.format(file_name status)<line_sep>st.log(log_string_1)<if_stmt><not>basic_obj.poll_for_error_logs(dut data.ztp_log_path log_string_1 match=match)<block_start><if_stmt>condition<eq>"positive"<block_start>st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path log_string_1))<if_stmt><not>basic_obj.poll_for_error_logs(dut data.ztp_log_path_1 log_string_1 match=match)<block_start>st.log("ZTP log {} verification failed for message {}".format(data.ztp_log_path_1 log_string_1))<line_sep>st.report_fail("ztp_log_verification_failed" data.ztp_log_path_1 log_string_1)<block_end><else_stmt><block_start><return><true><block_end><block_end><block_end><else_stmt><block_start><return><true><block_end><block_end><block_end><def_stmt>config_ztp_backdoor_options dut ztp_cfg={"admin-mode":<true> "restart-ztp-interval":30} dut_ztp_cfg_file="/host/ztp/ztp_cfg.json"<block_start>"""
Author: <NAME> (<EMAIL>)
Function to enable backward options for ZTP
:param dut:
:param ztp_cfg:
:param dut_ztp_cfg_file:
:return:
"""<line_sep>ztp_cfg_file=basic_obj.write_to_json_file(ztp_cfg)<line_sep>st.upload_file_to_dut(dut ztp_cfg_file dut_ztp_cfg_file)<block_end><def_stmt>ztp_status_verbose dut cli_type=""<block_start>cli_type=st.get_ui_type(dut cli_type=cli_type)<line_sep>"""
API to get the ztp status verbose output with filename and its details as we are getting the status in ztp status API
Author: <NAME> (<EMAIL>)
:param dut:
:return:
"""<line_sep>command="sudo ztp status -v"<if>cli_type<eq>"click"<else>"show ztp-status"<if_stmt>cli_type<eq>"click"<block_start><return>st.show(dut command type=cli_type)<block_end><else_stmt><block_start><return>show_ztp_status(dut cli_type=cli_type)<block_end><block_end><def_stmt>verify_plugin_chronological_order dut cli_type=""<block_start>cli_type=st.get_ui_type(dut cli_type=cli_type)<line_sep>"""
API to verify the plugin chronological order of ztp status
Author: <NAME> (<EMAIL>)
:param dut:
:return:
"""<line_sep>st.log("Verifying timestamp for chronological order ... ")<line_sep>output=ztp_status_verbose(dut cli_type=cli_type)<line_sep>data=list()<if_stmt>cli_type<eq>"click"<block_start><for_stmt>val output<block_start>data.append(val["filetimestamp"])<block_end><block_end><else_stmt><block_start><for_stmt>val output["timestamps"]<block_start><for_stmt>_,timestamp val.items()<block_start>data.append(timestamp)<block_end><block_end>data.sort()<block_end><for_stmt>i,_ enumerate(data)<block_start><if_stmt>i+1<l>len(data)<block_start>result=utils_obj.date_time_delta(data[i] data[i+1] <true>)<line_sep>st.log(result)<if_stmt>result[0]<l>0<or>result[1]<l>0<block_start>st.log("Observed timestamp difference is not as expected ...")<line_sep><return><false><block_end><block_end><block_end><return><true><block_end><def_stmt>verify_dhclient_on_interface dut search_string interface expected_count=2<block_start>"""
API to verify DHCLIENT on provided interface using ps aux command
Author: <NAME> (<EMAIL>)
:param dut:
:param search_string:
:param interface:
:param expected_count:
:return:
"""<line_sep>st.log("Verifying dhclient for {} interface".format(interface))<line_sep>ps_aux=basic_obj.get_ps_aux(dut search_string)<line_sep># if len(ps_aux) != expected_count:
st.log("Observed {} DHCLIENT entries on {} interface".format(len(ps_aux) interface))<line_sep># return False
dhclient_str="/run/dhclient.{}.pid".format(interface)<if_stmt><not>ps_aux<block_start>st.error("DHCLIENT process not found on DUT ...")<line_sep><return><false><block_end><for_stmt>entry ps_aux<block_start><if_stmt>dhclient_str<in>entry["command"]<block_start>st.log("Required dhclient is found ...")<line_sep><return><true><block_end><block_end><return><false><block_end><def_stmt>create_required_folders conn_obj path_list<block_start>"""
API to create folders as per the provided path in bulk
:param dut:
:param path:
:return:
"""<line_sep>path_list=[path_list]<if>type(path_list)<is>str<else>list([str(e)<for>e path_list])<for_stmt>path path_list<block_start>basic_obj.make_dir(conn_obj path "server")<line_sep>basic_obj.change_permissions(conn_obj path 777 "server")<block_end><block_end><def_stmt>config_dhcpv6_options ssh_conn_obj ztp_params config_params options=dict() cli_type=""<block_start>"""
Common function to configure dhcpv6 options and verify the result on both inband and out of band interfaces
:param ssh_conn_obj:
:param ztp_params:
:param config_params:
:param options:
:return:
"""<line_sep>cli_type=st.get_ui_type(config_params.dut cli_type=cli_type)<line_sep>retry_count=config_params.retry_count<if>"retry_count"<in>config_params<and>config_params.retry_count<else>0<line_sep>iteration=config_params.iteration<if>"iteration"<in>config_params<and>config_params.iteration<else>300<line_sep>delay=config_params.delay<if>"delay"<in>config_params<and>config_params.delay<else>3<line_sep>expect_reboot=<true><if>"expect_reboot"<in>options<and>options["expect_reboot"]<else><false><line_sep>st.log(config_params)<if_stmt>"func_name"<in>config_params<block_start>syslog_file_names=["syslog_1_{}".format(config_params.func_name) "syslog_{}".format(config_params.func_name)]<block_end><if_stmt>"json_content"<in>config_params<block_start>file_path=basic_obj.write_to_json_file(config_params.json_content)<line_sep>st.log(file_path)<if_stmt>file_path<block_start>destination_path="{}{}/{}".format(config_params.home_path ztp_params.config_path config_params.ztp_file)<line_sep>st.log(destination_path)<line_sep>basic_obj.copy_file_from_client_to_server(ssh_conn_obj src_path=file_path dst_path=destination_path)<block_end><block_end>config_params.option_59_url="http://[{}]{}/{}".format(config_params.static_ip ztp_params.config_path config_params.ztp_file)<line_sep>config_params.search_pattern=r'\s*option\s+dhcp6.boot-file-url\s+"\S+";'<line_sep>write_option_59_to_dhcp_server(ssh_conn_obj config_params)<line_sep>basic_obj.service_operations(ssh_conn_obj config_params.dhcp6_service_name "restart" "server")<if_stmt><not>verify_dhcpd_service_status(ssh_conn_obj config_params.dhcpd6_pid)<block_start>st.log("{} service is running which is not expected".format(config_params.dhcp6_service_name))<line_sep>st.report_fail("service_running_not_expected" 
config_params.dhcp6_service_name)<block_end>reboot_type=config_params.reboot_type<if>"reboot_type"<in>config_params<and>config_params.reboot_type<else>"normal"<if_stmt>"ztp_operation"<in>config_params<block_start>config_params.ztp_operation="reboot"<if>cli_type<eq>"klish"<else>config_params.ztp_operation<if_stmt>config_params.ztp_operation<eq>"reboot"<block_start>basic_obj.remove_file(config_params.dut config_params.config_db_path)<line_sep>st.reboot(config_params.dut reboot_type skip_port_wait=<true>)<block_end><elif_stmt>config_params.ztp_operation<eq>"run"<block_start>ztp_operations(config_params.dut config_params.ztp_operation)<block_end><block_end><else_stmt><block_start>st.log("ZTP operation is not mentioned hence rebooting the device ...")<line_sep>basic_obj.remove_file(config_params.dut config_params.config_db_path)<line_sep>st.reboot(config_params.dut reboot_type skip_port_wait=<true>)<block_end><if_stmt>"reboot_on_success"<in>options<and>options["reboot_on_success"]<block_start>result=verify_ztp_status(config_params.dut retry_count iteration delay expect_reboot=expect_reboot reboot_on_success=options["reboot_on_success"] cli_type=cli_type)<block_end><else_stmt><block_start>result=verify_ztp_status(config_params.dut retry_count iteration delay expect_reboot=expect_reboot cli_type=cli_type)<block_end><if_stmt><not>result<block_start><if_stmt>"logs_path"<in>config_params<and>"func_name"<in>config_params<block_start>capture_syslogs(config_params.dut config_params.logs_path syslog_file_names)<block_end>st.log("ZTP status verification failed")<line_sep>st.report_fail("ztp_status_verification_failed")<block_end><if_stmt>"reboot_on_success"<in>options<and>options["reboot_on_success"]<block_start>reboot_obj.config_reload(config_params.dut)<line_sep>st.wait(5)<if_stmt><not>ip_obj.ping(config_params.dut config_params.static_ip family="ipv6")<block_start>st.log("Pinging to DHCP server failed from DUT, issue either with DUT or server")<line_sep># 
intf_obj.enable_dhcp_on_interface(config_params.dut, config_params.network_port, "v6")
<block_end><if_stmt><not>verify_ztp_status(config_params.dut retry_count iteration delay cli_type=cli_type)<block_start><if_stmt>"logs_path"<in>config_params<and>"func_name"<in>config_params<block_start>capture_syslogs(config_params.dut config_params.logs_path syslog_file_names)<block_end>st.log("ZTP status verification failed")<line_sep>st.report_fail("ztp_status_verification_failed")<block_end><block_end>verify_ztp_filename_logs(config_params.dut config_params)<if_stmt>"ztp_log_string"<in>config_params<and>config_params.ztp_log_string<block_start><if_stmt><not>basic_obj.poll_for_error_logs(config_params.dut config_params.ztp_log_path config_params.ztp_log_string)<block_start>st.log("ZTP log {} verification failed for message {}".format(config_params.ztp_log_path config_params.ztp_log_string))<if_stmt><not>basic_obj.poll_for_error_logs(config_params.dut config_params.ztp_log_path_1 config_params.ztp_log_string)<block_start>st.log("ZTP log {} verification failed for message {}".format(config_params.ztp_log_path_1 config_params.ztp_log_string))<line_sep>st.report_fail("ztp_log_verification_failed" config_params.ztp_log_path_1 config_params.ztp_log_string)<block_end><block_end><block_end><if_stmt>"result"<in>config_params<and>config_params.result<eq>"pass"<block_start>st.report_pass("test_case_passed")<block_end><block_end><def_stmt>write_option_59_to_dhcp_server connection_obj data<block_start>"""
API to add option 59 in DHCP config file.
:param connection_obj:
:param data:
:return:
"""<line_sep>line_number=basic_obj.get_file_number_with_regex(connection_obj data.search_pattern data.dhcp_config_file)<line_sep>option_59="option dhcp6.boot-file-url "<line_sep>option_59_path=data["option_59_url"]<line_sep>option_59_config="'{} \"{}\";'".format(option_59 option_59_path)<if_stmt>line_number<ge>0<block_start>basic_obj.delete_line_using_line_number(connection_obj line_number data.dhcp_config_file)<block_end>basic_obj.write_to_file(connection_obj option_59_config data.dhcp_config_file device="server")<line_sep># else:
# basic_obj.delete_line_using_line_number(connection_obj, line_number, data.dhcp_config_file)
# basic_obj.write_to_file_to_line(connection_obj, option_59_config, line_number, data.dhcp_config_file, device="server")
line_number=basic_obj.get_file_number_with_regex(connection_obj data.search_pattern data.dhcp_config_file)<if_stmt>line_number<le>0<block_start>st.log("Written content in file {} not found".format(data["dhcp_config_file"]))<line_sep>st.report_fail("content_not_found")<block_end><block_end><def_stmt>write_option_to_dhcp_server connection_obj data<block_start>"""
Common API to write matched line with new one
:param connection_obj:
:param data:
:return:
"""<line_sep>line_number=basic_obj.get_file_number_with_regex(connection_obj data.search_pattern data.dhcp_config_file)<line_sep>option=data.option_string# "option dhcp6.boot-file-url "
option_path=data.option_url<line_sep>st.log("#####LINE NUMBER{}".format(line_number))<line_sep>option_config="'{} \"{}\";'".format(option option_path)<if_stmt>int(line_number)<g>0# line_number = data.line_number if line_number in data else 60
<block_start>basic_obj.delete_line_using_line_number(connection_obj line_number data.dhcp_config_file)<block_end>basic_obj.write_to_file(connection_obj option_config data.dhcp_config_file device="server")<line_sep># basic_obj.write_to_file_to_line(connection_obj, option_config, line_number, data.dhcp_config_file, device="server")
line_number=basic_obj.get_file_number_with_regex(connection_obj data.search_pattern data.dhcp_config_file)<line_sep>st.log("#####LINE NUMBER{}".format(line_number))<if_stmt>line_number<le>0<block_start>st.log("Written content in file {} not found".format(data["dhcp_config_file"]))<line_sep>st.report_fail("content_not_found")<block_end><block_end><def_stmt>clear_options_from_dhcp_server connection_obj data<block_start>st.log("Clearing OPTIONS from DHCP server")<line_sep>option=""<if_stmt>"option_type"<in>data<and>data.option_type<eq>"option_67"<block_start>option=r'\s*option\s+bootfile-name\s*\S*\s*"\S+";'<block_end><elif_stmt>"option_type"<in>data<and>data.option_type<eq>"option_239"<block_start>option=r'\s*option\s+provision-url\s*\S*\s*"\S+";'<block_end><elif_stmt>"option_type"<in>data<and>data.option_type<eq>"option_59"<block_start>option=r'\s*option\s+dhcp6.boot-file-url\s+"\S+";'<block_end><elif_stmt>"option_type"<in>data<and>data.option_type<eq>"option_225"<block_start>option=r'\s*option option-225\s*\S*\s*"\S+";'<block_end>st.log("OPTION is {}".format(option))<line_sep>st.log("CONFIG FILE is {}".format(data.dhcp_config_file))<if_stmt>option<block_start>line_number=basic_obj.get_file_number_with_regex(connection_obj option data.dhcp_config_file)<if_stmt>line_number<g>0<block_start>basic_obj.delete_line_using_line_number(connection_obj line_number data.dhcp_config_file)<block_end><block_end><block_end><def_stmt>verify_dhcpd_service_status dut process_id<block_start>"""
API to verify DHCLIENT on provided interface using ps aux command
Author: <NAME> (<EMAIL>)
:param dut:
:param search_string:
:param interface:
:param expected_count:
:return:
"""<line_sep>st.log("Verifying DHCPD for {} ".format(process_id))<line_sep>dhcpd_pid="/run/dhcp-server/{}".format(process_id)<line_sep>ps_aux=basic_obj.get_ps_aux(dut dhcpd_pid device="server")<line_sep>st.log(ps_aux)<line_sep>config_string=""<if_stmt>process_id<eq>"dhcpd6.pid"<block_start>config_string="-cf /etc/dhcp/dhcpd6.conf"<block_end><if_stmt>process_id<eq>"dhcpd.pid"<block_start>config_string="-cf /etc/dhcp/dhcpd.conf"<block_end>st.log("Verifying the output with {}".format(config_string))<if_stmt>config_string<not><in>ps_aux<block_start>st.log("Required DHCPD service not found ...")<line_sep><return><false><block_end><return><true><block_end><def_stmt>capture_syslogs dut destination_path file_name<block_start>file_names=list(file_name)<if>isinstance(file_name list)<else>[file_name]<line_sep>syslog_paths=["/var/log/syslog.1" "/var/log/syslog"]<for_stmt>i,syslog_path enumerate(syslog_paths)<block_start>dst_file="{}/{}".format(destination_path file_names[i])<line_sep>st.download_file_from_dut(dut syslog_path dst_file)<block_end><return><true><block_end> |
<import_stmt>sys<line_sep>__all__=['IntegerTypes' 'StringTypes']<if_stmt>sys.version_info<l>(3 )<block_start>IntegerTypes=(int long)<line_sep>StringTypes=(str unicode)<line_sep>long=long<import_stmt>__builtin__<as>builtins<block_end><else_stmt><block_start>IntegerTypes=(int )<line_sep>StringTypes=(str )<line_sep>long=int<import_stmt>builtins<block_end> |
<import_stmt>aioredis<import_stmt>trafaret<as>t<import_stmt>yaml<import_from_stmt>aiohttp web<line_sep>CONFIG_TRAFARET=t.Dict({t.Key('redis'):t.Dict({'port':t.Int() 'host':t.String() 'db':t.Int() 'minsize':t.Int() 'maxsize':t.Int() }) 'host':t.IP 'port':t.Int() })<def_stmt>load_config fname<block_start><with_stmt>open(fname 'rt')<as>f<block_start>data=yaml.load(f)<block_end><return>CONFIG_TRAFARET.check(data)<block_end><async_keyword><def_stmt>init_redis conf loop<block_start>pool=<await>aioredis.create_redis_pool((conf['host'] conf['port']) minsize=conf['minsize'] maxsize=conf['maxsize'] loop=loop )<line_sep><return>pool<block_end>CHARS="abcdefghijkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789"<def_stmt>encode num alphabet=CHARS<block_start><if_stmt>num<eq>0<block_start><return>alphabet[0]<block_end>arr=[]<line_sep>base=len(alphabet)<while_stmt>num<block_start>num,rem=divmod(num base)<line_sep>arr.append(alphabet[rem])<block_end>arr.reverse()<line_sep><return>''.join(arr)<block_end>ShortifyRequest=t.Dict({t.Key('url'):t.URL})<def_stmt>fetch_url data<block_start><try_stmt><block_start>data=ShortifyRequest(data)<block_end><except_stmt>t.DataError<block_start><raise>web.HTTPBadRequest('URL is not valid')<block_end><return>data['url']<block_end> |
### tensorflow==2.3.1
<import_stmt>tensorflow<as>tf<line_sep># Float16 Quantization - Input/Output=float32
height=384<line_sep>width=384<line_sep>converter=tf.lite.TFLiteConverter.from_saved_model('saved_model')<line_sep>converter.optimizations=[tf.lite.Optimize.DEFAULT]<line_sep>converter.target_spec.supported_types=[tf.float16]<line_sep>tflite_model=converter.convert()<with_stmt>open('midas_{}x{}_float16_quant.tflite'.format(height width) 'wb')<as>w<block_start>w.write(tflite_model)<block_end>print('Float16 Quantization complete! - midas_{}x{}_float16_quant.tflite'.format(height width))<line_sep> |
<import_stmt>sys<import_from_stmt>antlr4 *<import_from_stmt>ChatParser ChatParser<import_from_stmt>ChatListener ChatListener<import_from_stmt>antlr4.error.ErrorListener *<import_stmt>io<class_stmt>ChatErrorListener(ErrorListener)<block_start><def_stmt>__init__ self output<block_start>self.output=output<line_sep>self._symbol=''<block_end><def_stmt>syntaxError self recognizer offendingSymbol line column msg e<block_start>self.output.write(msg)<if_stmt>offendingSymbol<is><not><none><block_start>self._symbol=offendingSymbol.text<block_end><else_stmt><block_start>self._symbol=recognizer.getTokenErrorDisplay(offendingSymbol)<line_sep><block_end><block_end>@property<def_stmt>symbol self<block_start><return>self._symbol<block_end><block_end> |
<import_from_stmt>utils *<import_stmt>torch<import_stmt>sys<import_stmt>numpy<as>np<import_stmt>time<import_stmt>torchvision<import_from_stmt>torch.autograd Variable<import_stmt>torchvision.transforms<as>transforms<import_stmt>torchvision.datasets<as>datasets<def_stmt>validate_pgd val_loader model criterion K step configs logger save_image=<false> HE=<false># Mean/Std for normalization
<block_start>mean=torch.Tensor(np.array(configs.TRAIN.mean)[: np.newaxis np.newaxis])<line_sep>mean=mean.expand(3 configs.DATA.crop_size configs.DATA.crop_size).cuda()<line_sep>std=torch.Tensor(np.array(configs.TRAIN.std)[: np.newaxis np.newaxis])<line_sep>std=std.expand(3 configs.DATA.crop_size configs.DATA.crop_size).cuda()<line_sep># Initiate the meters
batch_time=AverageMeter()<line_sep>losses=AverageMeter()<line_sep>top1=AverageMeter()<line_sep>top5=AverageMeter()<line_sep>eps=configs.ADV.clip_eps<line_sep>model.eval()<line_sep>end=time.time()<line_sep>logger.info(pad_str(' PGD eps: {}, K: {}, step: {} '.format(eps K step)))<if_stmt>HE<eq><true><block_start>is_HE='_HE'<block_end><else_stmt><block_start>is_HE=''<block_end><if_stmt>configs.pretrained<block_start>is_HE='_pretrained'<block_end><for_stmt>i,(input target) enumerate(val_loader)<block_start>input=input.cuda(non_blocking=<true>)<line_sep>target=target.cuda(non_blocking=<true>)<line_sep>#save original images
<if_stmt>save_image<eq><true><and>i<l>2<block_start>original_images_save=input.clone()<for_stmt>o range(input.size(0))<block_start>torchvision.utils.save_image(original_images_save[o : : :] 'saved_images/original_images'+is_HE+'/{}.png'.format(o+configs.DATA.batch_size<times>i))<block_end><block_end>randn=torch.FloatTensor(input.size()).uniform_(-eps eps).cuda()<line_sep>input<augadd>randn<line_sep>input.clamp_(0 1.0)<line_sep>orig_input=input.clone()<for_stmt>_ range(K)<block_start>invar=Variable(input requires_grad=<true>)<line_sep>in1=invar-mean<line_sep>in1.div_(std)<line_sep>output=model(in1)<line_sep>ascend_loss=criterion(output target)<line_sep>ascend_grad=torch.autograd.grad(ascend_loss invar)[0]<line_sep>pert=fgsm(ascend_grad step)<line_sep># Apply purturbation
input<augadd>pert.data<line_sep>input=torch.max(orig_input-eps input)<line_sep>input=torch.min(orig_input+eps input)<line_sep>input.clamp_(0 1.0)<block_end>#save adv images
<if_stmt>save_image<eq><true><and>i<l>2<block_start>adv_images_save=input.clone()<for_stmt>o range(input.size(0))<block_start>torchvision.utils.save_image(adv_images_save[o : : :] 'saved_images/adv_images'+is_HE+'/{}.png'.format(o+configs.DATA.batch_size<times>i))<block_end><block_end>#save scaled perturbation
perturbation=input-orig_input<line_sep>perturbation.clamp_(-eps eps)<line_sep>scaled_perturbation=(perturbation.clone()+eps)/(2<times>eps)<line_sep>scaled_perturbation.clamp_(0 1.0)<if_stmt>save_image<eq><true><and>i<l>2<block_start><for_stmt>o range(input.size(0))<block_start>torchvision.utils.save_image(scaled_perturbation[o : : :] 'saved_images/scaled_perturbation'+is_HE+'/{}.png'.format(o+configs.DATA.batch_size<times>i))<block_end><block_end>input.sub_(mean).div_(std)<with_stmt>torch.no_grad()# compute output
<block_start>output=model(input)<line_sep>loss=criterion(output target)<line_sep># measure accuracy and record loss
prec1,prec5=accuracy(output target topk=(1 5))<line_sep>losses.update(loss.item() input.size(0))<line_sep>top1.update(prec1[0] input.size(0))<line_sep>top5.update(prec5[0] input.size(0))<line_sep># measure elapsed time
batch_time.update(time.time()-end)<line_sep>end=time.time()<if_stmt>i%configs.TRAIN.print_freq<eq>0<block_start>print('PGD Test: [{0}/{1}]\t'<concat>'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'<concat>'Loss {loss.val:.4f} ({loss.avg:.4f})\t'<concat>'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'<concat>'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i len(val_loader) batch_time=batch_time loss=losses top1=top1 top5=top5))<line_sep>sys.stdout.flush()<block_end><block_end><block_end>print(' PGD Final Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1 top5=top5))<line_sep><return>top1.avg<block_end><def_stmt>validate val_loader model criterion configs logger# Mean/Std for normalization
<block_start>mean=torch.Tensor(np.array(configs.TRAIN.mean)[: np.newaxis np.newaxis])<line_sep>mean=mean.expand(3 configs.DATA.crop_size configs.DATA.crop_size).cuda()<line_sep>std=torch.Tensor(np.array(configs.TRAIN.std)[: np.newaxis np.newaxis])<line_sep>std=std.expand(3 configs.DATA.crop_size configs.DATA.crop_size).cuda()<line_sep># Initiate the meters
batch_time=AverageMeter()<line_sep>losses=AverageMeter()<line_sep>top1=AverageMeter()<line_sep>top5=AverageMeter()<line_sep># switch to evaluate mode
model.eval()<line_sep>end=time.time()<for_stmt>i,(input target) enumerate(val_loader)<block_start><with_stmt>torch.no_grad()<block_start>input=input.cuda(non_blocking=<true>)<line_sep>target=target.cuda(non_blocking=<true>)<line_sep># compute output
input=input-mean<line_sep>input.div_(std)<line_sep>output=model(input)<line_sep>loss=criterion(output target)<line_sep># measure accuracy and record loss
prec1,prec5=accuracy(output target topk=(1 5))<line_sep>losses.update(loss.item() input.size(0))<line_sep>top1.update(prec1[0] input.size(0))<line_sep>top5.update(prec5[0] input.size(0))<line_sep># measure elapsed time
batch_time.update(time.time()-end)<line_sep>end=time.time()<if_stmt>i%configs.TRAIN.print_freq<eq>0<block_start>print('Test: [{0}/{1}]\t'<concat>'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'<concat>'Loss {loss.val:.4f} ({loss.avg:.4f})\t'<concat>'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'<concat>'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i len(val_loader) batch_time=batch_time loss=losses top1=top1 top5=top5))<line_sep>sys.stdout.flush()<block_end><block_end><block_end>print(' Final Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1 top5=top5))<line_sep><return>top1.avg<block_end><def_stmt>validate_ImagetNet_C val_loader_name model criterion configs logger# Mean/Std for normalization
<block_start>mean=torch.Tensor(np.array(configs.TRAIN.mean)[: np.newaxis np.newaxis])<line_sep>mean=mean.expand(3 configs.DATA.crop_size configs.DATA.crop_size).cuda()<line_sep>std=torch.Tensor(np.array(configs.TRAIN.std)[: np.newaxis np.newaxis])<line_sep>std=std.expand(3 configs.DATA.crop_size configs.DATA.crop_size).cuda()<line_sep># switch to evaluate mode
model.eval()<line_sep>fil_index=['/1' '/2' '/3' '/4' '/5']<line_sep>avg_return=0<for_stmt>f fil_index<block_start>valdir=os.path.join(configs.data val_loader_name+f)<line_sep>print(' File: ' valdir)<line_sep>val_loader=torch.utils.data.DataLoader(datasets.ImageFolder(valdir transforms.Compose([transforms.Resize(configs.DATA.img_size) transforms.CenterCrop(configs.DATA.crop_size) transforms.ToTensor() ])) batch_size=configs.DATA.batch_size shuffle=<false> num_workers=configs.DATA.workers pin_memory=<true>)<line_sep># Initiate the meters
top1=AverageMeter()<line_sep>end=time.time()<for_stmt>i,(input target) enumerate(val_loader)<block_start><with_stmt>torch.no_grad()<block_start>input=input.cuda(non_blocking=<true>)<line_sep>target=target.cuda(non_blocking=<true>)<line_sep># compute output
input=input-mean<line_sep>input.div_(std)<line_sep>output=model(input)<line_sep># measure accuracy and record loss
prec1,_=accuracy(output target topk=(1 2))<line_sep>top1.update(prec1[0] input.size(0))<line_sep># if i % configs.TRAIN.print_freq == 0:
# print('PGD Test: [{0}/{1}]\t'
# 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
# i, len(val_loader),top1=top1))
# print('Time: ', time.time() - end)
# sys.stdout.flush()
<block_end><block_end>print('Prec: ' top1.avg.cpu().item())<line_sep>avg_return<augadd>top1.avg.cpu().item()<block_end>print('Avergae Classification Accuracy is: ' avg_return/5.)<line_sep><return><block_end> |
<import_stmt>torch<class_stmt>LossScaler<block_start><def_stmt>__init__ self scale=1<block_start>self.cur_scale=scale<block_end># `params` is a list / generator of torch.Variable
<def_stmt>has_overflow self params<block_start><return><false><block_end># `x` is a torch.Tensor
<def_stmt>_has_inf_or_nan x<block_start><return><false><block_end># `overflow` is boolean indicating whether we overflowed in gradient
<def_stmt>update_scale self overflow<block_start><pass><block_end>@property<def_stmt>loss_scale self<block_start><return>self.cur_scale<block_end><def_stmt>scale_gradient self module grad_in grad_out<block_start><return>tuple(self.loss_scale<times>g<for>g grad_in)<block_end><def_stmt>backward self loss<block_start>scaled_loss=loss<times>self.loss_scale<line_sep>scaled_loss.backward()<block_end><block_end><class_stmt>DynamicLossScaler<block_start><def_stmt>__init__ self init_scale=2<power>32 scale_factor=2. scale_window=1000<block_start>self.cur_scale=init_scale<line_sep>self.cur_iter=0<line_sep>self.last_overflow_iter=-1<line_sep>self.scale_factor=scale_factor<line_sep>self.scale_window=scale_window<block_end># `params` is a list / generator of torch.Variable
<def_stmt>has_overflow self params# return False
<block_start><for_stmt>p params<block_start><if_stmt>p.grad<is><not><none><and>DynamicLossScaler._has_inf_or_nan(p.grad.data)<block_start><return><true><block_end><block_end><return><false><block_end># `x` is a torch.Tensor
<def_stmt>_has_inf_or_nan x<block_start>inf_count=torch.sum(x.abs()<eq>float('inf'))<if_stmt>inf_count<g>0<block_start><return><true><block_end>nan_count=torch.sum(x<ne>x)<line_sep><return>nan_count<g>0<block_end># `overflow` is boolean indicating whether we overflowed in gradient
<def_stmt>update_scale self overflow<block_start><if_stmt>overflow#self.cur_scale /= self.scale_factor
<block_start>self.cur_scale=max(self.cur_scale/self.scale_factor 1)<line_sep>self.last_overflow_iter=self.cur_iter<block_end><else_stmt><block_start><if_stmt>(self.cur_iter-self.last_overflow_iter)%self.scale_window<eq>0<block_start>self.cur_scale<augmul>self.scale_factor<block_end><block_end># self.cur_scale = 1
self.cur_iter<augadd>1<block_end>@property<def_stmt>loss_scale self<block_start><return>self.cur_scale<block_end><def_stmt>scale_gradient self module grad_in grad_out<block_start><return>tuple(self.loss_scale<times>g<for>g grad_in)<block_end><def_stmt>backward self loss<block_start>scaled_loss=loss<times>self.loss_scale<line_sep>scaled_loss.backward()<block_end><block_end> |
<def_stmt>cog <block_start><return>"Cog is alive."<block_end> |
<import_from_stmt>.client Client<class_stmt>Stats(Client)<block_start><def_stmt>__init__ self api_key='YourApiKeyToken'<block_start>Client.__init__(self address='' api_key=api_key)<line_sep>self.url_dict[self.MODULE]='stats'<block_end><def_stmt>get_total_ether_supply self<block_start>self.url_dict[self.ACTION]='ethsupply'<line_sep>self.build_url()<line_sep>req=self.connect()<line_sep><return>req['result']<block_end><def_stmt>get_ether_last_price self<block_start>self.url_dict[self.ACTION]='ethprice'<line_sep>self.build_url()<line_sep>req=self.connect()<line_sep><return>req['result']<block_end><block_end> |
<import_stmt>MiniNero<import_stmt>ed25519<import_stmt>binascii<import_stmt>PaperWallet<import_stmt>cherrypy<import_stmt>os<import_stmt>time<import_stmt>bitmonerod<import_stmt>SimpleXMR2<import_stmt>SimpleServer<line_sep>message="send0d000114545737471em2WCg9QKxRxbo6S3xKF2K4UDvdu6hMc"<line_sep>message="send0d0114545747771em2WCg9QKxRxbo6S3xKF2K4UDvdu6hMc"<line_sep>sec=raw_input("sec?")<line_sep>print(SimpleServer.Signature(message sec))<line_sep> |
<import_stmt>os<import_stmt>pytest<import_from_stmt>django.urls reverse<import_from_stmt>seahub.options.models UserOptions KEY_FORCE_2FA VAL_FORCE_2FA <import_from_stmt>seahub.test_utils BaseTestCase<import_from_stmt>seahub.two_factor.models TOTPDevice devices_for_user<line_sep>TRAVIS='TRAVIS'<in>os.environ<line_sep>@pytest.mark.skipif(TRAVIS reason="")<class_stmt>TwoFactorAuthViewTest(BaseTestCase)<block_start><def_stmt>setUp self<block_start>self.login_as(self.admin)<block_end><def_stmt>test_can_disable_two_factor_auth self<block_start>totp=TOTPDevice(user=self.admin name="" confirmed=1)<line_sep>totp.save()<line_sep>devices=devices_for_user(self.admin)<line_sep>i=0<for_stmt>device devices_for_user(self.admin)<block_start><if_stmt>device<block_start>i<augadd>1<block_end><block_end><assert_stmt>i<g>0<line_sep>resp=self.client.delete(reverse('two-factor-auth-view' args=[str(self.admin.username)]))<assert_stmt>resp.status_code<eq>200<line_sep>i=0<for_stmt>device devices_for_user(self.admin)<block_start><if_stmt>device<block_start>i<augadd>1<block_end><block_end><assert_stmt>i<eq>0<block_end><def_stmt>tearDown self<block_start><try_stmt><block_start><for_stmt>device devices_for_user(self.admin)<block_start>device.delete()<block_end><block_end><except_stmt><block_start><pass><block_end><block_end><def_stmt>test_force_2fa self<block_start><assert_stmt>len(UserOptions.objects.filter(email=self.user.email option_key=KEY_FORCE_2FA))<eq>0<line_sep>resp=self.client.put(reverse('two-factor-auth-view' args=[self.user.username]) 'force_2fa=1' 'application/x-www-form-urlencoded' )<line_sep>self.assertEqual(200 resp.status_code)<assert_stmt>len(UserOptions.objects.filter(email=self.user.email option_key=KEY_FORCE_2FA))<eq>1<line_sep>resp=self.client.put(reverse('two-factor-auth-view' args=[self.user.username]) 'force_2fa=0' 'application/x-www-form-urlencoded' )<line_sep>self.assertEqual(200 resp.status_code)<assert_stmt>len(UserOptions.objects.filter(email=self.user.email 
option_key=KEY_FORCE_2FA))<eq>0<block_end><block_end> |
# CircuitPlaygroundExpress_LightSensor
# reads the on-board light sensor and graphs the brighness with NeoPixels
<import_stmt>time<import_from_stmt>adafruit_circuitplayground.express cpx<import_from_stmt>simpleio map_range<line_sep>cpx.pixels.brightness=0.05<while_stmt><true># light value remaped to pixel position
<block_start>peak=map_range(cpx.light 10 325 0 9)<line_sep>print(cpx.light)<line_sep>print(int(peak))<for_stmt>i range(0 9 1)<block_start><if_stmt>i<le>peak<block_start>cpx.pixels[i]=(0 255 0)<block_end><else_stmt><block_start>cpx.pixels[i]=(0 0 0)<block_end><block_end>time.sleep(0.01)<block_end> |
<import_stmt>inspect<import_from_stmt>unittest.mock Mock<import_from_stmt>_pytest.monkeypatch MonkeyPatch<import_from_stmt>rasa.core.policies.ted_policy TEDPolicy<import_from_stmt>rasa.engine.training fingerprinting<import_from_stmt>rasa.nlu.classifiers.diet_classifier DIETClassifier<import_from_stmt>rasa.nlu.selectors.response_selector ResponseSelector<import_from_stmt>tests.engine.training.test_components FingerprintableText<def_stmt>test_fingerprint_stays_same <block_start>key1=fingerprinting.calculate_fingerprint_key(TEDPolicy TEDPolicy.get_default_config() {"input":FingerprintableText("Hi")} )<line_sep>key2=fingerprinting.calculate_fingerprint_key(TEDPolicy TEDPolicy.get_default_config() {"input":FingerprintableText("Hi")} )<assert_stmt>key1<eq>key2<block_end><def_stmt>test_fingerprint_changes_due_to_class <block_start>key1=fingerprinting.calculate_fingerprint_key(DIETClassifier TEDPolicy.get_default_config() {"input":FingerprintableText("Hi")} )<line_sep>key2=fingerprinting.calculate_fingerprint_key(ResponseSelector TEDPolicy.get_default_config() {"input":FingerprintableText("Hi")} )<assert_stmt>key1<ne>key2<block_end><def_stmt>test_fingerprint_changes_due_to_config <block_start>key1=fingerprinting.calculate_fingerprint_key(TEDPolicy {} {"input":FingerprintableText("Hi")} )<line_sep>key2=fingerprinting.calculate_fingerprint_key(ResponseSelector TEDPolicy.get_default_config() {"input":FingerprintableText("Hi")} )<assert_stmt>key1<ne>key2<block_end><def_stmt>test_fingerprint_changes_due_to_inputs <block_start>key1=fingerprinting.calculate_fingerprint_key(TEDPolicy {} {"input":FingerprintableText("Hi")} )<line_sep>key2=fingerprinting.calculate_fingerprint_key(ResponseSelector TEDPolicy.get_default_config() {"input":FingerprintableText("bye")} )<assert_stmt>key1<ne>key2<block_end><def_stmt>test_fingerprint_changes_due_to_changed_source monkeypatch:MonkeyPatch<block_start>key1=fingerprinting.calculate_fingerprint_key(TEDPolicy {} 
{"input":FingerprintableText("Hi")} )<line_sep>get_source_mock=Mock(return_value="other implementation")<line_sep>monkeypatch.setattr(inspect inspect.getsource.__name__ get_source_mock)<line_sep>key2=fingerprinting.calculate_fingerprint_key(TEDPolicy {} {"input":FingerprintableText("Hi")} )<assert_stmt>key1<ne>key2<line_sep>get_source_mock.assert_called_once_with(TEDPolicy)<block_end> |
<import_from_stmt>.assembler TsvAssembler<line_sep> |
<import_from_stmt>IPython get_ipython<import_from_stmt>IPython.display display<def_stmt>is_ipynb <block_start><return>type(get_ipython()).__module__.startswith('ipykernel.')<block_end> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.