content stringlengths 0 1.55M |
|---|
"""Various utility functions."""<import_stmt>ast<import_stmt>collections.abc<import_stmt>typing<as>t<import_stmt>typed_ast.ast3<as>typed_ast3<def_stmt>dict_mirror dict_:dict<block_start><return>{value:key<for>key,value dict_.items()<if>value<is><not><none>}<block_end><def_stmt>flatten_sequence sequence:t.MutableSequence[t.Any]<arrow><none><block_start>"""Transform a given list of lists of lists (...) of lists into a flat list in-place."""<assert_stmt>isinstance(sequence collections.abc.MutableSequence) type(sequence)<for_stmt>i,elem enumerate(sequence)<block_start><if_stmt>isinstance(elem collections.abc.MutableSequence)<block_start>flatten_sequence(elem)<for_stmt>value reversed(elem)<block_start>sequence.insert(i value)<block_end><del_stmt>sequence[i+len(elem)]<block_end><block_end><block_end><def_stmt>make_flatten_syntax ast_module<block_start><def_stmt>flatten_syntax syntax:t.Union[ast_module.AST t.MutableSequence[t.Any]]<arrow><none><block_start>"""Flatten all lists of lists within the given syntax in-place."""<if_stmt>isinstance(syntax (ast_module.Module ast_module.FunctionDef ast_module.ClassDef ast_module.For ast_module.While ast_module.If ast_module.With ast_module.Try ast_module.ExceptHandler ast_module.AsyncFunctionDef ast_module.AsyncFor ast_module.AsyncWith))<block_start><for_stmt>node syntax.body<block_start>flatten_syntax(node)<block_end>flatten_sequence(syntax.body)<line_sep><return><block_end><if_stmt>isinstance(syntax (ast_module.For ast_module.While ast_module.If ast_module.Try ast_module.AsyncFor))<block_start><for_stmt>node syntax.orelse<block_start>flatten_syntax(node)<block_end>flatten_sequence(syntax.orelse)<line_sep><return><block_end><if_stmt>isinstance(syntax ast_module.Try)<block_start><for_stmt>node syntax.handlers<block_start>flatten_syntax(node)<block_end># flatten_sequence(syntax.handlers) # unnecessary
<for_stmt>node syntax.finalbody<block_start>flatten_syntax(node)<block_end>flatten_sequence(syntax.finalbody)<line_sep><return><block_end><if_stmt><not>isinstance(syntax collections.abc.MutableSequence)<block_start><return><block_end><for_stmt>node syntax<block_start>flatten_syntax(node)<block_end>flatten_sequence(syntax)<block_end><return>flatten_syntax<block_end>flatten_syntax={ast_module:make_flatten_syntax(ast_module)<for>ast_module (ast typed_ast3)}<line_sep> |
<import_from_stmt>evaluation_functions *<line_sep>flags=tf.app.flags<line_sep>data_dir='/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/'<line_sep>model_path='/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/tests/primary/old/no_flip/basic/'<concat>'tests_lr_primary_basic_no_flip/0.01/model/deep_heatmaps-80000'<line_sep># define paths
flags.DEFINE_string('img_dir' data_dir 'data directory')<line_sep>flags.DEFINE_string('test_data' 'test' 'test set to use full/common/challenging/test/art')<line_sep>flags.DEFINE_string('model_path' model_path 'model path')<line_sep># parameters used to train network
flags.DEFINE_string('network_type' 'Primary' 'network architecture Fusion/Primary')<line_sep>flags.DEFINE_integer('image_size' 256 'image size')<line_sep>flags.DEFINE_integer('c_dim' 3 'color channels')<line_sep>flags.DEFINE_integer('num_landmarks' 68 'number of face landmarks')<line_sep>flags.DEFINE_integer('scale' 1 'scale for image normalization 255/1/0')<line_sep>flags.DEFINE_float('margin' 0.25 'margin for face crops - % of bb size')<line_sep>flags.DEFINE_string('bb_type' 'gt' "bb to use - 'gt':for ground truth / 'init':for face detector output")<line_sep># choose batch size and debug data size
flags.DEFINE_integer('batch_size' 2 'batch size')<line_sep>flags.DEFINE_bool('debug' <true> 'run in debug mode - use subset of the data')<line_sep>flags.DEFINE_integer('debug_data_size' 4 'subset data size to test in debug mode')<line_sep># statistics parameters
flags.DEFINE_float('max_error' 0.08 'error threshold to be considered as failure')<line_sep>flags.DEFINE_bool('save_log' <true> 'save statistics to log_dir')<line_sep>flags.DEFINE_string('log_path' 'logs/nme_statistics' 'directory for saving NME statistics')<line_sep>FLAGS=flags.FLAGS<def_stmt>main _# create directories if not exist
<block_start><if_stmt><not>tf.gfile.Exists(FLAGS.log_path)<block_start>tf.gfile.MakeDirs(FLAGS.log_path)<block_end>err=evaluate_heatmap_network(model_path=FLAGS.model_path network_type=FLAGS.network_type img_path=FLAGS.img_dir test_data=FLAGS.test_data batch_size=FLAGS.batch_size image_size=FLAGS.image_size margin=FLAGS.margin bb_type=FLAGS.bb_type c_dim=FLAGS.c_dim scale=FLAGS.scale num_landmarks=FLAGS.num_landmarks debug=FLAGS.debug debug_data_size=FLAGS.debug_data_size)<line_sep>print_nme_statistics(errors=err model_path=FLAGS.model_path network_type=FLAGS.network_type test_data=FLAGS.test_data max_error=FLAGS.max_error save_log=FLAGS.save_log log_path=FLAGS.log_path)<block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.app.run()<block_end> |
<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>os<import_from_stmt>PIL Image<import_from_stmt>timeit default_timer<as>timer<import_stmt>utils<import_from_stmt>utils contents_of_bbox features_from_image<import_from_stmt>similarity load_brands_compute_cutoffs similar_matches similarity_cutoff draw_matches<def_stmt>detect_logo yolo img_path save_img save_img_path='./' postfix=''<block_start>"""
Call YOLO logo detector on input image, optionally save resulting image.
Args:
yolo: keras-yolo3 initialized YOLO instance
img_path: path to image file
save_img: bool to save annotated image
save_img_path: path to directory where to save image
postfix: string to add to filenames
Returns:
prediction: list of bounding boxes in format (xmin,ymin,xmax,ymax,class_id,confidence)
image: unaltered input image as (H,W,C) array
"""<try_stmt><block_start>image=Image.open(img_path)<if_stmt>image.mode<ne>"RGB"<block_start>image=image.convert("RGB")<block_end>image_array=np.array(image)<block_end><except_stmt><block_start>print('File Open Error! Try again!')<line_sep><return><none> <none><block_end>prediction,new_image=yolo.detect_image(image)<line_sep>img_out=postfix.join(os.path.splitext(os.path.basename(img_path)))<if_stmt>save_img<block_start>new_image.save(os.path.join(save_img_path img_out))<block_end><return>prediction image_array<block_end><def_stmt>match_logo img_test prediction model_preproc outtxt input_features_cdf_cutoff_labels save_img save_img_path='./' timing=<false><block_start>"""
Given an a path to an image and a list of predicted bounding boxes,
extract features and check each against input brand features. Declare
a match if the cosine similarity is smaller than an input-dependent
cutoff. Draw and annotate resulting boxes on image.
Args:
img_test: input image
prediction: bounding box candidates
model_preproc: (model, preprocess) tuple of the feature extractor model
and the preprocessing function to be applied to image before the model
input_features_cdf_cutoff_labels = (feat_input, sim_cutoff, bins, cdf_list, input_labels)
tuple of lists related to input brand, giving pre-computed features,
similarity cutoffs, cumulative similarity distribution and relative bins
specifications, and labels to be drawn when matches are found.
save_img: bool flag to save annotated image
save_img_path: path to directory where to save image
timing: bool flag to output timing information for each step, make plot
Returns:
outtxt: one line detailing input file path and resulting matched bounding
boxes, space-separated in format
(xmin,ymin,xmax,ymax,class_label,logo_confidence,similarity_percentile)
timing: timing for each step of the pipeline, namely image read, logog candidate
extraction, feature computation, matching to input brands
(optional, only if timing=True)
"""<line_sep>start=timer()<line_sep>model,my_preprocess=model_preproc<line_sep>feat_input,sim_cutoff,bins,cdf_list,input_labels=input_features_cdf_cutoff_labels<line_sep># from PIL image to np array
#img_test = np.array(image)
# img_test = cv2.imread(img_path) # could be removed by passing previous PIL image
t_read=timer()-start<line_sep>candidates,i_candidates_too_small=contents_of_bbox(img_test prediction)<line_sep># filter predicted bboxes to discard small logos
prediction=[pred<for>i,pred enumerate(prediction)<if>i<not><in>i_candidates_too_small]<line_sep>t_box=timer()-start<line_sep>features_cand=features_from_image(candidates model my_preprocess)<line_sep>t_feat=timer()-start<line_sep>matches,cos_sim=similar_matches(feat_input features_cand sim_cutoff bins cdf_list)<line_sep>t_match=timer()-start<line_sep>img_path=outtxt<for_stmt>idx matches<block_start>bb=prediction[idx]<line_sep>label=input_labels[matches[idx][0]]<line_sep>print('Logo #{} - {} {} - classified as {} {:.2f}'.format(idx tuple(bb[:2]) tuple(bb[2:4]) label matches[idx][1]))<line_sep>outtxt<augadd>' {},{},{},{},{},{:.2f},{:.3f}'.format(*bb[:4] label bb[-1] matches[idx][1])<block_end>outtxt<augadd>'\n'<line_sep>new_img=draw_matches(img_test input_labels prediction matches)<line_sep>t_draw=timer()-start<if_stmt>save_img<eq><true><block_start>save_img_path=os.path.abspath(save_img_path)<line_sep>saved=Image.fromarray(new_img).save(os.path.join(save_img_path os.path.basename(img_path)))<line_sep># save with opencv, remember to flip RGB->BGR
# saved = cv2.imwrite(os.path.join(save_img_path, os.path.basename(img_path)), new_img[...,::-1])
<block_end>t_save=timer()-start<if_stmt>timing<block_start><return>outtxt (t_read t_box-t_read t_feat-t_box t_match-t_feat t_draw-t_match t_save-t_draw)<block_end><return>outtxt<block_end><def_stmt>detect_video yolo video_path output_path=""<block_start><import_stmt>cv2<line_sep>vid=cv2.VideoCapture(video_path)<if_stmt><not>vid.isOpened()<block_start><raise>IOError("Couldn't open video")<block_end>video_FourCC=cv2.VideoWriter_fourcc(*'mp4v')#int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps=vid.get(cv2.CAP_PROP_FPS)<line_sep>video_size=(int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)) int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))<line_sep>isOutput=<true><if>output_path<ne>""<else><false><if_stmt>isOutput<block_start>print(output_path video_FourCC video_fps video_size)<line_sep>out=cv2.VideoWriter(output_path video_FourCC video_fps video_size)<block_end>accum_time=0<line_sep>curr_fps=0<line_sep>fps="FPS: ??"<line_sep>prev_time=timer()<while_stmt>vid.isOpened()<block_start>return_value,frame=vid.read()<if_stmt><not>return_value<block_start><break><block_end># opencv images are BGR, translate to RGB
frame=frame[: : ::-1]<line_sep>image=Image.fromarray(frame)<line_sep>out_pred,image=yolo.detect_image(image)<line_sep>result=np.asarray(image)<if_stmt>isOutput<block_start>out.write(result[: : ::-1])<block_end><block_end>vid.release()<line_sep>out.release()<line_sep>yolo.close_session()<block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep># Calo geometry service model
#
# removed by tommaso
#
#ECAL conditions
# include "CalibCalorimetry/EcalTrivialCondModules/data/EcalTrivialCondRetriever.cfi"
#
#TPG condition needed by ecalRecHit producer if TT recovery is ON
<import_from_stmt>RecoLocalCalo.EcalRecProducers.ecalRecHitTPGConditions_cff *<line_sep>#ECAL reconstruction
<import_from_stmt>RecoLocalCalo.EcalRecProducers.ecalWeightUncalibRecHit_cfi *<import_from_stmt>RecoLocalCalo.EcalRecProducers.ecalFixedAlphaBetaFitUncalibRecHit_cfi *<import_from_stmt>RecoLocalCalo.EcalRecProducers.ecalRecHit_cff *<line_sep>ecalRecHit.cpu.EBuncalibRecHitCollection='ecalFixedAlphaBetaFitUncalibRecHit:EcalUncalibRecHitsEB'<line_sep>ecalRecHit.cpu.EEuncalibRecHitCollection='ecalFixedAlphaBetaFitUncalibRecHit:EcalUncalibRecHitsEE'<line_sep>ecalRecHit.cpu.ChannelStatusToBeExcluded=['kDAC' 'kNoLaser' 'kNoisy' 'kNNoisy' 'kFixedG6' 'kFixedG1' 'kFixedG0' 'kNonRespondingIsolated' 'kDeadVFE' 'kDeadFE' 'kNoDataNoTP']<import_from_stmt>RecoLocalCalo.EcalRecProducers.ecalPreshowerRecHit_cfi *<import_from_stmt>RecoLocalCalo.EcalRecProducers.ecalDetIdToBeRecovered_cfi *<line_sep>ecalLocalRecoTaskCosmics=cms.Task(ecalFixedAlphaBetaFitUncalibRecHit ecalWeightUncalibRecHit ecalDetIdToBeRecovered ecalCalibratedRecHitTask ecalPreshowerRecHit)<line_sep>ecalLocalRecoSequenceCosmics=cms.Sequence(ecalLocalRecoTaskCosmics)<line_sep> |
<import_stmt>pytest<import_from_stmt>lightbus.config.structure make_transport_selector_structure ApiConfig RootConfig<line_sep>pytestmark=pytest.mark.unit<def_stmt>test_make_transport_config_structure <block_start>EventTransportSelector=make_transport_selector_structure("event")<assert_stmt>"redis"<in>EventTransportSelector.__annotations__<block_end><def_stmt>test_make_api_config_structure <block_start><assert_stmt>"event_transport"<in>ApiConfig.__annotations__<assert_stmt>"rpc_transport"<in>ApiConfig.__annotations__<assert_stmt>"result_transport"<in>ApiConfig.__annotations__<assert_stmt>"validate"<in>ApiConfig.__annotations__<block_end><def_stmt>test_root_config_service_name <block_start>service_name=RootConfig().service_name<assert_stmt>service_name<assert_stmt>type(service_name)<eq>str<assert_stmt>len(service_name)<g>3<line_sep># No format parameters in there, should have been formatted upon instantiation
<assert_stmt>"{"<not><in>service_name<block_end><def_stmt>test_root_config_process_name <block_start>process_name=RootConfig().process_name<assert_stmt>process_name<assert_stmt>type(process_name)<eq>str<assert_stmt>len(process_name)<g>3<line_sep># No format parameters in there, should have been formatted upon instantiation
<assert_stmt>"{"<not><in>process_name<block_end> |
"""
Input/Output, Files, and Filesystem
"""<import_from_stmt>mathics.version __version__# noqa used in loading to check consistency.
|
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>pileupVtxDigitizer=cms.PSet(accumulatorType=cms.string("PileupVertexAccumulator") hitsProducer=cms.string('generator') vtxTag=cms.InputTag("generatorSmeared") vtxFallbackTag=cms.InputTag("generator") makeDigiSimLinks=cms.untracked.bool(<false>) saveVtxTimes=cms.bool(<false>))<import_from_stmt>Configuration.Eras.Modifier_phase2_timing_cff phase2_timing<line_sep>phase2_timing.toModify(pileupVtxDigitizer saveVtxTimes=cms.bool(<true>))<line_sep> |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RNN models of BasisNet with LSTM cells.
It implements a RNN wrapper with specialized LSTM cell with bases
for the kernels.
"""<import_stmt>functools<import_from_stmt>typing Optional<import_stmt>tensorflow<as>tf<line_sep>CLIENT_SZIE=500000<class_stmt>BasisRNNLayer(tf.keras.layers.Layer)<block_start>"""A RNN wrapper using LSTM cell with Basis kernels."""<def_stmt>__init__ self cell num_units num_basis recurrent_initializer kernel_initializer return_sequences=<false><block_start>super().__init__()<line_sep>self.rnn_cell=cell(num_units=num_units num_basis=num_basis recurrent_initializer=recurrent_initializer kernel_initializer=kernel_initializer)<line_sep>self.rnn=tf.keras.layers.RNN(self.rnn_cell return_sequences=return_sequences)<block_end><def_stmt>call self input_tensor<block_start><return>self.rnn(input_tensor)<block_end><block_end><class_stmt>BasisLSTMCell(tf.keras.layers.Layer)<block_start>"""A LSTM cell with Basis kernels."""<def_stmt>__init__ self num_units num_basis kernel_initializer recurrent_initializer word_emb_size=96 use_bias=<true> activation=<none> <block_start>"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
num_basis: The number of bases to learn.
kernel_initializer: The initializer of the input/output kernels.
recurrent_initializer: The initializer of the recurrent kernels.
word_emb_size: The word embedding size.
use_bias: Add bias or not.
activation: Activation function of the inner states. Default: `tanh`.
"""<line_sep>super().__init__()<line_sep>self._num_basis=num_basis<line_sep>self.kernel_initializer=kernel_initializer<line_sep>self.recurrent_initializer=recurrent_initializer<line_sep>self._num_units=num_units<line_sep>self.word_emb_size=word_emb_size<line_sep>self.activation=activation<or>tf.tanh<line_sep>self.recurrent_activation=tf.sigmoid<line_sep>self.use_bias=use_bias<block_end><def_stmt>build self input_shape# the basis embedding is concatenated to the input embedding,
# then split out in call().
<block_start>weight_shape=[self.word_emb_size self._num_basis 4<times>self._num_units]<line_sep>self.basis_kernel=self.add_weight(shape=weight_shape name='kernel' initializer=self.kernel_initializer )<line_sep>self.basis_recurrent_kernel=self.add_weight(shape=(self._num_units self._num_basis self._num_units<times>4) name='recurrent_kernel' initializer=self.recurrent_initializer )<line_sep>self.bias=tf.Variable([0.0]<times>weight_shape[-1] name='bias')<block_end>@property<def_stmt>state_size self<block_start><return>tf.compat.v1.nn.rnn_cell.LSTMStateTuple(self._num_units self._num_units)<block_end>@property<def_stmt>output_size self<block_start><return>self._num_units<block_end><def_stmt>compose_basis self c_prob<block_start>"""Compose bases into a kernel."""<line_sep>composed_kernel=tf.keras.backend.sum(tf.expand_dims(self.basis_kernel 0)<times>c_prob axis=2)<line_sep>composed_recurrent_kernel=tf.keras.backend.sum(tf.expand_dims(self.basis_recurrent_kernel 0)<times>c_prob axis=2)<line_sep><return>composed_kernel composed_recurrent_kernel<block_end><def_stmt>_compute_carry_and_output_fused self z c_tm1<block_start>"""Computes carry and output using fused kernels."""<line_sep>z0,z1,z2,z3=z<line_sep>i=self.recurrent_activation(z0)<line_sep>f=self.recurrent_activation(z1)<line_sep>c=f<times>c_tm1+i<times>self.activation(z2)<line_sep>o=self.recurrent_activation(z3)<line_sep><return>c o<block_end><def_stmt>call self inputs states<block_start>h_tm1=states[0]# previous memory state
c_tm1=states[1]# previous carry state
inputs,c_prob=tf.split(inputs [self.word_emb_size self._num_basis] -1)<line_sep>c_prob=tf.reshape(c_prob [-1 1 self._num_basis 1])<line_sep>composed_kernel,composed_recurrent_kernel=self.compose_basis(c_prob)<line_sep># inputs:
# [batch_size, 1, self.word_emb_size]
# composed_kernel:
# [batch_size, self.word_emb_size, self._num_units]
# outputs (need to be squeezed):
# [batch_size, 1, self._num_units]
z=tf.matmul(tf.expand_dims(inputs 1) composed_kernel)<line_sep>z<augadd>tf.matmul(tf.expand_dims(h_tm1 1) composed_recurrent_kernel)<if_stmt>self.use_bias<block_start>z=tf.keras.backend.bias_add(z self.bias)<block_end>z=tf.squeeze(z)<line_sep>z=tf.split(z num_or_size_splits=4 axis=1)<line_sep>c,o=self._compute_carry_and_output_fused(z c_tm1)<line_sep>h=o<times>self.activation(c)<line_sep><return>h [h c]<block_end><block_end><class_stmt>TransposableEmbedding(tf.keras.layers.Layer)<block_start>"""A Keras layer implements a transposed projection for output."""<def_stmt>__init__ self embedding_layer<block_start>super().__init__()<line_sep>self.embeddings=embedding_layer.embeddings<block_end># Placing `tf.matmul` under the `call` method is important for backpropagating
# the gradients of `self.embeddings` in graph mode.
<def_stmt>call self inputs<block_start><return>tf.matmul(inputs self.embeddings transpose_b=<true>)<block_end><block_end><def_stmt>create_basis_recurrent_model vocab_size=10000 num_oov_buckets=1 embedding_size=96 latent_size=670 num_basis=1 seqeunce_length=20 name='rnn' shared_embedding=<false> global_embedding_only=<false> seed=0<block_start>"""Constructs zero-padded keras model with the given parameters and cell.
Args:
vocab_size: Size of vocabulary to use.
num_oov_buckets: Number of out of vocabulary buckets.
embedding_size: The size of the embedding.
latent_size: The size of the recurrent state.
num_basis: The number of bases to learn.
seqeunce_length: The seqeunce length of an input.
name: (Optional) string to name the returned `tf.keras.Model`.
shared_embedding: (Optional) Whether to tie the input and output
embeddings.
global_embedding_only: use the global embedding only or not.
seed: A random seed governing the model initialization and layer randomness.
If set to `None`, No random seed is used.
Returns:
`tf.keras.Model`.
"""<line_sep>extended_vocab_size=vocab_size+3+num_oov_buckets# For pad/bos/eos/oov.
input_x=tf.keras.layers.Input(shape=(<none> ) name='input_x')<line_sep>input_id=tf.keras.layers.Input(shape=(1 ) dtype=tf.int64 name='input_id')<line_sep>input_embedding=tf.keras.layers.Embedding(input_dim=extended_vocab_size output_dim=embedding_size mask_zero=<true> embeddings_initializer=tf.keras.initializers.RandomUniform(seed=seed) )<line_sep>embedded=input_embedding(input_x)<line_sep>projected=embedded<line_sep># Somehow if the vocabulary size is too small,
# no out-of-range error will be reported and the model is still good
basis_embeddings=tf.keras.layers.Embedding(CLIENT_SZIE num_basis name='client_embedding')<if_stmt>global_embedding_only# using id = 0 for the global embedding
<block_start>basis_vec=basis_embeddings(tf.zeros_like(input_id))<block_end><else_stmt><block_start>basis_vec=basis_embeddings(input_id)<block_end># [batch_size, 1, num_basis]
basis_vec=tf.reshape(basis_vec shape=[-1 1 num_basis])<line_sep>basis_prob=tf.keras.layers.Softmax()(basis_vec)<line_sep>basis_tensor=tf.tile(basis_prob tf.constant([1 seqeunce_length 1] tf.int32))<line_sep>projected=tf.concat([projected basis_tensor] -1)<line_sep>recurrent_initializer=tf.keras.initializers.Orthogonal(seed=seed)<line_sep>kernel_initializer=tf.keras.initializers.HeNormal(seed=seed)<line_sep>lstm_layer_builder=functools.partial(BasisRNNLayer cell=BasisLSTMCell num_units=latent_size num_basis=num_basis recurrent_initializer=recurrent_initializer kernel_initializer=kernel_initializer return_sequences=<true> )<line_sep>dense_layer_builder=functools.partial(tf.keras.layers.Dense kernel_initializer=tf.keras.initializers.GlorotNormal(seed=seed))<line_sep>layer=lstm_layer_builder()<line_sep>processed=layer(projected)<line_sep># A projection changes dimension from rnn_layer_size to input_embedding_size
dense_layer=dense_layer_builder(units=embedding_size)<line_sep>projected=dense_layer(processed)<line_sep>projected=tf.concat([projected basis_tensor] -1)<if_stmt>shared_embedding<block_start>transposed_embedding=TransposableEmbedding(input_embedding)<line_sep>logits=transposed_embedding(projected)<block_end><else_stmt><block_start>final_dense_layer=dense_layer_builder(units=extended_vocab_size activation=<none>)<line_sep>logits=final_dense_layer(projected)<block_end><return>tf.keras.Model(inputs=[input_x input_id] outputs=logits name=name)<block_end><def_stmt>create_recurrent_model vocab_size=10000 num_oov_buckets=1 embedding_size=96 latent_size=670 num_layers=1 name='rnn' shared_embedding=<false> seed=0<block_start>"""Constructs zero-padded keras model with the given parameters and cell.
Args:
vocab_size: Size of vocabulary to use.
num_oov_buckets: Number of out of vocabulary buckets.
embedding_size: The size of the embedding.
latent_size: The size of the recurrent state.
num_layers: The number of layers.
name: (Optional) string to name the returned `tf.keras.Model`.
shared_embedding: (Optional) Whether to tie the input and output
embeddings.
seed: A random seed governing the model initialization and layer randomness.
If set to `None`, No random seed is used.
Returns:
`tf.keras.Model`.
"""<line_sep>extended_vocab_size=vocab_size+3+num_oov_buckets# For pad/bos/eos/oov.
input_x=tf.keras.layers.Input(shape=(<none> ) name='input_x')<line_sep># To be consistent with BasisNet pipeline, not using client id
input_id=tf.keras.layers.Input(shape=(1 ) dtype=tf.int64 name='input_id')<line_sep>input_embedding=tf.keras.layers.Embedding(input_dim=extended_vocab_size output_dim=embedding_size mask_zero=<true> embeddings_initializer=tf.keras.initializers.RandomUniform(seed=seed) )<line_sep>embedded=input_embedding(input_x)<line_sep>projected=embedded<line_sep>lstm_layer_builder=functools.partial(tf.keras.layers.LSTM units=latent_size return_sequences=<true> recurrent_initializer=tf.keras.initializers.Orthogonal(seed=seed) kernel_initializer=tf.keras.initializers.HeNormal(seed=seed))<line_sep>dense_layer_builder=functools.partial(tf.keras.layers.Dense kernel_initializer=tf.keras.initializers.GlorotNormal(seed=seed))<for_stmt>_ range(num_layers)<block_start>layer=lstm_layer_builder()<line_sep>processed=layer(projected)<line_sep># A projection changes dimension from rnn_layer_size to input_embedding_size
dense_layer=dense_layer_builder(units=embedding_size)<line_sep>projected=dense_layer(processed)<block_end><if_stmt>shared_embedding<block_start>transposed_embedding=TransposableEmbedding(input_embedding)<line_sep>logits=transposed_embedding(projected)<block_end><else_stmt><block_start>final_dense_layer=dense_layer_builder(units=extended_vocab_size activation=<none>)<line_sep>logits=final_dense_layer(projected)<block_end><return>tf.keras.Model(inputs=[input_x input_id] outputs=logits name=name)<block_end> |
# -*- coding: utf-8 -*-
<import_from_stmt>nose.tools eq_<import_from_stmt>anytree AnyNode<import_from_stmt>anytree Node<import_from_stmt>anytree NodeMixin<import_from_stmt>anytree PostOrderIter<import_from_stmt>anytree PreOrderIter<import_from_stmt>anytree SymlinkNode<def_stmt>test_symlink <block_start>root=Node("root")<line_sep>s0=Node("sub0" parent=root)<line_sep>s0b=Node("sub0B" parent=s0)<line_sep>s0a=Node("sub0A" parent=s0)<line_sep>s1=Node("sub1" parent=root foo=4)<line_sep>s1a=Node("sub1A" parent=s1)<line_sep>s1b=Node("sub1B" parent=s1)<line_sep>s1c=Node("sub1C" parent=s1)<line_sep>s1ca=Node("sub1Ca" parent=s1c)<line_sep>ln=SymlinkNode(s1 parent=root blub=17)<line_sep>l0=Node("l0" parent=ln)<line_sep>eq_(root.parent <none>)<line_sep>eq_(root.children tuple([s0 s1 ln]))<line_sep>eq_(s0.parent root)<line_sep>eq_(s0.children tuple([s0b s0a]))<line_sep>eq_(s0b.parent s0)<line_sep>eq_(s0b.children tuple())<line_sep>eq_(s0a.parent s0)<line_sep>eq_(s0a.children tuple())<line_sep>eq_(s1.parent root)<line_sep>eq_(s1.children tuple([s1a s1b s1c]))<line_sep>eq_(s1.foo 4)<line_sep>eq_(s1a.parent s1)<line_sep>eq_(s1a.children tuple())<line_sep>eq_(s1b.parent s1)<line_sep>eq_(s1b.children tuple())<line_sep>eq_(s1c.parent s1)<line_sep>eq_(s1c.children tuple([s1ca]))<line_sep>eq_(s1ca.parent s1c)<line_sep>eq_(s1ca.children tuple())<line_sep>eq_(ln.parent root)<line_sep>eq_(ln.children tuple([l0]))<line_sep>eq_(ln.foo 4)<line_sep>eq_(s1.blub 17)<line_sep>eq_(ln.blub 17)<line_sep>ln.bar=9<line_sep>eq_(ln.bar 9)<line_sep>eq_(s1.bar 9)<line_sep>result=[node.name<for>node PreOrderIter(root)]<line_sep>eq_(result ['root' 'sub0' 'sub0B' 'sub0A' 'sub1' 'sub1A' 'sub1B' 'sub1C' 'sub1Ca' 'sub1' 'l0'])<line_sep>result=[node.name<for>node PostOrderIter(root)]<line_sep>eq_(result ['sub0B' 'sub0A' 'sub0' 'sub1A' 'sub1B' 'sub1Ca' 'sub1C' 'sub1' 'l0' 'sub1' 'root'])<block_end> |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: tile"""<import_stmt>akg.tvm<import_stmt>akg.topi<import_stmt>akg.utils<as>utils<line_sep>@utils.check_input_type(akg.tvm.tensor.Tensor (list tuple) (str type(<none>)))<def_stmt>Tile data multiples target=utils.CCE<block_start>"""
Repeats the data in the specified dimensions according to the multiples.
Args:
data (tvm.tensor.Tensor): Tensor of type float16, float32.
multiples (Union[list, tuple]): Elements must be int. The number of repetitions.
Returns:
tvm.tensor.Tensor, has the same dtype as data.
Supported Platforms:
'Ascend', 'GPU', 'CPU'
"""<line_sep>utils.check_supported_target(target)<line_sep>shape=[x.value<for>x data.shape]<line_sep>dtype=data.dtype<line_sep>utils.check_shape(shape)<line_sep>utils.ops_dtype_check(dtype utils.DtypeForDavinci.ALL_TYPES)<line_sep>utils.check_int_list(multiples "multiples")<line_sep>output=akg.topi.tile(data multiples)<line_sep><return>output<block_end> |
<import_stmt>io<import_from_stmt>base64 b64decode<import_stmt>disnake<import_from_stmt>disnake.ext commands<class_stmt>Misc(commands.Cog)<block_start><def_stmt>__init__ self bot<block_start>self.bot:commands.Bot=bot<block_end><def_stmt>_get_file self description:str<arrow>disnake.File# just a white 100x100 png
<block_start>data=b64decode("<KEY>")<line_sep><return>disnake.File(io.BytesIO(data) "image.png" description=description)<block_end>@commands.slash_command()<async_keyword><def_stmt>attachment_desc self inter:disnake.AppCmdInter desc:str="test"<arrow><none><block_start>"""
Send an attachment with the given description (or the default)
Parameters
----------
desc: The attachment description
"""<line_sep><await>inter.response.send_message(file=self._get_file(desc))<block_end>@commands.slash_command()<async_keyword><def_stmt>attachment_desc_edit self inter:disnake.AppCmdInter desc:str="test"<arrow><none><block_start>"""
Send a message with a button, which sends an attachment with the given description (or the default)
Parameters
----------
desc: The attachment description
"""<line_sep>button=disnake.ui.Button(label="edit")<line_sep>button.callback=<lambda>interaction:interaction.response.edit_message(file=self._get_file(desc))<line_sep>view=disnake.ui.View()<line_sep>view.add_item(button)<line_sep><await>inter.response.send_message("." view=view)<block_end><block_end><def_stmt>setup bot<block_start>bot.add_cog(Misc(bot))<line_sep>print(f"> Extension {__name__} is ready\n")<block_end> |
<import_from_stmt>lyrebird config<import_from_stmt>pathlib Path<import_stmt>json<import_stmt>codecs<def_stmt>test_create tmpdir<block_start>custom_config={"myconf":"myval"}<line_sep>conf_path=Path(tmpdir)/'conf.json'<with_stmt>codecs.open(conf_path 'w' 'utf-8')<as>f<block_start>f.write(json.dumps(custom_config indent=4 ensure_ascii=<false>))<block_end>cm=config.ConfigManager(conf_path=conf_path)<assert_stmt>str(cm.conf_file)<eq>str(tmpdir)+'/conf.json'<assert_stmt>cm.conf_file.exists()<assert_stmt>cm.config<assert_stmt>cm.config['myconf']<eq>'myval'<block_end> |
<import_from_stmt>.cellassign CellAssign<import_from_stmt>.gimvi GIMVI<import_from_stmt>.solo SOLO<import_from_stmt>.stereoscope RNAStereoscope SpatialStereoscope<line_sep>__all__=["SOLO" "GIMVI" "RNAStereoscope" "SpatialStereoscope" "CellAssign"]<line_sep> |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_from_future_stmt> annotations<import_stmt>io<import_stmt>struct<class_stmt>BinWrapper<block_start>"""A utility binary-reader wrapper over any io.BytesIO object."""<line_sep>i64=struct.Struct('!q')<line_sep>i32=struct.Struct('!l')<line_sep>i16=struct.Struct('!h')<line_sep>i8=struct.Struct('!b')<line_sep>ui64=struct.Struct('!Q')<line_sep>ui32=struct.Struct('!L')<line_sep>ui16=struct.Struct('!H')<line_sep>ui8=struct.Struct('!B')<def_stmt>__init__ self buf:io.BytesIO<arrow><none><block_start>self.buf=buf<block_end><def_stmt>write_ui64 self val:int<arrow><none><block_start>self.buf.write(self.ui64.pack(val))<block_end><def_stmt>write_ui32 self val:int<arrow><none><block_start>self.buf.write(self.ui32.pack(val))<block_end><def_stmt>write_ui16 self val:int<arrow><none><block_start>self.buf.write(self.ui16.pack(val))<block_end><def_stmt>write_ui8 self val:int<arrow><none><block_start>self.buf.write(self.ui8.pack(val))<block_end><def_stmt>write_i64 self val:int<arrow><none><block_start>self.buf.write(self.i64.pack(val))<block_end><def_stmt>write_i32 self val:int<arrow><none><block_start>self.buf.write(self.i32.pack(val))<block_end><def_stmt>write_i16 self val:int<arrow><none><block_start>self.buf.write(self.i16.pack(val))<block_end><def_stmt>write_i8 self val:int<arrow><none><block_start>self.buf.write(self.i8.pack(val))<block_end><def_stmt>write_len32_prefixed_bytes self val:bytes<arrow><none><block_start>self.write_ui32(len(val))<line_sep>self.buf.write(val)<block_end><def_stmt>write_bytes self val:bytes<arrow><none><block_start>self.buf.write(val)<block_end><def_stmt>read_ui64 self<arrow>int<block_start>data=self.buf.read(8)<line_sep><return>self.ui64.unpack(data)[0]<block_end><def_stmt>read_ui32 self<arrow>int<block_start>data=self.buf.read(4)<line_sep><return>self.ui32.unpack(data)[0]<block_end><def_stmt>read_ui16 self<arrow>int<block_start>data=self.buf.read(2)<line_sep><return>self.ui16.unpack(data)[0]<block_end><def_stmt>read_ui8 
self<arrow>int<block_start>data=self.buf.read(1)<line_sep><return>self.ui8.unpack(data)[0]<block_end><def_stmt>read_i64 self<arrow>int<block_start>data=self.buf.read(8)<line_sep><return>self.i64.unpack(data)[0]<block_end><def_stmt>read_i32 self<arrow>int<block_start>data=self.buf.read(4)<line_sep><return>self.i32.unpack(data)[0]<block_end><def_stmt>read_i16 self<arrow>int<block_start>data=self.buf.read(2)<line_sep><return>self.i16.unpack(data)[0]<block_end><def_stmt>read_i8 self<arrow>int<block_start>data=self.buf.read(1)<line_sep><return>self.i8.unpack(data)[0]<block_end><def_stmt>read_bytes self size:int<arrow>bytes<block_start>data=self.buf.read(size)<if_stmt>len(data)<ne>size<block_start><raise>BufferError(f'cannot read bytes with len={size}')<block_end><return>data<block_end><def_stmt>read_len32_prefixed_bytes self<arrow>bytes<block_start>size=self.read_ui32()<line_sep><return>self.read_bytes(size)<block_end><block_end> |
<import_stmt>unittest2<import_stmt>json<import_from_stmt>datafeeds.resource_library_parser ResourceLibraryParser<class_stmt>TestResourceLibraryParser(unittest2.TestCase)<block_start><def_stmt>test_parse_hall_of_fame self<block_start><with_stmt>open('test_data/hall_of_fame.html' 'r')<as>f<block_start>teams,_=ResourceLibraryParser.parse(f.read())<block_end># Test number of teams
self.assertEqual(len(teams) 14)<line_sep># Test team 987
team=teams[0]<line_sep>self.assertEqual(team["team_id"] "frc987")<line_sep>self.assertEqual(team["team_number"] 987)<line_sep>self.assertEqual(team["year"] 2016)<line_sep>self.assertEqual(team["video"] "wpv-9yd_CJk")<line_sep>self.assertEqual(team["presentation"] "ILxVggTpXhs")<line_sep>self.assertEqual(team["essay"] "https://www.firstinspires.org/sites/default/files/uploads/resource_library/frc/game-and-season-info/awards/2016/chairmans/week-five/team-987.pdf")<line_sep># Test team 597
team=teams[1]<line_sep>self.assertEqual(team["team_id"] "frc597")<line_sep>self.assertEqual(team["team_number"] 597)<line_sep>self.assertEqual(team["year"] 2015)<line_sep>self.assertEqual(team["video"] "2FKks-d6LOo")<line_sep>self.assertEqual(team["presentation"] "RBXj490clow")<line_sep>self.assertEqual(team["essay"] <none>)<line_sep># Test team 27
team=teams[2]<line_sep>self.assertEqual(team["team_id"] "frc27")<line_sep>self.assertEqual(team["team_number"] 27)<line_sep>self.assertEqual(team["year"] 2014)<line_sep>self.assertEqual(team["video"] "BCz2yTVPxbM")<line_sep>self.assertEqual(team["presentation"] "1rE67fTRl98")<line_sep>self.assertEqual(team["essay"] "https://www.firstinspires.org/sites/default/files/uploads/resource_library/frc/game-and-season-info/awards/2015/2014-67-chairmans-handout.pdf")<line_sep># Test team 1538
team=teams[3]<line_sep>self.assertEqual(team["team_id"] "frc1538")<line_sep>self.assertEqual(team["team_number"] 1538)<line_sep>self.assertEqual(team["year"] 2013)<line_sep>self.assertEqual(team["video"] "p62jRCMkoiw")<line_sep>self.assertEqual(team["presentation"] <none>)<line_sep>self.assertEqual(team["essay"] <none>)<line_sep># Test team 1114
team=teams[4]<line_sep>self.assertEqual(team["team_id"] "frc1114")<line_sep>self.assertEqual(team["team_number"] 1114)<line_sep>self.assertEqual(team["year"] 2012)<line_sep>self.assertEqual(team["video"] "VqciMgjw-SY")<line_sep>self.assertEqual(team["presentation"] <none>)<line_sep>self.assertEqual(team["essay"] <none>)<line_sep># Test team 359
team=teams[5]<line_sep>self.assertEqual(team["team_id"] "frc359")<line_sep>self.assertEqual(team["team_number"] 359)<line_sep>self.assertEqual(team["year"] 2011)<line_sep>self.assertEqual(team["video"] "e9IV1chHJtg")<line_sep>self.assertEqual(team["presentation"] <none>)<line_sep>self.assertEqual(team["essay"] <none>)<line_sep># Test team 341
team=teams[6]<line_sep>self.assertEqual(team["team_id"] "frc341")<line_sep>self.assertEqual(team["team_number"] 341)<line_sep>self.assertEqual(team["year"] 2010)<line_sep>self.assertEqual(team["video"] "-AzvT02ZCNk")<line_sep>self.assertEqual(team["presentation"] <none>)<line_sep>self.assertEqual(team["essay"] <none>)<line_sep># Test team 236
team=teams[7]<line_sep>self.assertEqual(team["team_id"] "frc236")<line_sep>self.assertEqual(team["team_number"] 236)<line_sep>self.assertEqual(team["year"] 2009)<line_sep>self.assertEqual(team["video"] "NmzCLohIZLg")<line_sep>self.assertEqual(team["presentation"] <none>)<line_sep>self.assertEqual(team["essay"] <none>)<line_sep># Test team 842
team=teams[8]<line_sep>self.assertEqual(team["team_id"] "frc842")<line_sep>self.assertEqual(team["team_number"] 842)<line_sep>self.assertEqual(team["year"] 2008)<line_sep>self.assertEqual(team["video"] "N0LMLz6LK7U")<line_sep>self.assertEqual(team["presentation"] <none>)<line_sep>self.assertEqual(team["essay"] <none>)<line_sep># Test team 365
team=teams[9]<line_sep>self.assertEqual(team["team_id"] "frc365")<line_sep>self.assertEqual(team["team_number"] 365)<line_sep>self.assertEqual(team["year"] 2007)<line_sep>self.assertEqual(team["video"] "f8MT7pSRXtg")<line_sep>self.assertEqual(team["presentation"] <none>)<line_sep>self.assertEqual(team["essay"] <none>)<line_sep># Test team 111
team=teams[10]<line_sep>self.assertEqual(team["team_id"] "frc111")<line_sep>self.assertEqual(team["team_number"] 111)<line_sep>self.assertEqual(team["year"] 2006)<line_sep>self.assertEqual(team["video"] "SfCjZMMIt0k")<line_sep>self.assertEqual(team["presentation"] <none>)<line_sep>self.assertEqual(team["essay"] <none>)<block_end><block_end> |
#
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_from_future_stmt> print_function<import_stmt>argparse<import_stmt>os<import_stmt>subprocess<import_stmt>sys<line_sep>THIS_DIR=os.path.dirname(os.path.realpath(__file__))<line_sep>ANDROID_DIR=os.path.realpath(os.path.join(THIS_DIR '../..'))<class_stmt>ArgParser(argparse.ArgumentParser)<block_start><def_stmt>__init__ self<block_start>super(ArgParser self).__init__()<line_sep>self.add_argument('--compiler' choices=('clang' 'gcc') default='clang')<line_sep>self.add_argument('--bitness' choices=(32 64) type=int default=32)<line_sep>self.add_argument('--host' action='store_true')<block_end><block_end><def_stmt>gen_test_config bitness compiler host<block_start>testconfig_mk_path=os.path.join(THIS_DIR 'buildcmds/testconfig.mk')<with_stmt>open(testconfig_mk_path 'w')<as>test_config<block_start><if_stmt>compiler<eq>'clang'<block_start>print('LOCAL_CLANG := true' file=test_config)<block_end><elif_stmt>compiler<eq>'gcc'<block_start>print('LOCAL_CLANG := false' file=test_config)<block_end><if_stmt>bitness<eq>32<block_start>print('LOCAL_MULTILIB := 32' file=test_config)<block_end><elif_stmt>bitness<eq>64<block_start>print('LOCAL_MULTILIB := 64' file=test_config)<block_end><if_stmt>compiler<eq>'clang'<block_start>print('LOCAL_CXX := $(LOCAL_PATH)/buildcmdscc $(CLANG_CXX)' file=test_config)<block_end><else_stmt><block_start><if_stmt>host<block_start>prefix='HOST_'<block_end><else_stmt><block_start>prefix='TARGET_'<block_end>print('LOCAL_CXX := $(LOCAL_PATH)/buildcmdscc '<concat>'$($(LOCAL_2ND_ARCH_VAR_PREFIX){}CXX)'.format(prefix) file=test_config)<block_end><if_stmt>host<block_start>print('include $(BUILD_HOST_EXECUTABLE)' file=test_config)<block_end><else_stmt><block_start>print('include $(BUILD_EXECUTABLE)' file=test_config)<block_end><block_end><block_end><def_stmt>mmm path<block_start>makefile=os.path.join(path 
'Android.mk')<line_sep>main_mk='build/core/main.mk'<line_sep>env=dict(os.environ)<line_sep>env['ONE_SHOT_MAKEFILE']=makefile<line_sep>env['LIBCXX_TESTING']='true'<line_sep>cmd=['make' '-C' ANDROID_DIR '-f' main_mk 'all_modules']<line_sep>subprocess.check_call(cmd env=env)<block_end><def_stmt>gen_build_cmds bitness compiler host<block_start>gen_test_config(bitness compiler host)<line_sep>mmm(os.path.join(THIS_DIR 'buildcmds'))<block_end><def_stmt>main <block_start>args,lit_args=ArgParser().parse_known_args()<line_sep>lit_path=os.path.join(ANDROID_DIR 'external/llvm/utils/lit/lit.py')<line_sep>gen_build_cmds(args.bitness args.compiler args.host)<line_sep>mode_str='host'<if>args.host<else>'device'<line_sep>android_mode_arg='--param=android_mode='+mode_str<line_sep>test_path=os.path.join(THIS_DIR 'test')<line_sep>lit_args=['-sv' android_mode_arg]+lit_args<line_sep>cmd=['python' lit_path]+lit_args+[test_path]<line_sep>sys.exit(subprocess.call(cmd))<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BasicTokenizer classes."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>.utils convert_to_unicode clean_text split_on_whitespace split_on_punctuation tokenize_chinese_chars strip_accents <class_stmt>BasicTokenizer(object)<block_start>"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""<def_stmt>__init__ self do_lower_case=<true> never_split=("[UNK]" "[SEP]" "[PAD]" "[CLS]" "[MASK]")<block_start>"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""<line_sep>self.do_lower_case=do_lower_case<line_sep>self.never_split=never_split<block_end><def_stmt>tokenize self text<block_start>"""Tokenizes a piece of text."""<line_sep>text=convert_to_unicode(text)<line_sep>text=clean_text(text)<line_sep>text=tokenize_chinese_chars(text)<line_sep># This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
orig_tokens=split_on_whitespace(text)<line_sep>split_tokens=[]<line_sep>current_positions=[]<for_stmt>token orig_tokens<block_start><if_stmt>self.do_lower_case<and>token<not><in>self.never_split<block_start>token=token.lower()<line_sep>token=strip_accents(token)<block_end>current_positions.append([])<line_sep>current_positions[-1].append(len(split_tokens))<line_sep>split_tokens.extend(split_on_punctuation(token))<line_sep>current_positions[-1].append(len(split_tokens))<block_end><return>split_tokens current_positions<block_end><block_end> |
<import_from_stmt>django.db models<import_from_stmt>djangae patches# noqa
<class_stmt>DeferIterationMarker(models.Model)<block_start>"""
Marker to keep track of sharded defer
iteration tasks
"""<line_sep># Set to True when all shards have been deferred
is_ready=models.BooleanField(default=<false>)<line_sep>shard_count=models.PositiveIntegerField(default=0)<line_sep>shards_complete=models.PositiveIntegerField(default=0)<line_sep>delete_on_completion=models.BooleanField(default=<true>)<line_sep>created=models.DateTimeField(auto_now_add=<true>)<line_sep>callback_name=models.CharField(max_length=100)<line_sep>finalize_name=models.CharField(max_length=100)<class_stmt>Meta<block_start>app_label="djangae"<block_end>@property<def_stmt>is_finished self<block_start><return>self.is_ready<and>self.shard_count<eq>self.shards_complete<block_end><def_stmt>__unicode__ self<block_start><return>"Background Task (%s -> %s) at %s"%(self.callback_name self.finalize_name self.created)<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_stmt>os<import_stmt>unittest<import_stmt>pytest<import_from_stmt>smart_open open<line_sep>skip_tests="SMART_OPEN_TEST_MISSING_DEPS"<not><in>os.environ<class_stmt>PackageTests(unittest.TestCase)<block_start>@pytest.mark.skipif(skip_tests reason="requires missing dependencies")<def_stmt>test_azure_raises_helpful_error_with_missing_deps self<block_start><with_stmt>pytest.raises(ImportError match=r"pip install smart_open\[azure\]")<block_start>open("azure://foo/bar")<block_end><block_end>@pytest.mark.skipif(skip_tests reason="requires missing dependencies")<def_stmt>test_aws_raises_helpful_error_with_missing_deps self<block_start>match=r"pip install smart_open\[s3\]"<with_stmt>pytest.raises(ImportError match=match)<block_start>open("s3://foo/bar")<block_end><block_end>@pytest.mark.skipif(skip_tests reason="requires missing dependencies")<def_stmt>test_gcs_raises_helpful_error_with_missing_deps self<block_start><with_stmt>pytest.raises(ImportError match=r"pip install smart_open\[gcs\]")<block_start>open("gs://foo/bar")<block_end><block_end><block_end> |
<import_stmt>sublime sublime_plugin<import_stmt>os<import_from_stmt>...libs util<import_from_stmt>...libs JavascriptEnhancementsExecuteOnTerminalCommand<class_stmt>JavascriptEnhancementsGenerateJsdocCommand(JavascriptEnhancementsExecuteOnTerminalCommand sublime_plugin.WindowCommand)<block_start>is_node=<true><line_sep>is_bin_path=<true><def_stmt>prepare_command self<block_start>jsdoc_conf_file=os.path.join(self.settings['project_dir_name'] self.settings['project_settings']['jsdoc']['conf_file'])<if_stmt>os.path.isfile(jsdoc_conf_file)<block_start>self.command=["jsdoc" "-c" jsdoc_conf_file]<block_end><else_stmt><block_start>sublime.error_message("JSDOC ERROR: Can't load "+jsdoc_conf_file+" file!\nConfiguration file REQUIRED!")<line_sep><return><block_end>self._run()<block_end><def_stmt>_run self<block_start>super(JavascriptEnhancementsGenerateJsdocCommand self)._run()<block_end><def_stmt>is_enabled self<block_start><return><true><if>util.is_javascript_project()<else><false><block_end><block_end> |
<import_stmt>torch.nn<as>nn<import_from_stmt>architectures.position_wise_feed_forward_net PositionWiseFeedForwardNet<import_from_stmt>architectures.multi_head_attention MultiHeadAttention<import_from_stmt>architectures.add_and_norm AddAndNorm<class_stmt>TransformerEncoderBlock(nn.Module)<block_start><def_stmt>__init__ self d_model n_heads d_ff dropout_proba<block_start>super(TransformerEncoderBlock self).__init__()<line_sep>self.W_q=nn.Linear(d_model d_model)<line_sep>self.W_k=nn.Linear(d_model d_model)<line_sep>self.W_v=nn.Linear(d_model d_model)<line_sep>self.mha_layer=MultiHeadAttention(d_model n_heads)<line_sep>self.dropout_layer_1=nn.Dropout(dropout_proba)<line_sep>self.add_and_norm_layer_1=AddAndNorm(d_model)<line_sep>self.ffn_layer=PositionWiseFeedForwardNet(d_model d_ff)<line_sep>self.dropout_layer_2=nn.Dropout(dropout_proba)<line_sep>self.add_and_norm_layer_2=AddAndNorm(d_model)<block_end><def_stmt>forward self x mask# x dims: (batch_size, src_seq_len, d_model)
# mask dim: (batch_size, 1, 1, src_seq_len)
<block_start>q=self.W_q(x)# (batch_size, src_seq_len, d_model)
k=self.W_k(x)# (batch_size, src_seq_len, d_model)
v=self.W_v(x)# (batch_size, src_seq_len, d_model)
mha_out=self.mha_layer(q k v mask)# (batch_size, src_seq_len, d_model)
mha_out=self.dropout_layer_1(mha_out)# (batch_size, src_seq_len, d_model)
mha_out=self.add_and_norm_layer_1(x mha_out)# (batch_size, src_seq_len, d_model)
ffn_out=self.ffn_layer(mha_out)# (batch_size, src_seq_len, d_model)
ffn_out=self.dropout_layer_2(ffn_out)# (batch_size, src_seq_len, d_model)
ffn_out=self.add_and_norm_layer_2(mha_out ffn_out)# (batch_size, src_seq_len, d_model)
<return>ffn_out<block_end><block_end><class_stmt>TransformerEncoder(nn.Module)<block_start><def_stmt>__init__ self n_blocks n_heads d_model d_ff dropout_proba=0.1<block_start>super(TransformerEncoder self).__init__()<line_sep>self.encoder_blocks=nn.ModuleList([TransformerEncoderBlock(d_model n_heads d_ff dropout_proba)<for>_ range(n_blocks)])<block_end><def_stmt>forward self x mask<block_start><for_stmt>encoder_block self.encoder_blocks<block_start>x=encoder_block(x mask)<block_end><return>x<block_end><block_end> |
"""Transducers for RxPY.
There are several different implementations of transducers in Python.
This implementation is currently targeted for:
- http://code.sixty-north.com/python-transducers
You should also read the excellent article series "Understanding
Transducers through Python" at:
- http://sixty-north.com/blog/series/understanding-transducers-through-python
Other implementations of transducers in Python are:
- https://github.com/cognitect-labs/transducers-python
"""<import_from_stmt>rx.core Observable AnonymousObservable<import_from_stmt>rx.internal extensionmethod<class_stmt>Observing(object)<block_start>"""An observing transducer."""<def_stmt>__init__ self observer<block_start>self.observer=observer<block_end><def_stmt>initial self<block_start><return>self.observer<block_end><def_stmt>step self obs input<block_start><return>obs.on_next(input)<block_end><def_stmt>complete self obs<block_start><return>obs.on_completed()<block_end><def_stmt>__call__ self result item<block_start><return>self.step(result item)<block_end><block_end>@extensionmethod(Observable)<def_stmt>transduce self transducer<block_start>"""Execute a transducer to transform the observable sequence.
Keyword arguments:
:param Transducer transducer: A transducer to execute.
:returns: An Observable sequence containing the results from the
transducer.
:rtype: Observable
"""<line_sep>source=self<def_stmt>subscribe observer<block_start>xform=transducer(Observing(observer))<def_stmt>on_next v<block_start><try_stmt><block_start>xform.step(observer v)<block_end><except_stmt>Exception<as>e<block_start>observer.on_error(e)<block_end><block_end><def_stmt>on_completed <block_start>xform.complete(observer)<block_end><return>source.subscribe(on_next observer.on_error on_completed)<block_end><return>AnonymousObservable(subscribe)<block_end> |
<import_stmt>json<import_stmt>logging<import_from_stmt>django.urls reverse<import_from_stmt>seahub.test_utils BaseTestCase<import_from_stmt>tests.common.utils randstring<import_from_stmt>seahub.institutions.models Institution InstitutionAdmin<import_from_stmt>seahub.profile.models Profile<line_sep>logger=logging.getLogger(__name__)<class_stmt>AdminInstitutionUsersTest(BaseTestCase)<block_start><def_stmt>setUp self<block_start><pass><block_end><def_stmt>_add_institution self name=''<block_start><return>Institution.objects.create(name=name)<block_end><def_stmt>_delete_institution self name=''<block_start><try_stmt><block_start>institution=Institution.objects.get(name=name)<line_sep>institution.delete()<block_end><except_stmt>Exception<as>e<block_start>logger.error(e)<block_end><block_end><def_stmt>test_can_get self<block_start>self.login_as(self.admin)<line_sep>inst=self._add_institution('int1')<line_sep>url=reverse('api-v2.1-admin-institution-users' args=[inst.id])<line_sep>resp=self.client.get(url)<line_sep>self.assertEqual(200 resp.status_code)<line_sep>json_resp=json.loads(resp.content)<assert_stmt>type(json_resp['user_list'])<is>list<line_sep>inst.delete()<block_end><def_stmt>test_no_permission self<block_start>self.logout()<line_sep>self.login_as(self.admin_no_other_permission)<line_sep>inst=self._add_institution('int1')<line_sep>url=reverse('api-v2.1-admin-institution-users' args=[inst.id])<line_sep>resp=self.client.get(url)<line_sep>self.assertEqual(403 resp.status_code)<block_end><def_stmt>test_can_create self<block_start>self.login_as(self.admin)<line_sep>inst=self._add_institution('int1')<line_sep>url=reverse('api-v2.1-admin-institution-users' args=[inst.id])<line_sep>data={'email':'invalid_email_string' }<line_sep>resp=self.client.post(url data)<line_sep>self.assertEqual(200 
resp.status_code)<line_sep>json_resp=json.loads(resp.content)<assert_stmt>type(json_resp['success'])<is>list<assert_stmt>type(json_resp['failed'])<is>list<block_end><block_end><class_stmt>AdminInstitutionUserTest(BaseTestCase)<block_start><def_stmt>setUp self<block_start><pass><block_end><def_stmt>_add_institution self name=''<block_start><return>Institution.objects.create(name=name)<block_end><def_stmt>_delete_institution self name=''<block_start><try_stmt><block_start>institution=Institution.objects.get(name=name)<line_sep>institution.delete()<block_end><except_stmt>Exception<as>e<block_start>logger.error(e)<block_end><block_end><def_stmt>_add_user_in_institution self email inst_name<block_start>profile=Profile.objects.get_profile_by_user(email)<if_stmt><not>profile<block_start>profile=Profile.objects.add_or_update(username=email institution=inst_name)<block_end><else_stmt><block_start>profile.institution=inst_name<block_end>profile.save()<block_end><def_stmt>test_can_update self<block_start>self.login_as(self.admin)<line_sep>inst=self._add_institution('int1')<line_sep>self._add_user_in_institution(self.user.email inst.name)<line_sep>url=reverse('api-v2.1-admin-institution-user' args=[inst.id self.user.email])<line_sep>data='is_institution_admin=True'<line_sep>resp=self.client.put(url data 'application/x-www-form-urlencoded')<line_sep>self.assertEqual(200 resp.status_code)<line_sep>json_resp=json.loads(resp.content)<assert_stmt>json_resp['is_institution_admin']<is><true><line_sep>inst.delete()<block_end><def_stmt>test_can_delete self<block_start>self.login_as(self.admin)<line_sep>inst=self._add_institution('int1')<line_sep>self._add_user_in_institution(self.user.email inst.name)<line_sep>url=reverse('api-v2.1-admin-institution-user' args=[inst.id self.user.email])<line_sep>resp=self.client.delete(url)<line_sep>self.assertEqual(200 resp.status_code)<line_sep>inst.delete()<block_end><block_end> |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
<import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['PublicCertificateArgs' 'PublicCertificate']<line_sep>@pulumi.input_type<class_stmt>PublicCertificateArgs<block_start><def_stmt>__init__ __self__ * app_service_name:pulumi.Input[str] blob:pulumi.Input[str] certificate_location:pulumi.Input[str] certificate_name:pulumi.Input[str] resource_group_name:pulumi.Input[str]<block_start>"""
The set of arguments for constructing a PublicCertificate resource.
:param pulumi.Input[str] app_service_name: The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] blob: The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] certificate_location: The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
:param pulumi.Input[str] certificate_name: The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep>pulumi.set(__self__ "app_service_name" app_service_name)<line_sep>pulumi.set(__self__ "blob" blob)<line_sep>pulumi.set(__self__ "certificate_location" certificate_location)<line_sep>pulumi.set(__self__ "certificate_name" certificate_name)<line_sep>pulumi.set(__self__ "resource_group_name" resource_group_name)<block_end>@property@pulumi.getter(name="appServiceName")<def_stmt>app_service_name self<arrow>pulumi.Input[str]<block_start>"""
The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><return>pulumi.get(self "app_service_name")<block_end>@app_service_name.setter<def_stmt>app_service_name self value:pulumi.Input[str]<block_start>pulumi.set(self "app_service_name" value)<block_end>@property@pulumi.getter<def_stmt>blob self<arrow>pulumi.Input[str]<block_start>"""
The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><return>pulumi.get(self "blob")<block_end>@blob.setter<def_stmt>blob self value:pulumi.Input[str]<block_start>pulumi.set(self "blob" value)<block_end>@property@pulumi.getter(name="certificateLocation")<def_stmt>certificate_location self<arrow>pulumi.Input[str]<block_start>"""
The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
"""<line_sep><return>pulumi.get(self "certificate_location")<block_end>@certificate_location.setter<def_stmt>certificate_location self value:pulumi.Input[str]<block_start>pulumi.set(self "certificate_location" value)<block_end>@property@pulumi.getter(name="certificateName")<def_stmt>certificate_name self<arrow>pulumi.Input[str]<block_start>"""
The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><return>pulumi.get(self "certificate_name")<block_end>@certificate_name.setter<def_stmt>certificate_name self value:pulumi.Input[str]<block_start>pulumi.set(self "certificate_name" value)<block_end>@property@pulumi.getter(name="resourceGroupName")<def_stmt>resource_group_name self<arrow>pulumi.Input[str]<block_start>"""
The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@resource_group_name.setter<def_stmt>resource_group_name self value:pulumi.Input[str]<block_start>pulumi.set(self "resource_group_name" value)<block_end><block_end>@pulumi.input_type<class_stmt>_PublicCertificateState<block_start><def_stmt>__init__ __self__ * app_service_name:Optional[pulumi.Input[str]]=<none> blob:Optional[pulumi.Input[str]]=<none> certificate_location:Optional[pulumi.Input[str]]=<none> certificate_name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> thumbprint:Optional[pulumi.Input[str]]=<none><block_start>"""
Input properties used for looking up and filtering PublicCertificate resources.
:param pulumi.Input[str] app_service_name: The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] blob: The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] certificate_location: The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
:param pulumi.Input[str] certificate_name: The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] thumbprint: The thumbprint of the public certificate.
"""<if_stmt>app_service_name<is><not><none><block_start>pulumi.set(__self__ "app_service_name" app_service_name)<block_end><if_stmt>blob<is><not><none><block_start>pulumi.set(__self__ "blob" blob)<block_end><if_stmt>certificate_location<is><not><none><block_start>pulumi.set(__self__ "certificate_location" certificate_location)<block_end><if_stmt>certificate_name<is><not><none><block_start>pulumi.set(__self__ "certificate_name" certificate_name)<block_end><if_stmt>resource_group_name<is><not><none><block_start>pulumi.set(__self__ "resource_group_name" resource_group_name)<block_end><if_stmt>thumbprint<is><not><none><block_start>pulumi.set(__self__ "thumbprint" thumbprint)<block_end><block_end>@property@pulumi.getter(name="appServiceName")<def_stmt>app_service_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><return>pulumi.get(self "app_service_name")<block_end>@app_service_name.setter<def_stmt>app_service_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "app_service_name" value)<block_end>@property@pulumi.getter<def_stmt>blob self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><return>pulumi.get(self "blob")<block_end>@blob.setter<def_stmt>blob self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "blob" value)<block_end>@property@pulumi.getter(name="certificateLocation")<def_stmt>certificate_location self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
"""<line_sep><return>pulumi.get(self "certificate_location")<block_end>@certificate_location.setter<def_stmt>certificate_location self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "certificate_location" value)<block_end>@property@pulumi.getter(name="certificateName")<def_stmt>certificate_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><return>pulumi.get(self "certificate_name")<block_end>@certificate_name.setter<def_stmt>certificate_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "certificate_name" value)<block_end>@property@pulumi.getter(name="resourceGroupName")<def_stmt>resource_group_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@resource_group_name.setter<def_stmt>resource_group_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "resource_group_name" value)<block_end>@property@pulumi.getter<def_stmt>thumbprint self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The thumbprint of the public certificate.
"""<line_sep><return>pulumi.get(self "thumbprint")<block_end>@thumbprint.setter<def_stmt>thumbprint self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "thumbprint" value)<block_end><block_end><class_stmt>PublicCertificate(pulumi.CustomResource)<block_start>@overload<def_stmt>__init__ __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> app_service_name:Optional[pulumi.Input[str]]=<none> blob:Optional[pulumi.Input[str]]=<none> certificate_location:Optional[pulumi.Input[str]]=<none> certificate_name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start>"""
Manages an App Service Public Certificate.
## Example Usage
```python
import pulumi
import base64
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_plan = azure.appservice.Plan("examplePlan",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku=azure.appservice.PlanSkuArgs(
tier="Standard",
size="S1",
))
example_app_service = azure.appservice.AppService("exampleAppService",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
app_service_plan_id=example_plan.id)
example_public_certificate = azure.appservice.PublicCertificate("examplePublicCertificate",
resource_group_name=example_resource_group.name,
app_service_name=example_app_service.name,
certificate_name="example-public-certificate",
certificate_location="Unknown",
blob=(lambda path: base64.b64encode(open(path).read().encode()).decode())("app_service_public_certificate.cer"))
```
## Import
App Service Public Certificates can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:appservice/publicCertificate:PublicCertificate example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Web/sites/site1/publicCertificates/publicCertificate1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service_name: The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] blob: The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] certificate_location: The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
:param pulumi.Input[str] certificate_name: The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><ellipsis><block_end>@overload<def_stmt>__init__ __self__ resource_name:str args:PublicCertificateArgs opts:Optional[pulumi.ResourceOptions]=<none><block_start>"""
Manages an App Service Public Certificate.
## Example Usage
```python
import pulumi
import base64
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_plan = azure.appservice.Plan("examplePlan",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku=azure.appservice.PlanSkuArgs(
tier="Standard",
size="S1",
))
example_app_service = azure.appservice.AppService("exampleAppService",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
app_service_plan_id=example_plan.id)
example_public_certificate = azure.appservice.PublicCertificate("examplePublicCertificate",
resource_group_name=example_resource_group.name,
app_service_name=example_app_service.name,
certificate_name="example-public-certificate",
certificate_location="Unknown",
blob=(lambda path: base64.b64encode(open(path).read().encode()).decode())("app_service_public_certificate.cer"))
```
## Import
App Service Public Certificates can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:appservice/publicCertificate:PublicCertificate example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Web/sites/site1/publicCertificates/publicCertificate1
```
:param str resource_name: The name of the resource.
:param PublicCertificateArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""<line_sep><ellipsis><block_end><def_stmt>__init__ __self__ resource_name:str *args **kwargs<block_start>resource_args,opts=_utilities.get_resource_args_opts(PublicCertificateArgs pulumi.ResourceOptions *args **kwargs)<if_stmt>resource_args<is><not><none><block_start>__self__._internal_init(resource_name opts **resource_args.__dict__)<block_end><else_stmt><block_start>__self__._internal_init(resource_name *args **kwargs)<block_end><block_end><def_stmt>_internal_init __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> app_service_name:Optional[pulumi.Input[str]]=<none> blob:Optional[pulumi.Input[str]]=<none> certificate_location:Optional[pulumi.Input[str]]=<none> certificate_name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start><if_stmt>opts<is><none><block_start>opts=pulumi.ResourceOptions()<block_end><if_stmt><not>isinstance(opts pulumi.ResourceOptions)<block_start><raise>TypeError('Expected resource options to be a ResourceOptions instance')<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end><if_stmt>opts.id<is><none><block_start><if_stmt>__props__<is><not><none><block_start><raise>TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')<block_end>__props__=PublicCertificateArgs.__new__(PublicCertificateArgs)<if_stmt>app_service_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'app_service_name'")<block_end>__props__.__dict__["app_service_name"]=app_service_name<if_stmt>blob<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'blob'")<block_end>__props__.__dict__["blob"]=blob<if_stmt>certificate_location<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 
'certificate_location'")<block_end>__props__.__dict__["certificate_location"]=certificate_location<if_stmt>certificate_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'certificate_name'")<block_end>__props__.__dict__["certificate_name"]=certificate_name<if_stmt>resource_group_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'resource_group_name'")<block_end>__props__.__dict__["resource_group_name"]=resource_group_name<line_sep>__props__.__dict__["thumbprint"]=<none><block_end>super(PublicCertificate __self__).__init__('azure:appservice/publicCertificate:PublicCertificate' resource_name __props__ opts)<block_end>@staticmethod<def_stmt>get resource_name:str id:pulumi.Input[str] opts:Optional[pulumi.ResourceOptions]=<none> app_service_name:Optional[pulumi.Input[str]]=<none> blob:Optional[pulumi.Input[str]]=<none> certificate_location:Optional[pulumi.Input[str]]=<none> certificate_name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> thumbprint:Optional[pulumi.Input[str]]=<none><arrow>'PublicCertificate'<block_start>"""
Get an existing PublicCertificate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_service_name: The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] blob: The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] certificate_location: The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
:param pulumi.Input[str] certificate_name: The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
:param pulumi.Input[str] thumbprint: The thumbprint of the public certificate.
"""<line_sep>opts=pulumi.ResourceOptions.merge(opts pulumi.ResourceOptions(id=id))<line_sep>__props__=_PublicCertificateState.__new__(_PublicCertificateState)<line_sep>__props__.__dict__["app_service_name"]=app_service_name<line_sep>__props__.__dict__["blob"]=blob<line_sep>__props__.__dict__["certificate_location"]=certificate_location<line_sep>__props__.__dict__["certificate_name"]=certificate_name<line_sep>__props__.__dict__["resource_group_name"]=resource_group_name<line_sep>__props__.__dict__["thumbprint"]=thumbprint<line_sep><return>PublicCertificate(resource_name opts=opts __props__=__props__)<block_end>@property@pulumi.getter(name="appServiceName")<def_stmt>app_service_name self<arrow>pulumi.Output[str]<block_start>"""
The name of the App Service. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><return>pulumi.get(self "app_service_name")<block_end>@property@pulumi.getter<def_stmt>blob self<arrow>pulumi.Output[str]<block_start>"""
The base64-encoded contents of the certificate. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><return>pulumi.get(self "blob")<block_end>@property@pulumi.getter(name="certificateLocation")<def_stmt>certificate_location self<arrow>pulumi.Output[str]<block_start>"""
The location of the certificate. Possible values are `CurrentUserMy`, `LocalMachineMy` and `Unknown`.
"""<line_sep><return>pulumi.get(self "certificate_location")<block_end>@property@pulumi.getter(name="certificateName")<def_stmt>certificate_name self<arrow>pulumi.Output[str]<block_start>"""
The name of the public certificate. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><return>pulumi.get(self "certificate_name")<block_end>@property@pulumi.getter(name="resourceGroupName")<def_stmt>resource_group_name self<arrow>pulumi.Output[str]<block_start>"""
The name of the Resource Group where the App Service Public Certificate should exist. Changing this forces a new App Service Public Certificate to be created.
"""<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@property@pulumi.getter<def_stmt>thumbprint self<arrow>pulumi.Output[str]<block_start>"""
The thumbprint of the public certificate.
"""<line_sep><return>pulumi.get(self "thumbprint")<block_end><block_end> |
# -*- coding: utf-8 -*-
"""
Created on 2017/3/19
@author: will4906

Builders for SIPO (Chinese patent office) search expressions, plus the
small And / Or / Not / ItemGroup containers they operate on.
"""
import re


def handle_item_group(item_group):
    """Combine an ItemGroup's And/Or/Not terms into one expression string.

    :param item_group: ItemGroup with optional ``And``, ``Or`` and ``Not``.
    :return: e.g. ``'a AND b OR c NOT d'``.
    """
    AND = ' AND '
    OR = ' OR '
    NOT = ' NOT '
    exp_str = ""
    keyand = item_group.And
    keyor = item_group.Or
    keynot = item_group.Not
    if keyand is not None:
        for parm in keyand.parm:
            exp_str += AND + parm
        # The very first term must not carry a leading connector.
        exp_str = exp_str.replace(AND, '', 1)
    if keyor is not None:
        for parm in keyor.parm:
            exp_str += OR + parm
        if keyand is None:
            exp_str = exp_str.replace(OR, '', 1)
    if keynot is not None:
        for parm in keynot.parm:
            exp_str += NOT + parm
        if keyand is None and keyor is None:
            exp_str = exp_str.replace(NOT, '', 1)
    return exp_str


def handle_number(title, request_number):
    """Build the expression for application / publication numbers.

    Numbers whose first two characters contain a letter keep their prefix
    outside the ``+`` wildcard; otherwise a leading ``+`` is added as well.

    :param title: Chinese field title.
    :param request_number: plain string or an ItemGroup.
    """
    word_reg = '[a-zA-Z]'
    if isinstance(request_number, ItemGroup):
        search_exp = handle_item_group(request_number)
        if re.search(word_reg, search_exp[:2]) is not None:
            return title + '=(' + search_exp + '+)'
        return title + '=(+' + search_exp + '+)'
    if re.search(word_reg, request_number[:2]) is not None:
        return title + '=(' + request_number + '+)'
    return title + '=(+' + request_number + '+)'


def handle_date_element(title, date_element):
    """Build the expression for a date field.

    :param title: Chinese field title.
    :param date_element: must be a DateSelect instance.
    :raises Exception: for anything other than a DateSelect.
    """
    if isinstance(date_element, DateSelect):
        return title + date_element.search_exp
    raise Exception('We just support DateSelect for date element!')


def handle_invention_type(title, invention_type):
    """Build the expression for the invention type.

    Accepts ``'I'``/``'U'``/``'D'`` or their Chinese names, either as a
    plain string or wrapped in an Or group.
    """
    exp_str = ""
    if isinstance(invention_type, Or):
        OR = ' OR '
        for parm in invention_type.parm:
            if parm == 'I' or parm == 'U' or parm == 'D':
                parm = '"' + parm + '"'
            elif parm.find('发明申请') != -1:
                parm = '"I"'
            elif parm.find('实用新型') != -1:
                parm = '"U"'
            elif parm.find('外观设计') != -1:
                parm = '"D"'
            exp_str += OR + parm
        exp_str = exp_str.replace(OR, '', 1)
    elif isinstance(invention_type, str):
        if invention_type == 'I' or invention_type == 'U' or invention_type == 'D':
            exp_str = '"' + invention_type + '"'
        elif invention_type.find('发明申请') != -1:
            exp_str = '"I"'
        elif invention_type.find('实用新型') != -1:
            exp_str = '"U"'
        elif invention_type.find('外观设计') != -1:
            exp_str = '"D"'
    else:
        raise Exception('We just support string or Or for invention_type element!')
    return title + "=(" + exp_str + ")"


def default_handle(title, default):
    """Default builder: ``title=(value)`` for strings or ItemGroups.

    :raises Exception: for anything other than a string or ItemGroup.
    """
    if isinstance(default, ItemGroup):
        return title + '=(' + handle_item_group(default) + ')'
    if isinstance(default, str):
        return title + '=(' + default + ')'
    raise Exception('We just support string or ItemGroup!')


def find_element_in_item_group(element, item_group):
    """Locate `element` inside the group's And/Or/Not parameter lists.

    :return: the index within the first container holding it, else None.
    """
    for container in (item_group.And, item_group.Or, item_group.Not):
        if container is not None:
            try:
                return container.parm.index(element)
            except ValueError:
                # Not in this container; keep looking.
                pass
    return None


# Maps each searchable field to the builder that renders it.
title_case = {
    'request_number': handle_number,
    'request_date': handle_date_element,
    'publish_number': handle_number,
    'publish_date': handle_date_element,
    'invention_name': default_handle,
    'ipc_class_number': default_handle,
    'proposer': default_handle,
    'inventor': default_handle,
    'priority_number': default_handle,
    'priority_date': handle_date_element,
    'abstract': default_handle,
    'claim': default_handle,
    'instructions': default_handle,
    'key_word': default_handle,
    'locarno_class_number': default_handle,
    'description_of_the_design': default_handle,
    'agent': default_handle,
    'agency': default_handle,
    'proposer_post_code': default_handle,
    'proposer_address': default_handle,
    'proposer_location': default_handle,
    'FT_class_number': default_handle,
    'UC_class_number': default_handle,
    'ECLA_class_number': default_handle,
    'FI_class_number': default_handle,
    'English_invention_name': default_handle,
    'French_invention_name': default_handle,
    'German_invention_name': default_handle,
    'other_invention_name': default_handle,
    'English_abstract': default_handle,
    'PCT_enters_national_phase_date': handle_date_element,
    'PCT_international_application_number': handle_number,
    'French_abstract': default_handle,
    'German_abstract': default_handle,
    'other_abstract': default_handle,
    'PCT_international_application_date': handle_date_element,
    'PCT_international_publish_number': handle_number,
    'PCT_international_publish_date': handle_date_element,
    'CPC_class_number': default_handle,
    'C-SETS': default_handle,
    'invention_type': handle_invention_type,
    'publish_country': default_handle,
}

# Maps each field name to its Chinese title on the SIPO site.
title_define = {
    'patent_id': '专利id',
    'request_number': '申请号',
    'request_date': '申请日',
    'publish_number': '公开(公告)号',
    'publish_date': '公开(公告)日',
    'invention_name': '发明名称',
    'ipc_class_number': 'IPC分类号',
    'proposer': '申请(专利权)人',
    'inventor': '发明人',
    'priority_number': '优先权号',
    'priority_date': '优先权日',
    'abstract': '摘要',
    'claim': '权利要求',
    'instructions': '说明书',
    'key_word': '关键词',
    'locarno_class_number': '外观设计洛迦诺分类号',
    'description_of_the_design': '外观设计简要说明',
    'agent': '代理人',
    'agency': '代理机构',
    'proposer_post_code': '申请人邮编',
    'proposer_address': '申请人地址',
    'proposer_location': '申请人所在国(省)',
    'FT_class_number': 'FT分类号',
    'UC_class_number': 'UC分类号',
    'ECLA_class_number': 'ECLA分类号',
    'FI_class_number': 'FI分类号',
    'English_invention_name': '发明名称(英)',
    'French_invention_name': '发明名称(法)',
    'German_invention_name': '发明名称(德)',
    'other_invention_name': '发明名称(其他)',
    'English_abstract': '摘要(英)',
    'PCT_enters_national_phase_date': 'PCT进入国家阶段日期',
    'PCT_international_application_number': 'PCT国际申请号',
    'French_abstract': '摘要(法)',
    'German_abstract': '摘要(德)',
    'other_abstract': '摘要(其他)',
    'PCT_international_application_date': 'PCT国际申请日期',
    'PCT_international_publish_number': 'PCT国际申请公开号',
    'PCT_international_publish_date': 'PCT国际申请公开日期',
    'CPC_class_number': 'CPC分类号',
    'C-SETS': 'C-SETS',
    'invention_type': '发明类型',
    'publish_country': '公开国',
    'legal_status': '法律状态',
    'legal_status_effective_date': '法律状态生效日',
}


class DateSelect:
    """Date selector producing e.g. ``>=2001-01-01`` or ``=a:b`` ranges."""

    def __init__(self, select='=', date='2001-01-01', enddate=None):
        # Operator: '=', '>', '>=', '<', '<=' or ':' (range).
        self.select = select
        # Date in fixed format, e.g. 2001-01-01.
        self.date = date
        # End date; only used when select == ':' (range from date to enddate).
        self.enddate = enddate
        if self.select != ':':
            self.search_exp = self.select + self.date
        else:
            self.search_exp = '=' + self.date + self.select + self.enddate

    def __repr__(self):
        return ('DateSelect{select=' + str(self.select) + ',date=' +
                str(self.date) + ',enddate=' + str(self.enddate) + '}')

    def __str__(self):
        return self.__repr__()


class ItemGroup:
    """Container combining And / Or / Not term groups."""

    def __init__(self, And=None, Or=None, Not=None):
        self.And = And
        self.Or = Or
        self.Not = Not

    def add_or(self, *parm):
        """Append terms to the Or group, creating it on first use."""
        if self.Or is None:
            self.Or = Or(*parm)
        else:
            self.Or.add_parm(*parm)

    def __repr__(self):
        whole = ''
        if self.And is not None:
            whole += str(self.And)
        if self.Or is not None:
            whole += str(self.Or)
        if self.Not is not None:
            whole += str(self.Not)
        return whole


class And:
    """Terms joined with AND."""

    def __init__(self, *parm):
        self.parm = list(parm)

    def add_parm(self, *ps):
        self.parm = self.parm + list(ps)

    def __repr__(self):
        andStr = ''
        for p in self.parm:
            andStr += str(p) + ';'
        return andStr


class Or:
    """Terms joined with OR."""

    def __init__(self, *parm):
        self.parm = list(parm)

    def add_parm(self, *ps):
        # BUG FIX: `self.parm + ps` concatenated a list with a tuple and
        # raised TypeError; convert to list first (matches And.add_parm).
        self.parm = self.parm + list(ps)

    def __repr__(self):
        andStr = ''
        for p in self.parm:
            andStr += str(p) + ';'
        return andStr


class Not:
    """Terms joined with NOT."""

    def __init__(self, *parm):
        self.parm = list(parm)

    def __repr__(self):
        andStr = ''
        for p in self.parm:
            andStr += str(p) + ';'
        return andStr


class SipoItem:
    """Parses keyword arguments into a SIPO patent search expression."""

    def __init__(self, **kwargs):
        self.startIndex = 0
        self.__queryAnd = And()
        self.target_parm = {}  # normalised target parameters
        self.__prepare_item(kwargs)
        for title, value in title_define.items():
            key = kwargs.get(title)
            if key is not None:
                self.__queryAnd.add_parm(title_case.get(title)(value, key))
        self.__itemGroup = ItemGroup(And=self.__queryAnd)
        # The generated Chinese search expression.
        self.search_exp_cn = handle_item_group(self.__itemGroup)
        self.target_parm = self.__check_target_parm(kwargs)

    def __prepare_item(self, items):
        # Searching by invention type implies Chinese patents, so make sure
        # 'CN' is present in publish_country when an invention type is given.
        invention_type = items.get('invention_type')
        if invention_type is not None:
            publish_country = items.get('publish_country')
            if publish_country is None:
                items['publish_country'] = 'CN'
            elif isinstance(publish_country, str):
                if publish_country != 'CN':
                    items['publish_country'] = ItemGroup(Or=Or(publish_country, 'CN'))
            elif isinstance(publish_country, ItemGroup):
                if find_element_in_item_group('CN', publish_country) is None:
                    publish_country.add_or('CN')

    def __check_target_parm(self, parm):
        # Normalise invention_type values into {'en': ..., 'cn': ...} pairs.
        target = {}
        if isinstance(parm, dict):
            for key, value in parm.items():
                if key == 'invention_type':
                    if isinstance(value, Or):
                        for index, pvalue in enumerate(value.parm):
                            if pvalue == '"I"' or pvalue == '发明申请':
                                pvalue = {'en': '"I"', 'cn': '发明申请'}
                            elif pvalue == '"U"' or pvalue == '实用新型':
                                pvalue = {'en': '"U"', 'cn': '实用新型'}
                            elif pvalue == '"D"' or pvalue == '外观设计':
                                pvalue = {'en': '"D"', 'cn': '外观设计'}
                            else:
                                raise Exception('Please check the inventor_type')
                            value.parm[index] = pvalue
                    else:
                        if value == '"I"' or value == '发明申请':
                            value = {'en': '"I"', 'cn': '发明申请'}
                        elif value == '"U"' or value == '实用新型':
                            value = {'en': '"U"', 'cn': '实用新型'}
                        elif value == '"D"' or value == '外观设计':
                            value = {'en': '"D"', 'cn': '外观设计'}
                        else:
                            raise Exception('Please check the inventor_type')
                target[key] = value
        return target

    def __repr__(self):
        return self.search_exp_cn
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause

from typing import List, Optional, Union

import pandas as pd

from feature_engine.dataframe_checks import _is_dataframe
from feature_engine.imputation.base_imputer import BaseImputer
from feature_engine.variable_manipulation import _check_input_parameter_variables


class DropMissingData(BaseImputer):
    """
    DropMissingData() will delete rows containing missing values. It provides
    similar functionality to pandas.drop_na().

    It works for numerical and categorical variables. You can enter the list of
    variables for which missing values should be evaluated. Alternatively, the
    imputer will evaluate missing data in all variables in the dataframe.

    More details in the :ref:`User Guide <drop_missing_data>`.

    Parameters
    ----------
    missing_only: bool, default=True
        If `True`, rows will be dropped when they show missing data in variables
        with missing data in the train set, that is, in the data set used in
        `fit()`. If `False`, rows will be dropped if there is missing data in
        any of the variables. This parameter only works when `threshold=None`,
        otherwise it is ignored.

    variables: list, default=None
        The list of variables to consider for the imputation. If None, the
        imputer will evaluate missing data in all variables in the dataframe.
        Alternatively, the imputer will evaluate missing data only in the
        variables in the list.

        Note that if `missing_only=True` only variables with missing data in
        the train set will be considered to drop a row, which might be a subset
        of the indicated list.

    threshold: int or float, default=None
        Require that percentage of non-NA values in a row to keep it. If
        `threshold=1`, all variables need to have data to keep the row. If
        `threshold=0.5`, 50% of the variables need to have data to keep the
        row. If `threshold=0.01`, 1% of the variables need to have data to keep
        the row. If `threshold=None`, rows with NA in any of the variables will
        be dropped.

    Attributes
    ----------
    variables_:
        The variables for which missing data will be examined to decide if a
        row is dropped. The attribute `variables_` is different from the
        parameter `variables` when the latter is `None`, or when only a subset
        of the indicated variables show NA in the train set if
        `missing_only=True`.

    n_features_in_:
        The number of features in the train set used in fit.

    Methods
    -------
    fit:
        Find the variables for which missing data should be evaluated.
    transform:
        Remove rows with missing data.
    fit_transform:
        Fit to the data, then transform it.
    return_na_data:
        Returns a dataframe with the rows that contain missing data.
    """

    def __init__(
        self,
        missing_only: bool = True,
        threshold: Union[None, int, float] = None,
        variables: Union[None, int, str, List[Union[str, int]]] = None,
    ) -> None:

        if not isinstance(missing_only, bool):
            raise ValueError(
                "missing_only takes values True or False. "
                f"Got {missing_only} instead."
            )

        if threshold is not None:
            if not isinstance(threshold, (int, float)) or not (0 < threshold <= 1):
                raise ValueError(
                    "threshold must be a value between 0 < x <= 1. "
                    f"Got {threshold} instead."
                )

        self.variables = _check_input_parameter_variables(variables)
        self.missing_only = missing_only
        self.threshold = threshold

    def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):
        """
        Find the variables for which missing data should be evaluated to decide
        if a row should be dropped.

        Parameters
        ----------
        X: pandas dataframe of shape = [n_samples, n_features]
            The training data set.

        y: pandas Series, default=None
            y is not needed in this imputation. You can pass None or y.
        """
        # check input dataframe
        X = _is_dataframe(X)

        # find variables for which indicator should be added
        # if threshold is set, then missing_only is ignored:
        if self.threshold is not None:
            if not self.variables:
                self.variables_ = [var for var in X.columns]
            else:
                self.variables_ = self.variables

        # if threshold is None, we have the option to identify
        # variables with NA only.
        else:
            if self.missing_only:
                if not self.variables:
                    self.variables_ = [
                        var for var in X.columns if X[var].isnull().sum() > 0
                    ]
                else:
                    self.variables_ = [
                        var for var in self.variables if X[var].isnull().sum() > 0
                    ]
            else:
                if not self.variables:
                    self.variables_ = [var for var in X.columns]
                else:
                    self.variables_ = self.variables

        self.n_features_in_ = X.shape[1]

        return self

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Remove rows with missing data.

        Parameters
        ----------
        X: pandas dataframe of shape = [n_samples, n_features]
            The dataframe to be transformed.

        Returns
        -------
        X_new: pandas dataframe
            The complete case dataframe for the selected variables, of shape
            [n_samples - n_samples_with_na, n_features]
        """
        X = self._check_transform_input_and_state(X)

        if self.threshold:
            # dropna keeps a row when its count of non-NA values within
            # `variables_` is at least len(variables_) * threshold.
            X.dropna(
                thresh=len(self.variables_) * self.threshold,
                subset=self.variables_,
                axis=0,
                inplace=True,
            )
        else:
            X.dropna(axis=0, how="any", subset=self.variables_, inplace=True)

        return X

    def return_na_data(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Returns the subset of the dataframe with the rows with missing values.
        That is, the subset of the dataframe that would be removed with the
        `transform()` method. This method may be useful in production, for
        example if we want to store or log the removed observations, that is,
        rows that will not be fed into the model.

        Parameters
        ----------
        X: pandas dataframe of shape = [n_samples, n_features]
            The dataframe to be transformed.

        Returns
        -------
        X_na: pandas dataframe of shape = [n_samples_with_na, features]
            The subset of the dataframe with the rows with missing data.
        """
        X = self._check_transform_input_and_state(X)

        if self.threshold:
            # BUG FIX: transform() keeps rows whose fraction of non-NA values
            # in `variables_` is >= threshold, so the rows it removes are
            # those whose fraction of NA values exceeds (1 - threshold).
            # The previous condition (NA fraction >= threshold) only matched
            # transform() when threshold == 0.5.
            idx = pd.isnull(X[self.variables_]).mean(axis=1) > 1 - self.threshold
            idx = idx[idx]
        else:
            # Positional axis argument is deprecated in pandas; be explicit.
            idx = pd.isnull(X[self.variables_]).any(axis=1)
            idx = idx[idx]

        return X.loc[idx.index, :]
# Copyright (c) 2010 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Glance documentation build configuration file
# Sphinx build configuration for the Glance documentation.

import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../bin'))

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['stevedore.sphinxext',
              'sphinx.ext.viewcode',
              'oslo_config.sphinxext',
              'oslo_config.sphinxconfiggen',
              'oslo_policy.sphinxpolicygen',
              'openstackdocstheme',
              'sphinxcontrib.apidoc',
              ]

# openstackdocstheme options
openstackdocs_repo_name = 'openstack/glance'
openstackdocs_bug_project = 'glance'
openstackdocs_bug_tag = 'documentation'

# sphinxcontrib.apidoc options
apidoc_module_dir = '../../glance'
apidoc_output_dir = 'contributor/api'
apidoc_excluded_paths = [
    'hacking/*',
    'hacking',
    'tests/*',
    'tests',
    'db/sqlalchemy/*',
    'db/sqlalchemy']
apidoc_separate_modules = True

# (input config file, output stub) pairs for oslo.config sample generation.
config_generator_config_file = [
    ('../../etc/oslo-config-generator/glance-api.conf',
     '_static/glance-api'),
    ('../../etc/oslo-config-generator/glance-cache.conf',
     '_static/glance-cache'),
    ('../../etc/oslo-config-generator/glance-manage.conf',
     '_static/glance-manage'),
    ('../../etc/oslo-config-generator/glance-scrubber.conf',
     '_static/glance-scrubber'),
]

# (input config file, output stub) pairs for oslo.policy sample generation.
policy_generator_config_file = [
    ('../../etc/glance-policy-generator.conf', '_static/glance'),
]

# The master toctree document.
master_doc = 'index'

# General information about the project.
copyright = u'2010-present, OpenStack Foundation.'

exclude_patterns = [
    # The man directory includes some snippet files that are included
    # in other documents during the build but that should not be
    # included in the toctree themselves, so tell Sphinx to ignore
    # them when scanning for input files.
    'cli/footer.txt',
    'cli/general_options.txt',
    'cli/openstack_options.txt',
]

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['glance.']

# -- Options for man page output --------------------------------------------

# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
    ('cli/glanceapi', 'glance-api', u'Glance API Server',
     [u'OpenStack'], 1),
    ('cli/glancecachecleaner', 'glance-cache-cleaner',
     u'Glance Cache Cleaner', [u'OpenStack'], 1),
    ('cli/glancecachemanage', 'glance-cache-manage',
     u'Glance Cache Manager', [u'OpenStack'], 1),
    ('cli/glancecacheprefetcher', 'glance-cache-prefetcher',
     u'Glance Cache Pre-fetcher', [u'OpenStack'], 1),
    ('cli/glancecachepruner', 'glance-cache-pruner',
     u'Glance Cache Pruner', [u'OpenStack'], 1),
    ('cli/glancecontrol', 'glance-control',
     u'Glance Daemon Control Helper ', [u'OpenStack'], 1),
    ('cli/glancemanage', 'glance-manage',
     u'Glance Management Utility', [u'OpenStack'], 1),
    ('cli/glancereplicator', 'glance-replicator',
     u'Glance Replicator', [u'OpenStack'], 1),
    ('cli/glancescrubber', 'glance-scrubber',
     u'Glance Scrubber Service', [u'OpenStack'], 1)]

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
html_theme = 'openstackdocs'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any paths that contain "extra" files, such as .htaccess or
# robots.txt.
html_extra_path = ['_extra']

# If false, no module index is generated.
html_use_modindex = True

# If false, no index is generated.
html_use_index = True

# -- Options for LaTeX output ------------------------------------------------

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
    ('index', 'Glance.tex', u'Glance Documentation',
     u'Glance Team', 'manual'),
]
"""
Manual resilience test: with Redis as the broker (REDIS_ACK_ABLE), verify
that abruptly killing this process does not lose tasks — unacknowledged
work should be re-delivered when a consumer restarts.
"""
import time

from funboost import boost, BrokerEnum


@boost('test_cost_long_time_fun_queue2', broker_kind=BrokerEnum.REDIS_ACK_ABLE, concurrent_num=5)
def cost_long_time_fun(x):
    # Deliberately slow (3s) so there is a window in which the process can be
    # killed while a task is in flight.
    print(f'正在消费 {x} 中 。。。。')
    time.sleep(3)
    print(f'消费完成 {x} ')


if __name__ == '__main__':
    cost_long_time_fun.consume()
# -*- coding: utf-8 -*-
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>edc_t path<block_start>"""EPICA Dome C Ice Core 800KYr Temperature Estimates
Temperature record, using Deuterium as a proxy, from the EPICA (European
Project for Ice Coring in Antarctica) Dome C ice core covering 0 to 800
kyr BP.
A data frame with 5788 observations on the following 5 variables.
`Bag`
Bag number
`ztop`
Top depth (m)
`Age`
Years before 1950
`Deuterium`
Deuterium dD data
`dT`
Temperature difference from the average of the last 1000 years ~
-54.5degC
http://www.ncdc.noaa.gov/paleo/icecore/antarctica/domec/domec_epica_data.html
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `edc_t.csv`.
Returns:
Tuple of np.ndarray `x_train` with 5788 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='edc_t.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/DAAG/edcT.csv'<line_sep>maybe_download_and_extract(path url save_file_name='edc_t.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end> |
def test():
    # Exercise checker executed by the course runtime: `nlp`, `doc`,
    # `__solution__` (the learner's submitted source as a string) and
    # `__msg__` are injected as globals before this function is called —
    # they are not defined in this file.
    import spacy.tokens
    import spacy.lang.de

    # The learner must have created a German pipeline and run it on a text.
    assert isinstance(nlp, spacy.lang.de.German), "El objeto nlp debería ser un instance de la clase de alemán."
    assert isinstance(doc, spacy.tokens.Doc), "¿Procesaste el texto con el objeto nlp para crear un doc?"
    # Source-level check: the submission must literally print doc.text.
    assert "print(doc.text)" in __solution__, "¿Imprimiste en pantalla el doc.text?"

    __msg__.good("Sehr gut! :)")
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-01-20 05:23
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):
    # Renames Submission.job_id to Submission.job_name. Depends on the
    # migration that introduced the job_id field.

    dependencies = [("jobs", "0013_add_job_id_field_in_submission_model")]

    operations = [
        migrations.RenameField(
            model_name="submission", old_name="job_id", new_name="job_name"
        )
    ]
import json
import warnings as test_warnings
from unittest.mock import patch

import pytest

from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.constants.assets import A_AUD, A_ETC, A_ETH
from rotkehlchen.errors.asset import UnknownAsset
from rotkehlchen.exchanges.data_structures import Location, Trade, TradeType
from rotkehlchen.exchanges.independentreserve import (
    IR_TO_WORLD,
    Independentreserve,
    independentreserve_asset,
)
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.mock import MockResponse

# Every currency code the mocked IndependentReserve account listing reports.
_ALL_CURRENCIES = (
    'Aud', 'Usd', 'Nzd', 'Sgd', 'Xbt', 'Eth', 'Xrp', 'Ada', 'Dot', 'Uni',
    'Link', 'Usdt', 'Usdc', 'Bch', 'Ltc', 'Mkr', 'Dai', 'Comp', 'Snx', 'Grt',
    'Eos', 'Xlm', 'Etc', 'Bat', 'Pmgt', 'Yfi', 'Aave', 'Zrx', 'Omg',
)


def _accounts_response(overrides=None):
    """Build a GetAccounts-style JSON payload for all known currencies.

    Every currency defaults to a zero balance; ``overrides`` maps a currency
    code to an ``(AvailableBalance, TotalBalance)`` pair. Generating the
    payload replaces the ~60 near-identical hand-written JSON lines the
    tests previously duplicated.
    """
    overrides = overrides or {}
    accounts = []
    for currency in _ALL_CURRENCIES:
        available, total = overrides.get(currency, (0.0, 0.0))
        accounts.append({
            'AccountGuid': 'foo',
            'AccountStatus': 'Active',
            'AvailableBalance': available,
            'CurrencyCode': currency,
            'TotalBalance': total,
        })
    return json.dumps(accounts)


def test_location():
    """The exchange object reports the right location and name."""
    exchange = Independentreserve('independentreserve1', 'a', b'a', object(), object())
    assert exchange.location == Location.INDEPENDENTRESERVE
    assert exchange.name == 'independentreserve1'


def test_assets_are_known():
    """Every asset the live API lists must be mappable to a known asset."""
    exchange = Independentreserve('independentreserve1', 'a', b'a', object(), object())
    # Primary and secondary currency lists are checked with identical logic.
    for endpoint, kind in (
            ('GetValidPrimaryCurrencyCodes', 'primary'),
            ('GetValidSecondaryCurrencyCodes', 'secondary'),
    ):
        response = exchange._api_query('get', 'Public', endpoint)
        for currency in response:
            try:
                independentreserve_asset(currency)
            except UnknownAsset:
                test_warnings.warn(UserWarning(
                    f'Found unknown {kind} asset {currency} in IndependentReserve. '
                    f'Support for it has to be added',
                ))


@pytest.mark.parametrize('should_mock_current_price_queries', [True])
def test_query_balances(function_scope_independentreserve, inquirer):  # pylint: disable=unused-argument
    """Test all balances returned by IndependentReserve are processed properly"""
    exchange = function_scope_independentreserve

    def mock_api_return(method, url, **kwargs):  # pylint: disable=unused-argument
        assert method == 'post'
        # Every currency carries the same non-zero balance.
        payload = _accounts_response({c: (150.55, 150.55) for c in _ALL_CURRENCIES})
        return MockResponse(200, payload)

    with patch.object(exchange.session, 'request', side_effect=mock_api_return):
        balances, msg = exchange.query_balances()

    assert msg == ''
    assets_seen = set()
    for asset, balance in balances.items():
        # Each returned asset must be known and appear exactly once.
        assert asset in IR_TO_WORLD.values()
        assert asset not in assets_seen
        assets_seen.add(asset)
        assert balance.amount == FVal('150.55')


@pytest.mark.parametrize('should_mock_current_price_queries', [True])
def test_query_some_balances(function_scope_independentreserve, inquirer):  # pylint: disable=unused-argument
    """Just like test_query_balances but make sure 0 balances are skipped"""
    exchange = function_scope_independentreserve

    def mock_api_return(method, url, **kwargs):  # pylint: disable=unused-argument
        assert method == 'post'
        # Only AUD and ETC have a non-zero TotalBalance; all other
        # currencies default to zero and must be skipped.
        payload = _accounts_response({'Aud': (1.2, 2.5), 'Etc': (0.0, 100.0)})
        return MockResponse(200, payload)

    with patch.object(exchange.session, 'request', side_effect=mock_api_return):
        balances, msg = exchange.query_balances()

    assert msg == ''
    assert balances == {
        A_AUD: Balance(amount=FVal(2.5), usd_value=FVal(3.75)),
        A_ETC: Balance(amount=FVal(100), usd_value=FVal(150)),
    }


def test_query_trade_history(function_scope_independentreserve):
    """Happy path test for independentreserve trade history querying"""
    exchange = function_scope_independentreserve

    def mock_api_return(method, url, **kwargs):  # pylint: disable=unused-argument
        assert method == 'post'
        payload = json.dumps({
            'Data': [
                {
                    'AvgPrice': 603.7,
                    'CreatedTimestampUtc': '2017-11-22T22:54:40.3249401Z',
                    'FeePercent': 0.005,
                    'OrderGuid': 'foo1',
                    'OrderType': 'MarketOffer',
                    'Original': {'Outstanding': 0.0, 'Volume': 0.5, 'VolumeCurrencyType': 'Primary'},
                    'Outstanding': 0.0,
                    'Price': None,
                    'PrimaryCurrencyCode': 'Eth',
                    'SecondaryCurrencyCode': 'Aud',
                    'Status': 'Filled',
                    'Value': 301.85,
                    'Volume': 0.5,
                },
                {
                    'AvgPrice': 257.25,
                    'CreatedTimestampUtc': '2017-07-28T09:39:19.8799244Z',
                    'FeePercent': 0.005,
                    'OrderGuid': 'foo2',
                    'OrderType': 'MarketBid',
                    'Original': {'Outstanding': 0.0, 'Volume': 2.64117379, 'VolumeCurrencyType': 'Primary'},
                    'Outstanding': 0.0,
                    'Price': None,
                    'PrimaryCurrencyCode': 'Eth',
                    'SecondaryCurrencyCode': 'Aud',
                    'Status': 'Filled',
                    'Value': 679.44,
                    'Volume': 2.64117379,
                },
            ],
            'PageSize': 50,
            'TotalItems': 2,
            'TotalPages': 1,
        })
        return MockResponse(200, payload)

    with patch.object(exchange.session, 'request', side_effect=mock_api_return):
        trades = exchange.query_trade_history(
            start_ts=0,
            end_ts=1565732120,
            only_cache=False,
        )

    expected_trades = [Trade(
        timestamp=1501234760,
        location=Location.INDEPENDENTRESERVE,
        base_asset=A_ETH,
        quote_asset=A_AUD,
        trade_type=TradeType.BUY,
        amount=FVal('2.64117379'),
        rate=FVal('257.25'),
        fee=FVal('0.01320586895'),
        fee_currency=A_ETH,
        link='foo2',
    ), Trade(
        timestamp=1511391280,
        location=Location.INDEPENDENTRESERVE,
        base_asset=A_ETH,
        quote_asset=A_AUD,
        trade_type=TradeType.SELL,
        amount=FVal('0.5'),
        rate=FVal('603.7'),
        fee=FVal('0.0025'),
        fee_currency=A_ETH,
        link='foo1',
    )]
    # API returns newest-first; expected_trades is oldest-first.
    assert trades == expected_trades[::-1]

# TODO: Make a test for asset movements.
# Would need more mocking as it would require mocking of multiple calls
import pytz

from ctpbee_api.ctp_mini import *
from ctpbee.constant import *

# Translation tables between CTP-Mini API constants (THOST_FTDC_*) and
# ctpbee's own enums.  *_MINI2VT maps API values to ctpbee enums and
# *_VT2MINI is the reverse direction.

# Order/action status -> ctpbee Status.
STATUS_MINI2VT = {
    THOST_FTDC_OAS_Submitted: Status.SUBMITTING,
    THOST_FTDC_OAS_Accepted: Status.SUBMITTING,
    THOST_FTDC_OAS_Rejected: Status.REJECTED,
    THOST_FTDC_OST_NoTradeQueueing: Status.NOTTRADED,
    THOST_FTDC_OST_PartTradedQueueing: Status.PARTTRADED,
    THOST_FTDC_OST_AllTraded: Status.ALLTRADED,
    THOST_FTDC_OST_Canceled: Status.CANCELLED,
}

# Trade direction (buy/sell).
DIRECTION_VT2MINI = {
    Direction.LONG: THOST_FTDC_D_Buy,
    Direction.SHORT: THOST_FTDC_D_Sell,
}
DIRECTION_MINI2VT = {v: k for k, v in DIRECTION_VT2MINI.items()}
# The position-direction codes map onto the same Direction values, so the
# inverse table deliberately accepts both kinds of code.
DIRECTION_MINI2VT[THOST_FTDC_PD_Long] = Direction.LONG
DIRECTION_MINI2VT[THOST_FTDC_PD_Short] = Direction.SHORT

# Order price type (limit vs any/market price).
ORDERTYPE_VT2MINI = {
    OrderType.LIMIT: THOST_FTDC_OPT_LimitPrice,
    OrderType.MARKET: THOST_FTDC_OPT_AnyPrice,
}
ORDERTYPE_MINI2VT = {v: k for k, v in ORDERTYPE_VT2MINI.items()}

# Open/close offset flags.
OFFSET_VT2MINI = {
    Offset.OPEN: THOST_FTDC_OF_Open,
    Offset.CLOSE: THOST_FTDC_OFEN_Close,
    Offset.CLOSETODAY: THOST_FTDC_OFEN_CloseToday,
    Offset.CLOSEYESTERDAY: THOST_FTDC_OFEN_CloseYesterday,
}
OFFSET_MINI2VT = {v: k for k, v in OFFSET_VT2MINI.items()}

# Exchange identifier strings used by the API -> ctpbee Exchange.
EXCHANGE_MINI2VT = {
    "CFFEX": Exchange.CFFEX,
    "SHFE": Exchange.SHFE,
    "CZCE": Exchange.CZCE,
    "DCE": Exchange.DCE,
    "INE": Exchange.INE,
}

# Product class.
PRODUCT_MINI2VT = {
    THOST_FTDC_PC_Futures: Product.FUTURES,
    THOST_FTDC_PC_Options: Product.OPTION,
    THOST_FTDC_PC_Combination: Product.SPREAD,
}

# Option type (call/put).
OPTIONTYPE_MINI2VT = {
    THOST_FTDC_CP_CallOptions: OptionType.CALL,
    THOST_FTDC_CP_PutOptions: OptionType.PUT,
}

# Timezone for CTP timestamps.
CHINA_TZ = pytz.timezone("Asia/Shanghai")

# Caches filled at runtime elsewhere in the package — presumably keyed by
# contract symbol (TODO confirm against the gateway code that populates them).
symbol_exchange_map = {}
symbol_name_map = {}
symbol_size_map = {}
import unittest

import update_logs


class UpdateLogsTest(unittest.TestCase):
    """Unit tests for update_logs.get_new_logs.

    The expected behavior is pinned entirely by the cases below: the
    function yields whatever portion of ``next_logs`` is new relative to
    ``prev_logs``.
    """

    def _check(self, prev, nxt, expected):
        # One-line helper so every case reads as (prev, next, expected).
        self.assertEqual(expected,
                         update_logs.get_new_logs(prev_logs=prev, next_logs=nxt))

    def test_get_new_logs_with_more_next_logs(self):
        self._check("01234", "0123456789", "56789")

    def test_get_new_logs_with_more_prev_logs(self):
        self._check("0123456789", "01234", "")

    def test_get_new_logs_with_no_common_logs(self):
        self._check("01234", "56789", "56789")

    def test_get_new_logs_with_no_prev_logs(self):
        self._check("", "0123456789", "0123456789")

    def test_get_new_logs_with_no_next_logs(self):
        self._check("01234", "", "")
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
<import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<import_from_stmt>. outputs<import_from_stmt>._inputs *<line_sep>__all__=['RegionInstanceGroupManagerArgs' 'RegionInstanceGroupManager']<line_sep>@pulumi.input_type<class_stmt>RegionInstanceGroupManagerArgs<block_start><def_stmt>__init__ __self__ * base_instance_name:pulumi.Input[str] versions:pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]] auto_healing_policies:Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]=<none> description:Optional[pulumi.Input[str]]=<none> distribution_policy_target_shape:Optional[pulumi.Input[str]]=<none> distribution_policy_zones:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> name:Optional[pulumi.Input[str]]=<none> named_ports:Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]=<none> project:Optional[pulumi.Input[str]]=<none> region:Optional[pulumi.Input[str]]=<none> stateful_disks:Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]=<none> target_pools:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> target_size:Optional[pulumi.Input[int]]=<none> update_policy:Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]=<none> wait_for_instances:Optional[pulumi.Input[bool]]=<none> wait_for_instances_status:Optional[pulumi.Input[str]]=<none><block_start>"""
The set of arguments for constructing a RegionInstanceGroupManager resource.
:param pulumi.Input[str] base_instance_name: The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]] versions: Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
:param pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs'] auto_healing_policies: The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
:param pulumi.Input[str] description: An optional textual description of the instance
group manager.
:param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
:param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
:param pulumi.Input[str] name: - Version name.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]] stateful_disks: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
:param pulumi.Input[int] target_size: - The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.
:param pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs'] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
:param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
:param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""<line_sep>pulumi.set(__self__ "base_instance_name" base_instance_name)<line_sep>pulumi.set(__self__ "versions" versions)<if_stmt>auto_healing_policies<is><not><none><block_start>pulumi.set(__self__ "auto_healing_policies" auto_healing_policies)<block_end><if_stmt>description<is><not><none><block_start>pulumi.set(__self__ "description" description)<block_end><if_stmt>distribution_policy_target_shape<is><not><none><block_start>pulumi.set(__self__ "distribution_policy_target_shape" distribution_policy_target_shape)<block_end><if_stmt>distribution_policy_zones<is><not><none><block_start>pulumi.set(__self__ "distribution_policy_zones" distribution_policy_zones)<block_end><if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><if_stmt>named_ports<is><not><none><block_start>pulumi.set(__self__ "named_ports" named_ports)<block_end><if_stmt>project<is><not><none><block_start>pulumi.set(__self__ "project" project)<block_end><if_stmt>region<is><not><none><block_start>pulumi.set(__self__ "region" region)<block_end><if_stmt>stateful_disks<is><not><none><block_start>pulumi.set(__self__ "stateful_disks" stateful_disks)<block_end><if_stmt>target_pools<is><not><none><block_start>pulumi.set(__self__ "target_pools" target_pools)<block_end><if_stmt>target_size<is><not><none><block_start>pulumi.set(__self__ "target_size" target_size)<block_end><if_stmt>update_policy<is><not><none><block_start>pulumi.set(__self__ "update_policy" update_policy)<block_end><if_stmt>wait_for_instances<is><not><none><block_start>pulumi.set(__self__ "wait_for_instances" wait_for_instances)<block_end><if_stmt>wait_for_instances_status<is><not><none><block_start>pulumi.set(__self__ "wait_for_instances_status" wait_for_instances_status)<block_end><block_end>@property@pulumi.getter(name="baseInstanceName")<def_stmt>base_instance_name self<arrow>pulumi.Input[str]<block_start>"""
The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
"""<line_sep><return>pulumi.get(self "base_instance_name")<block_end>@base_instance_name.setter<def_stmt>base_instance_name self value:pulumi.Input[str]<block_start>pulumi.set(self "base_instance_name" value)<block_end>@property@pulumi.getter<def_stmt>versions self<arrow>pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]<block_start>"""
Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
"""<line_sep><return>pulumi.get(self "versions")<block_end>@versions.setter<def_stmt>versions self value:pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]<block_start>pulumi.set(self "versions" value)<block_end>@property@pulumi.getter(name="autoHealingPolicies")<def_stmt>auto_healing_policies self<arrow>Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]<block_start>"""
The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
"""<line_sep><return>pulumi.get(self "auto_healing_policies")<block_end>@auto_healing_policies.setter<def_stmt>auto_healing_policies self value:Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]<block_start>pulumi.set(self "auto_healing_policies" value)<block_end>@property@pulumi.getter<def_stmt>description self<arrow>Optional[pulumi.Input[str]]<block_start>"""
An optional textual description of the instance
group manager.
"""<line_sep><return>pulumi.get(self "description")<block_end>@description.setter<def_stmt>description self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "description" value)<block_end>@property@pulumi.getter(name="distributionPolicyTargetShape")<def_stmt>distribution_policy_target_shape self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
"""<line_sep><return>pulumi.get(self "distribution_policy_target_shape")<block_end>@distribution_policy_target_shape.setter<def_stmt>distribution_policy_target_shape self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "distribution_policy_target_shape" value)<block_end>@property@pulumi.getter(name="distributionPolicyZones")<def_stmt>distribution_policy_zones self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>"""
The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
"""<line_sep><return>pulumi.get(self "distribution_policy_zones")<block_end>@distribution_policy_zones.setter<def_stmt>distribution_policy_zones self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "distribution_policy_zones" value)<block_end>@property@pulumi.getter<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
- Version name.
"""<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end>@property@pulumi.getter(name="namedPorts")<def_stmt>named_ports self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]<block_start>"""
The named port configuration. See the section below
for details on configuration.
"""<line_sep><return>pulumi.get(self "named_ports")<block_end>@named_ports.setter<def_stmt>named_ports self value:Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]<block_start>pulumi.set(self "named_ports" value)<block_end>@property@pulumi.getter<def_stmt>project self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""<line_sep><return>pulumi.get(self "project")<block_end>@project.setter<def_stmt>project self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "project" value)<block_end>@property@pulumi.getter<def_stmt>region self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The region where the managed instance group resides. If not provided, the provider region is used.
"""<line_sep><return>pulumi.get(self "region")<block_end>@region.setter<def_stmt>region self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "region" value)<block_end>@property@pulumi.getter(name="statefulDisks")<def_stmt>stateful_disks self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]<block_start>"""
Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
"""<line_sep><return>pulumi.get(self "stateful_disks")<block_end>@stateful_disks.setter<def_stmt>stateful_disks self value:Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]<block_start>pulumi.set(self "stateful_disks" value)<block_end>@property@pulumi.getter(name="targetPools")<def_stmt>target_pools self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>"""
The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
"""<line_sep><return>pulumi.get(self "target_pools")<block_end>@target_pools.setter<def_stmt>target_pools self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "target_pools" value)<block_end>@property@pulumi.getter(name="targetSize")<def_stmt>target_size self<arrow>Optional[pulumi.Input[int]]<block_start>"""
- The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.
"""<line_sep><return>pulumi.get(self "target_size")<block_end>@target_size.setter<def_stmt>target_size self value:Optional[pulumi.Input[int]]<block_start>pulumi.set(self "target_size" value)<block_end>@property@pulumi.getter(name="updatePolicy")<def_stmt>update_policy self<arrow>Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]<block_start>"""
The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
"""<line_sep><return>pulumi.get(self "update_policy")<block_end>@update_policy.setter<def_stmt>update_policy self value:Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]<block_start>pulumi.set(self "update_policy" value)<block_end>@property@pulumi.getter(name="waitForInstances")<def_stmt>wait_for_instances self<arrow>Optional[pulumi.Input[bool]]<block_start>"""
Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
"""<line_sep><return>pulumi.get(self "wait_for_instances")<block_end>@wait_for_instances.setter<def_stmt>wait_for_instances self value:Optional[pulumi.Input[bool]]<block_start>pulumi.set(self "wait_for_instances" value)<block_end>@property@pulumi.getter(name="waitForInstancesStatus")<def_stmt>wait_for_instances_status self<arrow>Optional[pulumi.Input[str]]<block_start>"""
When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""<line_sep><return>pulumi.get(self "wait_for_instances_status")<block_end>@wait_for_instances_status.setter<def_stmt>wait_for_instances_status self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "wait_for_instances_status" value)<block_end><block_end>@pulumi.input_type<class_stmt>_RegionInstanceGroupManagerState<block_start><def_stmt>__init__ __self__ * auto_healing_policies:Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]=<none> base_instance_name:Optional[pulumi.Input[str]]=<none> description:Optional[pulumi.Input[str]]=<none> distribution_policy_target_shape:Optional[pulumi.Input[str]]=<none> distribution_policy_zones:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> fingerprint:Optional[pulumi.Input[str]]=<none> instance_group:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> named_ports:Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]=<none> project:Optional[pulumi.Input[str]]=<none> region:Optional[pulumi.Input[str]]=<none> self_link:Optional[pulumi.Input[str]]=<none> stateful_disks:Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]=<none> statuses:Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]]]=<none> target_pools:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> target_size:Optional[pulumi.Input[int]]=<none> update_policy:Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]=<none> versions:Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]]=<none> wait_for_instances:Optional[pulumi.Input[bool]]=<none> wait_for_instances_status:Optional[pulumi.Input[str]]=<none><block_start>"""
Input properties used for looking up and filtering RegionInstanceGroupManager resources.
:param pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs'] auto_healing_policies: The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
:param pulumi.Input[str] base_instance_name: The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
:param pulumi.Input[str] description: An optional textual description of the instance
group manager.
:param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
:param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
:param pulumi.Input[str] fingerprint: The fingerprint of the instance group manager.
:param pulumi.Input[str] instance_group: The full URL of the instance group created by the manager.
:param pulumi.Input[str] name: - Version name.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
:param pulumi.Input[str] self_link: The URL of the created resource.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]] stateful_disks: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]] statuses: The status of this managed instance group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
:param pulumi.Input[int] target_size: - The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.
:param pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs'] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
:param pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]] versions: Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
:param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
:param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""<if_stmt>auto_healing_policies<is><not><none><block_start>pulumi.set(__self__ "auto_healing_policies" auto_healing_policies)<block_end><if_stmt>base_instance_name<is><not><none><block_start>pulumi.set(__self__ "base_instance_name" base_instance_name)<block_end><if_stmt>description<is><not><none><block_start>pulumi.set(__self__ "description" description)<block_end><if_stmt>distribution_policy_target_shape<is><not><none><block_start>pulumi.set(__self__ "distribution_policy_target_shape" distribution_policy_target_shape)<block_end><if_stmt>distribution_policy_zones<is><not><none><block_start>pulumi.set(__self__ "distribution_policy_zones" distribution_policy_zones)<block_end><if_stmt>fingerprint<is><not><none><block_start>pulumi.set(__self__ "fingerprint" fingerprint)<block_end><if_stmt>instance_group<is><not><none><block_start>pulumi.set(__self__ "instance_group" instance_group)<block_end><if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><if_stmt>named_ports<is><not><none><block_start>pulumi.set(__self__ "named_ports" named_ports)<block_end><if_stmt>project<is><not><none><block_start>pulumi.set(__self__ "project" project)<block_end><if_stmt>region<is><not><none><block_start>pulumi.set(__self__ "region" region)<block_end><if_stmt>self_link<is><not><none><block_start>pulumi.set(__self__ "self_link" self_link)<block_end><if_stmt>stateful_disks<is><not><none><block_start>pulumi.set(__self__ "stateful_disks" stateful_disks)<block_end><if_stmt>statuses<is><not><none><block_start>pulumi.set(__self__ "statuses" statuses)<block_end><if_stmt>target_pools<is><not><none><block_start>pulumi.set(__self__ "target_pools" target_pools)<block_end><if_stmt>target_size<is><not><none><block_start>pulumi.set(__self__ "target_size" target_size)<block_end><if_stmt>update_policy<is><not><none><block_start>pulumi.set(__self__ "update_policy" update_policy)<block_end><if_stmt>versions<is><not><none><block_start>pulumi.set(__self__ "versions" 
versions)<block_end><if_stmt>wait_for_instances<is><not><none><block_start>pulumi.set(__self__ "wait_for_instances" wait_for_instances)<block_end><if_stmt>wait_for_instances_status<is><not><none><block_start>pulumi.set(__self__ "wait_for_instances_status" wait_for_instances_status)<block_end><block_end>@property@pulumi.getter(name="autoHealingPolicies")<def_stmt>auto_healing_policies self<arrow>Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]<block_start>"""
The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
"""<line_sep><return>pulumi.get(self "auto_healing_policies")<block_end>@auto_healing_policies.setter<def_stmt>auto_healing_policies self value:Optional[pulumi.Input['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]<block_start>pulumi.set(self "auto_healing_policies" value)<block_end>@property@pulumi.getter(name="baseInstanceName")<def_stmt>base_instance_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
"""<line_sep><return>pulumi.get(self "base_instance_name")<block_end>@base_instance_name.setter<def_stmt>base_instance_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "base_instance_name" value)<block_end>@property@pulumi.getter<def_stmt>description self<arrow>Optional[pulumi.Input[str]]<block_start>"""
An optional textual description of the instance
group manager.
"""<line_sep><return>pulumi.get(self "description")<block_end>@description.setter<def_stmt>description self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "description" value)<block_end>@property@pulumi.getter(name="distributionPolicyTargetShape")<def_stmt>distribution_policy_target_shape self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
"""<line_sep><return>pulumi.get(self "distribution_policy_target_shape")<block_end>@distribution_policy_target_shape.setter<def_stmt>distribution_policy_target_shape self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "distribution_policy_target_shape" value)<block_end>@property@pulumi.getter(name="distributionPolicyZones")<def_stmt>distribution_policy_zones self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>"""
The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
"""<line_sep><return>pulumi.get(self "distribution_policy_zones")<block_end>@distribution_policy_zones.setter<def_stmt>distribution_policy_zones self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "distribution_policy_zones" value)<block_end>@property@pulumi.getter<def_stmt>fingerprint self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The fingerprint of the instance group manager.
"""<line_sep><return>pulumi.get(self "fingerprint")<block_end>@fingerprint.setter<def_stmt>fingerprint self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "fingerprint" value)<block_end>@property@pulumi.getter(name="instanceGroup")<def_stmt>instance_group self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The full URL of the instance group created by the manager.
"""<line_sep><return>pulumi.get(self "instance_group")<block_end>@instance_group.setter<def_stmt>instance_group self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "instance_group" value)<block_end>@property@pulumi.getter<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
- Version name.
"""<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end>@property@pulumi.getter(name="namedPorts")<def_stmt>named_ports self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]<block_start>"""
The named port configuration. See the section below
for details on configuration.
"""<line_sep><return>pulumi.get(self "named_ports")<block_end>@named_ports.setter<def_stmt>named_ports self value:Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerNamedPortArgs']]]]<block_start>pulumi.set(self "named_ports" value)<block_end>@property@pulumi.getter<def_stmt>project self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""<line_sep><return>pulumi.get(self "project")<block_end>@project.setter<def_stmt>project self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "project" value)<block_end>@property@pulumi.getter<def_stmt>region self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The region where the managed instance group resides. If not provided, the provider region is used.
"""<line_sep><return>pulumi.get(self "region")<block_end>@region.setter<def_stmt>region self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "region" value)<block_end>@property@pulumi.getter(name="selfLink")<def_stmt>self_link self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The URL of the created resource.
"""<line_sep><return>pulumi.get(self "self_link")<block_end>@self_link.setter<def_stmt>self_link self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "self_link" value)<block_end>@property@pulumi.getter(name="statefulDisks")<def_stmt>stateful_disks self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]<block_start>"""
Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
"""<line_sep><return>pulumi.get(self "stateful_disks")<block_end>@stateful_disks.setter<def_stmt>stateful_disks self value:Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]<block_start>pulumi.set(self "stateful_disks" value)<block_end>@property@pulumi.getter<def_stmt>statuses self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]]]<block_start>"""
The status of this managed instance group.
"""<line_sep><return>pulumi.get(self "statuses")<block_end>@statuses.setter<def_stmt>statuses self value:Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]]]<block_start>pulumi.set(self "statuses" value)<block_end>@property@pulumi.getter(name="targetPools")<def_stmt>target_pools self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>"""
The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
"""<line_sep><return>pulumi.get(self "target_pools")<block_end>@target_pools.setter<def_stmt>target_pools self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "target_pools" value)<block_end>@property@pulumi.getter(name="targetSize")<def_stmt>target_size self<arrow>Optional[pulumi.Input[int]]<block_start>"""
- The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.
"""<line_sep><return>pulumi.get(self "target_size")<block_end>@target_size.setter<def_stmt>target_size self value:Optional[pulumi.Input[int]]<block_start>pulumi.set(self "target_size" value)<block_end>@property@pulumi.getter(name="updatePolicy")<def_stmt>update_policy self<arrow>Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]<block_start>"""
The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
"""<line_sep><return>pulumi.get(self "update_policy")<block_end>@update_policy.setter<def_stmt>update_policy self value:Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]<block_start>pulumi.set(self "update_policy" value)<block_end>@property@pulumi.getter<def_stmt>versions self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]]<block_start>"""
Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
"""<line_sep><return>pulumi.get(self "versions")<block_end>@versions.setter<def_stmt>versions self value:Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]]<block_start>pulumi.set(self "versions" value)<block_end>@property@pulumi.getter(name="waitForInstances")<def_stmt>wait_for_instances self<arrow>Optional[pulumi.Input[bool]]<block_start>"""
Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
"""<line_sep><return>pulumi.get(self "wait_for_instances")<block_end>@wait_for_instances.setter<def_stmt>wait_for_instances self value:Optional[pulumi.Input[bool]]<block_start>pulumi.set(self "wait_for_instances" value)<block_end>@property@pulumi.getter(name="waitForInstancesStatus")<def_stmt>wait_for_instances_status self<arrow>Optional[pulumi.Input[str]]<block_start>"""
When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""<line_sep><return>pulumi.get(self "wait_for_instances_status")<block_end>@wait_for_instances_status.setter<def_stmt>wait_for_instances_status self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "wait_for_instances_status" value)<block_end><block_end><class_stmt>RegionInstanceGroupManager(pulumi.CustomResource)<block_start>@overload<def_stmt>__init__ __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> auto_healing_policies:Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]]=<none> base_instance_name:Optional[pulumi.Input[str]]=<none> description:Optional[pulumi.Input[str]]=<none> distribution_policy_target_shape:Optional[pulumi.Input[str]]=<none> distribution_policy_zones:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> name:Optional[pulumi.Input[str]]=<none> named_ports:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]]=<none> project:Optional[pulumi.Input[str]]=<none> region:Optional[pulumi.Input[str]]=<none> stateful_disks:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]]=<none> target_pools:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> target_size:Optional[pulumi.Input[int]]=<none> update_policy:Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]]=<none> versions:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]]=<none> wait_for_instances:Optional[pulumi.Input[bool]]=<none> wait_for_instances_status:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start>"""
The Google Compute Engine Regional Instance Group Manager API creates and manages pools
of homogeneous Compute Engine virtual machine instances from a common instance
template.
To get more information about regionInstanceGroupManagers, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/latest/regionInstanceGroupManagers)
* How-to Guides
* [Regional Instance Groups Guide](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups)
> **Note:** Use [compute.InstanceGroupManager](https://www.terraform.io/docs/providers/google/r/compute_instance_group_manager.html) to create a zonal instance group manager.
## Example Usage
### With Top Level Instance Template (`Google` Provider)
```python
import pulumi
import pulumi_gcp as gcp
autohealing = gcp.compute.HealthCheck("autohealing",
check_interval_sec=5,
timeout_sec=5,
healthy_threshold=2,
unhealthy_threshold=10,
http_health_check=gcp.compute.HealthCheckHttpHealthCheckArgs(
request_path="/healthz",
port=8080,
))
appserver = gcp.compute.RegionInstanceGroupManager("appserver",
base_instance_name="app",
region="us-central1",
distribution_policy_zones=[
"us-central1-a",
"us-central1-f",
],
versions=[gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver"]["id"],
)],
target_pools=[google_compute_target_pool["appserver"]["id"]],
target_size=2,
named_ports=[gcp.compute.RegionInstanceGroupManagerNamedPortArgs(
name="custom",
port=8888,
)],
auto_healing_policies=gcp.compute.RegionInstanceGroupManagerAutoHealingPoliciesArgs(
health_check=autohealing.id,
initial_delay_sec=300,
))
```
### With Multiple Versions
```python
import pulumi
import pulumi_gcp as gcp
appserver = gcp.compute.RegionInstanceGroupManager("appserver",
base_instance_name="app",
region="us-central1",
target_size=5,
versions=[
gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver"]["id"],
),
gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver-canary"]["id"],
target_size=gcp.compute.RegionInstanceGroupManagerVersionTargetSizeArgs(
fixed=1,
),
),
])
```
## Import
Instance group managers can be imported using the `name`, e.g.
```sh
$ pulumi import gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager appserver appserver-igm
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']] auto_healing_policies: The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
:param pulumi.Input[str] base_instance_name: The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
:param pulumi.Input[str] description: An optional textual description of the instance
group manager.
:param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
:param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
:param pulumi.Input[str] name: - Version name.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]] stateful_disks: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
:param pulumi.Input[int] target_size: - The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.
:param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]] versions: Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
:param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
:param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""<line_sep><ellipsis><block_end>@overload<def_stmt>__init__ __self__ resource_name:str args:RegionInstanceGroupManagerArgs opts:Optional[pulumi.ResourceOptions]=<none><block_start>"""
The Google Compute Engine Regional Instance Group Manager API creates and manages pools
of homogeneous Compute Engine virtual machine instances from a common instance
template.
To get more information about regionInstanceGroupManagers, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/latest/regionInstanceGroupManagers)
* How-to Guides
* [Regional Instance Groups Guide](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups)
> **Note:** Use [compute.InstanceGroupManager](https://www.terraform.io/docs/providers/google/r/compute_instance_group_manager.html) to create a zonal instance group manager.
## Example Usage
### With Top Level Instance Template (`Google` Provider)
```python
import pulumi
import pulumi_gcp as gcp
autohealing = gcp.compute.HealthCheck("autohealing",
check_interval_sec=5,
timeout_sec=5,
healthy_threshold=2,
unhealthy_threshold=10,
http_health_check=gcp.compute.HealthCheckHttpHealthCheckArgs(
request_path="/healthz",
port=8080,
))
appserver = gcp.compute.RegionInstanceGroupManager("appserver",
base_instance_name="app",
region="us-central1",
distribution_policy_zones=[
"us-central1-a",
"us-central1-f",
],
versions=[gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver"]["id"],
)],
target_pools=[google_compute_target_pool["appserver"]["id"]],
target_size=2,
named_ports=[gcp.compute.RegionInstanceGroupManagerNamedPortArgs(
name="custom",
port=8888,
)],
auto_healing_policies=gcp.compute.RegionInstanceGroupManagerAutoHealingPoliciesArgs(
health_check=autohealing.id,
initial_delay_sec=300,
))
```
### With Multiple Versions
```python
import pulumi
import pulumi_gcp as gcp
appserver = gcp.compute.RegionInstanceGroupManager("appserver",
base_instance_name="app",
region="us-central1",
target_size=5,
versions=[
gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver"]["id"],
),
gcp.compute.RegionInstanceGroupManagerVersionArgs(
instance_template=google_compute_instance_template["appserver-canary"]["id"],
target_size=gcp.compute.RegionInstanceGroupManagerVersionTargetSizeArgs(
fixed=1,
),
),
])
```
## Import
Instance group managers can be imported using the `name`, e.g.
```sh
$ pulumi import gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager appserver appserver-igm
```
:param str resource_name: The name of the resource.
:param RegionInstanceGroupManagerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""<line_sep><ellipsis><block_end><def_stmt>__init__ __self__ resource_name:str *args **kwargs<block_start>resource_args,opts=_utilities.get_resource_args_opts(RegionInstanceGroupManagerArgs pulumi.ResourceOptions *args **kwargs)<if_stmt>resource_args<is><not><none><block_start>__self__._internal_init(resource_name opts **resource_args.__dict__)<block_end><else_stmt><block_start>__self__._internal_init(resource_name *args **kwargs)<block_end><block_end><def_stmt>_internal_init __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> auto_healing_policies:Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]]=<none> base_instance_name:Optional[pulumi.Input[str]]=<none> description:Optional[pulumi.Input[str]]=<none> distribution_policy_target_shape:Optional[pulumi.Input[str]]=<none> distribution_policy_zones:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> name:Optional[pulumi.Input[str]]=<none> named_ports:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]]=<none> project:Optional[pulumi.Input[str]]=<none> region:Optional[pulumi.Input[str]]=<none> stateful_disks:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]]=<none> target_pools:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> target_size:Optional[pulumi.Input[int]]=<none> update_policy:Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]]=<none> versions:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]]=<none> wait_for_instances:Optional[pulumi.Input[bool]]=<none> wait_for_instances_status:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start><if_stmt>opts<is><none><block_start>opts=pulumi.ResourceOptions()<block_end><if_stmt><not>isinstance(opts pulumi.ResourceOptions)<block_start><raise>TypeError('Expected resource options to 
be a ResourceOptions instance')<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end><if_stmt>opts.id<is><none><block_start><if_stmt>__props__<is><not><none><block_start><raise>TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')<block_end>__props__=RegionInstanceGroupManagerArgs.__new__(RegionInstanceGroupManagerArgs)<line_sep>__props__.__dict__["auto_healing_policies"]=auto_healing_policies<if_stmt>base_instance_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'base_instance_name'")<block_end>__props__.__dict__["base_instance_name"]=base_instance_name<line_sep>__props__.__dict__["description"]=description<line_sep>__props__.__dict__["distribution_policy_target_shape"]=distribution_policy_target_shape<line_sep>__props__.__dict__["distribution_policy_zones"]=distribution_policy_zones<line_sep>__props__.__dict__["name"]=name<line_sep>__props__.__dict__["named_ports"]=named_ports<line_sep>__props__.__dict__["project"]=project<line_sep>__props__.__dict__["region"]=region<line_sep>__props__.__dict__["stateful_disks"]=stateful_disks<line_sep>__props__.__dict__["target_pools"]=target_pools<line_sep>__props__.__dict__["target_size"]=target_size<line_sep>__props__.__dict__["update_policy"]=update_policy<if_stmt>versions<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'versions'")<block_end>__props__.__dict__["versions"]=versions<line_sep>__props__.__dict__["wait_for_instances"]=wait_for_instances<line_sep>__props__.__dict__["wait_for_instances_status"]=wait_for_instances_status<line_sep>__props__.__dict__["fingerprint"]=<none><line_sep>__props__.__dict__["instance_group"]=<none><line_sep>__props__.__dict__["self_link"]=<none><line_sep>__props__.__dict__["statuses"]=<none><block_end>super(RegionInstanceGroupManager 
__self__).__init__('gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager' resource_name __props__ opts)<block_end>@staticmethod<def_stmt>get resource_name:str id:pulumi.Input[str] opts:Optional[pulumi.ResourceOptions]=<none> auto_healing_policies:Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]]=<none> base_instance_name:Optional[pulumi.Input[str]]=<none> description:Optional[pulumi.Input[str]]=<none> distribution_policy_target_shape:Optional[pulumi.Input[str]]=<none> distribution_policy_zones:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> fingerprint:Optional[pulumi.Input[str]]=<none> instance_group:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> named_ports:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]]=<none> project:Optional[pulumi.Input[str]]=<none> region:Optional[pulumi.Input[str]]=<none> self_link:Optional[pulumi.Input[str]]=<none> stateful_disks:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]]=<none> statuses:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatusArgs']]]]]=<none> target_pools:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> target_size:Optional[pulumi.Input[int]]=<none> update_policy:Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]]=<none> versions:Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]]=<none> wait_for_instances:Optional[pulumi.Input[bool]]=<none> wait_for_instances_status:Optional[pulumi.Input[str]]=<none><arrow>'RegionInstanceGroupManager'<block_start>"""
Get an existing RegionInstanceGroupManager resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']] auto_healing_policies: The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
:param pulumi.Input[str] base_instance_name: The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
:param pulumi.Input[str] description: An optional textual description of the instance
group manager.
:param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
:param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
:param pulumi.Input[str] fingerprint: The fingerprint of the instance group manager.
:param pulumi.Input[str] instance_group: The full URL of the instance group created by the manager.
:param pulumi.Input[str] name: - Version name.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]] named_ports: The named port configuration. See the section below
for details on configuration.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
:param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
:param pulumi.Input[str] self_link: The URL of the created resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]] stateful_disks: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatusArgs']]]] statuses: The status of this managed instance group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
:param pulumi.Input[int] target_size: - The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.
:param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]] versions: Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
:param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
:param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""<line_sep>opts=pulumi.ResourceOptions.merge(opts pulumi.ResourceOptions(id=id))<line_sep>__props__=_RegionInstanceGroupManagerState.__new__(_RegionInstanceGroupManagerState)<line_sep>__props__.__dict__["auto_healing_policies"]=auto_healing_policies<line_sep>__props__.__dict__["base_instance_name"]=base_instance_name<line_sep>__props__.__dict__["description"]=description<line_sep>__props__.__dict__["distribution_policy_target_shape"]=distribution_policy_target_shape<line_sep>__props__.__dict__["distribution_policy_zones"]=distribution_policy_zones<line_sep>__props__.__dict__["fingerprint"]=fingerprint<line_sep>__props__.__dict__["instance_group"]=instance_group<line_sep>__props__.__dict__["name"]=name<line_sep>__props__.__dict__["named_ports"]=named_ports<line_sep>__props__.__dict__["project"]=project<line_sep>__props__.__dict__["region"]=region<line_sep>__props__.__dict__["self_link"]=self_link<line_sep>__props__.__dict__["stateful_disks"]=stateful_disks<line_sep>__props__.__dict__["statuses"]=statuses<line_sep>__props__.__dict__["target_pools"]=target_pools<line_sep>__props__.__dict__["target_size"]=target_size<line_sep>__props__.__dict__["update_policy"]=update_policy<line_sep>__props__.__dict__["versions"]=versions<line_sep>__props__.__dict__["wait_for_instances"]=wait_for_instances<line_sep>__props__.__dict__["wait_for_instances_status"]=wait_for_instances_status<line_sep><return>RegionInstanceGroupManager(resource_name opts=opts __props__=__props__)<block_end>@property@pulumi.getter(name="autoHealingPolicies")<def_stmt>auto_healing_policies self<arrow>pulumi.Output[Optional['outputs.RegionInstanceGroupManagerAutoHealingPolicies']]<block_start>"""
The autohealing policies for this managed instance
group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
"""<line_sep><return>pulumi.get(self "auto_healing_policies")<block_end>@property@pulumi.getter(name="baseInstanceName")<def_stmt>base_instance_name self<arrow>pulumi.Output[str]<block_start>"""
The base instance name to use for
instances in this group. The value must be a valid
[RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
are lowercase letters, numbers, and hyphens (-). Instances are named by
appending a hyphen and a random four-character string to the base instance
name.
"""<line_sep><return>pulumi.get(self "base_instance_name")<block_end>@property@pulumi.getter<def_stmt>description self<arrow>pulumi.Output[Optional[str]]<block_start>"""
An optional textual description of the instance
group manager.
"""<line_sep><return>pulumi.get(self "description")<block_end>@property@pulumi.getter(name="distributionPolicyTargetShape")<def_stmt>distribution_policy_target_shape self<arrow>pulumi.Output[str]<block_start>"""
The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
"""<line_sep><return>pulumi.get(self "distribution_policy_target_shape")<block_end>@property@pulumi.getter(name="distributionPolicyZones")<def_stmt>distribution_policy_zones self<arrow>pulumi.Output[Sequence[str]]<block_start>"""
The distribution policy for this managed instance
group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
"""<line_sep><return>pulumi.get(self "distribution_policy_zones")<block_end>@property@pulumi.getter<def_stmt>fingerprint self<arrow>pulumi.Output[str]<block_start>"""
The fingerprint of the instance group manager.
"""<line_sep><return>pulumi.get(self "fingerprint")<block_end>@property@pulumi.getter(name="instanceGroup")<def_stmt>instance_group self<arrow>pulumi.Output[str]<block_start>"""
The full URL of the instance group created by the manager.
"""<line_sep><return>pulumi.get(self "instance_group")<block_end>@property@pulumi.getter<def_stmt>name self<arrow>pulumi.Output[str]<block_start>"""
- Version name.
"""<line_sep><return>pulumi.get(self "name")<block_end>@property@pulumi.getter(name="namedPorts")<def_stmt>named_ports self<arrow>pulumi.Output[Optional[Sequence['outputs.RegionInstanceGroupManagerNamedPort']]]<block_start>"""
The named port configuration. See the section below
for details on configuration.
"""<line_sep><return>pulumi.get(self "named_ports")<block_end>@property@pulumi.getter<def_stmt>project self<arrow>pulumi.Output[str]<block_start>"""
The ID of the project in which the resource belongs. If it
is not provided, the provider project is used.
"""<line_sep><return>pulumi.get(self "project")<block_end>@property@pulumi.getter<def_stmt>region self<arrow>pulumi.Output[str]<block_start>"""
The region where the managed instance group resides. If not provided, the provider region is used.
"""<line_sep><return>pulumi.get(self "region")<block_end>@property@pulumi.getter(name="selfLink")<def_stmt>self_link self<arrow>pulumi.Output[str]<block_start>"""
The URL of the created resource.
"""<line_sep><return>pulumi.get(self "self_link")<block_end>@property@pulumi.getter(name="statefulDisks")<def_stmt>stateful_disks self<arrow>pulumi.Output[Optional[Sequence['outputs.RegionInstanceGroupManagerStatefulDisk']]]<block_start>"""
Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
"""<line_sep><return>pulumi.get(self "stateful_disks")<block_end>@property@pulumi.getter<def_stmt>statuses self<arrow>pulumi.Output[Sequence['outputs.RegionInstanceGroupManagerStatus']]<block_start>"""
The status of this managed instance group.
"""<line_sep><return>pulumi.get(self "statuses")<block_end>@property@pulumi.getter(name="targetPools")<def_stmt>target_pools self<arrow>pulumi.Output[Optional[Sequence[str]]]<block_start>"""
The full URL of all target pools to which new
instances in the group are added. Updating the target pools attribute does
not affect existing instances.
"""<line_sep><return>pulumi.get(self "target_pools")<block_end>@property@pulumi.getter(name="targetSize")<def_stmt>target_size self<arrow>pulumi.Output[int]<block_start>"""
- The number of instances calculated as a fixed number or a percentage depending on the settings. Structure is documented below.
"""<line_sep><return>pulumi.get(self "target_size")<block_end>@property@pulumi.getter(name="updatePolicy")<def_stmt>update_policy self<arrow>pulumi.Output['outputs.RegionInstanceGroupManagerUpdatePolicy']<block_start>"""
The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
"""<line_sep><return>pulumi.get(self "update_policy")<block_end>@property@pulumi.getter<def_stmt>versions self<arrow>pulumi.Output[Sequence['outputs.RegionInstanceGroupManagerVersion']]<block_start>"""
Application versions managed by this instance group. Each
version deals with a specific instance template, allowing canary release scenarios.
Structure is documented below.
"""<line_sep><return>pulumi.get(self "versions")<block_end>@property@pulumi.getter(name="waitForInstances")<def_stmt>wait_for_instances self<arrow>pulumi.Output[Optional[bool]]<block_start>"""
Whether to wait for all instances to be created/updated before
returning. Note that if this is set to true and the operation does not succeed, the provider will
continue trying until it times out.
"""<line_sep><return>pulumi.get(self "wait_for_instances")<block_end>@property@pulumi.getter(name="waitForInstancesStatus")<def_stmt>wait_for_instances_status self<arrow>pulumi.Output[Optional[str]]<block_start>"""
When used with `wait_for_instances` it specifies the status to wait for.
When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
"""<line_sep><return>pulumi.get(self "wait_for_instances_status")<block_end><block_end> |
<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.examples.tutorials.mnist input_data<def_stmt>weight_variable shape<block_start>initial=tf.truncated_normal(shape stddev=0.1)<line_sep><return>tf.Variable(initial)<block_end><def_stmt>bias_variable shape<block_start>initial=tf.constant(0.1 shape=shape)<line_sep><return>tf.Variable(initial)<block_end><def_stmt>conv2d x W<block_start><return>tf.nn.conv2d(x W strides=[1 1 1 1] padding='SAME')<block_end><def_stmt>max_pool_2x2 x<block_start><return>tf.nn.max_pool(x ksize=[1 2 2 1] strides=[1 2 2 1] padding='SAME')<block_end><def_stmt>train_and_save_model filename<block_start>mnist=input_data.read_data_sets("MNIST_data/" one_hot=<true>)<line_sep># Placeholders:
x=tf.placeholder(tf.float32 [<none> 784])<line_sep>y_=tf.placeholder(tf.float32 [<none> 10])<line_sep># Model Parameters
W_conv1=weight_variable([5 5 1 32])<line_sep>b_conv1=bias_variable([32])<line_sep>x_image=tf.reshape(x [-1 28 28 1])# x is a [picture_ct, 28*28], so x_image is [picture_ct, 28, 28, 1]
h_conv1=tf.nn.relu(conv2d(x_image W_conv1)+b_conv1)<line_sep>h_pool1=max_pool_2x2(h_conv1)<line_sep>W_conv2=weight_variable([5 5 32 64])<line_sep>b_conv2=bias_variable([64])<line_sep>h_conv2=tf.nn.relu(conv2d(h_pool1 W_conv2)+b_conv2)<line_sep>h_pool2=max_pool_2x2(h_conv2)<line_sep>W_fc1=weight_variable([7<times>7<times>64 1024])<line_sep>b_fc1=bias_variable([1024])<line_sep>h_pool2_flat=tf.reshape(h_pool2 [-1 7<times>7<times>64])<line_sep>h_fc1=tf.nn.relu(tf.matmul(h_pool2_flat W_fc1)+b_fc1)<line_sep>keep_prob=tf.placeholder_with_default(1.0 ())<line_sep>h_fc1_drop=tf.nn.dropout(h_fc1 keep_prob)<line_sep>W_fc2=weight_variable([1024 10])<line_sep>b_fc2=bias_variable([10])<line_sep>y_conv=tf.matmul(h_fc1_drop W_fc2)+b_fc2<line_sep>correct_prediction=tf.equal(tf.argmax(y_conv 1) tf.argmax(y_ 1))<line_sep>correct_count=tf.count_nonzero(correct_prediction)<line_sep>accuracy=tf.reduce_mean(tf.cast(correct_prediction tf.float32))<line_sep># Set up training criterion
cross_entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv labels=y_))<line_sep>train_step=tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)<line_sep># Initializer step
init_op=tf.global_variables_initializer()# must be after adamoptimizer, since that creates more vars
# Configure saver
saver=tf.train.Saver()<line_sep>tf.add_to_collection('mnist' x)<line_sep>tf.add_to_collection('mnist' y_)<line_sep>tf.add_to_collection('mnist' keep_prob)<line_sep>tf.add_to_collection('mnist' y_conv)<line_sep>tf.add_to_collection('mnist' correct_count)<line_sep>tf.add_to_collection('mnist' cross_entropy)<line_sep># Train the model
<with_stmt>tf.Session()<as>sess<block_start>sess.run(init_op)<for_stmt>i range(20000)<block_start>batch=mnist.train.next_batch(50)<if_stmt>i%100<eq>0<block_start>train_accuracy=accuracy.eval(feed_dict={x:batch[0] y_:batch[1] keep_prob:1.0})<line_sep>print("Step {}: Training accuracy {}".format(i train_accuracy))<block_end>sess.run(train_step feed_dict={x:batch[0] y_:batch[1] keep_prob:0.5})<block_end>save_path=saver.save(sess filename)<line_sep>print('Model saved to: {}'.format(filename))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>train_and_save_model('./mnist-model')<block_end> |
'''
.. module:: eosfactory.core.vscode
:platform: Unix, Darwin
:synopsis: Default configuration items of a contract project.
.. moduleauthor:: Tokenika
'''<import_stmt>json<import_stmt>argparse<import_stmt>eosfactory.core.config<as>config<line_sep>INCLUDE_PATH="includePath"<line_sep>LIBS="libs"<line_sep>CODE_OPTIONS="codeOptions"<line_sep>TEST_OPTIONS="testOptions"<def_stmt>get_includes <block_start>includes=config.eosio_cpp_includes()<line_sep>retval=[]<line_sep>root=config.wsl_root()<for_stmt>include includes<block_start>retval.append(root+include)<block_end>retval.append("${workspaceFolder}")<line_sep>retval.append("${workspaceFolder}/include")<line_sep><return>retval<block_end>LIB_LIST=[]<line_sep>OPTIONS=[]<line_sep>TASKS='''
{
"version": "2.0.0",
"tasks": [
{
"label": "Compile",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}' --compile"
},
"osx": {
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}' --compile"
},
"linux": {
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}' --compile"
},
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"problemMatcher": [
]
},
{
"label": "Build",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}'"
},
"osx": {
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}'"
},
"linux": {
"command": "mkdir -p build; python3 -m eosfactory.build '${workspaceFolder}'"
},
"problemMatcher": [],
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"group": {
"kind": "build",
"isDefault": true
},
"problemMatcher": [
]
},
{
"label": "Test",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "python3 ./tests/test1.py"
},
"osx": {
"command": "python3 ./tests/test1.py"
},
"linux": {
"command": "python3 ./tests/test1.py"
},
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"problemMatcher": [
]
},
{
"label": "Unittest",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "python3 ./tests/unittest1.py"
},
"osx": {
"command": "python3 ./tests/unittest1.py"
},
"linux": {
"command": "python3 ./tests/unittest1.py"
},
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"problemMatcher": [
]
},
{
"label": "EOSIO API",
"type": "shell",
"windows": {
"options": {
"shell": {
"executable": "bash.exe",
"args": [
"-c"
]
}
},
"command": "explorer.exe"
},
"osx": {
"command": "open"
},
"linux": {
"command": "sensible-browser"
},
"args": [
"https://developers.eos.io/"
],
"presentation": {
"reveal": "always",
"panel": "dedicated"
},
"problemMatcher": [
]
}
]
}
'''<def_stmt>c_cpp_properties <block_start>includes=get_includes()<line_sep>retval="""
{
"configurations": [
{
"%s": %s,
"%s": %s,
"%s": %s,
"%s": %s,
"defines": [],
"intelliSenseMode": "clang-x64",
"browse": {
"path": %s,
"limitSymbolsToIncludedHeaders": true,
"databaseFilename": ""
}
}
],
"version": 4
}
"""%(INCLUDE_PATH json.dumps(includes indent=4) LIBS json.dumps(LIB_LIST indent=4) CODE_OPTIONS json.dumps(OPTIONS indent=4) TEST_OPTIONS json.dumps(OPTIONS indent=4) json.dumps(includes indent=4))<line_sep><return>retval<block_end><def_stmt>main c_cpp_properties_path=<none><block_start><if_stmt>c_cpp_properties_path<block_start>config.update_vscode(c_cpp_properties_path)<block_end><else_stmt><block_start>print(c_cpp_properties())<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--c_cpp_prop_path" default="")<line_sep>args=parser.parse_args()<line_sep>main(args.c_cpp_prop_path)<block_end> |
<import_from_stmt>tests.functional.services.policy_engine.utils.api.conf policy_engine_api_conf <import_from_stmt>tests.functional.services.utils http_utils<def_stmt>get_vulnerabilities vulnerability_ids=[] affected_package=<none> affected_package_version=<none> namespace=<none> <block_start><if_stmt><not>vulnerability_ids<block_start><raise>ValueError("Cannot fetch vulnerabilities without ids")<block_end>query={"id":",".join(vulnerability_ids) "affected_package":affected_package "affected_package_version":affected_package_version "namespace":namespace }<line_sep>vulnerabilities_resp=http_utils.http_get(["query" "vulnerabilities"] query config=policy_engine_api_conf)<if_stmt>vulnerabilities_resp.code<ne>200<block_start><raise>http_utils.RequestFailedError(vulnerabilities_resp.url vulnerabilities_resp.code vulnerabilities_resp.body )<block_end><return>vulnerabilities_resp<block_end> |
# coding: utf-8
<import_from_stmt>django.apps AppConfig<class_stmt>ThemingConfig(AppConfig)<block_start>name='admin_tools.theming'<block_end> |
<import_stmt>platform<as>platform_module<import_stmt>pytest<import_from_stmt>cibuildwheel.__main__ get_build_identifiers<import_from_stmt>cibuildwheel.environment parse_environment<import_from_stmt>cibuildwheel.options Options _get_pinned_docker_images<import_from_stmt>.utils get_default_command_line_arguments<line_sep>PYPROJECT_1="""
[tool.cibuildwheel]
build = ["cp38*", "cp37*"]
environment = {FOO="BAR"}
test-command = "pyproject"
manylinux-x86_64-image = "manylinux1"
environment-pass = ["<PASSWORD>"]
[tool.cibuildwheel.macos]
test-requires = "else"
[[tool.cibuildwheel.overrides]]
select = "cp37*"
test-command = "pyproject-override"
manylinux-x86_64-image = "manylinux2014"
"""<def_stmt>test_options_1 tmp_path monkeypatch<block_start><with_stmt>tmp_path.joinpath("pyproject.toml").open("w")<as>f<block_start>f.write(PYPROJECT_1)<block_end>args=get_default_command_line_arguments()<line_sep>args.package_dir=str(tmp_path)<line_sep>monkeypatch.setattr(platform_module "machine" <lambda>:"x86_64")<line_sep>options=Options(platform="linux" command_line_arguments=args)<line_sep>identifiers=get_build_identifiers(platform="linux" build_selector=options.globals.build_selector architectures=options.globals.architectures )<line_sep>override_display="""\
test_command: 'pyproject'
cp37-manylinux_x86_64: 'pyproject-override'"""<line_sep>print(options.summary(identifiers))<assert_stmt>override_display<in>options.summary(identifiers)<line_sep>default_build_options=options.build_options(identifier=<none>)<assert_stmt>default_build_options.environment<eq>parse_environment('FOO="BAR"')<line_sep>all_pinned_docker_images=_get_pinned_docker_images()<line_sep>pinned_x86_64_docker_image=all_pinned_docker_images["x86_64"]<line_sep>local=options.build_options("cp38-manylinux_x86_64")<assert_stmt>local.manylinux_images<is><not><none><assert_stmt>local.test_command<eq>"pyproject"<assert_stmt>local.manylinux_images["x86_64"]<eq>pinned_x86_64_docker_image["manylinux1"]<line_sep>local=options.build_options("cp37-manylinux_x86_64")<assert_stmt>local.manylinux_images<is><not><none><assert_stmt>local.test_command<eq>"pyproject-override"<assert_stmt>local.manylinux_images["x86_64"]<eq>pinned_x86_64_docker_image["manylinux2014"]<block_end><def_stmt>test_passthrough tmp_path monkeypatch<block_start><with_stmt>tmp_path.joinpath("pyproject.toml").open("w")<as>f<block_start>f.write(PYPROJECT_1)<block_end>args=get_default_command_line_arguments()<line_sep>args.package_dir=str(tmp_path)<line_sep>monkeypatch.setattr(platform_module "machine" <lambda>:"x86_64")<line_sep>monkeypatch.setenv("EXAMPLE_ENV" "ONE")<line_sep>options=Options(platform="linux" command_line_arguments=args)<line_sep>default_build_options=options.build_options(identifier=<none>)<assert_stmt>default_build_options.environment.as_dictionary(prev_environment={})<eq>{"FOO":"BAR" "EXAMPLE_ENV":"ONE" }<block_end>@pytest.mark.parametrize("env_var_value" ["normal value" '"value wrapped in quotes"' "an unclosed single-quote: '" 'an unclosed double-quote: "' "string\nwith\ncarriage\nreturns\n" "a trailing backslash \\" ] )<def_stmt>test_passthrough_evil tmp_path monkeypatch 
env_var_value<block_start>args=get_default_command_line_arguments()<line_sep>args.package_dir=str(tmp_path)<line_sep>monkeypatch.setattr(platform_module "machine" <lambda>:"x86_64")<line_sep>monkeypatch.setenv("CIBW_ENVIRONMENT_PASS_LINUX" "ENV_VAR")<line_sep>options=Options(platform="linux" command_line_arguments=args)<line_sep>monkeypatch.setenv("ENV_VAR" env_var_value)<line_sep>parsed_environment=options.build_options(identifier=<none>).environment<assert_stmt>parsed_environment.as_dictionary(prev_environment={})<eq>{"ENV_VAR":env_var_value}<block_end> |
<import_from_stmt>dydx3.starkex.helpers fact_to_condition<import_from_stmt>dydx3.starkex.helpers generate_private_key_hex_unsafe<import_from_stmt>dydx3.starkex.helpers get_transfer_erc20_fact<import_from_stmt>dydx3.starkex.helpers nonce_from_client_id<import_from_stmt>dydx3.starkex.helpers private_key_from_bytes<import_from_stmt>dydx3.starkex.helpers private_key_to_public_hex<import_from_stmt>dydx3.starkex.helpers private_key_to_public_key_pair_hex<class_stmt>TestHelpers()<block_start><def_stmt>test_nonce_from_client_id self<block_start><assert_stmt>nonce_from_client_id('')<eq>2018687061<assert_stmt>nonce_from_client_id('1')<eq>3079101259<assert_stmt>nonce_from_client_id('a')<eq>2951628987<assert_stmt>nonce_from_client_id('A really long client ID used to identify an order or withdrawal' )<eq>2913863714<assert_stmt>nonce_from_client_id('A really long client ID used to identify an order or withdrawal!' )<eq>230317226<block_end><def_stmt>test_get_transfer_erc20_fact self<block_start><assert_stmt>get_transfer_erc20_fact(recipient='0x1234567890123456789012345678901234567890' token_decimals=3 human_amount=123.456 token_address='0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa' salt=int('0x1234567890abcdef' 16) ).hex()<eq>('34052387b5efb6132a42b244cff52a85a507ab319c414564d7a89207d4473672')<block_end><def_stmt>test_fact_to_condition self<block_start>fact=bytes.fromhex('cf9492ae0554c642b57f5d9cabee36fb512dd6b6629bdc51e60efb3118b8c2d8')<line_sep>condition=fact_to_condition('0xe4a295420b58a4a7aa5c98920d6e8a0ef875b17a' fact )<assert_stmt>hex(condition)<eq>('0x4d794792504b063843afdf759534f5ed510a3ca52e7baba2e999e02349dd24')<block_end><def_stmt>test_generate_private_key_hex_unsafe self<block_start><assert_stmt>(generate_private_key_hex_unsafe()<ne>generate_private_key_hex_unsafe())<block_end><def_stmt>test_private_key_from_bytes 
self<block_start><assert_stmt>(private_key_from_bytes(b'0')<eq>'<KEY>')<assert_stmt>(private_key_from_bytes(b'a')<eq>'0x1d61128b46faa109512e0e00fe9adf5ff52047ed61718eeeb7c0525dfcd2f8e')<assert_stmt>(private_key_from_bytes(b'really long input data for key generation with the '<concat>b'keyPairFromData() function')<eq>'0x7c4946831bde597b73f1d5721af9c67731eafeb75c1b8e92ac457a61819a29')<block_end><def_stmt>test_private_key_to_public_hex self<block_start><assert_stmt>private_key_to_public_hex('<KEY>' )<eq>('<KEY>')<block_end><def_stmt>test_private_key_to_public_key_pair_hex self<block_start>x,y=private_key_to_public_key_pair_hex('<KEY>' )<assert_stmt>x<eq>('<KEY>')<assert_stmt>y<eq>('<KEY>')<block_end><block_end> |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_from_stmt>caffe2.python core<import_from_stmt>hypothesis given<import_stmt>caffe2.python.hypothesis_test_util<as>hu<import_stmt>hypothesis.strategies<as>st<import_stmt>numpy<as>np<def_stmt>entropy p<block_start>q=1.-p<line_sep><return>-p<times>np.log(p)-q<times>np.log(q)<block_end><def_stmt>jsd p q<block_start><return>[entropy(p/2.+q/2.)-entropy(p)/2.-entropy(q)/2.]<block_end><def_stmt>jsd_grad go o pq_list<block_start>p,q=pq_list<line_sep>m=(p+q)/2.<line_sep><return>[np.log(p<times>(1-m)/(1-p)/m)/2.<times>go <none>]<block_end><class_stmt>TestJSDOps(hu.HypothesisTestCase)<block_start>@given(n=st.integers(10 100) **hu.gcs_cpu_only)<def_stmt>test_bernoulli_jsd self n gc dc<block_start>p=np.random.rand(n).astype(np.float32)<line_sep>q=np.random.rand(n).astype(np.float32)<line_sep>op=core.CreateOperator("BernoulliJSD" ["p" "q"] ["l"])<line_sep>self.assertReferenceChecks(device_option=gc op=op inputs=[p q] reference=jsd output_to_grad='l' grad_reference=jsd_grad )<block_end><block_end> |
<import_from_stmt>pandapower.shortcircuit.calc_sc calc_sc<import_from_stmt>pandapower.shortcircuit.toolbox *<line_sep> |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for Aer simulation"""<import_stmt>qiskit<import_from_stmt>.base QiskitTestCase<class_stmt>TestAerSimulation(QiskitTestCase)<block_start>"""Tests for Aer simulation"""<def_stmt>test_execute_in_aer self<block_start>"""Test executing a circuit in an Aer simulator"""<line_sep>qr=qiskit.QuantumRegister(1)<line_sep>cr=qiskit.ClassicalRegister(1)<line_sep>circuit=qiskit.QuantumCircuit(qr cr)<line_sep>circuit.h(qr[0])<line_sep>circuit.measure(qr cr)<line_sep>backend=qiskit.Aer.get_backend('qasm_simulator')<line_sep>shots=2000<line_sep>results=qiskit.execute(circuit backend shots=shots).result()<line_sep>self.assertDictAlmostEqual({'0':1000 '1':1000} results.get_counts() delta=100)<block_end><block_end> |
<import_from_stmt>ivy ivy_module<as>im<import_from_stmt>ivy.ivy_compiler ivy_from_string<import_from_stmt>ivy.tk_ui new_ui<import_from_stmt>ivy ivy_utils<as>iu<line_sep>prog="""#lang ivy1.6
type t
individual x(X:t) : t
object foo(me:t) = {
after init {
x(me) := me;
assert false
}
}
isolate iso_foo(me:t) = foo(me) with x(me)
"""<with_stmt>im.Module()<block_start>iu.set_parameters({'mode':'induction' 'isolate':'iso_foo' 'show_compiled':'true'})<line_sep>main_ui=new_ui()<line_sep>ui=main_ui.add(ivy_from_string(prog))<line_sep>main_ui.tk.update_idletasks()<line_sep>main_ui.answer("OK")<line_sep>ui.check_safety_node(ui.node(0))<assert_stmt><not>ui.node(0).safe<block_end># ui.check_inductiveness()
# # ui = ui.cti
# cg = ui.current_concept_graph
# cg.show_relation(cg.relation('link(X,Y)'),'+')
# cg.gather()
# main_ui.answer("OK")
# cg.strengthen()
# main_ui.answer("OK")
# ui.check_inductiveness()
# # cg.show_relation(cg.relation('semaphore'),'+')
# cg.gather()
# main_ui.answer("View")
# cg.bmc_conjecture(bound=1)
# main_ui.mainloop()
|
<import_from_stmt>.base BaseField<class_stmt>IntegerField(BaseField)<block_start><pass><block_end> |
'''
A command library help user upload their results to dashboard.
'''<line_sep>#!/usr/bin/env python
<import_stmt>argparse<import_from_stmt>..file_utils get_resource_file_path get_resource_list<import_from_stmt>. cli_constant<as>cli<def_stmt>entry args<block_start>'''Entrance of show resources path and whether resource is cached or not'''<line_sep>resource_names=get_resource_list()<line_sep>parser=argparse.ArgumentParser(prog="cotk resources" description="check resources site and whether s specific resource cache is available")<line_sep>parser.add_argument("--show_all" action="store_true" help="Show path of all resources")<line_sep>parser.add_argument("--show_stored" action="store_true" help="Show path of all stored resource")<line_sep>parser.add_argument("--show" type=str help="Show path of a specific resource")<line_sep>cargs=parser.parse_args(args)<if_stmt>cargs.show_all<block_start>cli.LOGGER.info("{:30}\t{:100}".format("Resource IDs" "Cache paths"))<for_stmt>resource resource_names<block_start>cache_path=get_resource_file_path("resources://"+resource download=<false>)<if_stmt>cache_path<is><not><none><block_start>cli.LOGGER.info("{:30}\t{:100}".format(resource cache_path))<block_end><else_stmt><block_start>cli.LOGGER.info("{:30}\t{:100}".format(resource "Not cached"))<block_end><block_end><block_end><elif_stmt>cargs.show_stored<block_start>cli.LOGGER.info("{:30}\t{:100}".format("Resource IDs" "Cache paths"))<for_stmt>resource resource_names<block_start>cache_path=get_resource_file_path("resources://"+resource download=<false>)<if_stmt>cache_path<is><not><none><block_start>cli.LOGGER.info("{:30}\t{:100}".format(resource cache_path))<block_end><block_end><block_end><elif_stmt>cargs.show<is><not><none><block_start><if_stmt>cargs.show[:12]<ne>("resources://")<block_start><raise>RuntimeError('Please input a string starting with "resources://"')<block_end><if_stmt>cargs.show[12:]<not><in>resource_names<block_start><raise>RuntimeError("Unkown resource name {}".format(cargs.show[12:]))<block_end>cache_path=get_resource_file_path(cargs.show 
download=<false>)<if_stmt>cache_path<is><not><none><block_start>cli.LOGGER.info("{:30}\t{:100}".format("Resource IDs" "Cache paths"))<line_sep>cli.LOGGER.info("{:30}\t{:100}".format(cargs.show cache_path))<block_end><else_stmt><block_start>cli.LOGGER.info("resource {} is not cached.".format(cargs.show))<block_end><block_end><else_stmt><block_start><raise>RuntimeError("Unkown params.")<block_end><block_end> |
<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>importlib<import_from_stmt>qlib.data.ops ElemOperator PairOperator<import_from_stmt>qlib.config C<import_from_stmt>qlib.data.cache H<import_from_stmt>qlib.data.data Cal<import_from_stmt>qlib.contrib.ops.high_freq get_calendar_day<class_stmt>DayLast(ElemOperator)<block_start>"""DayLast Operator
Parameters
----------
feature : Expression
feature instance
Returns
----------
feature:
a series of that each value equals the last value of its day
"""<def_stmt>_load_internal self instrument start_index end_index freq<block_start>_calendar=get_calendar_day(freq=freq)<line_sep>series=self.feature.load(instrument start_index end_index freq)<line_sep><return>series.groupby(_calendar[series.index]).transform("last")<block_end><block_end><class_stmt>FFillNan(ElemOperator)<block_start>"""FFillNan Operator
Parameters
----------
feature : Expression
feature instance
Returns
----------
feature:
a forward fill nan feature
"""<def_stmt>_load_internal self instrument start_index end_index freq<block_start>series=self.feature.load(instrument start_index end_index freq)<line_sep><return>series.fillna(method="ffill")<block_end><block_end><class_stmt>BFillNan(ElemOperator)<block_start>"""BFillNan Operator
Parameters
----------
feature : Expression
feature instance
Returns
----------
feature:
a backfoward fill nan feature
"""<def_stmt>_load_internal self instrument start_index end_index freq<block_start>series=self.feature.load(instrument start_index end_index freq)<line_sep><return>series.fillna(method="bfill")<block_end><block_end><class_stmt>Date(ElemOperator)<block_start>"""Date Operator
Parameters
----------
feature : Expression
feature instance
Returns
----------
feature:
a series of that each value is the date corresponding to feature.index
"""<def_stmt>_load_internal self instrument start_index end_index freq<block_start>_calendar=get_calendar_day(freq=freq)<line_sep>series=self.feature.load(instrument start_index end_index freq)<line_sep><return>pd.Series(_calendar[series.index] index=series.index)<block_end><block_end><class_stmt>Select(PairOperator)<block_start>"""Select Operator
Parameters
----------
feature_left : Expression
feature instance, select condition
feature_right : Expression
feature instance, select value
Returns
----------
feature:
value(feature_right) that meets the condition(feature_left)
"""<def_stmt>_load_internal self instrument start_index end_index freq<block_start>series_condition=self.feature_left.load(instrument start_index end_index freq)<line_sep>series_feature=self.feature_right.load(instrument start_index end_index freq)<line_sep><return>series_feature.loc[series_condition]<block_end><block_end><class_stmt>IsNull(ElemOperator)<block_start>"""IsNull Operator
Parameters
----------
feature : Expression
feature instance
Returns
----------
feature:
A series indicating whether the feature is nan
"""<def_stmt>_load_internal self instrument start_index end_index freq<block_start>series=self.feature.load(instrument start_index end_index freq)<line_sep><return>series.isnull()<block_end><block_end><class_stmt>Cut(ElemOperator)<block_start>"""Cut Operator
Parameters
----------
feature : Expression
feature instance
l : int
l > 0, delete the first l elements of feature (default is None, which means 0)
r : int
r < 0, delete the last -r elements of feature (default is None, which means 0)
Returns
----------
feature:
A series with the first l and last -r elements deleted from the feature.
Note: It is deleted from the raw data, not the sliced data
"""<def_stmt>__init__ self feature l=<none> r=<none><block_start>self.l=l<line_sep>self.r=r<if_stmt>(self.l<is><not><none><and>self.l<le>0)<or>(self.r<is><not><none><and>self.r<ge>0)<block_start><raise>ValueError("Cut operator l shoud > 0 and r should < 0")<block_end>super(Cut self).__init__(feature)<block_end><def_stmt>_load_internal self instrument start_index end_index freq<block_start>series=self.feature.load(instrument start_index end_index freq)<line_sep><return>series.iloc[self.l:self.r]<block_end><def_stmt>get_extended_window_size self<block_start>ll=0<if>self.l<is><none><else>self.l<line_sep>rr=0<if>self.r<is><none><else>abs(self.r)<line_sep>lft_etd,rght_etd=self.feature.get_extended_window_size()<line_sep>lft_etd=lft_etd+ll<line_sep>rght_etd=rght_etd+rr<line_sep><return>lft_etd rght_etd<block_end><block_end> |
<import_stmt>consus<line_sep>c1=consus.Client()<line_sep>t1=c1.begin_transaction()<line_sep>t1.commit()<line_sep>c2=consus.Client(b'127.0.0.1')<line_sep>t2=c1.begin_transaction()<line_sep>t2.commit()<line_sep>c3=consus.Client('127.0.0.1')<line_sep>t3=c1.begin_transaction()<line_sep>t3.commit()<line_sep>c4=consus.Client(b'127.0.0.1' 1982)<line_sep>t4=c1.begin_transaction()<line_sep>t4.commit()<line_sep>c5=consus.Client('127.0.0.1' 1982)<line_sep>t5=c1.begin_transaction()<line_sep>t5.commit()<line_sep>c6=consus.Client(b'127.0.0.1:1982')<line_sep>t6=c1.begin_transaction()<line_sep>t6.commit()<line_sep>c7=consus.Client('127.0.0.1:1982')<line_sep>t7=c1.begin_transaction()<line_sep>t7.commit()<line_sep>c8=consus.Client(b'[::]:1982,127.0.0.1:1982')<line_sep>t8=c1.begin_transaction()<line_sep>t8.commit()<line_sep>c9=consus.Client('[::]:1982,127.0.0.1:1982')<line_sep>t9=c1.begin_transaction()<line_sep>t9.commit()<line_sep> |
<import_stmt>hyperopt<import_stmt>csv<import_stmt>json<import_stmt>traceback<import_stmt>os.path<import_from_stmt>pprint pprint<import_stmt>datetime<import_stmt>time<import_stmt>numpy.random<import_stmt>threading<import_stmt>queue<import_stmt>copy<import_stmt>tempfile<import_stmt>random<import_stmt>subprocess<import_stmt>concurrent.futures<import_stmt>tempfile<import_stmt>functools<import_stmt>math<import_stmt>atexit<import_stmt>jsonschema<import_stmt>pkg_resources<import_from_stmt>hypermax.execution Execution<import_from_stmt>hypermax.hyperparameter Hyperparameter<import_from_stmt>hypermax.results_analyzer ResultsAnalyzer<import_from_stmt>hypermax.algorithms.atpe_optimizer ATPEOptimizer<import_from_stmt>hypermax.algorithms.human_guided_optimizer_wrapper HumanGuidedOptimizerWrapper<import_from_stmt>hypermax.algorithms.tpe_optimizer TPEOptimizer<import_from_stmt>hypermax.algorithms.random_search_optimizer RandomSearchOptimizer<import_from_stmt>hypermax.algorithms.adaptive_bayesian_hyperband_optimizer AdaptiveBayesianHyperband<import_from_stmt>hypermax.configuration Configuration<class_stmt>Optimizer<block_start>resultInformationKeys=['trial' 'status' 'loss' 'time' 'log' 'error']<def_stmt>__init__ self configuration<block_start>self.config=Configuration(configuration)<line_sep>self.searchConfig=configuration.get('search' {})<line_sep># jsonschema.validate(self.searchConfig, self.configurationSchema())
self.space=self.config.createHyperparameterSpace()<line_sep>self.threadExecutor=concurrent.futures.ThreadPoolExecutor()<line_sep>self.resultsAnalyzer=ResultsAnalyzer(configuration)<line_sep>self.results=[]<line_sep>self.resultFutures=[]<line_sep>self.best=<none><line_sep>self.bestLoss=<none><line_sep>self.thread=threading.Thread(target=<lambda>:self.optimizationThread() daemon=<true><if>configuration.get("ui" {}).get("enabled" <true>)<else><false>)<line_sep>self.totalTrials=self.searchConfig.get("iterations")<line_sep>self.trialsSinceResultsUpload=<none><line_sep>self.resultsExportFuture=<none><line_sep>self.currentTrials=[]<line_sep>self.allWorkers=set(range(self.config.data['function'].get('parallel' 1)))<line_sep>self.occupiedWorkers=set()<line_sep>self.trialNumber=0<line_sep>self.lastATPEParameters=<none><line_sep>self.lastLockedParameters=<none><line_sep>self.atpeParamDetails=<none><line_sep>self.tpeOptimizer=TPEOptimizer()<line_sep>self.atpeOptimizer=ATPEOptimizer()<line_sep>self.abhOptimizer=AdaptiveBayesianHyperband(self.atpeOptimizer self.searchConfig.get("min_budget" 1) self.searchConfig.get("max_budget" 100) self.searchConfig.get("eta" 3))<line_sep>self.humanGuidedATPEOptimizer=HumanGuidedOptimizerWrapper(self.atpeOptimizer)<line_sep>self.randomSearchOptimizer=RandomSearchOptimizer()<block_end><def_stmt>__del__ self<block_start><if_stmt>self.threadExecutor<block_start>self.threadExecutor.shutdown(wait=<true>)<block_end><block_end>@classmethod<def_stmt>configurationSchema self<block_start>""" This method returns the configuration schema for the optimization module. The schema
is a standard JSON-schema object."""<line_sep><return>{"type":"object" "properties":{"method":{"type":"string" "enum":['atpe' 'tpe' 'random']} "iterations":{"type":"number"} "budget":{"type":"number"}} "required":['method' 'iterations']}<block_end><def_stmt>completed self<block_start><return>len(self.results)<block_end><def_stmt>sampleNext self<block_start><if_stmt>self.searchConfig['method']<eq>'tpe'<block_start><return>self.tpeOptimizer.recommendNextParameters(self.config.data['hyperparameters'] self.results self.currentTrials)<block_end><elif_stmt>self.searchConfig['method']<eq>'random'<block_start><return>self.randomSearchOptimizer.recommendNextParameters(self.config.data['hyperparameters'] self.results self.currentTrials)<block_end><elif_stmt>self.searchConfig['method']<eq>'atpe'<block_start>params=self.humanGuidedATPEOptimizer.recommendNextParameters(self.config.data['hyperparameters'] self.results self.currentTrials)<line_sep>self.lastATPEParameters=self.atpeOptimizer.lastATPEParameters<line_sep>self.lastLockedParameters=self.atpeOptimizer.lastLockedParameters<line_sep>self.atpeParamDetails=self.atpeOptimizer.atpeParamDetails<line_sep><return>params<block_end><elif_stmt>self.searchConfig['method']<eq>'abh'<block_start>params=self.abhOptimizer.recommendNextParameters(self.config.data['hyperparameters'] self.results self.currentTrials)<line_sep>self.lastATPEParameters=self.atpeOptimizer.lastATPEParameters<line_sep>self.lastLockedParameters=self.atpeOptimizer.lastLockedParameters<line_sep>self.atpeParamDetails=self.atpeOptimizer.atpeParamDetails<line_sep><return>params<block_end><block_end><def_stmt>computeCurrentBest self<block_start>best=<none><line_sep>bestLoss=<none><for_stmt>result 
self.results<block_start><if_stmt>(best<is><none><and>result['loss']<is><not><none>)<or>(result['loss']<is><not><none><and>result['loss']<l>bestLoss)<block_start>best=result<line_sep>bestLoss=result['loss']<block_end><block_end>self.best=best<line_sep>self.bestLoss=bestLoss<block_end><def_stmt>startOptmizationJob self<block_start>availableWorkers=list(sorted(self.allWorkers.difference(self.occupiedWorkers)))<line_sep>sampleWorker=availableWorkers[0]<line_sep>sample=<none><while_stmt>sample<is><none># Hedge against any exceptions in the atpe optimizer.
<block_start><try_stmt><block_start>sample=self.sampleNext()<block_end><except_stmt>Exception<block_start>traceback.print_exc()<line_sep><pass><block_end><block_end><def_stmt>testSample params trial worker<block_start>currentTrial={"start":datetime.datetime.now() "trial":trial "worker":worker "params":copy.deepcopy(params)}<line_sep>self.currentTrials.append(currentTrial)<line_sep>start=datetime.datetime.now()<line_sep>execution=Execution(self.config.data['function'] parameters=params worker_n=worker)<line_sep>modelResult=execution.run()<line_sep>end=datetime.datetime.now()<line_sep>result=Hyperparameter(self.config.data['hyperparameters']).convertToFlatValues(params)<for_stmt>key params.keys()<block_start><if_stmt>key.startswith("$")<block_start>result[key]=params[key]<block_end><block_end>result['trial']=trial<line_sep>self.resultsAnalyzer.makeDirs(os.path.join(self.resultsAnalyzer.directory "logs"))<if_stmt>'loss'<in>modelResult<block_start>result['loss']=modelResult['loss']<block_end><elif_stmt>'accuracy'<in>modelResult<block_start>result['loss']=modelResult['accuracy']<block_end><if_stmt>'status'<in>modelResult<block_start>result['status']=modelResult['status']<block_end><else_stmt><block_start>result['status']='ok'<block_end><if_stmt>'log'<in>modelResult<block_start>fileName=os.path.join(self.resultsAnalyzer.directory "logs" "trial_"+str(trial)+".txt")<with_stmt>open(fileName "wt")<as>file<block_start>file.write(modelResult['log'])<block_end>result['log']=fileName<block_end><else_stmt><block_start>result['log']=''<block_end><if_stmt>'error'<in>modelResult<block_start>result['error']=modelResult['error']<block_end><else_stmt><block_start>result['error']=''<block_end><if_stmt>'time'<in>modelResult<block_start>result['time']=modelResult['time']<block_end><else_stmt><block_start>result['time']=(end-start).total_seconds()<block_end>self.currentTrials.remove(currentTrial)<line_sep><return>result<block_end><def_stmt>onCompletion worker 
future<block_start>self.occupiedWorkers.remove(worker)<line_sep>self.results.append(future.result())<line_sep>self.computeCurrentBest()<if_stmt><not>self.config.data.get("ui" {}).get("enabled" <true>)<block_start>pprint(future.result())<block_end><if_stmt>self.resultsExportFuture<is><none><or>(self.resultsExportFuture.done()<and>len(self.results)<g>5)<block_start>self.resultsExportFuture=self.threadExecutor.submit(<lambda>:self.outputResultsWithBackup(self.config.data.get("results" {}).get("graphs" <true>)))<block_end><else_stmt><block_start>self.outputResultsWithBackup(<false>)<block_end><if_stmt>'hypermax_results'<in>self.config.data<block_start><if_stmt>self.trialsSinceResultsUpload<is><none><or>self.trialsSinceResultsUpload<ge>self.config.data['hypermax_results']['upload_frequency']<block_start>self.saveResultsToHypermaxResultsRepository()<line_sep>self.trialsSinceResultsUpload=1<block_end><else_stmt><block_start>self.trialsSinceResultsUpload<augadd>1<block_end><block_end><block_end>self.occupiedWorkers.add(sampleWorker)<line_sep>sampleFuture=self.threadExecutor.submit(testSample sample self.trialNumber sampleWorker)<line_sep>sampleFuture.add_done_callback(functools.partial(onCompletion sampleWorker))<line_sep>self.trialNumber<augadd>1<line_sep><return>sampleFuture<block_end><def_stmt>runOptimizationThread self<block_start>self.thread.start()<block_end><def_stmt>outputResultsWithBackup self graphs workers=1<block_start>self.resultsAnalyzer.outputResultsFolder(self graphs workers=workers)<line_sep>directory_head,directory_tail=os.path.split(self.resultsAnalyzer.directory)<line_sep>backup_directory=os.path.join(directory_head ".backup_"+directory_tail+"~")<line_sep>self.resultsAnalyzer.outputResultsFolder(self graphs directory=backup_directory workers=workers)<block_end><def_stmt>optimizationThread self# Make sure we output basic results if the process is killed for some reason.
<block_start>atexit.register(<lambda>:self.outputResultsWithBackup(<false>))<line_sep>futures=[]<for_stmt>worker range(min(len(self.allWorkers) self.totalTrials-len(self.results)))<block_start>futures.append(self.startOptmizationJob())<line_sep>time.sleep(1.0)<block_end><while_stmt>(len(self.results)+len(self.currentTrials))<l>self.totalTrials<block_start>completedFuture=list(concurrent.futures.wait(futures return_when=concurrent.futures.FIRST_COMPLETED)[0])[0]<line_sep>futures.remove(completedFuture)<line_sep>time.sleep(0.05)<line_sep>futures.append(self.startOptmizationJob())<block_end>concurrent.futures.wait(futures)<line_sep># We are completed, so we can allocate a full contingent of workers
self.outputResultsWithBackup(<true> workers=4)<block_end><def_stmt>exportGuidanceJSON self fileName<block_start><with_stmt>open(fileName 'wt')<as>file<block_start>json.dump(self.humanGuidedATPEOptimizer.guidanceOptions file indent=4 sort_keys=<true>)<block_end><block_end><def_stmt>importGuidanceJSON self fileName<block_start><with_stmt>open(fileName 'rt')<as>file<block_start>self.humanGuidedATPEOptimizer.guidanceOptions=json.load(file)<block_end><block_end><def_stmt>exportResultsCSV self fileName<block_start>allKeys=set()<for_stmt>result self.results<block_start><for_stmt>key result<block_start>allKeys.add(key)<block_end><block_end>fieldNames=self.resultInformationKeys+sorted(allKeys.difference(set(self.resultInformationKeys)))# Make sure we keep the order of the field names consistent when writing the csv
<with_stmt>open(fileName 'wt')<as>file<block_start>writer=csv.DictWriter(file fieldnames=fieldNames<if>len(self.results)<g>0<else>[] dialect='unix')<line_sep>writer.writeheader()<line_sep>writer.writerows(self.results)<block_end><block_end><def_stmt>importResultsCSV self fileName<block_start><with_stmt>open(fileName)<as>file<block_start>reader=csv.DictReader(file)<line_sep>results=list(reader)<line_sep>newResults=[]<for_stmt>result results<block_start>newResult={}<for_stmt>key,value result.items()<block_start><if_stmt>value<is><not><none><and>value<ne>""<block_start><try_stmt><block_start><if_stmt>'.'<in>value<or>'e'<in>value<block_start>newResult[key]=float(value)<block_end><else_stmt><block_start>newResult[key]=int(value)<block_end><block_end><except_stmt>ValueError<block_start>newResult[key]=value<block_end><block_end><elif_stmt>key<eq>'loss'<block_start>newResult[key]=<none><block_end><elif_stmt>key<eq>'log'<block_start>newResult[key]=''<block_end><else_stmt><block_start>newResult[key]=<none><block_end><block_end>newResults.append(newResult)<block_end>self.results=newResults<block_end>self.computeCurrentBest()<line_sep>self.trialNumber=len(self.results)<block_end><def_stmt>saveResultsToHypermaxResultsRepository self<block_start><try_stmt><block_start>hypermaxResultsConfig=self.config.data['hypermax_results']<with_stmt>tempfile.TemporaryDirectory()<as>directory<block_start>process=subprocess.run(['git' 'clone' '<EMAIL>:electricbrainio/hypermax-results.git'] cwd=directory stdout=subprocess.PIPE stderr=subprocess.PIPE)<line_sep>hypermaxResultsDirectory=os.path.join(directory 'hypermax-results' hypermaxResultsConfig['name'])<line_sep>self.resultsAnalyzer.outputResultsFolder(self detailed=<false> directory=hypermaxResultsDirectory)<with_stmt>open(os.path.join(hypermaxResultsDirectory "metadata.json") 'wt')<as>file<block_start>json.dump(self.config.data['hypermax_results'] file indent=4)<block_end>process=subprocess.run(['git' 'add' hypermaxResultsDirectory] 
cwd=os.path.join(directory 'hypermax-results'))<line_sep>process=subprocess.run(['git' 'commit' '-m' 'Hypermax automatically storing results for model '+hypermaxResultsConfig['name']+' with '+str(len(self.results))+" trials."] cwd=os.path.join(directory 'hypermax-results') stdout=subprocess.PIPE stderr=subprocess.PIPE)<line_sep>process=subprocess.run(['git push'] cwd=os.path.join(directory 'hypermax-results') stdout=subprocess.PIPE stderr=subprocess.PIPE shell=<true>)<block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end><block_end><block_end> |
<import_from_stmt>lucid.misc.io.showing show<import_from_stmt>lucid.misc.io.loading load<import_from_stmt>lucid.misc.io.saving save CaptureSaveContext batch_save<import_from_stmt>lucid.misc.io.scoping io_scope scope_url<line_sep> |
<class_stmt>SearchModule<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>search_for_competition_by_name self competitions query<block_start>m,answer=self.search(competitions attribute_name="caption" query=query)<if_stmt>m<eq>0<block_start><return><false><block_end><return>answer<block_end><def_stmt>search_for_competition_by_code self competitions query<block_start><return>self.search_by_code(competitions attribute_name="league" query=query)<block_end><def_stmt>search_for_team_by_name self teams query<block_start>m,answer=self.search(teams attribute_name="name" query=query)<if_stmt>m<eq>0<block_start><return><false><block_end><return>answer<block_end><def_stmt>search_for_team_by_code self teams query<block_start><return>self.search_by_code(teams attribute_name="code" query=query)<block_end><def_stmt>search_for_player_by_name self players query<block_start>m,answer=self.search(players attribute_name="name" query=query)<if_stmt>m<eq>0<block_start><return><false><block_end><return>answer<block_end><def_stmt>search_for_team_from_standing_by_name self teams query<block_start>m,answer=self.search(teams attribute_name="team_name" query=query)<if_stmt>m<eq>0<block_start><return><false><block_end><return>answer<block_end>@staticmethod<def_stmt>search_by_code dataset attribute_name query<block_start>search=query.lower()<for_stmt>index,data enumerate(dataset)<block_start>code=getattr(data attribute_name).lower()<if_stmt>code<eq>search<block_start><return>dataset[index]<block_end><block_end><return><false><block_end>@staticmethod<def_stmt>search dataset attribute_name query<block_start>values=[0<for>_ range(0 len(dataset))]<line_sep>search=query.lower().split()<line_sep>upper_threshold=len(search)<for_stmt>index,data enumerate(dataset)<block_start>data_name=getattr(data attribute_name).lower()<line_sep>search_array=data_name.split()<for_stmt>index2,text 
enumerate(search_array)<block_start><if_stmt>index2<ge>upper_threshold<block_start><break><block_end>threshold=len(search[index2])<for_stmt>i range(0 len(text))<block_start><if_stmt>i<ge>threshold-1<block_start><break><block_end><if_stmt>text[i]<eq>search[index2][i]<block_start>values[index]<augadd>1<block_end><block_end><block_end><block_end>max_value=max(values)<line_sep>max_index=values.index(max_value)<line_sep><return>max_value dataset[max_index]<block_end><block_end> |
# -*- coding: utf-8 -*-
"""
@author : <NAME>
@github : https://github.com/tianpangji
@software : PyCharm
@file : crud.py
@create : 2020/12/9 20:44
"""<import_from_stmt>django.contrib.contenttypes.models ContentType<import_from_stmt>easyaudit.models CRUDEvent<import_from_stmt>rest_framework serializers<class_stmt>CRUDSerializer(serializers.ModelSerializer)<block_start>event_type_display=serializers.SerializerMethodField()<line_sep>datetime=serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S' read_only=<true>)<line_sep>username=serializers.SerializerMethodField()<line_sep>content_type_display=serializers.SerializerMethodField()<class_stmt>Meta<block_start>model=CRUDEvent<line_sep>fields=['id' 'event_type_display' 'datetime' 'username' 'content_type_display' 'object_id' 'changed_fields']<block_end><def_stmt>get_event_type_display self obj<block_start><return>obj.get_event_type_display()<block_end><def_stmt>get_username self obj<block_start><try_stmt><block_start>username=obj.user.username<block_end><except_stmt>AttributeError<block_start>username='未知'<block_end><return>username<block_end><def_stmt>get_content_type_display self obj<block_start>content_type=ContentType.objects.get(id=obj.content_type_id)<line_sep><return>content_type.app_label+'.'+content_type.model<block_end><def_stmt>to_representation self instance<block_start>ret=super().to_representation(instance)<if_stmt>ret.get('changed_fields')<eq>'null'<block_start>ret['changed_fields']=''<block_end><return>ret<block_end><block_end> |
<import_stmt>time<import_from_stmt>selenium webdriver<import_from_stmt>lxml etree<line_sep>driver=webdriver.PhantomJS(executable_path='./phantomjs-2.1.1-macosx/bin/phantomjs')<line_sep># 获取第一页的数据
<def_stmt>get_html <block_start>url="https://detail.tmall.com/item.htm?id=531993957001&skuId=3609796167425&user_id=268451883&cat_id=2&is_b=1&rn=71b9b0aeb233411c4f59fe8c610bc34b"<line_sep>driver.get(url)<line_sep>time.sleep(5)<line_sep>driver.execute_script('window.scrollBy(0,3000)')<line_sep>time.sleep(2)<line_sep>driver.execute_script('window.scrollBy(0,5000)')<line_sep>time.sleep(2)<line_sep># 累计评价
btnNext=driver.find_element_by_xpath('//*[@id="J_TabBar"]/li[3]/a')<line_sep>btnNext.click()<line_sep>html=driver.page_source<line_sep><return>html<block_end><def_stmt>get_comments html<block_start>source=etree.HTML(html)<line_sep>commens=source.xpath("//*[@id='J_TabBar']/li[3]/a/em/text()")<line_sep>print('评论数:' commens)<line_sep># 将评论转为int类型
commens=(int(commens[0])/20)+1<line_sep># 获取到总评论
print('评论数:' int(commens))<line_sep><return>int(commens)<block_end><def_stmt>parse_html html<block_start>html=etree.HTML(html)<line_sep>commentlist=html.xpath("//*[@class='rate-grid']/table/tbody")<for_stmt>comment commentlist# 评论
<block_start>vercomment=comment.xpath("./tr/td[@class='tm-col-master']/div[@class='tm-rate-content']/div[@class='tm-rate-fulltxt']/text()")<line_sep># 机器类型
verphone=comment.xpath("./tr/td[@class='col-meta']/div[@class='rate-sku']/p[@title]/text()")<line_sep>print(vercomment)<line_sep>print(verphone)<line_sep># 用户(头尾各一个字,中间用****代替)
veruser=comment.xpath("./tr/td[@class='col-author']/div[@class='rate-user-info']/text()")<line_sep>print(veruser)<block_end><block_end><def_stmt>next_button_work num<block_start><if_stmt>num<ne>0<block_start>driver.execute_script('window.scrollBy(0,3000)')<line_sep>time.sleep(2)<try_stmt><block_start>driver.find_element_by_css_selector('#J_Reviews > div > div.rate-page > div > a:last-child').click()<block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end>time.sleep(2)<line_sep>driver.execute_script('window.scrollBy(0,3000)')<line_sep>time.sleep(2)<line_sep>driver.execute_script('window.scrollBy(0,5000)')<line_sep>time.sleep(2)<line_sep>html=driver.page_source<line_sep>parse_html(html)<block_end><block_end><def_stmt>selenuim_work html<block_start>parse_html(html)<line_sep>next_button_work(1)<line_sep><pass><block_end><def_stmt>gettotalpagecomments comments<block_start>html=get_html()<for_stmt>i range(0 comments)<block_start>selenuim_work(html)<block_end><block_end>data=get_html()<line_sep># 得到评论
commens=get_comments(data)<line_sep># 根据评论内容进行遍历
gettotalpagecomments(commens)<line_sep> |
<import_from_stmt>django.urls path re_path<import_from_stmt>cities.views CityCreateView CityListView CityDetailView<line_sep>app_name='cities'<line_sep>urlpatterns=[path('' CityListView.as_view() name="list") re_path(r'^(?P<pk>\d+)/$' CityDetailView.as_view() name="detail") re_path(r'^create/$' CityCreateView.as_view() name="create") ]<line_sep> |
<import_stmt>unittest<import_stmt>os<import_stmt>re<import_stmt>theano<import_from_stmt>theano tensor<class_stmt>FunctionName(unittest.TestCase)<block_start><def_stmt>test_function_name self<block_start>x=tensor.vector('x')<line_sep>func=theano.function([x] x+1.)<line_sep>regex=re.compile(os.path.basename('.*test_function_name.pyc?:13'))<assert_stmt>(regex.match(func.name)<is><not><none>)<block_end><block_end> |
<import_from_stmt>uuid UUID<import_from_stmt>datetime datetime<def_stmt>uuid_from_string string<block_start><return>UUID('{s}'.format(s=string))<block_end><def_stmt>format_timestamp string<block_start><if_stmt>isinstance(string str)<block_start><return>datetime.strptime(string '%Y-%m-%dT%H:%M:%S.%fZ')<block_end><if_stmt>isinstance(string datetime)<block_start><return>string<block_end><block_end> |
<import_from_stmt>. pool<import_from_stmt>. multiprocesslogging<import_from_stmt>. tools<line_sep> |
# Copyright 2019 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>argparse<import_stmt>json<import_stmt>mock<import_stmt>pytest<import_stmt>staticconf<import_stmt>staticconf.testing<import_stmt>yaml<import_stmt>clusterman.config<as>config<import_from_stmt>clusterman.config POOL_NAMESPACE<import_from_stmt>tests.conftest mock_open<line_sep>@pytest.fixture<def_stmt>mock_config_files <block_start><with_stmt>staticconf.testing.PatchConfiguration({"cluster_config_directory":"/nail/whatever"}) mock_open(config.get_pool_config_path("cluster-A" "pool-1" "mesos") contents=yaml.dump({"resource_groups":"cluster-A" "other_config":18 }) ) mock_open(config.get_pool_config_path("cluster-A" "pool-2" "mesos") contents=yaml.dump({"resource_groups":"cluster-A" "other_config":20 }) ) mock_open(config.get_pool_config_path("cluster-A" "pool-2" "kubernetes") contents=yaml.dump({"resource_groups":"cluster-A" "other_config":29 }) ) mock_open(config.get_pool_config_path("cluster-B" "pool-1" "mesos") contents=yaml.dump({"resource_groups":"cluster-B" "other_config":200 "autoscale_signal":{"branch_or_tag":"v42"} }) ) mock_open("/etc/no_cfg/clusterman.json" contents=json.dumps({"accessKeyId":"foo" "secretAccessKey":"bar" "region":"nowhere-useful" }) )<block_start><yield><block_end><block_end>@pytest.fixture(autouse=<true>)<def_stmt>mock_config_namespaces # To avoid polluting staticconf for other tests, and clear out stuff from conftest that mocks configuration
<block_start><with_stmt>staticconf.testing.MockConfiguration({} namespace=POOL_NAMESPACE.format(pool="pool-1" scheduler="mesos") ) staticconf.testing.MockConfiguration({} namespace=POOL_NAMESPACE.format(pool="pool-2" scheduler="mesos") ) staticconf.testing.MockConfiguration({"clusters":{"cluster-A":{"mesos_url_api":"service.leader" "aws_region":"us-test-3" } } "aws":{"access_key_file":"/etc/no_cfg/clusterman.json" } } namespace=staticconf.config.DEFAULT )<block_start><yield><block_end><block_end>@pytest.mark.parametrize("cluster,pool,scheduler,tag" [("cluster-A" "pool-1" "mesos" <none>) ("cluster-A" "pool-2" "mesos" "v52") ("cluster-A" "pool-2" "kubernetes" <none>) ("cluster-A" <none> "mesos" <none>) ] )<def_stmt>test_setup_config_cluster cluster pool scheduler tag mock_config_files<block_start>args=argparse.Namespace(env_config_path="/nail/etc/config.yaml" cluster=cluster pool=pool scheduler=scheduler signals_branch_or_tag=tag )<with_stmt>mock.patch("clusterman.config.load_cluster_pool_config" autospec=<true> )<as>mock_pool_load mock.patch("clusterman.config._load_module_configs" )<as>mock_load_module_configs<block_start>config.setup_config(args)<assert_stmt>mock_load_module_configs.call_args<eq>mock.call("/nail/etc/config.yaml")<assert_stmt>staticconf.read_string("aws.region")<eq>"us-test-3"<if_stmt>pool<block_start><assert_stmt>mock_pool_load.call_args<eq>mock.call(cluster pool scheduler tag)<block_end><else_stmt><block_start><assert_stmt>mock_pool_load.call_count<eq>0<if_stmt>tag<block_start><assert_stmt>staticconf.read_string("autoscale_signal.branch_or_tag")<eq>tag<block_end><block_end><block_end><block_end><def_stmt>test_setup_config_region_and_cluster <block_start>args=argparse.Namespace(env_config_path="/nail/etc/config.yaml" cluster="foo" aws_region="bar" )<with_stmt>mock.patch("clusterman.config._load_module_configs") 
pytest.raises(argparse.ArgumentError)<block_start>config.setup_config(args)<block_end><block_end>@mock.patch("clusterman.config._load_module_configs")<def_stmt>test_setup_config_region mock_load_module_configs mock_config_files<block_start>args=argparse.Namespace(env_config_path="/nail/etc/config.yaml" aws_region="fake-region-A" )<line_sep>config.setup_config(args)<assert_stmt>staticconf.read_string("aws.region")<eq>"fake-region-A"<assert_stmt>mock_load_module_configs.call_args<eq>mock.call("/nail/etc/config.yaml")<block_end>@pytest.mark.parametrize("cluster,pool,pool_other_config" [("cluster-B" "pool-1" 200)])<def_stmt>test_load_cluster_pool_config cluster pool pool_other_config mock_config_files<block_start>config.load_cluster_pool_config(cluster pool "mesos" <none>)<line_sep>pool_namespace=POOL_NAMESPACE.format(pool=pool scheduler="mesos")<assert_stmt>staticconf.read_int("other_config" namespace=pool_namespace)<eq>pool_other_config<assert_stmt>staticconf.read_string("resource_groups" namespace=pool_namespace)<eq>cluster<block_end> |
<import_from_future_stmt> absolute_import unicode_literals<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>permissions PermissionNamespace<line_sep>namespace=PermissionNamespace('acls' _('Access control lists'))<line_sep>permission_acl_edit=namespace.add_permission(name='acl_edit' label=_('Edit ACLs'))<line_sep>permission_acl_view=namespace.add_permission(name='acl_view' label=_('View ACLs'))<line_sep> |
<import_stmt>pytest<line_sep>pytestmark=[pytest.mark.django_db]<def_stmt>test_order_without_items order<block_start>order=order()<assert_stmt>order.item<is><none><block_end><def_stmt>test_order_with_record order record<block_start>order=order(record=record)<assert_stmt>order.item<eq>record<block_end><def_stmt>test_order_with_course order course<block_start>order=order(course=course)<assert_stmt>order.item<eq>course<block_end><def_stmt>test_order_with_bundle order bundle<block_start>order=order(bundle=bundle)<assert_stmt>order.item<eq>bundle<block_end> |
<import_from_stmt>expects expect contain be_an<class_stmt>Bacon<block_start><ellipsis><block_end>sanduiche='sanduiche com queijo'<line_sep>expect(sanduiche).to(contain('queijo'))<line_sep>expect(sanduiche).to_not(be_an(Bacon))<line_sep> |
__all__=("Node" "DefinitionNode" "ExecutableDefinitionNode" "TypeSystemDefinitionNode" "TypeSystemExtensionNode" "TypeDefinitionNode" "TypeExtensionNode" "SelectionNode" "ValueNode" "TypeNode" )<class_stmt>Node<block_start>__slots__=()<block_end><class_stmt>DefinitionNode(Node)<block_start>__slots__=()<block_end><class_stmt>ExecutableDefinitionNode(DefinitionNode)<block_start>__slots__=()<block_end><class_stmt>TypeSystemDefinitionNode(DefinitionNode)<block_start>__slots__=()<block_end><class_stmt>TypeSystemExtensionNode(DefinitionNode)<block_start>__slots__=()<block_end><class_stmt>TypeDefinitionNode(TypeSystemDefinitionNode)<block_start>__slots__=()<block_end><class_stmt>TypeExtensionNode(TypeSystemExtensionNode)<block_start>__slots__=()<block_end><class_stmt>SelectionNode(Node)<block_start>__slots__=()<block_end><class_stmt>ValueNode(Node)<block_start>__slots__=()<block_end><class_stmt>TypeNode(Node)<block_start>__slots__=()<block_end> |
<import_stmt>torch<import_from_stmt>transformers TransfoXLLMHeadModel TransfoXLTokenizer<line_sep>device=torch.device("cuda:0"<if>torch.cuda.is_available()<else>"cpu")<line_sep># Instantiate pre-trained model-specific tokenizer and the model itself
tokenizer=TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')<line_sep>model=TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103').to(device)<line_sep># Initial input sequence
text="The company was founded in"<line_sep>tokens_tensor=torch.tensor(tokenizer.encode(text)).unsqueeze(0).to(device)<line_sep>mems=<none># recurrence mechanism
predicted_tokens=list()<for_stmt>i range(50)# stop at 50 predicted tokens
# Generate predictions
<block_start>predictions,mems=model(tokens_tensor mems=mems)<line_sep># Get most probable word index
predicted_index=torch.topk(predictions[0 -1 :] 1)[1]<line_sep># Extract the word from the index
predicted_token=tokenizer.decode(predicted_index)<line_sep># break if [EOS] reached
<if_stmt>predicted_token<eq>tokenizer.eos_token<block_start><break><block_end># Store the current token
predicted_tokens.append(predicted_token)<line_sep># Append new token to the existing sequence
tokens_tensor=torch.cat((tokens_tensor predicted_index.unsqueeze(1)) dim=1)<block_end>print('Initial sequence: '+text)<line_sep>print('Predicted output: '+" ".join(predicted_tokens))<line_sep> |
<import_from_stmt>tensorflow.keras.layers Conv2D Dense Flatten MaxPooling2D TimeDistributed <def_stmt>VGG16 inputs<block_start>x=Conv2D(64 (3 3) activation='relu' padding='same' name='block1_conv1')(inputs)<line_sep>x=Conv2D(64 (3 3) activation='relu' padding='same' name='block1_conv2')(x)<line_sep>x=MaxPooling2D((2 2) strides=(2 2) name='block1_pool')(x)<line_sep>x=Conv2D(128 (3 3) activation='relu' padding='same' name='block2_conv1')(x)<line_sep>x=Conv2D(128 (3 3) activation='relu' padding='same' name='block2_conv2')(x)<line_sep>x=MaxPooling2D((2 2) strides=(2 2) name='block2_pool')(x)<line_sep>x=Conv2D(256 (3 3) activation='relu' padding='same' name='block3_conv1')(x)<line_sep>x=Conv2D(256 (3 3) activation='relu' padding='same' name='block3_conv2')(x)<line_sep>x=Conv2D(256 (3 3) activation='relu' padding='same' name='block3_conv3')(x)<line_sep>x=MaxPooling2D((2 2) strides=(2 2) name='block3_pool')(x)<line_sep># 第四个卷积部分
# 14,14,512
x=Conv2D(512 (3 3) activation='relu' padding='same' name='block4_conv1')(x)<line_sep>x=Conv2D(512 (3 3) activation='relu' padding='same' name='block4_conv2')(x)<line_sep>x=Conv2D(512 (3 3) activation='relu' padding='same' name='block4_conv3')(x)<line_sep>x=MaxPooling2D((2 2) strides=(2 2) name='block4_pool')(x)<line_sep># 第五个卷积部分
# 7,7,512
x=Conv2D(512 (3 3) activation='relu' padding='same' name='block5_conv1')(x)<line_sep>x=Conv2D(512 (3 3) activation='relu' padding='same' name='block5_conv2')(x)<line_sep>x=Conv2D(512 (3 3) activation='relu' padding='same' name='block5_conv3')(x)<line_sep><return>x<block_end><def_stmt>vgg_classifier_layers x# num_rois, 14, 14, 1024 -> num_rois, 7, 7, 2048
<block_start>x=TimeDistributed(Flatten(name='flatten'))(x)<line_sep>x=TimeDistributed(Dense(4096 activation='relu') name='fc1')(x)<line_sep>x=TimeDistributed(Dense(4096 activation='relu') name='fc2')(x)<line_sep><return>x<block_end> |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/always-newbie161/probml-notebooks/blob/jax_vdvae/notebooks/vdvae_jax_cifar_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="cTSe7I6g45v8"
# This notebook shows demo working with vdvae in jax and the code used is from [vdvae-jax](https://github.com/j-towns/vdvae-jax) from [<NAME>](https://j-towns.github.io/)
# + [markdown] id="PxtpxTPEMS4C"
# ## Setup
# + id="ipHVirxUHTDJ"
<import_from_stmt>google.colab auth<line_sep>auth.authenticate_user()<line_sep># + colab={"base_uri": "https://localhost:8080/"} id="Z6gM2ytSHnO0" outputId="3e63de9d-6808-4cd9-eb1f-08996a6a7fed"
project_id='probml'<line_sep># !gcloud config set project {project_id}
# + id="a3__DVx74sso" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="579bc832-9028-49f3-c164-c426d32f66a6"
'''
this should be the format of the checkpoint filetree:
checkpoint_path >> model(optimizer)_checkpoint_file.
checkpoint_path_ema >> ema_checkpoint_file
'''<line_sep>checkpoint_path='/content/vdvae_cifar10_2.86/latest_cifar10'<line_sep># checkpoints are downloaded at these paths.
# vdvae_cifar10_2.86/latest_cifar10 - optimizer+mode
# vdvae_cifar10_2.86/latest_cifar10_ema - ema_params'
# + id="4_RnWXhwIV85" colab={"base_uri": "https://localhost:8080/"} cellView="form" outputId="de8dedaf-bdd3-4fb7-99ee-7cfe96229d1c"
#@title Download checkpoints
# !gsutil cp -r gs://gsoc_bucket/vdvae_cifar10_2.86 ./
# !ls -l /content/vdvae_cifar10_2.86/latest_cifar10
# !ls -l /content/vdvae_cifar10_2.86/latest_cifar10_ema
# + colab={"base_uri": "https://localhost:8080/"} id="z3fThb8PIYHG" outputId="8406f5b2-cb50-42f5-aa78-4dc4f85afb02"
# !git clone https://github.com/j-towns/vdvae-jax.git
# + colab={"base_uri": "https://localhost:8080/"} id="053XPypoMobJ" outputId="0e415f07-00a4-4815-c2c5-288236ac2c98"
# %cd vdvae-jax
# + colab={"base_uri": "https://localhost:8080/"} id="X1hY6VqmNApP" outputId="41014f01-32bf-4377-85e5-e18328d2161a"
# !pip install --quiet flax
# + id="y013geSvWQUg"
<import_stmt>os<try_stmt><block_start>os.environ['COLAB_TPU_ADDR']<import_stmt>jax.tools.colab_tpu<line_sep>jax.tools.colab_tpu.setup_tpu()<block_end><except_stmt><block_start><pass><block_end># + colab={"base_uri": "https://localhost:8080/"} id="XDzBF1uZXOlu" outputId="929c368c-4610-49b0-bc94-76b891bc9b0e"
<import_stmt>jax<line_sep>jax.local_devices()<line_sep># + [markdown] id="KrFas8alNwJ0"
# ## Model
# (for cifar10)
# + [markdown] id="4Mr89HhnTbaF"
# ### Setting up hyperparams
# + id="B0QZ6aKoP08z"
<import_from_stmt>hps HPARAMS_REGISTRY Hyperparams add_vae_arguments<import_from_stmt>train_helpers setup_save_dirs<import_stmt>argparse<import_stmt>dataclasses<line_sep>H=Hyperparams()<line_sep>parser=argparse.ArgumentParser()<line_sep>parser=add_vae_arguments(parser)<line_sep>parser.set_defaults(hps='cifar10' conv_precision='highest')<line_sep>H=dataclasses.replace(H **vars(parser.parse_args([])))<line_sep>hparam_sets=[x<for>x H.hps.split(',')<if>x]<for_stmt>hp_set hparam_sets<block_start>hps=HPARAMS_REGISTRY[hp_set]<line_sep>parser.set_defaults(**hps)<block_end>H=dataclasses.replace(H **vars(parser.parse_args([])))<line_sep>H=setup_save_dirs(H)<line_sep># + [markdown] id="NisrtOPlfmef"
# This model is a hierarchical model with multiple stochastic blocks with multiple deterministic layers. You can know about model skeleton by observing the encoder and decoder "strings"
#
# **How to understand the string:**
# * blocks are comma seperated
# * `axb` implies there are `b` res blocks(set of Conv layers) for dimensions `axa`
# * `amb` implies it is a mixin block which increases the inter-image dims from `a` to `b` using **nearest neighbour upsampling** (used in decoder)
# * `adb` implies it's a block with avg-pooling layer which reduces the dims from `a` to `b`(used in encoder)
#
# for more understanding refer to this [paper](https://arxiv.org/abs/2011.10650)
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="-OyvG1KbP2qT" outputId="bc0a16e1-0cbb-4951-c5ef-e8310bc9deb4"
hparams=dataclasses.asdict(H)<for_stmt>k ['enc_blocks' 'dec_blocks' 'zdim' 'n_batch' 'device_count']<block_start>print(f'{k}:{hparams[k]}')<block_end># + id="FGD3wwRxvF3Y"
<import_from_stmt>utils logger<import_from_stmt>jax.interpreters.xla DeviceArray<line_sep>log=logger(H.logdir)<if_stmt>H.log_wandb<block_start><import_stmt>wandb<def_stmt>logprint *args pprint=<false> **kwargs<block_start><if_stmt>len(args)<g>0<block_start>log(*args)<block_end>wandb.log({k:np.array(x)<if>type(x)<is>DeviceArray<else>x<for>k,x kwargs.items()})<block_end>wandb.init(config=dataclasses.asdict(H))<block_end><else_stmt><block_start>logprint=log<block_end># + colab={"base_uri": "https://localhost:8080/"} id="cABtXQvqSG2Z" outputId="2c43dea8-4c53-44cc-dd91-0c7577d07a7e"
<import_stmt>numpy<as>np<import_from_stmt>jax lax<import_stmt>torch<import_stmt>imageio<import_from_stmt>PIL Image<import_stmt>glob<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>torchvision transforms<line_sep>np.random.seed(H.seed)<line_sep>torch.manual_seed(H.seed)<line_sep>H=dataclasses.replace(H conv_precision={'default':lax.Precision.DEFAULT 'high':lax.Precision.HIGH 'highest':lax.Precision.HIGHEST}[H.conv_precision] seed_init=H.seed seed_sample=H.seed+1 seed_train=H.seed+2+H.host_id seed_eval=H.seed+2+H.host_count+H.host_id )<line_sep>print('training model on ' H.dataset)<line_sep># + [markdown] id="Gs8bNNXpTMxZ"
# ### Downloading cifar10 dataset
# + colab={"base_uri": "https://localhost:8080/"} id="4An20_C-SvCT" outputId="023f5c9a-87fd-4ad8-abc3-0945b9fe4374"
# !./setup_cifar10.sh
# + [markdown] id="Js-LK-vojdSw"
# ### Setting up the model, data and the preprocess fn.
# + id="AylLXttfTSca"
<import_from_stmt>data set_up_data<line_sep>H,data_train,data_valid_or_test,preprocess_fn=set_up_data(H)<line_sep># + colab={"base_uri": "https://localhost:8080/"} id="GWsr1xszZ_te" outputId="a5ba8d4e-b088-46ec-ac31-b4fbd250618d"
<import_from_stmt>train_helpers load_vaes<line_sep>H=dataclasses.replace(H restore_path=checkpoint_path)<line_sep>optimizer,ema_params,start_epoch=load_vaes(H logprint)<line_sep># + colab={"base_uri": "https://localhost:8080/"} id="PEH8BtbmaK4O" outputId="f32e3fa2-746e-404b-bbae-aaca80078568"
start_epoch# no.of.epochs trained
# + colab={"base_uri": "https://localhost:8080/"} id="9nAJ3EGLICEh" outputId="6a47c0b6-aaf0-45a3-8a1c-b0c6bb6b3d40"
# Hparams for the current model
hparams=dataclasses.asdict(H)<for_stmt>i,k enumerate(sorted(hparams))<block_start>logprint(f'type=hparam, key={k}, value={getattr(H k)}')<block_end># + [markdown] id="HS2o9uFqjgyv"
# ### Evaluation
# + colab={"base_uri": "https://localhost:8080/"} id="jhiF_NjEuWQv" outputId="b0d88a47-5af0-4452-d1c0-88d90ef1a71e"
<import_from_stmt>train run_test_eval<line_sep>run_test_eval(H ema_params data_valid_or_test preprocess_fn logprint)<line_sep># + [markdown] id="tppWoc_hypdn"
# ### Function to save and show of batch of images given as a numpy array.
#
#
# + id="AJbKzeuzzGcS"
<def_stmt>zoom_in fname shape<block_start>im=Image.open(fname)<line_sep>resized_im=im.resize(shape)<line_sep>resized_im.save(fname)<block_end><def_stmt>save_n_show images order image_shape fname zoom=<true> show=<false><block_start>n_rows,n_images=order<line_sep>im=images.reshape((n_rows n_images *image_shape)).transpose([0 2 1 3 4]).reshape([n_rows<times>image_shape[0] n_images<times>image_shape[1] 3])<line_sep>print(f'printing samples to {fname}')<line_sep>imageio.imwrite(fname im)<if_stmt>zoom<block_start>zoom_in(fname (640 64))<block_end># w=640, h=64
<if_stmt>show<block_start>display(Image.open(fname))<block_end><block_end># + [markdown] id="9TlNptkdd5ME"
# ## Generations
# + id="EcnvaTn3iJfo"
n_images=10<line_sep>num_temperatures=3<line_sep>image_shape=[H.image_size H.image_size H.image_channels]<line_sep>H=dataclasses.replace(H num_images_visualize=n_images num_temperatures_visualize=num_temperatures)<line_sep># + [markdown] id="LDHUzIgBbjuX"
# Images will be saved in the following dir
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="EhJ17q1dfSNu" outputId="fb923dee-dc4d-4e68-e2c5-20f3f41874c1"
H.save_dir<line_sep># + [markdown] id="Xm_BYJYjiuzt"
# As the model params are replicated over multiple devices, unreplicated copy of them is made to use it for sampling and generations.
# + id="VJbqZRxWilR9"
<import_from_stmt>jax random<import_from_stmt>vae VAE<import_from_stmt>flax jax_utils<import_from_stmt>functools partial<line_sep>rng=random.PRNGKey(H.seed_sample)<line_sep>ema_apply=partial(VAE(H).apply {'params':jax_utils.unreplicate(ema_params)})<line_sep>forward_uncond_samples=partial(ema_apply method=VAE(H).forward_uncond_samples)<line_sep># + colab={"base_uri": "https://localhost:8080/"} id="XF5dvNqeRcIC" outputId="477884a0-d016-43c3-96ac-26b3cfd65d55"
temperatures=[1.0 0.9 0.8 0.7]<for_stmt>t temperatures[:H.num_temperatures_visualize]<block_start>im=forward_uncond_samples(n_images rng t=t)<line_sep>im=np.asarray(im)<line_sep>save_n_show(im [1 n_images] image_shape f'{H.save_dir}/generations-tem-{t}.png')<block_end># + colab={"base_uri": "https://localhost:8080/", "height": 0} id="RdypV3PJfyfN" outputId="bc5042cf-54c7-4380-e2f2-d36ab4951d65"
<for_stmt>t temperatures[:H.num_temperatures_visualize]<block_start>print("="<times>25)<line_sep>print(f"Generation of {n_images} new images for t={t}")<line_sep>print("="<times>25)<line_sep>fname=f'{H.save_dir}/generations-tem-{t}.png'<line_sep>display(Image.open(fname))<block_end># + [markdown] id="89M1-l8Ogd2k"
# ## Reconstructions
# + id="014yXaJfgfhq"
n_images=10<line_sep>image_shape=[H.image_size H.image_size H.image_channels]<line_sep># + [markdown] id="z5xtClDEYTI-"
# Preprocessing images before getting the latents
# + id="81EExYe0glPu"
<import_from_stmt>train get_sample_for_visualization<line_sep>viz_batch_original,viz_batch_processed=get_sample_for_visualization(data_valid_or_test preprocess_fn n_images H.dataset)<line_sep># + [markdown] id="eDENCERSiMm6"
# Getting the partial functions from the model methods
# + id="vPpzIoM_hQHK"
forward_get_latents=partial(ema_apply method=VAE(H).forward_get_latents)<line_sep>forward_samples_set_latents=partial(ema_apply method=VAE(H).forward_samples_set_latents)<line_sep># + [markdown] id="AnNFN7S7YZe1"
# Getting latents of different levels.
# + id="nt2_Zjqlha1U"
zs=[s['z']<for>s forward_get_latents(viz_batch_processed rng)]<line_sep># + [markdown] id="7RA8e6qJYcqF"
# No of latent observations used depends on `H.num_variables_visualize `, altering it gives different resolutions of the reconstructions.
# + id="ThgwoF6ihe9e"
recons=[]<line_sep>lv_points=np.floor(np.linspace(0 1 H.num_variables_visualize+2)<times>len(zs)).astype(int)[1:-1]<for_stmt>i lv_points<block_start>recons.append(forward_samples_set_latents(n_images zs[:i] rng t=0.1))<block_end># + [markdown] id="iawVwy7XYp9Z"
# Original Images
# + colab={"base_uri": "https://localhost:8080/", "height": 115} id="ih0D1sfRhy6F" outputId="8696bbaf-2a7c-4d89-9d7d-ebea19d37e7a"
orig_im=np.array(viz_batch_original)<line_sep>print("Original test images")<line_sep>save_n_show(orig_im [1 n_images] image_shape f'{H.save_dir}/orig_test.png' show=<true>)<line_sep># + [markdown] id="vbFgprJuYr7R"
# Reconstructions.
# + colab={"base_uri": "https://localhost:8080/", "height": 809} id="Ol7rNCgfh57R" outputId="e8d562cf-206e-42ae-a84b-5a5fd02489e8"
<for_stmt>i,r enumerate(recons)<block_start>r=np.array(r)<line_sep>print("="<times>25)<line_sep>print(f"Generation of {n_images} new images for {i+1}x resolution")<line_sep>print("="<times>25)<line_sep>fname=f'{H.save_dir}/recon_test-res-{i+1}x.png'<line_sep>save_n_show(r [1 n_images] image_shape fname show=<true>)<block_end> |
<import_from_stmt>project VideoApp<if_stmt>__name__<eq>'__main__'<block_start><import_from_stmt>kivy.core.video Video<assert_stmt>Video<is><not><none><line_sep>VideoApp().run()<block_end> |
<import_stmt>argparse<import_stmt>multiprocessing<import_from_stmt>pathlib Path<import_from_stmt>jnas_metadata_loader load_from_directory<import_from_stmt>jnas_metadata_loader.jnas_metadata JnasMetadata<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('jnas' type=Path)<line_sep>parser.add_argument('output' type=Path)<line_sep>parser.add_argument('--format' default='{sex}{text_id}_{mic}_atr_{subset}{sen_id}.wav')<line_sep>argument=parser.parse_args()<line_sep>jnas=argument.jnas# type: Path
output=argument.output# type: Path
jnas_list=load_from_directory(str(jnas))<line_sep>atr_list=jnas_list.subset_news_or_atr('B')<line_sep>output.mkdir(exist_ok=<true>)<def_stmt>process d:JnasMetadata<block_start>p=d.path<line_sep>out=output/argument.format.format(**d._asdict())<line_sep>out.symlink_to(p)<block_end>pool=multiprocessing.Pool()<line_sep>pool.map(process atr_list)<line_sep> |
<import_stmt>time<import_stmt>mxnet<as>mx<line_sep>benchmark_dataiter=mx.io.ImageRecordIter(path_imgrec="../data/test.rec" data_shape=(1 28 28) batch_size=64 mean_r=128 scale=0.00390625 )<line_sep>mod=mx.mod.Module.load('mnist_lenet' 35 context=mx.gpu(2))<line_sep>mod.bind(data_shapes=benchmark_dataiter.provide_data label_shapes=benchmark_dataiter.provide_label for_training=<false>)<line_sep>start=time.time()<for_stmt>i,batch enumerate(benchmark_dataiter)<block_start>mod.forward(batch)<block_end>time_elapsed=time.time()-start<line_sep>msg='{} batches iterated!\nAverage forward time per batch: {:.6f} ms'<line_sep>print(msg.format(i+1 1000<times>time_elapsed/float(i)))<line_sep> |
"""
SQL Storage for Policies.
"""<import_stmt>logging<import_from_stmt>sqlalchemy and_ or_ literal func<import_from_stmt>sqlalchemy.exc IntegrityError<import_from_stmt>sqlalchemy.orm.exc FlushError<import_from_stmt>.model PolicyModel PolicyActionModel PolicyResourceModel PolicySubjectModel<import_from_stmt>..abc Storage<import_from_stmt>...checker StringExactChecker StringFuzzyChecker RegexChecker RulesChecker<import_from_stmt>...exceptions PolicyExistsError UnknownCheckerType<import_from_stmt>...policy TYPE_STRING_BASED TYPE_RULE_BASED<line_sep>log=logging.getLogger(__name__)<class_stmt>SQLStorage(Storage)<block_start>"""Stores all policies in SQL Database"""<def_stmt>__init__ self scoped_session<block_start>"""
Initialize SQL Storage
:param scoped_session: SQL Alchemy scoped session
"""<line_sep>self.session=scoped_session<line_sep>self.dialect=self.session.bind.engine.dialect.name<block_end><def_stmt>add self policy<block_start><try_stmt><block_start>policy_model=PolicyModel.from_policy(policy)<line_sep>self.session.add(policy_model)<line_sep>self.session.commit()<block_end><except_stmt>IntegrityError<block_start>self.session.rollback()<line_sep>log.error('Error trying to create already existing policy with UID=%s.' policy.uid)<line_sep><raise>PolicyExistsError(policy.uid)<block_end># todo - figure out why FlushError is raised instead of IntegrityError on PyPy tests
<except_stmt>FlushError<as>e<block_start><if_stmt>'conflicts with persistent instance'<in>str(e)<block_start>self.session.rollback()<line_sep>log.error('Error trying to create already existing policy with UID=%s.' policy.uid)<line_sep><raise>PolicyExistsError(policy.uid)<block_end><block_end>log.info('Added Policy: %s' policy)<block_end><def_stmt>get self uid<block_start>policy_model=self.session.query(PolicyModel).get(uid)<if_stmt><not>policy_model<block_start><return><none><block_end><return>policy_model.to_policy()<block_end><def_stmt>get_all self limit offset<block_start>self._check_limit_and_offset(limit offset)<line_sep>cur=self.session.query(PolicyModel).order_by(PolicyModel.uid.asc()).slice(offset offset+limit)<for_stmt>policy_model cur<block_start><yield>policy_model.to_policy()<block_end><block_end><def_stmt>find_for_inquiry self inquiry checker=<none><block_start>cur=self._get_filtered_cursor(inquiry checker)<for_stmt>policy_model cur<block_start><yield>policy_model.to_policy()<block_end><block_end><def_stmt>update self policy<block_start><try_stmt><block_start>policy_model=self.session.query(PolicyModel).get(policy.uid)<if_stmt><not>policy_model<block_start><return><block_end>policy_model.update(policy)<line_sep>self.session.commit()<block_end><except_stmt>IntegrityError<block_start>self.session.rollback()<line_sep><raise><block_end>log.info('Updated Policy with UID=%s. New value is: %s' policy.uid policy)<block_end><def_stmt>delete self uid<block_start>self.session.query(PolicyModel).filter(PolicyModel.uid<eq>uid).delete()<line_sep>log.info('Deleted Policy with UID=%s.' uid)<block_end><def_stmt>_get_filtered_cursor self inquiry checker<block_start>"""
Returns cursor with proper query-filter based on the checker type.
"""<line_sep>cur=self.session.query(PolicyModel)<if_stmt>isinstance(checker StringFuzzyChecker)<block_start><return>cur.filter(PolicyModel.type<eq>TYPE_STRING_BASED PolicyModel.actions.any(PolicyActionModel.action_string.like('%{}%'.format(inquiry.action))) PolicyModel.resources.any(PolicyResourceModel.resource_string.like('%{}%'.format(inquiry.resource))) PolicyModel.subjects.any(PolicySubjectModel.subject_string.like('%{}%'.format(inquiry.subject))))<block_end><elif_stmt>isinstance(checker StringExactChecker)<block_start><return>cur.filter(PolicyModel.type<eq>TYPE_STRING_BASED PolicyModel.actions.any(PolicyActionModel.action_string<eq>inquiry.action) PolicyModel.resources.any(PolicyResourceModel.resource_string<eq>inquiry.resource) PolicyModel.subjects.any(PolicySubjectModel.subject_string<eq>inquiry.subject))<block_end><elif_stmt>isinstance(checker RegexChecker)<block_start><if_stmt><not>self._supports_regex_operator()<block_start><return>cur.filter(PolicyModel.type<eq>TYPE_STRING_BASED)<block_end><return>cur.filter(PolicyModel.type<eq>TYPE_STRING_BASED PolicyModel.actions.any(or_(and_(PolicyActionModel.action_regex.is_(<none>) PolicyActionModel.action_string<eq>inquiry.action) and_(PolicyActionModel.action_regex.isnot(<none>) self._regex_operation(inquiry.action PolicyActionModel.action_regex))) ) PolicyModel.resources.any(or_(and_(PolicyResourceModel.resource_regex.is_(<none>) PolicyResourceModel.resource_string<eq>inquiry.resource) and_(PolicyResourceModel.resource_regex.isnot(<none>) self._regex_operation(inquiry.resource PolicyResourceModel.resource_regex))) ) PolicyModel.subjects.any(or_(and_(PolicySubjectModel.subject_regex.is_(<none>) PolicySubjectModel.subject_string<eq>inquiry.subject) and_(PolicySubjectModel.subject_regex.isnot(<none>) self._regex_operation(inquiry.subject PolicySubjectModel.subject_regex))) ))<block_end><elif_stmt>isinstance(checker 
RulesChecker)<block_start><return>cur.filter(PolicyModel.type<eq>TYPE_RULE_BASED)<block_end><elif_stmt><not>checker<block_start><return>cur<block_end><else_stmt><block_start>log.error('Provided Checker type is not supported.')<line_sep><raise>UnknownCheckerType(checker)<block_end><block_end><def_stmt>_supports_regex_operator self<block_start>"""
Does database support regex operator?
"""<line_sep><return>self.dialect<in>['mysql' 'postgresql' 'oracle']<block_end><def_stmt>_regex_operation self left right<block_start>"""
Get database-specific regex operation.
Don't forget to check if there is a support for regex operator before using it.
"""<if_stmt>self.dialect<eq>'mysql'<block_start><return>literal(left).op('REGEXP BINARY' is_comparison=<true>)(right)<block_end><elif_stmt>self.dialect<eq>'postgresql'<block_start><return>literal(left).op('~' is_comparison=<true>)(right)<block_end><elif_stmt>self.dialect<eq>'oracle'<block_start><return>func.REGEXP_LIKE(left right)<block_end><return><none><block_end><block_end> |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__=['partial' 'pmax' 'pmean' 'pmin' 'psum']<import_from_stmt>functools partial<import_stmt>jax<import_from_stmt>jax lax<def_stmt>pmax x:jax.interpreters.pxla.ShardedDeviceArray axis_name:str='device'<block_start>"""Compute a multi-device reduce max on x over the device axis axis_name."""<line_sep><return>lax.pmax(x axis_name)<block_end><def_stmt>pmean x:jax.interpreters.pxla.ShardedDeviceArray axis_name:str='device'<block_start>"""Compute a multi-device reduce mean on x over the device axis axis_name."""<line_sep><return>lax.pmean(x axis_name)<block_end><def_stmt>pmin x:jax.interpreters.pxla.ShardedDeviceArray axis_name:str='device'<block_start>"""Compute a multi-device reduce min on x over the device axis axis_name."""<line_sep><return>lax.pmin(x axis_name)<block_end><def_stmt>psum x:jax.interpreters.pxla.ShardedDeviceArray axis_name:str='device'<block_start>"""Compute a multi-device reduce sum on x over the device axis axis_name."""<line_sep><return>lax.psum(x axis_name)<block_end> |
<import_from_stmt>.utils connect_foirequest<def_stmt>connect_campaign sender **kwargs<block_start>reference=kwargs.get("reference")<if_stmt><not>reference<block_start><return><block_end><if_stmt>"@"<in>reference<block_start>parts=reference.split("@" 1)<block_end><else_stmt><block_start>parts=reference.split(":" 1)<block_end><if_stmt>len(parts)<ne>2<block_start><return><block_end>namespace=parts[0]<line_sep>connect_foirequest(sender namespace)<block_end> |
"""
Methods to search an ImageCollection with brute force, exhaustive search.
"""<import_stmt>cgi<import_stmt>abc<import_stmt>cPickle<import_stmt>numpy<as>np<import_from_stmt>sklearn.decomposition PCA<import_from_stmt>sklearn.metrics.pairwise manhattan_distances euclidean_distances additive_chi2_kernel<import_stmt>pyflann<import_from_stmt>scipy.spatial cKDTree<import_stmt>util<import_from_stmt>image Image<import_from_stmt>rayleigh.util TicToc<line_sep>tt=TicToc()<class_stmt>SearchableImageCollection(object)<block_start>"""
Initialize with a rayleigh.ImageCollection, a distance_metric, and the
number of dimensions to reduce the histograms to.
Parameters
----------
image_collection : rayleigh.ImageCollection
dist_metric : string
must be in self.DISTANCE_METRICS
sigma : nonnegative float
Amount of smoothing applied to histograms.
If 0, none.
num_dimensions : int
number of dimensions to reduce the histograms to, using PCA.
If 0, do not reduce dimensions.
"""<def_stmt>__init__ self image_collection dist_metric sigma num_dimensions<block_start>self.ic=image_collection<line_sep>self.id_ind_map=self.ic.get_id_ind_map()<line_sep>self.distance_metric=dist_metric<if_stmt>self.distance_metric<not><in>self.DISTANCE_METRICS<block_start><raise>Exception("Unsupported distance metric.")<block_end>self.num_dimensions=num_dimensions<line_sep>self.hists_reduced=self.ic.get_hists()<line_sep>self.sigma=sigma<if_stmt>self.sigma<g>0<block_start>self.smooth_histograms()<block_end><if_stmt>self.num_dimensions<g>0<block_start>self.reduce_dimensionality()<block_end><block_end>@staticmethod<def_stmt>load filename<block_start>"""
Load ImageCollection from filename.
"""<line_sep><return>cPickle.load(open(filename))<block_end><def_stmt>save self filename<block_start>"""
Save self to filename.
"""<line_sep>cPickle.dump(self open(filename 'w') 2)<block_end><def_stmt>smooth_histograms self<block_start>"""
Smooth histograms with a Gaussian.
"""<for_stmt>i range(self.hists_reduced.shape[0])<block_start>color_hist=self.hists_reduced[i :]<line_sep>self.hists_reduced[i :]=util.smooth_histogram(color_hist self.ic.palette self.sigma)<block_end><block_end><def_stmt>reduce_dimensionality self<block_start>"""
Compute and store PCA dimensionality-reduced histograms.
"""<line_sep>tt.tic('reduce_dimensionality')<line_sep>self.pca=PCA(n_components=self.num_dimensions whiten=<true>)<line_sep>self.pca.fit(self.hists_reduced)<line_sep>self.hists_reduced=self.pca.transform(self.hists_reduced)<line_sep>tt.toc('reduce_dimensionality')<block_end><def_stmt>get_image_hist self img_id<block_start>"""
Return the smoothed image histogram of the image with the given id.
Parameters
----------
img_id : string
Returns
-------
color_hist : ndarray
"""<line_sep>img_ind=self.id_ind_map[img_id]<line_sep>color_hist=self.hists_reduced[img_ind :]<line_sep><return>color_hist<block_end><def_stmt>search_by_image_in_dataset self img_id num=20<block_start>"""
Search images in database for similarity to the image with img_id in
the database.
See search_by_color_hist() for implementation.
Parameters
----------
img_id : string
num : int, optional
Returns
-------
query_img_data : dict
results : list
list of dicts of nearest neighbors to query
"""<line_sep>query_img_data=self.ic.get_image(img_id no_hist=<true>)<line_sep>color_hist=self.get_image_hist(img_id)<line_sep>results,time_elapsed=self.search_by_color_hist(color_hist num reduced=<true>)<line_sep><return>query_img_data results time_elapsed<block_end><def_stmt>search_by_image self image_filename num=20<block_start>"""
Search images in database by color similarity to image.
See search_by_color_hist().
"""<line_sep>query_img=Image(image_filename)<line_sep>color_hist=util.histogram_colors_smoothed(query_img.lab_array self.ic.palette sigma=self.sigma direct=<false>)<line_sep>results,time_elapsed=self.search_by_color_hist(color_hist)<line_sep><return>query_img.as_dict() results time_elapsed<block_end><def_stmt>search_by_color_hist self color_hist num=20 reduced=<false><block_start>"""
Search images in database by color similarity to the given histogram.
Parameters
----------
color_hist : (K,) ndarray
histogram over the color palette
num : int, optional
number of nearest neighbors to ret
reduced : boolean, optional
is the given color_hist already reduced in dimensionality?
Returns
-------
query_img : dict
info about the query image
results : list
list of dicts of nearest neighbors to query
"""<if_stmt>self.num_dimensions<g>0<and><not>reduced<block_start>color_hist=self.pca.transform(color_hist)<block_end>tt.tic('nn_ind')<line_sep>nn_ind,nn_dists=self.nn_ind(color_hist num)<line_sep>time_elapsed=tt.qtoc('nn_ind')<line_sep>results=[]<line_sep># TODO: tone up the amount of data returned: don't need resized size,
# _id, maybe something else?
<for_stmt>ind,dist zip(nn_ind nn_dists)<block_start>img_id=self.id_ind_map[ind]<line_sep>img=self.ic.get_image(img_id no_hist=<true>)<line_sep>img['url']=cgi.escape(img['url'])<line_sep>img['distance']=dist<line_sep>results.append(img)<block_end><return>results time_elapsed<block_end>@abc.abstractmethod<def_stmt>nn_ind self color_hist num<block_start>"""
Return num closest nearest neighbors (potentially approximate) to the
query color_hist, and the distances to them.
Override this search method in extending classes.
Parameters
----------
color_hist : (K,) ndarray
histogram over the color palette
num : int
number of nearest neighbors to return.
Returns
-------
nn_ind : (num,) ndarray
Indices of the neighbors in the dataset.
nn_dists (num,) ndarray
Distances to the neighbors returned.
"""<line_sep><pass><block_end><block_end><class_stmt>SearchableImageCollectionExact(SearchableImageCollection)<block_start>"""
Search the image collection exhaustively (mainly through np.dot).
"""<line_sep>DISTANCE_METRICS=['manhattan' 'euclidean' 'chi_square']<def_stmt>nn_ind self color_hist num<block_start>"""
Exact nearest neighbor seach through exhaustive comparison.
"""<if_stmt>self.distance_metric<eq>'manhattan'<block_start>dists=manhattan_distances(color_hist self.hists_reduced)<block_end><elif_stmt>self.distance_metric<eq>'euclidean'<block_start>dists=euclidean_distances(color_hist self.hists_reduced squared=<true>)<block_end><elif_stmt>self.distance_metric<eq>'chi_square'<block_start>dists=-additive_chi2_kernel(color_hist self.hists_reduced)<block_end>dists=dists.flatten()<line_sep>nn_ind=np.argsort(dists).flatten()[:num]<line_sep>nn_dists=dists[nn_ind]<line_sep><return>nn_ind nn_dists<block_end><block_end><class_stmt>SearchableImageCollectionFLANN(SearchableImageCollection)<block_start>"""
Search the image collection using the FLANN library for aNN indexing.
The FLANN index is built with automatic tuning of the search algorithm,
which can take a while (~90s on 25K images).
"""<line_sep>DISTANCE_METRICS=['manhattan' 'euclidean' 'chi_square']<line_sep>@staticmethod<def_stmt>load filename# Saving the flann object results in memory errors, so we use its own
# method to save its index in a separate file.
<block_start>sic=cPickle.load(open(filename))<line_sep><return>sic.build_index(filename+'_flann_index')<block_end><def_stmt>save self filename# See comment in load().
<block_start>flann=self.flann<line_sep>self.flann=<none><line_sep>cPickle.dump(self open(filename 'w') 2)<line_sep>flann.save_index(filename+'_flann_index')<line_sep>self.flann=flann<block_end><def_stmt>__init__ self image_collection distance_metric sigma dimensions<block_start>super(SearchableImageCollectionFLANN self).__init__(image_collection distance_metric sigma dimensions)<line_sep>self.build_index()<block_end><def_stmt>build_index self index_filename=<none><block_start>tt.tic('build_index')<line_sep>pyflann.set_distance_type(self.distance_metric)<line_sep>self.flann=pyflann.FLANN()<if_stmt>index_filename<block_start>self.flann.load_index(index_filename self.hists_reduced)<block_end><else_stmt><block_start>self.params=self.flann.build_index(self.hists_reduced algorithm='autotuned' sample_fraction=0.3 target_precision=.8 build_weight=0.01 memory_weight=0.)<block_end>print(self.params)<line_sep>tt.toc('build_index')<line_sep><return>self<block_end><def_stmt>nn_ind self color_hist num<block_start>nn_ind,nn_dists=self.flann.nn_index(color_hist num checks=self.params['checks'])<line_sep><return>nn_ind.flatten() nn_dists.flatten()<block_end><block_end><class_stmt>SearchableImageCollectionCKDTree(SearchableImageCollection)<block_start>"""
Use the cKDTree data structure from scipy.spatial for the index.
Parameters:
- LEAF_SIZE (int): The number of points at which the algorithm switches
over to brute-force.
- EPS (non-negative float): Parameter for query(), such that the
k-th returned value is guaranteed to be no further than (1 + eps)
times the distance to the real k-th nearest neighbor.
NOTE: These parameters have not been tuned.
"""<line_sep>DISTANCE_METRICS=['manhattan' 'euclidean']<line_sep>Ps={'manhattan':1 'euclidean':2}<line_sep>LEAF_SIZE=5<line_sep>EPSILON=1<line_sep>@staticmethod<def_stmt>load filename<block_start><return>cPickle.load(open(filename)).build_index()<block_end><def_stmt>__init__ self image_collection distance_metric sigma dimensions<block_start>super(SearchableImageCollectionCKDTree self).__init__(image_collection distance_metric sigma dimensions)<line_sep>self.build_index()<block_end><def_stmt>build_index self<block_start>tt.tic('build_index_ckdtree')<line_sep>self.ckdtree=cKDTree(self.hists_reduced self.LEAF_SIZE)<line_sep>self.p=self.Ps[self.distance_metric]<line_sep>tt.toc('build_index_ckdtree')<line_sep><return>self<block_end><def_stmt>nn_ind self color_hist num<block_start>nn_dists,nn_ind=self.ckdtree.query(color_hist num eps=self.EPSILON p=self.p)<line_sep><return>nn_ind.flatten() nn_dists.flatten()<block_end><block_end> |
"""Python Cookbook
See
http://www.brynmawr.edu/math/people/anmyers/PAPERS/SIGEST_Coupons.pdf
and
https://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
and
https://en.wikipedia.org/wiki/Binomial_coefficient
"""<import_from_stmt>math factorial<def_stmt>expected n population=8<block_start>"""
What is the probability p(n, d) that exactly n boxes of cereal will
have to be purchased in order to obtain, for the first time,
a complete collection of at least one of each of the d kinds of souvenir
coupons?
.. math::
p(n, d) = \frac{d!}{d^n} \lbrace\textstyle{ n-1 \atop d-1 }\rbrace
"""<line_sep><return>factorial(population)/population<power>n<times>stirling2(n-1 population-1)<block_end><def_stmt>binom n k<block_start>"""
.. math::
\binom n k = \frac{n!}{k!\,(n-k)!} \quad \text{for }\ 0\leq k\leq n
"""<line_sep><return>factorial(n)/(factorial(k)<times>factorial(n-k))<block_end><def_stmt>stirling2 n k<block_start>"""
The Stirling numbers of the second kind,
written S(n,k) or :math:`\lbrace\textstyle{n\atop k}\rbrace`
count the number of ways to partition a set of n labelled objects
into k nonempty unlabelled subsets.
.. math::
\lbrace\textstyle{n\atop n}\rbrace = 1 \\
\lbrace\textstyle{n\atop 1}\rbrace = 1 \\
\lbrace\textstyle{n\atop k}\rbrace = k \lbrace\textstyle{n-1 \atop k}\rbrace + \lbrace\textstyle{n-1 \atop k-1}\rbrace
Or
.. math::
\left\{ {n \atop k}\right\} = \frac{1}{k!}\sum_{j=0}^{k} (-1)^{k-j} \binom{k}{j} j^n
"""<line_sep><return>1/factorial(k)<times>sum((-1<if>(k-j)%2<else>1)<times>binom(k j)<times>j<power>n<for>j range(0 k+1))<block_end><if_stmt>__name__<eq>"__main__"<block_start><for_stmt>i range(8 30)<block_start>print(i expected(i 8))<block_end>print(binom(24 12))<block_end> |
<import_from_stmt>._kaldi_error *<import_from_stmt>._timer *<line_sep>__all__=[name<for>name dir()<if>name[0]<ne>'_'<and><not>name.endswith('Base')]<line_sep> |
# coding: utf-8
<import_from_stmt>common checker<import_from_stmt>warehouse entry<as>warehouse_entry<import_from_stmt>pmdb entry<as>pmdb_entry<import_from_stmt>dataset entry<as>dataset_entry<import_from_stmt>health entry<as>health_entry<import_from_stmt>job entry<as>job_entry<import_from_stmt>es entry<as>es_entry<line_sep>checker.check_sreworks_data_service_ready()<line_sep>print("======start init warehouse======")<line_sep>warehouse_entry.init()<line_sep>print("======end init warehouse======")<line_sep>print("======start init pmdb======")<line_sep>pmdb_entry.init()<line_sep>print("======end init pmdb======")<line_sep>print("======start init dataset======")<line_sep>dataset_entry.init()<line_sep>print("======end init dataset======")<line_sep>print("======start init health======")<line_sep>health_entry.init()<line_sep>print("======end init health======")<line_sep>print("======start init es======")<line_sep>es_entry.init()<line_sep>print("======end init es======")<line_sep>print("======start init job======")<line_sep>job_entry.init()<line_sep>print("======end init job======")<line_sep> |
<import_from_stmt>unittest2 TestCase<import_from_stmt>datetime datetime timedelta<import_stmt>securitybot.util<as>util<class_stmt>VarTest(TestCase)<block_start><def_stmt>test_hours self<block_start><assert_stmt>util.OPENING_HOUR<l>util.CLOSING_HOUR 'Closing hour must be after opening hour.'<block_end><block_end><class_stmt>NamedTupleTest(TestCase)<block_start><def_stmt>test_empty self<block_start>tup=util.tuple_builder()<assert_stmt>tup.answer<is><none><assert_stmt>tup.text<eq>''<block_end><def_stmt>test_full self<block_start>tup=util.tuple_builder(<true> 'Yes')<assert_stmt>tup.answer<is><true><assert_stmt>tup.text<eq>'Yes'<block_end><block_end><class_stmt>BusinessHoursTest(TestCase)<block_start><def_stmt>test_weekday self<block_start>'''Test business hours during a weekday.'''<line_sep># 18 July 2016 is a Monday. If this changes, please contact the IERS.
morning=datetime(year=2016 month=7 day=18 hour=util.OPENING_HOUR tzinfo=util.LOCAL_TZ)<assert_stmt>util.during_business_hours(morning)<line_sep>noon=datetime(year=2016 month=7 day=18 hour=12 tzinfo=util.LOCAL_TZ)<assert_stmt>util.during_business_hours(noon) 'This may fail if noon is no longer during business hours.'<line_sep>afternoon=datetime(year=2016 month=7 day=18 hour=util.CLOSING_HOUR-1 minute=59 second=59 tzinfo=util.LOCAL_TZ)<assert_stmt>util.during_business_hours(afternoon)<line_sep>breakfast=datetime(year=2016 month=7 day=18 hour=util.OPENING_HOUR-1 minute=59 second=59 tzinfo=util.LOCAL_TZ)<assert_stmt><not>util.during_business_hours(breakfast)<line_sep>supper=datetime(year=2016 month=7 day=18 hour=util.CLOSING_HOUR tzinfo=util.LOCAL_TZ)<assert_stmt><not>util.during_business_hours(supper)<block_end><def_stmt>test_weekend self<block_start>'''Test "business hours" during a weekend.'''<line_sep># As such, 17 July 2016 is a Sunday.
sunday_morning=datetime(year=2016 month=7 day=17 hour=util.OPENING_HOUR tzinfo=util.LOCAL_TZ)<assert_stmt><not>util.during_business_hours(sunday_morning)<block_end><block_end><class_stmt>ExpirationTimeTest(TestCase)<block_start><def_stmt>test_same_day self<block_start>'''Test time delta within the same day.'''<line_sep>date=datetime(year=2016 month=7 day=18 hour=util.OPENING_HOUR tzinfo=util.LOCAL_TZ)<line_sep>td=timedelta(hours=((util.CLOSING_HOUR-util.OPENING_HOUR)%24)/2)<line_sep>after=date+td<assert_stmt>util.get_expiration_time(date td)<eq>after<block_end><def_stmt>test_next_weekday self<block_start>'''Test time delta overnight.'''<line_sep>date=datetime(year=2016 month=7 day=18 hour=util.CLOSING_HOUR-1 tzinfo=util.LOCAL_TZ)<line_sep>next_date=datetime(year=2016 month=7 day=19 hour=util.OPENING_HOUR+1 tzinfo=util.LOCAL_TZ)<assert_stmt>util.get_expiration_time(date timedelta(hours=2))<eq>next_date<block_end><def_stmt>test_edge_weekday self<block_start>'''Test time delta overnight just barely within range.'''<line_sep>date=datetime(year=2016 month=7 day=18 hour=util.CLOSING_HOUR-1 minute=59 second=59 tzinfo=util.LOCAL_TZ)<line_sep>td=timedelta(seconds=1)<line_sep>after=datetime(year=2016 month=7 day=19 hour=util.OPENING_HOUR tzinfo=util.LOCAL_TZ)<assert_stmt>util.get_expiration_time(date td)<eq>after<block_end><def_stmt>test_next_weekend self<block_start>'''Test time delta over a weekend.'''<line_sep>date=datetime(year=2016 month=7 day=15 hour=util.CLOSING_HOUR-1 tzinfo=util.LOCAL_TZ)<line_sep>next_date=datetime(year=2016 month=7 day=18 hour=util.OPENING_HOUR+1 tzinfo=util.LOCAL_TZ)<assert_stmt>util.get_expiration_time(date timedelta(hours=2))<eq>next_date<block_end><def_stmt>test_edge_weekend self<block_start>'''Test time delta over a weekend just barely within range.'''<line_sep>date=datetime(year=2016 month=7 day=15 hour=util.CLOSING_HOUR-1 minute=59 second=59 tzinfo=util.LOCAL_TZ)<line_sep>td=timedelta(seconds=1)<line_sep>after=datetime(year=2016 month=7 
day=18 hour=util.OPENING_HOUR tzinfo=util.LOCAL_TZ)<assert_stmt>util.get_expiration_time(date td)<eq>after<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>swahili path<block_start>"""Swahili
Attitudes towards the Swahili language among Kenyan school children
A dataset with 480 observations on the following 4 variables.
`Province`
`NAIROBI` or `PWANI`
`Sex`
`female` or `male`
`Attitude.Score`
Score (out a possible 200 points) on a survey of attitude towards the
Swahili language
`School`
Code for the school: `A` through `L`
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `swahili.csv`.
Returns:
Tuple of np.ndarray `x_train` with 480 rows and 4 columns and
dictionary `metadata` of column headers (feature names).
"""<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='swahili.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/Stat2Data/Swahili.csv'<line_sep>maybe_download_and_extract(path url save_file_name='swahili.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end> |
# SPDX-License-Identifier: MIT
# Copyright 2020 <NAME>
<import_from_stmt>osgeo gdal<import_stmt>time<def_stmt>doit compress threads<block_start>gdal.SetConfigOption('GDAL_NUM_THREADS' str(threads))<line_sep>filename='/vsimem/test.tif'<line_sep>ds=gdal.GetDriverByName('GTiff').Create(filename 20000 20000 3 options=['COMPRESS='+compress 'TILED=YES'])<line_sep>ds.GetRasterBand(1).Fill(50)<line_sep>ds.GetRasterBand(3).Fill(100)<line_sep>ds.GetRasterBand(3).Fill(200)<line_sep>ds=<none><line_sep>ds=gdal.Open(filename gdal.GA_Update)<line_sep>start=time.time()<line_sep>ds.BuildOverviews('CUBIC' [2 4 8])<line_sep>end=time.time()<line_sep>print('COMPRESS=%s, NUM_THREADS=%d: %.2f'%(compress threads end-start))<line_sep>gdal.SetConfigOption('GDAL_NUM_THREADS' <none>)<block_end>doit('NONE' 0)<line_sep>doit('NONE' 2)<line_sep>doit('NONE' 4)<line_sep>doit('NONE' 8)<line_sep>doit('ZSTD' 0)<line_sep>doit('ZSTD' 2)<line_sep>doit('ZSTD' 4)<line_sep>doit('ZSTD' 8)<line_sep> |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
<import_stmt>sys<import_stmt>re<import_stmt>json<import_stmt>os<import_stmt>time<import_stmt>openpyxl<as>opx<def_stmt>parse_arguments <block_start>print(sys.argv)<line_sep>me_report_path=sys.argv[1]<line_sep>log_path=sys.argv[2]<line_sep>n_iter=sys.argv[3]<line_sep>out=sys.argv[4]<assert_stmt>n_iter.isdigit()<line_sep><return>me_report_path log_path int(n_iter) out<block_end><def_stmt>extract_by_keyword doc keyword pattern<block_start>rst=[]<for_stmt>i,s enumerate(doc)<block_start><if_stmt>keyword<in>s<block_start>p=re.findall(pattern s)<line_sep>print("L%d: extracted %s from '%s'"%(i p s.strip()))<line_sep>rst.extend(p)<block_end><block_end><return>rst<block_end><def_stmt>process_log fname log_path n_iter keyword pattern<block_start>rnt={}<for_stmt>i range(1 1+n_iter)<block_start>fname_path=os.path.join(log_path fname%i)<with_stmt>open(fname_path)<as>f<block_start>print("\nLoading %s"%fname_path)<line_sep>rst=extract_by_keyword(f keyword pattern)<block_end>rnt[fname%i]=rst<block_end><return>rnt<block_end><def_stmt>summarize func<block_start><def_stmt>wrapper *args **kwargs<block_start>log=func(*args **kwargs)<line_sep>times=list(log.items())<line_sep>times.sort(key=<lambda>x:x[1])<line_sep>min_file,min_time=times[0]<line_sep>avg=sum(map(<lambda>x:x[1] times))/len(times)<line_sep>log["min_time"]=min_time<line_sep>log["min_file"]=min_file<line_sep>log["avg_time"]=avg<line_sep><return>log<block_end><return>wrapper<block_end>@summarize<def_stmt>process_bert_log log_path n_iter<block_start>fname="bert%d.log"<line_sep>total=process_log(fname log_path n_iter "TotalTime" r"\d+.\d+")<line_sep>task=process_log(fname log_path n_iter "task_emit" r"\d+.\d+")<line_sep>log={}<for_stmt>fname total<block_start>log[fname]=float(total[fname][0])-float(task[fname][0])<block_end><return>log<block_end>@summarize<def_stmt>process_resnet_log log_path n_iter<block_start>fname="resnet%d.log"<line_sep>total=process_log(fname log_path n_iter "TotalTime" 
r"\d+.\d+")<line_sep>task=process_log(fname log_path n_iter "task_emit" r"\d+.\d+")<line_sep>log={}<for_stmt>fname total<block_start>log[fname]=float(total[fname][0])-float(task[fname][0])<block_end><return>log<block_end>@summarize<def_stmt>process_gpt_log log_path n_iter<block_start>fname="gpt%d.log"<line_sep>total=process_log(fname log_path n_iter "TotalTime" r"\d+.\d+")<line_sep>task=process_log(fname log_path n_iter "task_emit" r"\d+.\d+")<line_sep>log={}<for_stmt>fname total<block_start>log[fname]=float(total[fname][0])-float(task[fname][0])<block_end><return>log<block_end>@summarize<def_stmt>process_reid_log log_path n_iter<block_start>log={}<for_stmt>i range(8)<block_start>fname="reid_%d_"+str(i)+".log"<line_sep>total=process_log(fname log_path n_iter "TotalTime" r"\d+.\d+")<line_sep>task=process_log(fname log_path n_iter "task_emit" r"\d+.\d+")<for_stmt>fname total<block_start>log[fname]=float(total[fname][0])-float(task[fname][0])<block_end><block_end><return>log<block_end><def_stmt>write_to_me_report log me_report_path<block_start>wb=opx.load_workbook(me_report_path)<line_sep>sheet=wb["Sheet"]<line_sep>idx=sheet.max_row+1<line_sep>date=time.strftime('%m%d' time.localtime())<line_sep>sheet['A%d'%idx]=date<line_sep>sheet['B%d'%idx]=round(log["reid"]["min_time"] 2)<line_sep>sheet['C%d'%idx]=round(log["bert"]["min_time"] 2)<line_sep>sheet['D%d'%idx]=round(log['resnet']["min_time"] 2)<line_sep>sheet['E%d'%idx]=round(log['gpt']["min_time"] 2)<line_sep>wb.save(me_report_path)<block_end><def_stmt>generate_report <block_start>me_report_path,log_path,n_iter,out=parse_arguments()<line_sep>log_data={}<line_sep>bert_log=process_bert_log(log_path n_iter)<line_sep>resnet_log=process_resnet_log(log_path n_iter)<line_sep>gpt_log=process_gpt_log(log_path n_iter)<line_sep>reid_log=process_reid_log(log_path n_iter)<line_sep>log_data["bert"]=bert_log<line_sep>log_data["resnet"]=resnet_log<line_sep>log_data["gpt"]=gpt_log<line_sep>log_data["reid"]=reid_log<with_stmt>open(out 
"w")<as>f<block_start>json.dump(log_data f indent=2)<block_end>write_to_me_report(log_data me_report_path)<block_end><if_stmt>__name__<eq>"__main__"<block_start>generate_report()<block_end> |
<import_stmt>pickle<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<with_stmt>open('./quadratic/eval_record.pickle' 'rb')<as>loss<block_start>data=pickle.load(loss)<block_end>print('Mat_record' len(data['Mat_record']))<line_sep>#print('bias',data['inter_gradient_record'])
#print('constant',data['intra_record'])
<with_stmt>open('./quadratic/evaluate_record.pickle' 'rb')<as>loss1<block_start>data1=pickle.load(loss1)<block_end>x=np.array(data1['x_record'])<line_sep>print('x_record' x.shape)<line_sep>#print('bias',data1['inter_gradient_record'])
#print('constant',data1['intra_record'])
#x = range(10000)
#ax = plt.axes(yscale='log')
#ax.plot(x,data,'b')
#plt.show('loss')
|
<import_stmt>torch<import_from_stmt>kornia.losses ssim<as>dssim<import_from_stmt>lpips_pytorch LPIPS<line_sep>lpips_fn=LPIPS(net_type='alex' version='0.1')<line_sep>lpips_fn.eval()<def_stmt>mse image_pred image_gt valid_mask=<none> reduction='mean'<block_start>value=(image_pred-image_gt)<power>2<if_stmt>valid_mask<is><not><none><block_start>value=value[valid_mask]<block_end><if_stmt>reduction<eq>'mean'<block_start><return>torch.mean(value)<block_end><return>value<block_end><def_stmt>psnr image_pred image_gt valid_mask=<none> reduction='mean'<block_start>image_pred=image_pred/2+0.5<line_sep>image_gt=image_gt/2+0.5<line_sep><return>-10<times>torch.log10(mse(image_pred image_gt valid_mask reduction))<block_end><def_stmt>ssim image_pred image_gt reduction='mean'<block_start>image_pred=image_pred/2+0.5<line_sep>image_gt=image_gt/2+0.5<line_sep>dssim_=dssim(image_pred image_gt 3 reduction)# dissimilarity in [0, 1]
<return>1-2<times>dssim_<block_end># in [-1, 1]
<def_stmt>lpips image_pred image_gt device='cpu'<block_start>lpips_fn.to(device)<with_stmt>torch.no_grad()<block_start>lpips_=lpips_fn(image_pred image_gt)<block_end><return>lpips_.mean().item()<block_end> |
<import_stmt>socket<def_stmt>is_connectable host port<block_start>sock=<none><try_stmt><block_start>sock=socket.create_connection((host port) 1)<line_sep>result=<true><block_end><except_stmt>socket.error<block_start>result=<false><block_end><finally_stmt><block_start><if_stmt>sock<block_start>sock.close()<block_end><block_end><return>result<block_end> |
"""pypyr step that appends items to a mutable sequence, such as a list."""<import_stmt>logging<import_from_stmt>pypyr.utils.asserts assert_key_exists assert_key_is_truthy<line_sep>logger=logging.getLogger(__name__)<def_stmt>run_step context<block_start>"""Append item to a mutable sequence.
Expects input:
append:
list (list or str): Add addMe to this mutable sequence.
addMe (any): Add this item to the list.
unpack (bool): Optional. Defaults False. If True, enumerate addMe
and append each item individually.
If append.list is a str, it refers to a key in context which contains a
list, e.g context['my_list'] = [1, 2, 3]. If no such key exists, will
create a list with that name and add addMe as the 1st item on the new list.
This is an append, not an extend, unless append.unpack = True.
If you want to add to a set, use pypyr.steps.add instead.
Args:
context (pypyr.context.Context): Mandatory. Context is a dictionary or
dictionary-like.
Context must contain key 'append'
"""<line_sep>logger.debug("started")<line_sep>context.assert_key_has_value(key='append' caller=__name__)<line_sep>step_input=context.get_formatted('append')<line_sep>assert_key_is_truthy(obj=step_input key='list' caller=__name__ parent='append')<line_sep>assert_key_exists(obj=step_input key='addMe' caller=__name__ parent='append')<line_sep>lst=step_input['list']<line_sep>add_me=step_input['addMe']<line_sep>is_extend=step_input.get('unpack' <false>)<line_sep># str value means referring to a key in context rather than list instance
<if_stmt>isinstance(lst str)<block_start>existing_sequence=context.get(lst <none>)<if_stmt>existing_sequence<block_start>append_or_extend_list(existing_sequence add_me is_extend)<block_end><else_stmt># list(x) works only if x is iterable, [x] works when x != iterable
<block_start>context[lst]=list(add_me)<if>is_extend<else>[add_me]<block_end><block_end><else_stmt># anything that supports append: list, deque, array... if not is_extend
<block_start>append_or_extend_list(lst add_me is_extend)<block_end>logger.debug("started")<block_end><def_stmt>append_or_extend_list lst add_me is_extend<block_start>"""Append or extend list.
Args:
lst (list-like): The list to append/extend
add_me (any): Item(s) to append/extend to lst
is_extend (bool): If True does extend rather than append.
Returns: None
"""<if_stmt>is_extend<block_start>lst.extend(add_me)<block_end><else_stmt><block_start>lst.append(add_me)<block_end><block_end> |
<try_stmt># file is created during dvc build
<block_start><import_from_stmt>.build PKG# noqa, pylint:disable=unused-import
<block_end><except_stmt>ImportError<block_start>PKG=<none><block_end># type: ignore[assignment]
|
<import_stmt>psutil<import_stmt>os<import_stmt>subprocess<import_stmt>logging<def_stmt>simple_bind_cpus rank num_partition logical=<false><block_start>pid=os.getpid()<line_sep>p=psutil.Process(pid)<line_sep>cpu_count=psutil.cpu_count(logical=logical)<line_sep>cpu_count_per_worker=cpu_count<floordiv>num_partition<line_sep>cpu_list=list(range(rank<times>cpu_count_per_worker (rank+1)<times>cpu_count_per_worker))<line_sep>print("bind cpu list:{}".format(cpu_list))<line_sep>p.cpu_affinity(cpu_list)<line_sep>logging.info("rank: {}, pid:{}, affinity to cpu {}".format(rank pid cpu_list))<block_end><def_stmt>simple_bind_cpus_with_superthread rank num_partition<block_start>pid=os.getpid()<line_sep>p=psutil.Process(pid)<line_sep>phy_cpu_count=psutil.cpu_count(logical=<false>)<line_sep>cpu_count_per_worker=phy_cpu_count<floordiv>num_partition<line_sep>cpu_list=list(range(rank<times>cpu_count_per_worker (rank+1)<times>cpu_count_per_worker))<line_sep>cpu_list<augadd>list(range(phy_cpu_count+rank<times>cpu_count_per_worker phy_cpu_count+(rank+1)<times>cpu_count_per_worker))<line_sep>p.cpu_affinity(cpu_list)<line_sep>logging.info("rank: {}, pid:{}, affinity to cpu {}".format(rank pid cpu_list))<block_end><def_stmt>bind_cpus_with_list cpu_list<block_start>pid=os.getpid()<line_sep>p=psutil.Process(pid)<line_sep>p.cpu_affinity(cpu_list)<line_sep>logging.info("pid:{}, affinity to cpu {}".format(pid cpu_list))<block_end><def_stmt>bind_cpus_on_ecos rank num_partition<block_start>pid=os.getpid()<line_sep>p=psutil.Process(pid)<line_sep>allowed_list=cpu_allowed_list()<if_stmt>rank<eq>0<block_start>print("cpu allowed list len:{}, {}".format(len(allowed_list) allowed_list))<block_end>cpu_count_per_worker=len(allowed_list)<floordiv>num_partition<line_sep>cpu_list=allowed_list[int(rank<times>cpu_count_per_worker):int((rank+1)<times>cpu_count_per_worker)]<line_sep>p.cpu_affinity(cpu_list)<line_sep>logging.info("rank: {}, pid:{}, affinity to cpu {}".format(rank pid 
cpu_list))<block_end><def_stmt>cpu_allowed_list <block_start>byte_info=subprocess.check_output("cat /proc/$$/status|grep Cpus_allowed_list|awk '{print $2}'" shell=<true>)<line_sep>cpu_list=byte_info.decode("utf-8").replace("\n" "").split(",")<line_sep>allowed_list=[]<for_stmt>item cpu_list<block_start>ranges=[int(cpuid)<for>cpuid item.split('-')]<if_stmt>len(ranges)<eq>1<block_start>allowed_list.append(ranges[0])<block_end><else_stmt><block_start>allowed_list<augadd>list(range(ranges[0] ranges[1]+1))<block_end><block_end><return>allowed_list<block_end> |
<import_from_stmt>rosetta.text.api *<line_sep> |
<import_stmt>datetime<import_stmt>re<import_stmt>time<import_from_stmt>collections namedtuple<import_from_stmt>django.conf settings<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>trello ResourceUnavailable TrelloClient<import_from_stmt>core.models Event<line_sep># Create new command
<class_stmt>Command(BaseCommand)<block_start>help='Syncs event in trello board. Need a token.'<line_sep>missing_args_message=('You need to add a token! Get one here: '<concat>'https://trello.com/1/authorize?key=01ab0348ca020573e7f728ae7400928a&scope=read%2Cwrite&'<concat>'name=My+Application&expiration=1hour&response_type=token')<def_stmt>add_arguments self parser<block_start>parser.add_argument('trello_token' type=str)<block_end><def_stmt>handle self *args **options<block_start>token=options['trello_token']<line_sep>events=event_list()<line_sep>sync(events token)<block_end><block_end># Get data
EventTuple=namedtuple('EventTuple' 'name id city date')<def_stmt>event_list <block_start>event=Event.objects.all()<line_sep>result=[]<for_stmt>e event<block_start>name=e.name<line_sep>_id=str(e.pk)<line_sep>city=e.city<line_sep>date=datetime.date(e.date.year e.date.month e.date.day<or>1)<line_sep>result.append(EventTuple(name _id city date))<block_end><return>result<block_end># Sync to trello
ADMIN_BASE_URL='https://djangogirls.org/admin/core/event/'<def_stmt>sync events token<block_start>trello=TrelloClient(api_key=settings.TRELLO_API_KEY token=token)<line_sep>board=trello.get_board('55f7167c46760fcb5d68b385')<line_sep>far_away,less_2_months,less_1_month,less_1_week,today,past=board.all_lists()<line_sep>all_cards={card_id(c):c<for>c board.all_cards()}<line_sep>date_today=datetime.date.today()<for_stmt>e events<block_start>card=all_cards.get(e.id)<if_stmt><not>card<block_start>card=create_card(e far_away)<line_sep>create_checklist(card)<block_end># fetch card to get due date
<try_stmt><block_start>card.fetch()<block_end><except_stmt>ResourceUnavailable<block_start>print("Oopsie: too many requests! Let's wait 10 seconds!")<line_sep>time.sleep(10)<line_sep>card.fetch()<block_end><if_stmt>e.date<ne>card.due_date.date()<block_start>print('Changing due date of {} to {}'.format(e.city e.date))<line_sep>card.set_due(e.date)<block_end>distance=(e.date-date_today).days<if_stmt>distance<l>0<block_start>right_list=past<block_end><elif_stmt>distance<eq>0<block_start>right_list=today<block_end><elif_stmt>distance<l>7<block_start>right_list=less_1_week<block_end><elif_stmt>distance<l>30<block_start>right_list=less_1_month<block_end><elif_stmt>distance<l>60<block_start>right_list=less_2_months<block_end><else_stmt><block_start>right_list=far_away<block_end>ensure_card_in_list(card right_list)<block_end><block_end><def_stmt>card_id card<block_start>m=re.search(ADMIN_BASE_URL+r'(\d+)' card.desc)<line_sep><return>m.group(1)<block_end><def_stmt>create_card event list<block_start>print('Creating card {} ({})'.format(event.city event.date.isoformat()))<line_sep><return>list.add_card(name=event.city desc=ADMIN_BASE_URL+event.id due=event.date.isoformat())<block_end><def_stmt>create_checklist card<block_start>card.add_checklist("Things to do:" ["2 month check" "1 month check" "Thank you email and request for stats" "Stats obtained"])<block_end><def_stmt>ensure_checklist_in_card card<block_start><if_stmt><not>card.checklists<block_start>print("Adding checklist to {} card.".format(card.name))<line_sep>create_checklist(card)<block_end><block_end><def_stmt>ensure_card_in_list card list<block_start><if_stmt>card.list_id<ne>list.id<block_start>print('Moving {} to {}'.format(card.name list.name))<line_sep>card.change_list(list.id)<block_end><block_end> |
<import_stmt>os<line_sep>os.environ['CUDA_VISIBLE_DEVICES']='0'<import_stmt>tensorflow<as>tf<import_stmt>collections<import_stmt>re<import_stmt>random<def_stmt>get_assignment_map_from_checkpoint tvars init_checkpoint<block_start>"""Compute the union of the current variables and checkpoint variables."""<line_sep>assignment_map={}<line_sep>initialized_variable_names={}<line_sep>name_to_variable=collections.OrderedDict()<for_stmt>var tvars<block_start>name=var.name<line_sep>m=re.match('^(.*):\\d+$' name)<if_stmt>m<is><not><none><block_start>name=m.group(1)<block_end>name_to_variable[name]=var<block_end>init_vars=tf.train.list_variables(init_checkpoint)<line_sep>assignment_map=collections.OrderedDict()<for_stmt>x init_vars<block_start>(name var)=(x[0] x[1])<if_stmt>name<not><in>name_to_variable<block_start><continue><block_end>assignment_map[name]=name<line_sep>assignment_map[name]=name_to_variable[name]<line_sep>initialized_variable_names[name]=1<line_sep>initialized_variable_names[name+':0']=1<block_end>tf.logging.info('**** Trainable Variables ****')<for_stmt>var tvars<block_start>init_string=''<if_stmt>var.name<in>initialized_variable_names<block_start>init_string=', *INIT_FROM_CKPT*'<block_end>tf.logging.info(' name = %s, shape = %s%s' var.name var.shape init_string)<block_end><return>(assignment_map initialized_variable_names)<block_end><import_stmt>malaya_speech.train<as>train<import_stmt>malaya_speech<import_from_stmt>glob glob<import_stmt>librosa<import_stmt>numpy<as>np<def_stmt>lin_spectogram_from_wav wav hop_length win_length n_fft=1024<block_start>linear=librosa.stft(wav n_fft=n_fft win_length=win_length hop_length=hop_length)<line_sep># linear spectrogram
<return>linear.T<block_end><def_stmt>load_data wav win_length=400 sr=16000 hop_length=24 n_fft=512 spec_len=100 mode='train' <block_start>linear_spect=lin_spectogram_from_wav(wav hop_length win_length n_fft)<line_sep>mag,_=librosa.magphase(linear_spect)# magnitude
mag_T=mag.T<line_sep>freq,time=mag_T.shape<if_stmt>mode<eq>'train'<block_start><if_stmt>time<l>spec_len<block_start>spec_mag=np.pad(mag_T ((0 0) (0 spec_len-time)) 'constant')<block_end><else_stmt><block_start>spec_mag=mag_T<block_end><block_end><else_stmt><block_start>spec_mag=mag_T<block_end># preprocessing, subtract mean, divided by time-wise var
mu=np.mean(spec_mag 0 keepdims=<true>)<line_sep>std=np.std(spec_mag 0 keepdims=<true>)<line_sep><return>(spec_mag-mu)/(std+1e-5)<block_end>n_mels=257<def_stmt>calc v<block_start>r=load_data(v mode='train')<line_sep><return>r<block_end><def_stmt>preprocess_inputs example<block_start>s=tf.compat.v1.numpy_function(calc [example['waveforms']] tf.float32)<line_sep>s=tf.reshape(s (n_mels -1 1))<line_sep>example['inputs']=s<line_sep><return>example<block_end><def_stmt>parse serialized_example<block_start>data_fields={'waveforms':tf.VarLenFeature(tf.float32) 'targets':tf.VarLenFeature(tf.int64) }<line_sep>features=tf.parse_single_example(serialized_example features=data_fields)<for_stmt>k features.keys()<block_start>features[k]=features[k].values<block_end>features=preprocess_inputs(features)<line_sep>keys=list(features.keys())<for_stmt>k keys<block_start><if_stmt>k<not><in>['inputs' 'targets']<block_start>features.pop(k <none>)<block_end><block_end><return>features<block_end><def_stmt>get_dataset files batch_size=16 shuffle_size=5 thread_count=24<block_start><def_stmt>get <block_start>dataset=tf.data.TFRecordDataset(files)<line_sep>dataset=dataset.map(parse num_parallel_calls=thread_count)<line_sep>dataset=dataset.padded_batch(batch_size padded_shapes={'inputs':tf.TensorShape([n_mels <none> 1]) 'targets':tf.TensorShape([<none>]) } padding_values={'inputs':tf.constant(0 dtype=tf.float32) 'targets':tf.constant(0 dtype=tf.int64) } )<line_sep>dataset=dataset.shuffle(shuffle_size)<line_sep>dataset=dataset.repeat()<line_sep><return>dataset<block_end><return>get<block_end><import_stmt>tf_slim<as>slim<import_stmt>inception_utils<def_stmt>block_inception_a inputs scope=<none> reuse=<none><block_start>"""Builds Inception-A block for Inception v4 network."""<line_sep># By default use stride=1 and SAME padding
<with_stmt>slim.arg_scope([slim.conv2d slim.avg_pool2d slim.max_pool2d] stride=1 padding='SAME' )<block_start><with_stmt>tf.variable_scope(scope 'BlockInceptionA' [inputs] reuse=reuse)<block_start><with_stmt>tf.variable_scope('Branch_0')<block_start>branch_0=slim.conv2d(inputs 96 [1 1] scope='Conv2d_0a_1x1')<block_end><with_stmt>tf.variable_scope('Branch_1')<block_start>branch_1=slim.conv2d(inputs 64 [1 1] scope='Conv2d_0a_1x1')<line_sep>branch_1=slim.conv2d(branch_1 96 [3 3] scope='Conv2d_0b_3x3')<block_end><with_stmt>tf.variable_scope('Branch_2')<block_start>branch_2=slim.conv2d(inputs 64 [1 1] scope='Conv2d_0a_1x1')<line_sep>branch_2=slim.conv2d(branch_2 96 [3 3] scope='Conv2d_0b_3x3')<line_sep>branch_2=slim.conv2d(branch_2 96 [3 3] scope='Conv2d_0c_3x3')<block_end><with_stmt>tf.variable_scope('Branch_3')<block_start>branch_3=slim.avg_pool2d(inputs [3 3] scope='AvgPool_0a_3x3')<line_sep>branch_3=slim.conv2d(branch_3 96 [1 1] scope='Conv2d_0b_1x1')<block_end><return>tf.concat(axis=3 values=[branch_0 branch_1 branch_2 branch_3])<block_end><block_end><block_end><def_stmt>block_reduction_a inputs scope=<none> reuse=<none><block_start>"""Builds Reduction-A block for Inception v4 network."""<line_sep># By default use stride=1 and SAME padding
<with_stmt>slim.arg_scope([slim.conv2d slim.avg_pool2d slim.max_pool2d] stride=1 padding='SAME' )<block_start><with_stmt>tf.variable_scope(scope 'BlockReductionA' [inputs] reuse=reuse)<block_start><with_stmt>tf.variable_scope('Branch_0')<block_start>branch_0=slim.conv2d(inputs 384 [3 3] stride=2 padding='VALID' scope='Conv2d_1a_3x3' )<block_end><with_stmt>tf.variable_scope('Branch_1')<block_start>branch_1=slim.conv2d(inputs 192 [1 1] scope='Conv2d_0a_1x1')<line_sep>branch_1=slim.conv2d(branch_1 224 [3 3] scope='Conv2d_0b_3x3')<line_sep>branch_1=slim.conv2d(branch_1 256 [3 3] stride=2 padding='VALID' scope='Conv2d_1a_3x3' )<block_end><with_stmt>tf.variable_scope('Branch_2')<block_start>branch_2=slim.max_pool2d(inputs [3 3] stride=2 padding='VALID' scope='MaxPool_1a_3x3' )<block_end><return>tf.concat(axis=3 values=[branch_0 branch_1 branch_2])<block_end><block_end><block_end><def_stmt>block_inception_b inputs scope=<none> reuse=<none><block_start>"""Builds Inception-B block for Inception v4 network."""<line_sep># By default use stride=1 and SAME padding
<with_stmt>slim.arg_scope([slim.conv2d slim.avg_pool2d slim.max_pool2d] stride=1 padding='SAME' )<block_start><with_stmt>tf.variable_scope(scope 'BlockInceptionB' [inputs] reuse=reuse)<block_start><with_stmt>tf.variable_scope('Branch_0')<block_start>branch_0=slim.conv2d(inputs 384 [1 1] scope='Conv2d_0a_1x1')<block_end><with_stmt>tf.variable_scope('Branch_1')<block_start>branch_1=slim.conv2d(inputs 192 [1 1] scope='Conv2d_0a_1x1')<line_sep>branch_1=slim.conv2d(branch_1 224 [1 7] scope='Conv2d_0b_1x7')<line_sep>branch_1=slim.conv2d(branch_1 256 [7 1] scope='Conv2d_0c_7x1')<block_end><with_stmt>tf.variable_scope('Branch_2')<block_start>branch_2=slim.conv2d(inputs 192 [1 1] scope='Conv2d_0a_1x1')<line_sep>branch_2=slim.conv2d(branch_2 192 [7 1] scope='Conv2d_0b_7x1')<line_sep>branch_2=slim.conv2d(branch_2 224 [1 7] scope='Conv2d_0c_1x7')<line_sep>branch_2=slim.conv2d(branch_2 224 [7 1] scope='Conv2d_0d_7x1')<line_sep>branch_2=slim.conv2d(branch_2 256 [1 7] scope='Conv2d_0e_1x7')<block_end><with_stmt>tf.variable_scope('Branch_3')<block_start>branch_3=slim.avg_pool2d(inputs [3 3] scope='AvgPool_0a_3x3')<line_sep>branch_3=slim.conv2d(branch_3 128 [1 1] scope='Conv2d_0b_1x1')<block_end><return>tf.concat(axis=3 values=[branch_0 branch_1 branch_2 branch_3])<block_end><block_end><block_end><def_stmt>block_reduction_b inputs scope=<none> reuse=<none><block_start>"""Builds Reduction-B block for Inception v4 network."""<line_sep># By default use stride=1 and SAME padding
<with_stmt>slim.arg_scope([slim.conv2d slim.avg_pool2d slim.max_pool2d] stride=1 padding='SAME' )<block_start><with_stmt>tf.variable_scope(scope 'BlockReductionB' [inputs] reuse=reuse)<block_start><with_stmt>tf.variable_scope('Branch_0')<block_start>branch_0=slim.conv2d(inputs 192 [1 1] scope='Conv2d_0a_1x1')<line_sep>branch_0=slim.conv2d(branch_0 192 [3 3] stride=2 padding='VALID' scope='Conv2d_1a_3x3' )<block_end><with_stmt>tf.variable_scope('Branch_1')<block_start>branch_1=slim.conv2d(inputs 256 [1 1] scope='Conv2d_0a_1x1')<line_sep>branch_1=slim.conv2d(branch_1 256 [1 7] scope='Conv2d_0b_1x7')<line_sep>branch_1=slim.conv2d(branch_1 320 [7 1] scope='Conv2d_0c_7x1')<line_sep>branch_1=slim.conv2d(branch_1 320 [3 3] stride=2 padding='VALID' scope='Conv2d_1a_3x3' )<block_end><with_stmt>tf.variable_scope('Branch_2')<block_start>branch_2=slim.max_pool2d(inputs [3 3] stride=2 padding='VALID' scope='MaxPool_1a_3x3' )<block_end><return>tf.concat(axis=3 values=[branch_0 branch_1 branch_2])<block_end><block_end><block_end><def_stmt>block_inception_c inputs scope=<none> reuse=<none><block_start>"""Builds Inception-C block for Inception v4 network."""<line_sep># By default use stride=1 and SAME padding
<with_stmt>slim.arg_scope([slim.conv2d slim.avg_pool2d slim.max_pool2d] stride=1 padding='SAME' )<block_start><with_stmt>tf.variable_scope(scope 'BlockInceptionC' [inputs] reuse=reuse)<block_start><with_stmt>tf.variable_scope('Branch_0')<block_start>branch_0=slim.conv2d(inputs 256 [1 1] scope='Conv2d_0a_1x1')<block_end><with_stmt>tf.variable_scope('Branch_1')<block_start>branch_1=slim.conv2d(inputs 384 [1 1] scope='Conv2d_0a_1x1')<line_sep>branch_1=tf.concat(axis=3 values=[slim.conv2d(branch_1 256 [1 3] scope='Conv2d_0b_1x3') slim.conv2d(branch_1 256 [3 1] scope='Conv2d_0c_3x1') ] )<block_end><with_stmt>tf.variable_scope('Branch_2')<block_start>branch_2=slim.conv2d(inputs 384 [1 1] scope='Conv2d_0a_1x1')<line_sep>branch_2=slim.conv2d(branch_2 448 [3 1] scope='Conv2d_0b_3x1')<line_sep>branch_2=slim.conv2d(branch_2 512 [1 3] scope='Conv2d_0c_1x3')<line_sep>branch_2=tf.concat(axis=3 values=[slim.conv2d(branch_2 256 [1 3] scope='Conv2d_0d_1x3') slim.conv2d(branch_2 256 [3 1] scope='Conv2d_0e_3x1') ] )<block_end><with_stmt>tf.variable_scope('Branch_3')<block_start>branch_3=slim.avg_pool2d(inputs [3 3] scope='AvgPool_0a_3x3')<line_sep>branch_3=slim.conv2d(branch_3 256 [1 1] scope='Conv2d_0b_1x1')<block_end><return>tf.concat(axis=3 values=[branch_0 branch_1 branch_2 branch_3])<block_end><block_end><block_end><def_stmt>inception_v4_base inputs final_endpoint='Mixed_7d' scope=<none><block_start>"""Creates the Inception V4 network up to the given final endpoint.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
final_endpoint: specifies the endpoint to construct the network up to.
It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
'Mixed_7d']
scope: Optional variable_scope.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
"""<line_sep>end_points={}<def_stmt>add_and_check_final name net<block_start>end_points[name]=net<line_sep><return>name<eq>final_endpoint<block_end><with_stmt>tf.variable_scope(scope 'InceptionV4' [inputs])<block_start><with_stmt>slim.arg_scope([slim.conv2d slim.max_pool2d slim.avg_pool2d] stride=1 padding='SAME' )# 299 x 299 x 3
<block_start>net=slim.conv2d(inputs 32 [3 3] stride=2 padding='VALID' scope='Conv2d_1a_3x3' )<if_stmt>add_and_check_final('Conv2d_1a_3x3' net)<block_start><return>net end_points<block_end># 149 x 149 x 32
net=slim.conv2d(net 32 [3 3] padding='VALID' scope='Conv2d_2a_3x3')<if_stmt>add_and_check_final('Conv2d_2a_3x3' net)<block_start><return>net end_points<block_end># 147 x 147 x 32
net=slim.conv2d(net 64 [3 3] scope='Conv2d_2b_3x3')<if_stmt>add_and_check_final('Conv2d_2b_3x3' net)<block_start><return>net end_points<block_end># 147 x 147 x 64
<with_stmt>tf.variable_scope('Mixed_3a')<block_start><with_stmt>tf.variable_scope('Branch_0')<block_start>branch_0=slim.max_pool2d(net [3 3] stride=2 padding='VALID' scope='MaxPool_0a_3x3' )<block_end><with_stmt>tf.variable_scope('Branch_1')<block_start>branch_1=slim.conv2d(net 96 [3 3] stride=2 padding='VALID' scope='Conv2d_0a_3x3' )<block_end>net=tf.concat(axis=3 values=[branch_0 branch_1])<if_stmt>add_and_check_final('Mixed_3a' net)<block_start><return>net end_points<block_end><block_end># 73 x 73 x 160
<with_stmt>tf.variable_scope('Mixed_4a')<block_start><with_stmt>tf.variable_scope('Branch_0')<block_start>branch_0=slim.conv2d(net 64 [1 1] scope='Conv2d_0a_1x1')<line_sep>branch_0=slim.conv2d(branch_0 96 [3 3] padding='VALID' scope='Conv2d_1a_3x3' )<block_end><with_stmt>tf.variable_scope('Branch_1')<block_start>branch_1=slim.conv2d(net 64 [1 1] scope='Conv2d_0a_1x1')<line_sep>branch_1=slim.conv2d(branch_1 64 [1 7] scope='Conv2d_0b_1x7')<line_sep>branch_1=slim.conv2d(branch_1 64 [7 1] scope='Conv2d_0c_7x1')<line_sep>branch_1=slim.conv2d(branch_1 96 [3 3] padding='VALID' scope='Conv2d_1a_3x3' )<block_end>net=tf.concat(axis=3 values=[branch_0 branch_1])<if_stmt>add_and_check_final('Mixed_4a' net)<block_start><return>net end_points<block_end><block_end># 71 x 71 x 192
<with_stmt>tf.variable_scope('Mixed_5a')<block_start><with_stmt>tf.variable_scope('Branch_0')<block_start>branch_0=slim.conv2d(net 192 [3 3] stride=2 padding='VALID' scope='Conv2d_1a_3x3' )<block_end><with_stmt>tf.variable_scope('Branch_1')<block_start>branch_1=slim.max_pool2d(net [3 3] stride=2 padding='VALID' scope='MaxPool_1a_3x3' )<block_end>net=tf.concat(axis=3 values=[branch_0 branch_1])<if_stmt>add_and_check_final('Mixed_5a' net)<block_start><return>net end_points<block_end><block_end># 35 x 35 x 384
# 4 x Inception-A blocks
<for_stmt>idx range(4)<block_start>block_scope='Mixed_5'+chr(ord('b')+idx)<line_sep>net=block_inception_a(net block_scope)<if_stmt>add_and_check_final(block_scope net)<block_start><return>net end_points<block_end><block_end># 35 x 35 x 384
# Reduction-A block
net=block_reduction_a(net 'Mixed_6a')<if_stmt>add_and_check_final('Mixed_6a' net)<block_start><return>net end_points<block_end># 17 x 17 x 1024
# 7 x Inception-B blocks
<for_stmt>idx range(7)<block_start>block_scope='Mixed_6'+chr(ord('b')+idx)<line_sep>net=block_inception_b(net block_scope)<if_stmt>add_and_check_final(block_scope net)<block_start><return>net end_points<block_end><block_end># 17 x 17 x 1024
# Reduction-B block
net=block_reduction_b(net 'Mixed_7a')<if_stmt>add_and_check_final('Mixed_7a' net)<block_start><return>net end_points<block_end># 8 x 8 x 1536
# 3 x Inception-C blocks
<for_stmt>idx range(3)<block_start>block_scope='Mixed_7'+chr(ord('b')+idx)<line_sep>net=block_inception_c(net block_scope)<if_stmt>add_and_check_final(block_scope net)<block_start><return>net end_points<block_end><block_end><block_end><block_end><raise>ValueError('Unknown final endpoint %s'%final_endpoint)<block_end><def_stmt>model inputs is_training=<true> dropout_keep_prob=0.8 reuse=<none> scope='InceptionV4' bottleneck_dim=512 # inputs = tf.image.grayscale_to_rgb(inputs)
<block_start><with_stmt>tf.variable_scope(scope 'InceptionV4' [inputs] reuse=reuse)<as>scope<block_start><with_stmt>slim.arg_scope([slim.batch_norm slim.dropout] is_training=is_training)<block_start>net,end_points=inception_v4_base(inputs scope=scope)<line_sep>print(net.shape)<with_stmt>slim.arg_scope([slim.conv2d slim.max_pool2d slim.avg_pool2d] stride=1 padding='SAME' )<block_start><with_stmt>tf.variable_scope('Logits')# 8 x 8 x 1536
<block_start>kernel_size=net.get_shape()[1:3]<line_sep>print(kernel_size)<if_stmt>kernel_size.is_fully_defined()<block_start>net=slim.avg_pool2d(net kernel_size padding='VALID' scope='AvgPool_1a' )<block_end><else_stmt><block_start>net=tf.reduce_mean(input_tensor=net axis=[1 2] keepdims=<true> name='global_pool' )<block_end>end_points['global_pool']=net<line_sep># 1 x 1 x 1536
net=slim.dropout(net dropout_keep_prob scope='Dropout_1b')<line_sep>net=slim.flatten(net scope='PreLogitsFlatten')<line_sep>end_points['PreLogitsFlatten']=net<line_sep>bottleneck=slim.fully_connected(net bottleneck_dim scope='bottleneck')<line_sep>logits=slim.fully_connected(bottleneck 2 activation_fn=<none> scope='Logits_vad' )<line_sep><return>logits<block_end><block_end><block_end><block_end><block_end>init_lr=1e-3<line_sep>epochs=300000<line_sep>init_checkpoint='output-inception-v4/model.ckpt-401000'<def_stmt>model_fn features labels mode params<block_start>Y=tf.cast(features['targets'][: 0] tf.int32)<with_stmt>slim.arg_scope(inception_utils.inception_arg_scope())<block_start>logits=model(features['inputs'])<block_end>loss=tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits labels=Y))<line_sep>tf.identity(loss 'train_loss')<line_sep>accuracy=tf.metrics.accuracy(labels=Y predictions=tf.argmax(logits axis=1))<line_sep>tf.identity(accuracy[1] name='train_accuracy')<line_sep>variables=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)<line_sep>assignment_map,initialized_variable_names=get_assignment_map_from_checkpoint(variables init_checkpoint)<line_sep>tf.train.init_from_checkpoint(init_checkpoint assignment_map)<if_stmt>mode<eq>tf.estimator.ModeKeys.TRAIN<block_start>global_step=tf.train.get_or_create_global_step()<line_sep>learning_rate=tf.constant(value=init_lr shape=[] dtype=tf.float32)<line_sep>learning_rate=tf.train.polynomial_decay(learning_rate global_step epochs end_learning_rate=0.00001 power=1.0 cycle=<false> )<line_sep>optimizer=tf.train.RMSPropOptimizer(learning_rate decay=0.9 momentum=0.9 epsilon=1.0)<line_sep>train_op=optimizer.minimize(loss global_step=global_step)<line_sep>estimator_spec=tf.estimator.EstimatorSpec(mode=mode loss=loss train_op=train_op)<block_end><elif_stmt>mode<eq>tf.estimator.ModeKeys.EVAL<block_start>estimator_spec=tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.EVAL loss=loss 
eval_metric_ops={'accuracy':accuracy} )<block_end><return>estimator_spec<block_end>train_hooks=[tf.train.LoggingTensorHook(['train_accuracy' 'train_loss'] every_n_iter=1)]<line_sep>train_files=glob('vad2/data/vad-train-*')+glob('noise/data/vad-train-*')<line_sep>train_dataset=get_dataset(train_files batch_size=32)<line_sep>dev_files=glob('vad2/data/vad-dev-*')+glob('noise/data/vad-dev-*')<line_sep>dev_dataset=get_dataset(dev_files batch_size=16)<line_sep>save_directory='output-inception-v4-vad'<line_sep>train.run_training(train_fn=train_dataset model_fn=model_fn model_dir=save_directory num_gpus=1 log_step=1 save_checkpoint_step=25000 max_steps=epochs eval_fn=dev_dataset train_hooks=train_hooks )<line_sep> |
<import_from_stmt>slack_sdk WebClient<import_from_stmt>slack_bolt.app.app SlackAppDevelopmentServer App<import_from_stmt>tests.mock_web_api_server setup_mock_web_api_server cleanup_mock_web_api_server <import_from_stmt>tests.utils remove_os_env_temporarily restore_os_env<class_stmt>TestDevServer<block_start>signing_secret="secret"<line_sep>valid_token="<PASSWORD>"<line_sep>mock_api_server_base_url="http://localhost:8888"<line_sep>web_client=WebClient(token=valid_token base_url=mock_api_server_base_url )<def_stmt>setup_method self<block_start>self.old_os_env=remove_os_env_temporarily()<line_sep>setup_mock_web_api_server(self)<block_end><def_stmt>teardown_method self<block_start>cleanup_mock_web_api_server(self)<line_sep>restore_os_env(self.old_os_env)<block_end><def_stmt>test_instance self<block_start>server=SlackAppDevelopmentServer(port=3001 path="/slack/events" app=App(signing_secret=self.signing_secret client=self.web_client) )<assert_stmt>server<is><not><none><block_end><block_end> |
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_from_stmt>metakernel Magic<class_stmt>SASsessionMagic(Magic)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super(SASsessionMagic self).__init__(*args **kwargs)<block_end><def_stmt>line_SASsession self *args<block_start>"""
SAS Kernel magic allows a programatic way to submit configuration
details.
This magic is only available within the SAS Kernel
"""<if_stmt>len(args)<g>1<block_start>args=''.join(args)<block_end><elif_stmt>len(args)<eq>1<block_start>args=''.join(args[0])<block_end>args=args.replace(' ' '')<line_sep>args=args.replace('"' '')<line_sep>args=args.replace("'" '')<line_sep>sess_params=dict(s.split('=')<for>s args.split(','))<line_sep>self.kernel._allow_stdin=<true><line_sep>self.kernel._start_sas(**sess_params)<block_end><block_end><def_stmt>register_magics kernel<block_start>kernel.register_magics(SASsessionMagic)<block_end><def_stmt>register_ipython_magics <block_start><import_from_stmt>metakernel IPythonKernel<import_from_stmt>IPython.core.magic register_line_magic<line_sep>kernel=IPythonKernel()<line_sep>magic=SASsessionMagic(kernel)<line_sep># Make magics callable:
kernel.line_magics["SASsession"]=magic<line_sep>@register_line_magic<def_stmt>SASsession line<block_start>kernel.call_magic("%SASsession "+line)<block_end><block_end> |
<import_from_future_stmt> absolute_import<import_stmt>logging<import_stmt>pytest<import_stmt>six<import_from_stmt>dbnd parameter task<import_from_stmt>dbnd._core.current try_get_current_task<import_from_stmt>dbnd._core.task_ctrl.task_ctrl TaskCtrl<import_from_stmt>targets.values ObjectValueType StrValueType<if_stmt>six.PY2<block_start><import_from_stmt>future.builtins *<line_sep>__future_module__=<true><block_end>py_2_only_import=pytest.importorskip("__builtin__")<line_sep>@task<def_stmt>task_with_str_param something=parameter(default=<none>)[str]<block_start>current_task=try_get_current_task()<line_sep>ctrl=current_task.ctrl# type: TaskCtrl
task_as_cmd_line=ctrl.task_repr.calculate_command_line_for_task()<line_sep>logging.info("Str type: %s, task repr: %s" type(str) task_as_cmd_line)<assert_stmt>"newstr.BaseNewStr"<in>str(type(str))<assert_stmt>"@"<not><in>task_as_cmd_line<line_sep><return>"task_with_str"<block_end>@task<def_stmt>task_with_object_param something=parameter(default=<none>)[object]<block_start>current_task=try_get_current_task()<line_sep>ctrl=current_task.ctrl# type: TaskCtrl
task_as_cmd_line=ctrl.task_repr.calculate_command_line_for_task()<line_sep>logging.info("Object type: %s, task repr: %s" type(object) task_as_cmd_line)<assert_stmt>"newobject"<in>object.__name__<assert_stmt>"@"<not><in>task_as_cmd_line<line_sep><return>"task_with_object_param"<block_end><class_stmt>TestPy3ObjectsBuiltins(object)<block_start><def_stmt>test_newstr_as_type self# future.builtins.str is actually "newstr",
# we want to check that correct value type is selected
<block_start><assert_stmt>"newstr"<in>repr(str)<line_sep>p=parameter(default=<none>)[str]<assert_stmt>isinstance(p.parameter.value_type StrValueType)<block_end><def_stmt>test_newstr_run self<block_start>a=task_with_str_param.dbnd_run(something="333")<line_sep>print(a.root_task.something)<block_end><def_stmt>test_object_as_type self# future.builtins.str is actually "newstr",
# we want to check that correct value type is selected
<block_start><assert_stmt>"newobject"<in>repr(object)<line_sep>p=parameter(default=<none>)[object]<assert_stmt>isinstance(p.parameter.value_type ObjectValueType)<block_end><def_stmt>test_object_run self<block_start>a=task_with_object_param.dbnd_run(something="333")<line_sep>print(a.root_task.something)<block_end><block_end> |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
<import_from_future_stmt> absolute_import<import_from_stmt>abc abstractmethod<import_from_stmt>pex.commands.command Command Result<import_from_stmt>pex.pex PEX<class_stmt>PEXCommand(Command)<block_start>@abstractmethod<def_stmt>run self pex# type: (PEX) -> Result
<block_start><raise>NotImplementedError()<block_end><block_end> |
# Copyright 2017, <NAME>, All rights reserved.
<import_stmt>json<import_from_stmt>urllib.parse quote<import_from_stmt>controller AutoQueuePattern<import_from_stmt>tests.integration.test_web.test_web_app BaseTestWebApp<class_stmt>TestAutoQueueHandler(BaseTestWebApp)<block_start><def_stmt>test_get self<block_start>self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="one"))<line_sep>self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="t wo"))<line_sep>self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="thr'ee"))<line_sep>self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="fo\"ur"))<line_sep>self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="fi%ve"))<line_sep>resp=self.test_app.get("/server/autoqueue/get")<line_sep>self.assertEqual(200 resp.status_int)<line_sep>json_list=json.loads(str(resp.html))<line_sep>self.assertEqual(5 len(json_list))<line_sep>self.assertIn({"pattern":"one"} json_list)<line_sep>self.assertIn({"pattern":"t wo"} json_list)<line_sep>self.assertIn({"pattern":"thr'ee"} json_list)<line_sep>self.assertIn({"pattern":"fo\"ur"} json_list)<line_sep>self.assertIn({"pattern":"fi%ve"} json_list)<block_end><def_stmt>test_get_is_ordered self<block_start>self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="a"))<line_sep>self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="b"))<line_sep>self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="c"))<line_sep>self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="d"))<line_sep>self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="e"))<line_sep>resp=self.test_app.get("/server/autoqueue/get")<line_sep>self.assertEqual(200 resp.status_int)<line_sep>json_list=json.loads(str(resp.html))<line_sep>self.assertEqual(5 len(json_list))<line_sep>self.assertEqual([{"pattern":"a"} {"pattern":"b"} {"pattern":"c"} {"pattern":"d"} {"pattern":"e"}] json_list)<block_end><def_stmt>test_add_good self<block_start>resp=self.test_app.get("/server/autoqueue/add/one")<line_sep>self.assertEqual(200 
resp.status_int)<line_sep>self.assertEqual(1 len(self.auto_queue_persist.patterns))<line_sep>self.assertIn(AutoQueuePattern("one") self.auto_queue_persist.patterns)<line_sep>uri=quote(quote("/value/with/slashes" safe="") safe="")<line_sep>resp=self.test_app.get("/server/autoqueue/add/"+uri)<line_sep>self.assertEqual(200 resp.status_int)<line_sep>self.assertEqual(2 len(self.auto_queue_persist.patterns))<line_sep>self.assertIn(AutoQueuePattern("/value/with/slashes") self.auto_queue_persist.patterns)<line_sep>uri=quote(quote(" value with spaces" safe="") safe="")<line_sep>resp=self.test_app.get("/server/autoqueue/add/"+uri)<line_sep>self.assertEqual(200 resp.status_int)<line_sep>self.assertEqual(3 len(self.auto_queue_persist.patterns))<line_sep>self.assertIn(AutoQueuePattern(" value with spaces") self.auto_queue_persist.patterns)<line_sep>uri=quote(quote("value'with'singlequote" safe="") safe="")<line_sep>resp=self.test_app.get("/server/autoqueue/add/"+uri)<line_sep>self.assertEqual(200 resp.status_int)<line_sep>self.assertEqual(4 len(self.auto_queue_persist.patterns))<line_sep>self.assertIn(AutoQueuePattern("value'with'singlequote") self.auto_queue_persist.patterns)<line_sep>uri=quote(quote("value\"with\"doublequote" safe="") safe="")<line_sep>resp=self.test_app.get("/server/autoqueue/add/"+uri)<line_sep>self.assertEqual(200 resp.status_int)<line_sep>self.assertEqual(5 len(self.auto_queue_persist.patterns))<line_sep>self.assertIn(AutoQueuePattern("value\"with\"doublequote") self.auto_queue_persist.patterns)<block_end><def_stmt>test_add_double self<block_start>resp=self.test_app.get("/server/autoqueue/add/one")<line_sep>self.assertEqual(200 resp.status_int)<line_sep>resp=self.test_app.get("/server/autoqueue/add/one" expect_errors=<true>)<line_sep>self.assertEqual(400 resp.status_int)<line_sep>self.assertEqual("Auto-queue pattern 'one' already exists." 
str(resp.html))<block_end><def_stmt>test_add_empty_value self<block_start>uri=quote(quote(" " safe="") safe="")<line_sep>resp=self.test_app.get("/server/autoqueue/add/"+uri expect_errors=<true>)<line_sep>self.assertEqual(400 resp.status_int)<line_sep>self.assertEqual(0 len(self.auto_queue_persist.patterns))<line_sep>resp=self.test_app.get("/server/autoqueue/add/" expect_errors=<true>)<line_sep>self.assertEqual(404 resp.status_int)<line_sep>self.assertEqual(0 len(self.auto_queue_persist.patterns))<block_end><def_stmt>test_remove_good self<block_start>self.auto_queue_persist.add_pattern(AutoQueuePattern("one"))<line_sep>self.auto_queue_persist.add_pattern(AutoQueuePattern("/value/with/slashes"))<line_sep>self.auto_queue_persist.add_pattern(AutoQueuePattern(" value with spaces"))<line_sep>self.auto_queue_persist.add_pattern(AutoQueuePattern("value'with'singlequote"))<line_sep>self.auto_queue_persist.add_pattern(AutoQueuePattern("value\"with\"doublequote"))<line_sep>resp=self.test_app.get("/server/autoqueue/remove/one")<line_sep>self.assertEqual(200 resp.status_int)<line_sep>self.assertEqual(4 len(self.auto_queue_persist.patterns))<line_sep>self.assertNotIn(AutoQueuePattern("one") self.auto_queue_persist.patterns)<line_sep>uri=quote(quote("/value/with/slashes" safe="") safe="")<line_sep>resp=self.test_app.get("/server/autoqueue/remove/"+uri)<line_sep>self.assertEqual(200 resp.status_int)<line_sep>self.assertEqual(3 len(self.auto_queue_persist.patterns))<line_sep>self.assertNotIn(AutoQueuePattern("/value/with/slashes") self.auto_queue_persist.patterns)<line_sep>uri=quote(quote(" value with spaces" safe="") safe="")<line_sep>resp=self.test_app.get("/server/autoqueue/remove/"+uri)<line_sep>self.assertEqual(200 resp.status_int)<line_sep>self.assertEqual(2 len(self.auto_queue_persist.patterns))<line_sep>self.assertNotIn(AutoQueuePattern(" value with spaces") self.auto_queue_persist.patterns)<line_sep>uri=quote(quote("value'with'singlequote" safe="") 
safe="")<line_sep>resp=self.test_app.get("/server/autoqueue/remove/"+uri)<line_sep>self.assertEqual(200 resp.status_int)<line_sep>self.assertEqual(1 len(self.auto_queue_persist.patterns))<line_sep>self.assertNotIn(AutoQueuePattern("value'with'singlequote") self.auto_queue_persist.patterns)<line_sep>uri=quote(quote("value\"with\"doublequote" safe="") safe="")<line_sep>resp=self.test_app.get("/server/autoqueue/remove/"+uri)<line_sep>self.assertEqual(200 resp.status_int)<line_sep>self.assertEqual(0 len(self.auto_queue_persist.patterns))<line_sep>self.assertNotIn(AutoQueuePattern("value\"with\"doublequote") self.auto_queue_persist.patterns)<block_end><def_stmt>test_remove_non_existing self<block_start>resp=self.test_app.get("/server/autoqueue/remove/one" expect_errors=<true>)<line_sep>self.assertEqual(400 resp.status_int)<line_sep>self.assertEqual("Auto-queue pattern 'one' doesn't exist." str(resp.html))<block_end><def_stmt>test_remove_empty_value self<block_start>uri=quote(quote(" " safe="") safe="")<line_sep>resp=self.test_app.get("/server/autoqueue/remove/"+uri expect_errors=<true>)<line_sep>self.assertEqual(400 resp.status_int)<line_sep>self.assertEqual("Auto-queue pattern ' ' doesn't exist." str(resp.html))<line_sep>self.assertEqual(0 len(self.auto_queue_persist.patterns))<line_sep>resp=self.test_app.get("/server/autoqueue/remove/" expect_errors=<true>)<line_sep>self.assertEqual(404 resp.status_int)<line_sep>self.assertEqual(0 len(self.auto_queue_persist.patterns))<block_end><block_end> |
<import_from_stmt>pysmt.shortcuts Symbol<import_from_stmt>pysmt.typing INT<line_sep>h=Symbol("H" INT)<line_sep>domain=(1<le>h)&(10<ge>h)<line_sep> |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
<import_from_stmt>knack.util CLIError<import_from_stmt>azure.cli.core.azclierror InvalidArgumentValueError ArgumentUsageError<import_from_stmt>azure.cli.core.util is_guid<import_from_stmt>azure.graphrbac.models GraphErrorException<import_from_stmt>msrestazure.azure_exceptions CloudError<import_from_stmt>.._client_factory cf_synapse_role_assignments cf_synapse_role_definitions cf_graph_client_factory<import_from_stmt>..constant ITEM_NAME_MAPPING<line_sep># List Synapse Role Assignment
<def_stmt>list_role_assignments cmd workspace_name role=<none> assignee=<none> assignee_object_id=<none> scope=<none> item=<none> item_type=<none><block_start><if_stmt>bool(assignee)<and>bool(assignee_object_id)<block_start><raise>ArgumentUsageError('usage error: --assignee STRING | --assignee-object-id GUID')<block_end><if_stmt>bool(item)<ne>bool(item_type)<block_start><raise>ArgumentUsageError('usage error: --item-type STRING --item STRING')<block_end><return>_list_role_assignments(cmd workspace_name role assignee<or>assignee_object_id scope resolve_assignee=(<not>assignee_object_id) item=item item_type=item_type)<block_end><def_stmt>_list_role_assignments cmd workspace_name role=<none> assignee=<none> scope=<none> resolve_assignee=<true> item=<none> item_type=<none><block_start>"""Prepare scope, role ID and resolve object ID from Graph API."""<if_stmt>any([scope item item_type])<block_start>scope=_build_role_scope(workspace_name scope item item_type)<block_end>role_id=_resolve_role_id(cmd role workspace_name)<line_sep>object_id=_resolve_object_id(cmd assignee fallback_to_object_id=<true>)<if>resolve_assignee<else>assignee<line_sep>client=cf_synapse_role_assignments(cmd.cli_ctx workspace_name)<line_sep>role_assignments=client.list_role_assignments(role_id object_id scope).value<line_sep><return>role_assignments<block_end># Show Synapse Role Assignment By Id
<def_stmt>get_role_assignment_by_id cmd workspace_name role_assignment_id<block_start>client=cf_synapse_role_assignments(cmd.cli_ctx workspace_name)<line_sep><return>client.get_role_assignment_by_id(role_assignment_id)<block_end># Delete Synapse Role Assignment
<def_stmt>delete_role_assignment cmd workspace_name ids=<none> assignee=<none> assignee_object_id=<none> role=<none> scope=<none> item=<none> item_type=<none><block_start>client=cf_synapse_role_assignments(cmd.cli_ctx workspace_name)<if_stmt><not>any([ids assignee assignee_object_id role scope item item_type])<block_start><raise>ArgumentUsageError('usage error: No argument are provided. --assignee STRING | --ids GUID')<block_end><if_stmt>ids<block_start><if_stmt>any([assignee assignee_object_id role scope item item_type])<block_start><raise>ArgumentUsageError('You should not provide --role or --assignee or --assignee_object_id '<concat>'or --scope or --principal-type when --ids is provided.')<block_end>role_assignments=list_role_assignments(cmd workspace_name <none> <none> <none> <none> <none> <none>)<line_sep>assignment_id_list=[x.id<for>x role_assignments]<line_sep># check role assignment id
<for_stmt>assignment_id ids<block_start><if_stmt>assignment_id<not><in>assignment_id_list<block_start><raise>ArgumentUsageError("role assignment id:'{}' doesn't exist.".format(assignment_id))<block_end><block_end># delete when all ids check pass
<for_stmt>assignment_id ids<block_start>client.delete_role_assignment_by_id(assignment_id)<block_end><return><block_end>role_assignments=list_role_assignments(cmd workspace_name role assignee assignee_object_id scope item item_type)<if_stmt>any([scope item item_type])<block_start>scope=_build_role_scope(workspace_name scope item item_type)<line_sep>role_assignments=[x<for>x role_assignments<if>x.scope<eq>scope]<block_end><if_stmt>role_assignments<block_start><for_stmt>assignment role_assignments<block_start>client.delete_role_assignment_by_id(assignment.id)<block_end><block_end><else_stmt><block_start><raise>CLIError('No matched assignments were found to delete, please provide correct --role or --assignee.'<concat>'Use `az synapse role assignment list` to get role assignments.')<block_end><block_end><def_stmt>create_role_assignment cmd workspace_name role assignee=<none> assignee_object_id=<none> scope=<none> assignee_principal_type=<none> item_type=<none> item=<none> assignment_id=<none><block_start>"""Check parameters are provided correctly, then call _create_role_assignment."""<if_stmt>assignment_id<and><not>is_guid(assignment_id)<block_start><raise>InvalidArgumentValueError('usage error: --id GUID')<block_end><if_stmt>bool(assignee)<eq>bool(assignee_object_id)<block_start><raise>ArgumentUsageError('usage error: --assignee STRING | --assignee-object-id GUID')<block_end><if_stmt>assignee_principal_type<and><not>assignee_object_id<block_start><raise>ArgumentUsageError('usage error: --assignee-object-id GUID [--assignee-principal-type]')<block_end><if_stmt>bool(item)<ne>bool(item_type)<block_start><raise>ArgumentUsageError('usage error: --item-type STRING --item STRING')<block_end><try_stmt><block_start><return>_create_role_assignment(cmd workspace_name role assignee<or>assignee_object_id scope item item_type resolve_assignee=(<not>assignee_object_id) assignee_principal_type=assignee_principal_type 
assignment_id=assignment_id)<block_end><except_stmt>Exception<as>ex# pylint: disable=broad-except
<block_start><if_stmt>_error_caused_by_role_assignment_exists(ex)# for idempotent
<block_start><return>list_role_assignments(cmd workspace_name role=role assignee=assignee assignee_object_id=assignee_object_id scope=scope item=item item_type=item_type)<block_end><raise><block_end><block_end><def_stmt>_resolve_object_id cmd assignee fallback_to_object_id=<false><block_start><if_stmt>assignee<is><none><block_start><return><none><block_end>client=cf_graph_client_factory(cmd.cli_ctx)<line_sep>result=<none><try_stmt><block_start>result=list(client.users.list(filter="userPrincipalName eq '{0}' or mail eq '{0}' or displayName eq '{0}'".format(assignee)))<if_stmt><not>result<block_start>result=list(client.service_principals.list(filter="displayName eq '{}'".format(assignee)))<block_end><if_stmt><not>result<block_start>result=list(client.groups.list(filter="mail eq '{}'".format(assignee)))<block_end><if_stmt><not>result<and>is_guid(assignee)# assume an object id, let us verify it
<block_start>result=_get_object_stubs(client [assignee])<block_end># 2+ matches should never happen, so we only check 'no match' here
<if_stmt><not>result<block_start><raise>CLIError("Cannot find user or group or service principal in graph database for '{assignee}'. "<concat>"If the assignee is a principal id, make sure the corresponding principal is created "<concat>"with 'az ad sp create --id {assignee}'.".format(assignee=assignee))<block_end><if_stmt>len(result)<g>1<block_start><raise>CLIError("Find more than one user or group or service principal in graph database for '{assignee}'. "<concat>"Please using --assignee-object-id GUID to specify assignee accurately".format(assignee=assignee))<block_end><return>result[0].object_id<block_end><except_stmt>(CloudError GraphErrorException)<block_start><if_stmt>fallback_to_object_id<and>is_guid(assignee)<block_start><return>assignee<block_end><raise><block_end><block_end><def_stmt>_get_object_stubs graph_client assignees<block_start><import_from_stmt>azure.graphrbac.models GetObjectsParameters<line_sep>result=[]<line_sep>assignees=list(assignees)# callers could pass in a set
<for_stmt>i range(0 len(assignees) 1000)<block_start>params=GetObjectsParameters(include_directory_object_references=<true> object_ids=assignees[i:i+1000])<line_sep>result<augadd>list(graph_client.objects.get_objects_by_object_ids(params))<block_end><return>result<block_end><def_stmt>_error_caused_by_role_assignment_exists ex<block_start><return>getattr(ex 'status_code' <none>)<eq>409<and>'role assignment already exists'<in>ex.message<block_end><def_stmt>_create_role_assignment cmd workspace_name role assignee scope=<none> item=<none> item_type=<none> resolve_assignee=<true> assignee_principal_type=<none> assignment_id=<none><block_start>"""Prepare scope, role ID and resolve object ID from Graph API."""<line_sep>scope=_build_role_scope(workspace_name scope item item_type)<line_sep>role_id=_resolve_role_id(cmd role workspace_name)<line_sep>object_id=_resolve_object_id(cmd assignee fallback_to_object_id=<true>)<if>resolve_assignee<else>assignee<line_sep>assignment_client=cf_synapse_role_assignments(cmd.cli_ctx workspace_name)<line_sep><return>assignment_client.create_role_assignment(assignment_id<if>assignment_id<is><not><none><else>_gen_guid() role_id object_id scope assignee_principal_type)<block_end><def_stmt>_build_role_scope workspace_name scope item item_type<block_start><if_stmt>scope<block_start><return>scope<block_end><if_stmt>item<and>item_type# workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}
<block_start>scope="workspaces/"+workspace_name+"/"+item_type+"/"+item<block_end><else_stmt><block_start>scope="workspaces/"+workspace_name<block_end><return>scope<block_end><def_stmt>_resolve_role_id cmd role workspace_name<block_start>role_id=<none><if_stmt><not>role<block_start><return>role_id<block_end><if_stmt>is_guid(role)<block_start>role_id=role<block_end><else_stmt><block_start>role_definition_client=cf_synapse_role_definitions(cmd.cli_ctx workspace_name)<line_sep>role_definition=role_definition_client.list_role_definitions()<line_sep>role_dict={x.name.lower():x.id<for>x role_definition<if>x.name}<if_stmt>role.lower()<not><in>role_dict<block_start><raise>CLIError("Role '{}' doesn't exist.".format(role))<block_end>role_id=role_dict[role.lower()]<block_end><return>role_id<block_end><def_stmt>_gen_guid <block_start><import_stmt>uuid<line_sep><return>uuid.uuid4()<block_end># List Synapse Role Definitions Scope
<def_stmt>list_scopes cmd workspace_name<block_start>client=cf_synapse_role_definitions(cmd.cli_ctx workspace_name)<line_sep><return>client.list_scopes()<block_end># List Synapse Role Definitions
<def_stmt>list_role_definitions cmd workspace_name is_built_in=<none><block_start>client=cf_synapse_role_definitions(cmd.cli_ctx workspace_name)<line_sep>role_definitions=client.list_role_definitions(is_built_in)<line_sep><return>role_definitions<block_end><def_stmt>_build_role_scope_format scope item_type<block_start><if_stmt>scope<block_start><return>scope<block_end><if_stmt>item_type<block_start>scope="workspaces/{workspaceName}/"+item_type+"/"+ITEM_NAME_MAPPING[item_type]<block_end><else_stmt><block_start>scope="workspaces/{workspaceName}"<block_end><return>scope<block_end># Get Synapse Role Definition
<def_stmt>get_role_definition cmd workspace_name role<block_start>role_id=_resolve_role_id(cmd role workspace_name)<line_sep>client=cf_synapse_role_definitions(cmd.cli_ctx workspace_name)<line_sep><return>client.get_role_definition_by_id(role_id)<block_end> |
"""
# AWS S3 Tests
"""<import_from_stmt>os.path join<as>pathJoin<import_stmt>pytest<import_from_stmt>ml_logger logger<line_sep>@pytest.fixture(scope='session')<def_stmt>log_dir request<block_start><return>request.config.getoption('--logdir')<block_end>@pytest.fixture(scope="session")<def_stmt>setup log_dir<block_start>logger.configure('main_test_script' root=log_dir)<line_sep>print(f"logging to {pathJoin(logger.root logger.prefix)}")<block_end><def_stmt>test_s3_upload setup<block_start><import_stmt>os pathlib<line_sep>profile=os.environ.get('ML_LOGGER_TEST_AWS_PROFILE' <none>)<if_stmt>profile<block_start>os.environ['AWS_PROFILE']=profile<block_end>s3_bucket=os.environ['ML_LOGGER_TEST_S3_BUCKET']<line_sep>target="s3://"+s3_bucket+"/test_dir.tar"<line_sep>logger.upload_dir(pathlib.Path(__file__).absolute().parent target)<block_end><def_stmt>test_s3_download setup<block_start><import_stmt>os glob<line_sep>profile=os.environ.get('ML_LOGGER_TEST_AWS_PROFILE' <none>)<if_stmt>profile<block_start>os.environ['AWS_PROFILE']=profile<block_end>s3_bucket=os.environ['ML_LOGGER_TEST_S3_BUCKET']<line_sep>source="s3://"+s3_bucket+"/test_dir.tar"<line_sep>local_prefix='/tmp/test_dir_download'<line_sep>logger.download_dir(source to=local_prefix)<assert_stmt>local_prefix+'/test_s3.py'<in>glob.glob(local_prefix+"/*")<line_sep>logger.remove("test_dir_download")<block_end><def_stmt>test_s3_glob setup<block_start><import_stmt>os<line_sep>profile=os.environ.get('ML_LOGGER_TEST_AWS_PROFILE' <none>)<if_stmt>profile<block_start>os.environ['AWS_PROFILE']=profile<block_end>s3_bucket=os.environ['ML_LOGGER_TEST_S3_BUCKET']<line_sep>target="s3://"+s3_bucket+"/test_dir.tar"<line_sep>logger.upload_dir('.' 
target)<line_sep>files=logger.glob_s3(s3_bucket)<assert_stmt>'test_dir.tar'<in>files<line_sep>files=logger.glob_s3(wd=s3_bucket)<assert_stmt>'test_dir.tar'<in>files<line_sep>files=logger.glob_s3(s3_bucket+"/test_dir.tar")<assert_stmt>'test_dir.tar'<in>files<line_sep>files=logger.glob_s3(s3_bucket+"/this_does_not_exist")<assert_stmt><not>files<block_end><def_stmt>test_s3_glob_prefix setup<block_start><import_stmt>os<line_sep>profile=os.environ.get('ML_LOGGER_TEST_AWS_PROFILE' <none>)<if_stmt>profile<block_start>os.environ['AWS_PROFILE']=profile<block_end>s3_bucket=os.environ['ML_LOGGER_TEST_S3_BUCKET']<line_sep>target="s3://"+s3_bucket+"/prefix/prefix-2/test_dir.tar"<line_sep>logger.upload_dir("." target)<line_sep>files=logger.glob_s3(wd=s3_bucket+"/prefix/prefix-2")<assert_stmt>'test_dir.tar'<in>files<block_end><def_stmt>test_s3_remove setup<block_start><import_stmt>os<line_sep>example_data={'a':1 'b':2}<line_sep>s3_bucket=os.environ.get('ML_LOGGER_TEST_S3_BUCKET' <none>)<line_sep>target="s3://"+s3_bucket+"/prefix/prefix-2/example_data.pt"<line_sep>logger.save_torch(example_data target)<line_sep>file,=logger.glob_s3(target[5:])<line_sep>logger.remove_s3(s3_bucket file)<assert_stmt><not>logger.glob_s3(target[5:])<block_end><def_stmt>test_s3_upload_download_torch setup<block_start><import_stmt>os<line_sep>example_data={'a':1 'b':2}<line_sep>s3_bucket=os.environ.get('ML_LOGGER_TEST_S3_BUCKET' <none>)<line_sep>file="prefix/prefix-2/example_data.pt"<line_sep>target="s3://"+s3_bucket+"/"+file<line_sep>logger.remove_s3(s3_bucket file)<line_sep>logger.save_torch(example_data target)<line_sep>downloaded_data=logger.load_torch(target)<assert_stmt>downloaded_data['a']<eq>1<assert_stmt>downloaded_data['b']<eq>2<block_end> |
"""
Display a 4D labels layer and paint only in 3D.
This is useful e.g. when proofreading segmentations within a time series.
"""<import_stmt>numpy<as>np<import_from_stmt>skimage data<import_stmt>napari<line_sep>blobs=np.stack([data.binary_blobs(length=128 blob_size_fraction=0.05 n_dim=3 volume_fraction=f)<for>f np.linspace(0.05 0.5 10)] axis=0 )<line_sep>viewer=napari.view_image(blobs.astype(float) rendering='attenuated_mip')<line_sep>labels=viewer.add_labels(np.zeros_like(blobs dtype=np.int32))<line_sep>labels.n_edit_dimensions=3<line_sep>labels.brush_size=15<line_sep>labels.mode='paint'<line_sep>labels.n_dimensional=<true><line_sep>napari.run()<line_sep> |
<import_stmt>torchvision.transforms<as>T<import_from_stmt>torchvision.datasets ImageFolder<class_stmt>WHURS19(ImageFolder)<block_start>""" WHU-RS19 dataset from'Structural High-resolution Satellite Image Indexing', Xia at al. (2010)
https://hal.archives-ouvertes.fr/file/index/docid/458685/filename/structural_satellite_indexing_XYDG.pdf
"""<def_stmt>__init__ self root:str=".data/WHU-RS19" transform:T.Compose=T.Compose([T.ToTensor()])<block_start>super().__init__(root=root transform=transform)<block_end><block_end> |
# Copyright 2020 Samsung Electronics Co., Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
<import_from_stmt>tempest.lib.services.identity.v3 protocols_client<import_from_stmt>tempest.tests.lib fake_auth_provider<import_from_stmt>tempest.tests.lib.services base<class_stmt>TestProtocolsClient(base.BaseServiceTest)<block_start>FAKE_PROTOCOLS_INFO={"links":{"next":<none> "previous":<none> "self":"http://example.com/identity/v3/OS-FEDERATION/"+"identity_providers/FAKE_ID/protocols"} "protocols":[{"id":"fake_id1" "links":{"identity_provider":"http://example.com/identity/v3/"+"OS-FEDERATION/identity_providers/"+"FAKE_ID" "self":"http://example.com/identity/v3/OS-FEDERATION/"<concat>"identity_providers/FAKE_ID/protocols/fake_id1"} "mapping_id":"fake123"}]}<line_sep>FAKE_PROTOCOL_INFO={"protocol":{"id":"fake_id1" "links":{"identity_provider":"http://example.com/identity/v3/OS-"+"FEDERATION/identity_providers/FAKE_ID" "self":"http://example.com/identity/v3/OS-FEDERATION/"+"identity_providers/FAKE_ID/protocols/fake_id1"} "mapping_id":"fake123"}}<def_stmt>setUp self<block_start>super(TestProtocolsClient self).setUp()<line_sep>fake_auth=fake_auth_provider.FakeAuthProvider()<line_sep>self.client=protocols_client.ProtocolsClient(fake_auth 'identity' 'regionOne')<block_end><def_stmt>_test_add_protocol_to_identity_provider self bytes_body=<false><block_start>self.check_service_client_function(self.client.add_protocol_to_identity_provider 'tempest.lib.common.rest_client.RestClient.put' self.FAKE_PROTOCOL_INFO bytes_body idp_id="FAKE_ID" protocol_id="fake_id1" status=201)<block_end><def_stmt>_test_list_protocols_of_identity_provider self bytes_body=<false><block_start>self.check_service_client_function(self.client.list_protocols_of_identity_provider 'tempest.lib.common.rest_client.RestClient.get' self.FAKE_PROTOCOLS_INFO bytes_body idp_id="FAKE_ID" status=200)<block_end><def_stmt>_test_get_protocol_for_identity_provider self bytes_body=<false><block_start>self.check_service_client_function(self.client.get_protocol_for_identity_provider 
'tempest.lib.common.rest_client.RestClient.get' self.FAKE_PROTOCOL_INFO bytes_body idp_id="FAKE_ID" protocol_id="fake_id1" status=200)<block_end><def_stmt>_test_update_mapping_for_identity_provider self bytes_body=<false><block_start>self.check_service_client_function(self.client.update_mapping_for_identity_provider 'tempest.lib.common.rest_client.RestClient.patch' self.FAKE_PROTOCOL_INFO bytes_body idp_id="FAKE_ID" protocol_id="fake_id1" status=200)<block_end><def_stmt>_test_delete_protocol_from_identity_provider self bytes_body=<false><block_start>self.check_service_client_function(self.client.delete_protocol_from_identity_provider 'tempest.lib.common.rest_client.RestClient.delete' {} bytes_body idp_id="FAKE_ID" protocol_id="fake_id1" status=204)<block_end><def_stmt>test_add_protocol_to_identity_provider_with_str_body self<block_start>self._test_add_protocol_to_identity_provider()<block_end><def_stmt>test_add_protocol_to_identity_provider_with_bytes_body self<block_start>self._test_add_protocol_to_identity_provider(bytes_body=<true>)<block_end><def_stmt>test_list_protocols_of_identity_provider_with_str_body self<block_start>self._test_list_protocols_of_identity_provider()<block_end><def_stmt>test_list_protocols_of_identity_provider_with_bytes_body self<block_start>self._test_list_protocols_of_identity_provider(bytes_body=<true>)<block_end><def_stmt>test_get_protocol_for_identity_provider_with_str_body self<block_start>self._test_get_protocol_for_identity_provider()<block_end><def_stmt>test_get_protocol_for_identity_provider_with_bytes_body self<block_start>self._test_get_protocol_for_identity_provider(bytes_body=<true>)<block_end><def_stmt>test_update_mapping_for_identity_provider_with_str_body self<block_start>self._test_update_mapping_for_identity_provider()<block_end><def_stmt>test_update_mapping_for_identity_provider_with_bytes_body 
self<block_start>self._test_update_mapping_for_identity_provider(bytes_body=<true>)<block_end><def_stmt>test_delete_protocol_from_identity_provider_with_str_body self<block_start>self._test_delete_protocol_from_identity_provider()<block_end><def_stmt>test_delete_protocol_from_identity_provider_with_bytes_body self<block_start>self._test_delete_protocol_from_identity_provider(bytes_body=<false>)<block_end><block_end> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.