content
stringlengths
0
1.55M
"""Utilities for interacting with databases"""<def_stmt>generate_connect_string host:str port:int db:str user:str password:str <arrow>str<block_start>conn_string=f"postgresql://{user}:{password}@"<if_stmt><not>host.startswith('/')<block_start>conn_string<augadd>f"{host}:{port}"<block_end>conn_string<augadd>f"/{db}"<if_stmt>host.startswith('/')<block_start>conn_string<augadd>f"?host={host}"<block_end><return>conn_string<block_end>
# Event-name constants shared by the algo-trading components.
# Values are wire-level identifiers; do not change without updating consumers.
EVENT_ALGO_LOG = "eAlgoLog"
EVENT_ALGO_SETTING = "eAlgoSetting"
EVENT_ALGO_VARIABLES = "eAlgoVariables"
EVENT_ALGO_PARAMETERS = "eAlgoParameters"

# Application identifier used by the algo-trading system.
APP_NAME = "AlgoTrading"
#! /usr/bin/python
# -*- coding: utf8 -*-
"""Training / testing driver for a LapSRN super-resolution model (TF 1.x + TensorLayer)."""

import os
import random
import time

import numpy as np
import scipy
import tensorflow as tf
import tensorlayer as tl

from model import *
from utils import *
from config import *

###====================== HYPER-PARAMETERS ===========================###
batch_size = config.train.batch_size
patch_size = config.train.in_patch_size
# Grid side length used when tiling sample images for visualization.
ni = int(np.sqrt(config.train.batch_size))


def compute_charbonnier_loss(tensor1, tensor2, is_mean=True):
    """Charbonnier loss (a differentiable L1 variant) between two image batches.

    Args:
        tensor1, tensor2: 4-D tensors of matching shape (NHWC assumed — TODO confirm).
        is_mean: If True, average over spatial/channel axes per image; otherwise sum.

    Returns:
        Scalar loss tensor.
    """
    epsilon = 1e-6
    diff = tf.sqrt(tf.square(tf.subtract(tensor1, tensor2)) + epsilon)
    reducer = tf.reduce_mean if is_mean else tf.reduce_sum
    return tf.reduce_mean(reducer(diff, [1, 2, 3]))


def load_file_list():
    """Collect and sort the HR/LR training and validation file lists.

    Returns:
        Tuple of four sorted lists:
        (train_hr, train_lr, valid_hr, valid_lr).
    """
    def _files_in(directory):
        # Note: paths are built by plain concatenation, so the configured
        # folder path is expected to end with a separator.
        return sorted("%s%s" % (directory, name)
                      for name in os.listdir(directory)
                      if os.path.isfile(os.path.join(directory, name)))

    return (_files_in(config.train.hr_folder_path),
            _files_in(config.train.lr_folder_path),
            _files_in(config.valid.hr_folder_path),
            _files_in(config.valid.lr_folder_path))


def prepare_nn_data(hr_img_list, lr_img_list, idx_img=None):
    """Cut a batch of aligned LR/HR patch pairs from one image pair.

    Args:
        hr_img_list: High-resolution image file paths.
        lr_img_list: Low-resolution image file paths (index-aligned with HR).
        idx_img: Image index to sample from; random image when None.

    Returns:
        (input_batch, output_batch): LR patches of shape
        [batch_size, patch_size, patch_size, 3] and the matching HR patches.
    """
    img_idx = np.random.randint(len(hr_img_list)) if (idx_img is None) else idx_img
    input_image = get_imgs_fn(lr_img_list[img_idx])
    output_image = get_imgs_fn(hr_img_list[img_idx])

    scale = int(output_image.shape[0] / input_image.shape[0])
    assert scale == config.model.scale
    out_patch_size = patch_size * scale

    input_batch = np.empty([batch_size, patch_size, patch_size, 3])
    output_batch = np.empty([batch_size, out_patch_size, out_patch_size, 3])

    for sample in range(batch_size):
        # Random crop from the LR image; the HR crop is the scaled location.
        row = random.randint(0, input_image.shape[0] - patch_size)
        col = random.randint(0, input_image.shape[1] - patch_size)

        lr_crop = augment_imgs_fn(input_image[row: row + patch_size, col: col + patch_size])
        lr_crop = normalize_imgs_fn(lr_crop)
        input_batch[sample] = np.expand_dims(lr_crop, axis=0)

        hr_row, hr_col = row * scale, col * scale
        hr_crop = output_image[hr_row: hr_row + out_patch_size, hr_col: hr_col + out_patch_size]
        hr_crop = normalize_imgs_fn(hr_crop)
        output_batch[sample] = np.expand_dims(hr_crop, axis=0)

    return input_batch, output_batch


def train():
    """Train the LapSRN model; checkpoints and sample images are written each epoch."""
    save_dir = "%s/%s_train" % (config.model.result_path, tl.global_flag['mode'])
    checkpoint_dir = "%s" % (config.model.checkpoint_path)
    tl.files.exists_or_mkdir(save_dir)
    tl.files.exists_or_mkdir(checkpoint_dir)

    ###========================== DEFINE MODEL ============================###
    t_image = tf.placeholder('float32', [batch_size, patch_size, patch_size, 3],
                             name='t_image_input')
    t_target_image = tf.placeholder(
        'float32',
        [batch_size, patch_size * config.model.scale, patch_size * config.model.scale, 3],
        name='t_target_image')
    # Intermediate (2x) target for the first pyramid level.
    t_target_image_down = tf.image.resize_images(
        t_target_image, size=[patch_size * 2, patch_size * 2], method=0, align_corners=False)

    net_image2, net_grad2, net_image1, net_grad1 = LapSRN(t_image, is_train=True, reuse=False)
    net_image2.print_params(False)

    # Separate inference graph reusing the trained weights.
    net_image_test, net_grad_test, _, _ = LapSRN(t_image, is_train=False, reuse=True)

    ###========================== DEFINE TRAIN OPS ==========================###
    loss2 = compute_charbonnier_loss(net_image2.outputs, t_target_image, is_mean=True)
    loss1 = compute_charbonnier_loss(net_image1.outputs, t_target_image_down, is_mean=True)
    # Final-level loss weighted 4x relative to the intermediate level.
    g_loss = loss1 + loss2 * 4
    g_vars = tl.layers.get_variables_with_name('LapSRN', True, True)
    with tf.variable_scope('learning_rate'):
        lr_v = tf.Variable(config.train.lr_init, trainable=False)
    g_optim = tf.train.AdamOptimizer(lr_v, beta1=config.train.beta1).minimize(
        g_loss, var_list=g_vars)

    ###========================== RESTORE MODEL =============================###
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                            log_device_placement=False))
    tl.layers.initialize_global_variables(sess)
    tl.files.load_and_assign_npz(
        sess=sess,
        name=checkpoint_dir + '/params_{}.npz'.format(tl.global_flag['mode']),
        network=net_image2)

    ###========================== PRE-LOAD DATA ===========================###
    train_hr_list, train_lr_list, valid_hr_list, valid_lr_list = load_file_list()

    ###========================== INTERMEDIATE RESULT ===============================###
    sample_ind = 37
    sample_input_imgs, sample_output_imgs = prepare_nn_data(valid_hr_list, valid_lr_list,
                                                            sample_ind)
    tl.vis.save_images(truncate_imgs_fn(sample_input_imgs), [ni, ni],
                       save_dir + '/train_sample_input.png')
    tl.vis.save_images(truncate_imgs_fn(sample_output_imgs), [ni, ni],
                       save_dir + '/train_sample_output.png')

    ###========================== TRAINING ====================###
    sess.run(tf.assign(lr_v, config.train.lr_init))
    print(" ** learning rate: %f" % config.train.lr_init)

    for epoch in range(config.train.n_epoch):
        # Step-decay of the learning rate every `decay_iter` epochs.
        if epoch != 0 and (epoch % config.train.decay_iter == 0):
            lr_decay = config.train.lr_decay ** (epoch // config.train.decay_iter)
            lr = config.train.lr_init * lr_decay
            sess.run(tf.assign(lr_v, lr))
            print(" ** learning rate: %f" % (lr))

        epoch_time = time.time()
        total_g_loss, n_iter = 0, 0

        # One optimizer step per training image (index order, one image each).
        idx_list = np.random.permutation(len(train_hr_list))
        for idx_file in range(len(idx_list)):
            step_time = time.time()
            batch_input_imgs, batch_output_imgs = prepare_nn_data(train_hr_list,
                                                                  train_lr_list, idx_file)
            errM, _ = sess.run([g_loss, g_optim],
                               {t_image: batch_input_imgs, t_target_image: batch_output_imgs})
            total_g_loss += errM
            n_iter += 1

        print("[*] Epoch: [%2d/%2d] time: %4.4fs, loss: %.8f" % (
            epoch, config.train.n_epoch, time.time() - epoch_time, total_g_loss / n_iter))

        # Save a checkpoint each epoch and optionally dump sample predictions.
        if (epoch >= 0):
            tl.files.save_npz(
                net_image2.all_params,
                name=checkpoint_dir + '/params_{}.npz'.format(tl.global_flag['mode']),
                sess=sess)
            if config.train.dump_intermediate_result is True:
                sample_out, sample_grad_out = sess.run(
                    [net_image_test.outputs, net_grad_test.outputs],
                    {t_image: sample_input_imgs})
                tl.vis.save_images(truncate_imgs_fn(sample_out), [ni, ni],
                                   save_dir + '/train_predict_%d.png' % epoch)
                tl.vis.save_images(truncate_imgs_fn(np.abs(sample_grad_out)), [ni, ni],
                                   save_dir + '/train_grad_predict_%d.png' % epoch)


def test(file):
    """Run single-image super-resolution inference and save input/output PNGs."""
    try:
        img = get_imgs_fn(file)
    except IOError:
        print('cannot open %s' % (file))
    else:
        checkpoint_dir = config.model.checkpoint_path
        save_dir = "%s/%s" % (config.model.result_path, tl.global_flag['mode'])
        input_image = normalize_imgs_fn(img)

        size = input_image.shape
        print('Input size: %s,%s,%s' % (size[0], size[1], size[2]))
        t_image = tf.placeholder('float32', [None, size[0], size[1], size[2]],
                                 name='input_image')
        net_g, _, _, _ = LapSRN(t_image, is_train=False, reuse=False)

        ###========================== RESTORE G =============================###
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                                log_device_placement=False))
        tl.layers.initialize_global_variables(sess)
        tl.files.load_and_assign_npz(sess=sess, name=checkpoint_dir + '/params_train.npz',
                                     network=net_g)

        ###======================= TEST =============================###
        start_time = time.time()
        out = sess.run(net_g.outputs, {t_image: [input_image]})
        print("took: %4.4fs" % (time.time() - start_time))

        tl.files.exists_or_mkdir(save_dir)
        tl.vis.save_image(truncate_imgs_fn(out[0, :, :, :]), save_dir + '/test_out.png')
        tl.vis.save_image(input_image, save_dir + '/test_input.png')


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mode', choices=['train', 'test'], default='train',
                        help='select mode')
    parser.add_argument('-f', '--file', help='input file')
    args = parser.parse_args()

    tl.global_flag['mode'] = args.mode
    if tl.global_flag['mode'] == 'train':
        train()
    elif tl.global_flag['mode'] == 'test':
        if (args.file is None):
            raise Exception("Please enter input file name for test mode")
        test(args.file)
    else:
        raise Exception("Unknow --mode")
"""Integration tests for the Sumo log-group connector CloudFormation stack.

Spins up the stack from test/test-template.yaml, creates a CloudWatch log
group, and asserts that the expected subscription filter appears. Also
contains helpers to SAM-package the lambda and upload it to S3.
"""

import subprocess
import time
import unittest
import boto3
from time import sleep
import json
import os
import sys
import datetime
import cfn_flip

# Modify the name of the bucket prefix for testing
BUCKET_PREFIX = "appdevstore"
AWS_REGION = os.environ.get("AWS_DEFAULT_REGION", "us-east-1")


class TestLambda(unittest.TestCase):
    '''
        fail case newlgrp
        success case testlggrp
        already exists subscription filter
        idempotent
    '''

    def setUp(self):
        # AWS clients used by every test.
        self.log_group_client = boto3.client('logs', AWS_REGION)
        self.cf = boto3.client('cloudformation', AWS_REGION)
        self.lambda_cl = boto3.client('lambda', AWS_REGION)

        # Timestamped resource names so concurrent runs don't collide.
        stamp = datetime.datetime.now().strftime("%d-%m-%y-%H-%M-%S")
        self.log_group_name = 'testloggroup-%s' % (stamp)
        self.stack_name = "TestLogGrpConnectorStack-%s" % (stamp)
        self.bucket_name = get_bucket_name()
        # Stack outputs collected by stack_exists(); keyed by OutputKey.
        self.outputs = {}

        # CloudFormation template body under test.
        self.template_data = read_file("test/test-template.yaml")

    def tearDown(self):
        # Best-effort cleanup of the stack and the log group.
        if self.stack_exists(self.stack_name):
            self.delete_stack(self.stack_name)
        self.delete_log_group()

    def test_1_lambda(self):
        # Stack first, then log group: filter must be attached to new groups.
        self.create_stack(self.stack_name, self.template_data,
                          self.create_stack_parameters("Lambda", "false"))
        print("Testing Stack Creation")
        self.assertTrue(self.stack_exists(self.stack_name))
        self.create_log_group()
        self.assert_subscription_filter("SumoLGLBDFilter")

    def test_2_existing_logs(self):
        # Log group first: UseExistingLogs=true must pick up pre-existing groups.
        self.create_log_group()
        self.create_stack(self.stack_name, self.template_data,
                          self.create_stack_parameters("Lambda", "true"))
        print("Testing Stack Creation")
        self.assertTrue(self.stack_exists(self.stack_name))
        #self.invoke_lambda()
        self.assert_subscription_filter("SumoLGLBDFilter")

    def test_3_kinesis(self):
        self.create_stack(self.stack_name, self.template_data,
                          self.create_stack_parameters("Kinesis", "false"))
        print("Testing Stack Creation")
        self.assertTrue(self.stack_exists(self.stack_name))
        self.create_log_group()
        self.assert_subscription_filter("SumoLGLBDFilter")

    def test_4_existing_kinesis(self):
        self.create_log_group()
        self.create_stack(self.stack_name, self.template_data,
                          self.create_stack_parameters("Kinesis", "true"))
        print("Testing Stack Creation")
        self.assertTrue(self.stack_exists(self.stack_name))
        #self.invoke_lambda()
        self.assert_subscription_filter("SumoLGLBDFilter")

    def create_stack_parameters(self, destination, existing, pattern='test'):
        # Parameter list in the shape expected by cloudformation create_stack.
        return [
            {'ParameterKey': 'DestinationType', 'ParameterValue': destination},
            {'ParameterKey': 'LogGroupPattern', 'ParameterValue': pattern},
            {'ParameterKey': 'UseExistingLogs', 'ParameterValue': existing},
            {'ParameterKey': 'BucketName', 'ParameterValue': self.bucket_name},
        ]

    def stack_exists(self, stack_name):
        """Return True when the stack is CREATE_COMPLETE; also caches its outputs."""
        for summary in self.cf.list_stacks()['StackSummaries']:
            if summary['StackStatus'] == 'DELETE_COMPLETE':
                continue
            if stack_name == summary['StackName'] and summary['StackStatus'] == 'CREATE_COMPLETE':
                print("%s stack exists" % stack_name)
                stack_data = self.cf.describe_stacks(StackName=self.stack_name)
                for output in stack_data["Stacks"][0]["Outputs"]:
                    self.outputs[output["OutputKey"]] = output["OutputValue"]
                return True
        return False

    def create_stack(self, stack_name, template_data, parameters):
        params = {
            'StackName': stack_name,
            'TemplateBody': template_data,
            'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_AUTO_EXPAND'],
            'Parameters': parameters,
        }
        stack_result = self.cf.create_stack(**params)
        print('Creating {}'.format(stack_name), stack_result)
        waiter = self.cf.get_waiter('stack_create_complete')
        print("...waiting for stack to be ready...")
        waiter.wait(StackName=stack_name)

    def delete_stack(self, stack_name):
        params = {'StackName': stack_name}
        stack_result = self.cf.delete_stack(**params)
        print('Deleting {}'.format(stack_name), stack_result)
        waiter = self.cf.get_waiter('stack_delete_complete')
        print("...waiting for stack to be removed...")
        waiter.wait(StackName=stack_name)

    def delete_log_group(self):
        response = self.log_group_client.delete_log_group(logGroupName=self.log_group_name)
        print("deleting log group", response)

    def create_log_group(self):
        response = self.log_group_client.create_log_group(logGroupName=self.log_group_name)
        print("creating log group", response)

    def assert_subscription_filter(self, filter_name):
        # The connector is event driven; give it a minute to attach the filter.
        sleep(60)
        response = self.log_group_client.describe_subscription_filters(
            logGroupName=self.log_group_name, filterNamePrefix=filter_name)
        print("testing subscription filter exists", response)
        # Add multiple assert for name, destination arn, role arn.
        assert len(response['subscriptionFilters']) > 0
        assert response['subscriptionFilters'][0]['filterName'] == filter_name
        assert response['subscriptionFilters'][0]['logGroupName'] == self.log_group_name
        assert response['subscriptionFilters'][0]['destinationArn'] == self.outputs["destinationArn"]
        if "roleArn" in self.outputs:
            assert response['subscriptionFilters'][0]['roleArn'] == self.outputs["roleArn"]

    def _parse_template(self, template_name):
        # Normalize YAML -> JSON then let CloudFormation validate it.
        output_file = cfn_flip.to_json(read_file(template_name))
        template_data = json.loads(output_file)
        print("Validating cloudformation template")
        self.cf.validate_template(TemplateBody=template_data)
        return template_data

    def invoke_lambda(self):
        lambda_arn = self.outputs["LambdaARN"]
        output = self.lambda_cl.invoke(FunctionName=lambda_arn, InvocationType='Event',
                                       LogType='None',
                                       Payload=bytes(json.dumps({"value": "test"}), "utf-8"))
        if output["StatusCode"] != 202:
            raise Exception("Failed to invoke Lambda")
        time.sleep(60)


def read_file(file_path):
    """Read and strip a file located relative to the parent of the CWD."""
    file_path = os.path.join(os.path.dirname(os.getcwd()), file_path)
    with open(file_path, "r") as f:
        return f.read().strip()


def get_bucket_name():
    """Region-qualified bucket name derived from the (overridable) prefix."""
    return '%s-%s' % (BUCKET_PREFIX, AWS_REGION)


def upload_to_s3(file_path):
    print("Uploading %s file in S3 region: %s" % (file_path, AWS_REGION))
    s3 = boto3.client('s3', AWS_REGION)
    bucket_name = get_bucket_name()
    key = os.path.basename(file_path)
    filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_path)
    s3.upload_file(os.path.join(__file__, filename), bucket_name, key,
                   ExtraArgs={'ACL': 'public-read'})


def create_sam_package_and_upload():
    """SAM-package the template and publish the packaged template to S3."""
    template_file_path = os.path.join(os.path.dirname(os.getcwd()), "sam/template.yaml")
    packaged_template_path = os.path.join(os.path.dirname(os.getcwd()), "sam/packaged.yaml")
    # Create packaged template
    run_command(["sam", "package", "--template-file", template_file_path,
                 "--output-template-file", packaged_template_path,
                 "--s3-bucket", get_bucket_name(),
                 "--s3-prefix", "test-log-group-lambda-connector"])
    # Upload the packaged template to S3
    upload_to_s3(packaged_template_path)


def _run(command, input=None, check=False, **kwargs):
    """subprocess.run shim; falls back to Popen on interpreters older than 3.5."""
    if sys.version_info >= (3, 5):
        return subprocess.run(command, capture_output=True)
    if input is not None:
        if 'stdin' in kwargs:
            raise ValueError('stdin and input arguments may not both be used.')
        kwargs['stdin'] = subprocess.PIPE
    process = subprocess.Popen(command, **kwargs)
    try:
        stdout, stderr = process.communicate(input)
    except:
        process.kill()
        process.wait()
        raise
    retcode = process.poll()
    if check and retcode:
        raise subprocess.CalledProcessError(retcode, process.args,
                                            output=stdout, stderr=stderr)
    return retcode, stdout, stderr


def run_command(cmdargs):
    resp = _run(cmdargs)
    if len(resp.stderr.decode()) > 0:
        # traceback.print_exc()
        raise Exception("Error in run command %s cmd: %s" % (resp, cmdargs))
    return resp.stdout


if __name__ == '__main__':
    if len(sys.argv) > 1:
        BUCKET_PREFIX = sys.argv.pop()
    create_sam_package_and_upload()
    # upload_code_in_multiple_regions()
    # Run the test cases
    unittest.main()
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests TPU specfic parts of ModularGAN."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import flags
from absl.testing import parameterized
from compare_gan import datasets
from compare_gan import test_utils
from compare_gan.gans import consts as c
from compare_gan.gans.modular_gan import ModularGAN
import tensorflow as tf

FLAGS = flags.FLAGS


class ModularGanTpuTest(parameterized.TestCase, test_utils.CompareGanTestCase):
    """Checks generator/discriminator call counts and batch shapes on TPU."""

    def setUp(self):
        super(ModularGanTpuTest, self).setUp()
        self.model_dir = self._get_empty_model_dir()
        # One estimator loop iteration so every call is observable.
        self.run_config = tf.contrib.tpu.RunConfig(
            model_dir=self.model_dir,
            tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))

    @parameterized.parameters([1, 2, 5])
    def testBatchSize(self, disc_iters, use_tpu=True):
        # Default call pattern: one G call and one D call per step.
        parameters = {
            "architecture": c.DUMMY_ARCH,
            "lambda": 1,
            "z_dim": 128,
            "disc_iters": disc_iters,
        }
        batch_size = 16
        dataset = datasets.get_dataset("cifar10")
        gan = ModularGAN(dataset=dataset,
                         parameters=parameters,
                         model_dir=self.model_dir)
        estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
                                     use_tpu=True)
        estimator.train(gan.input_fn, steps=1)

        gen_args = gan.generator.call_arg_list
        disc_args = gan.discriminator.call_arg_list
        self.assertLen(gen_args, disc_iters + 1)  # D steps, G step.
        self.assertLen(disc_args, disc_iters + 1)  # D steps, G step.

        for args in gen_args:
            self.assertAllEqual(args["z"].shape.as_list(), [8, 128])
        for args in disc_args:
            self.assertAllEqual(args["x"].shape.as_list(), [16, 32, 32, 3])

    @parameterized.parameters([1, 2, 5])
    def testBatchSizeSplitDiscCalls(self, disc_iters):
        # deprecated_split_disc_calls: D is invoked separately for real/fake.
        parameters = {
            "architecture": c.DUMMY_ARCH,
            "lambda": 1,
            "z_dim": 128,
            "disc_iters": disc_iters,
        }
        batch_size = 16
        dataset = datasets.get_dataset("cifar10")
        gan = ModularGAN(dataset=dataset,
                         parameters=parameters,
                         deprecated_split_disc_calls=True,
                         model_dir=self.model_dir)
        estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
                                     use_tpu=True)
        estimator.train(gan.input_fn, steps=1)

        gen_args = gan.generator.call_arg_list
        disc_args = gan.discriminator.call_arg_list
        self.assertLen(gen_args, disc_iters + 1)  # D steps, G step.
        # Each D and G step calls discriminator twice: for real and fake images.
        self.assertLen(disc_args, 2 * (disc_iters + 1))

        for args in gen_args:
            self.assertAllEqual(args["z"].shape.as_list(), [8, 128])
        for args in disc_args:
            self.assertAllEqual(args["x"].shape.as_list(), [8, 32, 32, 3])

    @parameterized.parameters([1, 2, 5])
    def testBatchSizeExperimentalJointGenForDisc(self, disc_iters):
        # experimental_joint_gen_for_disc: G output for all D steps in one call.
        parameters = {
            "architecture": c.DUMMY_ARCH,
            "lambda": 1,
            "z_dim": 128,
            "disc_iters": disc_iters,
        }
        batch_size = 16
        dataset = datasets.get_dataset("cifar10")
        gan = ModularGAN(dataset=dataset,
                         parameters=parameters,
                         experimental_joint_gen_for_disc=True,
                         model_dir=self.model_dir)
        estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
                                     use_tpu=True)
        estimator.train(gan.input_fn, steps=1)

        gen_args = gan.generator.call_arg_list
        disc_args = gan.discriminator.call_arg_list
        self.assertLen(gen_args, 2)
        self.assertLen(disc_args, disc_iters + 1)

        self.assertAllEqual(gen_args[0]["z"].shape.as_list(), [8 * disc_iters, 128])
        self.assertAllEqual(gen_args[1]["z"].shape.as_list(), [8, 128])
        for args in disc_args:
            self.assertAllEqual(args["x"].shape.as_list(), [16, 32, 32, 3])


if __name__ == "__main__":
    tf.test.main()
"""Dragonfly voice-command grammar for Google Chrome."""

from dragonfly import (Grammar, AppContext, MappingRule, Dictation, Key, Text,
                       Integer, Mimic)

# Grammar is active only while Chrome is the foreground executable.
context = AppContext(executable="chrome")
grammar = Grammar("chrome", context=context)

# Prefix mimic that disables auto-capitalization and auto-spacing.
noSpaceNoCaps = Mimic("\\no-caps-on") + Mimic("\\no-space-on")

rules = MappingRule(
    name="chrome",
    mapping={
        # Spoken phrase -> key action.
        "edit": Key("w-a"),
        "reload": Key("f5"),
        "open": Key("escape, o"),
        "jump": Key("f"),
        "new tab": Key("t"),
        "search tabs": Key("T"),
        "find": Key("slash"),
        "console": Key("cs-j"),
        "close tab": Key("c-w"),
        "escape": Key('escape'),
    },
    extras=[
        Dictation("text"),
        Integer("n", 0, 20000),
    ],
    defaults={"n": 1},
)

grammar.add_rule(rules)
grammar.load()


def unload():
    """Release the grammar when the module is unloaded by the framework."""
    global grammar
    if grammar:
        grammar.unload()
    grammar = None
###########################################################################
#
#  Copyright 2021 Google LLC
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
###########################################################################
#
#  This code generated (see scripts folder for possible source):
#    - Command: "python starthinker_ui/manage.py example"
#
###########################################################################

import argparse
import textwrap

from starthinker.util.configuration import Configuration
from starthinker.task.fred.run import fred


def recipe_fred_series_to_bigquery(config, auth, fred_api_key, fred_series_id,
                                   fred_units, fred_frequency,
                                   fred_aggregation_method, project, dataset):
    """Download federal reserve series.

    Args:
        config (Configuration) - StarThinker execution configuration.
        auth (authentication) - Credentials used for writing data.
        fred_api_key (string) - 32 character alpha-numeric lowercase string.
        fred_series_id (string) - Series ID to pull data from.
        fred_units (choice) - A key that indicates a data value transformation.
        fred_frequency (choice) - An optional parameter that indicates a lower frequency to aggregate values to.
        fred_aggregation_method (choice) - A key that indicates the aggregation method used for frequency aggregation.
        project (string) - Existing BigQuery project.
        dataset (string) - Existing BigQuery dataset.
    """
    fred(config, {
        'auth': auth,
        'api_key': fred_api_key,
        'frequency': fred_frequency,
        'series': [{
            'series_id': fred_series_id,
            'units': fred_units,
            'aggregation_method': fred_aggregation_method,
        }],
        'out': {
            'bigquery': {
                'project': project,
                'dataset': dataset,
            }
        },
    })


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""
      Download federal reserve series.

        1. Specify the values for a <a href='https://fred.stlouisfed.org/docs/api/fred/series_observations.html' target='_blank'>Fred observations API call</a>.
        2. A table will appear in the dataset.
    """))

    # BUG FIX: "-project" was registered twice (once for the Cloud project and
    # once for the BigQuery project), which makes argparse raise
    # ArgumentError("conflicting option string") before any work is done.
    # A single "-project" option now serves both purposes; the same value is
    # passed to Configuration and to the recipe's BigQuery output.
    parser.add_argument("-project",
                        help="Cloud ID of Google Cloud Project / existing BigQuery project.",
                        default=None)
    parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
    parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
    parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
    parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
    parser.add_argument("-verbose", help="Print all the steps as they happen.",
                        action="store_true")

    parser.add_argument("-auth", help="Credentials used for writing data.",
                        default='service')
    parser.add_argument("-fred_api_key",
                        help="32 character alpha-numeric lowercase string.", default='')
    parser.add_argument("-fred_series_id", help="Series ID to pull data from.", default='')
    parser.add_argument("-fred_units",
                        help="A key that indicates a data value transformation.",
                        default='lin')
    parser.add_argument("-fred_frequency",
                        help="An optional parameter that indicates a lower frequency to aggregate values to.",
                        default='')
    parser.add_argument("-fred_aggregation_method",
                        help="A key that indicates the aggregation method used for frequency aggregation.",
                        default='avg')
    parser.add_argument("-dataset", help="Existing BigQuery dataset.", default='')

    args = parser.parse_args()

    config = Configuration(
        project=args.project,
        user=args.user,
        service=args.service,
        client=args.client,
        key=args.key,
        verbose=args.verbose)

    recipe_fred_series_to_bigquery(config, args.auth, args.fred_api_key,
                                   args.fred_series_id, args.fred_units,
                                   args.fred_frequency,
                                   args.fred_aggregation_method, args.project,
                                   args.dataset)
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 12:47:00 2017

@author: sakurai
"""

import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import AffinityPropagation
from sklearn.metrics import f1_score
from sklearn.metrics import normalized_mutual_info_score
from sklearn.preprocessing import LabelEncoder


def ap_cluster_k(x, K, preference_init=-1.0, max_iter=30, c=None,
                 iter_finetune=10):
    '''
    Clustering of x by affinity propagation which the number of cluster is K.

    args:
        x (ndarray): Data matrix.
        K (int): Target number of clusters.
        preference_init (float): Initial (negative) preference to probe with.
        max_iter (int): Number of trials for bisection search.
        c (ndarray, optional): Class labels of x. If this parameter is
            specified, the function try to find the better solution by
            random search.
        iter_finetune (int): Number of steps for the random search.

    returns:
        A fitted sklearn.cluster.AffinityPropagation instance producing
        (approximately) K clusters.

    raises:
        RuntimeError: If no preference yielding K clusters can be bracketed.
    '''
    # BUG FIX: the whole body previously fit on the module-level global `y`
    # instead of the `x` parameter, so the function only worked by accident
    # when run from this file's __main__ block. All fits now use `x`.

    # first, search rough lower bound of the preference
    assert preference_init < 0, "preference_init must be negative."
    p = float(preference_init)  # preference parameter
    p_upper = 0
    # BUG FIX: k_upper was undefined when the very first probe already gave
    # k <= K, crashing the bisection prints below. Initialize it explicitly.
    k_upper = np.inf
    for i in range(5):
        ap = AffinityPropagation(preference=p).fit(x)
        k_current = len(ap.cluster_centers_indices_)
        if k_current > K:
            # Too many clusters: make preference more negative (fewer clusters).
            p_upper = p
            k_upper = k_current
            p *= 10
        else:
            p_lower = p
            k_lower = k_current
            break
    else:
        raise RuntimeError("Can't find initial lower bound for preference."
                           " Try another value of p_initial.")

    # search the preference by bisection method
    for i in range(max_iter):
        p = (p_lower + p_upper) / 2
        ap = AffinityPropagation(preference=p).fit(x)
        k_current = len(ap.cluster_centers_indices_)
        print('K = {}, k_current = {}, p = {}'.format(K, k_current, p))
        print('{}:{}, {}:{}, {}:{}'.format(k_lower, p_lower, k_current, p,
                                           k_upper, p_upper))

        # if the current k goes out of bounds then retry with perturbed p
        while k_current < k_lower or k_current > k_upper:
            print("retry")
            p += np.random.uniform(p_lower, p_upper) / 10
            ap = AffinityPropagation(preference=p).fit(x)
            k_current = len(ap.cluster_centers_indices_)
            print('K = {}, k_current = {}, p = {}'.format(K, k_current, p))
            print('{}:{}, {}:{}, {}:{}'.format(k_lower, p_lower, k_current, p,
                                               k_upper, p_upper))

        if k_current < K:
            p_lower = p
            k_lower = k_current
        elif k_current > K:
            p_upper = p
            k_upper = k_current
        else:
            break
    else:
        raise RuntimeError("Can't find a preference to form K clusters."
                           " Try another value of p_initial.")

    if c is None:
        return ap

    # Search further better preference in terms of NMI score by random search
    p_best = p
    score_best = normalized_mutual_info_score(c, ap.predict(x))
    print('initial score:', score_best)
    print()
    for i in range(iter_finetune):
        p = np.random.normal(p_best, (p_upper - p_lower) / 2)
        if p < p_lower or p > p_upper:  # where p is rejected
            print('reject')
            continue
        ap = AffinityPropagation(preference=p).fit(x)
        k_current = len(ap.cluster_centers_indices_)
        if k_current < K and p > p_lower:
            p_lower = p
        elif k_current > K and p < p_upper:
            p_upper = p
        else:  # where k_current is K
            score = normalized_mutual_info_score(c, ap.predict(x))
            if score > score_best:
                print("update p {} -> {}".format(p_best, p))
                p_best = p
                score_best = score
        print('p: {}, {}, {}'.format(p_lower, p, p_upper))
        print('score: {}'.format(score_best))
        print()
    return AffinityPropagation(preference=p_best).fit(x)


if __name__ == '__main__':
    y_train = np.load('y_train.npy')
    c_train = np.load('c_train.npy').ravel()
    y_test = np.load('y_test.npy')
    c_test = np.load('c_test.npy').ravel()
    c_train = LabelEncoder().fit_transform(c_train)
    c_test = LabelEncoder().fit_transform(c_test)

    K = 40
    # K = len(np.unique(c_train))
    y = y_train[c_train.ravel() < K]
    c = c_train[c_train < K]
    # y = y_test[c_test.ravel() < K]
    # c = c_test[c_test < K]

    ap = ap_cluster_k(y, K, preference_init=-1.0, c=c, iter_finetune=30)
    c_pred = ap.predict(y)
    print(normalized_mutual_info_score(c, c_pred))
    plt.plot(np.vstack((c_pred, c)).T)
    plt.show()
#    print f1_score(c, c_pred)
<import_stmt>sys<line_sep>#print sys.argv[0], len( sys.argv ) <if_stmt>len(sys.argv)<g>1<block_start><with_stmt>open(sys.argv[1] 'r')<as>f_in<block_start>result=0<for_stmt>line f_in<block_start>data=line.strip().split()<line_sep># print('data:', data) <if_stmt>data[0]<eq>"+"<block_start>result<augadd>float(data[1])<block_end><elif_stmt>data[0]<eq>"-"<block_start>result<augsub>float(data[1])<block_end><elif_stmt>data[0]<eq>"="<block_start>print("RESULT:" result)<line_sep>result=0<block_end><else_stmt><block_start>print('unknow:' data)<block_end><block_end><block_end><block_end>
<import_stmt>pytest<line_sep># from https://github.com/ethereum/tests/blob/c951a3c105d600ccd8f1c3fc87856b2bcca3df0a/BasicTests/txtest.json # noqa: E501 TRANSACTION_FIXTURES=[{"chainId":<none> "key":"c85ef7d79691fe79573b1a7064c19c1a9819ebdbd1faaab1a8ec92344438aaf4" "nonce":0 "gasPrice":1000000000000 "gas":10000 "to":"13978aee95f38490e9769c39b2773ed763d9cd5f" "value":10000000000000000 "data":"" "signed":"f86b8085e8d4a510008227109413978aee95f38490e9769c39b2773ed763d9cd5f872386f26fc10000801ba0eab47c1a49bf2fe5d40e01d313900e19ca485867d462fe06e139e3a536c6d4f4a014a569d327dcda4b29f74f93c0e9729d2f49ad726e703f9cd90dbb0fbf6649f1"# noqa: E501 } {"chainId":<none> "key":"c87f65ff3f271bf5dc8643484f66b200109caffe4bf98c4cb393dc35740b28c0" "nonce":0 "gasPrice":1000000000000 "gas":10000 "to":"" "value":0 "data":"<KEY>" # noqa: E501 "signed":"<KEY>"# noqa: E501 } {"chainId":1 "key":"0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318" "nonce":0 "gasPrice":234567897654321 "gas":2000000 "to":"0xF0109fC8DF283027b6285cc889F5aA624EaC1F55" "value":1000000000 "data":"" "signed":"0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428" # noqa: E501 } ]<line_sep># Hand-built for 2930 TYPED_TRANSACTION_FIXTURES=[{"chainId":1 "nonce":3 "gasPrice":1 "gas":25000 "to":"b94f5374fce5edbc8e2a8697c15331677e6ebf0b" "value":10 "data":"5544" "access_list":[[b'\xf0'<times>20 [b'\0'<times>32 b'\xff'<times>32]] ] "key":(b'\0'<times>31)+b'\x01' "sender":b'~_ER\t\x1ai\x12]]\xfc\xb7\xb8\xc2e\x90)9[\xdf' "intrinsic_gas":21000+32+2400+1900<times>2 "for_signing":'01f87a0103018261a894b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a825544f85994f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f842a00000000000000000000000000000000000000000000000000000000000000000a0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff' # noqa: E501 
"signed":'01f8bf0103018261a894b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a825544f85bf85994f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f842a00000000000000000000000000000000000000000000000000000000000000000a0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80a017047e844eef895a876778a828731a33b67863aea7b9591a0001651ee47322faa043b4d0e8d59e8663c813ffa1bb99f020278a139f07c47f3858653071b3cec6b3' # noqa: E501 "hash":"13ab8b6371d8873405db20104705d7fecee2f9083f247250519e4b4c568b17fb" }]<line_sep>@pytest.fixture(params=range(len(TRANSACTION_FIXTURES)))<def_stmt>txn_fixture request<block_start><return>TRANSACTION_FIXTURES[request.param]<block_end>@pytest.fixture(params=range(len(TYPED_TRANSACTION_FIXTURES)))<def_stmt>typed_txn_fixture request<block_start><return>TYPED_TRANSACTION_FIXTURES[request.param]<block_end>
# Copyright (c) OpenMMLab. All rights reserved. <import_stmt>argparse<import_stmt>os<import_stmt>os.path<as>osp<import_from_stmt>collections defaultdict<import_stmt>mmcv<import_from_stmt>tqdm tqdm<def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description='LaSOT test dataset to COCO Video format')<line_sep>parser.add_argument('-i' '--input' help='root directory of LaSOT test dataset' )<line_sep>parser.add_argument('-o' '--output' help='directory to save coco formatted label file' )<line_sep>parser.add_argument('--split' help='the split set of lasot, all denotes the whole dataset' choices=['train' 'test' 'all'] default='all')<line_sep><return>parser.parse_args()<block_end><def_stmt>convert_lasot ann_dir save_dir split='test'<block_start>"""Convert lasot dataset to COCO style. Args: ann_dir (str): The path of lasot dataset save_dir (str): The path to save `lasot`. split (str): the split ('train' or 'test') of dataset. """<assert_stmt>split<in>['train' 'test'] f'split [{split}] does not exist'<line_sep>lasot=defaultdict(list)<line_sep>records=dict(vid_id=1 img_id=1 ann_id=1 global_instance_id=1)<line_sep>lasot['categories']=[dict(id=0 name=0)]<line_sep>videos_list=mmcv.list_from_file(osp.join(osp.dirname(__file__) 'testing_set.txt'))<if_stmt>split<eq>'train'<block_start>train_videos_list=[]<for_stmt>video_class os.listdir(ann_dir)<block_start><for_stmt>video_id os.listdir(osp.join(ann_dir video_class))<block_start><if_stmt>video_id<not><in>videos_list<block_start>train_videos_list.append(video_id)<block_end><block_end><block_end>videos_list=train_videos_list<block_end><for_stmt>video_name tqdm(videos_list desc=split)<block_start>video_class=video_name.split('-')[0]<line_sep>video_path=osp.join(ann_dir video_class video_name)<line_sep>video=dict(id=records['vid_id'] name=video_name)<line_sep>lasot['videos'].append(video)<line_sep>gt_bboxes=mmcv.list_from_file(osp.join(video_path 
'groundtruth.txt'))<line_sep>full_occlusion=mmcv.list_from_file(osp.join(video_path 'full_occlusion.txt'))<line_sep>full_occlusion=full_occlusion[0].split(',')<line_sep>out_of_view=mmcv.list_from_file(osp.join(video_path 'out_of_view.txt'))<line_sep>out_of_view=out_of_view[0].split(',')<line_sep>img=mmcv.imread(osp.join(video_path 'img/00000001.jpg'))<line_sep>height,width,_=img.shape<for_stmt>frame_id,gt_bbox enumerate(gt_bboxes)<block_start>file_name='%08d'%(frame_id+1)+'.jpg'<line_sep>file_name=osp.join(video_class video_name 'img' file_name)<line_sep>image=dict(file_name=file_name height=height width=width id=records['img_id'] frame_id=frame_id video_id=records['vid_id'])<line_sep>lasot['images'].append(image)<line_sep>x1,y1,w,h=gt_bbox.split(',')<line_sep>ann=dict(id=records['ann_id'] video_id=records['vid_id'] image_id=records['img_id'] instance_id=records['global_instance_id'] category_id=0 bbox=[int(x1) int(y1) int(w) int(h)] area=int(w)<times>int(h) full_occlusion=full_occlusion[frame_id]<eq>'1' out_of_view=out_of_view[frame_id]<eq>'1')<line_sep>lasot['annotations'].append(ann)<line_sep>records['ann_id']<augadd>1<line_sep>records['img_id']<augadd>1<block_end>records['global_instance_id']<augadd>1<line_sep>records['vid_id']<augadd>1<block_end><if_stmt><not>osp.isdir(save_dir)<block_start>os.makedirs(save_dir)<block_end>mmcv.dump(lasot osp.join(save_dir f'lasot_{split}.json'))<line_sep>print(f'-----LaSOT {split} Dataset------')<line_sep>print(f'{records["vid_id"]-1} videos')<line_sep>print(f'{records["global_instance_id"]-1} instances')<line_sep>print(f'{records["img_id"]-1} images')<line_sep>print(f'{records["ann_id"]-1} objects')<line_sep>print('-----------------------------')<block_end><def_stmt>main <block_start>args=parse_args()<if_stmt>args.split<eq>'all'<block_start><for_stmt>split ['train' 'test']<block_start>convert_lasot(args.input args.output split=split)<block_end><block_end><else_stmt><block_start>convert_lasot(args.input args.output 
split=args.split)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
''' Provider for duck dataset from <NAME> '''<import_stmt>os<import_stmt>os.path<import_stmt>json<import_stmt>numpy<as>np<import_stmt>sys<import_stmt>pickle<import_stmt>copy<import_stmt>psutil<import_from_stmt>pyquaternion Quaternion<import_stmt>class_mapping<class_stmt>SegDataset()<block_start><def_stmt>__init__ self root='processed_pc' chained_flow_root='chained_flow' filelist_name='data_prep/train_raw.txt' labelweight_filename='data_prep/labelweights.npz' npoints=16384 num_frames=1 train=<true><block_start>self.npoints=npoints<line_sep>self.train=train<line_sep>self.root=root<line_sep>self.chained_flow_root=chained_flow_root<line_sep>self.num_frames=num_frames<line_sep>self.num_max_nonkey=num_frames-1<line_sep>self.labelweights=np.load(labelweight_filename)['labelweights']<line_sep>filenames=[]<line_sep>raw_txt_file=open(filelist_name 'r')<line_sep>l=raw_txt_file.readline()<while_stmt>len(l)<g>0<block_start>l=l.split(' ')[0]<line_sep>l=l.split('/')<line_sep>sequence_name=l[0]<line_sep>frame_id=int(l[-1].split('.')[0])<line_sep>filenames.append([sequence_name frame_id])<line_sep>l=raw_txt_file.readline()<block_end>filenames.sort()<line_sep>self.filenames=filenames<line_sep>##### debug # self.filenames = [f for f in self.filenames if 'SYNTHIA-SEQS-01-DAWN' in f[0]] self.cache={}<line_sep>self.cache_mem_usage=0.95<block_end><def_stmt>read_data self sequence_name frame_id<block_start><if_stmt>sequence_name<in>self.cache<block_start><if_stmt>frame_id<in>self.cache[sequence_name]<block_start>pc,rgb,semantic,chained_flowed,center=self.cache[sequence_name][frame_id]<line_sep><return>pc rgb semantic chained_flowed center<block_end><block_end>fn=os.path.join(self.root sequence_name+'-'+str(frame_id).zfill(6)+'.npz')<if_stmt>os.path.exists(fn)<block_start>data=np.load(fn)<line_sep>pc=data['pc']<line_sep>rgb=data['rgb']<line_sep>semantic=data['semantic']<line_sep>center=data['center']<line_sep>chained_flow=[]<line_sep>##### read flow 
basename_split=os.path.basename(fn).split('.npz')[0].split('-')<for_stmt>f range(-self.num_max_nonkey self.num_max_nonkey+1)<block_start><if_stmt>f<ne>0<block_start>new_basename='-'.join(basename_split+[str(int(basename_split[-1])+f).zfill(6)])+'.npz'<line_sep>chained_flow_fn=os.path.join(self.chained_flow_root new_basename)<if_stmt>os.path.exists(chained_flow_fn)<block_start>chained_flow_data=np.load(chained_flow_fn)['chained_flow']<block_end><else_stmt><block_start>chained_flow_data=<none><block_end><block_end><else_stmt><block_start>chained_flow_data=pc<block_end>chained_flow.append(chained_flow_data)<block_end><for_stmt>i range(self.num_max_nonkey+1 self.num_max_nonkey<times>2+1)<block_start><if_stmt>chained_flow[i]<is><none><block_start>chained_flow[i]=chained_flow[i-1]<block_end><else_stmt><block_start>chained_flow[i]=chained_flow[i-1]+chained_flow[i]<block_end><block_end><for_stmt>i range(self.num_max_nonkey-1 -1 -1)<block_start><if_stmt>chained_flow[i]<is><none><block_start>chained_flow[i]=chained_flow[i+1]<block_end><else_stmt><block_start>chained_flow[i]=chained_flow[i+1]+chained_flow[i]<block_end><block_end>chained_flowed=np.stack(chained_flow axis=-2)<line_sep>semantic=semantic.astype('uint8')<block_end><else_stmt><block_start>pc,rgb,semantic,chained_flowed,center=<none> <none> <none> <none> <none><block_end>mem=psutil.virtual_memory()<if_stmt>(mem.used/mem.total)<l>self.cache_mem_usage<block_start><if_stmt>sequence_name<not><in>self.cache<block_start>self.cache[sequence_name]={}<block_end>self.cache[sequence_name][frame_id]=(pc rgb semantic chained_flowed center)<block_end><return>pc rgb semantic chained_flowed center<block_end><def_stmt>read_training_data_point self index<block_start>sequence_name,frame_id=self.filenames[index]<line_sep>pcs=[]<line_sep>rgbs=[]<line_sep>semantics=[]<line_sep>chained_floweds_raw=[]<line_sep>center_0=<none><line_sep>exist_frame_id=[]<line_sep>most_recent_success=-1<for_stmt>diff range(0 self.num_frames)##### combination 
of (sequence_name, frame_id) is guaranteed to exist, therefore diff=0 will not return none <block_start>pc,rgb,semantic,chained_flowed,center=self.read_data(sequence_name frame_id-diff)<if_stmt>pc<is><none><block_start>pc,rgb,semantic,chained_flowed,center=self.read_data(sequence_name most_recent_success)<block_end><else_stmt><block_start>most_recent_success=frame_id-diff<block_end>exist_frame_id.append(most_recent_success)<if_stmt>diff<eq>0<block_start>center_0=center<block_end>pcs.append(pc)<line_sep>rgbs.append(rgb)<line_sep>semantics.append(semantic)<line_sep>chained_floweds_raw.append(chained_flowed)<block_end>exist_frame_id.reverse()<line_sep>##### resolve the cases for repeated frames, at the start of the sequence in the dataset chained_floweds_list=[]<for_stmt>f_dest range(self.num_frames)<block_start>chained_floweds=[]<for_stmt>f_src range(self.num_frames)<block_start>f_diff=exist_frame_id[f_dest]-exist_frame_id[f_src]<line_sep>chained_floweds.append(chained_floweds_raw[f_dest][: f_diff+self.num_max_nonkey])<block_end>chained_floweds=np.stack(chained_floweds axis=-2)<line_sep>chained_floweds_list.append(chained_floweds)<block_end>pc=np.stack(pcs axis=0)<line_sep>rgb=np.stack(rgbs axis=0)<line_sep>semantic=np.stack(semantics axis=0)<line_sep>chained_flowed=np.stack(chained_floweds_list axis=0)<line_sep><return>pc rgb semantic chained_flowed center_0<block_end><def_stmt>half_crop_w_context self half context pc rgb semantic chained_flowed center<block_start>num_frames=pc.shape[0]<line_sep>all_idx=np.arange(pc.shape[1])<line_sep>sample_indicies_half_w_context=[]<if_stmt>half<eq>0<block_start><for_stmt>f range(num_frames)<block_start>sample_idx_half_w_context=all_idx[pc[f : 2]<g>(center[2]-context)]<line_sep>sample_indicies_half_w_context.append(sample_idx_half_w_context)<block_end><block_end><else_stmt><block_start><for_stmt>f range(num_frames)<block_start>sample_idx_half_w_context=all_idx[pc[f : 
2]<l>(center[2]+context)]<line_sep>sample_indicies_half_w_context.append(sample_idx_half_w_context)<block_end><block_end>pc_half_w_context=[pc[f s]<for>f,s enumerate(sample_indicies_half_w_context)]<line_sep>rgb_half_w_context=[rgb[f s]<for>f,s enumerate(sample_indicies_half_w_context)]<line_sep>semantic_half_w_context=[semantic[f s]<for>f,s enumerate(sample_indicies_half_w_context)]<line_sep>chained_flowed_half_w_context=[chained_flowed[f s]<for>f,s enumerate(sample_indicies_half_w_context)]<if_stmt>half<eq>0<block_start>loss_masks=[p[: 2]<g>center[2]<for>p pc_half_w_context]<block_end><else_stmt><block_start>loss_masks=[p[: 2]<l>center[2]<for>p pc_half_w_context]<block_end>valid_pred_idx_in_full=sample_indicies_half_w_context<line_sep><return>pc_half_w_context rgb_half_w_context semantic_half_w_context chained_flowed_half_w_context loss_masks valid_pred_idx_in_full<block_end><def_stmt>augment self pc chained_flowed center<block_start>flip=np.random.uniform(0 1)<g>0.5<if_stmt>flip<block_start>pc=(pc-center)<line_sep>pc[: 0]<augmul>-1<line_sep>pc<augadd>center<line_sep>chained_flowed=(chained_flowed-center)<line_sep>chained_flowed[: : 0]<augmul>-1<line_sep>chained_flowed<augadd>center<block_end>scale=np.random.uniform(0.8 1.2)<line_sep>pc=(pc-center)<times>scale+center<line_sep>chained_flowed=(chained_flowed-center)<times>scale+center<line_sep>rot_axis=np.array([0 1 0])<line_sep>rot_angle=np.random.uniform(np.pi<times>2)<line_sep>q=Quaternion(axis=rot_axis angle=rot_angle)<line_sep>R=q.rotation_matrix<line_sep>pc=np.dot(pc-center R)+center<line_sep>chained_flowed=np.dot(chained_flowed-center R)+center<line_sep><return>pc chained_flowed<block_end><def_stmt>mask_and_label_conversion self semantic loss_mask<block_start>labels=[]<line_sep>loss_masks=[]<for_stmt>i,s 
enumerate(semantic)<block_start>sem=s.astype('int32')<line_sep>label=class_mapping.index_to_label_vec_func(sem)<line_sep>loss_mask_=(label<ne>12)<times>loss_mask[i]<line_sep>label[label<eq>12]=0<line_sep>labels.append(label)<line_sep>loss_masks.append(loss_mask_)<block_end><return>labels loss_masks<block_end><def_stmt>choice_to_num_points self pc rgb label chained_flowed loss_mask valid_pred_idx_in_full# shuffle idx to change point order (change FPS behavior) <block_start><for_stmt>f range(self.num_frames)<block_start>idx=np.arange(pc[f].shape[0])<line_sep>choice_num=self.npoints<if_stmt>pc[f].shape[0]<g>choice_num<block_start>shuffle_idx=np.random.choice(idx choice_num replace=<false>)<block_end><else_stmt><block_start>shuffle_idx=np.concatenate([np.random.choice(idx choice_num-idx.shape[0]) np.arange(idx.shape[0])])<block_end>pc[f]=pc[f][shuffle_idx]<line_sep>rgb[f]=rgb[f][shuffle_idx]<line_sep>chained_flowed[f]=chained_flowed[f][shuffle_idx]<line_sep>label[f]=label[f][shuffle_idx]<line_sep>loss_mask[f]=loss_mask[f][shuffle_idx]<line_sep>valid_pred_idx_in_full[f]=valid_pred_idx_in_full[f][shuffle_idx]<block_end>pc=np.concatenate(pc axis=0)<line_sep>rgb=np.concatenate(rgb axis=0)<line_sep>label=np.concatenate(label axis=0)<line_sep>chained_flowed=np.concatenate(chained_flowed axis=0)<line_sep>loss_mask=np.concatenate(loss_mask axis=0)<line_sep>valid_pred_idx_in_full=np.concatenate(valid_pred_idx_in_full axis=0)<line_sep><return>pc rgb label chained_flowed loss_mask valid_pred_idx_in_full<block_end><def_stmt>get self index half=0 context=1.<block_start>pc,rgb,semantic,chained_flowed,center=self.read_training_data_point(index)<line_sep>pc,rgb,semantic,chained_flowed,loss_mask,valid_pred_idx_in_full=self.half_crop_w_context(half context pc rgb semantic chained_flowed center)<line_sep>label,loss_mask=self.mask_and_label_conversion(semantic loss_mask)<line_sep>pc,rgb,label,chained_flowed,loss_mask,valid_pred_idx_in_full=self.choice_to_num_points(pc rgb label 
chained_flowed loss_mask valid_pred_idx_in_full)<if_stmt>self.train<block_start>pc,chained_flowed=self.augment(pc chained_flowed center)<block_end><if_stmt>self.train<block_start>labelweights=1/np.log(1.2+self.labelweights)<line_sep># labelweights = 1 / self.labelweights labelweights=labelweights/labelweights.min()<block_end><else_stmt><block_start>labelweights=np.ones_like(self.labelweights)<block_end><return>pc rgb label chained_flowed labelweights loss_mask valid_pred_idx_in_full<block_end><def_stmt>__len__ self<block_start><return>len(self.filenames)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>mayavi.mlab<as>mlab<import_stmt>class_mapping<line_sep>NUM_POINT=8192<line_sep>num_frames=3<line_sep>d=SegDataset(root='processed_pc' chained_flow_root='chained_flow' npoints=NUM_POINT train=<true> num_frames=num_frames)<line_sep>print(len(d))<import_stmt>time<line_sep>tic=time.time()<line_sep>point_size=0.2<for_stmt>idx range(200 len(d))<block_start><for_stmt>half [0 1]<block_start>print(d.filenames[idx])<line_sep>batch_data=np.zeros((NUM_POINT<times>num_frames 3+3))<line_sep>batch_chained_flowed=np.zeros((NUM_POINT<times>num_frames 3))<line_sep>batch_label=np.zeros((NUM_POINT<times>num_frames) dtype='int32')<line_sep>batch_mask=np.zeros((NUM_POINT<times>num_frames) dtype=np.bool)<line_sep>pc,rgb,label,chained_flowed,labelweights,loss_mask,valid_pred_idx_in_full=d.get(idx half)<line_sep>batch_data[: :3]=pc<line_sep>batch_data[: 3:]=rgb<line_sep>batch_chained_flowed=chained_flowed<line_sep>batch_label=label<line_sep>batch_mask=loss_mask<line_sep>print(batch_data[0<times>NUM_POINT:1<times>NUM_POINT :3]-batch_chained_flowed[0<times>NUM_POINT:1<times>NUM_POINT 0])<line_sep>print(batch_data[1<times>NUM_POINT:2<times>NUM_POINT :3]-batch_chained_flowed[1<times>NUM_POINT:2<times>NUM_POINT 1])<line_sep>print(batch_data[2<times>NUM_POINT:3<times>NUM_POINT :3]-batch_chained_flowed[2<times>NUM_POINT:3<times>NUM_POINT 
2])<line_sep>batch_labelweights=labelweights[batch_label]<line_sep>##### select only the first frame, for viz batch_data=batch_data[:NUM_POINT]<line_sep>batch_label=batch_label[:NUM_POINT]<line_sep>batch_chained_flowed=batch_chained_flowed[:NUM_POINT]<line_sep>batch_mask=batch_mask[:NUM_POINT]<line_sep>batch_labelweights=batch_labelweights[:NUM_POINT]<line_sep>##### mlab viz, with semantic mlab.figure(bgcolor=(1 1 1))<line_sep>pc_valid=batch_data[: :3][batch_mask]<line_sep>rgb_valid=batch_data[: 3:][batch_mask]<line_sep>label_valid=batch_label[batch_mask]<line_sep>chained_flowed_valid=batch_chained_flowed[batch_mask]<for_stmt>i range(12)<block_start>pc_sem=pc_valid[label_valid<eq>i]<line_sep>color=class_mapping.index_to_color[class_mapping.label_to_index[i]]<line_sep>mlab.points3d(pc_sem[: 0] pc_sem[: 1] pc_sem[: 2] scale_factor=point_size color=(color[0]/255 color[1]/255 color[2]/255))<block_end>pc_non_valid=batch_data[: :3][np.logical_not(batch_mask)]<line_sep>mlab.points3d(pc_non_valid[: 0] pc_non_valid[: 1] pc_non_valid[: 2] scale_factor=point_size color=(0 0 0))<line_sep>color=np.array([[1 0 0] [1 1 0] [0 1 0] [0 1 1] [0 0 1]])<line_sep>fwrite=open('view.pts' 'w')<for_stmt>i range(batch_data.shape[0])# p = batch_data[i, :3] <block_start><for_stmt>f range(0 num_frames)<block_start>p=batch_chained_flowed[i f]<line_sep>fwrite.write('{} {} {} {} {} {}\n'.format(p[0] p[1] p[2] color[f 0] color[f 1] color[f 2]))<block_end><block_end>input()<block_end><block_end>print(time.time()-tic)<block_end>
<import_from_stmt>amadeus.client.decorator Decorator<import_from_stmt>amadeus.reference_data._urls Urls<import_from_stmt>amadeus.reference_data._location Location<import_from_stmt>amadeus.reference_data._locations Locations<import_from_stmt>amadeus.reference_data._airlines Airlines<import_from_stmt>amadeus.reference_data._recommended_locations RecommendedLocations<class_stmt>ReferenceData(Decorator object)<block_start><def_stmt>__init__ self client<block_start>Decorator.__init__(self client)<line_sep>self.urls=Urls(client)<line_sep>self.locations=Locations(client)<line_sep>self.airlines=Airlines(client)<line_sep>self.recommended_locations=RecommendedLocations(client)<block_end><def_stmt>location self location_id<block_start><return>Location(self.client location_id)<block_end><block_end>
<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.autograd Function<import_from_stmt>torch.autograd.function once_differentiable<import_from_stmt>maskrcnn_benchmark _C<line_sep># TODO: Use JIT to replace CUDA implementation in the future. <class_stmt>_SigmoidFocalLoss(Function)<block_start>@staticmethod<def_stmt>forward ctx logits targets gamma alpha<block_start>ctx.save_for_backward(logits targets)<line_sep>num_classes=logits.shape[1]<line_sep>ctx.num_classes=num_classes<line_sep>ctx.gamma=gamma<line_sep>ctx.alpha=alpha<line_sep>losses=_C.sigmoid_focalloss_forward(logits targets num_classes gamma alpha)<line_sep><return>losses<block_end>@staticmethod@once_differentiable<def_stmt>backward ctx d_loss<block_start>logits,targets=ctx.saved_tensors<line_sep>num_classes=ctx.num_classes<line_sep>gamma=ctx.gamma<line_sep>alpha=ctx.alpha<line_sep>d_loss=d_loss.contiguous()<line_sep>d_logits=_C.sigmoid_focalloss_backward(logits targets d_loss num_classes gamma alpha)<line_sep><return>d_logits <none> <none> <none> <none><block_end><block_end>sigmoid_focal_loss_cuda=_SigmoidFocalLoss.apply<def_stmt>sigmoid_focal_loss_cpu logits targets gamma alpha<block_start>num_classes=logits.shape[1]<line_sep>gamma=gamma[0]<line_sep>alpha=alpha[0]<line_sep>dtype=targets.dtype<line_sep>device=targets.device<line_sep>class_range=torch.arange(1 num_classes+1 dtype=dtype device=device).unsqueeze(0)<line_sep>t=targets.unsqueeze(1)<line_sep>p=torch.sigmoid(logits)<line_sep>term1=(1-p)<power>gamma<times>torch.log(p)<line_sep>term2=p<power>gamma<times>torch.log(1-p)<line_sep><return>-(t<eq>class_range).float()<times>term1<times>alpha-((t<ne>class_range)<times>(t<ge>0)).float()<times>term2<times>(1-alpha)<block_end><class_stmt>SigmoidFocalLoss(nn.Module)<block_start><def_stmt>__init__ self gamma alpha<block_start>super(SigmoidFocalLoss self).__init__()<line_sep>self.gamma=gamma<line_sep>self.alpha=alpha<block_end><def_stmt>forward self logits 
targets<block_start>device=logits.device<if_stmt>logits.is_cuda<block_start>loss_func=sigmoid_focal_loss_cuda<block_end><else_stmt><block_start>loss_func=sigmoid_focal_loss_cpu<block_end>loss=loss_func(logits targets self.gamma self.alpha)<line_sep><return>loss.sum()<block_end><def_stmt>__repr__ self<block_start>tmpstr=self.__class__.__name__+"("<line_sep>tmpstr<augadd>"gamma="+str(self.gamma)<line_sep>tmpstr<augadd>", alpha="+str(self.alpha)<line_sep>tmpstr<augadd>")"<line_sep><return>tmpstr<block_end><block_end><import_from_stmt>maskrcnn_benchmark.modeling.rpn.gaussian_net.gau_label_infer three_points_solve<class_stmt>FixedIOULoss(nn.Module)<block_start><def_stmt>three_point_solve self li lj lk a b eps=1e-6<block_start>lkj,lji=lk-lj lj-li<line_sep>inverse_w2=(lkj/b-lji/a)/(a+b)<line_sep>dx=-(w2<times>lji/a+a)/2<line_sep># dx = (lkj * a * a + lji * b * b) / (lji*b - lkj * a) / 2 <return>w2 dx<block_end><def_stmt>cross_points_set_solve_3d self L points a b step=1 solver=1# points_set: (N, 3), # (c, y, x) <block_start>""" L[cj, yj-a, xj] L[cj, yj, xj-a] L[cj, yj, xj] L[cj, yj, xj + b] L[cj, yj+b, xj] """<line_sep>cj,yj,xj=points[: 0] points[: 1] points[: 2]<line_sep>idx=torch.arange(len(points))<line_sep>lx=L[cj yj]# (N, W) lxi,lxj,lxk=lx[idx xj-a] lx[idx xj] lx[idx xj+b]<line_sep>ly=L[cj : xj]# (N, H) not (H, N) lyi,lyj,lyk=ly[idx yj-a] lxj ly[idx yj+b]<line_sep>li=torch.cat([lxi lyi] dim=0)<line_sep>lj=torch.cat([lxj lyj] dim=0)<line_sep>lk=torch.cat([lxk lyk] dim=0)<line_sep>s,d=self.three_point_solve(li lj lk a b)<line_sep>n=len(s)<floordiv>2<line_sep>w,h=s[:n] s[n:]<line_sep>dx,dy=d[:n] d[n:]<line_sep># cx = xj.float() + dx # 1/2 cause use center point # cy = yj.float() + dy # x1 = cx - (w-1/step) / 2 # notice here # y1 = cy - (h-1/step) / 2 # return torch.stack([x1 * step, y1 * step, w * step, h * step, lxj], dim=1) # lxj == lyj <return>dx dy w h<block_end><def_stmt>forward self bbox target sf=0.125<block_start><def_stmt>center2corner dx dy w 
h<block_start>l=w/2-dx<line_sep>r=w/2+dx<line_sep>t=h/2-dy<line_sep>b=h/2+dy<line_sep><return>l t r b<block_end>pred_l,pred_t,pred_r,pred_b=center2corner(*bbox)<line_sep>targ_l,targ_t,targ_r,targ_b=center2corner(*target)<line_sep>l_range=(0 4)<line_sep>pred_l=pred_l.clamp(*l_range)<line_sep>pred_r=pred_r.clamp(*l_range)<line_sep>pred_t=pred_t.clamp(*l_range)<line_sep>pred_b=pred_b.clamp(*l_range)<line_sep>target_aera=target[2]<times>target[3]<line_sep>pred_aera=(pred_l+pred_r)<times>(pred_t+pred_b)<line_sep>w_intersect=torch.min(pred_l targ_l)+torch.min(pred_r targ_r)<line_sep>h_intersect=torch.min(pred_b targ_b)+torch.min(pred_t targ_t)<line_sep>area_intersect=w_intersect<times>h_intersect<line_sep>area_union=target_aera+pred_aera-area_intersect<line_sep># iou_losses = -torch.log((area_intersect.clamp(0) + 1.0) / (area_union.clamp(0) + 1.0)) iou_losses=-torch.log(((area_intersect.clamp(0)+1.0)/(area_union.clamp(0)+1.0)).clamp(0.1))<line_sep># if iou_losses.max() > 10: # print("ok") # targ_w, targ_h = target[2], target[3] # l1_losses = 0. 
# for p, t, s in zip([pred_l, pred_t, pred_r, pred_b], # [targ_l, targ_t, targ_r, targ_b], # [targ_w, targ_h, targ_w, targ_h]): # l1_losses += torch.log(1 + 3 * smooth_l1((p - t) / s)) # l1_losses /= 4 # cause loss from 4 sub-loss: l, t, r, b # valid = ((bbox[2] > 0) & (bbox[3] > 0) & (pred_l > 0) & (pred_r > 0) & (pred_t > 0) & (pred_b > 0)).float() # assert (targ_h <= 0).sum() == 0 and (targ_w <= 0).sum() == 0 and (targ_l <= 0).sum() == 0 and (targ_r <= 0).sum() == 0 \ # and (targ_t <= 0).sum() == 0 and (targ_b <= 0).sum() == 0, "" # return iou_losses * valid, l1_losses * (1 - valid) <return>iou_losses<times>0 iou_losses<times>0<block_end><block_end><def_stmt>smooth_l1 error beta=1./9<block_start>""" very similar to the smooth_l1_loss from pytorch, but with the extra beta parameter """<line_sep>n=torch.abs(error)<line_sep>cond=n<l>beta<line_sep>loss=torch.where(cond 0.5<times>n<power>2/beta n-0.5<times>beta)<line_sep><return>loss<block_end><class_stmt>FixSigmoidFocalLoss(nn.Module)<block_start><def_stmt>__init__ self gamma alpha sigma fpn_strides c EPS=1e-6<block_start>super(FixSigmoidFocalLoss self).__init__()<line_sep>self.gamma=gamma<line_sep>self.alpha=alpha<line_sep>self.sigma=sigma<line_sep>self.EPS=EPS<line_sep>self.fpn_strides=fpn_strides<line_sep>self.c=c# (0.5, 2, 1, 2) print("c1, c2, c3, c4 for pos loss:" self.c)<line_sep>self.g_mul_p=<false><line_sep>self.iou_loss=FixedIOULoss()<block_end><def_stmt>forward self cls_logits gau_logits targets valid=<none><block_start>""" :param logits: shape=(B, H, W, C) :param targets: shape=(B, H, W, C) :return: """<line_sep>gamma=self.gamma<line_sep>alpha=self.alpha<line_sep>eps=self.EPS<line_sep>c1,c2,c3,c4,c5=self.c<line_sep># num_classes = logits.shape[1] # dtype = targets.dtype # device = targets.device # # class_range = torch.arange(1, num_classes + 1, dtype=dtype, device=device).unsqueeze(0) q=targets<line_sep>p=torch.sigmoid(cls_logits)<line_sep>g=torch.sigmoid(gau_logits)<line_sep># if self.g_mul_p: g = g * p 
# loss = -(q - p) ** gamma * (torch.log(p) * alpha + torch.log(1-p) * (1 - alpha)) # origin # loss = -(q - p) ** gamma * (q * torch.log(p) * alpha + (1 - q) * torch.log(1-p) * (1 - alpha)) # correct 1 # loss = -(q - p) ** gamma * (q * torch.log(p/(q+eps)) * alpha + (1 - q) * torch.log((1-p)/(1-q+eps)) * (1 - alpha)) # correct 2 # correct 3 # loss = -(q - p) ** gamma * (q * torch.log(p/(q+eps)) + (1 - q) * torch.log((1-p)/(1-q+eps))) # neg_loss = (1-alpha) * (q <= eps).float() * loss # pos_loss = alpha * (q > eps).float() * loss # correct 4 # loss = -(q - p) ** gamma * (q * torch.log(p/(q+eps)) + (1 - q) * torch.log((1-p)/(1-q+eps))) # neg_loss = (1-alpha) * (q <= eps).float() * loss # pos_loss = 4 * alpha * (q > eps).float() * loss # correct 5 # loss = - (q * torch.log(p) + (1 - q) * torch.log(1-p)) # correct 1-2 # neg_loss = (q <= eps).float() * (- torch.log(1 - p)) * (1 - alpha) * ((q - p) ** gamma) # q * |log(p) - log(q)|^2, cause inference need -log(p), so use log L2 Loss, q to weight like centerness. 
# pos_loss = q * (torch.log(p / (q + eps)) ** 2) * alpha # * (q > eps).float() # loss 1 # loss = (- q * torch.log(p) - (1 - q) * torch.log(1 - p)) * ((q - p) ** gamma) # neg_loss = (q <= eps).float() * loss * (1 - alpha) # pos_loss = (q > eps).float() * loss * alpha # loss 1, FL # loss = (- q * torch.log(p / (q + eps)) - (1 - q) * torch.log((1 - p)/(1 - q + eps))) * ((q - p) ** gamma) # neg_loss = (q <= eps).float() * loss * (1 - alpha) # pos_loss = (q > eps).float() * loss * alpha # print((q > eps).sum(), (q <= eps).sum()) # # loss 2, log loss # neg_loss = (q <= eps).float() * (- torch.log(1 - p) * (p ** gamma)) * (1 - alpha) # FL # pos_loss = (q * smooth_l1(torch.log(p / (q + eps)))) * alpha # smoothl1([ln(p) - ln(q)]) # should be (p + eps) / (q+ eps) # # loss3, log diff loss # # use p # neg_loss = (q <= eps).float() * (1 - alpha) * (- p ** gamma * torch.log(1 - p)) # pos_loss = (q > eps).float() * alpha * (- (1 - p) ** gamma * torch.log(p)) # # # use g # gau_neg_loss = (q <= eps).float() * (1 - alpha) * (- g ** gamma * torch.log(1 - g)) * c5 # fpn_stride, object_range, out_factor = self.fpn_strides[0], torch.Tensor([32, 64]), 2 # out_factor==2 means accept object range is [min/2, max*2] # # object_range[1] *= out_factor # # object_range[0] /= out_factor # # w**2=2/L * s**2(fpn_stride) in [32**2, 64**2], L in [2*(s/32)**2, 2*(s/64)**2], L*sf=[0.5, 2] # sf = object_range[0] / fpn_stride * object_range[1] / fpn_stride / 2 # 1/2 * (O1 * O2) / S**2=16, make 1/d2(log_q) to (0.5, 2) # factor = self.sigma * self.sigma * sf # 1/diff2(log_q) in (8, 32), log_q*16 make it in (0.5, 2) # # log_p = -torch.log(g + eps) * factor # log_q = -torch.log(q + eps) * factor # center_log_p, center_log_q = log_p[:, 1:-1, 1:-1, :], log_q[:, 1:-1, 1:-1, :] # # qx_diff1, qy_diff1 = (center_log_q - log_q[:, :-2, 1:-1, :]), (center_log_q - log_q[:, 1:-1, :-2, :]) # # px_diff1, py_diff1 = (center_log_p - log_p[:, :-2, 1:-1, :]), (center_log_p - log_p[:, 1:-1, :-2, :]) # left, right = lambda x: 
x[:, 1:-1, :-2, :], lambda x: x[:, 1:-1, 2:, :] # top, bottom = lambda x: x[:, :-2, 1:-1, :], lambda x: x[:, 2:, 1:-1, :] # qx_diff1 = center_log_q - left(log_q) # qy_diff1 = center_log_q - top(log_q) # px_diff1 = center_log_p - left(log_p) # py_diff1 = center_log_p - top(log_p) # qx_diff2 = left(log_q) + right(log_q) - 2 * center_log_q # qy_diff2 = top(log_q) + bottom(log_q) - 2 * center_log_q # px_diff2 = left(log_p) + right(log_p) - 2 * center_log_p # py_diff2 = top(log_p) + bottom(log_p) - 2 * center_log_p # # print('px_diff', px_diff1.max(), px_diff1[qx_diff1 > 0].mean()) # # print('qy_diff', qy_diff1.max(), qy_diff1[qy_diff1 > 0].mean()) # # valid_x = (q[:, :-2, 1:-1, :] > eps) & (q[:, 2:, 1:-1, :] > eps) # # valid_y = (q[:, 1:-1, :-2, :] > eps) & (q[:, 1:-1, 2:, :] > eps) # # # abs(dx) = s/8/2, (32, 64) -> t in (2, 4), (-tf/2, tf/2) # tf = (object_range[1] / fpn_stride) # dqx = -((qx_diff1+eps) / (qx_diff2+eps) + 0.5)[valid] / tf # dqy = -((qy_diff1+eps) / (qy_diff2+eps) + 0.5)[valid] / tf # dpx = -((px_diff1+eps) / (qx_diff2+eps) + 0.5)[valid] / tf # use qx_diff2, not px_diff2 to get smooth grad. 
# dpy = -((py_diff1+eps) / (qy_diff2+eps) + 0.5)[valid] / tf # x_loss = torch.log(1 + 3 * (dqx - dpx).clamp(-1, 1).abs()) # y_loss = torch.log(1 + 3 * (dqy - dpy).clamp(-1, 1).abs()) # xy_loss = (smooth_l1(x_loss, beta=0.25) + smooth_l1(y_loss, beta=0.25)) # # d2_range = 1./2/out_factor, 2 * out_factor # px_diff2 = px_diff2.clamp(*d2_range)[valid] # py_diff2 = py_diff2.clamp(*d2_range)[valid] # qx_diff2 = qx_diff2.clamp(*d2_range)[valid] # qy_diff2 = qy_diff2.clamp(*d2_range)[valid] # # gau_loss = (q[:, 1:-1, 1:-1, :] > 0).float() * smooth_l1(center_log_p - center_log_q) # wh_loss = (smooth_l1(c3 * torch.log(qx_diff2/px_diff2), beta=0.25) + # smooth_l1(c3 * torch.log(qy_diff2/py_diff2), beta=0.25)) # # # def ri(x): return round(x.item(), 3) # # print("neg_loss", ri(neg_loss.max()), ri(neg_loss.mean()), end=';') # # # # def ri(x): return round(x.item(), 3) if valid.sum() > 0 else 0 # # print('gau_loss', ri(gau_loss.max()), ri(gau_loss.mean()), end=";") # # print('wh_loss', ri(wh_loss.max()), ri(wh_loss.mean()), end=';') # # print('xy_loss', ri(xy_loss.max()), ri(xy_loss.mean()), ) # valid_q = q[:, 1:-1, 1:-1, :][valid] # gau_loss = q[:, 1:-1, 1:-1, :] * (c1*gau_loss) # wh_loss = valid_q * (c2*wh_loss) # xy_loss = valid_q * (c4*xy_loss) # return neg_loss.sum(), pos_loss.sum(), gau_neg_loss.sum() * 0, gau_loss.sum(), wh_loss.sum(), xy_loss.sum() # loss4, IOU neg_loss=(q<le>eps).float()<times>(1-alpha)<times>(-p<power>gamma<times>torch.log(1-p))<line_sep>pos_loss=(q<g>eps).float()<times>alpha<times>(-(1-p)<power>gamma<times>torch.log(p))<line_sep>g=g.permute((0 3 1 2))<line_sep>q=q.permute((0 3 1 2))<line_sep>valid=valid.permute((0 3 1 2))<line_sep>factor=self.sigma<times>self.sigma<line_sep>log_p=-torch.log(g+eps)<times>factor<line_sep>log_q=-torch.log(q+eps)<times>factor<line_sep>fpn_stride,object_range,out_factor=self.fpn_strides[0] torch.Tensor([32 64]) 
2<line_sep>sf=1/((object_range[0]/fpn_stride<times>object_range[1]/fpn_stride)<power>0.5)<line_sep>iou_losses=0.<line_sep>l1_losses=0.<for_stmt>b range(len(valid))<block_start>idx=torch.nonzero(valid[b])<if_stmt>len(idx)<eq>0<block_start><continue><block_end>idx[: 1:]<augadd>1<line_sep>p_bboxes=self.iou_loss.cross_points_set_solve_3d(log_p[b] idx 1 1 step=1 solver=1)<line_sep>q_bboxes=self.iou_loss.cross_points_set_solve_3d(log_q[b] idx 1 1 step=1 solver=1)<line_sep>iou_loss,l1_loss=self.iou_loss(p_bboxes q_bboxes sf)<line_sep>valid_q=q[b : 1:-1 1:-1][valid[b]]<line_sep>iou_losses<augadd>(valid_q<times>iou_loss).sum()<line_sep>l1_losses<augadd>(valid_q<times>l1_loss).sum()<block_end><def_stmt>ri x<block_start><return>round(x.item() 3)<block_end>print("neg_loss" ri(neg_loss.max()) ri(neg_loss.mean()) end=';')<line_sep>print(iou_losses l1_losses)<line_sep><return>neg_loss.sum() pos_loss.sum() iou_losses<times>0 l1_losses<times>0<block_end><block_end><class_stmt>L2LossWithLogit(nn.Module)<block_start><def_stmt>__init__ self<block_start>super(L2LossWithLogit self).__init__()<line_sep>self.mse=nn.MSELoss(reduction='sum')<block_end><def_stmt>forward self logits targets<block_start>p=torch.sigmoid(logits)<line_sep><return>self.mse(p targets)<block_end><block_end>
# -*- coding: utf-8 -*-

# Subreddit scraped and Telegram channel posted to by the bridge bot.
subreddit = 'HQDesi'
t_channel = '@r_HqDesi'


def send_post(submission, r2t):
    """Forward a reddit *submission* through the reddit-to-telegram bridge.

    Delegates entirely to ``r2t.send_simple`` and returns whatever it
    returns (typically a success flag).
    """
    return r2t.send_simple(submission)
# -*- coding: utf-8 -*-
"""Packaging script for the alepython distribution."""
from setuptools import find_packages, setup

# Long description shown on PyPI comes straight from the README.
with open("README.md") as f:
    long_description = f.read()

# Runtime dependencies are maintained in requirements.txt, one per line.
with open("requirements.txt", "r") as f:
    required = f.read().splitlines()

setup(
    name="alepython",
    description="Python Accumulated Local Effects (ALE) package.",
    author="<NAME>",
    author_email="<EMAIL>",
    license="Apache 2",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/MaximeJumelle/alepython/",
    install_requires=required,
    extras_require={"test": ["pytest>=5.4", "pytest-cov>=2.8"]},
    setup_requires=["setuptools-scm"],
    python_requires=">=3.5",
    # Version is derived from git tags by setuptools-scm.
    use_scm_version=dict(write_to="src/alepython/_version.py"),
    keywords="alepython",
    package_dir={"": "src"},
    packages=find_packages(where="src"),
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        # Fixed: "License :: OSI Approched :: Apache 2" was both a typo and
        # not a valid trove classifier; PyPI rejects uploads with invalid
        # classifiers. The canonical Apache classifier is used instead.
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
)
# -*- coding: utf-8 - # # This file is part of tproxy released under the MIT license. # See the NOTICE for more information. version_info=(0 5 4)<line_sep>__version__=".".join(map(str version_info))<line_sep>
<import_from_stmt>plotly.graph_objs Histogram<line_sep>
# -*- coding: utf-8 -*-
"""Tests for the generated model ``__repr__`` (ascii and unicode cases)."""
import pytest
import six


def test_success(user):
    # Default User: every field renders as None in sorted-field order.
    assert repr(user) == "User(email=None, firstName=None, id=None, lastName=None, password=<PASSWORD>, phone=None, userStatus=None, username=None)"  # noqa


def test_allOf(cat, cat_spec, cat_swagger_spec):
    # Cat merges allOf schemas; repr covers the combined field set.
    assert repr(cat) == "Cat(category=None, id=None, name=None, neutered=None, photoUrls=None, tags=None)"  # noqa


@pytest.mark.skipif(six.PY3, reason="py2 has ascii default strings")
def test_unicode_py2(user):
    user.firstName = 'Ümlaut'
    # Python 2 shows the UTF-8 byte escapes of the unicode value.
    assert repr(user) == r"User(email=None, firstName='\xc3\x9cmlaut', id=None, lastName=None, password=<PASSWORD>, phone=None, userStatus=None, username=None)"  # noqa


@pytest.mark.skipif(six.PY2, reason="py3 has unicode default strings")
def test_unicode_py3(user):
    user.firstName = 'Ümlaut'
    # Python 3 strings are unicode, so the umlaut appears verbatim.
    assert repr(user) == "User(email=None, firstName='Ümlaut', id=None, lastName=None, password=<PASSWORD>, phone=None, userStatus=None, username=None)"  # noqa
# flake8: noqa <import_from_stmt>. frame series<import_from_stmt>.frame *<import_from_stmt>.series *<line_sep>__all__=frame.__all__+series.__all__<line_sep>
# -*- coding: utf-8 -*-
"""
===============================================================================
The code for the class dlm
===============================================================================

This is the main class of the Bayesian dynamic linear model.
It provides the modeling, filtering, forecasting and smoothing function
of a dlm. The dlm use the @builder to construct the @baseModel based
on user supplied @components and then run @kalmanFilter to filter the result.

Example:
>>> # randomly generate fake data on 1000 days
>>> import numpy as np
>>> data = np.random.random((1, 1000))

>>> # construct the dlm of a linear trend and a 7-day seasonality
>>> from pydlm import dlm, trend, seasonality
>>> myDlm = dlm(data) + trend(degree = 2, 0.98) + seasonality(period = 7, 0.98)

>>> # filter the result
>>> myDlm.fitForwardFilter()

>>> # extract the filtered result
>>> myDlm.getFilteredObs()
"""
# This is the major class for fitting time series data using the
# dynamic linear model. dlm is a subclass of builder, with adding the
# Kalman filter functionality for filtering the data

from copy import deepcopy
from numpy import matrix
from pydlm.predict.dlmPredictMod import dlmPredictModule
from pydlm.access.dlmAccessMod import dlmAccessModule
from pydlm.tuner.dlmTuneMod import dlmTuneModule
from pydlm.plot.dlmPlotMod import dlmPlotModule


class dlm(dlmPlotModule, dlmPredictModule, dlmAccessModule, dlmTuneModule):
    """ The main class of the dynamic linear model.

    This is the main class of the Bayesian dynamic linear model.
    It provides the modeling, filtering, forecasting and smoothing
    function of a dlm. The dlm use the @builder to construct the
    @baseModel based on user supplied @components and then run
    @kalmanFilter to filter the result.

    Example 1:
    >>> # randomly generate fake data on 1000 days
    >>> import numpy as np
    >>> data = np.random.random((1, 1000))
    >>> # construct the dlm of a linear trend and a 7-day seasonality
    >>> myDlm = dlm(data) + trend(degree = 2, 0.98) + seasonality(period = 7, 0.98)
    >>> # filter the result
    >>> myDlm.fitForwardFilter()
    >>> # extract the filtered result
    >>> myDlm.getFilteredObs()

    Example 2 (fit a linear regression):
    >>> from pydlm import dynamic
    >>> data = np.random.random((1, 100))
    >>> mydlm = dlm(data) + trend(degree=1, 0.98, name='a') + dynamic(features=[[i] for i in range(100)], 1, name='b')
    >>> mydlm.fit()
    >>> coef_a = mydlm.getLatentState('a')
    >>> coef_b = mydlm.getLatentState('b')

    Attributes:
       data: a list of doubles of the raw time series data.
             It could be either the python's built-in list of
             doubles or numpy 1d array.
    """

    # define the basic members
    # initialize the result
    def __init__(self, data, **options):
        super(dlm, self).__init__(data, **options)

        # This model is used for prediction. Prediction functions
        # will change the model status to forecast at a particular
        # date. Using a copied model will help the main model from
        # being changed and behaving abnormally.
        self._predictModel = None

    def exportModel(self):
        """ Export the dlm builder.

        Currently the method only support dlm without dynamic components.
        """
        # Fixed: original called the undefined name ``length`` which raised
        # NameError; the built-in ``len`` is what was intended.
        if len(self.builder.dynamicComponents) > 0:
            raise ValueError('Cannot export dlm builder with dynamic components.')

        if not self.initialized:
            raise ValueError('Cannot export dlm before the model was initilized.')

        return deepcopy(self.builder)

    def buildFromModel(self, model):
        """ Construct the dlm with exported model from other DLM with status.

        Args:
            model: The exported model from other dlm. Must be the return
                   from dlm.exportModel()
        """
        self._initializeFromBuilder(exported_builder=model)

    # ===================== modeling components =====================

    # add component
    def add(self, component):
        """ Add new modeling component to the dlm.

        Currently support: trend, seasonality, autoregression
        and dynamic component.

        Args:
            component: the modeling component, could be either one
                       of the following:\n
                       trend, seasonality, dynamic, autoReg.

        Returns:
            A dlm object with added component.
        """
        # Fixed: the docstring promises a dlm object but the original
        # discarded __add__'s return value; returning it is backward
        # compatible and enables chaining.
        return self.__add__(component)

    def __add__(self, component):
        self.builder.__add__(component)
        self.initialized = False
        return self

    # list all components
    def ls(self):
        """ List out all existing components """
        self.builder.ls()

    # delete one component
    def delete(self, name):
        """ Delete model component by its name

        Args:
            name: the name of the component.
        """
        self.builder.delete(name)
        self.initialized = False

    # ========================== model training component =======================

    def fitForwardFilter(self, useRollingWindow=False, windowLength=3):
        """ Fit forward filter on the available data.

        User can choose use rolling windowFront or not. If user choose not to
        use the rolling window, then the filtering will be based on all the
        previous data. If rolling window is used, then the filtering for a
        particular date will only consider previous dates that are
        within the rolling window length.

        Args:
            useRollingWindow: indicate whether rolling window should be used.
            windowLength: the length of the rolling window if used.
        """
        # check if the feature size matches the data size
        self._checkFeatureSize()

        # see if the model has been initialized
        if not self.initialized:
            self._initialize()

        if self._printInfo:
            print('Starting forward filtering...')

        if not useRollingWindow:
            # we start from the last step of previous filtering
            if self.result.filteredType == 'non-rolling':
                start = self.result.filteredSteps[1] + 1
            else:
                start = 0
                # because we refit the forward filter, we need to reset the
                # backward smoother as well.
                self.result.smoothedSteps = [0, -1]

            # determine whether renew should be used
            self._forwardFilter(start=start,
                                end=self.n - 1,
                                renew=self.options.stable)
            self.result.filteredType = 'non-rolling'
        else:
            if self.result.filteredType == 'rolling':
                windowFront = self.result.filteredSteps[1] + 1
            else:
                windowFront = 0
                # because we refit the forward filter, we need to reset the
                # backward smoother as well.
                self.result.smoothedSteps = [0, -1]

            self.result.filteredType = 'rolling'
            # if end is still within (0, windowLength - 1), we should run the
            # usual ff from
            if windowFront < windowLength:
                self._forwardFilter(start=self.result.filteredSteps[1] + 1,
                                    end=min(windowLength - 1, self.n - 1))

            # for the remaining date, we use a rolling window
            for today in range(max(windowFront, windowLength), self.n):
                self._forwardFilter(start=today - windowLength + 1,
                                    end=today,
                                    save=today,
                                    ForgetPrevious=True)

        self.result.filteredSteps = [0, self.n - 1]
        self.turnOn('filtered plot')
        self.turnOn('predict plot')

        if self._printInfo:
            print('Forward filtering completed.')

    def fitBackwardSmoother(self, backLength=None):
        """ Fit backward smoothing on the data. Starting from the last observed
        date.

        Args:
            backLength: integer, indicating how many days the backward smoother
                        should go, starting from the last date.
        """
        # see if the model has been initialized
        if not self.initialized:
            raise NameError('Backward Smoother has to be run after'
                            + ' forward filter')

        if self.result.filteredSteps[1] != self.n - 1:
            raise NameError('Forward Fiter needs to run on full data before'
                            + 'using backward Smoother')

        # default value for backLength
        if backLength is None:
            backLength = self.n

        if self._printInfo:
            print('Starting backward smoothing...')

        # if the smoothed dates has already been done, we do nothing
        if self.result.smoothedSteps[1] == self.n - 1 and \
           self.result.smoothedSteps[0] <= self.n - 1 - backLength + 1:
            return None

        # if the smoothed dates start from n - 1, we just need to continue
        elif self.result.smoothedSteps[1] == self.n - 1:
            self._backwardSmoother(start=self.result.smoothedSteps[0] - 1,
                                   days=backLength)

        # if the smoothed dates are even earlier,
        # we need to start from the beginning
        elif self.result.smoothedSteps[1] < self.n - 1:
            self._backwardSmoother(start=self.n - 1, days=backLength)

        self.result.smoothedSteps = [self.n - backLength, self.n - 1]
        self.turnOn('smoothed plot')

        if self._printInfo:
            print('Backward smoothing completed.')

    def fit(self):
        """ An easy caller for fitting both the forward filter and backward
        smoother.
        """
        self.fitForwardFilter()
        self.fitBackwardSmoother()

    # ======================= data appending, popping and altering ===============

    # Append new data or features to the dlm
    def append(self, data, component='main'):
        """ Append the new data to the main data or the components (new feature data)

        Args:
            data: the new data
            component: the name of which the new data to be added to.\n
                       'main': the main time series data\n
                       other component name: add new feature data to other
                       component.
        """
        # initialize the model to ease the modification
        if not self.initialized:
            self._initialize()

        # if we are adding new data to the time series
        if component == 'main':
            # add the data to the self.data
            self.data.extend(list(data))

            # update the length
            self.n += len(data)
            self.result._appendResult(len(data))

            # update the automatic components as well
            # (loop variable renamed: the original shadowed the ``component``
            # parameter)
            for name in self.builder.automaticComponents:
                comp = self.builder.automaticComponents[name]
                comp.appendNewData(data)

            # give a warning to remind to append dynamic components
            if len(self.builder.dynamicComponents) > 0:
                print('Remember to append the new features for the'
                      + ' dynamic components as well')

        # if we are adding new data to the features of dynamic components
        elif component in self.builder.dynamicComponents:
            comp = self.builder.dynamicComponents[component]
            comp.appendNewData(data)
        else:
            raise NameError('Such dynamic component does not exist.')

    # pop the data of a specific date out
    def popout(self, date):
        """ Pop out the data for a given date

        Args:
            date: the index indicates which date to be popped out.
        """
        if date < 0 or date > self.n - 1:
            raise NameError('The date should be between 0 and ' +
                            str(self.n - 1))

        # initialize the model to ease the modification
        if not self.initialized:
            self._initialize()

        # pop out the data at date
        self.data.pop(date)
        self.n -= 1

        # pop out the feature at date
        for name in self.builder.dynamicComponents:
            comp = self.builder.dynamicComponents[name]
            comp.popout(date)

        # pop out the results at date
        self.result._popout(date)

        # update the filtered and the smoothed steps
        self.result.filteredSteps[1] = date - 1
        self.result.smoothedSteps[1] = date - 1

        if self.result.filteredSteps[0] > self.result.filteredSteps[1]:
            self.result.filteredSteps = [0, -1]
            self.result.smoothedSteps = [0, -1]
        elif self.result.smoothedSteps[0] > self.result.smoothedSteps[1]:
            self.result.smoothedSteps = [0, -1]

    # alter the data of a specific days
    def alter(self, date, data, component='main'):
        """ To alter the data for a specific date and a specific component.

        Args:
            date: the date of the altering data
            data: the new data. data must be a numeric value for main time
                  series and must be a list of numerical values for dynamic
                  components.
            component: the component for which the new data need to be
                       supplied to.\n
                       'main': the main time series data\n
                       other component name: other component feature data
        """
        if date < 0 or date > self.n - 1:
            raise NameError('The date should be between 0 and ' +
                            str(self.n - 1))

        # initialize the model to ease the modification
        if not self.initialized:
            self._initialize()

        # to alter the data for the observed chain
        if component == 'main':
            self.data[date] = data

            # we also automatically alter all the automatic components
            # (loop variable renamed: the original shadowed the ``component``
            # parameter)
            for name in self.builder.automaticComponents:
                comp = self.builder.automaticComponents[name]
                comp.alter(date, data)

        # to alter the feature of a component
        elif component in self.builder.dynamicComponents:
            comp = self.builder.dynamicComponents[component]
            comp.alter(date, data)
        else:
            raise NameError('Such dynamic component does not exist.')

        # update the filtered and the smoothed steps
        self.result.filteredSteps[1] = date - 1
        self.result.smoothedSteps[1] = date - 1

        if self.result.filteredSteps[0] > self.result.filteredSteps[1]:
            self.result.filteredSteps = [0, -1]
            self.result.smoothedSteps = [0, -1]
        elif self.result.smoothedSteps[0] > self.result.smoothedSteps[1]:
            self.result.smoothedSteps = [0, -1]

    # ignore the data of a given date
    def ignore(self, date):
        """ Ignore the data for a specific day. treat it as missing data

        Args:
            date: the date to ignore.
        """
        if date < 0 or date > self.n - 1:
            raise NameError('The date should be between 0 and ' +
                            str(self.n - 1))

        # a None observation is treated as missing by the Kalman filter
        self.alter(date=date, data=None, component='main')

    # ================================ control options =========================

    def showOptions(self):
        """ Print out all the option values """
        allItems = vars(self.options)
        for item in allItems:
            print(item + ': ' + str(allItems[item]))

    def stableMode(self, use=True):
        """ Turn on the stable mode, i.e., using the renewal strategy.

        Indicate whether the renew strategy should be used to add numerical
        stability. When the filter goes over certain steps,
        the information contribution of the previous data has decayed
        to minimum. In the stable mode, We then ignore those days and
        refit the time series starting from current - renewTerm, where
        renewTerm is computed according to the discount. Thus, the effective
        sample size of the dlm is twice renewTerm. When discount = 1,
        there will be no renewTerm, since all the information will be passed
        along.
        """
        # if option changes, reset everything
        if self.options.stable != use:
            self.initialized = False

        if use is True:
            self.options.stable = True
        elif use is False:
            self.options.stable = False
        else:
            raise NameError('Incorrect option input')

    def evolveMode(self, evoType='dependent'):
        """ Control whether different component evolve indpendently. If true,
        then the innovation will only be added on each component but not the
        correlation between the components, so that for component with discount
        equals to 1, the smoothed results will always be constant.

        Args:
            evoType: If set to 'independent', then each component will evolve
                     independently. If set to 'dependent', then the components
                     will proceed jointly. Default to 'independent'. Switch to
                     'dependent' if efficiency is a concern.

        Returns:
            a dlm object (for chaining purpose)
        """
        # if option changes, reset everything
        if (self.options.innovationType == 'whole' and
            evoType == 'independent') or \
           (self.options.innovationType == 'component' and
            evoType == 'dependent'):
            self.initialized = False

        if evoType == 'independent':
            self.options.innovationType = 'component'
        elif evoType == 'dependent':
            self.options.innovationType = 'whole'
        else:
            raise NameError('Incorrect option input')

        # for chaining
        return self

    def noisePrior(self, prior=0):
        """ To set the prior for the observational noise. Calling with empty
        argument will enable the auto noise intializer (currently, the min of 1
        and the variance of time series).

        Args:
            prior: the prior of the observational noise.

        Returns:
            A dlm object (for chaining purpose)
        """
        if prior > 0:
            self.options.noise = prior
            self.initialized = False
        else:
            self.options.useAutoNoise = True
            self.initialized = False

        # for chaining
        return self
import FWCore.ParameterSet.Config as cms

# ALCA producer for HCAL iterative phi-symmetry calibration: a straight
# clone of the generic ECAL/HCAL readouts producer with its defaults.
import Calibration.HcalAlCaRecoProducers.alcaEcalHcalReadoutsProducer_cfi

IterativePhiSymProd = (
    Calibration.HcalAlCaRecoProducers.alcaEcalHcalReadoutsProducer_cfi
    .alcaEcalHcalReadoutsProducer
    .clone()
)
import pathlib
import typing
import urllib.parse

# Locations of resources shipped alongside this plugin.
_PLUGIN_DIR = pathlib.Path(__file__).parent
PLUGIN_DIR = str(_PLUGIN_DIR)
CONFIGS_DIR = str(_PLUGIN_DIR.joinpath('configs'))
SCRIPTS_DIR = str(_PLUGIN_DIR.joinpath('scripts'))


def scan_sql_directory(root: str) -> typing.List[pathlib.Path]:
    """Return the ``*.sql`` files directly inside *root*, sorted by name."""
    entries = sorted(pathlib.Path(root).iterdir())
    return [entry for entry in entries
            if entry.is_file() and entry.suffix == '.sql']


def connstr_replace_dbname(connstr: str, dbname: str) -> str:
    """Replace dbname in existing connection string."""
    # Key/value DSN form: the caller provides a trailing empty ``dbname=``
    # slot which we simply fill in.
    if connstr.endswith(' dbname='):
        return connstr + dbname
    # URI form: swap the path component; urlunparse re-inserts the leading
    # slash, so everything else (credentials, host, port, query) survives.
    if connstr.startswith('postgresql://'):
        parsed = urllib.parse.urlparse(connstr)
        parsed = parsed._replace(path=dbname)  # pylint: disable=protected-access
        return parsed.geturl()
    raise RuntimeError(
        f'Unsupported PostgreSQL connection string format {connstr!r}',
    )
import json
import argparse
import numpy as np
import os

from ios.ir import Graph
from ios.visualizer import draw, draw_block
from ios.cost_model import IOSCostModel

# Command-line interface: which experiment to run and how to measure it.
argparser = argparse.ArgumentParser()
argparser.add_argument('--edir', type=str, required=True)
argparser.add_argument('--ename', type=str, required=True)
argparser.add_argument('--device', type=str, required=True, choices=['k80', 'v100'])
argparser.add_argument('--graph', type=str, required=True)
argparser.add_argument('--bs', type=int, required=True)
argparser.add_argument('--warmup', type=int, required=False, default=2)
argparser.add_argument('--number', type=int, required=False, default=6)
argparser.add_argument('--repeat', type=int, required=False, default=6)
args = argparser.parse_args()

# Every artifact of this run (plots, configs, logs) lands under expr_dir.
expr_dir = f'./outputs/{args.edir}/{args.ename}-{args.device}-g{args.graph}-bs{args.bs}-{args.warmup}-{args.number}-{args.repeat}'
os.makedirs(expr_dir, exist_ok=True)


def summary_str(latency):
    """Render a one-line human-readable summary for the experiment kind."""
    if args.edir == 'batchsize':
        g, e = args.ename.split('_')
        g, e = g[3:], e[3:]
        return f'Optimized for BS {g:<3} Execute with BS {e:<3} Latency: {latency:.2f} ms'
    if args.edir == 'device':
        g, e = args.ename.split('_on_')
        return f'Optimized for {g:<4} Execute with {e:<4} Latency: {latency:.2f} ms'
    raise ValueError


def main():
    """Measure graph and per-block latency, then dump plots, configs and logs."""
    with open(f'schedules/{args.graph}.json', 'r') as f:
        graph = Graph.from_config(json.load(f))
    cost_model = IOSCostModel()
    name = graph.name

    graph_latency = cost_model.get_graph_latency(
        graph, args.bs, warmup=args.warmup, number=args.number,
        repeat=args.repeat)
    block_latency = [
        np.mean(cost_model.get_block_latency(
            block, args.bs, args.warmup, args.number, args.repeat))
        for block in graph.blocks
    ]

    logs = {
        name: {
            'latency': graph_latency,
            'mean': float(np.mean(graph_latency)),
            'std': float(np.std(graph_latency)),
            'block_latency': block_latency,
        }
    }

    summary = summary_str(np.mean(graph_latency))
    print(summary)

    # One image per block, plus a whole-graph rendering.
    block_dir = f'{expr_dir}/{name}_blocks'
    for bindex, block in enumerate(graph.blocks):
        os.makedirs(block_dir, exist_ok=True)
        draw_block(block, f'{block_dir}/{bindex}.png',
                   f'{name} block {bindex}, latency {block_latency[bindex]:.3f}')
    draw(graph, f"{expr_dir}/{name}.png",
         label=f'{name}, latency {float(np.mean(graph_latency)):.3f}')

    with open(f"{expr_dir}/{name}.json", "w") as f:
        json.dump(graph.export_config(), f, indent=2)
    with open(f'{expr_dir}/latency.json', 'w') as f:
        json.dump(logs, f, indent=2)
    with open(f'{expr_dir}/summary.txt', 'w') as f:
        f.write(summary + "\n")
    with open(f'{expr_dir}/arguments.txt', 'w') as f:
        json.dump(args.__dict__, f, indent=2)


main()
'''Autogenerated by xml_generate script, do not edit!'''<import_from_stmt>OpenGL platform<as>_p arrays<line_sep># Code generation uses this <import_from_stmt>OpenGL.raw.GL _types<as>_cs<line_sep># End users want this... <import_from_stmt>OpenGL.raw.GL._types *<import_from_stmt>OpenGL.raw.GL _errors<import_from_stmt>OpenGL.constant Constant<as>_C<import_stmt>ctypes<line_sep>_EXTENSION_NAME='GL_VERSION_GL_4_4'<def_stmt>_f function<block_start><return>_p.createFunction(function _p.PLATFORM.GL 'GL_VERSION_GL_4_4' error_checker=_errors._error_checker)<block_end>GL_BUFFER_IMMUTABLE_STORAGE=_C('GL_BUFFER_IMMUTABLE_STORAGE' 0x821F)<line_sep>GL_BUFFER_STORAGE_FLAGS=_C('GL_BUFFER_STORAGE_FLAGS' 0x8220)<line_sep>GL_CLEAR_TEXTURE=_C('GL_CLEAR_TEXTURE' 0x9365)<line_sep>GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT=_C('GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT' 0x00004000)<line_sep>GL_CLIENT_STORAGE_BIT=_C('GL_CLIENT_STORAGE_BIT' 0x0200)<line_sep>GL_DYNAMIC_STORAGE_BIT=_C('GL_DYNAMIC_STORAGE_BIT' 0x0100)<line_sep>GL_LOCATION_COMPONENT=_C('GL_LOCATION_COMPONENT' 0x934A)<line_sep>GL_MAP_COHERENT_BIT=_C('GL_MAP_COHERENT_BIT' 0x0080)<line_sep>GL_MAP_PERSISTENT_BIT=_C('GL_MAP_PERSISTENT_BIT' 0x0040)<line_sep>GL_MAP_READ_BIT=_C('GL_MAP_READ_BIT' 0x0001)<line_sep>GL_MAP_WRITE_BIT=_C('GL_MAP_WRITE_BIT' 0x0002)<line_sep>GL_MAX_VERTEX_ATTRIB_STRIDE=_C('GL_MAX_VERTEX_ATTRIB_STRIDE' 0x82E5)<line_sep>GL_MIRROR_CLAMP_TO_EDGE=_C('GL_MIRROR_CLAMP_TO_EDGE' 0x8743)<line_sep>GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED=_C('GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED' 0x8221)<line_sep>GL_QUERY_BUFFER=_C('GL_QUERY_BUFFER' 0x9192)<line_sep>GL_QUERY_BUFFER_BARRIER_BIT=_C('GL_QUERY_BUFFER_BARRIER_BIT' 0x00008000)<line_sep>GL_QUERY_BUFFER_BINDING=_C('GL_QUERY_BUFFER_BINDING' 0x9193)<line_sep>GL_QUERY_RESULT_NO_WAIT=_C('GL_QUERY_RESULT_NO_WAIT' 0x9194)<line_sep>GL_STENCIL_INDEX=_C('GL_STENCIL_INDEX' 0x1901)<line_sep>GL_STENCIL_INDEX8=_C('GL_STENCIL_INDEX8' 
0x8D48)<line_sep>GL_TEXTURE_BUFFER_BINDING=_C('GL_TEXTURE_BUFFER_BINDING' 0x8C2A)<line_sep>GL_TRANSFORM_FEEDBACK_BUFFER=_C('GL_TRANSFORM_FEEDBACK_BUFFER' 0x8C8E)<line_sep>GL_TRANSFORM_FEEDBACK_BUFFER_INDEX=_C('GL_TRANSFORM_FEEDBACK_BUFFER_INDEX' 0x934B)<line_sep>GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE=_C('GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE' 0x934C)<line_sep>GL_UNSIGNED_INT_10F_11F_11F_REV=_C('GL_UNSIGNED_INT_10F_11F_11F_REV' 0x8C3B)<line_sep>@_f@_p.types(<none> _cs.GLenum _cs.GLuint _cs.GLsizei arrays.GLuintArray)<def_stmt>glBindBuffersBase target first count buffers<block_start><pass><block_end>@_f@_p.types(<none> _cs.GLenum _cs.GLuint _cs.GLsizei arrays.GLuintArray ctypes.POINTER(_cs.GLintptr) ctypes.POINTER(_cs.GLsizeiptr))<def_stmt>glBindBuffersRange target first count buffers offsets sizes<block_start><pass><block_end>@_f@_p.types(<none> _cs.GLuint _cs.GLsizei arrays.GLuintArray)<def_stmt>glBindImageTextures first count textures<block_start><pass><block_end>@_f@_p.types(<none> _cs.GLuint _cs.GLsizei arrays.GLuintArray)<def_stmt>glBindSamplers first count samplers<block_start><pass><block_end>@_f@_p.types(<none> _cs.GLuint _cs.GLsizei arrays.GLuintArray)<def_stmt>glBindTextures first count textures<block_start><pass><block_end>@_f@_p.types(<none> _cs.GLuint _cs.GLsizei arrays.GLuintArray ctypes.POINTER(_cs.GLintptr) arrays.GLsizeiArray)<def_stmt>glBindVertexBuffers first count buffers offsets strides<block_start><pass><block_end>@_f@_p.types(<none> _cs.GLenum _cs.GLsizeiptr ctypes.c_void_p _cs.GLbitfield)<def_stmt>glBufferStorage target size data flags<block_start><pass><block_end>@_f@_p.types(<none> _cs.GLuint _cs.GLint _cs.GLenum _cs.GLenum ctypes.c_void_p)<def_stmt>glClearTexImage texture level format type data<block_start><pass><block_end>@_f@_p.types(<none> _cs.GLuint _cs.GLint _cs.GLint _cs.GLint _cs.GLint _cs.GLsizei _cs.GLsizei _cs.GLsizei _cs.GLenum _cs.GLenum ctypes.c_void_p)<def_stmt>glClearTexSubImage texture level xoffset yoffset zoffset width height 
depth format type data<block_start><pass><block_end>
<import_stmt>os<import_from_stmt>tick.array.serialize tick_double_sparse2d_from_file tick_double_array_from_file<import_from_stmt>tick.linear_model.model_logreg ModelLogReg<import_from_stmt>tick.prox.prox_elasticnet ProxElasticNet<import_from_stmt>tick.solver.saga SAGA<line_sep># Create this dataset with benchmark_util dirpath=os.path.dirname(__file__)<line_sep>features_path=os.path.join(dirpath "data" "url.3.features.cereal")<line_sep>labels_path=os.path.join(dirpath "data" "url.3.labels.cereal")<line_sep>N_ITER=200<line_sep>n_samples=196000<line_sep>ALPHA=1./n_samples<line_sep>BETA=1e-10<line_sep>STRENGTH=ALPHA+BETA<line_sep>RATIO=BETA/STRENGTH<line_sep>THREADS=8<line_sep>features=tick_double_sparse2d_from_file(features_path)<line_sep>labels=tick_double_array_from_file(labels_path)<line_sep>model=ModelLogReg().fit(features labels)<line_sep>prox=ProxElasticNet(STRENGTH RATIO)<line_sep>saga=SAGA(max_iter=N_ITER tol=0 rand_type="unif" step=0.00257480411965 n_threads=THREADS verbose=<false> record_every=20 )<line_sep>saga.history.print_order<augadd>['time']<line_sep>saga.set_model(model).set_prox(prox)<line_sep>saga.solve()<line_sep>saga.print_history()<line_sep>
print("Hola, el mundo!")<line_sep>
<def_stmt>test <block_start><assert_stmt>(len(TRAINING_DATA)<eq>3) "Irgendetwas scheint mit deinen Daten nicht zu stimmen. Erwartet werden 3 Beispiele."<assert_stmt>all(len(entry)<eq>2<and>isinstance(entry[1] dict)<for>entry TRAINING_DATA) "Die Trainingsdaten haben nicht das richtige Format. Erwartet wird eine Liste von Tuples, bestehend aus Text und einem Dictionary als zweites Element."<line_sep>ents=[entry[1].get("entities" [])<for>entry TRAINING_DATA]<assert_stmt>len(ents[0])<eq>2 "Das erste Beispiel sollte zwei Entitäten enhalten."<line_sep>ent_0_0=(0 6 "WEBSITE")<line_sep>ent_0_1=(11 18 "WEBSITE")<assert_stmt>(ents[0][0]<eq>ent_0_0) "Überprüfe nochmal die erste Entität im ersten Beispiel."<assert_stmt>(ents[0][1]<eq>ent_0_1) "Überprüfe nochmal die zweite Entität im ersten Beispiel."<assert_stmt>len(ents[1])<eq>1 "Das zweite Beispiel sollte eine Entität enthalten."<assert_stmt>ents[1]<eq>[(28 35 "WEBSITE" )] "Überprüfe nochmal die Entität im zweiten Beispiel."<assert_stmt>len(ents[2])<eq>1 "Das dritte Beispiel sollte eine Entität enthalten."<assert_stmt>ents[2]<eq>[(15 21 "WEBSITE" )] "Überprüfe nochmal die Entität im dritten Beispiel."<line_sep>__msg__.good("Sehr schön!")<block_end>
#************************************************************************** #* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. * #* * #* Author: The ALICE Off-line Project. * #* Contributors are mentioned in the code where appropriate. * #* * #* Permission to use, copy, modify and distribute this software and its * #* documentation strictly for non-commercial purposes is hereby granted * #* without fee, provided that the above copyright notice appears in all * #* copies and that both the copyright notice and this permission notice * #* appear in the supporting documentation. The authors make no claims * #* about the suitability of this software for any purpose. It is * #* provided "as is" without express or implied warranty. * #************************************************************************** """ Comparison plot of trigger efficiencies in MC in different pt-hat bins including underlying data structure @author: <NAME> """<import_from_stmt>PWG.PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics SinglePanelPlot GraphicsObject Frame Style<import_from_stmt>PWG.PWGJE.EMCALJetTasks.Tracks.analysis.base.ComparisonData ComparisonData ComparisonObject ComparisonPlot<import_from_stmt>ROOT TFile kBlack<class_stmt>TriggerEfficiencyClassPtHat(ComparisonObject)<block_start><def_stmt>__init__ self pthatbin triggerdata style<block_start>ComparisonObject.__init__(self triggerdata style)<line_sep>self.__pthatbin=pthatbin<block_end><def_stmt>GetLegendTitle self<block_start><return>"p_{t}-hat bin %d"%(self.__pthatbin)<block_end><def_stmt>GetObjectName self<block_start><return>"pthat%d"%(self.__pthatbin)<block_end><block_end><class_stmt>TriggerEfficiencyClassTriggerType(ComparisonObject)<block_start><def_stmt>__init__ self triggername triggerdata style<block_start>ComparisonObject.__init__(self triggerdata style)<line_sep>self.__triggername=triggername<block_end><def_stmt>GetLegendTitle 
self<block_start><return>self.__triggername<block_end><def_stmt>GetObjectName self<block_start><return>self.__triggername<block_end><block_end><class_stmt>TriggerEfficiencyContainer(ComparisonData)<block_start>""" Underlying data structure for the comparison plot """<def_stmt>__init__ self<block_start>""" Initialise container """<line_sep>ComparisonData.__init__(self)<block_end><def_stmt>AddEfficiency self trclasstype key efficiencyCurve style<block_start>""" Add new trigger ifno """<line_sep>triggerdata=<none><if_stmt>trclasstype<eq>"pthat"<block_start>triggerdata=TriggerEfficiencyClassPtHat(key efficiencyCurve style)<block_end><elif_stmt>trclasstype<eq>"triggertype"<block_start>triggerdata=TriggerEfficiencyClassTriggerType(key efficiencyCurve style)<block_end>self.AddEntry(triggerdata)<block_end><block_end><class_stmt>TriggerEfficiencyFrame(Frame)<block_start>""" Frame class for trigger efficiency plots """<def_stmt>__init__ self name<block_start>""" Constructor """<line_sep>Frame.__init__(self name 0. 100. 0. 
1.)<line_sep>self.SetXtitle("p_{t} (GeV/c)")<line_sep>self.SetYtitle("Trigger efficiency")<block_end><block_end><class_stmt>TriggerEfficiencyPlotMC(ComparisonPlot)<block_start>""" Comparison plot of trigger efficiencies in different pt-hat bins """<def_stmt>__init__ self<block_start>""" Constructor """<line_sep>ComparisonPlot.__init__(self)<line_sep>self._comparisonContainer=TriggerEfficiencyContainer()<line_sep>self.SetFrame(TriggerEfficiencyFrame("tframe"))<line_sep>self.SetLegendAttributes(0.65 0.15 0.89 0.5)<line_sep>self.__triggername=""<block_end><def_stmt>SetTriggerName self trname<block_start>""" Set triggername for the label """<line_sep>self.__triggername=trname<block_end><def_stmt>AddEfficiency self pthatbin efficiency style<block_start>""" Add new efficiency container to the data structure """<line_sep>self._comparisonContainer.AddEfficiency("pthat" pthatbin efficiency style)<block_end><def_stmt>Create self<block_start>""" Create the plot """<line_sep>self._Create("triggerEfficiencyMC" "MC trigger efficiency plot")<if_stmt>len(self.__triggername)<block_start>pad=self._GetFramedPad()<line_sep>pad.DrawLabel(0.15 0.8 0.5 0.85 self.__triggername)<block_end><block_end><block_end><class_stmt>TriggerEfficiencyPlotClasses(ComparisonPlot)<block_start>""" Plot comparing the trigger efficiency of different trigger types """<def_stmt>__init__ self<block_start>""" Constructor """<line_sep>ComparisonPlot.__init__(self)<line_sep>self._comparisonContainer=TriggerEfficiencyContainer()<line_sep>self.SetFrame(TriggerEfficiencyFrame("tframe"))<line_sep>self.SetLegendAttributes(0.65 0.15 0.89 0.5)<block_end><def_stmt>AddTriggerEfficiency self triggername efficiency style<block_start>""" Add trigger class to the comparison data """<line_sep>self._comparisonContainer.AddEfficiency("triggertype" triggername efficiency style)<block_end><def_stmt>Create self<block_start>self._Create("triggerclasses" "Trigger 
efficiencies")<block_end><block_end><class_stmt>TriggerEfficiencySumPlot(SinglePanelPlot)<block_start>""" Plot the summed trigger efficiency from different pt-hard bins """<def_stmt>__init__ self triggername triggerefficiency<block_start>""" Constructor """<line_sep>SinglePanelPlot.__init__(self)<line_sep>self.__triggername=triggername<line_sep>self.__triggereff=triggerefficiency<block_end><def_stmt>Create self<block_start>""" Create the plot """<line_sep>self._OpenCanvas("trgEffSumm" "Summed trigger efficiency")<line_sep>pad=self._GetFramedPad()<line_sep>pad.DrawFrame(TriggerEfficiencyFrame("tframe"))<line_sep>pad.DrawGraphicsObject(GraphicsObject(self.__triggereff.GetEfficiencyCurve() Style(kBlack 20)) <false> "Trigger Eff")<line_sep>pad.DrawLabel(0.5 0.2 0.89 0.25 "Trigger: %s"%(self.__triggername))<block_end><block_end>
########################################################################### # # # physical_validation, # # a python package to test the physical validity of MD results # # # # Written by <NAME> <<EMAIL>> # # <NAME> <<EMAIL>> # # # # Copyright (C) 2012 University of Virginia # # (C) 2017 University of Colorado Boulder # # # # This library is free software; you can redistribute it and/or # # modify it under the terms of the GNU Lesser General Public # # License as published by the Free Software Foundation; either # # version 2.1 of the License, or (at your option) any later version. # # # # This library is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # # Lesser General Public License for more details. # # # # You should have received a copy of the GNU Lesser General Public # # License along with this library; if not, write to the # # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, # # Boston, MA 02110-1301 USA # # # ########################################################################### r""" gromacs_parser.py """<import_stmt>warnings<import_stmt>numpy<as>np<import_from_stmt>. parser<line_sep># py2.7 compatibility <import_from_stmt>.simulation_data SimulationData<import_from_stmt>.unit_data UnitData<import_from_stmt>.ensemble_data EnsembleData<import_from_stmt>.system_data SystemData<import_from_stmt>.observable_data ObservableData<import_from_stmt>.trajectory_data TrajectoryData<line_sep># replace lines above by this when py2.7 support is dropped: # from . 
import SimulationData, UnitData, EnsembleData, SystemData, ObservableData, TrajectoryData <import_from_stmt>..util.gromacs_interface GromacsInterface<import_from_stmt>..util error<as>pv_error<class_stmt>GromacsParser(parser.Parser)<block_start>""" GromacsParser """<line_sep>@staticmethod<def_stmt>units # Gromacs uses kJ/mol <block_start><return>UnitData(kb=8.314462435405199e-3 energy_str='kJ/mol' energy_conversion=1.0 length_str='nm' length_conversion=1.0 volume_str='nm^3' volume_conversion=1.0 temperature_str='K' temperature_conversion=1.0 pressure_str='bar' pressure_conversion=1.0 time_str='ps' time_conversion=1.0)<block_end><def_stmt>__init__ self exe=<none> includepath=<none><block_start>r""" Create a GromacsParser object Parameters ---------- exe: str, optional Path to a gmx executable (or simply the executable name, if it is in the path) Default: Looks for `gmx`, then for `gmx_d` in the path. If neither is found, `exe` is set to None, and any parsing including simulation trajectories (`edr`, `trr` and `gro` arguments in `get_simulation_data()`) will fail. includepath: str or List[str], optional Path or list of paths to location(s) of topology file. Is used for the lookup of `#include` statements in topologies. Default: None - no additional topology location. Lookup will be restricted to current directory and location of the `top` file given to `get_simulation_data()`, plus any include locations added to the `mdp` file. """<line_sep>super(GromacsParser self).__init__()<line_sep>self.__interface=GromacsInterface(exe=exe includepath=includepath)<line_sep># gmx energy codes self.__gmx_energy_names={'kinetic_energy':'Kinetic-En.' 
'potential_energy':'Potential' 'total_energy':'Total-Energy' 'volume':'Volume' 'pressure':'Pressure' 'temperature':'Temperature' 'constant_of_motion':'Conserved-En.'}<block_end><def_stmt>get_simulation_data self mdp=<none> top=<none> edr=<none> trr=<none> gro=<none><block_start>r""" Parameters ---------- mdp: str, optional A string pointing to a .mdp file top: str, optional A string pointing to a .top file edr: str, optional A string pointing to a .edr file trr: str, optional A string pointing to a .trr file gro: str, optional A string pointing to a .gro file (Note: if also trr is given, gro is ignored) Returns ------- result: SimulationData A SimulationData filled with the results of the simulation as described by the provided GROMACS files. """<line_sep>result=SimulationData()<line_sep>result.units=self.units()<line_sep># trajectories (might be used later for the box...) trajectory_dict=<none><if_stmt>trr<is><not><none><block_start><if_stmt>gro<is><not><none><block_start>warnings.warn('`trr` and `gro` given. 
Ignoring `gro`.')<block_end>trajectory_dict=self.__interface.read_trr(trr)<line_sep>result.trajectory=TrajectoryData(trajectory_dict['position'] trajectory_dict['velocity'])<block_end><elif_stmt>gro<is><not><none><block_start>trajectory_dict=self.__interface.read_gro(gro)<line_sep>result.trajectory=TrajectoryData(trajectory_dict['position'] trajectory_dict['velocity'])<block_end># simulation parameters & system <if_stmt>mdp<is><not><none><and>top<is><not><none><block_start>mdp_options=self.__interface.read_mdp(mdp)<line_sep>define=<none><line_sep>include=<none><if_stmt>'define'<in>mdp_options<block_start>define=mdp_options['define']<block_end><if_stmt>'include'<in>mdp_options<block_start>include=mdp_options['include']<block_end>molecules=self.__interface.read_system_from_top(top define=define include=include)<if_stmt>'dt'<in>mdp_options<block_start>result.dt=float(mdp_options['dt'])<block_end>natoms=0<line_sep>mass=[]<line_sep>constraints_per_molec=[]<line_sep>angles=('constraints'<in>mdp_options<and>mdp_options['constraints']<eq>'all-angles')<line_sep>angles_h=(angles<or>'constraints'<in>mdp_options<and>mdp_options['constraints']<eq>'h-angles')<line_sep>bonds=(angles_h<or>'constraints'<in>mdp_options<and>mdp_options['constraints']<eq>'all-bonds')<line_sep>bonds_h=(bonds<or>'constraints'<in>mdp_options<and>mdp_options['constraints']<eq>'h-bonds')<line_sep>molecule_idx=[]<line_sep>next_molec=0<line_sep>molec_bonds=[]<line_sep>molec_bonds_constrained=[]<for_stmt>molecule molecules<block_start>natoms<augadd>molecule['nmolecs']<times>molecule['natoms']<for_stmt>n range(0 
molecule['nmolecs'])<block_start>molecule_idx.append(next_molec)<line_sep>next_molec<augadd>molecule['natoms']<block_end>mass.extend(molecule['mass']<times>molecule['nmolecs'])<line_sep>constraints=0<line_sep>constrained_bonds=[]<line_sep>all_bonds=molecule['bonds']+molecule['bondsh']<if_stmt>molecule['settles']<block_start>constraints=3<line_sep>constrained_bonds=all_bonds<block_end><else_stmt><block_start><if_stmt>bonds<block_start>constraints<augadd>molecule['nbonds'][0]<line_sep>constrained_bonds.extend(molecule['bonds'])<block_end><if_stmt>bonds_h<block_start>constraints<augadd>molecule['nbonds'][1]<line_sep>constrained_bonds.extend(molecule['bondsh'])<block_end><if_stmt>angles<block_start>constraints<augadd>molecule['nangles'][0]<block_end><if_stmt>angles_h<block_start>constraints<augadd>molecule['nangles'][1]<block_end><block_end>constraints_per_molec.extend([constraints]<times>molecule['nmolecs'])<line_sep>molec_bonds.extend([all_bonds]<times>molecule['nmolecs'])<line_sep>molec_bonds_constrained.extend([constrained_bonds]<times>molecule['nmolecs'])<block_end>system=SystemData()<line_sep>system.natoms=natoms<line_sep>system.mass=mass<line_sep>system.molecule_idx=molecule_idx<line_sep>system.nconstraints=np.sum(constraints_per_molec)<line_sep>system.nconstraints_per_molecule=constraints_per_molec<line_sep>system.ndof_reduction_tra=3<line_sep>system.ndof_reduction_rot=0<if_stmt>'comm-mode'<in>mdp_options<block_start><if_stmt>mdp_options['comm-mode']<eq>'linear'<block_start>system.ndof_reduction_tra=3<block_end><elif_stmt>mdp_options['comm-mode']<eq>'angular'<block_start>system.ndof_reduction_tra=3<line_sep>system.ndof_reduction_rot=3<block_end><if_stmt>mdp_options['comm-mode']<eq>'none'<block_start>system.ndof_reduction_tra=0<block_end><block_end>system.bonds=molec_bonds<line_sep>system.constrained_bonds=molec_bonds_constrained<line_sep>result.system=system<line_sep>thermostat=('tcoupl'<in>mdp_options<and>mdp_options['tcoupl']<and>mdp_options['tcoupl']<ne>'no')
<line_sep>stochastic_dyn=('integrator'<in>mdp_options<and>mdp_options['integrator']<in>['sd' 'sd2' 'bd'])<line_sep>constant_temp=thermostat<or>stochastic_dyn<line_sep>temperature=<none><if_stmt>constant_temp<block_start>ref_t=[float(t)<for>t mdp_options['ref-t'].split()]<if_stmt>len(ref_t)<eq>1<or>np.allclose(ref_t [ref_t[0]]<times>len(ref_t))<block_start>temperature=ref_t[0]<block_end><else_stmt><block_start><raise>pv_error.InputError('mdp' 'Ensemble definition ambiguous: Different t-ref values found.')<block_end><block_end>constant_press=('pcoupl'<in>mdp_options<and>mdp_options['pcoupl']<and>mdp_options['pcoupl']<ne>'no')<line_sep>volume=<none><line_sep>pressure=<none><if_stmt>constant_press<block_start>ref_p=[float(p)<for>p mdp_options['ref-p'].split()]<if_stmt>len(ref_p)<eq>1<or>np.allclose(ref_p [ref_p[0]]<times>len(ref_p))<block_start>pressure=ref_p[0]<block_end><else_stmt><block_start><raise>pv_error.InputError('mdp' 'Ensemble definition ambiguous: Different p-ref values found.')<block_end><block_end><else_stmt><block_start><if_stmt>trajectory_dict<is><not><none><block_start>box=trajectory_dict['box'][0]<line_sep># Different box shapes? 
volume=box[0]<times>box[1]<times>box[2]<block_end><else_stmt><block_start>warnings.warn('Constant volume simulation with undefined volume.')<block_end><block_end><if_stmt>constant_temp<and>constant_press<block_start>ens='NPT'<block_end><elif_stmt>constant_temp<block_start>ens='NVT'<block_end><else_stmt><block_start>ens='NVE'<block_end><if_stmt>ens<eq>'NVE'<block_start>self.__gmx_energy_names['constant_of_motion']='Total-Energy'<block_end><else_stmt><block_start>self.__gmx_energy_names['constant_of_motion']='Conserved-En.'<block_end>result.ensemble=EnsembleData(ens natoms=natoms volume=volume pressure=pressure temperature=temperature)<block_end><if_stmt>edr<is><not><none><block_start>observable_dict=self.__interface.get_quantities(edr self.__gmx_energy_names.values() args=['-dp'])<line_sep># constant volume simulations don't write out the volume in .edr file <if_stmt>(observable_dict['Volume']<is><none><and>result.ensemble<is><not><none><and>result.ensemble.volume<is><not><none>)<block_start>nframes=observable_dict['Pressure'].size<line_sep>observable_dict['Volume']=np.ones(nframes)<times>result.ensemble.volume<block_end>result.observables=ObservableData()<for_stmt>key,gmxkey self.__gmx_energy_names.items()<block_start>result.observables[key]=observable_dict[gmxkey]<block_end><block_end><return>result<block_end><block_end>
<import_stmt>logging<import_stmt>sys<import_stmt>time<import_stmt>boto3<import_stmt>click<line_sep>logger=logging.getLogger(name=__name__)<line_sep>logger.setLevel(logging.INFO)<line_sep>handler=logging.StreamHandler(sys.stderr)<line_sep>logger.addHandler(handler)<line_sep>yaml_template=''' {}: '64': {} '''.strip('\r\n')<def_stmt>copy_to_region image src_region dest_region<block_start>session=boto3.session.Session(region_name=dest_region)<line_sep>local_client=session.client('ec2')<line_sep>logger.info("creating image in region {}".format(dest_region))<line_sep>resp=local_client.copy_image(Name=image.name SourceImageId=image.image_id SourceRegion=src_region )<line_sep>local_ec2=session.resource('ec2')<line_sep>new_image=local_ec2.Image(resp['ImageId'])<line_sep><return>(new_image dest_region)<block_end><def_stmt>make_public_and_tag image region desc<block_start><while_stmt><true><block_start>image.load()<if_stmt>image.state<eq>'available'<block_start>image.modify_attribute(LaunchPermission={'Add':[{'Group':'all'}]})<line_sep># Can only modify one attribute at a time image.modify_attribute(Description={'Value':desc})<line_sep>logger.info("region {} ami {} is available".format(region image.id))<line_sep><break><block_end>time.sleep(5)<block_end><block_end><def_stmt>encode_desc dict_<block_start><return>" ".join("{0}={1}".format(*item)<for>item dict_.items())<block_end>@click.group()<def_stmt>aws <block_start><pass><block_end>@aws.command(name='copy-ami')@click.option('-r' '--src-region' default='us-east-1' help='AWS Region')@click.option('-q' '--quiet' is_flag=<true>)@click.argument('src_ami')<def_stmt>copy_ami src_region src_ami quiet<block_start><if_stmt>quiet<block_start>logger.setLevel(logging.WARN)<block_end>session=boto3.session.Session(region_name=src_region)<line_sep>client=session.client('ec2')<line_sep>dest_regions=[region['RegionName']<for>region 
client.describe_regions()['Regions']<if>region['RegionName']<ne>src_region]<line_sep>dest_regions.sort()<line_sep>logger.info("detected {} regions".format(len(dest_regions)))<line_sep>image=session.resource('ec2').Image(src_ami)<line_sep>description=encode_desc({i['Key']:i['Value']<for>i image.tags<or>[]})<line_sep># copy to all regions images=[copy_to_region(image src_region region)<for>region dest_regions]<line_sep># Add the original images.append((image src_region))<line_sep># print out the YAML <for_stmt>(image region) images<block_start>print(yaml_template.format(region image.id))<block_end>logger.info("waiting for all images to be available. In the mean time,"<concat>"that YAML can be pasted into the quickstart template.")<line_sep># wait for all images to be available <for_stmt>(image region) images<block_start>make_public_and_tag(image region description)<block_end><block_end>
<import_from_stmt>zeus.config db<import_from_stmt>zeus.db.mixins ApiTokenMixin RepositoryMixin StandardAttributes<import_from_stmt>zeus.db.utils model_repr<class_stmt>RepositoryApiToken(StandardAttributes RepositoryMixin ApiTokenMixin db.Model)<block_start>""" An API token associated to a repository. """<line_sep>__tablename__="repository_api_token"<line_sep>__repr__=model_repr("repository_id" "key")<def_stmt>get_token_key self<block_start><return>"r"<block_end><block_end>
a:str<line_sep>a:bool=<true><line_sep>my_long_var_aaaaaaaaaaaaaaaaaaaaaaaaaa:MyLongTypeAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA<line_sep>my_long_var_aaaaaaaaaaaaaaaaaaaaaaaaaa:MyLongTypeAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=1<line_sep>
""" .. codeauthor:: <NAME> <<EMAIL>> """<import_stmt>logging<import_stmt>os<import_stmt>re<import_stmt>sqlite3<import_from_stmt>collections OrderedDict defaultdict<import_from_stmt>sqlite3 Connection Cursor<import_from_stmt>typing Any Callable Dict List Optional Sequence Tuple Union cast<import_stmt>pathvalidate<import_stmt>typepy<import_from_stmt>dataproperty.typing TypeHint<import_from_stmt>mbstrdecoder MultiByteStrDecoder<import_from_stmt>sqliteschema SQLITE_SYSTEM_TABLES SQLiteSchemaExtractor<import_from_stmt>tabledata TableData<import_from_stmt>typepy extract_typepy_from_dtype<import_from_stmt>._common extract_table_metadata<import_from_stmt>._func copy_table validate_table_name<import_from_stmt>._logger logger<import_from_stmt>._sanitizer SQLiteTableDataSanitizer<import_from_stmt>.converter RecordConvertor<import_from_stmt>.error AttributeNotFoundError DatabaseError NameValidationError NullDatabaseConnectionError OperationalError TableNotFoundError <import_from_stmt>.query Attr AttrList Insert QueryItem Select Table Value WhereQuery make_index_name <import_from_stmt>.sqlquery SqlQuery<line_sep>MEMORY_DB_NAME=":memory:"<class_stmt>SimpleSQLite<block_start>""" Wrapper class for |sqlite3| module. :param str database_src: SQLite database source. Acceptable types are: (1) File path to a database to be connected. (2) sqlite3.Connection instance. (3) SimpleSQLite instance :param str mode: Open mode. :param bool delayed_connection: Delaying connection to a database until access to the database the first time, if the value is |True|. :param int max_workers: Maximum number of workers to generate a table. In default, the same as the total number of CPUs. :param bool profile: Recording SQL query execution time profile, if the value is |True|. .. 
seealso:: :py:meth:`.connect` :py:meth:`.get_profile` """<line_sep>dup_col_handler="error"<line_sep>global_debug_query=<false><line_sep>@property<def_stmt>database_path self<arrow>Optional[str]<block_start>""" :return: File path of the connected database. :rtype: str :Examples: >>> from simplesqlite import SimpleSQLite >>> con = SimpleSQLite("sample.sqlite", "w") >>> con.database_path '/tmp/sample.sqlite' >>> con.close() >>> print(con.database_path) None """<if_stmt>self.__delayed_connection_path<block_start><return>self.__delayed_connection_path<block_end><return>self.__database_path<block_end>@property<def_stmt>connection self<arrow>Optional[Connection]<block_start>""" :return: |Connection| instance of the connected database. :rtype: sqlite3.Connection """<line_sep>self.__delayed_connect()<line_sep><return>self.__connection<block_end>@property<def_stmt>schema_extractor self<arrow>SQLiteSchemaExtractor<block_start><return>SQLiteSchemaExtractor(self max_workers=self.__max_workers)<block_end>@property<def_stmt>total_changes self<arrow>int<block_start>""" .. seealso:: :py:attr:`sqlite3.Connection.total_changes` """<line_sep>self.check_connection()<line_sep><return>self.connection.total_changes<block_end># type: ignore @property<def_stmt>mode self<arrow>Optional[str]<block_start>""" :return: Connection mode: ``"r"``/``"w"``/``"a"``. :rtype: str .. 
seealso:: :py:meth:`.connect` """<line_sep><return>self.__mode<block_end><def_stmt>__initialize_connection self<arrow><none><block_start>self.__database_path:Optional[str]=<none><line_sep>self.__connection:Optional[Connection]=<none><line_sep>self.__mode:Optional[str]=<none><line_sep>self.__delayed_connection_path:Optional[str]=<none><line_sep>self.__dict_query_count:Dict[str int]=defaultdict(int)<line_sep>self.__dict_query_totalexectime:Dict[str float]=defaultdict(float)<block_end><def_stmt>__init__ self database_src:Union[Connection "SimpleSQLite" str] mode:str="a" delayed_connection:bool=<true> max_workers:Optional[int]=<none> profile:bool=<false> <arrow><none><block_start>self.debug_query=<false><line_sep>self.__initialize_connection()<line_sep>self.__mode=mode<line_sep>self.__max_workers=max_workers<line_sep>self.__is_profile=profile<if_stmt>database_src<is><none><block_start><raise>TypeError("database_src must be not None")<block_end><if_stmt>isinstance(database_src SimpleSQLite)<block_start>self.__connection=database_src.connection<line_sep>self.__database_path=database_src.database_path<line_sep>self.debug_query=database_src.debug_query<line_sep><return><block_end><if_stmt>isinstance(database_src sqlite3.Connection)<block_start>self.__connection=database_src<line_sep><return><block_end><if_stmt>delayed_connection<block_start>self.__delayed_connection_path=database_src<line_sep><return><block_end>self.connect(database_src mode)<block_end><def_stmt>__del__ self<arrow><none><block_start>self.close()<block_end><def_stmt>__enter__ self<block_start><return>self<block_end><def_stmt>__exit__ self exc_type exc_value traceback<arrow><none><block_start>self.close()<block_end><def_stmt>is_connected self<arrow>bool<block_start>""" :return: |True| if the connection to a database is valid. 
:rtype: bool :Examples: >>> from simplesqlite import SimpleSQLite >>> con = SimpleSQLite("sample.sqlite", "w") >>> con.is_connected() True >>> con.close() >>> con.is_connected() False """<try_stmt><block_start>self.check_connection()<block_end><except_stmt>NullDatabaseConnectionError<block_start><return><false><block_end><return><true><block_end><def_stmt>check_connection self<arrow><none><block_start>""" :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :Sample Code: .. code:: python import simplesqlite con = simplesqlite.SimpleSQLite("sample.sqlite", "w") print("---- connected to a database ----") con.check_connection() print("---- disconnected from a database ----") con.close() try: con.check_connection() except simplesqlite.NullDatabaseConnectionError as e: print(e) :Output: .. code-block:: none ---- connected to a database ---- ---- disconnected from a database ---- null database connection """<if_stmt>self.connection<is><none><block_start><if_stmt><not>self.__delayed_connect()<block_start><raise>NullDatabaseConnectionError("null database connection")<block_end><block_end><block_end><def_stmt>connect self database_path:str mode:str="a"<arrow><none><block_start>""" Connect to a SQLite database. :param str database_path: Path to the SQLite database file to be connected. :param str mode: ``"r"``: Open for read only. ``"w"``: Open for read/write. Delete existing tables when connecting. ``"a"``: Open for read/write. Append to the existing tables. :raises ValueError: If ``database_path`` is invalid or |attr_mode| is invalid. :raises simplesqlite.DatabaseError: If the file is encrypted or is not a database. :raises simplesqlite.OperationalError: If unable to open the database file. 
"""<line_sep>self.close()<line_sep>logger.debug(f"connect to a SQLite database: path='{database_path}', mode={mode}")<if_stmt>mode<eq>"r"<block_start>self.__verify_db_file_existence(database_path)<block_end><elif_stmt>mode<in>["w" "a"]<block_start>self.__validate_db_path(database_path)<block_end><else_stmt><block_start><raise>ValueError("unknown connection mode: "+mode)<block_end><if_stmt>database_path<eq>MEMORY_DB_NAME<block_start>self.__database_path=database_path<block_end><else_stmt><block_start>self.__database_path=os.path.realpath(database_path)<block_end><try_stmt><block_start>self.__connection=sqlite3.connect(database_path)<block_end><except_stmt>sqlite3.OperationalError<as>e<block_start><raise>OperationalError(e)<block_end>self.__mode=mode<try_stmt># validate connection after connect <block_start>self.fetch_table_names()<block_end><except_stmt>sqlite3.DatabaseError<as>e<block_start><raise>DatabaseError(e)<block_end><if_stmt>mode<ne>"w"<block_start><return><block_end><for_stmt>table self.fetch_table_names()<block_start>self.drop_table(table)<block_end><block_end><def_stmt>execute_query self query:Union[str QueryItem] caller:Optional[Tuple]=<none><arrow>Optional[Cursor]<block_start>""" Send arbitrary SQLite query to the database. :param query: Query to executed. :param tuple caller: Caller information. Expects the return value of :py:meth:`logging.Logger.findCaller`. :return: The result of the query execution. :rtype: sqlite3.Cursor :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.OperationalError: |raises_operational_error| .. warning:: This method can execute an arbitrary query. i.e. No access permissions check by |attr_mode|. 
"""<import_stmt>time<line_sep>self.check_connection()<if_stmt>typepy.is_null_string(query)<block_start><return><none><block_end><if_stmt>self.debug_query<or>self.global_debug_query<block_start>logger.debug(query)<block_end><if_stmt>self.__is_profile<block_start>exec_start_time=time.time()<block_end><assert_stmt>self.connection# to avoid type check error <try_stmt><block_start>result=self.connection.execute(str(query))<block_end><except_stmt>(sqlite3.OperationalError sqlite3.IntegrityError)<as>e<block_start><if_stmt>caller<is><none><block_start>caller=logging.getLogger().findCaller()<block_end>file_path,line_no,func_name=caller[:3]<line_sep><raise>OperationalError(message="\n".join(["failed to execute query at {:s}({:d}) {:s}".format(file_path line_no func_name) f" - query: {MultiByteStrDecoder(query).unicode_str}" f" - msg: {e}" f" - db: {self.database_path}" ]))<block_end><if_stmt>self.__is_profile<block_start>self.__dict_query_count[str(query)]<augadd>1<line_sep>elapse_time=time.time()-exec_start_time<line_sep>self.__dict_query_totalexectime[str(query)]<augadd>elapse_time<block_end><return>result<block_end><def_stmt>set_row_factory self row_factory:Optional[Callable]<arrow><none><block_start>""" Set row_factory to the database connection. """<line_sep>self.check_connection()<line_sep>self.__connection.row_factory=row_factory<block_end># type: ignore <def_stmt>select self select:Union[str AttrList] table_name:str where:Optional[WhereQuery]=<none> extra:Optional[str]=<none> <arrow>Optional[Cursor]<block_start>""" Send a SELECT query to the database. :param select: Attribute for the ``SELECT`` query. :param str table_name: |arg_select_table_name| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Result of the query execution. 
:rtype: sqlite3.Cursor :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| """<line_sep>self.verify_table_existence(table_name)<line_sep><return>self.execute_query(str(Select(select table_name where extra)) logging.getLogger().findCaller() )<block_end><def_stmt>select_as_dataframe self table_name:str columns:Optional[Sequence[str]]=<none> where:Optional[WhereQuery]=<none> extra:Optional[str]=<none> <block_start>""" Get data in the database and return fetched data as a :py:class:`pandas.Dataframe` instance. :param str table_name: |arg_select_table_name| :param columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :param extra: |arg_select_extra| :return: Table data as a :py:class:`pandas.Dataframe` instance. :rtype: pandas.DataFrame :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| :Example: :ref:`example-select-as-dataframe` .. note:: ``pandas`` package required to execute this method. """<import_stmt>pandas<if_stmt>columns<is><none><block_start>columns=self.fetch_attr_names(table_name)<block_end>result=self.select(select=AttrList(columns) table_name=table_name where=where extra=extra)<if_stmt>result<is><none><block_start><return>pandas.DataFrame()<block_end><return>pandas.DataFrame(result.fetchall() columns=columns)<block_end><def_stmt>select_as_tabledata self table_name:str columns:Optional[Sequence[str]]=<none> where:Optional[WhereQuery]=<none> extra:Optional[str]=<none> type_hints:Optional[Dict[str TypeHint]]=<none> <arrow>TableData<block_start>""" Get data in the database and return fetched data as a :py:class:`tabledata.TableData` instance. 
:param str table_name: |arg_select_table_name| :param columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Table data as a :py:class:`tabledata.TableData` instance. :rtype: tabledata.TableData :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| .. note:: ``pandas`` package required to execute this method. """<if_stmt>columns<is><none><block_start>columns=self.fetch_attr_names(table_name)<block_end>result=self.select(select=AttrList(columns) table_name=table_name where=where extra=extra)<if_stmt>result<is><none><block_start><return>TableData(<none> [] [])<block_end><if_stmt>type_hints<is><none><block_start>type_hints=self.fetch_data_types(table_name)<block_end><return>TableData(table_name columns result.fetchall() type_hints=[type_hints.get(col)<for>col columns] max_workers=self.__max_workers )<block_end><def_stmt>select_as_dict self table_name:str columns:Optional[Sequence[str]]=<none> where:Optional[WhereQuery]=<none> extra:Optional[str]=<none> <arrow>"Optional[List[OrderedDict[str, Any]]]"<block_start>""" Get data in the database and return fetched data as a |OrderedDict| list. :param str table_name: |arg_select_table_name| :param list columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Table data as |OrderedDict| instances. 
:rtype: |list| of |OrderedDict| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| :Example: :ref:`example-select-as-dict` """<line_sep><return>self.select_as_tabledata(table_name columns where extra).as_dict().get(table_name)<block_end><def_stmt>select_as_memdb self table_name:str columns:Optional[Sequence[str]]=<none> where:Optional[WhereQuery]=<none> extra:Optional[str]=<none> <block_start>""" Get data in the database and return fetched data as a in-memory |SimpleSQLite| instance. :param str table_name: |arg_select_table_name| :param columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Table data as a |SimpleSQLite| instance that connected to in memory database. :rtype: |SimpleSQLite| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| """<line_sep>table_schema=self.schema_extractor.fetch_table_schema(table_name)<line_sep>memdb=connect_memdb(max_workers=self.__max_workers)<line_sep>memdb.create_table_from_tabledata(self.select_as_tabledata(table_name columns where extra) primary_key=table_schema.primary_key index_attrs=table_schema.index_list )<line_sep><return>memdb<block_end><def_stmt>insert self table_name:str record:Any attr_names:Optional[Sequence[str]]=<none><arrow><none><block_start>""" Send an INSERT query to the database. :param str table_name: Table name of executing the query. :param record: Record to be inserted. 
:type record: |dict|/|namedtuple|/|list|/|tuple| :raises IOError: |raises_write_permission| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.OperationalError: |raises_operational_error| :Example: :ref:`example-insert-records` """<line_sep>self.insert_many(table_name records=[record] attr_names=attr_names)<block_end><def_stmt>insert_many self table_name:str records:Sequence[Union[Dict Sequence]] attr_names:Optional[Sequence[str]]=<none> <arrow>int<block_start>""" Send an INSERT query with multiple records to the database. :param str table: Table name of executing the query. :param records: Records to be inserted. :type records: list of |dict|/|namedtuple|/|list|/|tuple| :return: Number of inserted records. :rtype: int :raises IOError: |raises_write_permission| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| :Example: :ref:`example-insert-records` """<line_sep>self.validate_access_permission(["w" "a"])<line_sep>self.verify_table_existence(table_name allow_view=<false>)<if_stmt>attr_names<block_start>logger.debug("insert {number} records into {table}({attrs})".format(number=len(records)<if>records<else>0 table=table_name attrs=attr_names))<block_end><else_stmt><block_start>logger.debug("insert {number} records into {table}".format(number=len(records)<if>records<else>0 table=table_name))<block_end><if_stmt>typepy.is_empty_sequence(records)<block_start><return>0<block_end><if_stmt>attr_names<is><none><block_start>attr_names=self.fetch_attr_names(table_name)<block_end>records=RecordConvertor.to_records(attr_names records)<line_sep>query=Insert(table_name AttrList(attr_names)).to_query()<if_stmt>self.debug_query<or>self.global_debug_query<block_start>logging_count=8<line_sep>num_records=len(records)<line_sep>logs=[query]+[f" record {i:4d}: 
{record}"<for>i,record enumerate(records[:logging_count])]<if_stmt>num_records-logging_count<g>0<block_start>logs.append(f" and other {num_records-logging_count} records will be inserted")<block_end>logger.debug("\n".join(logs))<block_end><assert_stmt>self.connection# to avoid type check error <try_stmt><block_start>self.connection.executemany(query records)<block_end><except_stmt>(sqlite3.OperationalError sqlite3.IntegrityError)<as>e<block_start>caller=logging.getLogger().findCaller()<line_sep>file_path,line_no,func_name=caller[:3]<line_sep><raise>OperationalError(f"{file_path:s}({line_no:d}) {func_name:s}: failed to execute query:\n"+f" query={query}\n"+f" msg='{e}'\n"+f" db={self.database_path}\n"+f" records={records[:2]}\n")<block_end><return>len(records)<block_end><def_stmt>update self table_name:str set_query:Optional[str] where:Optional[WhereQuery]=<none><arrow>Optional[Cursor]<block_start>"""Execute an UPDATE query. Args: table_name (|str|): Table name of executing the query. set_query (|str|): ``SET`` clause for the update query. where (|arg_where_type| , optional): ``WHERE`` clause for the update query. Defaults to |None|. Raises: IOError: |raises_write_permission| simplesqlite.NullDatabaseConnectionError: |raises_check_connection| simplesqlite.TableNotFoundError: |raises_verify_table_existence| simplesqlite.OperationalError: |raises_operational_error| """<line_sep>self.validate_access_permission(["w" "a"])<line_sep>self.verify_table_existence(table_name allow_view=<false>)<line_sep>query=SqlQuery.make_update(table_name set_query where)<line_sep><return>self.execute_query(query logging.getLogger().findCaller())<block_end><def_stmt>delete self table_name:str where:Optional[WhereQuery]=<none><arrow>Optional[Cursor]<block_start>""" Send a DELETE query to the database. :param str table_name: Table name of executing the query. 
:param where: |arg_select_where| :type where: |arg_where_type| """<line_sep>self.validate_access_permission(["w" "a"])<line_sep>self.verify_table_existence(table_name allow_view=<false>)<line_sep>query=f"DELETE FROM {table_name:s}"<if_stmt>where<block_start>query<augadd>f" WHERE {where:s}"<block_end><return>self.execute_query(query logging.getLogger().findCaller())<block_end><def_stmt>fetch_value self select:str table_name:str where:Optional[WhereQuery]=<none> extra:Optional[str]=<none> <arrow>Optional[int]<block_start>""" Fetch a value from the table. Return |None| if no value matches the conditions, or the table not found in the database. :param str select: Attribute for SELECT query :param str table_name: Table name of executing the query. :param where: |arg_select_where| :type where: |arg_where_type| :return: Result of execution of the query. :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.OperationalError: |raises_operational_error| """<try_stmt><block_start>self.verify_table_existence(table_name)<block_end><except_stmt>DatabaseError<as>e<block_start>logger.debug(e)<line_sep><return><none><block_end>result=self.execute_query(Select(select table_name where extra) logging.getLogger().findCaller())<if_stmt>result<is><none><block_start><return><none><block_end>fetch=result.fetchone()<if_stmt>fetch<is><none><block_start><return><none><block_end><return>fetch[0]<block_end><def_stmt>fetch_values self select table_name where=<none> extra=<none><arrow>List<block_start>result=self.select(select=select table_name=table_name where=where extra=extra)<if_stmt>result<is><none><block_start><return>[]<block_end><return>[record[0]<for>record result.fetchall()]<block_end><def_stmt>fetch_table_names self include_system_table:bool=<false> include_view:bool=<true><arrow>List[str]<block_start>""" :return: List of table names in the database. 
:rtype: list :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.OperationalError: |raises_operational_error| :Sample Code: .. code:: python from simplesqlite import SimpleSQLite con = SimpleSQLite("sample.sqlite", "w") con.create_table_from_data_matrix( "hoge", ["attr_a", "attr_b"], [[1, "a"], [2, "b"]]) print(con.fetch_table_names()) :Output: .. code-block:: python ['hoge'] """<line_sep>self.check_connection()<line_sep><return>self.schema_extractor.fetch_table_names(include_system_table=include_system_table include_view=include_view)<block_end><def_stmt>fetch_view_names self<arrow>List[str]<block_start>""" :return: List of table names in the database. :rtype: list """<line_sep>self.check_connection()<line_sep><return>self.schema_extractor.fetch_view_names()<block_end><def_stmt>fetch_attr_names self table_name:str<arrow>List[str]<block_start>""" :return: List of attribute names in the table. :rtype: list :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| :Example: .. code:: python import simplesqlite table_name = "sample_table" con = simplesqlite.SimpleSQLite("sample.sqlite", "w") con.create_table_from_data_matrix( table_name, ["attr_a", "attr_b"], [[1, "a"], [2, "b"]]) print(con.fetch_attr_names(table_name)) try: print(con.fetch_attr_names("not_existing")) except simplesqlite.TableNotFoundError as e: print(e) :Output: .. parsed-literal:: ['attr_a', 'attr_b'] 'not_existing' table not found in /tmp/sample.sqlite """<line_sep>self.verify_table_existence(table_name)<line_sep><return>self.schema_extractor.fetch_table_schema(table_name).get_attr_names()<block_end><def_stmt>fetch_attr_type self table_name:str<arrow>Dict[str str]<block_start>""" :return: Dictionary of attribute names and attribute types in the table. 
:rtype: dict :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| """<line_sep>self.verify_table_existence(table_name allow_view=<false>)<line_sep>result=self.execute_query("SELECT sql FROM sqlite_master WHERE type='table' and name={:s}".format(Value(table_name)))<assert_stmt>result# to avoid type check error query=result.fetchone()[0]<line_sep>match=re.search("[(].*[)]" query)<assert_stmt>match# to avoid type check error <def_stmt>get_entry items<block_start>key=" ".join(items[:-1])<line_sep>value=items[-1]<line_sep><return>[key value]<block_end><return>dict([get_entry(item.split(" "))<for>item match.group().strip("()").split(", ")])<block_end><def_stmt>fetch_num_records self table_name:str where:Optional[WhereQuery]=<none><arrow>Optional[int]<block_start>""" Fetch the number of records in a table. :param str table_name: Table name to get number of records. :param where: |arg_select_where| :type where: |arg_where_type| :return: Number of records in the table. |None| if no value matches the conditions, or the table not found in the database. :rtype: int """<line_sep><return>self.fetch_value(select="COUNT(*)" table_name=table_name where=where)<block_end><def_stmt>fetch_data_types self table_name:str<arrow>Dict[str TypeHint]<block_start>_,_,type_hints=extract_table_metadata(self table_name)<line_sep><return>type_hints<block_end><def_stmt>get_profile self profile_count:int=50<arrow>List[Any]<block_start>""" Get profile of query execution time. :param int profile_count: Number of profiles to retrieve, counted from the top query in descending order by the cumulative execution time. :return: Profile information for each query. 
:rtype: list of |namedtuple| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.OperationalError: |raises_operational_error| :Example: :ref:`example-get-profile` """<import_from_stmt>collections namedtuple<line_sep>profile_table_name="sql_profile"<line_sep>value_matrix=[[query execute_time self.__dict_query_count.get(query 0)]<for>query,execute_time self.__dict_query_totalexectime.items()]<line_sep>attr_names=("sql_query" "cumulative_time" "count")<line_sep>con_tmp=connect_memdb(max_workers=self.__max_workers)<try_stmt><block_start>con_tmp.create_table_from_data_matrix(profile_table_name attr_names data_matrix=value_matrix)<block_end><except_stmt>ValueError<block_start><return>[]<block_end><try_stmt><block_start>result=con_tmp.select(select="{:s},SUM({:s}),SUM({:s})".format(*attr_names) table_name=profile_table_name extra="GROUP BY {:s} ORDER BY {:s} DESC LIMIT {:d}".format(attr_names[0] attr_names[1] profile_count) )<block_end><except_stmt>sqlite3.OperationalError<block_start><return>[]<block_end><if_stmt>result<is><none><block_start><return>[]<block_end>SqliteProfile=namedtuple("SqliteProfile" " ".join(attr_names))# type: ignore <return>[SqliteProfile(*profile)<for>profile result.fetchall()]<block_end><def_stmt>fetch_sqlite_master self<arrow>List[Dict]<block_start>""" Get sqlite_master table information as a list of dictionaries. :return: sqlite_master table information. :rtype: list :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :Sample Code: .. code:: python import json from simplesqlite import SimpleSQLite con = SimpleSQLite("sample.sqlite", "w") data_matrix = [ [1, 1.1, "aaa", 1, 1], [2, 2.2, "bbb", 2.2, 2.2], [3, 3.3, "ccc", 3, "ccc"], ] con.create_table_from_data_matrix( "sample_table", ["a", "b", "c", "d", "e"], data_matrix, index_attrs=["a"]) print(json.dumps(con.fetch_sqlite_master(), indent=4)) :Output: .. 
code-block:: json [ { "tbl_name": "sample_table", "sql": "CREATE TABLE 'sample_table' ('a' INTEGER, 'b' REAL, 'c' TEXT, 'd' REAL, 'e' TEXT)", "type": "table", "name": "sample_table", "rootpage": 2 }, { "tbl_name": "sample_table", "sql": "CREATE INDEX sample_table_a_index ON sample_table('a')", "type": "index", "name": "sample_table_a_index", "rootpage": 3 } ] """<line_sep>self.check_connection()<line_sep><return>self.schema_extractor.fetch_sqlite_master()<block_end><def_stmt>has_table self table_name:str include_view:bool=<true><arrow>bool<block_start>""" :param str table_name: Table name to be tested. :return: |True| if the database has the table. :rtype: bool :Sample Code: .. code:: python from simplesqlite import SimpleSQLite con = SimpleSQLite("sample.sqlite", "w") con.create_table_from_data_matrix( "hoge", ["attr_a", "attr_b"], [[1, "a"], [2, "b"]]) print(con.has_table("hoge")) print(con.has_table("not_existing")) :Output: .. code-block:: python True False """<try_stmt><block_start>validate_table_name(table_name)<block_end><except_stmt>NameValidationError<block_start><return><false><block_end><return>table_name<in>self.fetch_table_names(include_view=include_view)<block_end><def_stmt>has_view self view_name:str<arrow>bool<block_start>""" :param str table_name: Table name to be tested. :return: |True| if the database has the table. :rtype: bool """<try_stmt><block_start>validate_table_name(view_name)<block_end><except_stmt>NameValidationError<block_start><return><false><block_end><return>view_name<in>self.fetch_view_names()<block_end><def_stmt>has_attr self table_name:str attr_name:Optional[str]<arrow>bool<block_start>""" :param str table_name: Table name that the attribute exists. :param str attr_name: Attribute name to be tested. :return: |True| if the table has the attribute. :rtype: bool :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :Sample Code: .. 
code:: python import simplesqlite table_name = "sample_table" con = simplesqlite.SimpleSQLite("sample.sqlite", "w") con.create_table_from_data_matrix( table_name, ["attr_a", "attr_b"], [[1, "a"], [2, "b"]]) print(con.has_attr(table_name, "attr_a")) print(con.has_attr(table_name, "not_existing")) try: print(con.has_attr("not_existing", "attr_a")) except simplesqlite.DatabaseError as e: print(e) :Output: .. parsed-literal:: True False 'not_existing' table not found in /tmp/sample.sqlite """<line_sep>self.verify_table_existence(table_name allow_view=<false>)<if_stmt>typepy.is_null_string(attr_name)<block_start><return><false><block_end><return>attr_name<in>self.fetch_attr_names(table_name)<block_end><def_stmt>has_attrs self table_name:str attr_names:Sequence[str]<arrow>bool<block_start>""" :param str table_name: Table name that attributes exists. :param attr_names: Attribute names to tested. :return: |True| if the table has all of the attribute. :rtype: bool :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :Sample Code: .. code:: python import simplesqlite table_name = "sample_table" con = simplesqlite.SimpleSQLite("sample.sqlite", "w") con.create_table_from_data_matrix( table_name, ["attr_a", "attr_b"], [[1, "a"], [2, "b"]]) print(con.has_attrs(table_name, ["attr_a"])) print(con.has_attrs(table_name, ["attr_a", "attr_b"])) print(con.has_attrs(table_name, ["attr_a", "attr_b", "not_existing"])) try: print(con.has_attr("not_existing", ["attr_a"])) except simplesqlite.DatabaseError as e: print(e) :Output: .. 
parsed-literal:: True True False 'not_existing' table not found in /tmp/sample.sqlite """<if_stmt>typepy.is_empty_sequence(attr_names)<block_start><return><false><block_end>not_exist_fields=[attr_name<for>attr_name attr_names<if><not>self.has_attr(table_name attr_name)]<if_stmt>not_exist_fields<block_start><return><false><block_end><return><true><block_end><def_stmt>verify_table_existence self table_name:str allow_view:bool=<true><arrow><none><block_start>""" :param str table_name: Table name to be tested. :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.NameValidationError: |raises_validate_table_name| :Sample Code: .. code:: python import simplesqlite table_name = "sample_table" con = simplesqlite.SimpleSQLite("sample.sqlite", "w") con.create_table_from_data_matrix( table_name, ["attr_a", "attr_b"], [[1, "a"], [2, "b"]]) con.verify_table_existence(table_name) try: con.verify_table_existence("not_existing") except simplesqlite.DatabaseError as e: print(e) :Output: .. parsed-literal:: 'not_existing' table not found in /tmp/sample.sqlite """<line_sep>validate_table_name(table_name)<if_stmt>self.has_table(table_name include_view=allow_view)<block_start><return><block_end><raise>TableNotFoundError(f"'{table_name}' not found in '{self.database_path}' database")<block_end><def_stmt>verify_attr_existence self table_name:str attr_name:str<arrow><none><block_start>""" :param str table_name: Table name that the attribute exists. :param str attr_name: Attribute name to tested. :raises simplesqlite.AttributeNotFoundError: If attribute not found in the table :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :Sample Code: .. 
code:: python from simplesqlite import ( SimpleSQLite, DatabaseError, AttributeNotFoundError ) table_name = "sample_table" con = SimpleSQLite("sample.sqlite", "w") con.create_table_from_data_matrix( table_name, ["attr_a", "attr_b"], [[1, "a"], [2, "b"]]) con.verify_attr_existence(table_name, "attr_a") try: con.verify_attr_existence(table_name, "not_existing") except AttributeNotFoundError as e: print(e) try: con.verify_attr_existence("not_existing", "attr_a") except DatabaseError as e: print(e) :Output: .. parsed-literal:: 'not_existing' attribute not found in 'sample_table' table 'not_existing' table not found in /tmp/sample.sqlite """<line_sep>self.verify_table_existence(table_name allow_view=<false>)<if_stmt>self.has_attr(table_name attr_name)<block_start><return><block_end><raise>AttributeNotFoundError(f"'{attr_name}' attribute not found in '{table_name}' table")<block_end><def_stmt>validate_access_permission self valid_permissions:Sequence[str]<arrow><none><block_start>""" :param valid_permissions: List of permissions that access is allowed. :type valid_permissions: |list|/|tuple| :raises ValueError: If the |attr_mode| is invalid. :raises IOError: If the |attr_mode| not in the ``valid_permissions``. :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| """<line_sep>self.check_connection()<if_stmt>typepy.is_null_string(self.mode)<block_start><raise>ValueError("mode is not set")<block_end><if_stmt>self.mode<not><in>valid_permissions<block_start><raise>OSError("invalid access: expected-mode='{}', current-mode='{}'".format("' or '".join(valid_permissions) self.mode))<block_end><block_end><def_stmt>drop_table self table_name:str<arrow><none><block_start>""" :param str table_name: Table name to drop. 
:raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises IOError: |raises_write_permission| """<line_sep>self.validate_access_permission(["w" "a"])<if_stmt>table_name<in>SQLITE_SYSTEM_TABLES# warning message <block_start><return><block_end><if_stmt>self.has_table(table_name include_view=<false>)<block_start>query=f"DROP TABLE IF EXISTS '{table_name:s}'"<line_sep>self.execute_query(query logging.getLogger().findCaller())<block_end><elif_stmt>self.has_view(table_name)<block_start>self.execute_query(f"DROP VIEW IF EXISTS {table_name}")<block_end>self.commit()<block_end><def_stmt>create_table self table_name:str attr_descriptions:Sequence[str]<arrow>bool<block_start>""" :param str table_name: Table name to create. :param list attr_descriptions: List of table description. :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises IOError: |raises_write_permission| """<line_sep>self.validate_access_permission(["w" "a"])<line_sep>table_name=table_name.strip()<if_stmt>self.has_table(table_name)<block_start><return><true><block_end>query="CREATE TABLE IF NOT EXISTS '{:s}' ({:s})".format(table_name ", ".join(attr_descriptions))<line_sep>logger.debug(query)<if_stmt>self.execute_query(query logging.getLogger().findCaller())<is><none><block_start><return><false><block_end><return><true><block_end><def_stmt>create_index self table_name:str attr_name:str<arrow><none><block_start>""" :param str table_name: Table name that contains the attribute to be indexed. :param str attr_name: Attribute name to create index. 
:raises IOError: |raises_write_permission| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| """<line_sep>self.verify_table_existence(table_name allow_view=<false>)<line_sep>self.validate_access_permission(["w" "a"])<line_sep>query_format="CREATE INDEX IF NOT EXISTS {index:s} ON {table}({attr})"<line_sep>query=query_format.format(index=make_index_name(table_name attr_name) table=Table(table_name) attr=Attr(attr_name) )<line_sep>logger.debug(query)<line_sep>self.execute_query(query logging.getLogger().findCaller())<block_end><def_stmt>create_index_list self table_name:str attr_names:Sequence[str]<arrow><none><block_start>""" :param str table_name: Table name that exists attribute. :param list attr_names: List of attribute names to create indices. Ignore attributes that are not existing in the table. .. seealso:: :py:meth:`.create_index` """<line_sep>self.validate_access_permission(["w" "a"])<if_stmt>typepy.is_empty_sequence(attr_names)<block_start><return><block_end>table_attr_set=set(self.fetch_attr_names(table_name))<line_sep>index_attr_set=set(AttrList.sanitize(attr_names))# type: ignore <for_stmt>attribute list(table_attr_set.intersection(index_attr_set))<block_start>self.create_index(table_name attribute)<block_end><block_end><def_stmt>create_table_from_data_matrix self table_name:str attr_names:Sequence[str] data_matrix:Any primary_key:Optional[str]=<none> add_primary_key_column:bool=<false> index_attrs:Optional[Sequence[str]]=<none> type_hints:Optional[Sequence[TypeHint]]=<none> <arrow><none><block_start>""" Create a table if not exists. Moreover, insert data into the created table. :param str table_name: Table name to create. :param list attr_names: Attribute names of the table. :param data_matrix: Data to be inserted into the table. 
:type data_matrix: List of |dict|/|namedtuple|/|list|/|tuple| :param str primary_key: |primary_key| :param tuple index_attrs: |index_attrs| :raises simplesqlite.NameValidationError: |raises_validate_table_name| :raises simplesqlite.NameValidationError: |raises_validate_attr_name| :raises ValueError: If the ``data_matrix`` is empty. :Example: :ref:`example-create-table-from-data-matrix` .. seealso:: :py:meth:`.create_table` :py:meth:`.insert_many` :py:meth:`.create_index_list` """<line_sep>self.__create_table_from_tabledata(TableData(table_name headers=attr_names rows=data_matrix type_hints=type_hints max_workers=self.__max_workers ) primary_key add_primary_key_column index_attrs )<block_end><def_stmt>create_table_from_tabledata self table_data:TableData primary_key:Optional[str]=<none> add_primary_key_column:bool=<false> index_attrs:Optional[Sequence[str]]=<none> <arrow><none><block_start>""" Create a table from :py:class:`tabledata.TableData`. :param tabledata.TableData table_data: Table data to create. :param str primary_key: |primary_key| :param tuple index_attrs: |index_attrs| .. seealso:: :py:meth:`.create_table_from_data_matrix` """<line_sep>self.__create_table_from_tabledata(table_data primary_key add_primary_key_column index_attrs)<block_end><def_stmt>create_table_from_csv self csv_source:str table_name:str="" attr_names:Sequence[str]=() delimiter:str="," quotechar:str='"' encoding:str="utf-8" primary_key:Optional[str]=<none> add_primary_key_column:bool=<false> index_attrs:Optional[Sequence[str]]=<none> <arrow><none><block_start>""" Create a table from a CSV file/text. :param str csv_source: Path to the CSV file or CSV text. :param str table_name: Table name to create. Using CSV file basename as the table name if the value is empty. :param list attr_names: Attribute names of the table. Use the first line of the CSV file as attributes if ``attr_names`` is empty. :param str delimiter: A one-character string used to separate fields. 
:param str quotechar: A one-character string used to quote fields containing special characters, such as the ``delimiter`` or ``quotechar``, or which contain new-line characters. :param str encoding: CSV file encoding. :param str primary_key: |primary_key| :param tuple index_attrs: |index_attrs| :raises ValueError: If the CSV data is invalid. :Dependency Packages: - `pytablereader <https://github.com/thombashi/pytablereader>`__ :Example: :ref:`example-create-table-from-csv` .. seealso:: :py:meth:`.create_table_from_data_matrix` :py:func:`csv.reader` :py:meth:`.pytablereader.CsvTableFileLoader.load` :py:meth:`.pytablereader.CsvTableTextLoader.load` """<import_stmt>pytablereader<as>ptr<line_sep>loader=ptr.CsvTableFileLoader(csv_source)<if_stmt>typepy.is_not_null_string(table_name)<block_start>loader.table_name=table_name<block_end>loader.headers=attr_names<line_sep>loader.delimiter=delimiter<line_sep>loader.quotechar=quotechar<line_sep>loader.encoding=encoding<try_stmt><block_start><for_stmt>table_data loader.load()<block_start>self.__create_table_from_tabledata(table_data primary_key add_primary_key_column index_attrs)<block_end><return><block_end><except_stmt>(ptr.InvalidFilePathError OSError)<block_start><pass><block_end>loader=ptr.CsvTableTextLoader(csv_source)<if_stmt>typepy.is_not_null_string(table_name)<block_start>loader.table_name=table_name<block_end>loader.headers=attr_names<line_sep>loader.delimiter=delimiter<line_sep>loader.quotechar=quotechar<line_sep>loader.encoding=encoding<for_stmt>table_data loader.load()<block_start>self.__create_table_from_tabledata(table_data primary_key add_primary_key_column index_attrs)<block_end><block_end><def_stmt>create_table_from_json self json_source:str table_name:str="" primary_key:Optional[str]=<none> add_primary_key_column:bool=<false> index_attrs:Optional[Sequence[str]]=<none> <arrow><none><block_start>""" Create a table from a JSON file/text. :param str json_source: Path to the JSON file or JSON text. 
:param str table_name: Table name to create. :param str primary_key: |primary_key| :param tuple index_attrs: |index_attrs| :Dependency Packages: - `pytablereader <https://github.com/thombashi/pytablereader>`__ :Examples: :ref:`example-create-table-from-json` .. seealso:: :py:meth:`.pytablereader.JsonTableFileLoader.load` :py:meth:`.pytablereader.JsonTableTextLoader.load` """<import_stmt>pytablereader<as>ptr<line_sep>loader=ptr.JsonTableFileLoader(json_source)<if_stmt>typepy.is_not_null_string(table_name)<block_start>loader.table_name=table_name<block_end><try_stmt><block_start><for_stmt>table_data loader.load()<block_start>self.__create_table_from_tabledata(table_data primary_key add_primary_key_column index_attrs)<block_end><return><block_end><except_stmt>(ptr.InvalidFilePathError OSError)<block_start><pass><block_end>loader=ptr.JsonTableTextLoader(json_source)<if_stmt>typepy.is_not_null_string(table_name)<block_start>loader.table_name=table_name<block_end><for_stmt>table_data loader.load()<block_start>self.__create_table_from_tabledata(table_data primary_key add_primary_key_column index_attrs)<block_end><block_end><def_stmt>create_table_from_dataframe self dataframe table_name:str="" primary_key:Optional[str]=<none> add_primary_key_column:bool=<false> index_attrs:Optional[Sequence[str]]=<none> <arrow><none><block_start>""" Create a table from a pandas.DataFrame instance. :param pandas.DataFrame dataframe: DataFrame instance to convert. :param str table_name: Table name to create. 
:param str primary_key: |primary_key| :param tuple index_attrs: |index_attrs| :Examples: :ref:`example-create-table-from-df` """<line_sep>self.__create_table_from_tabledata(TableData.from_dataframe(dataframe=dataframe table_name=table_name type_hints=[extract_typepy_from_dtype(dtype)<for>dtype dataframe.dtypes] ) primary_key add_primary_key_column index_attrs )<block_end><def_stmt>dump self db_path:str mode:str="a"<arrow><none><block_start><with_stmt>SimpleSQLite(db_path mode=mode max_workers=self.__max_workers)<as>dst_con<block_start><for_stmt>table_name self.fetch_table_names(include_view=<false>)<block_start>copy_table(self dst_con src_table_name=table_name dst_table_name=table_name)<block_end><block_end><block_end><def_stmt>rollback self<arrow><none><block_start>""" .. seealso:: :py:meth:`sqlite3.Connection.rollback` """<try_stmt><block_start>self.check_connection()<block_end><except_stmt>NullDatabaseConnectionError<block_start><return><block_end>logger.debug(f"rollback: path='{self.database_path}'")<assert_stmt>self.connection# to avoid type check error self.connection.rollback()<block_end><def_stmt>commit self<arrow><none><block_start>""" .. seealso:: :py:meth:`sqlite3.Connection.commit` """<try_stmt><block_start>self.check_connection()<block_end><except_stmt>NullDatabaseConnectionError<block_start><return><block_end>logger.debug(f"commit: path='{self.database_path}'")<assert_stmt>self.connection# to avoid type check error <try_stmt><block_start>self.connection.commit()<block_end><except_stmt>sqlite3.ProgrammingError<block_start><pass><block_end><block_end><def_stmt>close self<arrow><none><block_start>""" Commit and close the connection. .. 
seealso:: :py:meth:`sqlite3.Connection.close` """<if_stmt>self.__delayed_connection_path<and>self.__connection<is><none><block_start>self.__initialize_connection()<line_sep><return><block_end><try_stmt><block_start>self.check_connection()<block_end><except_stmt>(SystemError NullDatabaseConnectionError)<block_start><return><block_end>logger.debug(f"close connection to a SQLite database: path='{self.database_path}'")<line_sep>self.commit()<assert_stmt>self.connection# to avoid type check error self.connection.close()<line_sep>self.__initialize_connection()<block_end>@staticmethod<def_stmt>__validate_db_path database_path:str<arrow><none><block_start><if_stmt>typepy.is_null_string(database_path)<block_start><raise>ValueError("null path")<block_end><if_stmt>database_path<eq>MEMORY_DB_NAME<block_start><return><block_end><try_stmt><block_start>pathvalidate.validate_filename(os.path.basename(database_path))<block_end><except_stmt>AttributeError<block_start><raise>TypeError(f"database path must be a string: actual={type(database_path)}")<block_end><block_end><def_stmt>__verify_db_file_existence self database_path:str<arrow><none><block_start>""" :raises SimpleSQLite.OperationalError: If unable to open database file. 
"""<line_sep>self.__validate_db_path(database_path)<if_stmt><not>os.path.isfile(os.path.realpath(database_path))<block_start><raise>OSError("file not found: "+database_path)<block_end><try_stmt><block_start>connection=sqlite3.connect(database_path)<block_end><except_stmt>sqlite3.OperationalError<as>e<block_start><raise>OperationalError(e)<block_end>connection.close()<block_end><def_stmt>__delayed_connect self<arrow>bool<block_start><if_stmt>self.__delayed_connection_path<is><none><block_start><return><false><block_end># save and clear delayed_connection_path to avoid infinite recursion before # calling the connect method connection_path=self.__delayed_connection_path<line_sep>self.__delayed_connection_path=<none><line_sep>self.connect(connection_path cast(str self.__mode))<line_sep><return><true><block_end><def_stmt>__extract_attr_descs_from_tabledata self table_data primary_key add_primary_key_column<block_start><if_stmt>primary_key<and><not>add_primary_key_column<and>primary_key<not><in>table_data.headers<block_start><raise>ValueError("primary key must be one of the values of attributes")<block_end>attr_description_list=[]<if_stmt>add_primary_key_column<block_start><if_stmt><not>primary_key<block_start>primary_key="id"<block_end><if_stmt>primary_key<in>table_data.headers<block_start><raise>ValueError("a primary key field that will be added should not conflict "<concat>"with existing fields.")<block_end>attr_description_list.append(f"{primary_key} INTEGER PRIMARY KEY AUTOINCREMENT")<block_end><for_stmt>col,value_type sorted(self.__extract_col_type_from_tabledata(table_data).items())<block_start>attr_name=table_data.headers[col]<line_sep>attr_description=f"{Attr(attr_name)} {value_type:s}"<if_stmt>attr_name<eq>primary_key<block_start>attr_description<augadd>" PRIMARY KEY"<block_end>attr_description_list.append(attr_description)<block_end><return>attr_description_list<block_end>@staticmethod<def_stmt>__extract_col_type_from_tabledata 
table_data:TableData<arrow>Dict<block_start>""" Extract data type name for each column as SQLite names. :param tabledata.TableData table_data: :return: { column_number : column_data_type } :rtype: dictionary """<line_sep>typename_table={typepy.Typecode.INTEGER:"INTEGER" typepy.Typecode.REAL_NUMBER:"REAL" typepy.Typecode.STRING:"TEXT" }<line_sep><return>{col_idx:typename_table.get(col_dp.typecode "TEXT")<for>col_idx,col_dp enumerate(table_data.column_dp_list)}<block_end><def_stmt>__create_table_from_tabledata self table_data:TableData primary_key:Optional[str] add_primary_key_column:bool index_attrs:Optional[Sequence[str]] <block_start>self.validate_access_permission(["w" "a"])<line_sep>debug_msg_list=["__create_table_from_tabledata:" f" tbldata={table_data}"]<if_stmt>primary_key<block_start>debug_msg_list.append(f" primary_key={primary_key}")<block_end><if_stmt>add_primary_key_column<block_start>debug_msg_list.append(f" add_primary_key_column={add_primary_key_column}")<block_end><if_stmt>index_attrs<block_start>debug_msg_list.append(f" index_attrs={index_attrs}")<block_end>logger.debug("\n".join(debug_msg_list))<if_stmt>table_data.is_empty()<block_start><raise>ValueError(f"input table_data is empty: {table_data}")<block_end>table_data=SQLiteTableDataSanitizer(table_data dup_col_handler=self.dup_col_handler max_workers=self.__max_workers).normalize()<line_sep>table_name=table_data.table_name<assert_stmt>table_name<line_sep>self.create_table(table_name self.__extract_attr_descs_from_tabledata(table_data primary_key add_primary_key_column) )<if_stmt>add_primary_key_column<block_start>self.insert_many(table_name [[<none>]+row<for>row table_data.value_matrix])<block_end><else_stmt><block_start>self.insert_many(table_name table_data.value_matrix)<block_end><if_stmt>typepy.is_not_empty_sequence(index_attrs)<block_start>self.create_index_list(table_name AttrList.sanitize(index_attrs))# type: ignore <block_end>self.commit()<block_end><block_end><def_stmt>connect_memdb 
max_workers:Optional[int]=<none><arrow>SimpleSQLite<block_start>""" :return: Instance of an in memory database. :rtype: SimpleSQLite :Example: :ref:`example-connect-sqlite-db-mem` """<line_sep><return>SimpleSQLite(MEMORY_DB_NAME "w" max_workers=max_workers)<block_end>
<import_from_stmt>mongoengine StringField<import_from_stmt>cloudtunes.base.models EmbeddedDocument<import_from_stmt>cloudtunes.services.models ServiceAccount<class_stmt>DropboxAccount(ServiceAccount)<block_start>country=StringField(max_length=2)<line_sep>display_name=StringField()<line_sep>oauth_token_key=StringField()<line_sep>oauth_token_secret=StringField()<line_sep>delta_cursor=StringField()<line_sep>service_name='Dropbox'<def_stmt>get_username self<block_start><return>self.display_name<block_end><def_stmt>get_picture self<block_start><return><none><block_end><def_stmt>get_url self<block_start><return><none><block_end><block_end><class_stmt>DropboxTrack(EmbeddedDocument)<block_start>path=StringField(required=<true>)<block_end>
# -*- coding: utf-8 -*- """ Profile: http://hl7.org/fhir/StructureDefinition/BodyStructure Release: R4 Version: 4.0.1 Build ID: 9346c8cc45 Last updated: 2019-11-01T09:29:23.356+11:00 """<import_from_stmt>pydantic.validators bytes_validator# noqa: F401 <import_from_stmt>.. fhirtypes# noqa: F401 <import_from_stmt>.. bodystructure<def_stmt>impl_bodystructure_1 inst<block_start><assert_stmt>inst.description<eq>"EDD 1/1/2017 confirmation by LMP"<assert_stmt>inst.id<eq>"fetus"<assert_stmt>(inst.identifier[0].system<eq>"http://goodhealth.org/bodystructure/identifiers")<assert_stmt>inst.identifier[0].value<eq>"12345"<assert_stmt>inst.location.coding[0].code<eq>"83418008"<assert_stmt>inst.location.coding[0].display<eq>"Entire fetus (body structure)"<assert_stmt>inst.location.coding[0].system<eq>"http://snomed.info/sct"<assert_stmt>inst.location.text<eq>"Fetus"<assert_stmt>inst.meta.tag[0].code<eq>"HTEST"<assert_stmt>inst.meta.tag[0].display<eq>"test health data"<assert_stmt>(inst.meta.tag[0].system<eq>"http://terminology.hl7.org/CodeSystem/v3-ActReason")<assert_stmt>inst.patient.reference<eq>"Patient/example"<assert_stmt>inst.text.status<eq>"generated"<block_end><def_stmt>test_bodystructure_1 base_settings<block_start>"""No. 1 tests collection for BodyStructure. Test File: bodystructure-example-fetus.json """<line_sep>filename=base_settings["unittest_data_dir"]/"bodystructure-example-fetus.json"<line_sep>inst=bodystructure.BodyStructure.parse_file(filename content_type="application/json" encoding="utf-8")<assert_stmt>"BodyStructure"<eq>inst.resource_type<line_sep>impl_bodystructure_1(inst)<line_sep># testing reverse by generating data from itself and create again. 
data=inst.dict()<assert_stmt>"BodyStructure"<eq>data["resourceType"]<line_sep>inst2=bodystructure.BodyStructure(**data)<line_sep>impl_bodystructure_1(inst2)<block_end><def_stmt>impl_bodystructure_2 inst<block_start><assert_stmt>inst.description<eq>"7 cm maximum diameter"<assert_stmt>inst.id<eq>"tumor"<assert_stmt>(inst.identifier[0].system<eq>"http://goodhealth.org/bodystructure/identifiers")<assert_stmt>inst.identifier[0].value<eq>"12345"<assert_stmt>inst.image[0].contentType<eq>"application/dicom"<assert_stmt>inst.image[0].url<eq>("http://imaging.acme.com/wado/server?requestType=WADO&amp;wad"<concat>"o_details")<assert_stmt>inst.location.coding[0].code<eq>"78961009"<assert_stmt>inst.location.coding[0].display<eq>"Splenic structure (body structure)"<assert_stmt>inst.location.coding[0].system<eq>"http://snomed.info/sct"<assert_stmt>inst.location.text<eq>"Spleen"<assert_stmt>inst.meta.tag[0].code<eq>"HTEST"<assert_stmt>inst.meta.tag[0].display<eq>"test health data"<assert_stmt>(inst.meta.tag[0].system<eq>"http://terminology.hl7.org/CodeSystem/v3-ActReason")<assert_stmt>inst.morphology.coding[0].code<eq>"4147007"<assert_stmt>inst.morphology.coding[0].display<eq>"Mass (morphologic abnormality)"<assert_stmt>inst.morphology.coding[0].system<eq>"http://snomed.info/sct"<assert_stmt>inst.morphology.text<eq>"Splenic mass"<assert_stmt>inst.patient.reference<eq>"Patient/example"<assert_stmt>inst.text.status<eq>"generated"<block_end><def_stmt>test_bodystructure_2 base_settings<block_start>"""No. 2 tests collection for BodyStructure. Test File: bodystructure-example-tumor.json """<line_sep>filename=base_settings["unittest_data_dir"]/"bodystructure-example-tumor.json"<line_sep>inst=bodystructure.BodyStructure.parse_file(filename content_type="application/json" encoding="utf-8")<assert_stmt>"BodyStructure"<eq>inst.resource_type<line_sep>impl_bodystructure_2(inst)<line_sep># testing reverse by generating data from itself and create again. 
data=inst.dict()<assert_stmt>"BodyStructure"<eq>data["resourceType"]<line_sep>inst2=bodystructure.BodyStructure(**data)<line_sep>impl_bodystructure_2(inst2)<block_end><def_stmt>impl_bodystructure_3 inst<block_start><assert_stmt>inst.active<is><false><assert_stmt>inst.description<eq>"inner surface (volar) of the left forearm"<assert_stmt>inst.id<eq>"skin-patch"<assert_stmt>(inst.identifier[0].system<eq>"http://goodhealth.org/bodystructure/identifiers")<assert_stmt>inst.identifier[0].value<eq>"12345"<assert_stmt>inst.location.coding[0].code<eq>"14975008"<assert_stmt>inst.location.coding[0].display<eq>"Forearm"<assert_stmt>inst.location.coding[0].system<eq>"http://snomed.info/sct"<assert_stmt>inst.location.text<eq>"Forearm"<assert_stmt>inst.locationQualifier[0].coding[0].code<eq>"419161000"<assert_stmt>inst.locationQualifier[0].coding[0].display<eq>"Unilateral left"<assert_stmt>inst.locationQualifier[0].coding[0].system<eq>"http://snomed.info/sct"<assert_stmt>inst.locationQualifier[0].text<eq>"Left"<assert_stmt>inst.locationQualifier[1].coding[0].code<eq>"263929005"<assert_stmt>inst.locationQualifier[1].coding[0].display<eq>"Volar"<assert_stmt>inst.locationQualifier[1].coding[0].system<eq>"http://snomed.info/sct"<assert_stmt>inst.locationQualifier[1].text<eq>"Volar"<assert_stmt>inst.meta.tag[0].code<eq>"HTEST"<assert_stmt>inst.meta.tag[0].display<eq>"test health data"<assert_stmt>(inst.meta.tag[0].system<eq>"http://terminology.hl7.org/CodeSystem/v3-ActReason")<assert_stmt>inst.morphology.text<eq>"Skin patch"<assert_stmt>inst.patient.reference<eq>"Patient/example"<assert_stmt>inst.text.status<eq>"generated"<block_end><def_stmt>test_bodystructure_3 base_settings<block_start>"""No. 3 tests collection for BodyStructure. 
Test File: bodystructure-example-skin-patch.json """<line_sep>filename=(base_settings["unittest_data_dir"]/"bodystructure-example-skin-patch.json")<line_sep>inst=bodystructure.BodyStructure.parse_file(filename content_type="application/json" encoding="utf-8")<assert_stmt>"BodyStructure"<eq>inst.resource_type<line_sep>impl_bodystructure_3(inst)<line_sep># testing reverse by generating data from itself and create again. data=inst.dict()<assert_stmt>"BodyStructure"<eq>data["resourceType"]<line_sep>inst2=bodystructure.BodyStructure(**data)<line_sep>impl_bodystructure_3(inst2)<block_end>
# https://semgrep.live/DdG <if_stmt>3<eq>3<block_start>print('3!')<block_end>
<import_from_stmt>odoo fields models<class_stmt>DeliveryCarrier(models.Model)<block_start>_inherit='delivery.carrier'<line_sep>partner_id=fields.Many2one('res.partner' string='Transportadora')<block_end>
<import_stmt>copy<import_stmt>random<import_from_stmt>crosshair.core_and_libs proxy_for_type standalone_statespace<import_from_stmt>crosshair.libimpl.randomlib ExplicitRandom<def_stmt>test_ExplicitRandom <block_start>rng=ExplicitRandom([1 2])<assert_stmt>rng.randrange(0 10)<eq>1<assert_stmt>rng.choice(["a" "b" "c"])<eq>"c"<assert_stmt>rng.choice(["a" "b" "c"])<eq>"a"<assert_stmt>rng.random()<eq>0.0<assert_stmt>repr(rng)<eq>"crosshair.libimpl.randomlib.ExplicitRandom([1, 2, 0, 0.0])"<block_end><def_stmt>test_ExplicitRandom_copy <block_start>rng=ExplicitRandom([1 2])<line_sep>(rng2 )=copy.deepcopy([rng])<assert_stmt>rng.randint(0 5)<eq>1<assert_stmt>rng2.randint(0 5)<eq>1<assert_stmt>rng.randint(0 5)<eq>2<assert_stmt>rng2.randint(0 5)<eq>2<block_end><def_stmt>test_proxy_random <block_start><with_stmt>standalone_statespace<as>space<block_start>rng=proxy_for_type(random.Random "rng")<line_sep>i=rng.randrange(5 10)<assert_stmt>space.is_possible(i.var<eq>5)<assert_stmt>space.is_possible(i.var<eq>9)<assert_stmt><not>space.is_possible(i.var<eq>4)<block_end><block_end>
# Generated by Django 2.2 on 2019-05-02 17:34 <import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('openbook_posts' '0030_post_is_closed') ]<line_sep>operations=[migrations.AddField(model_name='postcomment' name='parent_comment' field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.CASCADE related_name='replies' to='openbook_posts.PostComment') ) ]<block_end>
<import_stmt>magma<as>m<import_stmt>magma.testing<def_stmt>test_2d_array_from_verilog <block_start>main=m.define_from_verilog(f""" module transpose_buffer ( input logic clk, output logic [2:0] index_inner, output logic [2:0] index_outer, input logic [3:0] input_data [63:0], input logic [2:0] range_inner, input logic [2:0] range_outer, input logic rst_n, input logic [2:0] stride ); always_ff @(posedge clk, negedge rst_n) begin if (~rst_n) begin index_outer <= 3'h0; index_inner <= 3'h0; end else begin if (index_outer == (range_outer - 3'h1)) begin index_outer <= 3'h0; end else index_outer <= index_outer + 3'h1; if (index_inner == (range_inner - 3'h1)) begin index_inner <= 3'h0; end else index_inner <= index_inner + 3'h1; end end endmodule // transpose_buffer """)[0]<line_sep>m.compile("build/2d_array_from_verilog" main output="verilog")<assert_stmt>m.testing.check_files_equal(__file__ f"build/2d_array_from_verilog.v" f"gold/2d_array_from_verilog.v")<block_end>
# MVT设计模式,model-view-template # model负责和数据库交互来获取model数据 # view相当于MVC中的Controller负责处理网络请求http response # template相当于MVC中的View负责封装html,css,js等内置模板引擎 # 具体流程:客户端发出网页请求 --> View接受网络请求 --> 找mdel去数据库找数据 -->找回的model数据返回给view # --> view可以直接返回无修饰的model原始数据给客户端 --> 或者找template去美化数据,添加css,html等,动态生成一个html文件返回给View # --> View将动态生成的html返回给客户端, MVT中的View充当中间人,两头链接M和T # django安装的时候会默认安装在一个公共的路径下,这样开发不同项目的时候,可能会用到不同版本的django,因为安装在公共陆空 # 这样就会版本覆盖,其他项目可能会产生版本不兼容的异常 # 所以安装django的时候会搭建虚拟环境,一个项目对应一个环境 """ django的配置,直接使用pycharm专业版,在设置中解释器中使用pip安装django 1- 安装成功后,整体的使用类似于angular的使用方法,关键字django-admin 2- cd到对应的目录下,django-admin startproject 项目名称 _init_.py --> 项目初始化文件,表示该项目可以被当作一个package引入 settings.py --> 项目的整体配置文件,比如在这里关联Book这个app urls.py --> 项目的url配置文件 wsgi.py --> 项目和WSGI兼容的Web服务器入口 manage.py --> 项目运行的入口,指定配置文件路径,里面包含main函数 3- cd到项目名称下面才可以: python manage.py startapp 应用名称 (创建应用,类似于angular中的模块?) init.py --> 应用初始化文件,表示该项目可以被当作一个package引入 admin.py --> 后台的站点管理注册文件 apps.py --> 当前app的基本信息 models.py --> 数据模型,里面存放各种bean tests.py --> 单元测试 views.py --> 处理业务逻辑,MVT中的中间人 migrations --> 模型model迁移的,将model类制作成数据库中的表 4- 配置刚刚创建的app,在项目的settings.py中的installed_apps中添加当前app,进行组装 """<line_sep>""" 站点管理: 1- settings.py中设置语言和时区 2- python manage.py createsuperuser 创建管理员 3- 启动服务,到 http://127.0.0.1:8000/admin进行登陆 4- 在admin.py中注册自己的数据models用来在后台显示 """<line_sep>""" ORM: object-relation-mapping 对象关系映射 优点:面向对象编程,不再是面向数据库写代码 实现了数据模型和数据库的解耦,不在关注用的是oracle,mysql还是其他数据库 缺点: object需要花费一点时间转换为sql语句,有性能损失(不过可忽略不计) """<line_sep>
# -*- coding: utf-8 -*- # @Time : 2017/7/27 13:47 # @Author : play4fun # @File : HoughCircles_camera.py # @Software: PyCharm """ HoughCircles_camera.py: 用围棋-棋子来测试 """<import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>skimage.measure compare_mse<as>mse<import_stmt>string random<def_stmt>id_generator size=6 chars=string.ascii_uppercase+string.digits<block_start><return>''.join(random.choice(chars)<for>_ range(size))<block_end>cap=cv2.VideoCapture(0)<line_sep># ret = cap.set(3, 640) # ret = cap.set(4, 480) # margin = 60 margin=30<def_stmt>draw_line_rectangle frame margin<block_start>rows,cols,ch=frame.shape# (720, 1280, 3) half=int(cols/2)<line_sep># 中间 cv2.line(frame (half 0) (half rows) (0 0 255) 2)<line_sep># margin = 40 # 左边 up_left1=(margin margin)# 左上点 down_right1=(cols-margin rows-margin)# 右下点 # print(up_left, down_right) cv2.rectangle(frame up_left1 down_right1 (0 255 0) 3)<block_end>ret,temp=cap.read()<line_sep>tm=0<while_stmt>cap.isOpened()<block_start>key=cv2.waitKey(1)<if_stmt>key<eq>ord("q")<block_start><break><block_end><if_stmt>key<eq>ord('s')<block_start>cv2.imwrite(id_generator()+'.jpg' frame2)<block_end># Capture frame-by-frame ret,frame=cap.read()<line_sep>m=mse(cv2.cvtColor(temp cv2.COLOR_BGR2GRAY) cv2.cvtColor(frame cv2.COLOR_BGR2GRAY))<line_sep>print('mse' m '----\n')<if_stmt>abs(m-tm)<l>2# 静止画面,不用重复计算 <block_start><continue><block_end><else_stmt><block_start>temp=frame.copy()<line_sep>tm=m<block_end># # print(margin,frame.shape[0] - margin, margin,frame.shape[1] - margin)#40 680 40 1240 frame2=frame[margin:frame.shape[0]-margin margin:frame.shape[1]-margin]# .copy() # cv2.imshow('frame2', frame2) gray=cv2.cvtColor(frame2 cv2.COLOR_BGR2GRAY)<line_sep># edges = cv2.Canny(gray, 50, 150, apertureSize=3) # HoughCircles(image, method, dp, minDist, circles=None, param1=None, param2=None, minRadius=None, maxRadius=None) # circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0) 
circles=cv2.HoughCircles(gray cv2.HOUGH_GRADIENT 1 20 param1=100 param2=30 minRadius=10 maxRadius=40)<line_sep># circles = circles1[0, :, :] # 提取为二维 # circles = np.uint16(np.around(circles1)) print(circles)<line_sep>cimg=frame2<if_stmt>circles<is><not><none><block_start><for_stmt>i circles[0 :]# for i in circles[:]: # draw the outer circle <block_start>cv2.circle(cimg (i[0] i[1]) i[2] (0 255 0) 2)<line_sep># draw the center of the circle cv2.circle(cimg (i[0] i[1]) 2 (0 0 255) 3)<block_end><block_end># cv2.imshow('detected circles', cimg) draw_line_rectangle(frame margin)<line_sep>cv2.imshow("houghlines" frame)<line_sep># cv2.imwrite('frame3.jpg', frame[margin:frame.shape[0] - margin, margin:frame.shape[1] - margin]) <block_end># When everything done, release the capture cap.release()<line_sep>cv2.destroyAllWindows()<line_sep>
r"""Learned Perceptual Image Patch Similarity (LPIPS) This module implements the LPIPS in PyTorch. Credits: Inspired by [lpips-pytorch](https://github.com/S-aiueo32/lpips-pytorch) References: [1] The Unreasonable Effectiveness of Deep Features as a Perceptual Metric (Zhang et al., 2018) https://arxiv.org/abs/1801.03924 """<import_stmt>inspect<import_stmt>os<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torchvision.models<as>models<import_stmt>torch.hub<as>hub<import_from_stmt>piqa.utils _jit _assert_type _reduce<import_from_stmt>typing Dict List<line_sep>_SHIFT=torch.tensor([0.485 0.456 0.406])<line_sep>_SCALE=torch.tensor([0.229 0.224 0.225])<line_sep>_WEIGHTS_URL=('https://github.com/richzhang/PerceptualSimilarity'<concat>'/raw/master/lpips/weights/{}/{}.pth')<def_stmt>get_weights network:str='alex' version:str='v0.1' <arrow>Dict[str torch.Tensor]<block_start>r"""Returns the official LPIPS weights for `network`. Args: network: Specifies the perception network that is used: `'alex'` | `'squeeze'` | `'vgg'`. version: Specifies the official version release: `'v0.0'` | `'v0.1'`. Example: >>> w = get_weights(network='alex') >>> w.keys() dict_keys(['0.1.weight', '1.1.weight', '2.1.weight', '3.1.weight', '4.1.weight']) """<line_sep># Load from URL weights=hub.load_state_dict_from_url(_WEIGHTS_URL.format(version network) map_location='cpu' )<line_sep># Format keys weights={k.replace('lin' '').replace('.model' ''):v<for>(k v) weights.items()}<line_sep><return>weights<block_end><class_stmt>Intermediary(nn.Module)<block_start>r"""Module that catches and returns the outputs of indermediate target layers of a sequential module during its forward pass. Args: layers: A sequential module. targets: A list of target layer indexes. 
"""<def_stmt>__init__ self layers:nn.Sequential targets:List[int]<block_start>r""""""<line_sep>super().__init__()<line_sep>self.layers=nn.ModuleList()<line_sep>j=0<line_sep>seq:List[nn.Module]=[]<for_stmt>i,layer enumerate(layers)<block_start>seq.append(layer)<if_stmt>i<eq>targets[j]<block_start>self.layers.append(nn.Sequential(*seq))<line_sep>seq.clear()<line_sep>j<augadd>1<if_stmt>j<eq>len(targets)<block_start><break><block_end><block_end><block_end><block_end><def_stmt>forward self input:torch.Tensor<arrow>List[torch.Tensor]<block_start>r"""Defines the computation performed at every call. """<line_sep>output=[]<for_stmt>layer self.layers<block_start>input=layer(input)<line_sep>output.append(input)<block_end><return>output<block_end><block_end><class_stmt>LPIPS(nn.Module)<block_start>r"""Creates a criterion that measures the LPIPS between an input \(x\) and a target \(y\). $$ \text{LPIPS}(x, y) = \sum_{l \, \in \, \mathcal{F}} w_l \cdot \text{MSE}(\hat{\phi}_l(x), \hat{\phi}_l(y)) $$ where \(\hat{\phi}_l\) represents the normalized output of an intermediate layer \(l\) in a perception network \(\mathcal{F}\). Args: network: Specifies the perception network \(\mathcal{F}\) to use: `'alex'` | `'squeeze'` | `'vgg'`. scaling: Whether the input and target need to be scaled w.r.t. ImageNet. dropout: Whether dropout is used or not. pretrained: Whether the official weights \(w_l\) are used or not. eval: Whether to initialize the object in evaluation mode or not. reduction: Specifies the reduction to apply to the output: `'none'` | `'mean'` | `'sum'`. Shapes: * Input: \((N, 3, H, W)\) * Target: \((N, 3, H, W)\) * Output: \((N,)\) or \(()\) depending on `reduction` Note: `LPIPS` is a *trainable* metric. 
Example: >>> criterion = LPIPS().cuda() >>> x = torch.rand(5, 3, 256, 256, requires_grad=True).cuda() >>> y = torch.rand(5, 3, 256, 256).cuda() >>> l = criterion(x, y) >>> l.size() torch.Size([]) >>> l.backward() """<def_stmt>__init__ self network:str='alex' scaling:bool=<true> dropout:bool=<false> pretrained:bool=<true> eval:bool=<true> reduction:str='mean' <block_start>r""""""<line_sep>super().__init__()<line_sep># ImageNet scaling self.scaling=scaling<line_sep>self.register_buffer('shift' _SHIFT.view(1 -1 1 1))<line_sep>self.register_buffer('scale' _SCALE.view(1 -1 1 1))<line_sep># Perception layers <if_stmt>network<eq>'alex'# AlexNet <block_start>layers=models.alexnet(pretrained=<true>).features<line_sep>targets=[1 4 7 9 11]<line_sep>channels=[64 192 384 256 256]<block_end><elif_stmt>network<eq>'squeeze'# SqueezeNet <block_start>layers=models.squeezenet1_1(pretrained=<true>).features<line_sep>targets=[1 4 7 9 10 11 12]<line_sep>channels=[64 128 256 384 384 512 512]<block_end><elif_stmt>network<eq>'vgg'# VGG16 <block_start>layers=models.vgg16(pretrained=<true>).features<line_sep>targets=[3 8 15 22 29]<line_sep>channels=[64 128 256 512 512]<block_end><else_stmt><block_start><raise>ValueError(f'Unknown network architecture {network}')<block_end>self.net=Intermediary(layers targets)<for_stmt>p self.net.parameters()<block_start>p.requires_grad=<false><block_end># Linear comparators self.lins=nn.ModuleList([nn.Sequential(nn.Dropout(inplace=<true>)<if>dropout<else>nn.Identity() nn.Conv2d(c 1 kernel_size=1 bias=<false>) )<for>c channels])<if_stmt>pretrained<block_start>self.lins.load_state_dict(get_weights(network=network))<block_end><if_stmt>eval<block_start>self.eval()<block_end>self.reduction=reduction<block_end><def_stmt>forward self input:torch.Tensor target:torch.Tensor <arrow>torch.Tensor<block_start>r"""Defines the computation performed at every call. """<line_sep>_assert_type([input target] device=self.shift.device dim_range=(4 4) n_channels=3 value_range=(0. 
1.)<if>self.scaling<else>(0. -1.) )<line_sep># ImageNet scaling <if_stmt>self.scaling<block_start>input=(input-self.shift)/self.scale<line_sep>target=(target-self.shift)/self.scale<block_end># LPIPS residuals=[]<for_stmt>lin,fx,fy zip(self.lins self.net(input) self.net(target))<block_start>fx=fx/torch.linalg.norm(fx dim=1 keepdim=<true>)<line_sep>fy=fy/torch.linalg.norm(fy dim=1 keepdim=<true>)<line_sep>mse=((fx-fy)<power>2).mean(dim=(-1 -2) keepdim=<true>)<line_sep>residuals.append(lin(mse).flatten())<block_end>l=torch.stack(residuals dim=-1).sum(dim=-1)<line_sep><return>_reduce(l self.reduction)<block_end><block_end>
<import_from_future_stmt> annotations<import_from_stmt>typing Any<import_stmt>numpy<as>np<line_sep>AR_i8:np.ndarray[Any np.dtype[np.int_]]=np.arange(10)<line_sep>ar_iter=np.lib.Arrayterator(AR_i8)<line_sep>ar_iter.var<line_sep>ar_iter.buf_size<line_sep>ar_iter.start<line_sep>ar_iter.stop<line_sep>ar_iter.step<line_sep>ar_iter.shape<line_sep>ar_iter.flat<line_sep>ar_iter.__array__()<for_stmt>i ar_iter<block_start><pass><block_end>ar_iter[0]<line_sep>ar_iter[<ellipsis>]<line_sep>ar_iter[:]<line_sep>ar_iter[0 0 0]<line_sep>ar_iter[<ellipsis> 0 :]<line_sep>
""" This module holds tests for compatibility with other py.test plugins. Created on Apr 15, 2014 @author: pupssman """<import_from_stmt>hamcrest assert_that contains has_property<def_stmt>test_maxfail report_for<block_start>""" Check that maxfail generates proper report """<line_sep>report=report_for(""" def test_a(): assert False def test_b(): assert True """ extra_run_args=['-x'])<line_sep>assert_that(report.findall('.//test-case') contains(has_property('name' 'test_a')))<block_end>
# This code is part of Qiskit. # # (C) Copyright IBM 2018, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Truncated Newton (TNC) optimizer. """<import_from_stmt>typing Optional<import_from_stmt>.scipy_optimizer SciPyOptimizer<class_stmt>TNC(SciPyOptimizer)<block_start>""" Truncated Newton (TNC) optimizer. TNC uses a truncated Newton algorithm to minimize a function with variables subject to bounds. This algorithm uses gradient information; it is also called Newton Conjugate-Gradient. It differs from the :class:`CG` method as it wraps a C implementation and allows each variable to be given upper and lower bounds. Uses scipy.optimize.minimize TNC For further detail, please refer to See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html """<line_sep>_OPTIONS=["maxiter" "disp" "accuracy" "ftol" "xtol" "gtol" "eps"]<line_sep># pylint: disable=unused-argument <def_stmt>__init__ self maxiter:int=100 disp:bool=<false> accuracy:float=0 ftol:float=-1 xtol:float=-1 gtol:float=-1 tol:Optional[float]=<none> eps:float=1e-08 options:Optional[dict]=<none> max_evals_grouped:int=1 **kwargs <arrow><none><block_start>""" Args: maxiter: Maximum number of function evaluation. disp: Set to True to print convergence messages. accuracy: Relative precision for finite difference calculations. If <= machine_precision, set to sqrt(machine_precision). Defaults to 0. ftol: Precision goal for the value of f in the stopping criterion. If ftol < 0.0, ftol is set to 0.0 defaults to -1. xtol: Precision goal for the value of x in the stopping criterion (after applying x scaling factors). 
If xtol < 0.0, xtol is set to sqrt(machine_precision). Defaults to -1. gtol: Precision goal for the value of the projected gradient in the stopping criterion (after applying x scaling factors). If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy). Setting it to 0.0 is not recommended. Defaults to -1. tol: Tolerance for termination. eps: Step size used for numerical approximation of the Jacobian. options: A dictionary of solver options. max_evals_grouped: Max number of default gradient evaluations performed simultaneously. kwargs: additional kwargs for scipy.optimize.minimize. """<if_stmt>options<is><none><block_start>options={}<block_end><for_stmt>k,v list(locals().items())<block_start><if_stmt>k<in>self._OPTIONS<block_start>options[k]=v<block_end><block_end>super().__init__("TNC" options=options tol=tol max_evals_grouped=max_evals_grouped **kwargs )<block_end><block_end>
"""DINO self-distillation training components.

Contains the cosine schedulers, the multi-crop wrapper, the projection head,
the DINO loss (with EMA-centering of the teacher output) and the `DINO`
model with its custom train / evaluate steps.
"""
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.parallel import DistributedDataParallel as DDP
from typing import Any, Dict, Optional, Tuple

from cftool.misc import update_dict
from cftool.misc import shallow_copy_dict

from ..encoder import Encoder1DBase
from ....data import CVLoader
from ....types import tensor_dict_type
from ....protocol import StepOutputs
from ....protocol import TrainerState
from ....protocol import MetricsOutputs
from ....protocol import ModelWithCustomSteps
from ....constants import LOSS_KEY
from ....constants import INPUT_KEY
from ....constants import LATENT_KEY
from ....misc.toolkit import to_device
from ....misc.toolkit import l2_normalize
from ....misc.toolkit import get_world_size
from ....misc.toolkit import has_batch_norms


def _get_dino_defaults(name: str) -> Dict[str, Any]:
    """Default encoder kwargs per backbone family (only "vit" has extras)."""
    if name == "vit":
        return {"patch_size": 16, "drop_path_rate": 0.1}
    return {}


class Scheduler:
    """Step-indexed lookup into a precomputed value array.

    Indexing past the end clamps to the last value, so the schedule can be
    queried safely after its nominal horizon.
    """

    def __init__(self, values: np.ndarray):
        self.values = values
        self.max_idx = len(values) - 1

    def __getitem__(self, index: int) -> Any:
        return self.values[min(index, self.max_idx)]


def cosine_scheduler(
    base_value: float,
    final_value: float,
    epochs: int,
    num_step_per_epoch: int,
    warmup_epochs: int = 0,
    start_warmup_value: int = 0,
) -> Scheduler:
    """Build a per-step schedule: linear warmup followed by cosine decay.

    The resulting array has exactly `epochs * num_step_per_epoch` entries.
    """
    warmup_part = np.array([])
    warmup_steps = warmup_epochs * num_step_per_epoch
    if warmup_epochs > 0:
        warmup_part = np.linspace(start_warmup_value, base_value, warmup_steps)
    decay_steps = np.arange(epochs * num_step_per_epoch - warmup_steps)
    amplitude = base_value - final_value
    decay_part = final_value + 0.5 * amplitude * (
        1.0 + np.cos(np.pi * decay_steps / len(decay_steps))
    )
    full = np.concatenate((warmup_part, decay_part))
    assert len(full) == epochs * num_step_per_epoch
    return Scheduler(full)


class MultiCropWrapper(nn.Module):
    """Run a backbone over multi-crop batches, grouping crops of equal size.

    Crops with the same spatial resolution are concatenated and forwarded
    together, then all latent outputs are concatenated and projected by
    `head`.
    """

    def __init__(self, backbone: nn.Module, head: nn.Module):
        super().__init__()
        # disable any classification layers on the backbone
        backbone.fc, backbone.head = nn.Identity(), nn.Identity()
        self.backbone = backbone
        self.head = head

    def forward(
        self,
        batch_idx: int,
        batch: tensor_dict_type,
        state: Optional[TrainerState] = None,
        *,
        img_end_idx: Optional[int] = None,
        **kwargs: Any,
    ) -> Tensor:
        crops = batch[INPUT_KEY]
        if not isinstance(crops, list):
            crops = batch[INPUT_KEY] = [crops]
        if img_end_idx is not None:
            crops = crops[:img_end_idx]
        # boundaries between runs of crops that share a resolution
        boundaries = torch.cumsum(
            torch.unique_consecutive(
                torch.tensor([crop.shape[-1] for crop in crops]),
                return_counts=True,
            )[1],
            0,
        )
        latents = []
        cursor = 0
        for boundary in boundaries:
            sub_batch = shallow_copy_dict(batch)
            sub_batch[INPUT_KEY] = torch.cat(crops[cursor:boundary])
            results = self.backbone(batch_idx, sub_batch, state, **kwargs)
            latent = results[LATENT_KEY]
            if isinstance(latent, tuple):
                latent = latent[0]
            latents.append(latent)
            cursor = boundary
        return self.head(torch.cat(latents))


class DINOHead(nn.Module):
    """DINO projection head: MLP -> L2 normalize -> weight-normed linear."""

    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        batch_norm: bool = False,
        norm_last_layer: bool = True,
        *,
        num_layers: int = 3,
        latent_dim: int = 2048,
        bottleneck_dim: int = 256,
    ):
        super().__init__()
        num_layers = max(num_layers, 1)
        if num_layers == 1:
            self.mapping = nn.Linear(in_dim, bottleneck_dim)
        else:
            layers = [nn.Linear(in_dim, latent_dim)]
            if batch_norm:
                layers.append(nn.BatchNorm1d(latent_dim))
            layers.append(nn.GELU())
            for _ in range(num_layers - 2):
                layers.append(nn.Linear(latent_dim, latent_dim))
                if batch_norm:
                    layers.append(nn.BatchNorm1d(latent_dim))
                layers.append(nn.GELU())
            layers.append(nn.Linear(latent_dim, bottleneck_dim))
            self.mapping = nn.Sequential(*layers)
        self.apply(self._init_weights)
        final = nn.Linear(bottleneck_dim, out_dim, bias=False)
        self.last_layer = nn.utils.weight_norm(final)
        self.last_layer.weight_g.data.fill_(1)
        if norm_last_layer:
            # fix the norm of the last layer (per the DINO paper)
            self.last_layer.weight_g.requires_grad = False

    def _init_weights(self, m: nn.Module) -> None:
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def forward(self, net: Tensor) -> Tensor:
        net = self.mapping(net)
        net = nn.functional.normalize(net, dim=-1, p=2)
        return self.last_layer(net)


class DINOLoss(nn.Module):
    """Cross-entropy between teacher and student crops with EMA centering."""

    center: torch.Tensor

    def __init__(
        self,
        out_dim: int,
        teacher_temp: float,
        warmup_teacher_temp: float,
        warmup_teacher_temp_epochs: int,
        teacher_temp_epochs: int,
        *,
        student_temp: float = 0.1,
        center_momentum: float = 0.9,
    ):
        super().__init__()
        self.student_temp = student_temp
        self.center_momentum = center_momentum
        self.register_buffer("center", torch.zeros(1, out_dim))
        constant_epochs = teacher_temp_epochs - warmup_teacher_temp_epochs
        # warm the teacher temperature up linearly, then hold it constant
        self.teacher_temp_schedule = Scheduler(
            np.concatenate(
                (
                    np.linspace(
                        warmup_teacher_temp,
                        teacher_temp,
                        warmup_teacher_temp_epochs,
                    ),
                    np.ones(constant_epochs) * teacher_temp,
                )
            )
        )
        self.num_epochs = teacher_temp_epochs

    def forward(
        self,
        epoch: int,
        num_crops: int,
        student_output: Tensor,
        teacher_output: Tensor,
    ) -> Tensor:
        student_logits = (student_output / self.student_temp).chunk(num_crops)
        temp = self.teacher_temp_schedule[epoch]
        teacher_probs = F.softmax((teacher_output - self.center) / temp, dim=-1)
        # the teacher only sees the two global crops
        teacher_chunks = teacher_probs.detach().chunk(2)
        total = 0.0
        num_terms = 0
        for ti, t_probs in enumerate(teacher_chunks):
            for si, s_logit in enumerate(student_logits):
                if si == ti:
                    # never compare a crop against itself
                    continue
                term = torch.sum(-t_probs * F.log_softmax(s_logit, dim=-1), dim=-1)
                total += term.mean()
                num_terms += 1
        total /= num_terms
        self.update_center(teacher_output)
        return total

    @torch.no_grad()
    def update_center(self, teacher_output: Tensor) -> None:
        """EMA update of the center used to avoid teacher collapse."""
        batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
        if dist.is_initialized():
            dist.all_reduce(batch_center)
        batch_center = batch_center / (len(teacher_output) * get_world_size())
        m = self.center_momentum
        self.center = self.center * m + batch_center * (1.0 - m)


class DINOEvaluateLoss:
    """Evaluation-time DINO loss; reuses the training loss' center & temps."""

    def __init__(self, train_loss: DINOLoss):
        self.train_loss = train_loss

    def __call__(
        self,
        epoch: int,
        student_output: Tensor,
        teacher_output: Tensor,
    ) -> float:
        s_logits = student_output / self.train_loss.student_temp
        temp = self.train_loss.teacher_temp_schedule[epoch]
        centered = teacher_output - self.train_loss.center
        t_probs = F.softmax(centered / temp, dim=-1)
        loss = torch.sum(-t_probs * F.log_softmax(s_logits, dim=-1), dim=-1).mean()
        return loss.item()


@ModelWithCustomSteps.register("dino")
class DINO(ModelWithCustomSteps):
    """DINO model: student/teacher pair with custom train & evaluate steps."""

    custom_params_groups = True
    custom_ddp_initialization = True

    lr_schedule: Optional[Scheduler]
    wd_schedule: Optional[Scheduler]
    momentum_schedule: Optional[Scheduler]

    def __init__(
        self,
        encoder1d: str = "vit",
        encoder1d_config: Optional[Dict[str, Any]] = None,
        student_specific: Optional[Dict[str, Any]] = None,
        teacher_specific: Optional[Dict[str, Any]] = None,
        *,
        out_dim: int = 65536,
        use_bn_in_head: bool = False,
        norm_last_layer: bool = True,
        teacher_temp: float = 0.07,
        momentum_teacher: float = 0.996,
        warmup_teacher_temp: float = 0.04,
        warmup_teacher_temp_epochs: int = 30,
        teacher_temp_epochs: int,
        freeze_last_layer: int = 1,
        weight_decay: float = 0.04,
        weight_decay_end: float = 0.4,
        warmup_epochs: int = 10,
    ):
        super().__init__()
        base = update_dict(encoder1d_config or {}, _get_dino_defaults(encoder1d))
        student_cfg = update_dict(student_specific or {}, shallow_copy_dict(base))
        teacher_cfg = update_dict(teacher_specific or {}, shallow_copy_dict(base))
        student = Encoder1DBase.make(encoder1d, student_cfg)
        teacher = Encoder1DBase.make(encoder1d, teacher_cfg)
        self.ddp_student = self.ddp_teacher = None
        self.student = MultiCropWrapper(
            student,
            DINOHead(
                student.latent_dim,
                out_dim,
                use_bn_in_head,
                norm_last_layer,
            ),
        )
        self.teacher = MultiCropWrapper(
            teacher,
            DINOHead(teacher.latent_dim, out_dim, use_bn_in_head),
        )
        self.freeze_last_layer = freeze_last_layer
        # teacher starts as an exact copy of the student
        self.teacher.load_state_dict(self.student.state_dict())
        self.loss = DINOLoss(
            out_dim,
            teacher_temp,
            warmup_teacher_temp,
            warmup_teacher_temp_epochs,
            teacher_temp_epochs,
        )
        self.evaluate_loss = DINOEvaluateLoss(self.loss)
        self.momentum_teacher = momentum_teacher
        self.teacher_temp_epochs = teacher_temp_epochs
        self.weight_decay = weight_decay
        self.weight_decay_end = weight_decay_end
        self.warmup_epochs = warmup_epochs
        # schedules are built lazily on the first `train_step`
        self.lr_schedule = None
        self.wd_schedule = None
        self.momentum_schedule = None

    @property
    def student_for_training(self) -> MultiCropWrapper:
        return self.ddp_student or self.student

    @property
    def teacher_for_training(self) -> MultiCropWrapper:
        return self.ddp_teacher or self.teacher

    def forward(
        self,
        batch_idx: int,
        batch: tensor_dict_type,
        state: Optional[TrainerState] = None,
        **kwargs: Any,
    ) -> tensor_dict_type:
        """Inference path: L2-normalized latent from the student backbone."""
        net = self.student.backbone(batch_idx, batch, state, **kwargs)[LATENT_KEY]
        net = l2_normalize(net)
        return {LATENT_KEY: net}

    def onnx_forward(self, batch: tensor_dict_type) -> Any:
        inp = batch[INPUT_KEY]
        net = self.get_latent(inp, determinate=True)
        return net.view(inp.shape[0], self.student.backbone.latent_dim)

    def get_latent(self, net: Tensor, **kwargs: Any) -> Tensor:
        return self.forward(0, {INPUT_KEY: net}, **kwargs)[LATENT_KEY]

    def get_logits(self, net: Tensor) -> Tensor:
        return self.student(0, {INPUT_KEY: net})

    def state_dict(
        self,
        destination: Any = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Any:
        """Serialize, dropping DDP wrappers (they duplicate the raw modules)."""
        states = super().state_dict(destination, prefix, keep_vars)
        for key in list(states.keys()):
            if key.startswith("ddp"):
                states.pop(key)
        return states

    def summary_forward(self, batch_idx: int, batch: tensor_dict_type) -> None:
        self.student(batch_idx, to_device(batch, self.device))

    def _get_outputs(
        self,
        batch_idx: int,
        batch: tensor_dict_type,
        trainer: Any,
        forward_kwargs: Dict[str, Any],
    ) -> tensor_dict_type:
        # teacher only sees the first two (global) crops
        teacher_output = self.teacher_for_training(
            batch_idx,
            batch,
            trainer.state,
            img_end_idx=2,
            **forward_kwargs,
        )
        student_output = self.student_for_training(
            batch_idx,
            batch,
            trainer.state,
            **forward_kwargs,
        )
        return {"student": student_output, "teacher": teacher_output}

    def _get_loss(
        self,
        batch_idx: int,
        batch: tensor_dict_type,
        trainer: Any,
        forward_kwargs: Dict[str, Any],
    ) -> Tuple[tensor_dict_type, Tensor]:
        with torch.cuda.amp.autocast(enabled=trainer.use_amp):
            outputs = self._get_outputs(batch_idx, batch, trainer, forward_kwargs)
            epoch = trainer.state.epoch
            num_crops = len(batch[INPUT_KEY])
            student_output = outputs["student"]
            teacher_output = outputs["teacher"]
            loss = self.loss(epoch, num_crops, student_output, teacher_output)
        return outputs, loss

    def train_step(
        self,
        batch_idx: int,
        batch: tensor_dict_type,
        trainer: Any,
        forward_kwargs: Dict[str, Any],
        loss_kwargs: Dict[str, Any],
    ) -> StepOutputs:
        state = trainer.state
        # build lr / weight-decay schedules lazily (batch size is known here)
        if self.lr_schedule is None:
            self.lr_schedule = cosine_scheduler(
                self.lr * (len(batch[INPUT_KEY][0]) * get_world_size()) / 256.0,  # type: ignore
                self.min_lr,
                self.teacher_temp_epochs,
                state.num_step_per_epoch,
                warmup_epochs=self.warmup_epochs,
            )
        if self.wd_schedule is None:
            self.wd_schedule = cosine_scheduler(
                self.weight_decay,
                self.weight_decay_end,
                self.teacher_temp_epochs,
                state.num_step_per_epoch,
            )
        # manual scheduling (only the first params-group gets weight decay)
        optimizer = trainer.optimizers["all"]
        for i, param_group in enumerate(optimizer.param_groups):
            param_group["lr"] = self.lr_schedule[state.step]
            if i == 0:
                param_group["weight_decay"] = self.wd_schedule[state.step]
        # forward pass
        rs, loss = self._get_loss(batch_idx, batch, trainer, forward_kwargs)
        # backward pass
        optimizer.zero_grad()
        trainer.grad_scaler.scale(loss).backward()
        # clip norm
        if trainer.clip_norm > 0.0:
            trainer.grad_scaler.unscale_(optimizer)
            nn.utils.clip_grad_norm_(
                self.student_for_training.parameters(),
                max_norm=trainer.clip_norm,
            )
        # freeze the head's last layer during the first epochs
        if state.epoch <= self.freeze_last_layer:
            for n, p in self.student.named_parameters():
                if "last_layer" in n:
                    p.grad = None
        # update parameters
        trainer.grad_scaler.step(optimizer)
        trainer.grad_scaler.update()
        # EMA update of the teacher with a cosine momentum schedule
        if self.momentum_schedule is None:
            self.momentum_schedule = cosine_scheduler(
                self.momentum_teacher,
                1.0,
                self.teacher_temp_epochs,
                state.num_step_per_epoch,
            )
        with torch.no_grad():
            m = self.momentum_schedule[state.step]
            for param_q, param_k in zip(
                self.student.parameters(),
                self.teacher.parameters(),
            ):
                param_k.data.mul_(m).add_((1.0 - m) * param_q.detach().data)
        return StepOutputs(rs, {LOSS_KEY: loss.item()})

    def evaluate_step(  # type: ignore
        self,
        loader: CVLoader,
        portion: float,
        trainer: Any,
    ) -> MetricsOutputs:
        losses = []
        for i, batch in enumerate(loader):
            if i / len(loader) >= portion:
                break
            batch = to_device(batch, self.device)
            outputs = self._get_outputs(i, batch, trainer, {})
            losses.append(
                self.evaluate_loss(
                    trainer.state.epoch,
                    outputs["student"],
                    outputs["teacher"],
                )
            )
        # gather
        mean_loss = sum(losses) / len(losses)
        return MetricsOutputs(
            -mean_loss,
            {
                "loss": mean_loss,
                "lr": self.lr_schedule[trainer.state.step],  # type: ignore
                "wd": self.wd_schedule[trainer.state.step],  # type: ignore
            },
        )

    @staticmethod
    def params_groups(m: nn.Module) -> Any:
        """Split params: biases & 1-d (norm) params get no weight decay."""
        regularized = []
        bias_and_norm = []
        for name, param in m.named_parameters():
            if not param.requires_grad:
                continue
            if name.endswith(".bias") or len(param.shape) == 1:
                bias_and_norm.append(param)
            else:
                regularized.append(param)
        return [{"params": regularized}, {"params": bias_and_norm, "weight_decay": 0.0}]

    def _init_with_trainer(self, trainer: Any) -> None:
        # the teacher is only ever updated via EMA, never by gradients
        self.teacher_for_training.requires_grad_(False)

    def init_ddp(self, trainer: Any) -> None:
        if has_batch_norms(self.student):
            self.student = nn.SyncBatchNorm.convert_sync_batchnorm(self.student)
            self.teacher = nn.SyncBatchNorm.convert_sync_batchnorm(self.teacher)
        self.ddp_student = DDP(self.student, device_ids=[trainer.rank])
        self.ddp_teacher = DDP(self.teacher, device_ids=[trainer.rank])
        self.ddp_teacher.requires_grad_(False)  # type: ignore

    def permute_trainer_config(self, trainer_config: Dict[str, Any]) -> None:
        # TODO : make `permute_trainer_config` more general
        if trainer_config["clip_norm"] == 0.0:
            trainer_config["clip_norm"] = 3.0
        if trainer_config["lr"] is None:
            trainer_config["lr"] = 0.0005
        self.lr = trainer_config["lr"]
        self.min_lr = trainer_config.pop("min_lr", 1.0e-6)
        if trainer_config["optimizer_name"] is None:
            trainer_config["optimizer_name"] = "adamw"
        trainer_config["scheduler_name"] = "none"


__all__ = [
    "DINO",
]
from __future__ import absolute_import

# external modules
from past.builtins import basestring
import numpy as num

# ANUGA modules
import anuga.utilities.log as log
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a, netcdf_float

from .asc2dem import asc2dem


def dem2array(filename, variable_name='elevation',
              easting_min=None, easting_max=None,
              northing_min=None, northing_max=None,
              use_cache=False, verbose=False):
    """Read Digital Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    name_in should be .dem file to be read.

    Returns (x, y, Z): the easting / northing axes and the elevation grid,
    oriented consistently with grd2array (NODATA cells become NaN).
    """

    import os
    from anuga.file.netcdf import NetCDFFile

    msg = 'Filename must be a text string'
    assert isinstance(filename, basestring), msg

    msg = 'Extension should be .dem'
    assert os.path.splitext(filename)[1] in ['.dem'], msg

    msg = 'Variable name must be a text string'
    assert isinstance(variable_name, basestring), msg

    # Get NetCDF
    infile = NetCDFFile(filename, netcdf_mode_r)

    # FIX: the NetCDF handle was previously never closed (file handle leak);
    # read everything we need, then close deterministically.
    try:
        if verbose:
            log.critical('Reading DEM from %s' % (filename))

        ncols = int(infile.ncols)
        nrows = int(infile.nrows)
        xllcorner = float(infile.xllcorner)  # Easting of lower left corner
        yllcorner = float(infile.yllcorner)  # Northing of lower left corner
        cellsize = float(infile.cellsize)
        NODATA_value = float(infile.NODATA_value)

        zone = int(infile.zone)
        false_easting = float(infile.false_easting)
        false_northing = float(infile.false_northing)

        # Text strings
        projection = infile.projection
        datum = infile.datum
        units = infile.units

        # `[:]` materializes the variable into an in-memory array,
        # so it stays valid after the file is closed.
        Z = infile.variables[variable_name][:]
    finally:
        infile.close()

    Z = Z.reshape(nrows, ncols)
    # mask out the NODATA sentinel as NaN
    Z = num.where(Z == NODATA_value, num.nan, Z)
    # changed the orientation of Z array to make it consistent with grd2array result
    Z = num.fliplr(Z.T)

    # print ncols, nrows, xllcorner,yllcorner, cellsize, NODATA_value, zone

    x = num.linspace(xllcorner, xllcorner + (ncols - 1) * cellsize, ncols)
    y = num.linspace(yllcorner, yllcorner + (nrows - 1) * cellsize, nrows)

    return x, y, Z
import datetime
import json

from nameko.events import EventDispatcher, event_handler

from simplebank.chassis import init_logger, init_statsd


class FeesService:
    """Nameko service that charges fees whenever an order is placed."""

    name = "fees_service"

    # shared statsd client and structured logger for this service
    statsd = init_statsd('simplebank-demo.fees', 'statsd')
    logger = init_logger()

    @event_handler("market_service", "order_placed")
    @statsd.timer('charge_fee')
    def charge_fee(self, payload):
        """Handle an `order_placed` event and charge the corresponding fee."""
        self.logger.debug(
            "this is a debug message from fees service", extra={"uuid": payload})
        self.logger.info("charging fees", extra={"uuid": payload})
        return payload
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from .transform import Transform
from .transform_util import TransformUtil
from ..expressions import Expressions
from ..types import TypeID


class Identity(Transform):
    """Identity partition transform: values pass through unchanged."""

    @staticmethod
    def get(type_var):
        """Factory returning an Identity transform for `type_var`."""
        return Identity(type_var)

    def __init__(self, type_var):
        self.type_var = type_var

    def apply(self, value):
        # identity: the partition value IS the source value
        return value

    def can_transform(self, type_var):
        return type_var.is_primitive_type()

    def get_result_type(self, source_type):
        return source_type

    def project(self, name, predicate):
        # inclusive projection coincides with the strict one for identity
        return self.project_strict(name, predicate)

    def project_strict(self, name, predicate):
        if predicate.lit is None:
            return Expressions.predicate(predicate.op, name)
        return Expressions.predicate(predicate.op, name, predicate.lit.value)

    def to_human_string(self, value):
        """Render `value` for humans according to the source type."""
        if value is None:
            return "null"

        type_id = self.type_var.type_id
        if type_id == TypeID.DATE:
            return TransformUtil.human_day(value)
        if type_id == TypeID.TIME:
            return TransformUtil.human_time(value)
        if type_id == TypeID.TIMESTAMP:
            if self.type_var.adjust_to_utc:
                return TransformUtil.human_timestamp_with_timezone(value)
            return TransformUtil.human_timestamp_without_timezone(value)
        if type_id in (TypeID.BINARY, TypeID.FIXED):
            raise NotImplementedError()
            # if isinstance(value, bytearray):
            #     return base64.b64encode(value)
            # elif isinstance(value, bytes):
            #     return base64.b64encode(bytes(value))
            # else:
            #     raise RuntimeError("Unsupported binary type: %s" % value.__class__.__name__)
        return str(value)

    def __str__(self):
        return "identity"

    def __eq__(self, other):
        if self is other:
            return True
        if other is None or not isinstance(other, Identity):
            return False
        return self.type_var == other.type_var

    def __hash__(self):
        return hash(self.__key())

    def __key(self):
        return Identity.__class__, self.type_var
"""This plugin finds every memory access and comments the row with address and value"""<import_from_stmt>yapsy.IPlugin IPlugin<import_from_stmt>core.api Api<class_stmt>PluginCommentMemAccesses(IPlugin)<block_start><def_stmt>execute self api:Api<block_start>want_to_continue=api.ask_user("Warning" "This plugin may replace some of your comments, continue?")<if_stmt><not>want_to_continue<block_start><return><block_end>trace_data=api.get_trace_data()<line_sep>trace=api.get_visible_trace()<for_stmt>i,t enumerate(trace)<block_start><if_stmt>'mem'<in>t<and>t['mem']<block_start>comment=""<for_stmt>mem t['mem']<block_start>addr=hex(mem['addr'])<line_sep>value=mem['value']<if_stmt>mem['access']<eq>"READ"<block_start>comment<augadd>f"[{addr}] -> {hex(value)} "<block_end><elif_stmt>mem['access']<eq>"WRITE"<block_start>comment<augadd>f"[{addr}] <- {hex(value)} "<block_end><if_stmt>0x20<le>value<le>0x7e<block_start>comment<augadd>f"'{chr(value)}' "<block_end><block_end># Add comment to full trace row=t["id"]<line_sep>trace_data.trace[row]['comment']=comment<line_sep># Add comment to visible trace too because it could be filtered_trace trace[i]['comment']=comment<block_end><block_end>api.update_trace_table()<block_end><block_end>
# pip install pycocotools opencv-python opencv-contrib-python
# wget https://github.com/opencv/opencv_extra/raw/master/testdata/cv/ximgproc/model.yml.gz
"""Evaluate class-agnostic region proposals (selective search / edge boxes /
precomputed predictions) against COCO ground truth."""
import os
import copy
import time
import argparse
import contextlib
import multiprocessing

import numpy as np
import cv2
import cv2.ximgproc
import matplotlib.patches
import matplotlib.pyplot as plt
import torch
from torchvision.datasets import CocoDetection
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval


def imshow_with_boxes(img, boxes_xywh, savefig):
    """Save `img` with red xywh boxes drawn on top; returns the output path."""
    plt.figure()
    plt.imshow(img)
    plt.axis('off')
    for x, y, w, h in boxes_xywh.tolist():
        rect = matplotlib.patches.Rectangle(
            (x, y), w, h, linewidth=1, edgecolor='r', facecolor='none'
        )
        plt.gca().add_patch(rect)
    plt.savefig(savefig)
    plt.close()
    return savefig


def selective_search(img, fast, topk):
    """Top-k selective-search proposals (xywh) with unit scores."""
    algo = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
    algo.setBaseImage(img)
    if fast:
        algo.switchToSelectiveSearchFast()
    else:
        algo.switchToSelectiveSearchQuality()
    boxes_xywh = algo.process().astype(np.float32)
    scores = np.ones((len(boxes_xywh),))
    return boxes_xywh[:topk], scores[:topk]


def edge_boxes(
    img,
    fast,
    topk,
    bgr2rgb=(2, 1, 0),
    # NOTE: evaluated once at import time; None if the model file is missing
    algo_edgedet=cv2.ximgproc.createStructuredEdgeDetection('model.yml.gz')
    if os.path.exists('model.yml.gz')
    else None,
):
    """Top-k EdgeBoxes proposals; falls back to one full-image box."""
    edges = algo_edgedet.detectEdges(img[..., bgr2rgb].astype(np.float32) / 255.0)
    orimap = algo_edgedet.computeOrientation(edges)
    edges = algo_edgedet.edgesNms(edges, orimap)
    algo_edgeboxes = cv2.ximgproc.createEdgeBoxes()
    algo_edgeboxes.setMaxBoxes(topk)
    boxes_xywh, scores = algo_edgeboxes.getBoundingBoxes(edges, orimap)
    if scores is None:
        # no boxes found: emit a single box covering the whole image
        boxes_xywh = np.array([[0, 0.0, img.shape[1], img.shape[0]]])
        scores = np.ones((1,))
    return boxes_xywh, scores.squeeze()


def process_image(
    image_id,
    img_extra,
    fast,
    resize,
    algo,
    rgb2bgr=(2, 1, 0),
    category_other=-1,
    topk=1000,
):
    """Run `algo` on one dataset sample; boxes are rescaled to original size."""
    img = np.asarray(img_extra[0])[..., rgb2bgr]
    h, w = img.shape[:2]
    img_det = img if resize == 1 else cv2.resize(img, (resize, resize))
    boxes_xywh, scores = algo(img_det, fast, topk)
    scale = 1 if resize == 1 else np.array([w, h, w, h]) / resize
    boxes_xywh = boxes_xywh.astype(np.float32) * scale
    labels = np.full((len(boxes_xywh),), category_other, dtype=int)
    return image_id, dict(boxes=boxes_xywh, scores=scores, labels=labels)


def process_loaded(image_id, loaded, category_other=-1):
    """Convert a precomputed prediction file into (xywh boxes, scores, labels)."""
    boxes_xyxy = loaded['pred_boxes_'].clamp(min=0)
    boxes_xywh = torch.stack(
        [
            boxes_xyxy[:, 0],
            boxes_xyxy[:, 1],
            boxes_xyxy[:, 2] - boxes_xyxy[:, 0],
            boxes_xyxy[:, 3] - boxes_xyxy[:, 1],
        ],
        dim=-1,
    )
    labels = np.full((len(boxes_xywh),), category_other, dtype=int)
    num_classes = loaded['pred_logits'].shape[-1]
    scores = loaded['pred_logits'][:, 1::num_classes - 2][:, 0]
    order = scores.argsort(descending=True)
    return image_id, dict(
        boxes=boxes_xywh[order], scores=scores[order], labels=labels[order]
    )


class CocoEvaluator(object):
    """Thin wrapper around COCOeval supporting class-agnostic evaluation."""

    def __init__(self, coco_gt, iou_type='bbox', useCats=0, maxDets=100):
        self.coco_gt = copy.deepcopy(coco_gt)
        self.coco_eval = COCOeval(coco_gt, iouType=iou_type)
        if maxDets != [100]:
            self.coco_eval.params.maxDets = maxDets
        if not useCats:
            # class-agnostic: collapse every GT category into -1
            self.coco_eval.params.useCats = useCats
            self.coco_eval.params.catIds = [-1]
            coco_gt.loadAnns = lambda imgIds, loadAnns=coco_gt.loadAnns: [
                gt.update(dict(category_id=-1)) or gt for gt in loadAnns(imgIds)
            ]
        self.accumulate = self.coco_eval.accumulate
        self.summarize = self.coco_eval.summarize

    @staticmethod
    def call_without_stdout(func, *args):
        """Invoke `func` while discarding everything it prints."""
        with open(os.devnull, 'w') as devnull:
            with contextlib.redirect_stdout(devnull):
                return func(*args)

    def update(self, predictions):
        tolist = lambda a: [a.tolist()] if a.ndim == 0 else a.tolist()
        detection_results = [
            dict(image_id=image_id, bbox=bbox, score=score, category_id=category_id)
            for image_id, pred in predictions.items()
            if pred
            for bbox, score, category_id in zip(
                pred['boxes'].tolist(), tolist(pred['scores']), pred['labels'].tolist()
            )
        ]
        self.coco_eval.cocoDt = (
            self.call_without_stdout(COCO.loadRes, self.coco_gt, detection_results)
            if detection_results
            else COCO()
        )
        self.coco_eval.params.imgIds = list(predictions)
        self.call_without_stdout(self.coco_eval.evaluate)


def main(args):
    coco_mode = 'instances'
    PATHS = dict(
        train=(
            os.path.join(args.dataset_root, f'train{args.dataset_year}'),
            os.path.join(
                args.dataset_root,
                'annotations',
                f'{coco_mode}_train{args.dataset_year}.json',
            ),
        ),
        val=(
            os.path.join(args.dataset_root, f'val{args.dataset_year}'),
            os.path.join(
                args.dataset_root,
                'annotations',
                f'{coco_mode}_val{args.dataset_year}.json',
            ),
        ),
    )
    dataset = CocoDetection(*PATHS[args.dataset_split])
    coco_evaluator = CocoEvaluator(dataset.coco, maxDets=args.max_dets)
    tic = time.time()
    if args.output_dir:
        os.makedirs(args.output_dir, exist_ok=True)
    job_args = zip(
        dataset.ids,
        dataset,
        [args.fast] * len(dataset),
        [args.resize] * len(dataset),
        [globals()[args.algo]] * len(dataset),
    )
    if args.algo != 'process_loaded':
        # compute proposals in parallel
        preds = dict(
            multiprocessing.Pool(processes=args.num_workers).starmap(
                process_image, job_args
            )
        )
    else:
        # load precomputed predictions sequentially
        preds = []
        for i, t in enumerate(job_args):
            loaded = torch.load(
                os.path.join(args.input_dir, str(t[0]) + '.pt'), map_location='cpu'
            )
            preds.append(process_loaded(t[0], loaded))
            if args.output_dir:
                imshow_with_boxes(
                    t[1][0],
                    preds[-1][1]['boxes'][:5],
                    os.path.join(args.output_dir, str(t[0]) + '.jpg'),
                )
            print(i) if i % 50 == 0 else None
        preds = dict(preds)
    print('proposals', time.time() - tic)
    tic = time.time()
    coco_evaluator.update(preds)
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    print('evaluator', time.time() - tic)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input-dir', '-i')
    parser.add_argument('--output-dir', '-o')
    parser.add_argument('--dataset-root')
    parser.add_argument('--dataset-split', default='val', choices=['train', 'val'])
    parser.add_argument('--dataset-year', type=int, default=2017)
    parser.add_argument('--num-workers', type=int, default=16)
    parser.add_argument(
        '--algo',
        default='selective_search',
        choices=['selective_search', 'edge_boxes', 'process_loaded'],
    )
    parser.add_argument('--fast', action='store_true')
    parser.add_argument('--resize', type=int, default=128)
    parser.add_argument('--max-dets', type=int, nargs='*', default=[100])
    args = parser.parse_args()
    print(args)
    main(args)
<import_from_stmt>names_dataset.nd_v1 NameDatasetV1# noqa <import_from_stmt>names_dataset.nd_v2 NameDataset# noqa
<import_from_stmt>flask g current_app jsonify<import_from_stmt>sqlalchemy asc desc func<import_from_stmt>apps.interface.models.interfaceapimsg InterfaceApiMsg<import_from_stmt>apps.interface.models.interfacecase InterfaceCase<import_from_stmt>apps.interface.models.interfacemodule InterfaceModule<import_from_stmt>apps.interface.models.interfaceproject InterfaceProject<import_from_stmt>apps.interface.util.utils *<import_from_stmt>library.api.db db<import_from_stmt>library.api.transfer transfer2json<class_stmt>InterfaceModuleBusiness(object)<block_start>@classmethod<def_stmt>project_permission cls pid=<none> id=<none><block_start><if_stmt>g.is_admin<block_start><return>0<block_end><if_stmt>pid<block_start><return>0<if>pid<in>g.projectid<else>1<block_end><else_stmt><block_start>ret=InterfaceModule.query.add_columns(InterfaceModule.project_id.label('projectid')).filter(InterfaceModule.id<eq>id).first()<line_sep><return>0<if>ret.projectid<in>g.projectid<else>1<block_end><block_end>@classmethod<def_stmt>_query cls<block_start><return>InterfaceModule.query.add_columns(InterfaceModule.id.label('id') InterfaceModule.name.label('name') InterfaceModule.project_id.label('projectid') InterfaceModule.num.label('num') InterfaceModule.weight.label('weight') InterfaceModule.status.label('status') )<block_end>@classmethod@transfer2json('?id|!name|!projectid|!num|!weight|!status')<def_stmt>query_all_json cls limit offset<block_start>ret=cls._query().filter(InterfaceModule.status<eq>InterfaceModule.ACTIVE).order_by(desc(InterfaceModule.id)).limit(limit).offset(offset).all()<line_sep><return>ret<block_end>@classmethod<def_stmt>module_create cls name project_id num<block_start><try_stmt><block_start>m=InterfaceModule(name=name project_id=project_id num=num )<line_sep>db.session.add(m)<line_sep>db.session.commit()<line_sep><return>0 <none><block_end><except_stmt>Exception<as>e<block_start>current_app.logger.error(str(e))<line_sep><return>102 
str(e)<block_end><block_end>@classmethod<def_stmt>module_delete cls id<block_start><try_stmt><block_start>m=InterfaceModule.query.get(id)<line_sep>m.status=InterfaceModule.DISABLE<line_sep>db.session.add(m)<line_sep>db.session.commit()<line_sep><return>0<block_end><except_stmt>Exception<as>e<block_start>current_app.logger.error(str(e))<line_sep><return>105 str(e)<block_end><block_end>@classmethod<def_stmt>module_modify cls id name project_id<block_start><try_stmt><block_start>m=InterfaceModule.query.get(id)<line_sep>m.name=name<line_sep>m.project_id=project_id<line_sep>db.session.add(m)<line_sep>db.session.commit()<line_sep><return>0 <none><block_end><except_stmt>Exception<as>e<block_start>current_app.logger.error(str(e))<line_sep><return>102 str(e)<block_end><block_end>@classmethod@transfer2json('?id|!name|!projectid|!num|!weight|!status')<def_stmt>query_json_by_id cls id<block_start>ret=cls._query().filter(InterfaceModule.status<eq>InterfaceModule.ACTIVE InterfaceModule.id<eq>id).all()<line_sep><return>ret<block_end>@classmethod<def_stmt>_query_total cls<block_start><return>InterfaceModule.query.outerjoin(InterfaceCase InterfaceCase.module_id<eq>InterfaceModule.id).add_columns(InterfaceModule.id.label('id') InterfaceModule.name.label('name') InterfaceModule.project_id.label('projectid') InterfaceModule.num.label('num') InterfaceModule.weight.label('weight') InterfaceModule.status.label('status') func.count('*').label('total') )<block_end>@classmethod@transfer2json('?id|!name|!projectid|!num|!weight|!status|!total')<def_stmt>query_by_project_id_total cls pid# TODO : here need case import # ret = cls._query_total().filter(InterfaceModule.status == InterfaceModule.ACTIVE, # InterfaceModule.project_id == pid, Case.status != Case.DISABLE).order_by( # desc(InterfaceModule.id)).group_by(Case.module_id).all() <block_start>ret=[]<line_sep><return>ret<block_end>@classmethod@transfer2json('?id|!name|!projectid|!num|!weight|!status')<def_stmt>query_by_project_ids cls 
pid<block_start>ret=cls._query().filter(InterfaceModule.status<eq>InterfaceModule.ACTIVE InterfaceModule.project_id<eq>pid).order_by(desc(InterfaceModule.id)).all()<line_sep><return>ret<block_end>@classmethod<def_stmt>query_by_project_id cls pid<block_start>tlist=[]<line_sep>total_ret=cls.query_by_project_id_total(pid)<for_stmt>a total_ret<block_start>tlist.append(a['id'])<block_end>ret=cls.query_by_project_ids(pid)<for_stmt>i range(len(ret))<block_start><if_stmt>ret[i]['id']<not><in>tlist<block_start>ret[i]['total']=0<line_sep>total_ret.append(ret[i])<block_end><block_end>total_ret=sorted(total_ret key=<lambda>x:x['id'] reverse=<true>)<line_sep><return>total_ret<block_end>@classmethod<def_stmt>find_model cls page per_page project_name<block_start><if_stmt><not>project_name<block_start><return>jsonify({'msg':'请先选择项目' 'status':0})<block_end>peoject_id=InterfaceProject.query.filter_by(name=project_name status=InterfaceProject.ACTIVE).first().id<line_sep>all_module=InterfaceModule.query.filter_by(status=InterfaceModule.ACTIVE project_id=peoject_id).order_by(InterfaceModule.num.asc())<line_sep>pagination=all_module.paginate(page per_page=per_page error_out=<false>)<line_sep>my_module=pagination.items<line_sep>total=pagination.total<line_sep>my_module=[{'name':c.name 'moduleId':c.id 'num':c.num}<for>c my_module]<line_sep># 查询出所有的接口模块是为了接口录入的时候可以选所有的模块 _all_module=[{'name':s.name 'moduleId':s.id 'num':s.num}<for>s all_module.all()]<line_sep><return>jsonify({'data':my_module 'total':total 'status':1 'all_module':_all_module})<block_end>@classmethod<def_stmt>add_model cls project_name name ids number<block_start><if_stmt><not>project_name<block_start><return>jsonify({'msg':'请先创建项目' 'status':0})<block_end><if_stmt><not>name<block_start><return>jsonify({'msg':'模块名称不能为空' 'status':0})<block_end>project_id=InterfaceProject.query.filter_by(name=project_name status=InterfaceProject.ACTIVE).first().id<line_sep>num=auto_num(number InterfaceModule project_id=project_id 
status=InterfaceModule.ACTIVE)<if_stmt>ids<block_start>old_data=InterfaceModule.query.filter_by(id=ids status=InterfaceModule.ACTIVE).first()<line_sep>old_num=old_data.num<line_sep>list_data=InterfaceModule.query.filter(InterfaceModule.status<eq>InterfaceModule.ACTIVE InterfaceModule.project_id<eq>project_id).order_by(InterfaceModule.num.asc()).all()<if_stmt>InterfaceModule.query.filter_by(name=name project_id=project_id status=InterfaceModule.ACTIVE).first()<and>name<ne>old_data.name<block_start><return>jsonify({'msg':'模块名字重复' 'status':0})<block_end>num_sort(num old_num list_data old_data)<line_sep>InterfaceModuleBusiness.module_modify(ids name project_id)<line_sep><return>jsonify({'msg':'修改成功' 'status':1})<block_end><else_stmt><block_start><if_stmt>InterfaceModule.query.filter_by(name=name project_id=project_id status=InterfaceModule.ACTIVE).first()<block_start><return>jsonify({'msg':'模块名字重复' 'status':0})<block_end><else_stmt><block_start>InterfaceModuleBusiness.module_create(name project_id num)<line_sep><return>jsonify({'msg':'新建成功' 'status':1})<block_end><block_end><block_end>@classmethod<def_stmt>del_model cls ids# _edit = InterfaceModule.query.filter_by(id=ids).first() # if current_user.id != Project.query.filter_by(id=_edit.project_id).first().user_id: # return jsonify({'msg': '不能删除别人项目下的模块', 'status': 0}) <block_start><if_stmt>InterfaceApiMsg.query.filter(InterfaceApiMsg.module_id<eq>ids InterfaceApiMsg.status<eq>InterfaceApiMsg.ACTIVE).order_by(asc(InterfaceApiMsg.num)).all()<block_start><return>jsonify({'msg':'请先删除模块下的接口用例' 'status':0})<block_end>InterfaceModuleBusiness.module_delete(ids)<line_sep><return>jsonify({'msg':'删除成功' 'status':1})<block_end>@classmethod<def_stmt>stick_module cls module_id project_name<block_start>old_data=InterfaceModule.query.filter_by(id=module_id status=InterfaceModule.ACTIVE).first()<line_sep>old_num=old_data.num<line_sep>list_data_id=InterfaceProject.query.filter_by(name=project_name 
status=InterfaceProject.ACTIVE).first().id<line_sep>list_data=InterfaceModule.query.filter_by(project_id=list_data_id status=InterfaceModule.ACTIVE).order_by(InterfaceModule.num.asc()).all()<line_sep>num_sort(1 old_num list_data old_data)<line_sep>db.session.commit()<line_sep><return>jsonify({'msg':'置顶完成' 'status':1})<block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.utils.data<as>data<import_stmt>torch.nn.functional<as>F<import_stmt>os<import_stmt>cv2<import_stmt>math<import_stmt>random<import_stmt>json<import_stmt>csv<import_stmt>pickle<import_stmt>os.path<as>osp<import_from_stmt>glob glob<import_stmt>raft3d.projective_ops<as>pops<import_from_stmt>. frame_utils<import_from_stmt>.augmentation RGBDAugmentor SparseAugmentor<class_stmt>KITTIEval(data.Dataset)<block_start>crop=80<def_stmt>__init__ self image_size=<none> root='datasets/KITTI' do_augment=<true><block_start>self.init_seed=<none><line_sep>mode="testing"<line_sep>self.image1_list=sorted(glob(osp.join(root mode "image_2/*10.png")))<line_sep>self.image2_list=sorted(glob(osp.join(root mode "image_2/*11.png")))<line_sep>self.disp1_ga_list=sorted(glob(osp.join(root mode "disp_ganet_{}/*10.png".format(mode))))<line_sep>self.disp2_ga_list=sorted(glob(osp.join(root mode "disp_ganet_{}/*11.png".format(mode))))<line_sep>self.calib_list=sorted(glob(osp.join(root mode "calib_cam_to_cam/*.txt")))<line_sep>self.intrinsics_list=[]<for_stmt>calib_file self.calib_list<block_start><with_stmt>open(calib_file)<as>f<block_start>reader=csv.reader(f delimiter=' ')<for_stmt>row reader<block_start><if_stmt>row[0]<eq>'K_02:'<block_start>K=np.array(row[1:] dtype=np.float32).reshape(3 3)<line_sep>kvec=np.array([K[0 0] K[1 1] K[0 2] K[1 2]])<line_sep>self.intrinsics_list.append(kvec)<block_end><block_end><block_end><block_end><block_end>@staticmethod<def_stmt>write_prediction index disp1 disp2 flow<block_start><def_stmt>writeFlowKITTI filename uv<block_start>uv=64.0<times>uv+2<power>15<line_sep>valid=np.ones([uv.shape[0] uv.shape[1] 1])<line_sep>uv=np.concatenate([uv valid] axis=-1).astype(np.uint16)<line_sep>cv2.imwrite(filename uv[<ellipsis> ::-1])<block_end><def_stmt>writeDispKITTI filename disp<block_start>disp=(256<times>disp).astype(np.uint16)<line_sep>cv2.imwrite(filename disp)<block_end>disp1=np.pad(disp1 ((KITTIEval.crop 0) (0 
0)) mode='edge')<line_sep>disp2=np.pad(disp2 ((KITTIEval.crop 0) (0 0)) mode='edge')<line_sep>flow=np.pad(flow ((KITTIEval.crop 0) (0 0) (0 0)) mode='edge')<line_sep>disp1_path='kitti_submission/disp_0/%06d_10.png'%index<line_sep>disp2_path='kitti_submission/disp_1/%06d_10.png'%index<line_sep>flow_path='kitti_submission/flow/%06d_10.png'%index<line_sep>writeDispKITTI(disp1_path disp1)<line_sep>writeDispKITTI(disp2_path disp2)<line_sep>writeFlowKITTI(flow_path flow)<block_end><def_stmt>__len__ self<block_start><return>len(self.image1_list)<block_end><def_stmt>__getitem__ self index<block_start>intrinsics=self.intrinsics_list[index]<line_sep>image1=cv2.imread(self.image1_list[index])<line_sep>image2=cv2.imread(self.image2_list[index])<line_sep>disp1=cv2.imread(self.disp1_ga_list[index] cv2.IMREAD_ANYDEPTH)/256.0<line_sep>disp2=cv2.imread(self.disp2_ga_list[index] cv2.IMREAD_ANYDEPTH)/256.0<line_sep>image1=image1[self.crop:]<line_sep>image2=image2[self.crop:]<line_sep>disp1=disp1[self.crop:]<line_sep>disp2=disp2[self.crop:]<line_sep>intrinsics[3]<augsub>self.crop<line_sep>image1=torch.from_numpy(image1).float().permute(2 0 1)<line_sep>image2=torch.from_numpy(image2).float().permute(2 0 1)<line_sep>disp1=torch.from_numpy(disp1).float()<line_sep>disp2=torch.from_numpy(disp2).float()<line_sep>intrinsics=torch.from_numpy(intrinsics).float()<line_sep><return>image1 image2 disp1 disp2 intrinsics<block_end><block_end><class_stmt>KITTI(data.Dataset)<block_start><def_stmt>__init__ self image_size=<none> root='datasets/KITTI' do_augment=<true><block_start><import_stmt>csv<line_sep>self.init_seed=<none><line_sep>self.crop=80<if_stmt>do_augment<block_start>self.augmentor=SparseAugmentor(image_size)<block_end><else_stmt><block_start>self.augmentor=<none><block_end>self.image1_list=sorted(glob(osp.join(root "training" "image_2/*10.png")))<line_sep>self.image2_list=sorted(glob(osp.join(root "training" "image_2/*11.png")))<line_sep>self.disp1_list=sorted(glob(osp.join(root "training" 
"disp_occ_0/*10.png")))<line_sep>self.disp2_list=sorted(glob(osp.join(root "training" "disp_occ_1/*10.png")))<line_sep>self.disp1_ga_list=sorted(glob(osp.join(root "training" "disp_ganet/*10.png")))<line_sep>self.disp2_ga_list=sorted(glob(osp.join(root "training" "disp_ganet/*11.png")))<line_sep>self.flow_list=sorted(glob(osp.join(root "training" "flow_occ/*10.png")))<line_sep>self.calib_list=sorted(glob(osp.join(root "training" "calib_cam_to_cam/*.txt")))<line_sep>self.intrinsics_list=[]<for_stmt>calib_file self.calib_list<block_start><with_stmt>open(calib_file)<as>f<block_start>reader=csv.reader(f delimiter=' ')<for_stmt>row reader<block_start><if_stmt>row[0]<eq>'K_02:'<block_start>K=np.array(row[1:] dtype=np.float32).reshape(3 3)<line_sep>kvec=np.array([K[0 0] K[1 1] K[0 2] K[1 2]])<line_sep>self.intrinsics_list.append(kvec)<block_end><block_end><block_end><block_end><block_end><def_stmt>__len__ self<block_start><return>len(self.image1_list)<block_end><def_stmt>__getitem__ self index<block_start><if_stmt><not>self.init_seed<block_start>worker_info=torch.utils.data.get_worker_info()<if_stmt>worker_info<is><not><none><block_start>torch.manual_seed(worker_info.id)<line_sep>np.random.seed(worker_info.id)<line_sep>random.seed(worker_info.id)<line_sep>self.init_seed=<true><block_end><block_end>image1=cv2.imread(self.image1_list[index])<line_sep>image2=cv2.imread(self.image2_list[index])<line_sep>disp1=cv2.imread(self.disp1_list[index] cv2.IMREAD_ANYDEPTH)/256.0<line_sep>disp2=cv2.imread(self.disp2_list[index] cv2.IMREAD_ANYDEPTH)/256.0<line_sep>disp1_dense=cv2.imread(self.disp1_ga_list[index] cv2.IMREAD_ANYDEPTH)/256.0<line_sep>disp2_dense=cv2.imread(self.disp2_ga_list[index] cv2.IMREAD_ANYDEPTH)/256.0<line_sep>flow,valid=frame_utils.readFlowKITTI(self.flow_list[index])<line_sep>intrinsics=self.intrinsics_list[index]<line_sep>SCALE=np.random.uniform(0.08 0.15)<line_sep># crop top 80 pixels, no ground truth information 
image1=image1[self.crop:]<line_sep>image2=image2[self.crop:]<line_sep>disp1=disp1[self.crop:]<line_sep>disp2=disp2[self.crop:]<line_sep>flow=flow[self.crop:]<line_sep>valid=valid[self.crop:]<line_sep>disp1_dense=disp1_dense[self.crop:]<line_sep>disp2_dense=disp2_dense[self.crop:]<line_sep>intrinsics[3]<augsub>self.crop<line_sep>image1=torch.from_numpy(image1).float().permute(2 0 1)<line_sep>image2=torch.from_numpy(image2).float().permute(2 0 1)<line_sep>disp1=torch.from_numpy(disp1/intrinsics[0])/SCALE<line_sep>disp2=torch.from_numpy(disp2/intrinsics[0])/SCALE<line_sep>disp1_dense=torch.from_numpy(disp1_dense/intrinsics[0])/SCALE<line_sep>disp2_dense=torch.from_numpy(disp2_dense/intrinsics[0])/SCALE<line_sep>dz=(disp2-disp1_dense).unsqueeze(dim=-1)<line_sep>depth1=1.0/disp1_dense.clamp(min=0.01).float()<line_sep>depth2=1.0/disp2_dense.clamp(min=0.01).float()<line_sep>intrinsics=torch.from_numpy(intrinsics)<line_sep>valid=torch.from_numpy(valid)<line_sep>flow=torch.from_numpy(flow)<line_sep>valid=valid<times>(disp2<g>0).float()<line_sep>flow=torch.cat([flow dz] -1)<if_stmt>self.augmentor<is><not><none><block_start>image1,image2,depth1,depth2,flow,valid,intrinsics=self.augmentor(image1 image2 depth1 depth2 flow valid intrinsics)<block_end><return>image1 image2 depth1 depth2 flow valid intrinsics<block_end><block_end>
<def_stmt>test_content_create api_client_authenticated<block_start>response=api_client_authenticated.post("/content/" json={"title":"hello test" "text":"this is just a test" "published":<true> "tags":["test" "hello"] } )<assert_stmt>response.status_code<eq>200<line_sep>result=response.json()<assert_stmt>result["slug"]<eq>"hello-test"<block_end><def_stmt>test_content_list api_client_authenticated<block_start>response=api_client_authenticated.get("/content/")<assert_stmt>response.status_code<eq>200<line_sep>result=response.json()<assert_stmt>result[0]["slug"]<eq>"hello-test"<block_end>
# Generated by Django 3.1.13 on 2021-07-16 21:44 <import_from_stmt>django.db migrations models<import_stmt>nautobot.extras.models.models<import_stmt>uuid<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("extras" "0010_change_cf_validation_max_min_field_to_bigint") ]<line_sep>operations=[migrations.CreateModel(name="FileAttachment" fields=[("id" models.UUIDField(default=uuid.uuid4 editable=<false> primary_key=<true> serialize=<false> unique=<true>) ) ("bytes" models.BinaryField()) ("filename" models.CharField(max_length=255)) ("mimetype" models.CharField(max_length=50)) ] options={"ordering":["filename"]} ) migrations.CreateModel(name="FileProxy" fields=[("id" models.UUIDField(default=uuid.uuid4 editable=<false> primary_key=<true> serialize=<false> unique=<true>) ) ("name" models.CharField(max_length=255)) ("file" models.FileField(storage=nautobot.extras.models.models.database_storage upload_to="extras.FileAttachment/bytes/filename/mimetype" ) ) ("uploaded_at" models.DateTimeField(auto_now_add=<true>)) ] options={"get_latest_by":"uploaded_at" "ordering":["name"] "verbose_name_plural":"file proxies" } ) migrations.AlterModelOptions(name="jobresult" options={"get_latest_by":"created" "ordering":["-created"]} ) ]<block_end>
# # Copyright 2021 Red Hat Inc. # SPDX-License-Identifier: Apache-2.0 # """Accessor for Provider Authentication from koku database."""<import_from_stmt>api.provider.models ProviderAuthentication<import_from_stmt>masu.database.koku_database_access KokuDBAccess<class_stmt>ProviderAuthDBAccessor(KokuDBAccess)<block_start>"""Class to interact with the koku database for Provider Authentication Data."""<def_stmt>__init__ self auth_id=<none> credentials=<none><block_start>""" Establish Provider Authentication database connection. Args: auth_id (string) the provider authentication unique database id credentials (dict) the credentials dictionary """<line_sep>super().__init__("public")<line_sep>self._auth_id=auth_id<line_sep>self._credentials=credentials<line_sep>self._table=ProviderAuthentication<block_end><def_stmt>_get_db_obj_query self<block_start>""" Return the sqlachemy query for the provider auth object. Args: None Returns: (django.db.query.QuerySet): QuerySet of objects matching the given filters """<if_stmt>self._auth_id<and>self._credentials<block_start>query=self._table.objects.filter(id=self._auth_id credentials=self._credentials)<block_end><elif_stmt>self._auth_id<block_start>query=self._table.objects.filter(id=self._auth_id)<block_end><elif_stmt>self._credentials<block_start>query=self._table.objects.filter(credentials=self._credentials)<block_end><else_stmt><block_start>query=self._table.objects.none()<block_end><return>query<block_end><def_stmt>get_auth_id self<block_start>""" Return the database id. Args: None Returns: (Integer): "1", """<line_sep>auth_obj=self._get_db_obj_query().first()<line_sep><return>auth_obj.id<if>auth_obj<else><none><block_end><def_stmt>get_uuid self<block_start>""" Return the provider uuid. 
Args: None Returns: (String): "UUID v4", example: "edf94475-235e-4b64-ba18-0b81f2de9c9e" """<line_sep>obj=self._get_db_obj_query().first()<line_sep><return>obj.uuid<block_end><def_stmt>get_credentials self<block_start>""" Return the provider resource name. Args: None Returns: (dtring): "Provider Resource Name. i.e. AWS: RoleARN", example: {"role_arn": "arn:aws:iam::111111111111:role/CostManagement"} """<line_sep>obj=self._get_db_obj_query().first()<line_sep><return>obj.credentials<block_end><block_end>
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>sys<line_sep>sys.path.append('..')<import_from_stmt>auto_scan_test FusePassAutoScanTest IgnoreReasons<import_from_stmt>program_config TensorConfig ProgramConfig OpConfig CxxConfig TargetType PrecisionType DataLayoutType Place<import_stmt>numpy<as>np<import_from_stmt>functools partial<import_from_stmt>typing Optional List Callable Dict Any Set<import_stmt>unittest<import_stmt>hypothesis<import_from_stmt>hypothesis given settings seed example assume reproduce_failure<import_from_stmt>test_elementwise_util trim_trailing_singular_dims check_input_shape_available<import_stmt>hypothesis.strategies<as>st<class_stmt>TestElementwiseScaleFuse(FusePassAutoScanTest)<block_start><def_stmt>__init__ self *args **kwargs<block_start>FusePassAutoScanTest.__init__(self *args **kwargs)<line_sep>opencl_places=[Place(TargetType.OpenCL PrecisionType.FP16 DataLayoutType.ImageDefault) Place(TargetType.OpenCL PrecisionType.FP16 DataLayoutType.ImageFolder) Place(TargetType.OpenCL PrecisionType.FP32 DataLayoutType.NCHW) Place(TargetType.OpenCL PrecisionType.Any DataLayoutType.ImageDefault) Place(TargetType.OpenCL PrecisionType.Any DataLayoutType.ImageFolder) Place(TargetType.OpenCL PrecisionType.Any DataLayoutType.NCHW) Place(TargetType.Host PrecisionType.FP32)]<line_sep>self.enable_testing_on_place(places=opencl_places)<block_end><def_stmt>is_program_valid self 
program_config:ProgramConfig predictor_config:CxxConfig<arrow>bool<block_start><if_stmt>len(program_config.inputs["input_data_x"].shape)<g>4<or>len(program_config.inputs["input_data_y"].shape)<g>4<or>program_config.ops[1].attrs["bias_after_scale"]<eq><false><block_start><return><false><block_end><return><true><block_end><def_stmt>sample_program_configs self draw<block_start>in_shape_x=draw(st.lists(st.integers(min_value=1 max_value=20) min_size=2 max_size=5))<line_sep>in_shape_y=draw(st.lists(st.integers(min_value=1 max_value=20) min_size=2 max_size=5))<line_sep>axis=draw(st.integers(min_value=-1 max_value=max(len(in_shape_x) len(in_shape_y))))<line_sep>assume(check_input_shape_available(in_shape_x=in_shape_x in_shape_y=in_shape_y axis=axis)<eq><true>)<line_sep>#scale param scale=draw(st.floats(min_value=0.5 max_value=5))<line_sep>bias=draw(st.floats(min_value=0 max_value=1))<line_sep>bias_after_scale=draw(st.sampled_from([<false> <true>]))<line_sep>elementwise_op=OpConfig(type='elementwise_mul' inputs={"X":["input_data_x"] "Y":["input_data_y"]} outputs={"Out":["elementwise_output_data"]} attrs={"data_format":'nchw' "axis":axis})<line_sep>scale_op=OpConfig(type='scale' inputs={"X":["elementwise_output_data"]} outputs={"Out":["output_data"]} attrs={"scale":scale "bias":bias "bias_after_scale":bias_after_scale})<line_sep>ops=[elementwise_op scale_op]<line_sep>program_config=ProgramConfig(ops=ops weights={} inputs={"input_data_x":TensorConfig(shape=in_shape_x) "input_data_y":TensorConfig(shape=in_shape_y)} outputs=["output_data"])<line_sep><return>program_config<block_end><def_stmt>sample_predictor_configs self<block_start>config=CxxConfig()<line_sep><return>self.get_predictor_configs() ['elementwise_mul'] (1e-5 1e-5)<block_end><def_stmt>add_ignore_pass_case self<block_start><pass><block_end><def_stmt>test self *args **kwargs<block_start>self.run_and_statis(quant=<false> max_examples=1000 
passes=["lite_elementwise_scale_fuse_pass"])<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main(argv=[''])<block_end>
# Binary Search Tree (BST) Implementation <class_stmt>BSTNode<block_start><def_stmt>__init__ selfNode nodeData# Node Structure <block_start>selfNode.nodeData=nodeData<line_sep>selfNode.left=<none><line_sep>selfNode.right=<none><line_sep>selfNode.parent=<none><block_end># Insertion Operation <def_stmt>insert selfNode node<block_start><if_stmt>selfNode.nodeData<g>node.nodeData<block_start><if_stmt>selfNode.left<is><none><block_start>selfNode.left=node<line_sep>node.parent=selfNode<block_end><else_stmt><block_start>selfNode.left.insert(node)<block_end><block_end><elif_stmt>selfNode.nodeData<l>node.nodeData<block_start><if_stmt>selfNode.right<is><none><block_start>selfNode.right=node<line_sep>node.parent=selfNode<block_end><else_stmt><block_start>selfNode.right.insert(node)<block_end><block_end><block_end># Removal Operation Functions <def_stmt>replace_node_of_parent selfNode new_node<block_start><if_stmt>selfNode.parent<is><not><none><block_start><if_stmt>new_node<is><not><none><block_start>new_node.parent=selfNode.parent<block_end><if_stmt>selfNode.parent.left<eq>selfNode<block_start>selfNode.parent.left=new_node<block_end><elif_stmt>selfNode.parent.right<eq>selfNode<block_start>selfNode.parent.right=new_node<block_end><block_end><else_stmt><block_start>selfNode.nodeData=new_node.nodeData<line_sep>selfNode.left=new_node.left<line_sep>selfNode.right=new_node.right<if_stmt>new_node.left<is><not><none><block_start>new_node.left.parent=selfNode<block_end><if_stmt>new_node.right<is><not><none><block_start>new_node.right.parent=selfNode<block_end><block_end><block_end><def_stmt>find_min selfNode<block_start>current=selfNode<while_stmt>current.left<is><not><none><block_start>current=current.left<block_end><return>current<block_end><def_stmt>remove 
selfNode<block_start><if_stmt>(selfNode.left<is><not><none><and>selfNode.right<is><not><none>)<block_start>successor=selfNode.right.find_min()<line_sep>selfNode.nodeData=successor.nodeData<line_sep>successor.remove()<block_end><elif_stmt>selfNode.left<is><not><none><block_start>selfNode.replace_node_of_parent(selfNode.left)<block_end><elif_stmt>selfNode.right<is><not><none><block_start>selfNode.replace_node_of_parent(selfNode.right)<block_end><else_stmt><block_start>selfNode.replace_node_of_parent(<none>)<block_end><block_end># Search required data within BST <def_stmt>search selfNode nodeData<block_start><if_stmt>selfNode.nodeData<g>nodeData<block_start><if_stmt>selfNode.left<is><not><none><block_start><return>selfNode.left.search(nodeData)<block_end><else_stmt><block_start><return><none><block_end><block_end><elif_stmt>selfNode.nodeData<l>nodeData<block_start><if_stmt>selfNode.right<is><not><none><block_start><return>selfNode.right.search(nodeData)<block_end><else_stmt><block_start><return><none><block_end><block_end><return>selfNode<block_end># InOrder Traversal Operation <def_stmt>inorder selfNode<block_start><if_stmt>selfNode.left<is><not><none><block_start>selfNode.left.inorder()<block_end>print(selfNode.nodeData end=' ')<if_stmt>selfNode.right<is><not><none><block_start>selfNode.right.inorder()<block_end><block_end># PostOrder Traversal Operation <def_stmt>postorder selfNode<block_start><if_stmt>selfNode.left<is><not><none><block_start>selfNode.left.inorder()<block_end><if_stmt>selfNode.right<is><not><none><block_start>selfNode.right.inorder()<block_end>print(selfNode.nodeData end=' ')<block_end># PreOrder Traversal Operation <def_stmt>preorder selfNode<block_start>print(selfNode.nodeData end=' ')<if_stmt>selfNode.left<is><not><none><block_start>selfNode.left.inorder()<block_end><if_stmt>selfNode.right<is><not><none><block_start>selfNode.right.inorder()<block_end><block_end><block_end><class_stmt>BSTree# Structure of Binary Search Tree 
<block_start><def_stmt>__init__ selfNode<block_start>selfNode.root=<none><block_end><def_stmt>inorder selfNode<block_start><if_stmt>selfNode.root<is><not><none><block_start>selfNode.root.inorder()<block_end><block_end><def_stmt>preorder selfNode<block_start><if_stmt>selfNode.root<is><not><none><block_start>selfNode.root.preorder()<block_end><block_end><def_stmt>postorder selfNode<block_start><if_stmt>selfNode.root<is><not><none><block_start>selfNode.root.postorder()<block_end><block_end><def_stmt>add selfNode nodeData<block_start>new_node=BSTNode(nodeData)<if_stmt>selfNode.root<is><none><block_start>selfNode.root=new_node<block_end><else_stmt><block_start>selfNode.root.insert(new_node)<block_end><block_end><def_stmt>remove selfNode nodeData<block_start>to_remove=selfNode.search(nodeData)<if_stmt>(selfNode.root<eq>to_remove<and>selfNode.root.left<is><none><and>selfNode.root.right<is><none>)<block_start>selfNode.root=<none><block_end><else_stmt><block_start>to_remove.remove()<block_end><block_end><def_stmt>search selfNode nodeData<block_start><if_stmt>selfNode.root<is><not><none><block_start><return>selfNode.root.search(nodeData)<block_end><block_end><block_end>bstree=BSTree()# Object of class BSTree # Menu of Operations on BST Tree print('BST Tree Operation Menu')<line_sep>print('Add <data>')<line_sep>print('Remove <data>')<line_sep>print('Inorder')<line_sep>print('Preorder')<line_sep>print('Postorder')<line_sep>print('Quit')<while_stmt><true><block_start>do=input('Enter your action => ').split()<line_sep>operation=do[0].strip().lower()<if_stmt>operation<eq>'add'<block_start>nodeData=int(do[1])<line_sep>bstree.add(nodeData)<block_end><elif_stmt>operation<eq>'remove'<block_start>nodeData=int(do[1])<line_sep>bstree.remove(nodeData)<block_end><elif_stmt>operation<eq>'inorder'<block_start>print('Inorder Traversal: ' end='')<line_sep>bstree.inorder()<line_sep>print()<block_end><elif_stmt>operation<eq>'postorder'<block_start>print('Postorder Traversal: ' 
end='')<line_sep>bstree.postorder()<line_sep>print()<block_end><elif_stmt>operation<eq>'preorder'<block_start>print('Preorder Traversal: ' end='')<line_sep>bstree.preorder()<line_sep>print()<block_end><elif_stmt>operation<eq>'quit'<block_start>print("BST Tree Implementation finished.")<line_sep><break><block_end><block_end>
"""App signals. """<import_stmt>logging<import_from_stmt>django.db.models.signals post_save<import_from_stmt>django.dispatch receiver<import_from_stmt>..azure_projects.models Project<import_from_stmt>.models TrainingStatus<line_sep>logger=logging.getLogger(__name__)<line_sep>@receiver(signal=post_save sender=Project dispatch_uid="training_status_project_created_listener" )<def_stmt>training_status_project_created_listener **kwargs<block_start>"""Project create change."""<line_sep>instance=kwargs["instance"]<line_sep>created=kwargs["created"]<if_stmt><not>created<block_start>logger.info("Project not created. Pass...")<line_sep><return><block_end>logger.info("Azure Project created. Create TrainingStatus object.")<line_sep>TrainingStatus.objects.update_or_create(project_id=instance.id defaults={"status":"ok" "log":"Status : Has not configured" "performance":"{}" } )<block_end>
<import_from_stmt>django.conf.urls url<import_from_stmt>. views<line_sep>urlpatterns=[url(r'^get_work/(?P<victim_id>\d+)$' views.get_work name='get_work') url(r'^get_work$' views.get_work name='get_work') url(r'^work_completed/(?P<victim_id>\d+)$' views.work_completed name='work_completed') url(r'^work_completed$' views.work_completed name='work_completed') url(r'^target$' views.TargetView.as_view() name='TargetView') url(r'^victim$' views.VictimListView.as_view() name='VictimListView') url(r'^attack$' views.AttackView.as_view() name='AttackView') url(r'^victim/(?P<victim_id>\d+)/$' views.VictimDetailView.as_view() name='VictimDetailView') url(r'^victim/notstarted/$' views.DiscoveredVictimsView.as_view() name='DiscoveredVictimsView')]<line_sep>
<import_stmt>datetime<import_from_stmt>django.test TestCase<import_from_stmt>django.contrib.auth.models User<import_from_stmt>addressbook.models Address Country<import_from_stmt>.models Invoice<class_stmt>InvoiceTestCase(TestCase)<block_start><def_stmt>setUp self<block_start>usr=User.objects.create(username='test' first_name='John' last_name='Doe' email='<EMAIL>')<line_sep>country=Country.objects.create(name='TestCountry')<line_sep>address=Address.objects.create(contact_name='<NAME>' address_one='Street' town='Town' postcode='PostCode' country=country)<line_sep>self.inv=Invoice.objects.create(user=usr address=address)<block_end><def_stmt>testInvoiceId self<block_start>inv=self.inv<line_sep>self.assertEquals(inv.invoice_id u'TTH9R')<line_sep>inv.invoice_id=<false><line_sep>inv.save()<line_sep>self.assertEquals(inv.invoice_id u'TTH9R')<block_end><def_stmt>testGetDue self<block_start>inv=self.inv<line_sep>inv.draft=<true><line_sep>inv.save()<line_sep>self.assertEquals(len(Invoice.objects.get_due()) 0)<line_sep>inv.draft=<false><line_sep>inv.save()<line_sep>self.assertEquals(len(Invoice.objects.get_due()) 1)<line_sep>inv.invoiced=<true><line_sep>inv.save()<line_sep>self.assertEquals(len(Invoice.objects.get_due()) 0)<line_sep>today=datetime.date.today()<line_sep>yesterday=today-datetime.timedelta(1)<line_sep>tomorrow=today+datetime.timedelta(1)<line_sep>inv.invoiced=<false><line_sep>inv.invoice_date=yesterday<line_sep>inv.save()<line_sep>self.assertEquals(len(Invoice.objects.get_due()) 1)<line_sep>inv.invoice_date=tomorrow<line_sep>inv.save()<line_sep>self.assertEquals(len(Invoice.objects.get_due()) 0)<block_end><block_end>
#A Dynamic Programming based Python Program for the Egg Dropping Puzzle <import_stmt>sys<line_sep># Function to get minimum number of trials # needed in worst case with n eggs and k floors <def_stmt>eggDrop n k# If there are no floors, then no trials needed. If there is one floor, one trial needed. <block_start><if_stmt>(k<eq>0<or>k<eq>1)<block_start><return>k<block_end># We need k trials for one egg and k floors <if_stmt>(n<eq>1)<block_start><return>k<block_end>min=sys.maxsize<line_sep># Consider all droppings from 1st floor to kth floor and return the minimum of these values plus 1. <for_stmt>x range(1 k+1)<block_start>res=max(eggDrop(n-1 x-1) eggDrop(n k-x))<if_stmt>(res<l>min)<block_start>min=res<block_end><block_end><return>min+1<block_end><if_stmt>__name__<eq>"__main__"<block_start>n=2<line_sep>k=36<line_sep>#or #n=int(input("Enter number of eggs : ")) #k=int(input("Enter number of floors : ")) print("Minimum number of trials in worst case with" n "eggs and" k "floors is" eggDrop(n k))<block_end>
<import_from_stmt>pydantic BaseModel constr<def_stmt>other_func regex<block_start><pass><block_end><class_stmt>Model(BaseModel)<block_start>abc:str=other_func(regex='<caret>[^a-zA-Z]+')<block_end>
# @dependency 001-main/002-createrepository.py SHA1="66f25ae79dcc5e200b136388771b5924a1b5ae56"<with_stmt>repository.workcopy()<as>work<block_start>REMOTE_URL=instance.repository_url("alice")<line_sep>work.run(["checkout" "-b" "008-branch" SHA1])<line_sep>work.run(["rebase" "--force-rebase" "HEAD~5"])<line_sep>work.run(["push" REMOTE_URL "008-branch"])<line_sep>sha1=work.run(["rev-parse" "HEAD"]).strip()<try_stmt><block_start>instance.unittest("api.branch" ["basic"] args=["--sha1="+sha1 "--name=008-branch"])<block_end><finally_stmt><block_start>work.run(["push" REMOTE_URL ":008-branch"])<block_end><block_end>
<import_from_stmt>pyecharts options<as>opts<import_from_stmt>pyecharts.charts Scatter<import_from_stmt>pyecharts.commons.utils JsCode<import_from_stmt>pyecharts.faker Faker<line_sep>c=(Scatter().add_xaxis(Faker.choose()).add_yaxis("商家A" [list(z)<for>z zip(Faker.values() Faker.choose())] label_opts=opts.LabelOpts(formatter=JsCode("function(params){return params.value[1] +' : '+ params.value[2];}")) ).set_global_opts(title_opts=opts.TitleOpts(title="Scatter-多维度数据") tooltip_opts=opts.TooltipOpts(formatter=JsCode("function (params) {return params.name + ' : ' + params.value[2];}")) visualmap_opts=opts.VisualMapOpts(type_="color" max_=150 min_=20 dimension=1) ).render("scatter_multi_dimension.html"))<line_sep>
# ---------------------------------------------------------------------------- # CLASSES: nightly # # Test Case: h5part.py # # Programmer: <NAME> # Date: January, 2009 # # Modifications: # <NAME>, Wed Jan 21 09:36:13 PST 2009 # Took Gunther's original code and integrated it with test suite. # # ---------------------------------------------------------------------------- RequiredDatabasePlugin("H5Part")<line_sep>TurnOffAllAnnotations()<line_sep>OpenDatabase(data_path("h5part_test_data/sample.h5part") 0)<line_sep>AddPlot("Pseudocolor" "GaussianField" 1 0)<line_sep>DrawPlots()<line_sep>Test("h5part_01")<line_sep>ChangeActivePlotsVar("LinearField")<line_sep>View3DAtts=GetView3D()<line_sep>View3DAtts.viewNormal=(1.000000 0.000000 0.0000000)<line_sep>View3DAtts.focus=(31.5 31.5 31.5)<line_sep>View3DAtts.viewUp=(0.000000 1.000000 0.0000000)<line_sep>View3DAtts.viewAngle=30<line_sep>View3DAtts.parallelScale=54.5596<line_sep>View3DAtts.nearPlane=-109.119<line_sep>View3DAtts.farPlane=109.119<line_sep>View3DAtts.imagePan=(0 0)<line_sep>View3DAtts.imageZoom=1<line_sep>View3DAtts.perspective=1<line_sep>View3DAtts.eyeAngle=2<line_sep>View3DAtts.centerOfRotationSet=0<line_sep>View3DAtts.centerOfRotation=(31.5 31.5 31.5)<line_sep>SetView3D(View3DAtts)<line_sep>Test("h5part_02")<line_sep>DeleteActivePlots()<line_sep>AddPlot("Pseudocolor" "px" 1 0)<line_sep>PseudocolorAtts=PseudocolorAttributes()<line_sep>PseudocolorAtts.pointType=PseudocolorAtts.Sphere<line_sep>PseudocolorAtts.pointSize=1.5<line_sep>SetPlotOptions(PseudocolorAtts)<line_sep>DrawPlots()<line_sep>Test("h5part_03")<line_sep>AddPlot("Pseudocolor" "LinearField" 1 0)<line_sep>AddOperator("Slice" 
0)<line_sep>SliceAtts=SliceAttributes()<line_sep>SliceAtts.originType=SliceAtts.Intercept<line_sep>SliceAtts.originIntercept=30<line_sep>SliceAtts.axisType=SliceAtts.XAxis<line_sep>SliceAtts.project2d=0<line_sep>SliceAtts.meshName="particles"<line_sep>SetOperatorOptions(SliceAtts)<line_sep>DrawPlots()<line_sep>Test("h5part_04")<line_sep>Exit()<line_sep>
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. <import_from_stmt>functools partial<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>timm.models.layers DropPath<import_from_stmt>einops.layers.torch Reduce<import_from_stmt>.layers DWConv SPATIAL_FUNC ChannelMLP STEM_LAYER<import_from_stmt>.misc reshape2n<class_stmt>MixingBlock(nn.Module)<block_start><def_stmt>__init__ self dim spatial_func=<none> scaled=<true> init_values=1e-4 shared_spatial_func=<false> norm_layer=partial(nn.LayerNorm eps=1e-6) act_layer=nn.GELU drop_path=0. cpe=<true> num_heads=<none> qkv_bias=<false> qk_scale=<none> attn_drop=0. proj_drop=0. # attn in_features=<none> hidden_features=<none> drop=0. # mlp channel_ratio=2.0<block_start>super(MixingBlock self).__init__()<line_sep>spatial_kwargs=dict(act_layer=act_layer in_features=in_features hidden_features=hidden_features drop=drop # mlp dim=dim num_heads=num_heads qkv_bias=qkv_bias qk_scale=qk_scale attn_drop=attn_drop proj_drop=proj_drop# attn )<line_sep>self.valid_spatial_func=<true><if_stmt>spatial_func<is><not><none><block_start><if_stmt>shared_spatial_func<block_start>self.spatial_func=spatial_func<block_end><else_stmt><block_start>self.spatial_func=spatial_func(**spatial_kwargs)<block_end>self.norm1=norm_layer(dim)<if_stmt>scaled<block_start>self.gamma_1=nn.Parameter(init_values<times>torch.ones(1 1 dim) requires_grad=<true>)<block_end><else_stmt><block_start>self.gamma_1=1.<block_end><block_end><else_stmt><block_start>self.valid_spatial_func=<false><block_end>self.channel_func=ChannelMLP(in_features=dim hidden_features=int(dim<times>channel_ratio) act_layer=act_layer drop=drop)<line_sep>self.norm2=norm_layer(dim)<line_sep>self.drop_path=DropPath(drop_path)<if>drop_path<g>0.<else>nn.Identity()<line_sep>self.cpe=cpe<if_stmt>cpe<block_start>self.cpe_net=DWConv(dim)<block_end><block_end><def_stmt>forward self 
x<block_start>in_x=x<if_stmt>self.valid_spatial_func<block_start>x=x+self.drop_path(self.gamma_1<times>self.spatial_func(self.norm1(in_x)))<block_end><if_stmt>self.cpe<block_start>x=x+self.cpe_net(in_x)<block_end>x=x+self.drop_path(self.channel_func(self.norm2(x)))<line_sep><return>x<block_end><def_stmt>flops self input_shape<block_start>_,N,C=input_shape<line_sep>flops=0<if_stmt>self.valid_spatial_func<block_start>flops<augadd>self.spatial_func.flops(input_shape)<line_sep>flops<augadd>N<times>C<times>2# norm + skip <block_end><if_stmt>self.cpe<block_start>flops<augadd>self.cpe_net.flops(input_shape)<block_end>flops<augadd>self.channel_func.flops(input_shape)<line_sep>flops<augadd>N<times>C<times>2<line_sep><return>flops<block_end><block_end><class_stmt>Spach(nn.Module)<block_start><def_stmt>__init__ self num_classes=1000 img_size=224 in_chans=3 hidden_dim=384 patch_size=16 net_arch=<none> act_layer=nn.GELU norm_layer=partial(nn.LayerNorm eps=1e-6) stem_type='conv1' scaled=<true> init_values=1e-4 drop_path_rate=0. cpe=<true> shared_spatial_func=<false> # mixing block num_heads=12 qkv_bias=<true> qk_scale=<none> attn_drop=0. proj_drop=0. # attn token_ratio=0.5 channel_ratio=2.0 drop_rate=0. 
# mlp downstream=<false> **kwargs<block_start>super(Spach self).__init__()<line_sep>self.num_classes=num_classes<line_sep>self.hidden_dim=hidden_dim<line_sep>self.downstream=downstream<line_sep>self.stem=STEM_LAYER[stem_type](img_size=img_size patch_size=patch_size in_chans=in_chans embed_dim=hidden_dim downstream=downstream)<line_sep>self.norm1=norm_layer(hidden_dim)<line_sep>block_kwargs=dict(dim=hidden_dim scaled=scaled init_values=init_values cpe=cpe shared_spatial_func=shared_spatial_func norm_layer=norm_layer act_layer=act_layer num_heads=num_heads qkv_bias=qkv_bias qk_scale=qk_scale attn_drop=attn_drop proj_drop=proj_drop # attn in_features=self.stem.num_patches hidden_features=int(self.stem.num_patches<times>token_ratio) channel_ratio=channel_ratio drop=drop_rate)<line_sep># mlp self.blocks=self.make_blocks(net_arch block_kwargs drop_path_rate shared_spatial_func)<line_sep>self.norm2=norm_layer(hidden_dim)<if_stmt><not>downstream<block_start>self.pool=Reduce('b n c -> b c' reduction='mean')<line_sep>self.head=nn.Linear(hidden_dim self.num_classes)<block_end>self.init_weights()<block_end><def_stmt>make_blocks self net_arch block_kwargs drop_path shared_spatial_func<block_start><if_stmt>shared_spatial_func<block_start><assert_stmt>len(net_arch)<eq>1 '`shared_spatial_func` only support unitary spatial function'<assert_stmt>net_arch[0][0]<ne>'pass' '`shared_spatial_func` do not support pass'<line_sep>spatial_func=SPATIAL_FUNC[net_arch[0][0]](**block_kwargs)<block_end><else_stmt><block_start>spatial_func=<none><block_end>blocks=[]<for_stmt>func_type,depth net_arch<block_start><for_stmt>i range(depth)<block_start>blocks.append(MixingBlock(spatial_func=spatial_func<or>SPATIAL_FUNC[func_type] drop_path=drop_path **block_kwargs))<block_end><block_end><return>nn.Sequential(*blocks)<block_end><def_stmt>init_weights self<block_start><for_stmt>n,m self.named_modules()<block_start>_init_weights(m n)<block_end><block_end><def_stmt>forward_features self 
x<block_start>x=self.stem(x)<line_sep>x=reshape2n(x)<line_sep>x=self.norm1(x)<line_sep>x=self.blocks(x)<line_sep>x=self.norm2(x)<line_sep><return>x<block_end><def_stmt>forward self x<block_start>x=self.forward_features(x)<line_sep>x=self.pool(x)<line_sep>x=self.head(x)<line_sep><return>x<block_end><def_stmt>flops self<block_start>flops=0<line_sep>shape=(1 self.stem.num_patches self.hidden_dim)<line_sep># stem flops<augadd>self.stem.flops()<line_sep>flops<augadd>sum(shape)<line_sep># blocks flops<augadd>sum([i.flops(shape)<for>i self.blocks])<line_sep>flops<augadd>sum(shape)<line_sep># head flops<augadd>self.hidden_dim<times>self.num_classes<line_sep><return>flops<block_end><block_end><def_stmt>_init_weights m n:str<block_start><if_stmt>isinstance(m nn.Linear)<block_start><if_stmt>n.startswith('head')<block_start>nn.init.zeros_(m.weight)<line_sep>nn.init.zeros_(m.bias)<block_end><else_stmt><block_start>nn.init.xavier_uniform_(m.weight)<if_stmt>m.bias<is><not><none><block_start><if_stmt>'mlp'<in>n<block_start>nn.init.normal_(m.bias std=1e-6)<block_end><else_stmt><block_start>nn.init.zeros_(m.bias)<block_end><block_end><block_end><block_end><elif_stmt>isinstance(m nn.Conv2d)<block_start>nn.init.kaiming_normal_(m.weight mode='fan_out' nonlinearity='relu')<if_stmt>m.bias<is><not><none><block_start>nn.init.zeros_(m.bias)<block_end><block_end><elif_stmt>isinstance(m (nn.LayerNorm nn.BatchNorm2d nn.GroupNorm))<block_start>nn.init.ones_(m.weight)<line_sep>nn.init.zeros_(m.bias)<block_end><block_end>
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. # # This source code is licensed under the BSD license found in the # LICENSE file in the root directory of this source tree. # pylint: disable=missing-module-docstring # pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring """ Test FSDP with grad scaler. """<import_stmt>os<import_stmt>random<import_stmt>pytest<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>fairscale.nn FullyShardedDataParallel<import_from_stmt>fairscale.optim.grad_scaler ShardedGradScaler<import_from_stmt>fairscale.utils.testing skip_if_no_cuda<try_stmt><block_start><import_from_stmt>torch.cuda.amp autocast<block_end><except_stmt>ImportError# Older version doesn't support autocast. Skip this file. <block_start>pytestmark=pytest.mark.skip<block_end># Mixed precision needs cuda. @skip_if_no_cuda<def_stmt>test_scaler_cpu_offload_breaks <block_start>device=torch.device("cuda")<line_sep>torch.cuda.set_device(0)<line_sep># Random port in case the next test run quickly, same port would cause conflict. os.environ["MASTER_ADDR"]="localhost"<line_sep>os.environ["MASTER_PORT"]=str(random.randint(2000 3000))<line_sep>torch.distributed.init_process_group(backend="nccl" rank=0 world_size=1)<try_stmt><block_start>scaler=ShardedGradScaler()<line_sep>model=FullyShardedDataParallel(nn.Linear(5 5) cpu_offload=<true> mixed_precision=<true>)<line_sep>optim=torch.optim.SGD(model.parameters() lr=1e-3)<line_sep>input=torch.rand((1 5) dtype=torch.float).to(device)<line_sep>optim.zero_grad()<with_stmt>autocast()<block_start>output=model(input)<line_sep>loss=F.mse_loss(input output)<block_end>scaler.scale(loss).backward()<line_sep># TODO (Min): Need to fix. Details in issue #421. 
<with_stmt>pytest.raises(RuntimeError)<block_start>scaler.step(optim)<line_sep>scaler.update()<block_end><block_end><finally_stmt># Clean-up is important or the next test in this file may fail to init the PG. <block_start>torch.distributed.destroy_process_group()<del_stmt>os.environ["MASTER_ADDR"]<del_stmt>os.environ["MASTER_PORT"]<block_end><block_end>
<import_stmt>urllib.parse<import_stmt>cachetools<import_from_stmt>common database<as>db<import_from_stmt>sqlalchemy.orm joinedload<line_sep>FEED_LOOKUP_CACHE=cachetools.LRUCache(maxsize=200)<def_stmt>patch_blogspot innetloc<block_start><assert_stmt>isinstance(innetloc str) "Expected str, recieved %s"%type(innetloc)<line_sep># Blogspot domains are coerced to ".com" since they seem to localize their TLD, # and somehow it all points to the same place in the end. <if_stmt>".blogspot."<in>innetloc<and><not>innetloc.endswith(".blogspot.com")<block_start>prefix=innetloc.split(".blogspot.")[0]<line_sep>innetloc=prefix+".blogspot.com"<block_end><return>innetloc<block_end><def_stmt>get_name_for_netloc_db db_sess netloc<block_start><if_stmt>netloc<in>FEED_LOOKUP_CACHE<block_start><return>FEED_LOOKUP_CACHE[netloc]<block_end>row=db_sess.query(db.RssFeedUrlMapper).filter(db.RssFeedUrlMapper.feed_netloc<eq>netloc).options(joinedload('feed_entry')).all()<if_stmt><not>row<block_start><return><false><block_end><if_stmt>len(row)<g>1<block_start>print("ERROR: Multiple solutions for netloc %s?"%netloc)<block_end>feedname=row[0].feed_entry.feed_name<if_stmt>feedname<block_start>FEED_LOOKUP_CACHE[netloc]=feedname<line_sep><return>feedname<block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>getNiceName session srcurl netloc=<none> debug=<false><block_start><if_stmt>netloc<block_start><assert_stmt>isinstance(netloc str) "Expected str, recieved %s"%type(netloc)<line_sep>srcnetloc=netloc<block_end><elif_stmt>srcurl<block_start><assert_stmt>isinstance(srcurl str) "Expected str, recieved %s"%type(srcurl)<line_sep>srcnetloc=urllib.parse.urlparse(srcurl).netloc<block_end><else_stmt><block_start><raise>RuntimeError("You need to at least pass a srcurl or netloc!")<block_end>srcnetloc=patch_blogspot(srcnetloc)<line_sep>val=get_name_for_netloc_db(session srcnetloc)<line_sep><return>val<block_end>
<import_from_stmt>pathlib Path<import_from_stmt>.build DocBuilder<def_stmt>finalize_builddir repo_name<block_start>'Bookkeeping on the docs build directory'<line_sep>root=Path('_build')/repo_name<with_stmt>open(root/'.nojekyll' 'w')<as>fh<block_start>fh.write('')<block_end><block_end><def_stmt>build_root repo_name<block_start>'''Build the top-level documentation. See :py:mod:`.build` on building sub-projects. '''<with_stmt>DocBuilder(repo_name '.')<as>builder<block_start>builder.build()<block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>EcalLiteDTUPedestalsRcd=cms.ESSource("EmptyESSource" recordName=cms.string("EcalLiteDTUPedestalsRcd") firstValid=cms.vuint32(1) iovIsRunNotTime=cms.bool(<true>))<line_sep>EcalLiteDTUPedestals=cms.ESProducer("EcalLiteDTUPedestalsESProducer" ComponentName=cms.string('EcalLiteDTUPedestalProducer') MeanPedestalsGain10=cms.double(12) RMSPedestalsGain10=cms.double(2.5) MeanPedestalsGain1=cms.double(12.) RMSPedestalsGain1=cms.double(2.))<line_sep>
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : extract-coco-features.py # Author : <NAME> # Email : <EMAIL> # Date : 11/27/2018 # # This file is part of Jacinle. # Distributed under terms of the MIT license. """ Extracing features from the MS-COCO dataset. Examples: jac-crun 0 extract-coco-features.py --caption /mnt/localssd1/coco/annotations/captions_train2014.json --image-root /mnt/localssd1/coco/raw/train2014 --output /mnt/localssd2/train.h5 """<import_stmt>os.path<as>osp<import_stmt>queue<import_stmt>threading<import_from_stmt>PIL Image<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.cuda<as>cuda<import_stmt>torch.backends.cudnn<as>cudnn<import_from_stmt>torch.utils.data.dataset Dataset<import_stmt>jacinle.io<as>io<import_from_stmt>jacinle.cli.argument JacArgumentParser<import_from_stmt>jacinle.logging get_logger<import_from_stmt>jacinle.utils.container GView<import_from_stmt>jacinle.utils.tqdm tqdm<import_from_stmt>jactorch.cuda.copy async_copy_to<line_sep>logger=get_logger(__file__)<line_sep>io.set_fs_verbose(<true>)<line_sep>parser=JacArgumentParser()<line_sep>parser.add_argument('--caption' required=<true> type='checked_file' help='caption annotations (*.json)')<line_sep>parser.add_argument('--image-root' required=<true> type='checked_dir' help='image directory')<line_sep>parser.add_argument('--output' required=<true> help='output .h5 file')<line_sep>parser.add_argument('--image-size' default=224 type=int metavar='N' help='input image size')<line_sep>parser.add_argument('--batch-size' default=64 type=int metavar='N' help='batch size')<line_sep>parser.add_argument('--data-workers' type=int default=4 metavar='N' help='the num of workers that input training data')<line_sep>parser.add_argument('--use-gpu' type='bool' default=<true> metavar='B' help='use GPU or not')<line_sep>parser.add_argument('--force-gpu' action='store_true' help='force the script to use GPUs, useful when there exists on-the-ground 
devices')<line_sep>args=parser.parse_args()<line_sep>args.output_images_json=osp.splitext(args.output)[0]+'.images.json'<if_stmt>args.use_gpu<block_start>nr_devs=cuda.device_count()<if_stmt>args.force_gpu<and>nr_devs<eq>0<block_start>nr_devs=1<block_end><assert_stmt>nr_devs<g>0 'No GPU device available'<line_sep>args.gpus=[i<for>i range(nr_devs)]<line_sep>args.gpu_parallel=(nr_devs<g>1)<block_end><class_stmt>COCOImageDataset(Dataset)<block_start><def_stmt>__init__ self images image_root image_transform<block_start>self.images=images<line_sep>self.image_root=image_root<line_sep>self.image_transform=image_transform<block_end><def_stmt>__getitem__ self index<block_start>info=self.images[index]<line_sep>feed_dict=GView()<line_sep>feed_dict.image_filename=info['file_name']<if_stmt>self.image_root<is><not><none><block_start>feed_dict.image=Image.open(osp.join(self.image_root feed_dict.image_filename)).convert('RGB')<line_sep>feed_dict.image=self.image_transform(feed_dict.image)<block_end><return>feed_dict.raw()<block_end><def_stmt>__len__ self<block_start><return>len(self.images)<block_end><def_stmt>make_dataloader self batch_size shuffle drop_last nr_workers<block_start><import_from_stmt>jactorch.data.dataloader JacDataLoader<import_from_stmt>jactorch.data.collate VarLengthCollateV2<line_sep>collate_guide={'image_filename':'skip' }<line_sep><return>JacDataLoader(self batch_size=batch_size shuffle=shuffle drop_last=drop_last num_workers=nr_workers pin_memory=<true> collate_fn=VarLengthCollateV2(collate_guide))<block_end><block_end><class_stmt>FeatureExtractor(nn.Module)<block_start><def_stmt>__init__ self<block_start>super().__init__()<import_stmt>jactorch.models.vision.resnet<as>resnet<line_sep>self.resnet=resnet.resnet152(pretrained=<true> incl_gap=<false> num_classes=<none>)<block_end><def_stmt>forward self 
feed_dict<block_start>feed_dict=GView(feed_dict)<line_sep>f=self.resnet(feed_dict.image)<line_sep>output_dict={'features':f}<line_sep><return>output_dict<block_end><block_end><class_stmt>AsyncWriter(object)<block_start><def_stmt>__init__ self output_file total_size<block_start>self.output_file=output_file<line_sep>self.total_size=total_size<line_sep>self.queue=queue.Queue(maxsize=5)<line_sep>self.output_dataset=<none><line_sep>self.thread=threading.Thread(target=self.target)<line_sep>self.thread.start()<block_end><def_stmt>feed self payload<block_start>self.queue.put(payload)<block_end><def_stmt>join self<block_start>self.queue.put(<none>)<line_sep>self.thread.join()<block_end><def_stmt>target self<block_start>cur_idx=0<while_stmt><true><block_start>payload=self.queue.get()<if_stmt>payload<is><none><block_start><break><block_end>output_dict=payload<if_stmt>self.output_dataset<is><none><block_start>logger.info('Initializing the dataset.')<line_sep>self.output_dataset={k:self.output_file.create_dataset(k (self.total_size )+v.size()[1:] dtype='float32')<for>k,v output_dict.items()}<block_end><for_stmt>k,v output_dict.items()<block_start>next_idx=cur_idx+v.size(0)<line_sep>self.output_dataset[k][cur_idx:next_idx]=v.cpu().numpy()<block_end>cur_idx=next_idx<block_end><block_end><block_end><def_stmt>main <block_start>logger.critical('Loading the dataset.')<line_sep>data=io.load(args.caption)<line_sep># Step 1: filter out images. images={c['image_id']<for>c data['annotations']}<line_sep># Step 2: build a reverse mapping for images. 
id2image={i['id']:i<for>i data['images']}<line_sep>images=[id2image[i]<for>i images]<import_stmt>torchvision.transforms<as>T<line_sep>image_transform=T.Compose([T.Resize((args.image_size args.image_size)) T.ToTensor() T.Normalize([0.485 0.456 0.406] [0.229 0.224 0.225])])<line_sep>dataset=COCOImageDataset(images args.image_root image_transform)<line_sep>logger.critical('Building the model.')<line_sep>model=FeatureExtractor()<if_stmt>args.use_gpu<block_start>model.cuda()<if_stmt>args.gpu_parallel<block_start><import_from_stmt>jactorch.parallel JacDataParallel<line_sep>model=JacDataParallel(model device_ids=args.gpus).cuda()<block_end>cudnn.benchmark=<true><block_end>model.eval()<line_sep>dataloader=dataset.make_dataloader(args.batch_size shuffle=<false> drop_last=<false> nr_workers=args.data_workers)<line_sep>output_file=io.open_h5(args.output 'w')<line_sep>writer=AsyncWriter(output_file total_size=len(dataset))<for_stmt>feed_dict tqdm(dataloader total=len(dataloader) desc='Extracting features')<block_start><if_stmt>args.use_gpu<block_start>feed_dict=async_copy_to(feed_dict 0)<block_end><with_stmt>torch.no_grad()<block_start>output_dict=model(feed_dict)<block_end>writer.feed(output_dict)<block_end>writer.join()<line_sep>output_file.close()<line_sep>io.dump(args.output_images_json images)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
# -*- coding: utf-8 -*- # # This code is part of Qiskit. # # (C) Copyright IBM 2019, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. # pylint: disable=missing-docstring,invalid-name <import_stmt>unittest<import_stmt>numpy<as>np<import_from_stmt>qiskit Aer<import_from_stmt>qiskit.compiler assemble<import_from_stmt>qiskit.ignis.verification.tomography GatesetTomographyFitter<import_from_stmt>qiskit.ignis.verification.tomography gateset_tomography_circuits<import_from_stmt>qiskit.ignis.verification.tomography.basis default_gateset_basis<import_from_stmt>qiskit.providers.aer.noise NoiseModel<import_from_stmt>qiskit.extensions HGate SGate<import_from_stmt>qiskit.quantum_info PTM<class_stmt>TestGatesetTomography(unittest.TestCase)<block_start>@staticmethod<def_stmt>collect_tomography_data shots=10000 noise_model=<none> gateset_basis='Default'<block_start>backend_qasm=Aer.get_backend('qasm_simulator')<line_sep>circuits=gateset_tomography_circuits(gateset_basis=gateset_basis)<line_sep>qobj=assemble(circuits shots=shots)<line_sep>result=backend_qasm.run(qobj noise_model=noise_model).result()<line_sep>fitter=GatesetTomographyFitter(result circuits gateset_basis)<line_sep><return>fitter<block_end>@staticmethod<def_stmt>expected_linear_inversion_gates Gs Fs<block_start>rho=Gs['rho']<line_sep>E=Gs['E']<line_sep>B=np.array([(F@rho).T[0]<for>F Fs]).T<line_sep>BB=np.linalg.inv(B)<line_sep>gates={label:BB@G@B<for>(label G) Gs.items()<if>label<not><in>['E' 'rho']}<line_sep>gates['E']=E@B<line_sep>gates['rho']=BB@rho<line_sep><return>gates<block_end>@staticmethod<def_stmt>hs_distance A 
B<block_start><return>sum([np.abs(x)<power>2<for>x np.nditer(A-B)])<block_end>@staticmethod<def_stmt>convert_from_ptm vector<block_start>Id=np.sqrt(0.5)<times>np.array([[1 0] [0 1]])<line_sep>X=np.sqrt(0.5)<times>np.array([[0 1] [1 0]])<line_sep>Y=np.sqrt(0.5)<times>np.array([[0 -1j] [1j 0]])<line_sep>Z=np.sqrt(0.5)<times>np.array([[1 0] [0 -1]])<line_sep>v=vector.reshape(4)<line_sep><return>v[0]<times>Id+v[1]<times>X+v[2]<times>Y+v[3]<times>Z<block_end><def_stmt>compare_gates self expected_gates result_gates labels delta=0.2<block_start><for_stmt>label labels<block_start>expected_gate=expected_gates[label]<line_sep>result_gate=result_gates[label].data<line_sep>msg="Failure on gate {}: Expected gate = \n{}\n"<concat>"vs Actual gate = \n{}".format(label expected_gate result_gate)<line_sep>distance=self.hs_distance(expected_gate result_gate)<line_sep>self.assertAlmostEqual(distance 0 delta=delta msg=msg)<block_end><block_end><def_stmt>run_test_on_basis_and_noise self gateset_basis='Default' noise_model=<none> noise_ptm=<none><block_start><if_stmt>gateset_basis<eq>'Default'<block_start>gateset_basis=default_gateset_basis()<block_end>labels=gateset_basis.gate_labels<line_sep>gates=gateset_basis.gate_matrices<line_sep>gates['rho']=np.array([[np.sqrt(0.5)] [0] [0] [np.sqrt(0.5)]])<line_sep>gates['E']=np.array([[np.sqrt(0.5) 0 0 np.sqrt(0.5)]])<line_sep># apply noise if given <for_stmt>label labels<block_start><if_stmt>label<ne>"Id"<and>noise_ptm<is><not><none><block_start>gates[label]=noise_ptm@gates[label]<block_end><block_end>Fs=[gateset_basis.spam_matrix(label)<for>label gateset_basis.spam_labels]<line_sep># prepare the fitter fitter=self.collect_tomography_data(shots=10000 noise_model=noise_model gateset_basis=gateset_basis)<line_sep># linear inversion test result_gates=fitter.linear_inversion()<line_sep>expected_gates=self.expected_linear_inversion_gates(gates Fs)<line_sep>self.compare_gates(expected_gates result_gates labels+['E' 'rho'])<line_sep># fitter 
optimization test result_gates=fitter.fit()<line_sep>expected_gates=gates<line_sep>expected_gates['E']=self.convert_from_ptm(expected_gates['E'])<line_sep>expected_gates['rho']=self.convert_from_ptm(expected_gates['rho'])<line_sep>self.compare_gates(expected_gates result_gates labels+['E' 'rho'])<block_end><def_stmt>test_noiseless_standard_basis self<block_start>self.run_test_on_basis_and_noise()<block_end><def_stmt>test_noiseless_h_gate_standard_basis self<block_start>basis=default_gateset_basis()<line_sep>basis.add_gate(HGate())<line_sep>self.run_test_on_basis_and_noise(gateset_basis=basis)<block_end><def_stmt>test_noiseless_s_gate_standard_basis self<block_start>basis=default_gateset_basis()<line_sep>basis.add_gate(SGate())<line_sep>self.run_test_on_basis_and_noise(gateset_basis=basis)<block_end><def_stmt>test_amplitude_damping_standard_basis self<block_start>gamma=0.05<line_sep>noise_ptm=PTM(np.array([[1 0 0 0] [0 np.sqrt(1-gamma) 0 0] [0 0 np.sqrt(1-gamma) 0] [gamma 0 0 1-gamma]]))<line_sep>noise_model=NoiseModel()<line_sep>noise_model.add_all_qubit_quantum_error(noise_ptm ['u1' 'u2' 'u3'])<line_sep>self.run_test_on_basis_and_noise(noise_model=noise_model noise_ptm=np.real(noise_ptm.data))<block_end><def_stmt>test_depolarization_standard_basis self<block_start>p=0.05<line_sep>noise_ptm=PTM(np.array([[1 0 0 0] [0 1-p 0 0] [0 0 1-p 0] [0 0 0 1-p]]))<line_sep>noise_model=NoiseModel()<line_sep>noise_model.add_all_qubit_quantum_error(noise_ptm ['u1' 'u2' 'u3'])<line_sep>self.run_test_on_basis_and_noise(noise_model=noise_model noise_ptm=np.real(noise_ptm.data))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
"""Tests for lookup handler for env."""<line_sep># pylint: disable=no-self-use # pyright: basic <import_from_future_stmt> annotations<import_from_stmt>typing TYPE_CHECKING<import_stmt>pytest<import_from_stmt>runway.lookups.handlers.env EnvLookup<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>...factories MockRunwayContext<block_end>ENV_VARS={"str_val":"test"}<class_stmt>TestEnvLookup<block_start>"""Tests for EnvLookup."""<def_stmt>test_handle self runway_context:MockRunwayContext<arrow><none><block_start>"""Validate handle base functionality."""<line_sep>runway_context.env.vars=ENV_VARS.copy()<line_sep>result=EnvLookup.handle("str_val" context=runway_context)<assert_stmt>result<eq>"test"<block_end><def_stmt>test_handle_not_found self runway_context:MockRunwayContext<arrow><none><block_start>"""Validate exception when lookup cannot be resolved."""<line_sep>runway_context.env.vars=ENV_VARS.copy()<with_stmt>pytest.raises(ValueError)<block_start>EnvLookup.handle("NOT_VALID" context=runway_context)<block_end><block_end><block_end>
<import_from_stmt>scapy.all *<import_from_stmt>websploit.core base<import_from_stmt>threading Thread<import_from_stmt>websploit.core.utils get_fake_mac get_fake_name<line_sep>conf.verb=0<class_stmt>Main(base.Module)<block_start>"""Spamming Fake access points """<line_sep>parameters={"iface":"wlan0mon" "count":10 }<line_sep>completions=list(parameters.keys())<def_stmt>do_execute self line<block_start>"""Execute current module"""<line_sep>process_list=[]<try_stmt><block_start><for_stmt>_ range(int(self.parameters['count']))<block_start>name=get_fake_name()<line_sep>mac=get_fake_mac()<line_sep>p=Thread(target=SpawnAP args=(name mac self.parameters['iface']))<line_sep>process_list.append(p)<line_sep>p.start()<line_sep>self.cp.success(text=f"Access point name : {name} - MAC {mac} started.")<block_end>self.cp.info("Press Ctrl+C for stop ...")<line_sep>input("")<block_end><except_stmt>KeyboardInterrupt<block_start>self.cp.warning("\nKilling all access points, please wait ...")<line_sep># for p in process_list: # p.terminate() # p.join() self.cp.success("Done.")<block_end><block_end><def_stmt>complete_set self text line begidx endidx<block_start>mline=line.partition(' ')[2]<line_sep>offs=len(mline)-len(text)<line_sep><return>[s[offs:]<for>s self.completions<if>s.startswith(mline)]<block_end><block_end><class_stmt>SpawnAP<block_start><def_stmt>__init__ self ssid mac iface<block_start>self.ssid=ssid<line_sep>self.mac=mac<line_sep>self.iface=iface<line_sep>self.run()<block_end><def_stmt>run self<block_start>dot11=Dot11(type=0 subtype=8 addr1="ff:ff:ff:ff:ff:ff" addr2=self.mac addr3=self.mac)<line_sep>beacon=Dot11Beacon()<line_sep>essid=Dot11Elt(ID="SSID" info=self.ssid len=len(self.ssid))<line_sep>rsn=Dot11Elt(ID='RSNinfo' info=('\x01\x00'# RSN Version 1 <concat>'\x00\x0f\xac\x02'# Group Cipher Suite : 00-0f-ac TKIP <concat>'\x02\x00'# 2 Pairwise Cipher Suites (next two lines) <concat>'\x00\x0f\xac\x04'# AES Cipher <concat>'\x00\x0f\xac\x02'# TKIP Cipher <concat>'\x01\x00'# 1 
Authentication Key Managment Suite (line below) <concat>'\x00\x0f\xac\x02'# Pre-Shared Key <concat>'\x00\x00'))<line_sep># RSN Capabilities (no extra capabilities) frame=RadioTap()/dot11/beacon/essid/rsn<line_sep>sendp(frame inter=0.1 iface=self.iface loop=1)<block_end><block_end>
#import sqlite3
from flask import (
    Blueprint, flash, g, redirect, render_template, request, session, url_for
)
import functools
import hashlib
import hmac

from setting import Config

bp = Blueprint('login', __name__, url_prefix='/web')


def _session_token(username, password):
    """Derive the opaque session token stored in ``session['user_id']``.

    Token = sha256(username + sha256(password)) as hex, matching what
    ``login_required`` recomputes from ``Config`` on every request.
    """
    inner = username + hashlib.sha256(password.encode("utf8")).hexdigest()
    return hashlib.sha256(inner.encode("utf8")).hexdigest()


@bp.route('/login', methods=('GET', 'POST'))
def login():
    # Login view: validate the submitted credentials against Config.
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        error = None
        # Use hmac.compare_digest on encoded bytes for a constant-time
        # comparison (plain ``!=`` leaks timing information).
        user_ok = hmac.compare_digest(username.encode("utf8"),
                                      Config.USERNAME.encode("utf8"))
        pass_ok = hmac.compare_digest(password.encode("utf8"),
                                      Config.PASSWORD.encode("utf8"))
        if not (user_ok and pass_ok):
            error = '错误的用户名或密码 Incorrect username or password.'
        if error is None:
            session.clear()
            session['user_id'] = _session_token(username, password)
            return redirect(url_for('index.index'))
        flash(error)
    return render_template('web/login.html')


@bp.route('/logout')
def logout():
    # Logout view: drop the whole session.
    session.clear()
    flash('成功登出 Successfully log out.')
    return redirect(url_for('index.index'))


def login_required(view):
    # Login check, written as a decorator: recompute the expected token from
    # Config and compare with the one stored in the session.
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        stored = session.get('user_id')
        expected = _session_token(Config.USERNAME, Config.PASSWORD)
        if stored is None or not hmac.compare_digest(stored, expected):
            return redirect(url_for('login.login'))
        g.user = {'user_id': stored, 'username': Config.USERNAME}
        return view(**kwargs)

    return wrapped_view
from simple_rl.mdp.oomdp.OOMDPStateClass import OOMDPState


class TrenchOOMDPState(OOMDPState):
    ''' Class for Trench World States '''

    def __init__(self, objects):
        OOMDPState.__init__(self, objects=objects)

    def get_agent_x(self):
        # Agent objects live under objects["agent"]; there is a single agent
        # at index 0 with an attribute dict.
        return self.objects["agent"][0]["x"]

    def get_agent_y(self):
        return self.objects["agent"][0]["y"]

    def _state_tuple(self):
        """Return a hashable tuple of all state attributes.

        Replaces the old string-concatenation hash which (a) duplicated the
        ``dest_x`` term and (b) was collision-prone, since e.g. x=1,y=12 and
        x=11,y=2 both produced the digit string "112".
        """
        agent = self.objects["agent"][0]
        agent_part = (self.get_agent_x(), self.get_agent_y(),
                      agent["dx"] + 1, agent["dy"] + 1,
                      agent["dest_x"], agent["dest_y"], agent["has_block"])
        block_part = tuple((b["x"], b["y"]) for b in self.objects["block"])
        lava_part = tuple((lv["x"], lv["y"]) for lv in self.objects["lava"])
        return agent_part + block_part + lava_part

    def __hash__(self):
        return hash(self._state_tuple())

    def __eq__(self, other_trench_state):
        # Compare the full attribute tuples rather than hashes so that two
        # genuinely different states can never compare equal via collision.
        if not isinstance(other_trench_state, TrenchOOMDPState):
            return NotImplemented
        return self._state_tuple() == other_trench_state._state_tuple()
import os

import pytest

from FinMind.data import DataLoader
from FinMind.schema.plot import Labels, Series, convert_labels_series_schema


@pytest.fixture(scope="module")
def df():
    """Monthly revenue of stock 2890, with plot-ready label/series columns."""
    user_id = os.environ.get("FINMIND_USER", "")
    password = os.environ.get("FINMIND_PASSWORD", "")
    loader = DataLoader()
    loader.login(user_id, password)
    frame = loader.taiwan_stock_month_revenue(
        stock_id="2890", start_date="2018-1M", end_date="2021-7M")
    # "YYYY-MM" style x-axis labels, e.g. "2018-1M".
    frame["labels"] = (
        frame[["revenue_year", "revenue_month"]]
        .astype(str)
        .apply(lambda date: f"{date[0]}-{date[1]}M", axis=1))
    # Revenue rescaled to 1e8 units, rounded to two decimals.
    frame["series"] = frame["revenue"].map(lambda value: round(value * 1e-8, 2))
    return frame


def test_Labels(df):
    # Labels schema accepts the label column as-is.
    assert Labels(labels=df.to_dict("list")["labels"])


def test_Series(df):
    # Series schema accepts the numeric column as-is.
    assert Series(series=df.to_dict("list")["series"])


def test_convert_labels_series_schema(df):
    # Converter wraps raw lists into the schema objects.
    raw = df.to_dict("list")
    labels, series = convert_labels_series_schema(labels=raw["labels"],
                                                  series=raw["series"])
    assert isinstance(labels, Labels)
    assert isinstance(series, Series)
# coding: utf-8
from dynaconf.utils.parse_conf import parse_conf_data


def parse_data(data):
    """Return converted data from @int, @float, @bool, @json markers"""
    return parse_conf_data(data)


def custom_var_dict(cvarlist):
    """Map a list of {'key': ..., 'value': ...} entries to {key: parsed_value}."""
    result = {}
    for cvar in (cvarlist or []):
        result[cvar['key']] = parse_data(cvar['value'])
    return result
"""Tests for accumulate.py"""<import_stmt>pandas<as>pd<import_stmt>xarray<as>xr<import_from_stmt>timeflux.helpers.testing DummyData DummyXArray<import_from_stmt>timeflux.nodes.accumulate AppendDataFrame AppendDataArray<line_sep>xarray_data=DummyXArray()<line_sep>pandas_data=DummyData()<def_stmt>test_append_dataframe <block_start>""""Test node AppendDataFrame"""<line_sep>node=AppendDataFrame()<line_sep>pandas_data.reset()<line_sep>node.clear()<line_sep># gate is not closed, data should be accumulated but not released # first chunk node.i.data=pandas_data.next(5)<line_sep>node.update()<line_sep># assert no output <assert_stmt>node.o.data<eq><none><line_sep># assert the data has been buffered pd.testing.assert_frame_equal(pandas_data._data.iloc[:5 :] node._data)<line_sep># second chunk node.clear()<line_sep>node.i.data=pandas_data.next(10)<line_sep>node.update()<line_sep># assert no output <assert_stmt>node.o.data<eq><none><line_sep># assert the buffer is the concatenation of the 2 accumulated chunks pd.testing.assert_frame_equal(pandas_data._data.iloc[:15 :] node._data)<line_sep># now a meta is received, assessing that the gate has just closed node.i.data=pandas_data.next(5)<line_sep>node.i.meta={'gate_status':'closed'}<line_sep>node.update()<line_sep># assert output data is the concatenation of the 3 chunks pd.testing.assert_frame_equal(pandas_data._data.iloc[:20 :] node.o.data)<block_end><def_stmt>test_append_dataarray <block_start>""""Test node AppendDataArray"""<line_sep>node=AppendDataArray(dim='time')<line_sep>xarray_data.reset()<line_sep>node.clear()<line_sep># gate is not closed, data should be accumulated but not released # first chunk node.i.data=xarray_data.next(5)<line_sep>node.update()<line_sep># assert no output <assert_stmt>node.o.data<eq><none><line_sep># assert the data has been buffered xr.testing.assert_equal(xarray_data._data.isel({'time':slice(0 5)}) node._data_list[0])<line_sep># second chunk 
node.clear()<line_sep>node.i.data=xarray_data.next(10)<line_sep>node.update()<line_sep># assert no output <assert_stmt>node.o.data<eq><none><line_sep># assert the buffer is the concatenation of the 2 accumulated chunks xr.testing.assert_equal(xarray_data._data.isel({'time':slice(5 15)}) node._data_list[1])<line_sep># now a meta is received, assessing that the gate has just closed node.i.data=xarray_data.next(5)<line_sep>node.i.meta={'gate_status':'closed'}<line_sep>node.update()<line_sep># assert output data is the concatenation of the 3 chunks xr.testing.assert_equal(xarray_data._data.isel({'time':slice(0 20)}) node.o.data)<block_end>
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 1.11.5 on 2017-12-27 07:02
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Adds the ShowVersion model, draft fields on Template, and makes the
    # ``updator`` field required (no default) on several models.

    dependencies = [
        ('configuration', '0014_auto_20171225_1112'),
    ]

    operations = [
        migrations.CreateModel(
            name='ShowVersion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('creator', models.CharField(max_length=32, verbose_name='创建者')),
                ('updator', models.CharField(max_length=32, verbose_name='更新者')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('is_deleted', models.BooleanField(default=False)),
                ('deleted_time', models.DateTimeField(blank=True, null=True)),
                ('template_id', models.IntegerField(verbose_name='关联的模板 ID')),
                ('real_version_id', models.IntegerField(verbose_name='关联的VersionedEntity ID')),
                ('name', models.CharField(max_length=32, verbose_name='版本名称')),
            ],
        ),
        migrations.AddField(
            model_name='template',
            name='draft',
            field=models.TextField(default='', verbose_name='草稿'),
        ),
        migrations.AddField(
            model_name='template',
            name='draft_time',
            field=models.DateTimeField(blank=True, null=True, verbose_name='草稿更新时间'),
        ),
        migrations.AddField(
            model_name='template',
            name='draft_updator',
            field=models.CharField(default='', max_length=32, verbose_name='草稿更新者'),
        ),
        migrations.AlterField(
            model_name='application',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        migrations.AlterField(
            model_name='configmap',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        # NOTE: 'deplpyment' is the real (misspelled) model name in this app;
        # do not "fix" the spelling here.
        migrations.AlterField(
            model_name='deplpyment',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        migrations.AlterField(
            model_name='secret',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        migrations.AlterField(
            model_name='service',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        migrations.AlterField(
            model_name='template',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        migrations.AlterField(
            model_name='versionedentity',
            name='updator',
            field=models.CharField(max_length=32, verbose_name='更新者'),
        ),
        migrations.AlterUniqueTogether(
            name='showversion',
            unique_together=set([('template_id', 'name')]),
        ),
    ]
"""Tests for pyno.element"""<line_sep><pass><line_sep>
from flask_wtf import Form
from wtforms import validators
from wtforms.fields.html5 import EmailField


class InviteForm(Form):
    """Single-field form for sending an invitation by e-mail."""

    # Required and must be a syntactically valid e-mail address.
    email = EmailField('Email Address',
                       [validators.DataRequired(), validators.Email()])
import os
import logging
import re

from copy_reg import pickle
from multiprocessing import Pool
from subprocess import check_output
from types import MethodType

from RsyncUploadThread import RsyncUploadThread

from mongodb_consistent_backup.Common import config_to_string
from mongodb_consistent_backup.Errors import OperationError
from mongodb_consistent_backup.Pipeline import Task


# Allows pooled .apply_async()s to work on Class-methods:
def _reduce_method(m):
    # Pickle a bound/unbound method as (getattr, (owner, name)) so that
    # multiprocessing can ship it to worker processes (Python 2).
    if m.im_self is None:
        return getattr, (m.im_class, m.im_func.func_name)
    else:
        return getattr, (m.im_self, m.im_func.func_name)


pickle(MethodType, _reduce_method)


class Rsync(Task):
    """Upload a finished backup directory to a remote host via rsync-over-ssh."""

    def __init__(self, manager, config, timer, base_dir, backup_dir, **kwargs):
        super(Rsync, self).__init__(self.__class__.__name__, manager, config,
                                    timer, base_dir, backup_dir, **kwargs)
        # Backup/upload settings pulled from the shared config tree.
        self.backup_location = self.config.backup.location
        self.backup_name = self.config.backup.name
        self.remove_uploaded = self.config.upload.remove_uploaded
        self.retries = self.config.upload.retries
        self.rsync_path = self.config.upload.rsync.path
        self.rsync_user = self.config.upload.rsync.user
        self.rsync_host = self.config.upload.rsync.host
        self.rsync_port = self.config.upload.rsync.port
        self.rsync_ssh_key = self.config.upload.rsync.ssh_key

        self.rsync_binary = "rsync"
        self.rsync_flags = ["--archive", "--compress"]
        self.rsync_version = None
        self._rsync_info = None

        self.threads(self.config.upload.threads)
        self._pool = Pool(processes=self.threads())

    def init(self):
        # Fail fast when rsync is missing or the source directory is absent.
        if not self.host_has_rsync():
            raise OperationError("Cannot find rsync binary on this host!")
        if not os.path.isdir(self.backup_dir):
            logging.error("The source directory: %s does not exist or is not a directory! Skipping Rsync upload!" % self.backup_dir)
            raise OperationError("The source directory: %s does not exist or is not a directory! Skipping Rsync upload!" % self.backup_dir)

    def rsync_info(self):
        # Parse `rsync --version` once and cache version + protocol number.
        if not self._rsync_info:
            version_text = check_output([self.rsync_binary, "--version"])
            match = re.search(r"^rsync\s+version\s([0-9.-]+)\s+protocol\sversion\s(\d+)", version_text)
            self.rsync_version = match.group(1)
            self._rsync_info = {
                "version": self.rsync_version,
                "protocol_version": int(match.group(2)),
            }
        return self._rsync_info

    def host_has_rsync(self):
        return bool(self.rsync_info())

    def get_dest_path(self):
        return os.path.join(self.rsync_path, self.base_dir)

    def prepare_dest_dir(self):
        # mkdir -p the rsync dest path via ssh
        cmd = ["ssh"]
        if self.rsync_ssh_key:
            cmd.extend(["-i", self.rsync_ssh_key])
        cmd.extend(["%s@%s" % (self.rsync_user, self.rsync_host),
                    "mkdir", "-p", self.get_dest_path()])
        # run the mkdir via ssh
        try:
            check_output(cmd)
        except Exception as e:
            logging.error("Creating rsync dest path with ssh failed for %s: %s" % (
                self.rsync_host, e))
            raise e
        return True

    def done(self, data):
        # apply_async() completion callback: just log the worker's result.
        logging.info(data)

    def run(self):
        try:
            self.init()
            self.timer.start(self.timer_name)

            logging.info("Preparing destination path on %s" % self.rsync_host)
            self.prepare_dest_dir()

            rsync_config = {
                "dest": "%s@%s:%s" % (self.rsync_user, self.rsync_host, self.get_dest_path()),
                "threads": self.threads(),
                "retries": self.retries,
            }
            rsync_config.update(self.rsync_info())
            logging.info("Starting upload using rsync version %s (%s)" % (
                self.rsync_info()['version'], config_to_string(rsync_config)))

            # One pooled upload per top-level entry of the backup directory.
            for entry in os.listdir(self.backup_dir):
                uploader = RsyncUploadThread(
                    os.path.join(self.backup_dir, entry),
                    self.base_dir,
                    self.rsync_flags,
                    self.rsync_path,
                    self.rsync_user,
                    self.rsync_host,
                    self.rsync_port,
                    self.rsync_ssh_key,
                    self.remove_uploaded,
                    self.retries,
                )
                self._pool.apply_async(uploader.run, callback=self.done)
            self.wait()
        except Exception as e:
            logging.error("Rsync upload failed! Error: %s" % e)
            raise OperationError(e)
        finally:
            self.timer.stop(self.timer_name)
        self.completed = True

    def wait(self):
        if self._pool:
            logging.info("Waiting for Rsync upload threads to stop")
            self._pool.close()
            self._pool.join()

    def close(self):
        if self._pool:
            logging.error("Stopping Rsync upload threads")
            self._pool.terminate()
            self._pool.join()
import time
import os

import psycopg2
import psycopg2.extras

from pyinfraboxutils import get_logger

logger = get_logger('infrabox')


def connect_db():
    """Block until a PostgreSQL connection can be established.

    Retries every 3 seconds forever; connection parameters come from the
    INFRABOX_DATABASE_* environment variables.
    """
    while True:
        try:
            conn = psycopg2.connect(
                dbname=os.environ['INFRABOX_DATABASE_DB'],
                user=os.environ['INFRABOX_DATABASE_USER'],
                # BUGFIX: the key was the literal placeholder '<PASSWORD>',
                # which raises KeyError at runtime; restore the real env var
                # name matching the other INFRABOX_DATABASE_* settings.
                password=os.environ['INFRABOX_DATABASE_PASSWORD'],
                host=os.environ['INFRABOX_DATABASE_HOST'],
                port=os.environ['INFRABOX_DATABASE_PORT'])
            return conn
        except Exception as e:
            # logger.warning: .warn() is a deprecated alias.
            logger.warning("Could not connect to db: %s", e)
            time.sleep(3)


class DB(object):
    """Thin convenience wrapper around a psycopg2 connection."""

    def __init__(self, conn):
        self.conn = conn

    def execute_one(self, stmt, args=None):
        """Run a query and return the first row (list-style), or the empty result."""
        r = self.execute_many(stmt, args)
        if not r:
            return r
        return r[0]

    def execute_many(self, stmt, args=None):
        """Run a query and return all rows as DictRow (index- and key-addressable)."""
        c = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        c.execute(stmt, args)
        r = c.fetchall()
        c.close()
        return r

    def execute_one_dict(self, stmt, args=None):
        """Run a query and return the first row as a plain dict, or the empty result."""
        r = self.execute_many_dict(stmt, args)
        if not r:
            return r
        return r[0]

    def execute_many_dict(self, stmt, args=None):
        """Run a query and return all rows as real dicts."""
        c = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        c.execute(stmt, args)
        r = c.fetchall()
        c.close()
        return r

    def execute(self, stmt, args=None):
        """Run a statement without fetching results (INSERT/UPDATE/DDL)."""
        c = self.conn.cursor()
        c.execute(stmt, args)
        c.close()

    def commit(self):
        self.conn.commit()

    def rollback(self):
        self.conn.rollback()

    def close(self):
        self.conn.close()
#==============================================================================
# ConfigManger_test.py
#==============================================================================

from pymtl import *
from pclib.test import TestVectorSimulator

from onehot import Mux, Demux

#------------------------------------------------------------------------------
# test_Mux
#------------------------------------------------------------------------------


def test_Mux(dump_vcd, test_verilog):
    """One-hot Mux: a one-hot sel picks the matching input; all-zero sel -> 0."""
    nports = 2
    data_nbits = 16

    # Drive one test vector into the model.
    def tv_in(model, test_vector):
        model.sel.value = test_vector[0]
        model.in_[0].value = test_vector[1]
        model.in_[1].value = test_vector[2]

    # Check the model output against the expected column.
    def tv_out(model, test_vector):
        assert model.out == test_vector[3]

    # Select and elaborate the model under test
    model = Mux(nports, dtype=data_nbits)
    model.vcd_file = dump_vcd
    if test_verilog:
        model = TranslationTool(model)
    model.elaborate()

    # Define the test vectors
    test_vectors = [
        # sel   in[0]   in[1]   out
        [0b00, 0x1111, 0x2222, 0x0000],
        [0b01, 0x1111, 0x2222, 0x1111],
        [0b10, 0x1111, 0x2222, 0x2222],
        [0b00, 0x1111, 0x2222, 0x0000],
    ]

    # Create, configure and run the simulator.
    sim = TestVectorSimulator(model, test_vectors, tv_in, tv_out)
    sim.run_test()

#------------------------------------------------------------------------------
# test_Demux
#------------------------------------------------------------------------------


def test_Demux(dump_vcd, test_verilog):
    """One-hot Demux: the input appears only on the selected output port."""
    nports = 2
    data_nbits = 16

    # Drive one test vector into the model.
    def tv_in(model, test_vector):
        model.sel.value = test_vector[0]
        model.in_.value = test_vector[1]

    # Check both outputs against the expected columns.
    def tv_out(model, test_vector):
        assert model.out[0] == test_vector[2]
        assert model.out[1] == test_vector[3]

    # Select and elaborate the model under test
    model = Demux(nports, dtype=data_nbits)
    model.vcd_file = dump_vcd
    if test_verilog:
        model = TranslationTool(model)
    model.elaborate()

    # Define the test vectors
    test_vectors = [
        # sel   in_     out[0]  out[1]
        [0b00, 0x3333, 0x0000, 0x0000],
        [0b01, 0x1111, 0x1111, 0x0000],
        [0b10, 0x2222, 0x0000, 0x2222],
        [0b00, 0x1111, 0x0000, 0x0000],
    ]

    # Create, configure and run the simulator.
    sim = TestVectorSimulator(model, test_vectors, tv_in, tv_out)
    sim.run_test()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import math
import xml.etree.ElementTree as ET
from PIL import Image


def resize_img(img):
    """Shrink an image so its longer side is at most 580 px (never upscales).

    Args:
        img: image array of shape (H, W, C).

    Returns:
        (resized_image, scale_value) where scale_value = new_width / width;
        since the aspect ratio is preserved, it applies to both axes.
    """
    h, w = img.shape[:2]
    min_size = 580
    if w >= h and w > min_size:
        new_w = min_size
        new_h = new_w * h / w
    elif h >= w and h > min_size:
        new_h = min_size
        new_w = new_h * w / h
    else:
        new_h = h
        new_w = w
    new_img = cv2.resize(
        img, (int(new_w), int(new_h)), interpolation=cv2.INTER_CUBIC)
    scale_value = new_w / w
    return new_img, scale_value


def plot_det_label(image, anno, labels):
    """Render object-detection annotations (Pascal VOC XML) onto an image.

    Args:
        image: path of the image file.
        anno: path of the VOC XML annotation file.
        labels: class-name list of the dataset the image belongs to.

    Returns:
        The annotated image array.
    """
    catid2color = {}
    img = cv2.imread(image)
    img, scale_value = resize_img(img)
    tree = ET.parse(anno)
    objs = tree.findall('object')
    color_map = get_color_map_list(len(labels) + 1)
    for obj in objs:
        cname = obj.find('name').text
        # BUGFIX: the membership guard must run before labels.index(),
        # which raises ValueError for an unknown class name.
        if cname not in labels:
            continue
        catid = labels.index(cname)
        xmin = int(float(obj.find('bndbox').find('xmin').text) * scale_value)
        ymin = int(float(obj.find('bndbox').find('ymin').text) * scale_value)
        xmax = int(float(obj.find('bndbox').find('xmax').text) * scale_value)
        ymax = int(float(obj.find('bndbox').find('ymax').text) * scale_value)
        if catid not in catid2color:
            catid2color[catid] = color_map[catid + 1]
        color = tuple(catid2color[catid])
        img = draw_rectangle_and_cname(img, xmin, ymin, xmax, ymax, cname,
                                       color)
    return img


def plot_seg_label(anno):
    """Render a semantic-segmentation annotation as a pseudo-color image.

    Args:
        anno: path of the grayscale label image.
    """
    label = pil_imread(anno)
    pse_label = gray2pseudo(label)
    return pse_label


def plot_insseg_label(image, anno, labels, alpha=0.7):
    """Render instance-segmentation annotations (npy dict) onto an image.

    Args:
        image: path of the image file.
        anno: path of the .npy annotation (dict with h/w/gt_class/gt_bbox/gt_poly).
        labels: class-name list of the dataset the image belongs to.
        alpha: blending weight of the mask color overlay.
    """
    import pycocotools.mask as mask_util  # local import: heavy optional dep

    anno = np.load(anno, allow_pickle=True).tolist()
    catid2color = dict()
    img = cv2.imread(image)
    img, scale_value = resize_img(img)
    color_map = get_color_map_list(len(labels) + 1)

    img_h = anno['h']
    img_w = anno['w']
    gt_class = anno['gt_class']
    gt_bbox = anno['gt_bbox']
    gt_poly = anno['gt_poly']
    num_bbox = gt_bbox.shape[0]
    num_mask = len(gt_poly)

    # Blend each instance mask into the image.
    img_array = np.array(img).astype('float32')
    for i in range(num_mask):
        cname = gt_class[i]
        # BUGFIX: guard before labels.index() (ValueError on unknown names).
        if cname not in labels:
            continue
        catid = labels.index(cname)
        if catid not in catid2color:
            catid2color[catid] = color_map[catid + 1]
        color = np.array(catid2color[catid]).astype('float32')
        # Rescale every polygon coordinate to the resized image.
        for x in range(len(gt_poly[i])):
            for y in range(len(gt_poly[i][x])):
                gt_poly[i][x][y] = int(float(gt_poly[i][x][y]) * scale_value)
        poly = gt_poly[i]
        rles = mask_util.frPyObjects(poly,
                                     int(float(img_h) * scale_value),
                                     int(float(img_w) * scale_value))
        rle = mask_util.merge(rles)
        mask = mask_util.decode(rle) * 255
        idx = np.nonzero(mask)
        img_array[idx[0], idx[1], :] *= 1.0 - alpha
        img_array[idx[0], idx[1], :] += alpha * color
    img = img_array.astype('uint8')

    # Draw the bounding boxes on top of the blended masks.
    for i in range(num_bbox):
        cname = gt_class[i]
        # BUGFIX: guard before labels.index() (ValueError on unknown names).
        if cname not in labels:
            continue
        catid = labels.index(cname)
        if catid not in catid2color:
            # NOTE(review): this branch uses color_map[catid] while the mask
            # loop uses color_map[catid + 1]; kept as-is to preserve behavior,
            # but it looks like an off-by-one inconsistency — confirm.
            catid2color[catid] = color_map[catid]
        color = tuple(catid2color[catid])
        xmin, ymin, xmax, ymax = gt_bbox[i]
        img = draw_rectangle_and_cname(img,
                                       int(float(xmin) * scale_value),
                                       int(float(ymin) * scale_value),
                                       int(float(xmax) * scale_value),
                                       int(float(ymax) * scale_value),
                                       cname, color)
    return img


def draw_rectangle_and_cname(img, xmin, ymin, xmax, ymax, cname, color):
    """Draw one bounding box plus its class-name tag onto the image.

    Args:
        img: image array.
        xmin/ymin/xmax/ymax: box corners in pixel coordinates.
        cname: class name to print.
        color: BGR color tuple for this class.
    """
    # Box outline, scaled with the image size.
    line_width = math.ceil(2 * max(img.shape[0:2]) / 600)
    cv2.rectangle(img, pt1=(xmin, ymin), pt2=(xmax, ymax),
                  color=color, thickness=line_width)

    # Filled label background sized from the rendered text extent.
    text_thickness = math.ceil(2 * max(img.shape[0:2]) / 1200)
    fontscale = math.ceil(0.5 * max(img.shape[0:2]) / 600)
    tw, th = cv2.getTextSize(cname, 0, fontScale=fontscale,
                             thickness=text_thickness)[0]
    cv2.rectangle(img, pt1=(xmin + 1, ymin - th),
                  pt2=(xmin + int(0.7 * tw) + 1, ymin),
                  color=color, thickness=-1)
    cv2.putText(img, cname, (int(xmin) + 3, int(ymin) - 5), 0,
                0.6 * fontscale, (255, 255, 255),
                lineType=cv2.LINE_AA, thickness=text_thickness)
    return img


def pil_imread(file_path):
    """Read an image file into a numpy array via PIL.

    Args:
        file_path: path of the image file.
    """
    img = Image.open(file_path)
    return np.asarray(img)


def get_color_map_list(num_classes):
    """Generate a distinct RGB color per class id (VOC-style bit spreading).

    Args:
        num_classes: number of classes to color.

    Returns:
        List of [R, G, B] lists, one per class id.
    """
    color_map = num_classes * [0, 0, 0]
    for i in range(0, num_classes):
        j = 0
        lab = i
        while lab:
            color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
            color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
            color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
            j += 1
            lab >>= 3
    color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
    return color_map


def gray2pseudo(gray_image):
    """Map a grayscale label image to a pseudo-color image.

    Args:
        gray_image: grayscale label array (values used as class ids).
    """
    color_map = get_color_map_list(256)
    color_map = np.array(color_map).astype("uint8")
    # Per-channel lookup-table mapping with OpenCV.
    c1 = cv2.LUT(gray_image, color_map[:, 0])
    c2 = cv2.LUT(gray_image, color_map[:, 1])
    c3 = cv2.LUT(gray_image, color_map[:, 2])
    pseudo_img = np.dstack((c1, c2, c3))
    return pseudo_img
# Generated by Django 3.2.7 on 2021-09-08 00:45

from django.db import migrations, models


class Migration(migrations.Migration):
    # Adds a DB index to Invite.invitee_email.

    dependencies = [
        ('invites', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='invite',
            name='invitee_email',
            field=models.EmailField(db_index=True, max_length=254,
                                    verbose_name='invitee email'),
        ),
    ]
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import unittest

from test.unittest_base import UnittestBase


class TestParameters(UnittestBase, unittest.TestCase):
    """Exercise the schedule/parameter types for exploration and reward horizon."""

    def float_unittest(self, exploration):
        # Run two act/observe steps and check whether the float-valued
        # exploration parameter stays constant or evolves, as appropriate.
        agent, environment = self.prepare(exploration=exploration)

        states = environment.reset()
        actions = agent.act(states=states)
        value_before = agent.model.exploration.value().numpy().item()
        states, terminal, reward = environment.execute(actions=actions)
        agent.observe(terminal=terminal, reward=reward)

        actions = agent.act(states=states)
        value_after = agent.model.exploration.value().numpy().item()
        # Plain floats and explicit 'constant' schedules must not change.
        if not isinstance(exploration, dict) or exploration['type'] == 'constant':
            self.assertEqual(value_after, value_before)
        else:
            self.assertNotEqual(value_after, value_before)
        states, terminal, reward = environment.execute(actions=actions)
        agent.observe(terminal=terminal, reward=reward)

        agent.close()
        environment.close()
        self.finished_test()

    def int_unittest(self, horizon):
        # Same check for the integer-valued reward-horizon parameter.
        agent, environment = self.prepare(reward_estimation=dict(horizon=horizon))

        states = environment.reset()
        actions = agent.act(states=states)
        states, terminal, reward = environment.execute(actions=actions)
        agent.observe(terminal=terminal, reward=reward)
        value_before = agent.model.reward_horizon.value().numpy().item()

        actions = agent.act(states=states)
        states, terminal, reward = environment.execute(actions=actions)
        agent.observe(terminal=terminal, reward=reward)
        value_after = agent.model.reward_horizon.value().numpy().item()
        # Plain ints and explicit 'constant' schedules must not change.
        if not isinstance(horizon, dict) or horizon['type'] == 'constant':
            self.assertEqual(value_after, value_before)
        else:
            self.assertNotEqual(value_after, value_before)

        agent.close()
        environment.close()
        self.finished_test()

    def test_constant(self):
        self.start_tests(name='constant')

        self.float_unittest(exploration=0.1)
        self.int_unittest(horizon=4)

    def test_decaying(self):
        self.start_tests(name='decaying')

        self.float_unittest(exploration=dict(
            type='decaying', decay='exponential', unit='timesteps',
            num_steps=5, initial_value=0.1, decay_rate=0.5))
        self.int_unittest(horizon=dict(
            type='polynomial', unit='timesteps', num_steps=1,
            initial_value=2, final_value=4, power=2))

    def test_exponential(self):
        self.start_tests(name='exponential')

        # SPECIFICATION.MD
        self.float_unittest(exploration=dict(
            type='exponential', unit='timesteps', num_steps=5,
            initial_value=0.1, decay_rate=0.5))

    def test_linear(self):
        self.start_tests(name='linear')

        self.float_unittest(exploration=dict(
            type='linear', unit='timesteps', num_steps=5,
            initial_value=0.1, final_value=0.5))

        # SPECIFICATION.MD
        self.int_unittest(horizon=dict(
            type='linear', unit='timesteps', num_steps=1,
            initial_value=2, final_value=4))

    def test_ornstein_uhlenbeck(self):
        self.start_tests(name='ornstein-uhlenbeck')

        self.float_unittest(exploration=dict(type='ornstein_uhlenbeck', absolute=True))

    def test_piecewise_constant(self):
        self.start_tests(name='piecewise-constant')

        self.float_unittest(exploration=dict(
            type='piecewise_constant', unit='timesteps',
            boundaries=[1], values=[0.1, 0.0]))
        self.int_unittest(horizon=dict(
            type='piecewise_constant', dtype='int', unit='timesteps',
            boundaries=[1], values=[1, 2]))

    def test_random(self):
        self.start_tests(name='random')

        self.float_unittest(exploration=dict(type='random', distribution='uniform'))
""" =============================================================================== The code for all predicting methods =============================================================================== """<import_from_stmt>pydlm.core._dlm _dlm<class_stmt>_dlmPredict(_dlm)<block_start>""" The main class containing all prediction methods. Methods: _oneDayAheadPredict: predict one day a head. _continuePredict: continue predicting one day after _oneDayAheadPredict """<line_sep># Note the following functions will modify the status of the model, so they # shall not be directly call through the main model if this behavior is not # desired. # featureDict contains all the features for prediction. # It is a dictionary with key equals to the name of the component and # the value as the new feature (a list). The function # will first use the features provided in this feature dict, if not # found, it will fetch the default feature from the component. If # it could not find feature for some component, it returns an error # The intermediate result will be stored in result.predictStatus as # (start_date, next_pred_date, [all_predicted_values]), which will be # used by _continuePredict. <def_stmt>_oneDayAheadPredict self date featureDict=<none><block_start>""" One day ahead prediction based on the date and the featureDict. The prediction could be on the last day and into the future or in the middle of the time series and ignore the rest. For predicting into the future, the new features must be supplied to featureDict. For prediction in the middle, the user can still supply the features which will be used priorily. The old features will be used if featureDict is None. Args: date: the prediction starts (based on the observation before and on this date) featureDict: the new feature value for some dynamic components. must be specified in a form of {component_name: value} if the feature for some dynamic component is not supplied. The algorithm will use the features from the old data. 
(which means if the prediction is out of sample, then all dynamic component must be provided with the new feature value) Returns: A tuple of (predicted_mean, predicted_variance) """<if_stmt>date<g>self.n-1<block_start><raise>NameError('The date is beyond the data range.')<block_end># get the correct status of the model self._setModelStatus(date=date)<line_sep>self._constructEvaluationForPrediction(date=date+1 featureDict=featureDict padded_data=self.padded_data[:(date+1)])<line_sep># initialize the prediction status self.builder.model.prediction.step=0<line_sep># start predicting self.Filter.predict(self.builder.model)<line_sep>predictedObs=self.builder.model.prediction.obs<line_sep>predictedObsVar=self.builder.model.prediction.obsVar<line_sep>self.result.predictStatus=[date # start_date date+1 # current_date [predictedObs[0 0]]# all historical predictions ]<line_sep><return>(predictedObs predictedObsVar)<block_end><def_stmt>_continuePredict self featureDict=<none><block_start>""" Continue predicting one day after _oneDayAheadPredict or after _continuePredict. After using _oneDayAheadPredict, the user can continue predicting by using _continuePredict. The featureDict act the same as in _oneDayAheadPredict. Args: featureDict: the new feature value for some dynamic components. 
see @_oneDayAheadPredict Returns: A tuple of (predicted_mean, predicted_variance) """<if_stmt>self.result.predictStatus<is><none><block_start><raise>NameError('_continoousPredict can only be used after '+'_oneDayAheadPredict')<block_end>startDate=self.result.predictStatus[0]<line_sep>currentDate=self.result.predictStatus[1]<line_sep>self._constructEvaluationForPrediction(date=currentDate+1 featureDict=featureDict padded_data=self.padded_data[:(startDate+1)]+self.result.predictStatus[2])<line_sep>self.Filter.predict(self.builder.model)<line_sep>predictedObs=self.builder.model.prediction.obs<line_sep>predictedObsVar=self.builder.model.prediction.obsVar<line_sep>self.result.predictStatus[1]<augadd>1<line_sep>self.result.predictStatus[2].append(predictedObs[0 0])<line_sep><return>(predictedObs predictedObsVar)<block_end># This function will modify the status of the object, use with caution. <def_stmt>_constructEvaluationForPrediction self date featureDict=<none> padded_data=<none><block_start>""" Construct the evaluation matrix based on date and featureDict. Used for prediction. Features provided in the featureDict will be used preferrably. If the feature is not found in featureDict, the algorithm will seek it based on the old data and the date. Args: featureDict: a dictionary containing {dynamic_component_name: value} for update the feature for the corresponding component. date: if a dynamic component name is not found in featureDict, the algorithm is using its old feature on the given date. padded_data: is the mix of the raw data and the predicted data. It is used by auto regressor. """<line_sep># New features are provided. Update dynamic componnet. # We distribute the featureDict back to dynamicComponents. If the date is # out of bound, we append the feature to the feature set. If the date is # within range, we replace the old feature with the new feature. 
<if_stmt>featureDict<is><not><none><block_start><for_stmt>name featureDict<block_start><if_stmt>name<in>self.builder.dynamicComponents<block_start>comp=self.builder.dynamicComponents[name]<line_sep># the date is within range <if_stmt>date<l>comp.n<block_start>comp.features[date]=featureDict[name]<line_sep>comp.n<augadd>1<block_end><elif_stmt>date<l>comp.n+1<block_start>comp.features.append(featureDict[name])<line_sep>comp.n<augadd>1<block_end><else_stmt><block_start><raise>NameError("Feature is missing between the last predicted "+"day and the new day")<block_end><block_end><block_end><block_end>self.builder.updateEvaluation(date padded_data)<block_end><block_end>
"""Init file for blinkpy helper functions."""<line_sep>
__all__=["bgl_preprocessor" "open_source_logs" ]<line_sep>
# Reference genome assembly identifiers (GRCh38 / GRCm39, release 104).
# NOTE(review): the naming follows the Ensembl
# "<Species>.<assembly>.<release>" convention -- confirm these match the
# annotation files actually downloaded.
ASSEMBLY_HUMAN = "Homo_sapiens.GRCh38.104"
ASSEMBLY_MOUSE = "Mus_musculus.GRCm39.104"

# Cell-type labels selected for this analysis.
CELLTYPES = [
    "adventitial cell",
    "endothelial cell",
    "acinar cell",
    "pancreatic PP cell",
    "type B pancreatic cell",
]

# Ontology release pinned for the cell-type vocabulary.
# NOTE(review): presumably a Cell Ontology (CL) release date -- verify
# against the ontology source used downstream.
CL_VERSION = "v2021-08-10"
<import_from_stmt>en_transformer.en_transformer EquivariantAttention EnTransformer<line_sep>
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
"""Tests for poptorch's per-run performance counters and latency stats."""
import torch
import pytest
import poptorch
import helpers


class Model(torch.nn.Module):
    """Minimal model: (batched) matrix multiplication of its two inputs."""

    def forward(self, x, y):
        return torch.matmul(x, y)


def assert_perf_counter_size(perf, inputs, outputs, steps, outsteps=None):
    """Check the shape of the counter dict returned by getPerfCounters().

    Args:
        perf: dict with 'input', 'input_complete', 'output' and
            'output_complete' entries, each a list with one reading-list
            per tensor.
        inputs: expected number of input tensors.
        outputs: expected number of output tensors.
        steps: expected number of readings per input tensor.
        outsteps: expected readings per output tensor; defaults to `steps`
            when falsy (anchor modes can reduce how many outputs come back).
    """
    def assert_size(perf, elems, steps):
        # One entry per tensor, one reading per step.
        assert len(perf) == elems
        for elem in perf:
            assert len(elem) == steps

    outsteps = outsteps or steps
    assert_size(perf['input'], inputs, steps)
    assert_size(perf['input_complete'], inputs, steps)
    assert_size(perf['output'], outputs, outsteps)
    assert_size(perf['output_complete'], outputs, outsteps)


def assert_latency_values(model):
    """Sanity-check every latency triple reported by the model.

    Each getter returns (min, max, avg); min <= avg <= max must hold.
    """
    def check(latency):
        (minimum, maximum, average) = latency
        assert minimum <= average
        assert average <= maximum

    host2ipu = model.getHostIpuLatency()
    compute = model.getComputeLatency()
    ipu2host = model.getIpuHostLatency()
    round_trip = model.getLatency()
    check(host2ipu)
    check(compute)
    check(ipu2host)
    check(round_trip)


def test_simple():
    # Single inference run: 2 input tensors, 1 output, 1 step each.
    x = torch.randn(100, 100)
    y = torch.randn(100, 100)
    model = Model()
    poptorch_model = poptorch.inferenceModel(model)
    poptorch_model(x, y)

    perf = poptorch_model.getPerfCounters()
    assert_perf_counter_size(perf, 2, 1, 1)
    assert_latency_values(poptorch_model)


def test_steps():
    # 10 device iterations: expect 10 readings per tensor.
    x = torch.randn(10, 100, 100)
    y = torch.randn(10, 100, 100)
    model = Model()
    opts = poptorch.Options().deviceIterations(10)
    poptorch_model = poptorch.inferenceModel(model, opts)
    poptorch_model(x, y)

    perf = poptorch_model.getPerfCounters()
    assert_perf_counter_size(perf, 2, 1, 10)
    assert_latency_values(poptorch_model)


@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
                    reason="Hardware IPU needed")
def test_replicas():
    # 4 replicas: expect one reading per replica for each tensor.
    x = torch.randn(4, 100, 100)
    y = torch.randn(4, 100, 100)
    model = Model()
    opts = poptorch.Options().replicationFactor(4)
    poptorch_model = poptorch.inferenceModel(model, opts)
    poptorch_model(x, y)

    perf = poptorch_model.getPerfCounters()
    assert_perf_counter_size(perf, 2, 1, 4)
    assert_latency_values(poptorch_model)


@pytest.mark.parametrize("mode_tuple", [(poptorch.AnchorMode.Final, 1),
                                        (poptorch.AnchorMode.All, 1),
                                        (poptorch.AnchorMode.Sum, 1),
                                        (poptorch.AnchorMode.EveryN, 2)])
@pytest.mark.parametrize("steps", [2, 4])
@pytest.mark.parametrize("replicas", [1, 2])
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
                    reason="Hardware IPU needed")
def test_inference(mode_tuple, steps, replicas):
    """Inference: inputs are read once per iteration per replica; the number
    of returned outputs depends on the anchor mode."""
    model = Model()
    opts = poptorch.Options()
    opts.anchorMode(mode_tuple[0], mode_tuple[1])
    opts.deviceIterations(steps)
    opts.replicationFactor(replicas)
    poptorch_model = poptorch.inferenceModel(model, opts)

    torch.manual_seed(42)
    x = torch.randn(16, 100, 100)
    y = torch.randn(16, 100, 100)
    poptorch_model(x, y)

    perf = poptorch_model.getPerfCounters()

    # Expected output readings per anchor mode:
    #   All            -> every step on every replica
    #   Final / Sum    -> one anchored value per replica
    #   EveryN (N = mode_tuple[1]) -> every N-th step per replica
    outsteps = steps * replicas
    if mode_tuple[0] in [poptorch.AnchorMode.Final, poptorch.AnchorMode.Sum]:
        outsteps = replicas
    elif mode_tuple[0] is poptorch.AnchorMode.EveryN:
        outsteps = steps // mode_tuple[1] * replicas
    assert_perf_counter_size(perf, 2, 1, steps * replicas, outsteps)
    assert_latency_values(poptorch_model)


@pytest.mark.parametrize("mode_tuple", [(poptorch.AnchorMode.Final, 1),
                                        (poptorch.AnchorMode.All, 1),
                                        (poptorch.AnchorMode.Sum, 1),
                                        (poptorch.AnchorMode.EveryN, 2)])
@pytest.mark.parametrize("steps", [2, 4])
@pytest.mark.parametrize("accums", [1, 2])
@pytest.mark.parametrize("replicas", [1, 2])
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
                    reason="Hardware IPU needed")
def test_training(mode_tuple, steps, accums, replicas):
    """Training: gradient accumulation multiplies the per-replica step
    count; two outputs are expected (presumably prediction and loss --
    NOTE(review): confirm against trainingModelWithLoss)."""
    torch.manual_seed(42)
    inputs = torch.randn(16, 100)
    targets = torch.randn(16, 100)

    opts = poptorch.Options()
    opts.anchorMode(mode_tuple[0], mode_tuple[1])
    opts.deviceIterations(steps)
    opts.Training.gradientAccumulation(accums)
    opts.replicationFactor(replicas)

    model = torch.nn.Linear(100, 100)
    poptorch_model = helpers.trainingModelWithLoss(model,
                                                   loss=torch.nn.L1Loss(),
                                                   options=opts)
    poptorch_model(inputs, targets)

    perf = poptorch_model.getPerfCounters()

    # Same anchor-mode arithmetic as test_inference, with the gradient
    # accumulation factor folded in.
    outsteps = steps * accums * replicas
    if mode_tuple[0] in [poptorch.AnchorMode.Final, poptorch.AnchorMode.Sum]:
        outsteps = replicas
    elif mode_tuple[0] is poptorch.AnchorMode.EveryN:
        outsteps = steps // mode_tuple[1] * accums * replicas
    assert_perf_counter_size(perf, 2, 2, steps * accums * replicas, outsteps)
    assert_latency_values(poptorch_model)


def test_synthetic_data():
    # With synthetic data enabled there is no host I/O: counters are empty
    # and every latency statistic is exactly zero.
    model = Model()
    opts = poptorch.Options()
    opts.deviceIterations(16)
    opts.enableSyntheticData(True)
    poptorch_model = poptorch.inferenceModel(model, opts)

    torch.manual_seed(42)
    x = torch.randn(16, 100, 100)
    y = torch.randn(16, 100, 100)
    poptorch_model(x, y)

    perf = poptorch_model.getPerfCounters()
    assert_perf_counter_size(perf, 2, 1, 0, 0)

    latency = poptorch_model.getLatency()
    assert latency == (0., 0., 0.)
""" Script to convert Pubmed RCT dataset to textual format for sent classification """<import_stmt>jsonlines<import_stmt>click<import_stmt>pathlib<line_sep>@click.command()@click.argument('inpath')@click.argument('outpath')<def_stmt>convert inpath outpath<block_start>pathlib.Path(outpath).parent.mkdir(parents=<true> exist_ok=<true>)<with_stmt>open(inpath)<as>f_in<block_start><with_stmt>jsonlines.open(outpath 'w')<as>f_out<block_start><for_stmt>line f_in<block_start>abstract_id=''<line_sep>line=line.strip()<if_stmt><not>line<block_start><continue><block_end><if_stmt>line.startswith('###')<block_start>abstract_id=line<line_sep><continue><block_end>label,sent=line.split('\t')<line_sep>f_out.write({'label':label 'text':sent 'metadata':abstract_id})<block_end><block_end><block_end><block_end>convert()<line_sep>
import numpy as np


def produce_adjacent_matrix_random(stroke_length, conn=0.15, size=7):
    """Build a random symmetric attention/adjacency mask with padding.

    Generalized: the side length used to be hard-coded to 7 even though
    `stroke_length` was a parameter; it is now `size` (default 7, so old
    callers are unaffected).

    Args:
        stroke_length: number of "real" positions; rows/columns at index
            >= stroke_length are padding and filled with the sentinel 2.
        conn: probability of sampling a 0 entry (p=[conn, 1 - conn] over
            [0, 1]). NOTE(review): whether 0 or 1 denotes "connected" is
            not evident from this code -- confirm with the consumer.
        size: side length of the square mask.

    Returns:
        A (size, size) integer array that is symmetric, has a zero diagonal
        for the first `stroke_length` positions, and contains 2 in every
        padded row/column.
    """
    mask = np.random.choice(a=[0, 1], size=[size, size], p=[conn, 1 - conn])
    # Mark padding rows and columns with the sentinel value 2.
    mask[stroke_length:, :] = 2
    mask[:, stroke_length:] = 2
    # Symmetrize: keep the upper triangle and mirror it below the diagonal.
    mask = np.triu(mask)
    mask += mask.T - np.diag(mask.diagonal())
    # Real positions never attend to themselves.
    for i in range(stroke_length):
        mask[i, i] = 0
    return mask


att_msk = produce_adjacent_matrix_random(4, 0.5)
print(att_msk)
print("----------------------")
print((att_msk.T == att_msk).all())