content
stringlengths
0
1.55M
<import_from_stmt>rest_framework.mixins CreateModelMixin DestroyModelMixin ListModelMixin <import_from_stmt>rest_framework.viewsets GenericViewSet<import_from_stmt>pydis_site.apps.api.models.bot.offensive_message OffensiveMessage<import_from_stmt>pydis_site.apps.api.serializers OffensiveMessageSerializer<class_stmt>OffensiveMessageViewSet(CreateModelMixin ListModelMixin DestroyModelMixin GenericViewSet)<block_start>""" View providing CRUD access to offensive messages. ## Routes ### GET /bot/offensive-messages Returns all offensive messages in the database. #### Response format >>> [ ... { ... 'id': '631953598091100200', ... 'channel_id': '291284109232308226', ... 'delete_date': '2019-11-01T21:51:15.545000Z' ... }, ... ... ... ] #### Status codes - 200: returned on success ### POST /bot/offensive-messages Create a new offensive message object. #### Request body >>> { ... 'id': int, ... 'channel_id': int, ... 'delete_date': datetime.datetime # ISO-8601-formatted date ... } #### Status codes - 201: returned on success - 400: if the body format is invalid ### DELETE /bot/offensive-messages/<id:int> Delete the offensive message object with the given `id`. #### Status codes - 204: returned on success - 404: if a offensive message object with the given `id` does not exist ## Authentication Requires an API token. """<line_sep>serializer_class=OffensiveMessageSerializer<line_sep>queryset=OffensiveMessage.objects.all()<block_end>
# # Autogenerated by Thrift Compiler (0.11.0) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string: py:new_style,no_utf8strings # <import_from_stmt>thrift.Thrift TType TMessageType TFrozenDict TException TApplicationException<import_from_stmt>thrift.protocol.TProtocol TProtocolException<import_from_stmt>thrift.TRecursive fix_spec<import_from_stmt>thrift.transport TTransport<line_sep>all_structs=[]<class_stmt>TUnit(object)<block_start>UNIT=0<line_sep>UNIT_PER_SECOND=1<line_sep>CPU_TICKS=2<line_sep>BYTES=3<line_sep>BYTES_PER_SECOND=4<line_sep>TIME_NS=5<line_sep>DOUBLE_VALUE=6<line_sep>NONE=7<line_sep>TIME_MS=8<line_sep>TIME_S=9<line_sep>TIME_US=10<line_sep>BASIS_POINTS=11<line_sep>_VALUES_TO_NAMES={0:"UNIT" 1:"UNIT_PER_SECOND" 2:"CPU_TICKS" 3:"BYTES" 4:"BYTES_PER_SECOND" 5:"TIME_NS" 6:"DOUBLE_VALUE" 7:"NONE" 8:"TIME_MS" 9:"TIME_S" 10:"TIME_US" 11:"BASIS_POINTS" }<line_sep>_NAMES_TO_VALUES={"UNIT":0 "UNIT_PER_SECOND":1 "CPU_TICKS":2 "BYTES":3 "BYTES_PER_SECOND":4 "TIME_NS":5 "DOUBLE_VALUE":6 "NONE":7 "TIME_MS":8 "TIME_S":9 "TIME_US":10 "BASIS_POINTS":11 }<block_end><class_stmt>TMetricKind(object)<block_start>GAUGE=0<line_sep>COUNTER=1<line_sep>PROPERTY=2<line_sep>STATS=3<line_sep>SET=4<line_sep>HISTOGRAM=5<line_sep>_VALUES_TO_NAMES={0:"GAUGE" 1:"COUNTER" 2:"PROPERTY" 3:"STATS" 4:"SET" 5:"HISTOGRAM" }<line_sep>_NAMES_TO_VALUES={"GAUGE":0 "COUNTER":1 "PROPERTY":2 "STATS":3 "SET":4 "HISTOGRAM":5 }<block_end>fix_spec(all_structs)<del_stmt>all_structs<line_sep>
<import_from_stmt>hamcrest *<import_from_stmt>utils *<line_sep>@pytest.mark.serial@pytest.mark.manual_batch_review<def_stmt>test_approve_pending_batch_change_success shared_zone_test_context<block_start>""" Test approving a batch change succeeds for a support user """<line_sep>client=shared_zone_test_context.ok_vinyldns_client<line_sep>approver=shared_zone_test_context.support_user_client<line_sep>batch_change_input={"changes":[get_change_A_AAAA_json("test-approve-success.not.loaded." address="4.3.2.1") get_change_A_AAAA_json("needs-review.not.loaded." address="4.3.2.1") get_change_A_AAAA_json("zone-name-flagged-for-manual-review.zone.requires.review.")] "ownerGroupId":shared_zone_test_context.ok_group['id']}<line_sep>to_delete=[]<line_sep>to_disconnect=<none><try_stmt><block_start>result=client.create_batch_change(batch_change_input status=202)<line_sep>get_batch=client.get_batch_change(result['id'])<line_sep>assert_that(get_batch['status'] is_('PendingReview'))<line_sep>assert_that(get_batch['approvalStatus'] is_('PendingReview'))<line_sep>assert_that(get_batch['changes'][0]['status'] is_('NeedsReview'))<line_sep>assert_that(get_batch['changes'][0]['validationErrors'][0]['errorType'] is_('ZoneDiscoveryError'))<line_sep>assert_that(get_batch['changes'][1]['status'] is_('NeedsReview'))<line_sep>assert_that(get_batch['changes'][1]['validationErrors'][0]['errorType'] is_('RecordRequiresManualReview'))<line_sep>assert_that(get_batch['changes'][2]['status'] is_('NeedsReview'))<line_sep>assert_that(get_batch['changes'][2]['validationErrors'][0]['errorType'] is_('RecordRequiresManualReview'))<line_sep># need to create the zone so the change can succeed zone={'name':'not.loaded.' 
'email':'<EMAIL>' 'adminGroupId':shared_zone_test_context.ok_group['id'] 'backendId':'func-test-backend' 'shared':<true>}<line_sep>zone_create=approver.create_zone(zone status=202)<line_sep>to_disconnect=zone_create['zone']<line_sep>approver.wait_until_zone_active(to_disconnect['id'])<line_sep>approved=approver.approve_batch_change(result['id'] status=202)<line_sep>completed_batch=client.wait_until_batch_change_completed(approved)<line_sep>to_delete=[(change['zoneId'] change['recordSetId'])<for>change completed_batch['changes']]<line_sep>assert_that(completed_batch['status'] is_('Complete'))<for_stmt>change completed_batch['changes']<block_start>assert_that(change['status'] is_('Complete'))<line_sep>assert_that(len(change['validationErrors']) is_(0))<block_end>assert_that(completed_batch['approvalStatus'] is_('ManuallyApproved'))<line_sep>assert_that(completed_batch['reviewerId'] is_('support-user-id'))<line_sep>assert_that(completed_batch['reviewerUserName'] is_('support-user'))<line_sep>assert_that(completed_batch has_key('reviewTimestamp'))<line_sep>assert_that(get_batch <not>(has_key('cancelledTimestamp')))<block_end><finally_stmt><block_start>clear_zoneid_rsid_tuple_list(to_delete client)<if_stmt>to_disconnect<block_start>approver.abandon_zones(to_disconnect['id'] status=202)<block_end><block_end><block_end>@pytest.mark.manual_batch_review<def_stmt>test_approve_pending_batch_change_fails_if_there_are_still_errors shared_zone_test_context<block_start>""" Test approving a batch change fails if there are still errors """<line_sep>client=shared_zone_test_context.ok_vinyldns_client<line_sep>approver=shared_zone_test_context.support_user_client<line_sep>batch_change_input={"changes":[get_change_A_AAAA_json("needs-review.nonexistent." 
address="4.3.2.1") get_change_A_AAAA_json("zone.does.not.exist.")] "ownerGroupId":shared_zone_test_context.ok_group['id']}<line_sep>complete_rs=<none><try_stmt><block_start>result=client.create_batch_change(batch_change_input status=202)<line_sep>get_batch=client.get_batch_change(result['id'])<line_sep>assert_that(get_batch['status'] is_('PendingReview'))<line_sep>assert_that(get_batch['approvalStatus'] is_('PendingReview'))<line_sep>assert_that(get_batch['changes'][0]['status'] is_('NeedsReview'))<line_sep>assert_that(get_batch['changes'][0]['validationErrors'][0]['errorType'] is_('RecordRequiresManualReview'))<line_sep>assert_that(get_batch['changes'][1]['status'] is_('NeedsReview'))<line_sep>assert_that(get_batch['changes'][1]['validationErrors'][0]['errorType'] is_('ZoneDiscoveryError'))<line_sep>approval_response=approver.approve_batch_change(result['id'] status=400)<line_sep>assert_that((approval_response[0]['errors'][0]) contains_string('Zone Discovery Failed'))<line_sep>assert_that((approval_response[1]['errors'][0]) contains_string('Zone Discovery Failed'))<line_sep>updated_batch=client.get_batch_change(result['id'] status=200)<line_sep>assert_that(updated_batch['status'] is_('PendingReview'))<line_sep>assert_that(updated_batch['approvalStatus'] is_('PendingReview'))<line_sep>assert_that(updated_batch <not>(has_key('reviewerId')))<line_sep>assert_that(updated_batch <not>(has_key('reviewerUserName')))<line_sep>assert_that(updated_batch <not>(has_key('reviewTimestamp')))<line_sep>assert_that(updated_batch <not>(has_key('cancelledTimestamp')))<line_sep>assert_that(updated_batch['changes'][0]['status'] is_('NeedsReview'))<line_sep>assert_that(updated_batch['changes'][0]['validationErrors'][0]['errorType'] is_('ZoneDiscoveryError'))<line_sep>assert_that(updated_batch['changes'][1]['status'] is_('NeedsReview'))<line_sep>assert_that(updated_batch['changes'][1]['validationErrors'][0]['errorType'] 
is_('ZoneDiscoveryError'))<block_end><finally_stmt><block_start><if_stmt>complete_rs<block_start>delete_result=client.delete_recordset(complete_rs['zoneId'] complete_rs['id'] status=202)<line_sep>client.wait_until_recordset_change_status(delete_result 'Complete')<block_end><block_end><block_end>@pytest.mark.manual_batch_review<def_stmt>test_approve_batch_change_with_invalid_batch_change_id_fails shared_zone_test_context<block_start>""" Test approving a batch change with invalid batch change ID """<line_sep>client=shared_zone_test_context.ok_vinyldns_client<line_sep>error=client.approve_batch_change("some-id" status=404)<line_sep>assert_that(error is_("Batch change with id some-id cannot be found"))<block_end>@pytest.mark.manual_batch_review<def_stmt>test_approve_batch_change_with_comments_exceeding_max_length_fails shared_zone_test_context<block_start>""" Test approving a batch change with comments exceeding 1024 characters fails """<line_sep>client=shared_zone_test_context.ok_vinyldns_client<line_sep>approve_batch_change_input={"reviewComment":"a"<times>1025}<line_sep>errors=client.approve_batch_change("some-id" approve_batch_change_input status=400)['errors']<line_sep>assert_that(errors contains_inanyorder("Comment length must not exceed 1024 characters."))<block_end>@pytest.mark.manual_batch_review<def_stmt>test_approve_batch_change_fails_with_forbidden_error_for_non_system_admins shared_zone_test_context<block_start>""" Test approving a batch change if the reviewer is not a super user or support user """<line_sep>client=shared_zone_test_context.ok_vinyldns_client<line_sep>batch_change_input={"changes":[get_change_A_AAAA_json("no-owner-group-id.ok." 
address="4.3.2.1")]}<line_sep>to_delete=[]<try_stmt><block_start>result=client.create_batch_change(batch_change_input status=202)<line_sep>completed_batch=client.wait_until_batch_change_completed(result)<line_sep>to_delete=[(change['zoneId'] change['recordSetId'])<for>change completed_batch['changes']]<line_sep>error=client.approve_batch_change(completed_batch['id'] status=403)<line_sep>assert_that(error is_("User does not have access to item "+completed_batch['id']))<block_end><finally_stmt><block_start>clear_zoneid_rsid_tuple_list(to_delete client)<block_end><block_end>
<import_stmt>numpy<as>np<import_from_stmt>collections defaultdict<import_from_stmt>.base Answer<import_from_stmt>.utils normalize_answer<class_stmt>SpanSelection<block_start><def_stmt>reset self<block_start><pass><block_end><def_stmt>score self span text<block_start><pass><block_end><def_stmt>add_answers self spans_by_text texts<block_start><pass><block_end><def_stmt>top_answers self num_spans<block_start><pass><block_end><def_stmt>__str__ self<block_start><pass><block_end><block_end><class_stmt>DprSelection(SpanSelection)<block_start><def_stmt>reset self<block_start>self.answers=[]<block_end><def_stmt>score self span text<block_start><return>float(span.relevance_score) float(span.span_score)<block_end><def_stmt>add_answers self spans_by_text texts<block_start><for_stmt>spans,text zip(spans_by_text texts)<block_start><for_stmt>span spans<block_start>self.answers.append(Answer(text=span.text context=text score=self.score(span text)))<block_end><block_end><block_end><def_stmt>top_answers self num_spans<block_start><return>sorted(self.answers reverse=<true> key=<lambda>answer:answer.score)[:num_spans]<block_end><def_stmt>__str__ self<block_start><return>'DPR'<block_end><block_end><class_stmt>DprFusionSelection(DprSelection)<block_start><def_stmt>__init__ self beta gamma<block_start>self.beta=float(beta)<line_sep>self.gamma=float(gamma)<block_end><def_stmt>score self span text<block_start><return>float(span.relevance_score)<times>self.beta+float(text.score)<times>self.gamma float(span.span_score)<block_end><def_stmt>__str__ self<block_start><return>f'DPR Fusion, beta={self.beta}, gamma={self.gamma}'<block_end><block_end><class_stmt>GarSelection(SpanSelection)<block_start><def_stmt>reset self<block_start>self.answers=defaultdict(int)<block_end><def_stmt>score self span text<block_start><return>float(span.relevance_score)<block_end><def_stmt>add_answers self spans_by_text texts<block_start>eD=np.exp(np.array([self.score(spans[0] text)<for>spans,text zip(spans_by_text 
texts)]))<for_stmt>i,spans enumerate(spans_by_text)<block_start>topn_spans=spans[:5]<line_sep>eSi=np.exp(np.array([float(span.span_score)<for>span topn_spans]))<line_sep>softmaxSi=list(eSi/np.sum(eSi))<for_stmt>j,span enumerate(topn_spans)<block_start>self.answers[normalize_answer(span.text)]<augadd>eD[i]<times>softmaxSi[j]<block_end><block_end><block_end><def_stmt>top_answers self num_spans<block_start>answers=sorted(list(self.answers.items()) reverse=<true> key=<lambda>answer:answer[1])[:num_spans]<line_sep><return>list(map(<lambda>answer:Answer(text=answer[0] score=answer[1]) answers))<block_end><def_stmt>__str__ self<block_start><return>'GAR'<block_end><block_end><class_stmt>GarFusionSelection(GarSelection)<block_start><def_stmt>__init__ self beta gamma<block_start>self.beta=float(beta)<line_sep>self.gamma=float(gamma)<block_end><def_stmt>score self span text<block_start><return>float(span.relevance_score)<times>self.beta+float(text.score)<times>self.gamma<block_end><def_stmt>__str__ self<block_start><return>f'GAR Fusion, beta={self.beta}, gamma={self.gamma}'<block_end><block_end>
<import_from_stmt>.config init_config get_key_path<line_sep>init_config()<import_from_stmt>.mjviewer MjViewer<import_from_stmt>.mjcore MjModel<import_from_stmt>.mjcore register_license<import_from_stmt>.mjconstants *<import_from_stmt>.platname_targdir targdir<line_sep>register_license(get_key_path())<line_sep>
# -*- coding: utf-8 -*- <import_from_stmt>workflow.steps.util.base BaseInstanceStep<import_stmt>os<import_stmt>logging<line_sep>LOG=logging.getLogger(__name__)<class_stmt>CantUpgradePatch(Exception)<block_start><pass><block_end><class_stmt>DatabaseUpgradePatchStep(BaseInstanceStep)<block_start><def_stmt>__init__ self instance<block_start>super(DatabaseUpgradePatchStep self).__init__(instance)<line_sep>upgrade=self.database.upgrades_patch.last()<if_stmt>upgrade<and>upgrade.is_running<block_start>self.target_patch=upgrade.target_patch<line_sep>self.source_patch=upgrade.source_patch<block_end><else_stmt><block_start>self.target_patch=self.infra.engine_patch<line_sep>self.source_patch=self.engine.default_engine_patch<block_end><block_end><def_stmt>__unicode__ self<block_start><return>"Changing database binaries..."<block_end>@property<def_stmt>patch_path_by_os self<block_start>pp=self.target_patch.patch_path<if_stmt>self.host.is_ol7<block_start>pp=self.target_patch.patch_path_ol7<or>pp<block_end><return>pp<block_end>@property<def_stmt>is_valid self<block_start><if_stmt>self.source_patch<eq>self.target_patch<block_start><return><false><block_end><if_stmt>self.source_patch.engine<ne>self.target_patch.engine<block_start>error="Can not change the Engine."<line_sep>error<augadd>" Source engine={}, targe engine={}".format(self.source_patch.engine self.target_patch.engine)<line_sep><raise>CantUpgradePatch(error)<block_end><if_stmt>self.source_patch.patch_version<g>self.target_patch.patch_version<block_start>error="Target patch must be bigger than source patch."<line_sep>error<augadd>" Source patch={}, targe patch={}".format(self.source_patch self.target_patch)<line_sep><raise>CantUpgradePatch(error)<block_end><if_stmt><not>self.patch_path_by_os<block_start>error="Patch path can not be empty."<line_sep><raise>CantUpgradePatch(error)<block_end><return><true><block_end><def_stmt>execute_script self script<block_start><raise>Exception("U must use the new method. 
run_script of HostSSH class")<line_sep>output={}<line_sep>return_code=exec_remote_command_host(self.host script output)<if_stmt>return_code<ne>0<block_start>error='Could not execute script {}: {}'.format(return_code output)<line_sep><raise>EnvironmentError(error)<block_end><block_end><def_stmt>undo self<block_start><pass><block_end><block_end><class_stmt>MongoDBCHGBinStep(DatabaseUpgradePatchStep)<block_start><def_stmt>do self<block_start><if_stmt><not>self.is_valid<block_start><return><block_end>patch_path=self.patch_path_by_os<line_sep>dir_name=os.path.splitext(os.path.basename(patch_path))[0]<if_stmt>self.patch_path_by_os.startswith('https')<block_start>download_script='curl {} | tar -xz'.format(patch_path)<block_end><else_stmt><block_start>download_script='tar -xvf {}'.format(patch_path)<block_end>script="""cd /usr/local/ {download_script} rm -f mongodb ln -s {dir_name} mongodb chown -R mongodb:mongodb mongodb/ """.format(download_script=download_script dir_name=dir_name)<line_sep># self.execute_script(script) self.host.ssh.run_script(script)<block_end><block_end><class_stmt>MongoDBCHGBinStepRollback(MongoDBCHGBinStep)<block_start><def_stmt>do self<block_start><pass><block_end><def_stmt>undo self<block_start>super(MongoDBCHGBinStepRollback self).do()<block_end><block_end><class_stmt>RedisCHGBinStep(DatabaseUpgradePatchStep)<block_start><def_stmt>do self<block_start><if_stmt><not>self.is_valid<block_start><return><block_end>patch_path=self.patch_path_by_os<line_sep>_,file_name=os.path.split(patch_path)<line_sep>dir_name=file_name.rsplit('.' 
2)[0]<if_stmt>self.patch_path_by_os.startswith('https')<block_start>download_script='curl {} | tar -xz'.format(patch_path)<block_end><else_stmt><block_start>download_script='tar -xvf {}'.format(patch_path)<block_end>script="""cd /usr/local/ {download_script} rm -f redis ln -s {dir_name} redis cd redis && make wget -P /usr/local/redis/src/ https://artifactory.globoi.com/artifactory/generic-local/db/redis/redis-trib-gcom.rb cd .. chown -R redis:redis redis/ """.format(download_script=download_script dir_name=dir_name)<line_sep># self.execute_script(script) self.host.ssh.run_script(script)<block_end><block_end><class_stmt>RedisCHGBinStepRollback(RedisCHGBinStep)<block_start><def_stmt>do self<block_start><pass><block_end><def_stmt>undo self<block_start>super(RedisCHGBinStepRollback self).do()<block_end><block_end><class_stmt>MySQLCHGBinStep(DatabaseUpgradePatchStep)<block_start><def_stmt>do self<block_start><if_stmt><not>self.is_valid<block_start><return><block_end>patch_path=self.patch_path_by_os<if_stmt>self.patch_path_by_os.startswith('https')<block_start>script=""" mkdir /tmp/mysql_patch/ wget -P /tmp/mysql_patch/ -r -nH --reject="index.html*" --no-parent --cut-dirs=8 {patch_path} yum -y localinstall --nogpgcheck /tmp/mysql_patch/*.rpm rm -rf /tmp/mysql_patch/ """.format(patch_path=patch_path)<block_end><else_stmt><block_start>script="""cd {patch_path} yum -y localinstall --nogpgcheck *.rpm """.format(patch_path=patch_path)<block_end># self.execute_script(script) self.host.ssh.run_script(script)<block_end><block_end><class_stmt>MySQLCHGBinStepRollback(MySQLCHGBinStep)<block_start><def_stmt>do self<block_start><pass><block_end><def_stmt>undo self<block_start>super(MySQLCHGBinStepRollback self).do()<block_end><block_end>
# Copyright 2020 The Netket Authors. - All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>netket legacy<as>nk<line_sep># 1D Lattice L=20<line_sep>g=nk.graph.Hypercube(length=L n_dim=1 pbc=<true>)<line_sep># Hilbert space of spins on the graph hi=nk.hilbert.Spin(s=1/2 N=g.n_nodes)<line_sep># Ising spin hamiltonian ha=nk.operator.Ising(hilbert=hi graph=g h=1.0)<line_sep># RBM Spin Machine ma=nk.nn.models.RBM(alpha=1)<line_sep># Metropolis Local Sampling sa=nk.sampler.MetropolisLocal(hi n_chains=32)<line_sep># Optimizer op=nk.optim.GradientDescent(learning_rate=0.1)<line_sep># Create the optimization driver vs=nk.variational_states.ClassicalVariationalState(ma sa n_samples=1000 n_discard=100)<line_sep>gs=nk.Vmc(ha op variational_state=vs)<line_sep># Run the optimization for 300 iterations gs.run(n_iter=2 out=<none>)<line_sep>gs.run(n_iter=300 out=<none>)<line_sep>
<import_from_stmt>.socfaker SocFaker<line_sep>
#from CAvmHGMCommon import * #from CAvmCommon import *
<import_stmt>os<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<line_sep>logger=logging.getLogger(__name__)<import_from_stmt>supervised.utils.config LOG_LEVEL<import_from_stmt>supervised.utils.common learner_name_to_fold_repeat<import_from_stmt>supervised.utils.metric Metric<line_sep>logger.setLevel(LOG_LEVEL)<import_stmt>matplotlib.pyplot<as>plt<import_stmt>matplotlib.colors<as>mcolors<line_sep>MY_COLORS=list(mcolors.TABLEAU_COLORS.values())<class_stmt>LearningCurves<block_start>output_file_name="learning_curves.png"<line_sep>@staticmethod<def_stmt>single_iteration learner_names model_path<block_start><for_stmt>ln learner_names<block_start>df=pd.read_csv(os.path.join(model_path f"{ln}_training.log") names=["iteration" "train" "test"] )<if_stmt>df.shape[0]<g>1<block_start><return><false><block_end><block_end><return><true><block_end>@staticmethod<def_stmt>plot learner_names metric_name model_path trees_in_iteration=<none><block_start>colors=MY_COLORS<if_stmt>len(learner_names)<g>len(colors)<block_start>repeat_colors=int(np.ceil(len(learner_names)/len(colors)))<line_sep>colors=colors<times>repeat_colors<block_end><if_stmt>LearningCurves.single_iteration(learner_names model_path)<block_start>LearningCurves.plot_single_iter(learner_names metric_name model_path colors)<block_end><else_stmt><block_start>LearningCurves.plot_iterations(learner_names metric_name model_path colors trees_in_iteration)<block_end><block_end>@staticmethod<def_stmt>plot_single_iter learner_names metric_name model_path colors<block_start>plt.figure(figsize=(10 7))<for_stmt>ln learner_names<block_start>df=pd.read_csv(os.path.join(model_path f"{ln}_training.log") names=["iteration" "train" "test"] )<line_sep>fold,repeat=learner_name_to_fold_repeat(ln)<line_sep>repeat_str=f" Reapeat {repeat+1},"<if>repeat<is><not><none><else>""<line_sep>plt.bar(f"Fold {fold+1},{repeat_str} train" df.train[0] color="white" edgecolor=colors[fold] )<line_sep>plt.bar(f"Fold {fold+1},{repeat_str} 
test" df.test[0] color=colors[fold])<block_end>plt.ylabel(metric_name)<line_sep>plt.xticks(rotation=90)<line_sep>plt.tight_layout(pad=2.0)<line_sep>plot_path=os.path.join(model_path LearningCurves.output_file_name)<line_sep>plt.savefig(plot_path)<line_sep>plt.close("all")<block_end>@staticmethod<def_stmt>plot_iterations learner_names metric_name model_path colors trees_in_iteration=<none><block_start>plt.figure(figsize=(10 7))<for_stmt>ln learner_names<block_start>df=pd.read_csv(os.path.join(model_path f"{ln}_training.log") names=["iteration" "train" "test"] )<line_sep>fold,repeat=learner_name_to_fold_repeat(ln)<line_sep>repeat_str=f" Reapeat {repeat+1},"<if>repeat<is><not><none><else>""<line_sep># if trees_in_iteration is not None: # df.iteration = df.iteration * trees_in_iteration any_none=np.sum(pd.isnull(df.train))<if_stmt>any_none<eq>0<block_start>plt.plot(df.iteration df.train "--" color=colors[fold] label=f"Fold {fold+1},{repeat_str} train" )<block_end>any_none=np.sum(pd.isnull(df.test))<if_stmt>any_none<eq>0<block_start>plt.plot(df.iteration df.test color=colors[fold] label=f"Fold {fold+1},{repeat_str} test" )<block_end>best_iter=<none><if_stmt>Metric.optimize_negative(metric_name)<block_start>best_iter=df.test.argmax()<block_end><else_stmt><block_start>best_iter=df.test.argmin()<block_end><if_stmt>best_iter<is><not><none><and>best_iter<ne>-1<block_start>plt.axvline(best_iter color=colors[fold] alpha=0.3)<block_end><block_end><if_stmt>trees_in_iteration<is><not><none><block_start>plt.xlabel("#Trees")<block_end><else_stmt><block_start>plt.xlabel("#Iteration")<block_end>plt.ylabel(metric_name)<line_sep># limit number of learners in the legend # too many will raise warnings <if_stmt>len(learner_names)<le>15<block_start>plt.legend(loc="best")<block_end>plt.tight_layout(pad=2.0)<line_sep>plot_path=os.path.join(model_path LearningCurves.output_file_name)<line_sep>plt.savefig(plot_path)<line_sep>plt.close("all")<block_end>@staticmethod<def_stmt>plot_for_ensemble 
scores metric_name model_path<block_start>plt.figure(figsize=(10 7))<line_sep>plt.plot(range(1 len(scores)+1) scores label=f"Ensemble")<line_sep>plt.xlabel("#Iteration")<line_sep>plt.ylabel(metric_name)<line_sep>plt.legend(loc="best")<line_sep>plot_path=os.path.join(model_path LearningCurves.output_file_name)<line_sep>plt.savefig(plot_path)<line_sep>plt.close("all")<block_end><block_end>
# -*- coding: utf-8 -*- # Copyright 2016-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - <NAME> <<EMAIL>>, 2016-2018 # - <NAME> <<EMAIL>>, 2016-2021 # - <NAME> <<EMAIL>>, 2016-2021 # - <NAME> <<EMAIL>>, 2018-2019 # - <NAME> <<EMAIL>>, 2019 # - <NAME> <<EMAIL>>, 2020 # - <NAME> <<EMAIL>>, 2019 # - <NAME> <<EMAIL>>, 2020 # - <NAME> <<EMAIL>>, 2020 # - <NAME> <<EMAIL>>, 2020-2021 # - <NAME> <<EMAIL>>, 2021 # - <NAME> <<EMAIL>>, 2021 ''' Dark Reaper is a daemon to manage quarantined file deletion. 
'''<import_stmt>hashlib<import_stmt>logging<import_stmt>os<import_stmt>random<import_stmt>socket<import_stmt>sys<import_stmt>threading<import_stmt>time<import_stmt>traceback<import_stmt>rucio.db.sqla.util<import_from_stmt>rucio.common exception<import_from_stmt>rucio.common.config config_get_bool<import_from_stmt>rucio.common.exception SourceNotFound DatabaseException ServiceUnavailable RSEAccessDenied ResourceTemporaryUnavailable RSENotFound VONotFound <import_from_stmt>rucio.common.logging setup_logging<import_from_stmt>rucio.common.utils daemon_sleep<import_from_stmt>rucio.core.heartbeat live die sanity_check<import_from_stmt>rucio.core.message add_message<import_from_stmt>rucio.core.quarantined_replica list_quarantined_replicas delete_quarantined_replicas list_rses <import_stmt>rucio.core.rse<as>rse_core<import_from_stmt>rucio.core.rse_expression_parser parse_expression<import_from_stmt>rucio.core.vo list_vos<import_from_stmt>rucio.rse rsemanager<as>rsemgr<line_sep>logging.getLogger("requests").setLevel(logging.CRITICAL)<line_sep>GRACEFUL_STOP=threading.Event()<def_stmt>reaper rses worker_number=0 total_workers=1 chunk_size=100 once=<false> scheme=<none> sleep_time=60<block_start>""" Main loop to select and delete files. :param rses: List of RSEs the reaper should work against. :param worker_number: The worker number. :param total_workers: The total number of workers. :param chunk_size: the size of chunk for deletion. :param once: If True, only runs one iteration of the main loop. :param scheme: Force the reaper to use a particular protocol, e.g., mock. :param sleep_time: Thread sleep time after each chunk of work. 
"""<line_sep>logging.info('Starting Dark Reaper %s-%s: Will work on RSEs: %s' worker_number total_workers ', '.join(rses))<line_sep>pid=os.getpid()<line_sep>thread=threading.current_thread()<line_sep>hostname=socket.gethostname()<line_sep>executable=' '.join(sys.argv)<line_sep>hash_executable=hashlib.sha256((sys.argv[0]+''.join(rses)).encode()).hexdigest()<line_sep>sanity_check(executable=<none> hostname=hostname)<while_stmt><not>GRACEFUL_STOP.is_set()<block_start><try_stmt># heartbeat <block_start>heartbeat=live(executable=executable hostname=hostname pid=pid thread=thread hash_executable=hash_executable)<line_sep>logging.info('Dark Reaper({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals()))<line_sep>nothing_to_do=<true><line_sep>start_time=time.time()<line_sep>rses_to_process=list(set(rses)&set(list_rses()))<line_sep>random.shuffle(rses_to_process)<for_stmt>rse_id rses_to_process<block_start>replicas=list_quarantined_replicas(rse_id=rse_id limit=chunk_size worker_number=worker_number total_workers=total_workers)<line_sep>rse_info=rsemgr.get_rse_info(rse_id=rse_id)<line_sep>rse=rse_info['rse']<line_sep>prot=rsemgr.create_protocol(rse_info 'delete' scheme=scheme)<line_sep>deleted_replicas=[]<try_stmt><block_start>prot.connect()<for_stmt>replica replicas<block_start>nothing_to_do=<false><line_sep>scope=''<if_stmt>replica['scope']<block_start>scope=replica['scope'].external<block_end><try_stmt><block_start>pfn=str(list(rsemgr.lfns2pfns(rse_settings=rse_info lfns=[{'scope':scope 'name':replica['name'] 'path':replica['path']}] operation='delete' scheme=scheme).values())[0])<line_sep>logging.info('Dark Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s' worker_number total_workers scope replica['name'] pfn rse)<line_sep>start=time.time()<line_sep>prot.delete(pfn)<line_sep>duration=time.time()-start<line_sep>logging.info('Dark Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds' worker_number total_workers scope 
replica['name'] pfn rse duration)<line_sep>payload={'scope':scope 'name':replica['name'] 'rse':rse 'rse_id':rse_id 'file-size':replica.get('bytes')<or>0 'bytes':replica.get('bytes')<or>0 'url':pfn 'duration':duration 'protocol':prot.attributes['scheme']}<if_stmt>replica['scope'].vo<ne>'def'<block_start>payload['vo']=replica['scope'].vo<block_end>add_message('deletion-done' payload)<line_sep>deleted_replicas.append(replica)<block_end><except_stmt>SourceNotFound<block_start>err_msg=('Dark Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s'%(worker_number total_workers scope replica['name'] pfn rse))<line_sep>logging.warning(err_msg)<line_sep>deleted_replicas.append(replica)<block_end><except_stmt>(ServiceUnavailable RSEAccessDenied ResourceTemporaryUnavailable)<as>error<block_start>err_msg=('Dark Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s'%(worker_number total_workers scope replica['name'] pfn rse str(error)))<line_sep>logging.warning(err_msg)<line_sep>payload={'scope':scope 'name':replica['name'] 'rse':rse 'rse_id':rse_id 'file-size':replica['bytes']<or>0 'bytes':replica['bytes']<or>0 'url':pfn 'reason':str(error) 'protocol':prot.attributes['scheme']}<if_stmt>replica['scope'].vo<ne>'def'<block_start>payload['vo']=replica['scope'].vo<block_end>add_message('deletion-failed' payload)<block_end><except_stmt>Exception<block_start>logging.critical(traceback.format_exc())<block_end><block_end><block_end><finally_stmt><block_start>prot.close()<block_end>delete_quarantined_replicas(rse_id=rse_id replicas=deleted_replicas)<if_stmt>once<block_start><break><block_end><block_end><if_stmt>once<block_start><break><block_end><if_stmt>nothing_to_do<block_start>logging.info('Dark Reaper %s-%s: Nothing to do' worker_number total_workers)<line_sep>daemon_sleep(start_time=start_time sleep_time=sleep_time graceful_stop=GRACEFUL_STOP)<block_end><block_end><except_stmt>DatabaseException<as>error<block_start>logging.warning('Reaper: %s' 
str(error))<block_end><except_stmt>Exception<block_start>logging.critical(traceback.format_exc())<block_end><block_end>die(executable=executable hostname=hostname pid=pid thread=thread hash_executable=hash_executable)<line_sep>logging.info('Graceful stop requested')<line_sep>logging.info('Graceful stop done')<line_sep><return><block_end><def_stmt>stop signum=<none> frame=<none><block_start>""" Graceful exit. """<line_sep>GRACEFUL_STOP.set()<block_end><def_stmt>run total_workers=1 chunk_size=100 once=<false> rses=[] scheme=<none> exclude_rses=<none> include_rses=<none> vos=<none> delay_seconds=0 sleep_time=60<block_start>""" Starts up the reaper threads. :param total_workers: The total number of workers. :param chunk_size: the size of chunk for deletion. :param once: If True, only runs one iteration of the main loop. :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs (Single-VO only). :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock. :param exclude_rses: RSE expression to exclude RSEs from the Reaper. :param include_rses: RSE expression to include RSEs. :param vos: VOs on which to look for RSEs. Only used in multi-VO mode. If None, we either use all VOs if run from "def", or the current VO otherwise. 
"""<line_sep>setup_logging()<if_stmt>rucio.db.sqla.util.is_old_db()<block_start><raise>exception.DatabaseException('Database was not updated, daemon won\'t start')<block_end>logging.info('main: starting processes')<line_sep>multi_vo=config_get_bool('common' 'multi_vo' raise_exception=<false> default=<false>)<if_stmt><not>multi_vo<block_start><if_stmt>vos<block_start>logging.warning('Ignoring argument vos, this is only applicable in a multi-VO setup.')<block_end>vos=['def']<block_end><else_stmt><block_start><if_stmt>vos<block_start>invalid=set(vos)-set([v['vo']<for>v list_vos()])<if_stmt>invalid<block_start>msg='VO{} {} cannot be found'.format('s'<if>len(invalid)<g>1<else>'' ', '.join([repr(v)<for>v invalid]))<line_sep><raise>VONotFound(msg)<block_end><block_end><else_stmt><block_start>vos=[v['vo']<for>v list_vos()]<block_end>logging.info('Dark Reaper: This instance will work on VO%s: %s'%('s'<if>len(vos)<g>1<else>'' ', '.join([v<for>v vos])))<block_end>all_rses=[]<for_stmt>vo vos<block_start>all_rses.extend([rse['id']<for>rse rse_core.list_rses(filters={'vo':vo})])<block_end><if_stmt>rses<block_start>invalid=set(rses)-set([rse['rse']<for>rse all_rses])<if_stmt>invalid<block_start>msg='RSE{} {} cannot be found'.format('s'<if>len(invalid)<g>1<else>'' ', '.join([repr(rse)<for>rse invalid]))<line_sep><raise>RSENotFound(msg)<block_end>rses=[rse<for>rse all_rses<if>rse['rse']<in>rses]<block_end><else_stmt><block_start>rses=all_rses<block_end><if_stmt>exclude_rses<block_start>excluded_rses=[rse['id']<for>rse parse_expression(exclude_rses)]<line_sep>rses=[rse<for>rse rses<if>rse<not><in>excluded_rses]<block_end><if_stmt>include_rses<block_start>included_rses=[rse['id']<for>rse parse_expression(include_rses)]<line_sep>rses=[rse<for>rse rses<if>rse<in>included_rses]<block_end><if_stmt><not>rses<block_start>logging.error('Dark Reaper: No RSEs found. 
Exiting.')<line_sep><return><block_end>threads=[]<for_stmt>worker range(total_workers)<block_start>kwargs={'worker_number':worker 'total_workers':total_workers 'rses':rses 'once':once 'chunk_size':chunk_size 'scheme':scheme 'sleep_time':sleep_time}<line_sep>threads.append(threading.Thread(target=reaper kwargs=kwargs name='Worker: %s, Total_Workers: %s'%(worker total_workers)))<block_end>[t.start()<for>t threads]<while_stmt>threads[0].is_alive()<block_start>[t.join(timeout=3.14)<for>t threads]<block_end><block_end>
# Generated by Django 3.0.1 on 2020-04-07 09:56

import datetime

from django.db import migrations, models
from django.utils.timezone import utc


class Migration(migrations.Migration):
    """Re-declare ``ScanTask.last_scan_time`` with its captured default."""

    dependencies = [
        ("index", "0002_scantask_last_scan_time"),
    ]

    operations = [
        migrations.AlterField(
            model_name="scantask",
            name="last_scan_time",
            # The default is the timestamp frozen by `makemigrations` when
            # this migration was generated; it is kept verbatim because
            # editing an already-applied migration would change history.
            field=models.DateTimeField(
                default=datetime.datetime(
                    2020, 4, 7, 9, 56, 27, 101552, tzinfo=utc
                )
            ),
        ),
    ]
import datetime as dt
import json
from typing import List, Optional
from uuid import UUID

from fastapi.encoders import jsonable_encoder
from injector import singleton, inject

from common.cache import fail_silently, hash_cache_key
from common.injection import Cache
from database.utils import map_to
from post.models import Post


@singleton
class PostCache:
    """Read/write-through cache for wall posts.

    Two kinds of keys are used:

    * ``posts:<post_id>`` — one JSON-serialized post per key;
    * ``walls:<wall_profile_id>:posts:<params-hash>`` — the ordered list of
      post ids matching one (wall, include_friends, older_than) query.

    All operations are wrapped in ``fail_silently`` so cache outages degrade
    to cache misses instead of failing the request.
    """

    # TTL applied to every cached entry (ids lists and individual posts).
    POSTS_EX: int = int(dt.timedelta(minutes=1).total_seconds())

    @inject
    def __init__(self, cache: Cache):
        self._cache = cache

    def _posts_ids_key(self, wall_profile_id: UUID, include_friends: bool,
                       older_than) -> str:
        # Key of the ids-list entry for one query; the query parameters are
        # folded into a hash so the key stays short and opaque.
        digest = hash_cache_key(wall_profile_id, include_friends, older_than)
        return f"walls:{wall_profile_id}:posts:{digest}"

    @fail_silently()
    async def get_posts(self, wall_profile_id: UUID, include_friends: bool,
                        older_than: dt.datetime) -> Optional[List[Post]]:
        """Return the cached post list for the query, or None on any miss."""
        raw_ids = await self._cache.get(
            self._posts_ids_key(wall_profile_id, include_friends, older_than))
        ids = json.loads(raw_ids) if raw_ids else raw_ids
        if not ids:
            return None
        raw_posts = await self._cache.mget(
            *[f"posts:{post_id}" for post_id in ids])
        # A single expired/evicted post invalidates the whole cached answer.
        if not all(raw_posts):
            return None
        return [map_to(json.loads(raw), Post) for raw in raw_posts]

    @fail_silently()
    async def get_post(self, post_id: UUID) -> Optional[Post]:
        """Return one cached post, or a falsy miss marker."""
        raw = await self._cache.get(f"posts:{post_id}")
        return map_to(json.loads(raw), Post) if raw else raw

    @fail_silently()
    async def set_post(self, post: Post) -> None:
        """Cache a single post under ``posts:<id>`` with the standard TTL."""
        await self._cache.set(f"posts:{post.id}",
                              json.dumps(jsonable_encoder(post)),
                              expire=PostCache.POSTS_EX)

    @fail_silently()
    async def set_posts(self, posts: List[Post], wall_profile_id: UUID,
                        include_friends: bool,
                        older_than: Optional[dt.date]) -> None:
        """Cache a query result: the ids list plus each individual post."""
        ids_key = self._posts_ids_key(wall_profile_id, include_friends,
                                      older_than)
        entries = [(f"posts:{post.id}", json.dumps(jsonable_encoder(post)))
                   for post in posts]
        flat = [token for pair in entries for token in pair]
        pipe = self._cache.pipeline()
        # One MSET for the ids list and every post, then per-key TTLs,
        # executed as a single round-trip.
        pipe.mset(ids_key,
                  json.dumps([str(post.id) for post in posts]),
                  *flat)
        for key in [ids_key, *[f"posts:{post.id}" for post in posts]]:
            pipe.expire(key, PostCache.POSTS_EX)
        await pipe.execute()

    @fail_silently()
    async def unset_posts_ids(self, wall_profile_id: UUID,
                              include_friends: bool,
                              older_than: Optional[dt.date]) -> None:
        """Drop the cached ids list for one query (posts stay cached)."""
        await self._cache.delete(
            self._posts_ids_key(wall_profile_id, include_friends, older_than))

    @fail_silently()
    async def unset_post(self, post_id: UUID) -> None:
        """Drop one cached post."""
        await self._cache.delete(f"posts:{post_id}")
# Code from Chapter 3 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by <NAME> (http://stephenmonika.net)

# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.

# <NAME>, 2008, 2014

import pylab as pl
import numpy as np

import pcn
import cPickle, gzip

# Load the MNIST pickle: (train, validation, test) splits, each a
# (images, labels) pair.
f = gzip.open('mnist.pkl.gz', 'rb')
tset, vset, teset = cPickle.load(f)
f.close()

# Keep the demo fast by using only the first few images of each split.
nread = 200

train_in = tset[0][:nread, :]

# 1-of-N (one-hot) encoding of the digit labels: row i gets a 1 in the
# column of its label.
train_tgt = np.zeros((nread, 10))
train_tgt[np.arange(nread), tset[1][:nread]] = 1

test_in = teset[0][:nread, :]
test_tgt = np.zeros((nread, 10))
test_tgt[np.arange(nread), teset[1][:nread]] = 1

# Train a perceptron on the (tiny) training set.
p = pcn.pcn(train_in, train_tgt)
p.pcntrain(train_in, train_tgt, 0.25, 100)

# Confusion matrix on the training data itself: not a generalisation
# measure, just evidence that the weights are learning.
p.confmat(train_in, train_tgt)

# Confusion matrix on held-out test images.
p.confmat(test_in, test_tgt)
from graph_explorer import structured_metrics


def test_load():
    """All structured-metrics plugins should load without reporting errors."""
    metrics = structured_metrics.StructuredMetrics()
    plugin_errors = metrics.load_plugins()
    assert len(plugin_errors) == 0
from typing import Type
import warnings
from base64 import b64encode
from html import escape
import json

import pandas as pd
import numpy as np
from rdkit import Chem
from rdkit.Chem import Draw

from .utils import (env, requires, tooltip_formatter, mol_to_record,
                    mol_to_smiles, sdf_to_dataframe, remove_coordinates)
from .select import register

try:
    from IPython.display import HTML, Javascript
except ModuleNotFoundError:
    pass
else:
    warnings.filterwarnings("ignore",
                            "Consider using IPython.display.IFrame instead")


class MolGrid:
    """Class that handles drawing molecules, rendering the HTML document and
    saving or displaying it in a notebook
    """

    def __init__(self, df, smiles_col="SMILES", mol_col=None, removeHs=False,
                 use_coords=True, coordGen=True, useSVG=True, size=(160, 120),
                 MolDrawOptions=None, rename=None, name="default", **kwargs):
        """
        Parameters
        ----------
        df : pandas.DataFrame or dict or list
            Dataframe containing a SMILES or mol column, or dictionary
            containing a list of SMILES, or list of dictionnaries containing
            a SMILES field
        smiles_col : str or None
            Name of the SMILES column in the dataframe, if available
        mol_col : str or None
            Name of an RDKit molecule column. If available, coordinates and
            atom/bonds annotations from this will be used for depiction
        removeHs : bool
            Remove hydrogen atoms from the drawings
        use_coords : bool
            Use the existing coordinates of the molecule
        coordGen : bool
            Prefer the CoordGen library to the default RDKit depiction library
        useSVG : bool
            Use SVG instead of PNG
        size : tuple
            The size of the drawing canvas
        MolDrawOptions : rdkit.Chem.Draw.MolDrawOptions or None
            Drawing options. Useful for making highly customized drawings
        rename : dict or None
            Rename the properties/fields stored in the molecule
        name : str
            Name of the grid. Used when retrieving selections from multiple
            grids at the same time
        kwargs : object
            MolDrawOptions attributes

        Notes
        -----
        ..versionchanged: 0.1.0
            Added `rename` argument to replace `mapping`
        """
        if not (smiles_col or mol_col):
            raise ValueError("One of `smiles_col` or `mol_col` must be set")
        if not isinstance(name, str):
            raise TypeError(
                f"`name` must be a string. Currently of type "
                f"{type(name).__name__}")
        Draw.rdDepictor.SetPreferCoordGen(coordGen)
        if isinstance(df, pd.DataFrame):
            dataframe = df.copy()
        else:
            # list of dicts or other input formats for dataframes
            dataframe = pd.DataFrame(df)
        # `mapping` is the deprecated spelling of `rename`
        mapping = kwargs.pop("mapping", None)
        if mapping:
            warnings.warn("`mapping` is deprecated and will be removed soon. "
                          "Consider using `rename` in the future.")
        rename = rename or mapping
        if rename:
            dataframe.rename(columns=rename, inplace=True)
        # columns added by mols2grid, stripped again by `get_selection`
        self._extra_columns = ["img", "mols2grid-id"]
        # generate temporary RDKit molecules
        if smiles_col and not mol_col:
            mol_col = "mol"
            keep_mols = False
            dataframe[mol_col] = dataframe[smiles_col].apply(
                Chem.MolFromSmiles)
        else:
            keep_mols = True
        # remove hydrogens
        if removeHs:
            dataframe[mol_col] = dataframe[mol_col].apply(Chem.RemoveHs)
        if not use_coords:
            dataframe[mol_col] = dataframe[mol_col].apply(remove_coordinates)
        # generate the SMILES column if only molecules were provided
        if mol_col and (smiles_col not in dataframe.columns):
            dataframe[smiles_col] = dataframe[mol_col].apply(mol_to_smiles)
        # internal, stable row identifier
        dataframe["mols2grid-id"] = list(range(len(dataframe)))
        # drop rows whose molecule failed to parse
        dataframe.dropna(axis=0, subset=[mol_col], inplace=True)
        # generate drawings
        self.useSVG = useSVG
        opts = MolDrawOptions or Draw.MolDrawOptions()
        for key, value in kwargs.items():
            setattr(opts, key, value)
        self.MolDrawOptions = opts
        self._MolDraw2D = Draw.MolDraw2DSVG if useSVG else Draw.MolDraw2DCairo
        self.img_size = size
        dataframe["img"] = dataframe[mol_col].apply(self.mol_to_img)
        if keep_mols:
            self.dataframe = dataframe
        else:
            # molecules were temporary: drop them to keep the grid light
            self.dataframe = dataframe.drop(columns=mol_col)
            mol_col = None
        self.smiles_col = smiles_col
        self.mol_col = mol_col
        # register instance so selections can be retrieved by grid name
        self._grid_id = name
        register._init_grid(name)

    @classmethod
    def from_mols(cls, mols, **kwargs):
        """Set up the dataframe used by mols2grid directly from a list of
        RDKit molecules

        Parameters
        ----------
        mols : list
            List of RDKit molecules
        kwargs : object
            Other arguments passed on initialization
        """
        mol_col = kwargs.pop("mol_col", "mol")
        df = pd.DataFrame([mol_to_record(mol, mol_col=mol_col)
                           for mol in mols])
        return cls(df, mol_col=mol_col, **kwargs)

    @classmethod
    def from_sdf(cls, sdf_file, **kwargs):
        """Set up the dataframe used by mols2grid directly from an SDFile

        Parameters
        ----------
        sdf_file : str
            Path to the SDF file
        kwargs : object
            Other arguments passed on initialization
        """
        mol_col = kwargs.pop("mol_col", "mol")
        df = sdf_to_dataframe(sdf_file, mol_col=mol_col)
        return cls(df, mol_col=mol_col, **kwargs)

    @property
    def template(self):
        """Kind of grid displayed, one of:
            - pages
            - table
        """
        return self._template

    @template.setter
    def template(self, value):
        if value not in ["pages", "table"]:
            raise ValueError(f"template={value!r} not supported. "
                             "Use one of 'pages' or 'table'")
        self._template = value

    def draw_mol(self, mol):
        """Draw a molecule and return the raw drawing text (SVG or PNG)."""
        d2d = self._MolDraw2D(*self.img_size)
        d2d.SetDrawOptions(self.MolDrawOptions)
        # atoms matched by a substructure search, if any
        hl_atoms = getattr(mol, "__sssAtoms", [])
        d2d.DrawMolecule(mol, highlightAtoms=hl_atoms)
        d2d.FinishDrawing()
        return d2d.GetDrawingText()

    def mol_to_img(self, mol):
        """Convert an RDKit mol to an HTML image containing a drawing of the
        molecule"""
        img = self.draw_mol(mol)
        if self.useSVG:
            return img
        # PNG bytes are embedded as a base64 data URI
        data = b64encode(img).decode()
        return f'<img src="data:image/png;base64,{data}">'

    def render(self, template="pages", **kwargs):
        """Returns the HTML document corresponding to the "pages" or "table"
        template. See `to_pages` and `to_table` for the list of arguments

        Parameters
        ----------
        template : str
            Kind of grid to draw:
                * "table" is a very simple table where all molecules are
                  displayed on the document, the main usecase is printing to
                  PDF or on paper.
                * "pages" is a more interactive version that splits the
                  original data into several pages.
        """
        self.template = template
        return getattr(self, f"to_{self.template}")(**kwargs)

    def to_pages(self, subset=None, tooltip=None,
                 cell_width=160, n_cols=5, n_rows=3,
                 border="1px solid #cccccc", gap=0,
                 fontsize="12pt", fontfamily="'DejaVu', sans-serif",
                 textalign="center",
                 tooltip_fmt="<strong>{key}</strong>: {value}",
                 tooltip_trigger="click hover", tooltip_placement="bottom",
                 hover_color="#e7e7e7", style=None, selection=True,
                 transform=None, custom_css=None, custom_header=None,
                 callback=None, sort_by=None):
        """Returns the HTML document for the "pages" template

        Parameters
        ----------
        subset : list or None
            Columns to be displayed in each cell of the grid. Use `"img"`
            for the image of the molecule.
            Default: all columns (with "img" in first position)
        tooltip : list or None
            Columns to be displayed as a tooltip when hovering/clicking on
            the image of a cell. Use `None` for no tooltip.
        tooltip_fmt : str
            Format string of each key/value pair in the tooltip
        tooltip_trigger : str
            Sequence of triggers for the tooltip: (click, hover, focus)
        tooltip_placement : str
            Position of the tooltip: auto, top, bottom, left, right
        n_cols : int
            Number of columns per page
        n_rows : int
            Number of rows per page
        border : str
            Styling of the border around each cell (CSS)
        gap : int
            Size of the margin around each cell (CSS)
        fontsize : str
            Font size of the text displayed in each cell (CSS)
        fontfamily : str
            Font used for the text in each cell (CSS)
        textalign : str
            Alignment of the text in each cell (CSS)
        hover_color : str
            Background color when hovering a cell (CSS)
        style : dict or None
            CSS styling applied to specific items in all cells. Maps a
            column name from `subset`/`tooltip` (or `"__all__"` for the
            whole cell) to a function producing a CSS string from the value.
        selection : bool
            Enables the selection of molecules and displays a checkbox at
            the top of each cell (Jupyter only; see
            `mols2grid.get_selection()`)
        transform : dict or None
            Functions applied to specific items in all cells before display.
            Only affects `subset` and `tooltip`, never `style`.
        custom_css : str or None
            Custom CSS properties applied to the content of the HTML document
        custom_header : str or None
            Custom libraries to be loaded in the header of the document
        callback : str or callable
            JavaScript or Python callback executed when clicking on an
            image. All values in the `data` dict are strings, except
            "mols2grid-id" which is always an integer.
        sort_by : str or None
            Sort the grid according to the given field (must be present in
            `subset` or `tooltip`).
        """
        if self.mol_col:
            df = self.dataframe.drop(columns=self.mol_col).copy()
        else:
            df = self.dataframe.copy()
        # NOTE: the cell_width argument is currently overridden by the
        # drawing width chosen at construction time.
        cell_width = self.img_size[0]
        smiles = self.smiles_col
        content = []
        column_map = {}
        width = n_cols * (cell_width + 2 * (gap + 2))

        if subset is None:
            subset = df.columns.tolist()
            subset = [subset.pop(subset.index("img"))] + subset

        # define fields that are searchable and sortable
        search_cols = [f"data-{col}" for col in subset if col != "img"]
        if tooltip:
            search_cols.append("mols2grid-tooltip")
            sort_cols = search_cols[:-1]
            sort_cols.extend([f"data-{col}" for col in tooltip])
            # tooltip-only columns still need a (hidden) div to carry data
            for col in tooltip:
                if col not in subset:
                    s = (f'<div class="data data-{col}" '
                         'style="display: none;"></div>')
                    content.append(s)
                    column_map[col] = f"data-{col}"
        else:
            sort_cols = search_cols[:]
        sort_cols = ["mols2grid-id"] + sort_cols
        # get unique list but keep order
        sort_cols = list(dict.fromkeys(sort_cols))
        if style is None:
            style = {}
        if transform is None:
            transform = {}
        if tooltip is None:
            tooltip = []
        value_names = list(set(subset + [smiles] + tooltip))
        value_names = [f"data-{col}" for col in value_names]
        # force id, SMILES, and tooltip values to be present in the data
        final_columns = subset[:]
        final_columns.extend(["mols2grid-id", smiles])
        if tooltip:
            final_columns.extend(tooltip)
        final_columns = list(set(final_columns))
        # make a copy if id shown explicitely
        if "mols2grid-id" in subset:
            id_name = "mols2grid-id-copy"
            df[id_name] = df["mols2grid-id"]
            value_names.append(f"data-{id_name}")
            final_columns.append(id_name)
            subset = [id_name if x == "mols2grid-id" else x for x in subset]
        # organize data
        for col in subset:
            if col == "img" and tooltip:
                s = (f'<a tabindex="0" class="data data-{col} '
                     'mols2grid-tooltip" data-toggle="popover" '
                     'data-content="foo"></a>')
            else:
                if style.get(col):
                    s = (f'<div class="data data-{col} style-{col}" '
                         'style=""></div>')
                else:
                    s = f'<div class="data data-{col}"></div>'
            content.append(s)
            column_map[col] = f"data-{col}"
        # add but hide SMILES div if not present
        if smiles not in (subset + tooltip):
            s = f'<div class="data data-{smiles}" style="display: none;"></div>'
            content.append(s)
            column_map[smiles] = f"data-{smiles}"
        # set mapping for list.js
        if "__all__" in style.keys():
            whole_cell_style = True
            x = "[{data: ['mols2grid-id', 'cellstyle']}, "
        else:
            whole_cell_style = False
            x = "[{data: ['mols2grid-id']}, "
        value_names = x + str(value_names)[1:]
        # apply CSS styles
        for col, func in style.items():
            if col == "__all__":
                name = "cellstyle"
                df[name] = df.apply(func, axis=1)
            else:
                name = f"style-{col}"
                df[name] = df[col].apply(func)
            final_columns.append(name)
            value_names = (value_names[:-1] +
                           f", {{ attr: 'style', name: {name!r} }}]")
        if tooltip:
            df["mols2grid-tooltip"] = df.apply(tooltip_formatter, axis=1,
                                               args=(tooltip, tooltip_fmt,
                                                     style, transform))
            final_columns = final_columns + ["mols2grid-tooltip"]
            value_names = (value_names[:-1] +
                           ", {attr: 'data-content', "
                           "name: 'mols2grid-tooltip'}]")
        # apply custom user function
        for col, func in transform.items():
            df[col] = df[col].apply(func)
        if selection:
            checkbox = ('<input type="checkbox" '
                        'class="position-relative float-left">')
        else:
            checkbox = ""
        if whole_cell_style:
            item = ('<div class="cell" data-mols2grid-id="0" '
                    'data-cellstyle="0">{checkbox}{content}</div>')
        else:
            item = ('<div class="cell" data-mols2grid-id="0">'
                    '{checkbox}{content}</div>')
        item = item.format(checkbox=checkbox, content="".join(content))
        # callback
        if callable(callback):
            if callback.__name__ == "<lambda>":
                raise TypeError(
                    "Lambda functions are not supported as callbacks. Please "
                    "use a regular function instead.")
            callback_type = "python"
            callback = callback.__name__
        else:
            callback_type = "js"
        if sort_by and sort_by != "mols2grid-id":
            if sort_by in (subset + tooltip):
                sort_by = f"data-{sort_by}"
            else:
                raise ValueError(f"{sort_by} is not an available field in "
                                 "`subset` or `tooltip`")
        else:
            sort_by = "mols2grid-id"
        df = df[final_columns].rename(columns=column_map).sort_values(sort_by)
        template = env.get_template('pages.html')
        template_kwargs = dict(
            width=width,
            border=border,
            textalign=textalign,
            cell_width=cell_width,
            fontfamily=fontfamily,
            fontsize=fontsize,
            gap=gap,
            hover_color=hover_color,
            item=item,
            item_repr=repr(item),
            value_names=value_names,
            tooltip=tooltip,
            tooltip_trigger=repr(tooltip_trigger),
            tooltip_placement=repr(tooltip_placement),
            n_items_per_page=n_rows * n_cols,
            search_cols=search_cols,
            data=json.dumps(df.to_dict("records")),
            selection=selection,
            smiles_col=smiles,
            sort_cols=sort_cols,
            grid_id=self._grid_id,
            whole_cell_style=whole_cell_style,
            custom_css=custom_css or "",
            custom_header=custom_header or "",
            callback=callback,
            callback_type=callback_type,
            sort_by=sort_by,
        )
        return template.render(**template_kwargs)

    def get_selection(self):
        """Retrieve the dataframe subset corresponding to your selection

        Returns
        -------
        pandas.DataFrame
        """
        sel = list(register.get_selection().keys())
        return (self.dataframe.loc[self.dataframe["mols2grid-id"].isin(sel)]
                              .drop(columns=self._extra_columns))

    def filter(self, mask):
        """Filters the grid using a mask (boolean array)

        Parameters
        ----------
        mask : list, pd.Series, np.ndarray
            Boolean array: `True` when the item should be displayed, `False`
            if it should be filtered out.
        """
        # convert mask to mols2grid-id
        ids = self.dataframe.loc[mask]["mols2grid-id"]
        return self._filter_by_id(ids)

    def filter_by_index(self, indices):
        """Filters the grid using the dataframe's index"""
        # convert index to mols2grid-id
        ids = self.dataframe.loc[self.dataframe.index.isin(indices)][
            "mols2grid-id"]
        return self._filter_by_id(ids)

    def _filter_by_id(self, ids):
        """Filters the grid using the values in the `mols2grid-id` column"""
        if isinstance(ids, (pd.Series, np.ndarray)):
            ids = ids.to_list()
        code = env.get_template('js/filter.js').render(
            grid_id=self._grid_id, ids=ids)
        return Javascript(code)

    def to_table(self, subset=None, tooltip=None, n_cols=6,
                 cell_width=160, border="1px solid #cccccc", gap=0,
                 fontsize="12pt", fontfamily="'DejaVu', sans-serif",
                 textalign="center",
                 tooltip_fmt="<strong>{key}</strong>: {value}",
                 tooltip_trigger="click hover", tooltip_placement="bottom",
                 hover_color="#e7e7e7", style=None, transform=None):
        """Returns the HTML document for the "table" template

        Parameters
        ----------
        subset : list or None
            Columns to be displayed in each cell of the grid. Use `"img"`
            for the image of the molecule.
            Default: all columns (with "img" in first position)
        tooltip : list or None
            Columns to be displayed as a tooltip when hovering/clicking on
            the image of a cell. Use `None` for no tooltip.
        tooltip_fmt : str
            Format string of each key/value pair in the tooltip
        tooltip_trigger : str
            Sequence of triggers for the tooltip: (click, hover, focus)
        tooltip_placement : str
            Position of the tooltip: auto, top, bottom, left, right
        n_cols : int
            Number of columns in the table
        border : str
            Styling of the border around each cell (CSS)
        gap : int or str
            Size of the margin around each cell (CSS)
        fontsize : str
            Font size of the text displayed in each cell (CSS)
        fontfamily : str
            Font used for the text in each cell (CSS)
        textalign : str
            Alignment of the text in each cell (CSS)
        hover_color : str
            Background color when hovering a cell (CSS)
        style : dict or None
            CSS styling applied to specific items in all cells (see
            `to_pages`)
        transform : dict or None
            Functions applied to specific items in all cells before display
            (see `to_pages`)
        """
        tr = []
        data = []
        df = self.dataframe
        # NOTE: the cell_width argument is currently overridden by the
        # drawing width chosen at construction time.
        cell_width = self.img_size[0]
        if subset is None:
            subset = df.columns.tolist()
            subset = [subset.pop(subset.index("img"))] + subset
        if style is None:
            style = {}
        if transform is None:
            transform = {}
        for i, row in df.iterrows():
            ncell = i + 1
            nrow, ncol = divmod(i, n_cols)
            # FIX: the closing quote of the class attribute was previously
            # emitted outside the attribute (`<td class="col-0>"`), producing
            # malformed HTML
            td = [f'<td class="col-{ncol}">']
            if "__all__" in style.keys():
                s = style["__all__"](row)
                div = [f'<div class="cell-{i}" style="{s}">']
            else:
                div = [f'<div class="cell-{i}">']
            for col in subset:
                v = row[col]
                if col == "img" and tooltip:
                    popover = tooltip_formatter(row, tooltip, tooltip_fmt,
                                                style, transform)
                    func = transform.get(col)
                    v = func(v) if func else v
                    item = (f'<div class="data data-{col} mols2grid-tooltip" '
                            'data-toggle="popover" '
                            f'data-content="{escape(popover)}">{v}</div>')
                else:
                    func = style.get(col)
                    if func:
                        item = (f'<div class="data data-{col}" '
                                f'style="{func(v)}">')
                    else:
                        item = f'<div class="data data-{col}">'
                    func = transform.get(col)
                    v = func(v) if func else v
                    item += f'{v}</div>'
                div.append(item)
            div.append("</div>")
            td.append("\n".join(div))
            td.append("</td>")
            tr.append("\n".join(td))
            # close the row when it is full or on the last record
            if (ncell % n_cols == 0) or (ncell == len(df)):
                cell = [f'<tr class="row-{nrow}">']
                cell.append("\n".join(tr))
                cell.append("</tr>")
                data.append("\n".join(cell))
                tr = []
        template = env.get_template('table.html')
        template_kwargs = dict(
            border=border,
            textalign=textalign,
            cell_width=cell_width,
            fontfamily=fontfamily,
            fontsize=fontsize,
            gap=gap,
            hover_color=hover_color,
            tooltip=tooltip,
            tooltip_trigger=repr(tooltip_trigger),
            tooltip_placement=repr(tooltip_placement),
            data="\n".join(data),
        )
        return template.render(**template_kwargs)

    @requires("IPython.display")
    def display(self, width="100%", height=None,
                iframe_allow="clipboard-write", **kwargs):
        """Render and display the grid in a Jupyter notebook"""
        doc = self.render(**kwargs)
        iframe = (env.get_template("html/iframe.html")
                     .render(width=width, height=height, padding=18,
                             allow=iframe_allow, doc=escape(doc)))
        return HTML(iframe)

    def save(self, output, **kwargs):
        """Render and save the grid in an HTML document"""
        with open(output, "w") as f:
            f.write(self.render(**kwargs))
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

def test(name, input0, input1, input2, output0,
         input0_data, input1_data, input2_data, output_data):
    """Register one SELECT test case: condition tensor `input0` picks
    element-wise between `input1` and `input2`, with int32/float16/relaxed
    and quant8 variations."""
    model = Model().Operation("SELECT", input0, input1, input2).To(output0)
    quant8 = DataTypeConverter().Identify({
        input1: ["TENSOR_QUANT8_ASYMM", 1.5, 129],
        input2: ["TENSOR_QUANT8_ASYMM", 0.5, 127],
        output0: ["TENSOR_QUANT8_ASYMM", 1.0, 128],
    })
    Example({
        input0: input0_data,
        input1: input1_data,
        input2: input2_data,
        output0: output_data,
    }, model=model, name=name).AddVariations("int32", "float16", "relaxed",
                                             quant8)


test(
    name="one_dim",
    input0=Input("input0", "TENSOR_BOOL8", "{3}"),
    input1=Input("input1", "TENSOR_FLOAT32", "{3}"),
    input2=Input("input2", "TENSOR_FLOAT32", "{3}"),
    output0=Output("output0", "TENSOR_FLOAT32", "{3}"),
    input0_data=[True, False, True],
    input1_data=[1, 2, 3],
    input2_data=[4, 5, 6],
    output_data=[1, 5, 3],
)

test(
    name="two_dim",
    input0=Input("input0", "TENSOR_BOOL8", "{2, 2}"),
    input1=Input("input1", "TENSOR_FLOAT32", "{2, 2}"),
    input2=Input("input2", "TENSOR_FLOAT32", "{2, 2}"),
    output0=Output("output0", "TENSOR_FLOAT32", "{2, 2}"),
    input0_data=[False, True, False, True],
    input1_data=[1, 2, 3, 4],
    input2_data=[5, 6, 7, 8],
    output_data=[5, 2, 7, 4],
)

test(
    name="five_dim",
    input0=Input("input0", "TENSOR_BOOL8", "{2, 1, 2, 1, 2}"),
    input1=Input("input1", "TENSOR_FLOAT32", "{2, 1, 2, 1, 2}"),
    input2=Input("input2", "TENSOR_FLOAT32", "{2, 1, 2, 1, 2}"),
    output0=Output("output0", "TENSOR_FLOAT32", "{2, 1, 2, 1, 2}"),
    input0_data=[True, False, True, False, True, False, True, False],
    input1_data=[1, 2, 3, 4, 5, 6, 7, 8],
    input2_data=[9, 10, 11, 12, 13, 14, 15, 16],
    output_data=[1, 10, 3, 12, 5, 14, 7, 16],
)
# Copyright 2007 <NAME>. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, # OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are # those of the authors and should not be interpreted as representing official # policies, either expressed or implied, of Matt Chaput. <import_from_future_stmt> division<import_stmt>copy<import_from_stmt>whoosh matching<import_from_stmt>whoosh.analysis Token<import_from_stmt>whoosh.compat u<import_from_stmt>whoosh.query qcore terms compound<class_stmt>Sequence(compound.CompoundQuery)<block_start>"""Matches documents containing a list of sub-queries in adjacent positions. This object has no sanity check to prevent you from using queries in different fields. 
"""<line_sep>JOINT=" NEAR "<line_sep>intersect_merge=<true><def_stmt>__init__ self subqueries slop=1 ordered=<true> boost=1.0<block_start>""" :param subqueries: a list of :class:`whoosh.query.Query` objects to match in sequence. :param slop: the maximum difference in position allowed between the subqueries. :param ordered: if True, the position differences between subqueries must be positive (that is, each subquery in the list must appear after the previous subquery in the document). :param boost: a boost factor to add to the score of documents matching this query. """<line_sep>compound.CompoundQuery.__init__(self subqueries boost=boost)<line_sep>self.slop=slop<line_sep>self.ordered=ordered<block_end><def_stmt>__eq__ self other<block_start><return>(other<and>type(self)<is>type(other)<and>self.subqueries<eq>other.subqueries<and>self.boost<eq>other.boost)<block_end><def_stmt>__repr__ self<block_start><return>"%s(%r, slop=%d, boost=%f)"%(self.__class__.__name__ self.subqueries self.slop self.boost)<block_end><def_stmt>__hash__ self<block_start>h=hash(self.slop)^hash(self.boost)<for_stmt>q self.subqueries<block_start>h<augxor>hash(q)<block_end><return>h<block_end><def_stmt>normalize self# Because the subqueries are in sequence, we can't do the fancy merging # that CompoundQuery does <block_start><return>self.__class__([q.normalize()<for>q self.subqueries] self.slop self.ordered self.boost)<block_end><def_stmt>_and_query self<block_start><return>compound.And(self.subqueries)<block_end><def_stmt>estimate_size self ixreader<block_start><return>self._and_query().estimate_size(ixreader)<block_end><def_stmt>estimate_min_size self ixreader<block_start><return>self._and_query().estimate_min_size(ixreader)<block_end><def_stmt>_matcher self subs searcher context<block_start><import_from_stmt>whoosh.query.spans SpanNear<line_sep># Tell the sub-queries this matcher will need the current match to get # spans 
context=context.set(needs_current=<true>)<line_sep>m=self._tree_matcher(subs SpanNear.SpanNearMatcher searcher context <none> slop=self.slop ordered=self.ordered)<line_sep><return>m<block_end><block_end><class_stmt>Ordered(Sequence)<block_start>"""Matches documents containing a list of sub-queries in the given order. """<line_sep>JOINT=" BEFORE "<def_stmt>_matcher self subs searcher context<block_start><import_from_stmt>whoosh.query.spans SpanBefore<line_sep><return>self._tree_matcher(subs SpanBefore._Matcher searcher context <none>)<block_end><block_end><class_stmt>Phrase(qcore.Query)<block_start>"""Matches documents containing a given phrase."""<def_stmt>__init__ self fieldname words slop=1 boost=1.0 char_ranges=<none><block_start>""" :param fieldname: the field to search. :param words: a list of words (unicode strings) in the phrase. :param slop: the number of words allowed between each "word" in the phrase; the default of 1 means the phrase must match exactly. :param boost: a boost factor that to apply to the raw score of documents matched by this query. 
:param char_ranges: if a Phrase object is created by the query parser, it will set this attribute to a list of (startchar, endchar) pairs corresponding to the words in the phrase """<line_sep>self.fieldname=fieldname<line_sep>self.words=words<line_sep>self.slop=slop<line_sep>self.boost=boost<line_sep>self.char_ranges=char_ranges<block_end><def_stmt>__eq__ self other<block_start><return>(other<and>self.__class__<is>other.__class__<and>self.fieldname<eq>other.fieldname<and>self.words<eq>other.words<and>self.slop<eq>other.slop<and>self.boost<eq>other.boost)<block_end><def_stmt>__repr__ self<block_start><return>"%s(%r, %r, slop=%s, boost=%f)"%(self.__class__.__name__ self.fieldname self.words self.slop self.boost)<block_end><def_stmt>__unicode__ self<block_start><return>u('%s:"%s"')%(self.fieldname u(" ").join(self.words))<block_end>__str__=__unicode__<def_stmt>__hash__ self<block_start>h=hash(self.fieldname)^hash(self.slop)^hash(self.boost)<for_stmt>w self.words<block_start>h<augxor>hash(w)<block_end><return>h<block_end><def_stmt>has_terms self<block_start><return><true><block_end><def_stmt>terms self phrases=<false><block_start><if_stmt>phrases<and>self.field()<block_start><for_stmt>word self.words<block_start><yield>(self.field() word)<block_end><block_end><block_end><def_stmt>tokens self boost=1.0<block_start>char_ranges=self.char_ranges<line_sep>startchar=endchar=<none><for_stmt>i,word enumerate(self.words)<block_start><if_stmt>char_ranges<block_start>startchar,endchar=char_ranges[i]<block_end><yield>Token(fieldname=self.fieldname text=word boost=boost<times>self.boost startchar=startchar endchar=endchar chars=<true>)<block_end><block_end><def_stmt>normalize self<block_start><if_stmt><not>self.words<block_start><return>qcore.NullQuery<block_end><if_stmt>len(self.words)<eq>1<block_start>t=terms.Term(self.fieldname self.words[0])<if_stmt>self.char_ranges<block_start>t.startchar,t.endchar=self.char_ranges[0]<block_end><return>t<block_end>words=[w<for>w 
self.words<if>w<is><not><none>]<line_sep><return>self.__class__(self.fieldname words slop=self.slop boost=self.boost char_ranges=self.char_ranges)<block_end><def_stmt>replace self fieldname oldtext newtext<block_start>q=copy.copy(self)<if_stmt>q.fieldname<eq>fieldname<block_start><for_stmt>i,word enumerate(q.words)<block_start><if_stmt>word<eq>oldtext<block_start>q.words[i]=newtext<block_end><block_end><block_end><return>q<block_end><def_stmt>_and_query self<block_start><return>compound.And([terms.Term(self.fieldname word)<for>word self.words])<block_end><def_stmt>estimate_size self ixreader<block_start><return>self._and_query().estimate_size(ixreader)<block_end><def_stmt>estimate_min_size self ixreader<block_start><return>self._and_query().estimate_min_size(ixreader)<block_end><def_stmt>matcher self searcher context=<none><block_start><import_from_stmt>whoosh.query Term SpanNear2<line_sep>fieldname=self.fieldname<if_stmt>fieldname<not><in>searcher.schema<block_start><return>matching.NullMatcher()<block_end>field=searcher.schema[fieldname]<if_stmt><not>field.format<or><not>field.format.supports("positions")<block_start><raise>qcore.QueryError("Phrase search: %r field has no positions"%self.fieldname)<block_end>terms=[]<line_sep># Build a list of Term queries from the words in the phrase reader=searcher.reader()<for_stmt>word self.words<block_start><try_stmt><block_start>word=field.to_bytes(word)<block_end><except_stmt>ValueError<block_start><return>matching.NullMatcher()<block_end><if_stmt>(fieldname word)<not><in>reader# Shortcut the query if one of the words doesn't exist. <block_start><return>matching.NullMatcher()<block_end>terms.append(Term(fieldname word))<block_end># Create the equivalent SpanNear2 query from the terms q=SpanNear2(terms slop=self.slop ordered=<true> mindist=1)<line_sep># Get the matcher m=q.matcher(searcher context)<if_stmt>self.boost<ne>1.0<block_start>m=matching.WrappingMatcher(m boost=self.boost)<block_end><return>m<block_end><block_end>
import FWCore.ParameterSet.Config as cms

# Conversion reconstruction: pairs oppositely-charged tracks from the merged
# GSF/ecal-seeded conversion-track collection into photon-conversion candidates.
allConversions = cms.EDProducer(
    'ConversionProducer',
    AlgorithmName=cms.string('mixed'),
    # src = cms.VInputTag(cms.InputTag("generalTracks")),
    src=cms.InputTag("gsfGeneralInOutOutInConversionTrackMerger"),
    convertedPhotonCollection=cms.string(''),  # or empty

    # Cluster / supercluster inputs
    bcEndcapCollection=cms.InputTag('particleFlowSuperClusterECAL:particleFlowBasicClusterECALEndcap'),
    bcBarrelCollection=cms.InputTag('particleFlowSuperClusterECAL:particleFlowBasicClusterECALBarrel'),
    scBarrelProducer=cms.InputTag('particleFlowSuperClusterECAL:particleFlowSuperClusterECALBarrel'),
    scEndcapProducer=cms.InputTag('particleFlowSuperClusterECAL:particleFlowSuperClusterECALEndcapWithPreshower'),
    primaryVertexProducer=cms.InputTag('offlinePrimaryVerticesWithBS'),

    deltaEta=cms.double(0.4),       # track pair search range in eta (applied even in case of preselection bypass)
    HalfwayEta=cms.double(0.1),     # track-BC matching search range on eta

    maxNumOfTrackInPU=cms.int32(999999),
    maxTrackRho=cms.double(120.),
    maxTrackZ=cms.double(300.),

    minSCEt=cms.double(10.0),
    dEtacutForSCmatching=cms.double(0.03),
    dPhicutForSCmatching=cms.double(0.05),

    dEtaTrackBC=cms.double(0.2),    # track-basic cluster matching, position diff on eta
    dPhiTrackBC=cms.double(1.),     # track-basic cluster matching, position diff on phi
    EnergyBC=cms.double(0.3),       # track-basic cluster matching, BC energy lower cut
    EnergyTotalBC=cms.double(0.3),  # track-basic cluster matching, two BC energy summation cut

    # tight cuts
    d0=cms.double(0.),              # d0*charge cut
    MaxChi2Left=cms.double(10.),    # track quality
    MaxChi2Right=cms.double(10.),
    MinHitsLeft=cms.int32(4),
    MinHitsRight=cms.int32(2),
    DeltaCotTheta=cms.double(0.1),  # track pair opening angle on R-Z
    DeltaPhi=cms.double(0.2),       # track pair opening angle on X-Y (not a final selection cut)
    vtxChi2=cms.double(0.0005),
    MinApproachLow=cms.double(-0.25),   # track pair min distance at approaching point on X-Y
    MinApproachHigh=cms.double(1.0),    # track pair min distance at approaching point on X-Y
    rCut=cms.double(2.0),           # analytical track cross point
    dz=cms.double(5.0),             # track pair inner position difference

    # kinematic vertex fit parameters
    maxDelta=cms.double(0.01),              # delta of parameters
    maxReducedChiSq=cms.double(225.),       # maximum chi^2 per degree of freedom before fit is terminated
    minChiSqImprovement=cms.double(50.),    # threshold for "significant improvement" in the fit termination logic
    maxNbrOfIterations=cms.int32(40),       # maximum number of convergence iterations

    UsePvtx=cms.bool(True),

    AllowD0=cms.bool(True),             # allow d0*charge cut
    AllowDeltaPhi=cms.bool(False),
    AllowTrackBC=cms.bool(False),       # allow to match track-basic cluster
    AllowDeltaCot=cms.bool(True),       # allow pairing using delta cot theta cut
    AllowMinApproach=cms.bool(True),    # allow pairing using min approach cut
    AllowOppCharge=cms.bool(True),      # use opposite charge tracks to pair
    AllowVertex=cms.bool(True),
    bypassPreselGsf=cms.bool(True),         # bypass preselection for gsf + X pairs
    bypassPreselEcal=cms.bool(False),       # bypass preselection for ecal-seeded + X pairs
    bypassPreselEcalEcal=cms.bool(True),    # bypass preselection for ecal-seeded + ecal-seeded pairs
    AllowSingleLeg=cms.bool(False),     # allow single track conversion
    AllowRightBC=cms.bool(False),       # require second leg matching basic cluster
)

# Era customizations: HGCal geometry keeps the GSF preselection; fast
# simulation uses its own merged conversion-track collection.
from Configuration.Eras.Modifier_phase2_hgcal_cff import phase2_hgcal
phase2_hgcal.toModify(allConversions, bypassPreselGsf=False)

from Configuration.Eras.Modifier_fastSim_cff import fastSim
fastSim.toModify(allConversions, src='gsfGeneralConversionTrackMerger')
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Build the final Petridish evaluation model from a saved model description
and print its parameter/FLOPs summary for a CIFAR-sized input batch."""

from archai.nas.model_desc import ModelDesc
from archai.common.common import common_init
from archai.nas.model import Model
from archai.algos.petridish.petridish_model_desc_builder import PetridishModelBuilder
from archai.common.model_summary import summary

conf = common_init(config_filepath='confs/petridish_cifar.yaml',
                   param_args=['--common.experiment_name', 'petridish_run2_seed42_eval'])
conf_eval = conf['nas']['eval']
conf_model_desc = conf_eval['model_desc']

# Evaluate at full depth (14 cells), regardless of the search-time setting.
conf_model_desc['n_cells'] = 14
template_model_desc = ModelDesc.load('$expdir/final_model_desc.yaml')

model_builder = PetridishModelBuilder()
model_desc = model_builder.build(conf_model_desc, template=template_model_desc)
# FIX: removed a second, unused PetridishModelBuilder instantiation (`mb`)
# that duplicated `model_builder` above.

model = Model(model_desc, droppath=False, affine=False)

# Summarize for a batch of 64 CIFAR images (3x32x32).
summary(model, [64, 3, 32, 32])

exit(0)
from vyper.compiler import compile_code
from vyper.compiler.output import _compress_source_map
from vyper.compiler.utils import expand_source_map

TEST_CODE = """
@internal
def _baz(a: int128) -> int128:
    b: int128 = a
    for i in range(2, 5):
        b *= i
        if b > 31337:
            break
    return b

@internal
def _bar(a: uint256) -> bool:
    if a > 42:
        return True
    return False

@external
def foo(a: uint256) -> int128:
    if self._bar(a):
        return self._baz(2)
    else:
        return 42
"""


def test_jump_map():
    """Jump-out entries must land on `return` statements and jump-in
    entries on internal `self.` calls."""
    source_map = compile_code(TEST_CODE, ["source_map"])["source_map"]
    pos_map = source_map["pc_pos_map"]
    jump_map = source_map["pc_jump_map"]

    jump_tags = list(jump_map.values())
    assert jump_tags.count("o") == 1
    assert jump_tags.count("i") == 2

    code_lines = [line + "\n" for line in TEST_CODE.split("\n")]
    for pc, tag in jump_map.items():
        if tag not in ("o", "i"):
            continue
        lineno, col_offset, _, end_col_offset = pos_map[pc]
        snippet = code_lines[lineno - 1][col_offset:end_col_offset]
        expected_prefix = "return" if tag == "o" else "self."
        assert snippet.startswith(expected_prefix)


def test_pos_map_offsets():
    """The compressed map, once expanded, must agree with the uncompressed
    pc_pos_map / pc_jump_map entry by entry."""
    source_map = compile_code(TEST_CODE, ["source_map"])["source_map"]
    expanded = expand_source_map(source_map["pc_pos_map_compressed"])

    pc_iter = iter([source_map["pc_pos_map"][pc]
                    for pc in sorted(source_map["pc_pos_map"])])
    jump_iter = iter([source_map["pc_jump_map"][pc]
                      for pc in sorted(source_map["pc_jump_map"])])
    code_lines = [line + "\n" for line in TEST_CODE.split("\n")]

    for item in expanded:
        if item[-1] is not None:
            assert next(jump_iter) == item[-1]

        if item[:2] == [-1, -1]:
            continue

        start, length = item[:2]
        lineno, col_offset, end_lineno, end_col_offset = next(pc_iter)
        # Same starting character...
        assert code_lines[lineno - 1][col_offset] == TEST_CODE[start]
        # ...and the same span length, computed from the line/column bounds.
        span = sum(len(line) for line in code_lines[lineno - 1:end_lineno])
        span -= col_offset
        span -= len(code_lines[end_lineno - 1]) - end_col_offset
        assert length == span


def test_compress_source_map():
    code = """
@external
def foo() -> uint256:
    return 42
    """
    compressed = _compress_source_map(
        code,
        {"0": None, "2": (2, 0, 4, 13), "3": (2, 0, 2, 8), "5": (2, 0, 2, 8)},
        {"3": "o"},
        2,
    )
    assert compressed == "-1:-1:2:-;1:45;:8::o;;"


def test_expand_source_map():
    compressed = "-1:-1:0:-;;13:42:1;:21;::0:o;:::-;1::1;"
    expanded = [
        [-1, -1, 0, "-"],
        [-1, -1, 0, None],
        [13, 42, 1, None],
        [13, 21, 1, None],
        [13, 21, 0, "o"],
        [13, 21, 0, "-"],
        [1, 21, 1, None],
    ]
    assert expand_source_map(compressed) == expanded
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import grpc
import requests
from grpc._cython import cygrpc

from fate_arch.common.base_utils import json_dumps, json_loads

from fate_flow.entity.runtime_config import RuntimeConfig
from fate_flow.settings import FATEFLOW_SERVICE_NAME, HEADERS, DEFAULT_REMOTE_REQUEST_TIMEOUT
from fate_flow.settings import IP, GRPC_PORT, stat_logger
from fate_flow.utils.proto_compatibility import basic_meta_pb2
from fate_flow.utils.proto_compatibility import proxy_pb2
from fate_flow.utils.proto_compatibility import proxy_pb2_grpc
import time

import sys
from fate_flow.tests.grpc.xthread import ThreadPoolExecutor


def wrap_grpc_packet(json_body, http_method, url, src_party_id, dst_party_id,
                     job_id=None, overall_timeout=DEFAULT_REMOTE_REQUEST_TIMEOUT):
    """Wrap an HTTP-style request into a proxy Packet routed from
    ``src_party_id`` to ``dst_party_id``."""
    src_endpoint = basic_meta_pb2.Endpoint(ip=IP, port=GRPC_PORT)
    src_topic = proxy_pb2.Topic(name=job_id, partyId="{}".format(src_party_id),
                                role=FATEFLOW_SERVICE_NAME, callback=src_endpoint)
    dst_topic = proxy_pb2.Topic(name=job_id, partyId="{}".format(dst_party_id),
                                role=FATEFLOW_SERVICE_NAME, callback=None)
    metadata = proxy_pb2.Metadata(
        src=src_topic,
        dst=dst_topic,
        task=proxy_pb2.Task(taskId=job_id),
        command=proxy_pb2.Command(name=FATEFLOW_SERVICE_NAME),
        operator=http_method,
        conf=proxy_pb2.Conf(overallTimeout=overall_timeout),
    )
    payload = proxy_pb2.Data(key=url, value=bytes(json_dumps(json_body), 'utf-8'))
    return proxy_pb2.Packet(header=metadata, body=payload)


def get_url(_suffix):
    """Build the local fate-flow HTTP URL for the given path suffix."""
    return "http://{}:{}/{}".format(RuntimeConfig.JOB_SERVER_HOST,
                                    RuntimeConfig.HTTP_PORT,
                                    _suffix.lstrip('/'))


class UnaryService(proxy_pb2_grpc.DataTransferServiceServicer):
    """Test servicer: unpacks an incoming Packet and replies with a canned
    response after a long sleep (simulates a slow remote)."""

    def unaryCall(self, _request, context):
        header = _request.header
        _suffix = _request.body.key
        param = bytes.decode(_request.body.value)
        job_id = header.task.taskId
        src, dst = header.src, header.dst
        method = header.operator

        # Tag the payload with the sender before (optionally) forwarding it.
        param_dict = json_loads(param)
        param_dict['src_party_id'] = str(src.partyId)
        source_routing_header = [(key, value)
                                 for key, value in context.invocation_metadata()]
        stat_logger.info(f"grpc request routing header: {source_routing_header}")
        param = bytes.decode(bytes(json_dumps(param_dict), 'utf-8'))

        action = getattr(requests, method.lower(), None)
        if action:
            print(_suffix)
            # resp = action(url=get_url(_suffix), data=param, headers=HEADERS)
        # resp_json = resp.json()
        resp_json = {"status": "test"}
        print("sleep")
        time.sleep(60)  # deliberate stall to exercise client timeouts
        return wrap_grpc_packet(resp_json, method, _suffix,
                                dst.partyId, src.partyId, job_id)


thread_pool_executor = ThreadPoolExecutor(max_workers=5)
print(f"start grpc server pool on {thread_pool_executor._max_workers} max workers")
server = grpc.server(
    thread_pool_executor,
    options=[(cygrpc.ChannelArgKey.max_send_message_length, -1),
             (cygrpc.ChannelArgKey.max_receive_message_length, -1)])

proxy_pb2_grpc.add_DataTransferServiceServicer_to_server(UnaryService(), server)
server.add_insecure_port("{}:{}".format("127.0.0.1", 7777))
server.start()

try:
    # Keep the process alive until interrupted.
    while True:
        time.sleep(60 * 60 * 24)
except KeyboardInterrupt:
    server.stop(0)
    sys.exit(0)
import sys
import yaml


class Config:
    """Nested, attribute-accessible configuration backed by a plain dict.

    Values are looked up through ``__getattribute__`` first in the wrapped
    dict, then on the instance itself, so ``cfg.model.lr`` works for nested
    YAML settings.
    """

    def __init__(self, cfg=None):
        self.cfg = {}
        if cfg is not None:
            self.update(cfg)

    def __getattribute__(self, name):
        cfg = object.__getattribute__(self, 'cfg')
        if name in cfg:
            return cfg[name]
        # Fall back to real attributes/methods (cfg, update, add, ...).
        return object.__getattribute__(self, name)

    def items(self):
        return object.__getattribute__(self, 'cfg').items()

    def update(self, new_cfg):
        """Deep-merge ``new_cfg`` into this config; dicts become Configs."""
        cfg = self.cfg
        for key, val in new_cfg.items():
            if type(val) is dict:
                val = Config(val)
                if key in cfg:
                    # Merge into the existing sub-config instead of replacing.
                    cfg[key].update(val)
                    continue
            cfg[key] = val

    def add(self, arg, val=None):
        # Manual item: "a.b.c" path with an explicit value.
        if val is not None:
            *parents, leaf = arg.split('.')
            node = self
            for part in parents:
                node = node.cfg[part]
            # Coerce to the existing value's numeric type when applicable.
            if leaf in node.cfg:
                current = node.cfg[leaf]
                if type(current) == int:
                    val = int(val)
                elif type(current) == float:
                    val = float(val)
            node.cfg[leaf] = val
            print('{} is set to {}'.format(arg, val))
            return

        # Config file shortcut: bare name maps to configs/<name>.yaml.
        if not arg.endswith('.yaml'):
            arg = 'configs/{}.yaml'.format(arg)

        # Config file
        print('importing config from "{}"'.format(arg))
        with open(arg) as f:
            self.update(yaml.load(f, Loader=yaml.Loader))

    def as_dict(self):
        """Recursively convert back to plain nested dicts."""
        return {key: (val.as_dict() if isinstance(val, Config) else val)
                for key, val in self.cfg.items()}

    def show(self, depth=0):
        yaml.dump(self.as_dict(), sys.stdout)

    def get_path(self, name):
        # Path templates under data.* are formatted with dataset/model names.
        return self.data.cfg[name].format(self.data.name, self.model.shortname)


def init_config():
    """Load configs/default.yaml, then apply CLI overrides (key=value or
    config-file names)."""
    config = Config()
    config.add('configs/default.yaml')
    for arg in sys.argv[1:]:
        config.add(*arg.split('='))
    return config


def reset_config():
    global config
    config = init_config()


config = init_config()
from ebonite.utils.classproperty import classproperty


class MyClass:
    """Fixture class exposing classproperty both bare and stacked on
    classmethod."""

    @classproperty
    def prop1(self):
        return 'a'

    @classproperty
    @classmethod
    def prop2(self):
        return 'b'


def test_classproperty__get():
    """Both decorator spellings must be readable directly on the class."""
    for attr, expected in (('prop1', 'a'), ('prop2', 'b')):
        assert getattr(MyClass, attr) == expected
# Copyright 2020 ByteDance Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>math<import_stmt>os<import_stmt>tensorflow<as>tf<import_from_stmt>absl logging<import_from_stmt>neurst.utils.compat get_distributed_worker_setting<import_from_stmt>neurst.utils.misc deprecated flatten_string_list<line_sep>_MIN_BUCKET_BOUNDARY=8<line_sep>_BUCKET_BOUNDARY_SCALE=1.1<line_sep>_MAX_BUCKET_BOUNDARY=256<def_stmt>map_data_for_keras dataset<block_start>""" Maps data for training. For TF v2, the 2nd parameter is omitted to make Keras training work. Args: dataset: A tf.data.Dataset object. Returns: A tf.data.Dataset object. """<def_stmt>_fn *args<block_start><return>(args )<block_end><return>dataset.map(_fn num_parallel_calls=tf.data.experimental.AUTOTUNE)<block_end>@deprecated<def_stmt>_batch_examples_by_token dataset batch_size bucket_boundaries padding_values padding_length example_length_func drop_remainder=<true> num_replicas_in_sync=1<block_start>"""Group examples by similar lengths, and return batched dataset. Each batch of similar-length examples are padded to the same length, and may have different number of elements in each batch, such that: group_batch_size * padded_length <= batch_size. This decreases the number of padding tokens per batch, which improves the training speed. Args: dataset: Dataset of unbatched examples. batch_size: Max number of tokens per batch of examples. bucket_boundaries: A list of integers of the boundaries of each bucket. 
padding_values: A tuple of constants for padding. padding_length: A list/tuple of padding length, which will be passed to padded_decode. example_length_func: A callable function, which deduces the input examples to the maximum length. drop_remainder: Whether the last batch should be dropped in the case it has fewer than batch_size. num_replicas_in_sync: The number of GPUs or other workers. We will generate global batches, and each global batch is equally divisible by number of replicas. Returns: Dataset of batched examples with similar lengths. """<line_sep># Get min and max boundary lists for each example. These are used to calculate # the `bucket_id`, which is the index at which: # buckets_min[bucket_id] <= len(example) < buckets_max[bucket_id] # Note that using both min and max lists improves the performance. buckets_min=[0]+bucket_boundaries[:-1]<line_sep>buckets_max=bucket_boundaries<line_sep># Create list of batch sizes for each bucket_id, so that # bucket_batch_size[bucket_id] * buckets_max[bucket_id] <= batch_size bucket_batch_sizes=[batch_size<floordiv>x<floordiv>num_replicas_in_sync<times>num_replicas_in_sync<for>x buckets_max]<line_sep># bucket_id will be a tensor, so convert this list to a tensor as well. 
bucket_batch_sizes=tf.constant(bucket_batch_sizes dtype=tf.int64)<def_stmt>example_to_bucket_id examples<block_start>"""Return int64 bucket id for this example, calculated based on length."""<line_sep>seq_length=tf.cast(example_length_func(examples) tf.int32)<line_sep>conditions_c=tf.logical_and(tf.less_equal(buckets_min seq_length) tf.less(seq_length buckets_max))<line_sep>bucket_id=tf.reduce_min(tf.where(conditions_c))<line_sep><return>bucket_id<block_end><def_stmt>window_size_fn bucket_id<block_start>"""Return number of examples to be grouped when given a bucket id."""<line_sep><return>bucket_batch_sizes[bucket_id]<block_end><def_stmt>batching_fn bucket_id grouped_dataset<block_start>"""Batch and add padding to a dataset of elements with similar lengths."""<line_sep>bucket_batch_size=window_size_fn(bucket_id)<line_sep># Batch the dataset and add padding so that all input sequences in the # examples have the same length, and all target sequences have the same # lengths as well. Resulting lengths of inputs and targets can differ. <return>grouped_dataset.padded_batch(bucket_batch_size padding_length padding_values=padding_values drop_remainder=drop_remainder)<block_end><return>dataset.apply(tf.data.experimental.group_by_window(key_func=example_to_bucket_id reduce_func=batching_fn window_size=<none> window_size_func=window_size_fn))<block_end><def_stmt>create_batch_bucket_boundaries max_length min_boundary=_MIN_BUCKET_BOUNDARY boundary_scale=_BUCKET_BOUNDARY_SCALE<block_start>""" Creates training batch bucket boundaries. Args: max_length: The maximum length of example in dataset. min_boundary: Minimum length in boundary. boundary_scale: Amount to scale consecutive boundaries in the list. Returns: A list of bucket boundaries. """<line_sep># Create bucket boundaries list by scaling the previous boundary or adding 1 # (to ensure increasing boundary sizes). 
bucket_boundaries=[]<line_sep>x=min_boundary<while_stmt>x<l>max_length<block_start>bucket_boundaries.append(x)<line_sep>x=max(x+1 int(x<times>boundary_scale))<block_end><if_stmt>bucket_boundaries[-1]<l>max_length+1<block_start>bucket_boundaries=bucket_boundaries+[max_length+1]<block_end><return>bucket_boundaries<block_end><def_stmt>associated_bucket_boundaries a b<block_start>""" Creates training batch bucket boundaries. Args: a: A list of bucket boundaries. b: Another list of bucket boundaries. Returns: Two refactored lists of bucket boundaries with the same size. """<line_sep>length1=len(a)<line_sep>length2=len(b)<if_stmt>length1<eq>length2<block_start><return>a b<block_end><elif_stmt>length1<g>length2<block_start>step_size1=length1<times>1./length2<line_sep>step_size2=1<block_end><else_stmt><block_start>step_size1=1<line_sep>step_size2=length2<times>1./length1<block_end>new_boundaries1=[]<line_sep>new_boundaries2=[]<line_sep>i=1<while_stmt>i<l>min(length1 length2)+1<block_start>new_boundaries1.append(a[int(math.ceil(i<times>step_size1))-1])<line_sep>new_boundaries2.append(b[int(math.ceil(i<times>step_size2))-1])<line_sep>i<augadd>1<block_end><return>new_boundaries1 new_boundaries2<block_end>@deprecated<def_stmt>load_from_tfrecord_and_auto_shard features_file shuffle=<true> example_parse_fn=<none> deterministic=<true><block_start>""" Loads TFRecords and does autot-sharding according to worker num. Args: features_file: The TFRecords file path. shuffle: Whether to shuffle files. example_parse_fn: The example parse function for TF Record. deterministic: Whether the outputs need to be produced in deterministic order. Returns: A dataset. 
"""<line_sep>_files=features_file.split(",")<line_sep>_features_files=[]<for_stmt>_file _files<block_start><if_stmt>tf.io.gfile.isdir(_file)<block_start>_features_files.append(os.path.join(_file "*train*"))<block_end><elif_stmt>tf.io.gfile.exists(_file)<block_start>_features_files.append(_file)<block_end><else_stmt><block_start>_features_files.append(_file+"*")<block_end><block_end>logging.info("Load TFRecords from {}".format(str(_features_files)))<line_sep>dataset=tf.data.Dataset.list_files(_features_files shuffle=shuffle)<line_sep># auto sharding worker_id,num_workers,strategy=get_distributed_worker_setting()<if_stmt>num_workers<g>1<and>strategy<in>["horovod" "byteps"]<and><not>shuffle<block_start>logging.info("Shard %d of the whole dataset(total %d workers)." worker_id num_workers)<line_sep>dataset=dataset.shard(num_workers worker_id)<block_end># Read files and interleave results. # When training, the order of the examples will be non-deterministic. options=tf.data.Options()<line_sep>options.experimental_deterministic=deterministic<line_sep>dataset=dataset.interleave(<lambda>f:tf.data.TFRecordDataset(f buffer_size=32<times>1024<times>1024) cycle_length=10 num_parallel_calls=tf.data.experimental.AUTOTUNE).with_options(options)<if_stmt>example_parse_fn<is><none><block_start><return>dataset<block_end><return>dataset.map(example_parse_fn num_parallel_calls=tf.data.experimental.AUTOTUNE)<block_end><def_stmt>parse_tfexample serialized_example name_to_features name_mapping=<none> map_func=<none> auxiliary_elements=<none><block_start>""" Parses TF example from TF Record. 
"""<line_sep>parsed=tf.io.parse_single_example(serialized_example name_to_features)<line_sep>elements={}<for_stmt>k,v parsed.items()<block_start><if_stmt>name_mapping<is><none><or>k<not><in>name_mapping<block_start>elements[k]=tf.sparse.to_dense(v)<block_end><else_stmt><block_start>elements[name_mapping[k]]=tf.sparse.to_dense(v)<block_end><block_end><if_stmt>isinstance(auxiliary_elements dict)<block_start>elements.update(auxiliary_elements)<block_end><if_stmt>map_func<is><none><block_start><return>elements<block_end><return>map_func(elements)<block_end><def_stmt>glob_tfrecords file_path<block_start>_files=flatten_string_list(file_path)<line_sep>_features_files=[]<for_stmt>_file _files<block_start><if_stmt>tf.io.gfile.isdir(_file)<block_start>_features_files.extend(tf.io.gfile.glob(os.path.join(_file "*train*")))<block_end><elif_stmt>tf.io.gfile.exists(_file)<block_start>_features_files.append(_file)<block_end><else_stmt><block_start>_features_files.extend(tf.io.gfile.glob(_file+"*"))<block_end><block_end><return>_features_files<block_end><def_stmt>load_tfrecords file_path name_to_features shuffle=<false> deterministic=<true> feature_name_mapping=<none> map_func=<none> sharding_index=0 num_shards=1 auto_shard=<false> auxiliary_elements=<none><arrow>tf.data.Dataset<block_start>""" Loads TFRecords and does autot-sharding according to worker num. Args: file_path: The TFRecords file path. name_to_features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` values. shuffle: Whether to shuffle files. deterministic: Whether the outputs need to be produced in deterministic order. feature_name_mapping: A dict that maps the names in `name_to_features` to aliases. map_func: A callable function to process the data. sharding_index: The manually defined index for sharding. num_shards: The manually defined number of shards operating in parallel. auto_shard: Automatically shard the TFRecord parts if True. 
auxiliary_elements: A dict containing auxiliary elements that will append to the data sample. Returns: A dataset. """<line_sep>_features_files=[]<for_stmt>_file flatten_string_list(file_path)<block_start><if_stmt>tf.io.gfile.isdir(_file)<block_start>_features_files.append(os.path.join(_file "*train*"))<block_end><elif_stmt>tf.io.gfile.exists(_file)<block_start>_features_files.append(_file)<block_end><else_stmt><block_start>_features_files.append(_file+"*")<block_end><block_end># shuffle = (shuffle is True) and (num_shards == 1) # dataset = tf.data.Dataset.list_files(_features_files, shuffle=shuffle) dataset=tf.data.Dataset.list_files(_features_files shuffle=<false>)<if_stmt>num_shards<g>1<block_start>logging.info("Shard %d of the whole dataset(total %d workers)." sharding_index num_shards)<line_sep>dataset=dataset.shard(num_shards sharding_index)<block_end><else_stmt># auto sharding <block_start>worker_id,num_workers,strategy=get_distributed_worker_setting()<if_stmt>num_workers<g>1<and>strategy<in>["horovod" "byteps"]<and>auto_shard<block_start>logging.info("Shard %d of the whole dataset(total %d workers)." worker_id num_workers)<line_sep>options=tf.data.Options()<line_sep>options.experimental_distribute.auto_shard_policy=tf.data.experimental.AutoShardPolicy.OFF<line_sep>dataset=dataset.with_options(options)<line_sep>dataset=dataset.shard(num_workers worker_id)<block_end><block_end>logging.info("Loading TF Records from: ")<if_stmt>shuffle<block_start>dataset=dataset.shuffle(5000)<block_end><for_stmt>_f dataset<block_start>logging.info(f" {_f.numpy()}")<block_end># Read files and interleave results. # When training, the order of the examples will be non-deterministic. 
options=tf.data.Options()<line_sep>options.experimental_deterministic=deterministic<line_sep>dataset=dataset.interleave(<lambda>f:tf.data.TFRecordDataset(f buffer_size=128<times>1024<times>1024) cycle_length=10 num_parallel_calls=tf.data.experimental.AUTOTUNE).with_options(options)<if_stmt>name_to_features<is><none><block_start><return>dataset<block_end><return>dataset.map(<lambda>x:parse_tfexample(x name_to_features feature_name_mapping map_func auxiliary_elements=auxiliary_elements) num_parallel_calls=tf.data.experimental.AUTOTUNE)<block_end><def_stmt>clean_dataset_by_length dataset data_max_lengths<block_start>""" Filters empty datas, or datas exceeded max length. """<line_sep>logging.info(f"Filtering empty data and datas exceeded max length={data_max_lengths}")<line_sep><return>dataset.filter(<lambda>data_sample:tf.reduce_all([(length<eq>-1<or>length<is><none><or>tf.less_equal(tf.size(data_sample[k]) length))# filter by max length <and>(length<eq>-1<or>(length<ne>-1<and>tf.size(data_sample[k])<g>1))# filter out empty lines <for>k,length data_max_lengths.items()]))<block_end>@deprecated<def_stmt>batch_sequential_dataset dataset padding_values example_length_func=<none> batch_size=<none> batch_size_per_gpu=<none> batch_by_tokens=<false> bucket_boundaries=<none> data_max_lengths=<none> shuffer_buffer=0 drop_remainder=<true> num_replicas_in_sync=1<block_start>""" Calls padded_batch under special settings for sequential dataset. Args: dataset: A parallel dataset. padding_values: A list of padding values, will be passed to dataset.padded_batch. example_length_func: A callable function that takes a dict as input and returns the "length" of this data sample. batch_size: The number of sentences or word tokens according to `batch_by_tokens`. batch_size_per_gpu: The per-GPU batch size. batch_by_tokens: A bool, whether to batch the data by word tokens. bucket_boundaries: A list integers indicating the boundaries of the bucket when `batch_by_tokens` is True. 
data_max_lengths: The maximum length of training data, None or a list/tuple of integers with the the size as data samples. -1 indicates scalar data with no 'length' checking. shuffer_buffer: The buffer size for shuffling. drop_remainder: Whether the last batch should be dropped in the case it has fewer than batch_size. num_replicas_in_sync: The number of GPUs or other workers. We will generate global batches, and each global batch is equally divisible by number of replicas. Returns: The batched dataset. """<if_stmt>data_max_lengths<is><none><block_start>data_max_lengths={k:<none><for>k padding_values}<block_end><assert_stmt>len(data_max_lengths)<eq>len(padding_values)<if_stmt>example_length_func<is><none><block_start><def_stmt>example_length_func examples<block_start><return>tf.reduce_max([tf.size(examples[k])<for>k,length data_max_lengths.items()<if>length<ne>-1])<block_end><block_end><if_stmt>batch_size<is><none><and>batch_size_per_gpu<is><none><block_start><raise>ValueError("Either `batch_size` or `batch_size_per_gpu` needs to be provided.")<block_end><elif_stmt>batch_size<is><not><none><and>batch_size_per_gpu<is><not><none><block_start>logging.info("Both `batch_size` and `batch_size_per_gpu` are provided, use `batch_size_per_gpu`.")<block_end><if_stmt>batch_size_per_gpu<is><not><none><block_start>batch_size=int(batch_size_per_gpu<times>num_replicas_in_sync)<block_end>logging.info("The global batch size is {}, with batch_by_tokens={}".format(batch_size batch_by_tokens))<line_sep># filter out empty lines dataset=clean_dataset_by_length(dataset data_max_lengths)<line_sep>dynamic_padding_length={k:([]<if>length<eq>-1<else>[<none>])<for>k,length data_max_lengths.items()}<if_stmt>batch_by_tokens# shuffle <block_start><if_stmt>shuffer_buffer<block_start>dataset=dataset.shuffle(buffer_size=shuffer_buffer)<block_end>max_length=max(max([_len<or>0<for>_len data_max_lengths.values()]) 0)<if_stmt><not>max_length<block_start>logging.info("Using pre-defined max 
length={}".format(_MAX_BUCKET_BOUNDARY))<line_sep>max_length=_MAX_BUCKET_BOUNDARY<block_end>logging.info("Final check of the max length of the training data. "<concat>"Filter out whose length is larger than {}".format(max_length))<line_sep>dataset=dataset.filter(<lambda>data_sample:tf.reduce_all([(length<eq>-1)<or>(length<is><none>)<or>tf.size(data_sample[k])<le>max_length<for>k,length data_max_lengths.items()]))<if_stmt>bucket_boundaries<is><none><block_start>bucket_boundaries=create_batch_bucket_boundaries(max_length)<block_end><return>_batch_examples_by_token(dataset batch_size=batch_size drop_remainder=drop_remainder padding_values=padding_values padding_length=dynamic_padding_length bucket_boundaries=bucket_boundaries example_length_func=example_length_func num_replicas_in_sync=num_replicas_in_sync)<block_end><else_stmt># shuffle <block_start><if_stmt>shuffer_buffer<block_start>dataset=dataset.shuffle(buffer_size=shuffer_buffer)<block_end>padding_length=dynamic_padding_length<block_end>logging.info("The padding length of the dataset is {}".format(padding_length))<line_sep>dataset=dataset.padded_batch(int(batch_size<floordiv>num_replicas_in_sync<times>num_replicas_in_sync) padding_length drop_remainder=drop_remainder padding_values=padding_values)<line_sep><return>dataset<block_end><def_stmt>adjust_batch_size batch_size=<none> batch_size_per_gpu=<none> bucket_boundaries=<none> boundaries_reduce_to_length_fn=<none> num_replicas_in_sync=1 verbose=<true><block_start><if_stmt>batch_size<is><none><and>batch_size_per_gpu<is><none><block_start><raise>ValueError("At least one of the `batch_size` and `batch_size_per_gpu` should be provided.")<block_end><elif_stmt>batch_size<is><not><none><and>batch_size_per_gpu<is><not><none><block_start>logging.info("Both `batch_size` and `batch_size_per_gpu` are provided, use 
`batch_size_per_gpu`.")<block_end><if_stmt>batch_size_per_gpu<is><not><none><block_start>batch_size=int(batch_size_per_gpu<times>num_replicas_in_sync)<block_end><if_stmt>bucket_boundaries<is><none><block_start>batch_size=int(batch_size<floordiv>num_replicas_in_sync<times>num_replicas_in_sync)<if_stmt>verbose<block_start>logging.info(f"The global batch size is {batch_size} samples.")<block_end><return>batch_size<block_end>logging.info(f"The global batch size is {batch_size} tokens.")<line_sep>bucket_batch_sizes=[]<try_stmt><block_start>i=0<while_stmt><true><block_start>bucket_batch_sizes.append(int(batch_size<floordiv>boundaries_reduce_to_length_fn({k:v[i]<for>k,v bucket_boundaries.items()})<floordiv>num_replicas_in_sync<times>num_replicas_in_sync))<line_sep>i<augadd>1<block_end><block_end><except_stmt>IndexError<block_start><pass><block_end><return>bucket_batch_sizes<block_end><def_stmt>batch_examples_by_token dataset bucket_boundaries bucket_batch_sizes padding_values example_length_func extra_padded_shapes=<none> drop_remainder=<true><block_start>"""Group examples by similar lengths, and return batched dataset. Each batch of similar-length examples are padded to the same length, and may have different number of elements in each batch, such that: group_batch_size * padded_length <= batch_size. This decreases the number of padding tokens per batch, which improves the training speed. Args: dataset: Dataset of unbatched examples. bucket_batch_sizes: Max number of tokens per batch of examples or a list of batch size for each bucket. bucket_boundaries: A list of integers of the boundaries of each bucket. padding_values: A tuple of constants for padding. example_length_func: A callable function, which deduces the input examples to the maximum length. extra_padded_shapes: A dict containing extra shapes (not included in bucket boundaries) for padding. drop_remainder: Whether the last batch should be dropped in the case it has fewer than batch_size. 
Returns: Dataset of batched examples with similar lengths. """<line_sep>cnt=0<try_stmt><block_start>logging.info("The details of batching logic:")<while_stmt><true><block_start>_batch=bucket_batch_sizes<if_stmt>isinstance(bucket_batch_sizes list)<block_start>_batch=bucket_batch_sizes[cnt]<block_end>_bounds={k:v[cnt]<for>k,v bucket_boundaries.items()}<line_sep>logging.info(f" - batch={_batch}, bucket boundary={_bounds}")<line_sep>cnt<augadd>1<block_end><block_end><except_stmt>IndexError<block_start>logging.info(f" Total {cnt} input shapes are compiled.")<block_end><if_stmt><not>isinstance(bucket_batch_sizes list)<block_start>bucket_batch_sizes=[bucket_batch_sizes]<times>cnt<block_end># bucket_id will be a tensor, so convert this list to a tensor as well. bucket_batch_sizes=tf.constant(bucket_batch_sizes dtype=tf.int64)<line_sep>bucket_boundaries={k:tf.constant(v dtype=tf.int32)<for>k,v bucket_boundaries.items()}<def_stmt>example_to_bucket_id examples<block_start>"""Return int64 bucket id for this example, calculated based on length."""<line_sep>seq_length=example_length_func(examples)<line_sep>conditions_c=tf.reduce_all([tf.less_equal(v bucket_boundaries[k])<for>k,v seq_length.items()] axis=0)<line_sep>bucket_id=tf.reduce_min(tf.where(conditions_c))<line_sep><return>bucket_id<block_end><def_stmt>window_size_fn bucket_id<block_start>"""Return number of examples to be grouped when given a bucket id."""<line_sep><return>bucket_batch_sizes[bucket_id]<block_end><def_stmt>batching_fn bucket_id grouped_dataset<block_start>"""Batch and add padding to a dataset of elements with similar lengths."""<line_sep>bucket_batch_size=window_size_fn(bucket_id)<line_sep>padded_shapes={k:[v[bucket_id]]<for>k,v bucket_boundaries.items()}<if_stmt>extra_padded_shapes<block_start><for_stmt>k,v extra_padded_shapes.items()<block_start>padded_shapes[k]=v<block_end><block_end># Batch the dataset and add padding so that all input sequences in the # examples have the same length, and all target 
sequences have the same # lengths as well. Resulting lengths of inputs and targets can differ. <return>grouped_dataset.padded_batch(bucket_batch_size padded_shapes padding_values=padding_values drop_remainder=drop_remainder)<block_end><return>dataset.apply(tf.data.experimental.group_by_window(key_func=example_to_bucket_id reduce_func=batching_fn window_size=<none> window_size_func=window_size_fn))<block_end><def_stmt>take_one_record data_path<block_start>_file_path=flatten_string_list(data_path)[0]<if_stmt>tf.io.gfile.isdir(_file_path)<block_start>_feature_file=os.path.join(_file_path "*train*")<block_end><elif_stmt>tf.io.gfile.exists(_file_path)<block_start>_feature_file=_file_path<block_end><else_stmt><block_start>_feature_file=_file_path+"*"<block_end>dataset=tf.data.Dataset.list_files([_feature_file] shuffle=<false>)<line_sep>dataset=dataset.interleave(<lambda>f:tf.data.TFRecordDataset(f buffer_size=128<times>1024<times>1024) cycle_length=10 num_parallel_calls=tf.data.experimental.AUTOTUNE)<for_stmt>x dataset.take(1)<block_start>example=tf.train.Example()<line_sep>example.ParseFromString(x.numpy())<line_sep><return>example<block_end><block_end>
<import_stmt>numpy<as>np<import_from_stmt>scipy.signal iirnotch firwin filtfilt lfilter freqz<import_from_stmt>matplotlib pyplot<as>plt<import_stmt>nibabel<as>nb<import_stmt>subprocess<import_stmt>math<def_stmt>add_afni_prefix tpattern<block_start><if_stmt>tpattern<block_start><if_stmt>".txt"<in>tpattern<block_start>tpattern="@{0}".format(tpattern)<block_end><block_end><return>tpattern<block_end><def_stmt>nullify value function=<none><block_start><import_from_stmt>traits.trait_base Undefined<if_stmt>value<is><none><block_start><return>Undefined<block_end><if_stmt>function<block_start><return>function(value)<block_end><return>value<block_end><def_stmt>chunk_ts func_file n_chunks=<none> chunk_size=<none><block_start>func_img=nb.load(func_file)<line_sep>trs=func_img.shape[3]<line_sep>TR_ranges=[]<if_stmt>n_chunks<block_start>chunk_size=trs/n_chunks<block_end><elif_stmt>chunk_size<block_start>n_chunks=int(trs/chunk_size)<block_end><else_stmt><block_start><raise>Exception("\n[!] Dev error: Either 'n_chunks' or 'chunk_size' "<concat>"arguments must be passed to 'chunk_ts' function.\n")<block_end><for_stmt>chunk_idx range(0 n_chunks)<block_start><if_stmt>chunk_idx<eq>n_chunks-1<block_start>TR_ranges.append((int(chunk_idx<times>chunk_size) int(trs-1)))<block_end><else_stmt><block_start>TR_ranges.append((int(chunk_idx<times>chunk_size) int((chunk_idx+1)<times>chunk_size-1)))<block_end><block_end><return>TR_ranges<block_end><def_stmt>split_ts_chunks func_file tr_ranges<block_start><if_stmt>'.nii'<in>func_file<block_start>ext='.nii'<block_end><if_stmt>'.nii.gz'<in>func_file<block_start>ext='.nii.gz'<block_end>split_funcs=[]<for_stmt>chunk_idx,tr_range enumerate(tr_ranges)<block_start>out_file=os.path.join(os.getcwd() os.path.basename(func_file).replace(ext "_{0}{1}".format(chunk_idx ext)))<line_sep>in_file="{0}[{1}..{2}]".format(func_file tr_range[0] tr_range[1])<line_sep>cmd=["3dcalc" "-a" in_file "-expr" "a" "-prefix" 
out_file]<line_sep>retcode=subprocess.check_output(cmd)<line_sep>split_funcs.append(out_file)<block_end><return>split_funcs<block_end><def_stmt>oned_text_concat in_files<block_start>out_file=os.path.join(os.getcwd() os.path.basename(in_files[0].replace("_0" "")))<line_sep>out_txt=[]<for_stmt>txt in_files<block_start><with_stmt>open(txt 'r')<as>f<block_start>txt_lines=f.readlines()<block_end><if_stmt><not>out_txt<block_start>out_txt=[x<for>x txt_lines]<block_end><else_stmt><block_start><for_stmt>line txt_lines<block_start><if_stmt>"#"<in>line<block_start><continue><block_end>out_txt.append(line)<block_end><block_end><block_end><with_stmt>open(out_file 'wt')<as>f<block_start><for_stmt>line out_txt<block_start>f.write(line)<block_end><block_end><return>out_file<block_end><def_stmt>degrees_to_mm degrees head_radius# function to convert degrees of motion to mm <block_start>mm=2<times>math.pi<times>head_radius<times>(degrees/360)<line_sep><return>mm<block_end><def_stmt>mm_to_degrees mm head_radius# function to convert mm of motion to degrees <block_start>degrees=360<times>mm/(2<times>math.pi<times>head_radius)<line_sep><return>degrees<block_end><def_stmt>degrees_to_mm degrees head_radius# function to convert degrees of motion to mm <block_start>mm=2<times>math.pi<times>head_radius<times>(degrees/360)<line_sep><return>mm<block_end><def_stmt>mm_to_degrees mm head_radius# function to convert mm of motion to degrees <block_start>degrees=360<times>mm/(2<times>math.pi<times>head_radius)<line_sep><return>degrees<block_end><def_stmt>degrees_to_mm degrees head_radius# function to convert degrees of motion to mm <block_start>mm=2<times>math.pi<times>head_radius<times>(degrees/360)<line_sep><return>mm<block_end><def_stmt>mm_to_degrees mm head_radius# function to convert mm of motion to degrees <block_start>degrees=360<times>mm/(2<times>math.pi<times>head_radius)<line_sep><return>degrees<block_end><def_stmt>notch_filter_motion motion_params filter_type TR fc_RR_min=<none> 
fc_RR_max=<none> center_freq=<none> freq_bw=<none> lowpass_cutoff=<none> filter_order=4# Adapted from DCAN Labs: # https://github.com/DCAN-Labs/dcan_bold_processing/blob/master/ # ...matlab_code/filtered_movement_regressors.m <block_start><if_stmt>"ms"<in>TR<block_start>TR=float(TR.replace("ms" ""))/1000<block_end><elif_stmt>"ms"<not><in>TR<and>"s"<in>TR<block_start>TR=float(TR.replace("s" ""))<block_end>params_data=np.loadtxt(motion_params)<line_sep># Sampling frequency fs=1/TR<line_sep># Nyquist frequency fNy=fs/2<if_stmt>filter_type<eq>"notch"# Respiratory Rate <block_start><if_stmt>fc_RR_min<and>fc_RR_max<block_start>rr=[float(fc_RR_min)/float(60) float(fc_RR_max)/float(60)]<line_sep>rr_fNy=[rr[0]+fNy rr[1]+fNy]<line_sep>fa=abs(rr-np.floor(np.divide(rr_fNy fs))<times>fs)<block_end><elif_stmt>center_freq<and>freq_bw<block_start>tail=float(freq_bw)/float(2)<line_sep>fa=[center_freq-tail center_freq+tail]<block_end>W_notch=np.divide(fa fNy)<line_sep>Wn=np.mean(W_notch)<line_sep>bw=np.diff(W_notch)<line_sep># for filter info center_freq=Wn<times>fNy<line_sep>bandwidth=fa[1]-fa[0]<line_sep>Q=Wn/bw<line_sep>[b_filt a_filt]=iirnotch(Wn Q)<line_sep>num_f_apply=np.floor(filter_order/2)<line_sep>filter_info=f"Motion estimate filter information\n\nType: Notch\n"<concat>f"\nCenter freq: {center_freq}\nBandwidth: {bandwidth}\n\n"<concat>f"Wn: {Wn}\nQ: {Q}\n\n"<concat>f"Based on:\nSampling freq: {fs}\nNyquist freq: {fNy}"<block_end><elif_stmt>filter_type<eq>"lowpass"<block_start><if_stmt>fc_RR_min<block_start>rr=float(fc_RR_min)/float(60)<line_sep>rr_fNy=rr+fNy<line_sep>fa=abs(rr-np.floor(np.divide(rr_fNy fs))<times>fs)<block_end><elif_stmt>lowpass_cutoff<block_start>fa=lowpass_cutoff<block_end>Wn=fa/fNy<if_stmt>filter_order<block_start>b_filt=firwin(filter_order+1 Wn)<line_sep>a_filt=1<block_end>num_f_apply=0<line_sep>filter_info=f"Motion estimate filter information\n\nType: Lowpass"<concat>f"\n\nCutoff freq: {fa}\nWn: {Wn}\n\n"<concat>f"Based on:\nSampling freq: 
{fs}\nNyquist freq: {fNy}"<block_end>filter_design=os.path.join(os.getcwd() "motion_estimate_filter_design.txt")<line_sep>filter_plot=os.path.join(os.getcwd() "motion_estimate_filter_freq-response.png")<line_sep># plot frequency response for user info w,h=freqz(b_filt a_filt fs=fs)<line_sep>fig,ax1=plt.subplots()<line_sep>ax1.set_title('Motion estimate filter frequency response')<line_sep>ax1.plot(w 20<times>np.log10(abs(h)) 'b')<line_sep>ax1.set_ylabel('Amplitude [dB]' color='b')<line_sep>ax1.set_xlabel('Frequency [Hz]')<line_sep>plt.savefig(filter_plot)<with_stmt>open(filter_design 'wt')<as>f<block_start>f.write(filter_info)<block_end># convert rotation params from degrees to mm params_data[: 0:3]=degrees_to_mm(params_data[: 0:3] head_radius=50)<line_sep>filtered_params=lfilter(b_filt a_filt params_data.T zi=<none>)<for_stmt>i range(0 int(num_f_apply)-1)<block_start>filtered_params=lfilter(b_filt a_filt filtered_params zi=<none>)<block_end># back rotation params to degrees filtered_params[0:3 :]=mm_to_degrees(filtered_params[0:3 :] head_radius=50)<line_sep># back rotation params to degrees filtered_params[0:3 :]=mm_to_degrees(filtered_params[0:3 :] head_radius=50)<line_sep>filtered_motion_params=os.path.join(os.getcwd() "{0}_filtered.1D".format(os.path.basename(motion_params)))<line_sep>np.savetxt(filtered_motion_params filtered_params.T fmt='%f')<line_sep><return>(filtered_motion_params filter_design filter_plot)<block_end>
""" Park1 function with three domains. -- <EMAIL> """<line_sep># pylint: disable=invalid-name <import_stmt>numpy<as>np<def_stmt>park1_constrained x<block_start>""" Computes the park1 function. """<line_sep><return>park1_constrained_z_x([1.0 1.0 1.0] x)<block_end><def_stmt>park1_constrained_z_x z x<block_start>""" Computes the park1 function. """<line_sep>x1=max(x[0][0] 0.01)<times>np.sqrt(z[0])<line_sep>x2=x[0][1]<times>np.sqrt(z[1])<line_sep>x3=x[1]/100<times>np.sqrt(z[2])<line_sep>x4=(x[2]-10)/6.0<times>np.sqrt((z[0]+z[1]+z[2])/3.0)<line_sep>ret1=(x1/2)<times>(np.sqrt(1+(x2+x3<power>2)<times>x4/(x1<power>2))-1)<line_sep>ret2=(x1+3<times>x4)<times>np.exp(1+np.sin(x3))<line_sep><return>ret1+ret2<block_end># Write a function like this called obj. <def_stmt>objective x<block_start>""" Objective. """<line_sep><return>park1_constrained(x)<block_end>
# -*- coding: utf-8 -*- <import_stmt>getpass<class_stmt>PortsInfo(object)<block_start><def_stmt>__init__ self name ports broker investor=""<block_start>""" CTP连接信息, ports为port列表, 其元素为 "tcp://aaa.bbb.ccc.ddd:ppppp"形式 MDUser不需要输入investor """<line_sep>self.name=name<line_sep>self.ports=ports<line_sep>self.broker=broker<line_sep>self.investor=investor<line_sep>self.passwd=""<block_end><def_stmt>input_account self<block_start>self.investor=input("输入用户名:")<line_sep>self.passwd=getpass.getpass("输入登陆口令:")<block_end><block_end><class_stmt>PortsStub(object)<block_start><def_stmt>__init__ self<block_start>self.name="PortsStub"<line_sep>self.ports=[]<line_sep>self.broker="BrokerOfStub"<line_sep>self.investor="InvestorOfStub"<line_sep>self.passwd=""<block_end><def_stmt>input_account self<block_start><pass><block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>scipy.sparse<as>ssp<import_stmt>torch<import_from_stmt>beta_rec.models.torch_engine ModelEngine<import_from_stmt>beta_rec.utils.common_util timeit<def_stmt>top_k values k exclude=[]<block_start>"""Return the indices of the k items with the highest value in the list of values. Exclude the ids from the list "exclude". """<line_sep># Put low similarity to viewed items to exclude them from recommendations values[exclude]=-np.inf<line_sep><return>list(np.argpartition(-values range(k))[:k])<block_end><def_stmt>get_sparse_vector ids length values=<none><block_start>"""Sparse vector generation. If "values" is None, the elements are set to 1. """<line_sep>n=len(ids)<if_stmt>values<is><none><block_start><return>ssp.coo_matrix((np.ones(n) (ids np.zeros(n))) (length 1)).tocsc()<block_end><else_stmt><block_start><return>ssp.coo_matrix((values (ids np.zeros(n))) (length 1)).tocsc()<block_end><block_end><class_stmt>UserKNN(torch.nn.Module)<block_start>"""A PyTorch Module for UserKNN model."""<def_stmt>__init__ self config<block_start>"""Initialize UserKNN Class."""<line_sep>super(UserKNN self).__init__()<line_sep>self.config=config<line_sep>self.device=self.config["device_str"]<line_sep>self.n_users=self.config["n_users"]<line_sep>self.n_items=self.config["n_items"]<line_sep>self.neighbourhood_size=self.config["neighbourhood_size"]<block_end><def_stmt>prepare_model self data<block_start>"""Load data into matrices. :param data: :return: """<line_sep>row=data.train["col_user"].to_numpy()<line_sep>col=data.train["col_item"].to_numpy()<line_sep>self.binary_user_item=ssp.coo_matrix((np.ones(len(data.train)) (row col)) shape=(self.n_users self.n_items)).tocsr()<block_end><def_stmt>_items_count_per_user self<block_start>"""Calculate the number of interacted items for an user. 
:return: """<if_stmt><not>hasattr(self "__items_count_per_user")<block_start>self.__items_count_per_user=np.asarray(self.binary_user_item.sum(axis=1)).ravel()<block_end><return>self.__items_count_per_user<block_end><def_stmt>similarity_with_users self sequence<block_start>"""Calculate the similarity between the a given user and all users according to the overlap ratio. :param sequence: the user's interacted items :return: """<line_sep>sparse_sequence=get_sparse_vector(sequence self.n_items)<line_sep>overlap=self.binary_user_item.dot(sparse_sequence).toarray().ravel()<line_sep>overlap[overlap<ne>0]<augdiv>np.sqrt(self._items_count_per_user()[overlap<ne>0])<line_sep><return>overlap<block_end><def_stmt>forward self batch_data<block_start>"""Redundant method for UserKNN. Args: batch_data: tuple consists of (users, pos_items, neg_items), which must be LongTensor. """<line_sep><return>0.0<block_end><def_stmt>predict self users items<block_start>"""Predict result with the model. Args: users (int, or list of int): user id(s). items (int, or list of int): item id(s). Return: scores (int, or list of int): predicted scores of these user-item pairs. 
"""<line_sep>scores=[]<for_stmt>i range(len(users))<block_start>sequence=self.binary_user_item.getrow(users[i]).nonzero()[0]<line_sep>sim_with_users=self.similarity_with_users(sequence)<line_sep>nearest_neighbour=top_k(sim_with_users self.neighbourhood_size)<line_sep>neighbour_items=get_sparse_vector(nearest_neighbour self.n_users values=sim_with_users[nearest_neighbour] )<line_sep>sim_with_items=(self.binary_user_item.T.dot(neighbour_items).toarray().ravel())<line_sep>sim_with_items[sequence]=-np.inf<line_sep>scores.append(sim_with_items[items[i]])<block_end><return>torch.tensor(scores)<block_end><block_end><class_stmt>UserKNNEngine(ModelEngine)<block_start>"""UserKNNEngine Class."""<def_stmt>__init__ self config<block_start>"""Initialize UserKNNEngine Class."""<line_sep>print("userKNNEngine init")<line_sep>self.config=config<line_sep>self.model=UserKNN(config["model"])<line_sep># super(UserKNNEngine, self).__init__(config) <block_end><def_stmt>train_single_batch self batch_data<block_start>"""Train a single batch. However, userKNN is a neighbourhood model bases its prediction on the similarity relationships among users. It requires no training procedure. Args: batch_data (list): batch users, positive items and negative items. Return: 0 """<assert_stmt>hasattr(self "model") "Please specify the exact model !"<line_sep><return>0<block_end>@timeit<def_stmt>train_an_epoch self train_loader epoch_id<block_start>"""Train a epoch, generate batch_data from data_loader, and call train_single_batch. Like the train_single_batch method, UserKNN requires no training procedure. Args: train_loader (DataLoader): epoch_id (int): set to 1. """<assert_stmt>hasattr(self "model") "Please specify the exact model !"<line_sep># self.model.train() print(f"[Training Epoch {epoch_id}] skipped")<line_sep>self.writer.add_scalar("model/loss" 0.0 epoch_id)<line_sep>self.writer.add_scalar("model/regularizer" 0.0 epoch_id)<block_end><block_end>
<import_from_future_stmt> print_function absolute_import division<import_from_stmt>future.builtins *<import_from_stmt>future standard_library<line_sep>standard_library.install_aliases()<line_sep># Copyright 2017 Autodesk Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module provides access to the chemical component database, which is stored in ``moldesign/_static_data/chemical_components`` and can be re-generated by running ``cd moldesign/_static_data/ && scripts/generate_residue_data.py --download`` """<import_stmt>os<import_from_stmt>. 
PACKAGEPATH<import_from_stmt>moldesign utils<class_stmt>_DatabaseEntry(object)<block_start>""" Maps into a field stored in the database """<def_stmt>__init__ self hostdb keyname<block_start>self.hostdb=hostdb<line_sep>self.keyname=keyname<line_sep>self.index=self.hostdb['__FIELDS__']['RESFIELDS'].index(keyname)<block_end><def_stmt>__repr__ self<block_start><return>'<Chemical component dictionary: "%s" entries>'%self.keyname<block_end><def_stmt>__getitem__ self item<block_start><return>self.hostdb[item][self.index]<block_end>__contains__=utils.Alias('hostdb.__contains__')<def_stmt>keys self<block_start><for_stmt>key self.hostdb.keys()<block_start><if_stmt>key<eq>'__FIELDS__'<block_start><continue><block_end><yield>key<block_end><block_end><def_stmt>items self<block_start><for_stmt>key self<block_start><yield>key self[key]<block_end><block_end>__iter__=keys<block_end># This is a very big dict, so we load it as a compressed database _bondfilename=os.path.join(PACKAGEPATH '_static_data' 'chemical_components')<line_sep>CCD_DATABASE=utils.CompressedJsonDbm(_bondfilename 'r' dbm=utils.ReadOnlyDumb)<line_sep>RESIDUE_BONDS=_DatabaseEntry(CCD_DATABASE 'bonds')<line_sep>RESIDUE_ATOMS=_DatabaseEntry(CCD_DATABASE 'atoms')<line_sep>RESIDUE_CCD_NAMES=_DatabaseEntry(CCD_DATABASE 'name')<line_sep>RESIDUE_CCD_TYPES=_DatabaseEntry(CCD_DATABASE 'type')<line_sep>
"""Smoke tests for the WideDeep model under different feature mixes.

Each test wires up a feature specification, builds a dataloader and a
fresh WideDeep model, and runs a single forward pass.  A test passes as
long as the forward call does not raise.
"""

from prediction_flow.features import Number, Category, Sequence, Features
from prediction_flow.transformers.column import (
    StandardScaler, CategoryEncoder, SequenceEncoder)

from prediction_flow.pytorch import WideDeep

from .utils import prepare_dataloader


def _forward_one_batch(features, wide_features, deep_features, cross_features):
    """Build a dataloader plus a WideDeep model and push one batch through it."""
    dataloader, _ = prepare_dataloader(features)
    model = WideDeep(
        features, wide_features, deep_features, cross_features,
        num_classes=2, embedding_size=4, hidden_layers=(8, 4),
        final_activation='sigmoid', dropout=0.3)
    model(next(iter(dataloader)))


def _movie_sequence_features():
    """Sequence features shared by several tests below."""
    return [
        Sequence('title', SequenceEncoder(sep='|', min_cnt=1)),
        Sequence('genres', SequenceEncoder(sep='|', min_cnt=1)),
        Sequence('clickedMovieIds',
                 SequenceEncoder(sep='|', min_cnt=1, max_len=5)),
        Sequence('clickedMovieTopGenres',
                 SequenceEncoder(sep='|', min_cnt=1, max_len=5)),
    ]


def _movie_category_features():
    """Category features shared by several tests below."""
    return [
        Category('userId', CategoryEncoder(min_cnt=1)),
        Category('movieId', CategoryEncoder(min_cnt=1)),
        Category('topGenre', CategoryEncoder(min_cnt=1)),
    ]


def test_normal():
    """All three feature kinds together, with cross features."""
    features = Features(
        number_features=[
            Number('userAge', StandardScaler()),
            Number('rating', StandardScaler()),
        ],
        category_features=_movie_category_features(),
        sequence_features=_movie_sequence_features())
    _forward_one_batch(
        features,
        ['rating', 'title', 'genres'],
        ['userAge', 'rating', 'userId', 'movieId', 'topGenre',
         'clickedMovieIds', 'clickedMovieTopGenres'],
        [('movieId', 'clickedMovieIds'),
         ('topGenre', 'clickedMovieTopGenres')])


def test_without_number_feature():
    """Model still works when no numeric features are configured."""
    features = Features(
        number_features=[],
        category_features=_movie_category_features(),
        sequence_features=_movie_sequence_features())
    _forward_one_batch(
        features,
        ['title', 'genres'],
        ['userId', 'movieId', 'topGenre',
         'clickedMovieIds', 'clickedMovieTopGenres'],
        [('movieId', 'clickedMovieIds'),
         ('topGenre', 'clickedMovieTopGenres')])


def test_without_category_feature():
    """Model still works with sequence features only (no cross features)."""
    features = Features(
        number_features=[],
        category_features=[],
        sequence_features=_movie_sequence_features())
    _forward_one_batch(
        features,
        ['title', 'genres'],
        ['clickedMovieIds', 'clickedMovieTopGenres'],
        [])


def test_only_with_number_features():
    """Model still works when only numeric features are configured."""
    features = Features(
        number_features=[
            Number('userAge', StandardScaler()),
            Number('rating', StandardScaler()),
        ],
        category_features=[],
        sequence_features=[])
    _forward_one_batch(
        features,
        ['rating', 'userAge'],
        [],
        [])
<import_stmt>asyncio<line_sep># TODO import __init__ above <async_keyword><def_stmt>logic # TODO add tests <block_start><pass><block_end># main(logic)
import urllib.parse
from datetime import datetime
from unittest.mock import patch

from django.contrib.auth.models import User
from django.contrib.messages import get_messages
from django.test import TestCase
from django.utils import timezone

from dfirtrack_main.models import (
    System,
    Systemstatus,
    Task,
    Taskname,
    Taskpriority,
    Taskstatus,
)


class TaskCreatorViewTestCase(TestCase):
    """Tests for the task creator view."""

    @classmethod
    def setUpTestData(cls):
        # one shared user for authentication and object ownership
        creator = User.objects.create_user(
            username='testuser_task_creator', password='<PASSWORD>')
        # task names available for selection in the creator form
        Taskname.objects.create(taskname_name='task_creator_taskname_1')
        Taskname.objects.create(taskname_name='task_creator_taskname_2')
        Taskname.objects.create(taskname_name='task_creator_taskname_3')
        Taskpriority.objects.create(taskpriority_name='taskpriority_1')
        # systems that the created tasks will be attached to
        status = Systemstatus.objects.create(
            systemstatus_name='task_creator_systemstatus_1')
        for name in ('task_creator_system_1',
                     'task_creator_system_2',
                     'task_creator_system_3'):
            System.objects.create(
                system_name=name,
                systemstatus=status,
                system_created_by_user_id=creator,
                system_modified_by_user_id=creator,
            )

    def _login(self):
        """Authenticate the shared test user against the test client."""
        self.client.login(
            username='testuser_task_creator', password='<PASSWORD>')

    def test_task_creator_not_logged_in(self):
        """Anonymous users are redirected to the login page."""
        destination = '/login/?next=' + urllib.parse.quote(
            '/task/creator/', safe='')
        response = self.client.get('/task/creator/', follow=True)
        self.assertRedirects(
            response, destination, status_code=302, target_status_code=200)

    def test_task_creator_logged_in(self):
        """Authenticated users can open the creator view."""
        self._login()
        response = self.client.get('/task/creator/')
        self.assertEqual(response.status_code, 200)

    def test_task_creator_template(self):
        """The creator view renders the expected template."""
        self._login()
        response = self.client.get('/task/creator/')
        self.assertTemplateUsed(
            response, 'dfirtrack_main/task/task_creator.html')

    def test_task_creator_get_user_context(self):
        """The request user is exposed in the template context."""
        self._login()
        response = self.client.get('/task/creator/')
        self.assertEqual(
            str(response.context['user']), 'testuser_task_creator')

    def test_task_creator_redirect(self):
        """A URL without trailing slash is permanently redirected."""
        self._login()
        destination = urllib.parse.quote('/task/creator/', safe='/')
        response = self.client.get('/task/creator', follow=True)
        self.assertRedirects(
            response, destination, status_code=301, target_status_code=200)

    def test_task_creator_post_redirect(self):
        """A valid POST redirects to the task list."""
        self._login()
        taskname_1 = Taskname.objects.get(
            taskname_name='task_creator_taskname_1')
        taskpriority_1 = Taskpriority.objects.get(
            taskpriority_name='taskpriority_1')
        taskstatus_pending = Taskstatus.objects.get(
            taskstatus_name='10_pending')
        system_1 = System.objects.get(system_name='task_creator_system_1')
        data_dict = {
            'taskname': [taskname_1.taskname_id],
            'taskpriority': taskpriority_1.taskpriority_id,
            'taskstatus': taskstatus_pending.taskstatus_id,
            'system': [system_1.system_id],
        }
        destination = '/task/'
        response = self.client.post('/task/creator/', data_dict)
        self.assertRedirects(
            response, destination, status_code=302, target_status_code=200)

    def test_task_creator_post_system_and_tasks(self):
        """Tasks are created for every selected taskname/system pair only."""
        self._login()
        taskname_1 = Taskname.objects.get(
            taskname_name='task_creator_taskname_1')
        taskname_2 = Taskname.objects.get(
            taskname_name='task_creator_taskname_2')
        taskname_3 = Taskname.objects.get(
            taskname_name='task_creator_taskname_3')
        taskpriority_1 = Taskpriority.objects.get(
            taskpriority_name='taskpriority_1')
        taskstatus_pending = Taskstatus.objects.get(
            taskstatus_name='10_pending')
        system_1 = System.objects.get(system_name='task_creator_system_1')
        system_2 = System.objects.get(system_name='task_creator_system_2')
        system_3 = System.objects.get(system_name='task_creator_system_3')
        data_dict = {
            'taskname': [taskname_1.taskname_id, taskname_2.taskname_id],
            'taskpriority': taskpriority_1.taskpriority_id,
            'taskstatus': taskstatus_pending.taskstatus_id,
            'system': [system_1.system_id, system_2.system_id],
        }
        self.client.post('/task/creator/', data_dict)
        task_1 = Task.objects.get(
            system=system_1,
            taskname=taskname_1,
        )
        # selected systems got the selected tasknames, nothing else
        for system in (system_1, system_2):
            self.assertTrue(
                system.task_set.filter(taskname=taskname_1).exists())
            self.assertTrue(
                system.task_set.filter(taskname=taskname_2).exists())
            self.assertFalse(
                system.task_set.filter(taskname=taskname_3).exists())
        # the unselected system got no tasks at all
        for taskname in (taskname_1, taskname_2, taskname_3):
            self.assertFalse(
                system_3.task_set.filter(taskname=taskname).exists())
        # pending tasks carry no start/finish timestamps
        self.assertEqual(task_1.task_started_time, None)
        self.assertEqual(task_1.task_finished_time, None)

    def test_task_creator_post_times_working(self):
        """A task created in status 'working' gets a start time only."""
        # freeze timezone.now() so the timestamps are predictable
        dt = datetime(2020, 1, 2, tzinfo=timezone.utc)
        with patch.object(timezone, 'now', return_value=dt):
            self._login()
            taskname_started = Taskname.objects.create(
                taskname_name='task_creator_started_time_working')
            taskpriority_1 = Taskpriority.objects.get(
                taskpriority_name='taskpriority_1')
            taskstatus_working = Taskstatus.objects.get(
                taskstatus_name='20_working')
            system_1 = System.objects.get(
                system_name='task_creator_system_1')
            data_dict = {
                'taskname': [taskname_started.taskname_id],
                'taskpriority': taskpriority_1.taskpriority_id,
                'taskstatus': taskstatus_working.taskstatus_id,
                'system': [system_1.system_id],
            }
            self.client.post('/task/creator/', data_dict)
            task_started = Task.objects.get(
                system=system_1,
                taskname=taskname_started,
            )
            self.assertEqual(task_started.task_started_time, timezone.now())
            self.assertEqual(task_started.task_finished_time, None)

    def test_task_creator_post_times_done(self):
        """A task created in status 'done' gets both start and finish times."""
        # freeze timezone.now() so the timestamps are predictable
        dt = datetime(2020, 3, 4, tzinfo=timezone.utc)
        with patch.object(timezone, 'now', return_value=dt):
            self._login()
            taskname_finished = Taskname.objects.create(
                taskname_name='task_creator_finished_time_working')
            taskpriority_1 = Taskpriority.objects.get(
                taskpriority_name='taskpriority_1')
            taskstatus_done = Taskstatus.objects.get(
                taskstatus_name='30_done')
            system_1 = System.objects.get(
                system_name='task_creator_system_1')
            data_dict = {
                'taskname': [taskname_finished.taskname_id],
                'taskpriority': taskpriority_1.taskpriority_id,
                'taskstatus': taskstatus_done.taskstatus_id,
                'system': [system_1.system_id],
            }
            self.client.post('/task/creator/', data_dict)
            task_finished = Task.objects.get(
                system=system_1,
                taskname=taskname_finished,
            )
            self.assertEqual(task_finished.task_started_time, timezone.now())
            self.assertEqual(task_finished.task_finished_time, timezone.now())

    def test_task_creator_post_invalid_reload(self):
        """An invalid POST re-renders the form instead of redirecting."""
        self._login()
        data_dict = {}
        response = self.client.post('/task/creator/', data_dict)
        self.assertEqual(response.status_code, 200)

    def test_task_creator_post_invalid_template(self):
        """An invalid POST re-renders the creator template."""
        self._login()
        data_dict = {}
        response = self.client.post('/task/creator/', data_dict)
        self.assertTemplateUsed(
            response, 'dfirtrack_main/task/task_creator.html')

    def test_task_creator_post_messages(self):
        """Feedback messages report how many tasks/systems were handled."""
        self._login()
        taskname_1 = Taskname.objects.get(
            taskname_name='task_creator_taskname_1')
        taskname_2 = Taskname.objects.get(
            taskname_name='task_creator_taskname_2')
        taskname_3 = Taskname.objects.get(
            taskname_name='task_creator_taskname_3')
        taskpriority_1 = Taskpriority.objects.get(
            taskpriority_name='taskpriority_1')
        taskstatus_pending = Taskstatus.objects.get(
            taskstatus_name='10_pending')
        system_1 = System.objects.get(system_name='task_creator_system_1')
        system_2 = System.objects.get(system_name='task_creator_system_2')
        system_3 = System.objects.get(system_name='task_creator_system_3')
        data_dict = {
            'taskname': [
                taskname_1.taskname_id,
                taskname_2.taskname_id,
                taskname_3.taskname_id,
            ],
            'taskpriority': taskpriority_1.taskpriority_id,
            'taskstatus': taskstatus_pending.taskstatus_id,
            'system': [
                system_1.system_id,
                system_2.system_id,
                system_3.system_id,
            ],
        }
        response = self.client.post('/task/creator/', data_dict)
        messages = list(get_messages(response.wsgi_request))
        self.assertEqual(str(messages[0]), 'Task creator started')
        self.assertEqual(str(messages[1]), '9 tasks created for 3 systems.')
"""Hypothesis strategies for generating ``python_proto`` message types.

Every public function returns a ``SearchStrategy`` building one wrapper
type.  Each component strategy is exposed as a keyword argument with a
sensible default, so callers can narrow the generated data without
rebuilding the whole strategy.
"""

import datetime
import uuid
from typing import Mapping, Sequence, Union

import hypothesis.strategies as st

from python_proto import SerDe
from python_proto.api import (
    DecrementOnlyIntProp,
    DecrementOnlyUintProp,
    Edge,
    EdgeList,
    GraphDescription,
    IdentifiedGraph,
    IdentifiedNode,
    IdStrategy,
    ImmutableIntProp,
    ImmutableStrProp,
    ImmutableUintProp,
    IncrementOnlyIntProp,
    IncrementOnlyUintProp,
    MergedEdge,
    MergedEdgeList,
    MergedGraph,
    MergedNode,
    NodeDescription,
    NodeProperty,
    Session,
    Static,
)
from python_proto.common import Duration, Timestamp, Uuid
from python_proto.metrics import (
    Counter,
    Gauge,
    GaugeType,
    Histogram,
    Label,
    MetricWrapper,
)
from python_proto.pipeline import Envelope, Metadata, RawLog

# ---------------------------------------------------------------------------
# constants
#
# These values parametrize the strategies defined below.  Some of them keep
# the generated data semantically valid, others keep strategy performance
# reasonable.  Please don't change these without a Very Good Reason(TM).
# ---------------------------------------------------------------------------

UINT64_MIN = 0
UINT64_MAX = 2 ** 64 - 1
INT64_MIN = -(2 ** 63) + 1
INT64_MAX = 2 ** 63 - 1
INT32_MIN = -(2 ** 31) + 1
INT32_MAX = 2 ** 31 - 1

DURATION_SECONDS_MIN = 0
DURATION_SECONDS_MAX = UINT64_MAX
DURATION_NANOS_MIN = 0
DURATION_NANOS_MAX = 10 ** 9 - 1

MAX_LIST_SIZE = 5
MIN_LOG_EVENT_SIZE = 0
MAX_LOG_EVENT_SIZE = 1024

# ---------------------------------------------------------------------------
# common
# ---------------------------------------------------------------------------


def uuids(
    lsbs: st.SearchStrategy[int] = st.integers(
        min_value=UINT64_MIN, max_value=UINT64_MAX
    ),
    msbs: st.SearchStrategy[int] = st.integers(
        min_value=UINT64_MIN, max_value=UINT64_MAX
    ),
) -> st.SearchStrategy[Uuid]:
    """Generate ``Uuid`` messages from two uint64 halves."""
    return st.builds(Uuid, lsb=lsbs, msb=msbs)


def durations(
    seconds: st.SearchStrategy[int] = st.integers(
        min_value=DURATION_SECONDS_MIN, max_value=DURATION_SECONDS_MAX
    ),
    nanos: st.SearchStrategy[int] = st.integers(
        min_value=DURATION_NANOS_MIN, max_value=DURATION_NANOS_MAX
    ),
) -> st.SearchStrategy[Duration]:
    """Generate ``Duration`` messages with a valid nanosecond component."""
    return st.builds(Duration, seconds=seconds, nanos=nanos)


def timestamps(
    durations: st.SearchStrategy[Duration] = durations(),
    before_epochs: st.SearchStrategy[bool] = st.booleans(),
) -> st.SearchStrategy[Timestamp]:
    """Generate ``Timestamp`` messages on either side of the epoch."""
    return st.builds(Timestamp, duration=durations, before_epoch=before_epochs)


# ---------------------------------------------------------------------------
# pipeline
# ---------------------------------------------------------------------------


def metadatas(
    trace_ids: st.SearchStrategy[uuid.UUID] = st.uuids(),
    tenant_ids: st.SearchStrategy[uuid.UUID] = st.uuids(),
    event_source_ids: st.SearchStrategy[uuid.UUID] = st.uuids(),
    created_times: st.SearchStrategy[datetime.datetime] = st.datetimes(),
    last_updated_times: st.SearchStrategy[datetime.datetime] = st.datetimes(),
) -> st.SearchStrategy[Metadata]:
    """Generate pipeline ``Metadata`` messages."""
    return st.builds(
        Metadata,
        trace_id=trace_ids,
        tenant_id=tenant_ids,
        event_source_id=event_source_ids,
        created_time=created_times,
        last_updated_time=last_updated_times,
    )


def raw_logs(
    log_events: st.SearchStrategy[bytes] = st.binary(
        min_size=MIN_LOG_EVENT_SIZE, max_size=MAX_LOG_EVENT_SIZE
    )
) -> st.SearchStrategy[RawLog]:
    """Generate ``RawLog`` messages carrying an arbitrary binary payload."""
    return st.builds(RawLog, log_event=log_events)


def envelopes(
    metadatas: st.SearchStrategy[Metadata] = metadatas(),
    inner_messages: st.SearchStrategy[SerDe] = uuids()
    | timestamps()
    | durations()
    | raw_logs(),  # TODO: add more here as they're implemented
) -> st.SearchStrategy[Envelope]:
    """Generate ``Envelope`` messages wrapping an arbitrary inner message."""
    return st.builds(
        Envelope,
        metadata=metadatas,
        inner_message=inner_messages,
    )


# ---------------------------------------------------------------------------
# api
# ---------------------------------------------------------------------------


def sessions(
    primary_key_properties: st.SearchStrategy[Sequence[str]] = st.lists(
        st.text(), max_size=MAX_LIST_SIZE
    ),
    primary_key_requires_asset_ids: st.SearchStrategy[bool] = st.booleans(),
    create_times: st.SearchStrategy[int] = st.integers(
        min_value=UINT64_MIN, max_value=UINT64_MAX
    ),
    last_seen_times: st.SearchStrategy[int] = st.integers(
        min_value=UINT64_MIN, max_value=UINT64_MAX
    ),
    terminate_times: st.SearchStrategy[int] = st.integers(
        min_value=UINT64_MIN, max_value=UINT64_MAX
    ),
) -> st.SearchStrategy[Session]:
    """Generate ``Session`` identification strategies."""
    return st.builds(
        Session,
        primary_key_properties=primary_key_properties,
        primary_key_requires_asset_id=primary_key_requires_asset_ids,
        create_time=create_times,
        last_seen_time=last_seen_times,
        terminate_time=terminate_times,
    )


def statics(
    primary_key_properties: st.SearchStrategy[Sequence[str]] = st.lists(
        st.text(), max_size=MAX_LIST_SIZE
    ),
    primary_key_requires_asset_ids: st.SearchStrategy[bool] = st.booleans(),
) -> st.SearchStrategy[Static]:
    """Generate ``Static`` identification strategies."""
    return st.builds(
        Static,
        primary_key_properties=primary_key_properties,
        primary_key_requires_asset_id=primary_key_requires_asset_ids,
    )


def id_strategies(
    strategies: st.SearchStrategy[Union[Session, Static]] = st.one_of(
        sessions(), statics()
    )
) -> st.SearchStrategy[IdStrategy]:
    """Generate ``IdStrategy`` wrappers around either strategy kind."""
    return st.builds(
        IdStrategy,
        strategy=strategies,
    )


def increment_only_uint_props(
    props: st.SearchStrategy[int] = st.integers(
        min_value=UINT64_MIN, max_value=UINT64_MAX
    ),
) -> st.SearchStrategy[IncrementOnlyUintProp]:
    """Generate increment-only uint64 properties."""
    return st.builds(IncrementOnlyUintProp, prop=props)


def immutable_uint_props(
    props: st.SearchStrategy[int] = st.integers(
        min_value=UINT64_MIN, max_value=UINT64_MAX
    ),
) -> st.SearchStrategy[ImmutableUintProp]:
    """Generate immutable uint64 properties."""
    return st.builds(ImmutableUintProp, prop=props)


def decrement_only_uint_props(
    props: st.SearchStrategy[int] = st.integers(
        min_value=UINT64_MIN, max_value=UINT64_MAX
    ),
) -> st.SearchStrategy[DecrementOnlyUintProp]:
    """Generate decrement-only uint64 properties."""
    return st.builds(DecrementOnlyUintProp, prop=props)


def increment_only_int_props(
    props: st.SearchStrategy[int] = st.integers(
        min_value=INT64_MIN, max_value=INT64_MAX
    ),
) -> st.SearchStrategy[IncrementOnlyIntProp]:
    """Generate increment-only int64 properties."""
    return st.builds(IncrementOnlyIntProp, prop=props)


def immutable_int_props(
    props: st.SearchStrategy[int] = st.integers(
        min_value=INT64_MIN, max_value=INT64_MAX
    ),
) -> st.SearchStrategy[ImmutableIntProp]:
    """Generate immutable int64 properties."""
    return st.builds(ImmutableIntProp, prop=props)


def decrement_only_int_props(
    props: st.SearchStrategy[int] = st.integers(
        min_value=INT64_MIN, max_value=INT64_MAX
    ),
) -> st.SearchStrategy[DecrementOnlyIntProp]:
    """Generate decrement-only int64 properties."""
    return st.builds(DecrementOnlyIntProp, prop=props)


def immutable_str_props(
    props: st.SearchStrategy[str] = st.text(),
) -> st.SearchStrategy[ImmutableStrProp]:
    """Generate immutable string properties."""
    return st.builds(ImmutableStrProp, prop=props)


def node_properties(
    properties: st.SearchStrategy[
        Union[
            IncrementOnlyUintProp,
            DecrementOnlyUintProp,
            ImmutableUintProp,
            IncrementOnlyIntProp,
            DecrementOnlyIntProp,
            ImmutableIntProp,
            ImmutableStrProp,
        ]
    ] = st.one_of(
        increment_only_uint_props(),
        decrement_only_uint_props(),
        immutable_uint_props(),
        increment_only_int_props(),
        decrement_only_int_props(),
        immutable_int_props(),
        immutable_str_props(),
    )
) -> st.SearchStrategy[NodeProperty]:
    """Generate ``NodeProperty`` wrappers around any property kind."""
    return st.builds(NodeProperty, property_=properties)


def node_descriptions(
    properties: st.SearchStrategy[Mapping[str, NodeProperty]] = st.dictionaries(
        keys=st.text(), values=node_properties()
    ),
    node_keys: st.SearchStrategy[str] = st.text(),
    node_types: st.SearchStrategy[str] = st.text(),
    id_strategies: st.SearchStrategy[Sequence[IdStrategy]] = st.lists(
        id_strategies(), max_size=MAX_LIST_SIZE
    ),
) -> st.SearchStrategy[NodeDescription]:
    """Generate unidentified ``NodeDescription`` messages."""
    return st.builds(
        NodeDescription,
        properties=properties,
        node_key=node_keys,
        node_type=node_types,
        id_strategy=id_strategies,
    )


def identified_nodes(
    properties: st.SearchStrategy[Mapping[str, NodeProperty]] = st.dictionaries(
        keys=st.text(), values=node_properties()
    ),
    node_keys: st.SearchStrategy[str] = st.text(),
    node_types: st.SearchStrategy[str] = st.text(),
) -> st.SearchStrategy[IdentifiedNode]:
    """Generate ``IdentifiedNode`` messages."""
    return st.builds(
        IdentifiedNode,
        properties=properties,
        node_key=node_keys,
        node_type=node_types,
    )


def merged_nodes(
    properties: st.SearchStrategy[Mapping[str, NodeProperty]] = st.dictionaries(
        keys=st.text(), values=node_properties()
    ),
    uids: st.SearchStrategy[int] = st.integers(
        min_value=UINT64_MIN, max_value=UINT64_MAX
    ),
    node_keys: st.SearchStrategy[str] = st.text(),
    node_types: st.SearchStrategy[str] = st.text(),
) -> st.SearchStrategy[MergedNode]:
    """Generate ``MergedNode`` messages with a uint64 uid."""
    return st.builds(
        MergedNode,
        properties=properties,
        uid=uids,
        node_key=node_keys,
        node_type=node_types,
    )


def edges(
    from_node_keys: st.SearchStrategy[str] = st.text(),
    to_node_keys: st.SearchStrategy[str] = st.text(),
    edge_names: st.SearchStrategy[str] = st.text(),
) -> st.SearchStrategy[Edge]:
    """Generate ``Edge`` messages keyed by node keys."""
    return st.builds(
        Edge,
        from_node_key=from_node_keys,
        to_node_key=to_node_keys,
        edge_name=edge_names,
    )


def edge_lists(
    edges: st.SearchStrategy[Sequence[Edge]] = st.lists(
        edges(), max_size=MAX_LIST_SIZE
    ),
) -> st.SearchStrategy[EdgeList]:
    """Generate ``EdgeList`` messages."""
    return st.builds(
        EdgeList,
        edges=edges,
    )


def merged_edges(
    from_uids: st.SearchStrategy[str] = st.text(),
    from_node_keys: st.SearchStrategy[str] = st.text(),
    to_uids: st.SearchStrategy[str] = st.text(),
    to_node_keys: st.SearchStrategy[str] = st.text(),
    edge_names: st.SearchStrategy[str] = st.text(),
) -> st.SearchStrategy[MergedEdge]:
    """Generate ``MergedEdge`` messages.

    NOTE(review): ``from_uid``/``to_uid`` are generated as text here while
    ``merged_nodes`` generates uids as uint64 integers — confirm whether
    ``MergedEdge`` really expects string uids.
    """
    return st.builds(
        MergedEdge,
        from_uid=from_uids,
        from_node_key=from_node_keys,
        to_uid=to_uids,
        to_node_key=to_node_keys,
        edge_name=edge_names,
    )


def merged_edge_lists(
    edges: st.SearchStrategy[Sequence[MergedEdge]] = st.lists(
        merged_edges(), max_size=MAX_LIST_SIZE
    ),
) -> st.SearchStrategy[MergedEdgeList]:
    """Generate ``MergedEdgeList`` messages."""
    return st.builds(
        MergedEdgeList,
        edges=edges,
    )


def graph_descriptions(
    nodes: st.SearchStrategy[Mapping[str, NodeDescription]] = st.dictionaries(
        keys=st.text(), values=node_descriptions()
    ),
    edges: st.SearchStrategy[Mapping[str, EdgeList]] = st.dictionaries(
        keys=st.text(), values=edge_lists()
    ),
) -> st.SearchStrategy[GraphDescription]:
    """Generate whole unidentified ``GraphDescription`` messages."""
    return st.builds(
        GraphDescription,
        nodes=nodes,
        edges=edges,
    )


def identified_graphs(
    nodes: st.SearchStrategy[Mapping[str, IdentifiedNode]] = st.dictionaries(
        keys=st.text(), values=identified_nodes()
    ),
    edges: st.SearchStrategy[Mapping[str, EdgeList]] = st.dictionaries(
        keys=st.text(), values=edge_lists()
    ),
) -> st.SearchStrategy[IdentifiedGraph]:
    """Generate whole ``IdentifiedGraph`` messages."""
    return st.builds(
        IdentifiedGraph,
        nodes=nodes,
        edges=edges,
    )


def merged_graphs(
    nodes: st.SearchStrategy[Mapping[str, MergedNode]] = st.dictionaries(
        keys=st.text(), values=merged_nodes()
    ),
    edges: st.SearchStrategy[Mapping[str, MergedEdgeList]] = st.dictionaries(
        keys=st.text(), values=merged_edge_lists()
    ),
) -> st.SearchStrategy[MergedGraph]:
    """Generate whole ``MergedGraph`` messages."""
    return st.builds(
        MergedGraph,
        nodes=nodes,
        edges=edges,
    )


# ---------------------------------------------------------------------------
# metrics
# ---------------------------------------------------------------------------


def labels(
    keys: st.SearchStrategy[str] = st.text(),
    values: st.SearchStrategy[str] = st.text(),
) -> st.SearchStrategy[Label]:
    """Generate metric ``Label`` messages."""
    return st.builds(Label, key=keys, value=values)


def counters(
    names: st.SearchStrategy[str] = st.text(),
    increments: st.SearchStrategy[int] = st.integers(
        min_value=UINT64_MIN, max_value=UINT64_MAX
    ),
    labels: st.SearchStrategy[Sequence[Label]] = st.lists(
        labels(), max_size=MAX_LIST_SIZE
    ),
) -> st.SearchStrategy[Counter]:
    """Generate ``Counter`` metrics."""
    return st.builds(Counter, name=names, increment=increments, labels=labels)


def gauge_types() -> st.SearchStrategy[GaugeType]:
    """Generate one of the known ``GaugeType`` values."""
    return st.sampled_from(GaugeType)


def gauges(
    gauge_types: st.SearchStrategy[GaugeType] = gauge_types(),
    names: st.SearchStrategy[str] = st.text(),
    values: st.SearchStrategy[float] = st.floats(
        allow_nan=False, allow_infinity=False
    ),
    labels: st.SearchStrategy[Sequence[Label]] = st.lists(
        labels(), max_size=MAX_LIST_SIZE
    ),
) -> st.SearchStrategy[Gauge]:
    """Generate ``Gauge`` metrics with finite values."""
    return st.builds(
        Gauge, gauge_type=gauge_types, name=names, value=values, labels=labels
    )


def histograms(
    names: st.SearchStrategy[str] = st.text(),
    values: st.SearchStrategy[float] = st.floats(
        allow_nan=False, allow_infinity=False
    ),
    labels: st.SearchStrategy[Sequence[Label]] = st.lists(
        labels(), max_size=MAX_LIST_SIZE
    ),
) -> st.SearchStrategy[Histogram]:
    """Generate ``Histogram`` metrics with finite values."""
    return st.builds(Histogram, name=names, value=values, labels=labels)


def metric_wrappers(
    metrics: st.SearchStrategy[Union[Counter, Gauge, Histogram]] = st.one_of(
        counters(), gauges(), histograms()
    )
) -> st.SearchStrategy[MetricWrapper]:
    """Generate ``MetricWrapper`` messages around any metric kind."""
    return st.builds(MetricWrapper, metric=metrics)
# Copyright (c) 2016-present, Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>numpy<as>np<import_from_stmt>hypothesis given<import_stmt>hypothesis.strategies<as>st<import_from_stmt>caffe2.python core<import_stmt>caffe2.python.hypothesis_test_util<as>hu<class_stmt>TestConditionalOp(hu.HypothesisTestCase)<block_start>@given(rows_num=st.integers(1 10000) **hu.gcs_cpu_only)<def_stmt>test_conditional self rows_num gc dc<block_start>op=core.CreateOperator("Conditional" ["condition" "data_t" "data_f"] "output")<line_sep>data_t=np.random.random((rows_num 10 20)).astype(np.float32)<line_sep>data_f=np.random.random((rows_num 10 20)).astype(np.float32)<line_sep>condition=np.random.choice(a=[<true> <false>] size=rows_num)<def_stmt>ref condition data_t data_f<block_start>output=[data_t[i]<if>condition[i]<else>data_f[i]<for>i range(rows_num)]<line_sep><return>(output )<block_end>self.assertReferenceChecks(gc op [condition data_t data_f] ref)<block_end><block_end>
import warnings

from chart_studio import tools
from chart_studio.tests.utils import PlotlyTestCase


class FileToolsTest(PlotlyTestCase):
    """Tests for the config/credentials file helpers in chart_studio.tools."""

    def test_set_config_file_all_entries(self):
        # set_config_file followed by get_config_file must round-trip
        domain, streaming_domain, api, sharing = (
            "this", "thing", "that", "private")
        ssl_verify, proxy_auth, world_readable, auto_open = (
            True, True, False, False)
        tools.set_config_file(
            plotly_domain=domain,
            plotly_streaming_domain=streaming_domain,
            plotly_api_domain=api,
            plotly_ssl_verification=ssl_verify,
            plotly_proxy_authorization=proxy_auth,
            world_readable=world_readable,
            auto_open=auto_open,
        )
        config = tools.get_config_file()
        self.assertEqual(config["plotly_domain"], domain)
        self.assertEqual(config["plotly_streaming_domain"], streaming_domain)
        self.assertEqual(config["plotly_api_domain"], api)
        self.assertEqual(config["plotly_ssl_verification"], ssl_verify)
        self.assertEqual(config["plotly_proxy_authorization"], proxy_auth)
        self.assertEqual(config["world_readable"], world_readable)
        # world_readable=False implies sharing is switched to "private"
        self.assertEqual(config["sharing"], sharing)
        self.assertEqual(config["auto_open"], auto_open)
        tools.reset_config_file()

    def test_set_config_file_two_entries(self):
        # setting only two entries must round-trip those two entries
        domain, streaming_domain = "this", "thing"
        tools.set_config_file(
            plotly_domain=domain, plotly_streaming_domain=streaming_domain)
        config = tools.get_config_file()
        self.assertEqual(config["plotly_domain"], domain)
        self.assertEqual(config["plotly_streaming_domain"], streaming_domain)
        tools.reset_config_file()

    def test_set_config_file_world_readable(self):
        # a non-bool world_readable must raise TypeError
        kwargs = {"world_readable": "True"}
        self.assertRaises(TypeError, tools.set_config_file, **kwargs)

    def test_set_config_expected_warning_msg(self):
        # an http (not https) plotly_domain must emit a UserWarning
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            kwargs = {"plotly_domain": "http://www.foo-bar.com"}
            tools.set_config_file(**kwargs)
            assert len(w) == 1
            assert issubclass(w[-1].category, UserWarning)
            assert "plotly_domain" in str(w[-1].message)

    def test_set_config_no_warning_msg_if_plotly_domain_is_https(self):
        # an https plotly_domain must not warn
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            kwargs = {"plotly_domain": "https://www.foo-bar.com"}
            tools.set_config_file(**kwargs)
            assert len(w) == 0

    def test_reset_config_file(self):
        # reset_config_file must restore the default domains
        tools.reset_config_file()
        config = tools.get_config_file()
        self.assertEqual(config["plotly_domain"], "https://plotly.com")
        self.assertEqual(
            config["plotly_streaming_domain"], "stream.plotly.com")

    def test_get_credentials_file(self):
        # get_credentials_file must expose every expected key
        original_creds = tools.get_credentials_file()
        expected = [
            "username",
            "stream_ids",
            "api_key",
            "proxy_username",
            "proxy_password",
        ]
        self.assertTrue(all(x in original_creds for x in expected))

    def test_reset_credentials_file(self):
        # after a reset the credentials file still has every expected key
        tools.reset_credentials_file()
        reset_creds = tools.get_credentials_file()
        expected = [
            "username",
            "stream_ids",
            "api_key",
            "proxy_username",
            "proxy_password",
        ]
        self.assertTrue(all(x in reset_creds for x in expected))
""" The following is adapted from Dask release 2021.03.1: https://github.com/dask/dask/blob/2021.03.1/dask/local.py """<import_stmt>os<import_from_stmt>queue Queue Empty<import_from_stmt>dask config<import_from_stmt>dask.callbacks local_callbacks unpack_callbacks<import_from_stmt>dask.core _execute_task flatten get_dependencies has_tasks reverse_dict <import_from_stmt>dask.order order<if_stmt>os.name<eq>"nt"# Python 3 windows Queue.get doesn't handle interrupts properly. To # workaround this we poll at a sufficiently large interval that it # shouldn't affect performance, but small enough that users trying to kill # an application shouldn't care. <block_start><def_stmt>queue_get q<block_start><while_stmt><true><block_start><try_stmt><block_start><return>q.get(block=<true> timeout=0.1)<block_end><except_stmt>Empty<block_start><pass><block_end><block_end><block_end><block_end><else_stmt><block_start><def_stmt>queue_get q<block_start><return>q.get()<block_end><block_end><def_stmt>start_state_from_dask dsk cache=<none> sortkey=<none><block_start>"""Start state from a dask Examples -------- >>> dsk = { 'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')} # doctest: +SKIP >>> from pprint import pprint # doctest: +SKIP >>> pprint(start_state_from_dask(dsk)) # doctest: +SKIP {'cache': {'x': 1, 'y': 2}, 'dependencies': {'w': {'z', 'y'}, 'x': set(), 'y': set(), 'z': {'x'}}, 'dependents': {'w': set(), 'x': {'z'}, 'y': {'w'}, 'z': {'w'}}, 'finished': set(), 'ready': ['z'], 'released': set(), 'running': set(), 'waiting': {'w': {'z'}}, 'waiting_data': {'x': {'z'}, 'y': {'w'}, 'z': {'w'}}} """<if_stmt>sortkey<is><none><block_start>sortkey=order(dsk).get<block_end><if_stmt>cache<is><none><block_start>cache=config.get("cache" <none>)<block_end><if_stmt>cache<is><none><block_start>cache=dict()<block_end>data_keys=set()<for_stmt>k,v dsk.items()<block_start><if_stmt><not>has_tasks(dsk 
v)<block_start>cache[k]=v<line_sep>data_keys.add(k)<block_end><block_end>dsk2=dsk.copy()<line_sep>dsk2.update(cache)<line_sep>dependencies={k:get_dependencies(dsk2 k)<for>k dsk}<line_sep>waiting={k:v.copy()<for>k,v dependencies.items()<if>k<not><in>data_keys}<line_sep>dependents=reverse_dict(dependencies)<for_stmt>a cache<block_start><for_stmt>b dependents.get(a ())<block_start>waiting[b].remove(a)<block_end><block_end>waiting_data={k:v.copy()<for>k,v dependents.items()<if>v}<line_sep>ready_set={k<for>k,v waiting.items()<if><not>v}<line_sep>ready=sorted(ready_set key=sortkey reverse=<true>)<line_sep>waiting={k:v<for>k,v waiting.items()<if>v}<line_sep>state={"dependencies":dependencies "dependents":dependents "waiting":waiting "waiting_data":waiting_data "cache":cache "ready":ready "running":set() "finished":set() "released":set() }<line_sep><return>state<block_end><def_stmt>execute_task key task_info dumps loads get_id pack_exception<block_start>""" Compute task and handle all administration See Also -------- _execute_task : actually execute task """<try_stmt><block_start>task,data=loads(task_info)<line_sep>result=_execute_task(task data)<line_sep>id=get_id()<line_sep>result=dumps((result id))<line_sep>failed=<false><block_end><except_stmt>BaseException<as>e<block_start>result=pack_exception(e dumps)<line_sep>failed=<true><block_end><return>key result failed<block_end><def_stmt>release_data key state delete=<true><block_start>"""Remove data from temporary storage See Also -------- finish_task """<if_stmt>key<in>state["waiting_data"]<block_start><assert_stmt><not>state["waiting_data"][key]<del_stmt>state["waiting_data"][key]<block_end>state["released"].add(key)<if_stmt>delete<block_start><del_stmt>state["cache"][key]<block_end><block_end>DEBUG=<false><def_stmt>finish_task dsk key state results sortkey delete=<true> release_data=release_data<block_start>""" Update execution state after a task finishes Mutates. This should run atomically (with a lock). 
"""<for_stmt>dep sorted(state["dependents"][key] key=sortkey reverse=<true>)<block_start>s=state["waiting"][dep]<line_sep>s.remove(key)<if_stmt><not>s<block_start><del_stmt>state["waiting"][dep]<line_sep>state["ready"].append(dep)<block_end><block_end><for_stmt>dep state["dependencies"][key]<block_start><if_stmt>dep<in>state["waiting_data"]<block_start>s=state["waiting_data"][dep]<line_sep>s.remove(key)<if_stmt><not>s<and>dep<not><in>results<block_start><if_stmt>DEBUG<block_start><import_from_stmt>chest.core nbytes<line_sep>print("Key: %s\tDep: %s\t NBytes: %.2f\t Release"%(key dep sum(map(nbytes state["cache"].values())/1e6)))<block_end>release_data(dep state delete=delete)<block_end><block_end><elif_stmt>delete<and>dep<not><in>results<block_start>release_data(dep state delete=delete)<block_end><block_end>state["finished"].add(key)<line_sep>state["running"].remove(key)<line_sep><return>state<block_end><def_stmt>nested_get ind coll<block_start>"""Get nested index from collection Examples -------- >>> nested_get(1, 'abc') 'b' >>> nested_get([1, 0], 'abc') ('b', 'a') >>> nested_get([[1, 0], [0, 1]], 'abc') (('b', 'a'), ('a', 'b')) """<if_stmt>isinstance(ind list)<block_start><return>tuple(nested_get(i coll)<for>i ind)<block_end><else_stmt><block_start><return>coll[ind]<block_end><block_end><def_stmt>default_get_id <block_start>"""Default get_id"""<line_sep><return><none><block_end><def_stmt>default_pack_exception e dumps<block_start><raise><block_end><def_stmt>reraise exc tb=<none><block_start><if_stmt>exc.__traceback__<is><not>tb<block_start><raise>exc.with_traceback(tb)<block_end><raise>exc<block_end><def_stmt>identity x<block_start>"""Identity function. Returns x. 
>>> identity(3) 3 """<line_sep><return>x<block_end><def_stmt>get_async apply_async num_workers dsk result cache=<none> get_id=default_get_id rerun_exceptions_locally=<none> pack_exception=default_pack_exception raise_exception=reraise callbacks=<none> dumps=identity loads=identity **kwargs<block_start>"""Asynchronous get function This is a general version of various asynchronous schedulers for dask. It takes a an apply_async function as found on Pool objects to form a more specific ``get`` method that walks through the dask array with parallel workers, avoiding repeat computation and minimizing memory use. Parameters ---------- apply_async : function Asynchronous apply function as found on Pool or ThreadPool num_workers : int The number of active tasks we should have at any one time dsk : dict A dask dictionary specifying a workflow result : key or list of keys Keys corresponding to desired data cache : dict-like, optional Temporary storage of results get_id : callable, optional Function to return the worker id, takes no arguments. Examples are `threading.current_thread` and `multiprocessing.current_process`. rerun_exceptions_locally : bool, optional Whether to rerun failing tasks in local process to enable debugging (False by default) pack_exception : callable, optional Function to take an exception and ``dumps`` method, and return a serialized tuple of ``(exception, traceback)`` to send back to the scheduler. Default is to just raise the exception. raise_exception : callable, optional Function that takes an exception and a traceback, and raises an error. dumps: callable, optional Function to serialize task data and results to communicate between worker and parent. Defaults to identity. loads: callable, optional Inverse function of `dumps`. Defaults to identity. callbacks : tuple or list of tuples, optional Callbacks are passed in as tuples of length 5. Multiple sets of callbacks may be passed in as a list of tuples. 
For more information, see the dask.diagnostics documentation. See Also -------- threaded.get """<line_sep>queue=Queue()<if_stmt>isinstance(result list)<block_start>result_flat=set(flatten(result))<block_end><else_stmt><block_start>result_flat={result}<block_end>results=set(result_flat)<line_sep>dsk=dict(dsk)<with_stmt>local_callbacks(callbacks)<as>callbacks<block_start>_,_,pretask_cbs,posttask_cbs,_=unpack_callbacks(callbacks)<line_sep>started_cbs=[]<line_sep>succeeded=<false><line_sep># if start_state_from_dask fails, we will have something # to pass to the final block. state={}<try_stmt><block_start><for_stmt>cb callbacks<block_start><if_stmt>cb[0]<block_start>cb[0](dsk)<block_end>started_cbs.append(cb)<block_end>keyorder=order(dsk)<line_sep>state=start_state_from_dask(dsk cache=cache sortkey=keyorder.get)<for_stmt>_,start_state,_,_,_ callbacks<block_start><if_stmt>start_state<block_start>start_state(dsk state)<block_end><block_end><if_stmt>rerun_exceptions_locally<is><none><block_start>rerun_exceptions_locally=config.get("rerun_exceptions_locally" <false>)<block_end><if_stmt>state["waiting"]<and><not>state["ready"]<block_start><raise>ValueError("Found no accessible jobs in dask")<block_end><def_stmt>fire_task <block_start>""" Fire off a task to the thread pool """<line_sep># Choose a good task to compute key=state["ready"].pop()<line_sep>state["running"].add(key)<for_stmt>f pretask_cbs<block_start>f(key dsk state)<block_end># Prep data to send data={dep:state["cache"][dep]<for>dep get_dependencies(dsk key)}<line_sep># Submit apply_async(execute_task args=(key dumps((dsk[key] data)) dumps loads get_id pack_exception ) callback=queue.put )<block_end># Seed initial tasks into the thread pool <while_stmt>state["ready"]<and>len(state["running"])<l>num_workers<block_start>fire_task()<block_end># Main loop, wait on tasks to finish, insert new ones 
<while_stmt>state["waiting"]<or>state["ready"]<or>state["running"]<block_start>key,res_info,failed=queue_get(queue)<if_stmt>failed<block_start>exc,tb=loads(res_info)<if_stmt>rerun_exceptions_locally<block_start>data={dep:state["cache"][dep]<for>dep get_dependencies(dsk key)}<line_sep>task=dsk[key]<line_sep>_execute_task(task data)# Re-execute locally <block_end><else_stmt><block_start>raise_exception(exc tb)<block_end><block_end>res,worker_id=loads(res_info)<line_sep>state["cache"][key]=res<line_sep>finish_task(dsk key state results keyorder.get)<for_stmt>f posttask_cbs<block_start>f(key res dsk state worker_id)<block_end><while_stmt>state["ready"]<and>len(state["running"])<l>num_workers<block_start>fire_task()<block_end><block_end>succeeded=<true><block_end><finally_stmt><block_start><for_stmt>_,_,_,_,finish started_cbs<block_start><if_stmt>finish<block_start>finish(dsk state <not>succeeded)<block_end><block_end><block_end><block_end><return>nested_get(result state["cache"])<block_end><def_stmt>apply_sync func args=() kwds=<none> callback=<none><block_start>""" A naive synchronous version of apply_async """<if_stmt>kwds<is><none><block_start>kwds={}<block_end>res=func(*args **kwds)<if_stmt>callback<is><not><none><block_start>callback(res)<block_end><block_end>
# Copyright (c) 2016 <NAME> <<EMAIL>> # Copyright (c) 2016 <NAME> <<EMAIL>> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the <organization> nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import copy

import six
from six.moves import range

from pytanque import (
    symbol,
    imm,
    Vector,
    Matrix,
    simplify,
    simplify_inplace,
    expand_esf_inplace,
    subs_vectors,
    subs_exprs,
    subs_exprs_inplace,
    analyses,
    esf_vector,
    esf,
    expand_esf,
    or_to_esf_inplace,
    Expr,
)


def get_vector_from_cst(nbits, n):
    """Return an ``nbits``-wide pytanque Vector holding the integer ``n``."""
    vec = Vector(nbits)
    vec.set_int_be(n, nbits)
    return vec


def get_int(nbits, v):
    """Inverse of :func:`get_vector_from_cst`: Vector -> integer."""
    return v.get_int_be()


def popcount(n):
    """Number of set bits in the non-negative integer ``n``."""
    ret = 0
    while n > 0:
        if (n & 1) == 1:
            ret += 1
        n >>= 1
    return ret


def next_zero_bit(v):
    """Index of the lowest clear bit of ``v``."""
    v = ~v
    v = (v ^ (v - 1)) >> 1
    return popcount(v)


def evaluate_expr(E, nbits, map_):
    """Substitute ``map_`` into expression ``E`` and evaluate.

    Keys of ``map_`` can be MBA variables or symbols:
      * an MBA variable must map to an integer or an MBA variable,
      * a symbol must map to an expression.

    Returns an integer when the substituted expression is fully concrete,
    otherwise the (simplified) symbolic expression.
    """
    keys = []
    values = []
    for k, v in six.iteritems(map_):
        # TOFIX: not a clean test
        if hasattr(k, "vec"):
            keys.extend(k.vec)
            if isinstance(v, six.integer_types):
                values.extend(imm((v >> i) & 1) for i in range(k.nbits))
                continue
            if hasattr(v, "vec"):
                v = v.vec
            if isinstance(v, Vector):
                assert len(v) == len(k.vec)
                values.extend(v)
                continue
            raise ValueError(
                "an MBAVariable must map to an integer value or an MBAVariable!"
            )
        elif isinstance(k, Expr):
            if not k.is_sym():
                raise ValueError("only symbols or MBAVariable can be a key")
            if not isinstance(v, Expr):
                raise ValueError("a symbol can only be mapped to an expression")
            keys.append(k)
            values.append(v)
    E = expand_esf(E)
    simplify_inplace(E)
    subs_exprs_inplace(E, keys, values)
    simplify_inplace(E)
    try:
        return E.get_int_be()
    except RuntimeError:
        # Still symbolic: return the simplified expression instead.
        return E


def test_N(nbits, X, n):
    """Boolean product that is 1 exactly when vector ``X`` equals ``n``."""
    ret = imm(1)
    for i in range(nbits):
        if ((n >> i) & 1) == 1:
            ret *= X[i]
        else:
            ret *= X[i] + imm(1)
    simplify_inplace(ret)
    return ret


class MBAImpl(object):
    """Mixed Boolean-Arithmetic engine over ``nbits``-wide bit vectors.

    Integers are represented as pytanque :class:`Vector` objects of boolean
    expressions; arithmetic is implemented bit-by-bit (optionally using
    elementary symmetric functions, ``use_esf``).

    NOTE(review): pytanque's Vector shift operators are used inverted here
    (``lshift_n`` returns ``X >> n`` and vice versa) — kept from the original;
    presumably ``>>`` shifts bits toward higher indices in this library.
    """

    def __init__(self, nbits):
        self.__set_nbits(nbits)
        self.gen_x = Vector(nbits)
        self.use_esf = False
        self.use_opt_mba = True
        for i in range(0, nbits):
            self.gen_x[i] = symbol("__gen_X_%d" % i)

    def __set_nbits(self, nbits):
        # Also refresh the all-ones mask used for wrapping constants.
        self.nbits = nbits
        self.max_uint = (1 << nbits) - 1

    def var_symbols(self, name):
        """Vector of fresh symbols ``name0 .. name{nbits-1}``."""
        symbols = [symbol("%s%d" % (name, i)) for i in range(0, self.nbits)]
        M = Vector(self.nbits)
        for i in range(0, self.nbits):
            M[i] = symbols[i]
        return M

    def get_vector_from_cst(self, n):
        return get_vector_from_cst(self.nbits, n)

    def get_int(self, v):
        return get_int(self.nbits, v)

    def identity(self):
        return Matrix.identity(self.nbits)

    def cst_matrix(self, cst):
        return Matrix(self.nbits, self.nbits, lambda i, j: cst)

    def null_matrix(self):
        return Matrix(self.nbits, self.nbits)

    def iadd_Y(self, X, Y):
        """In-place ripple-carry addition ``X += Y``.

        NOTE(review): returns a fresh, never-populated Vector — kept from the
        original; callers appear to rely only on the in-place mutation of X.
        """
        carry = imm(0)
        ret = Vector(self.nbits)
        if self.use_esf:
            for i in range(0, self.nbits):
                new_carry = esf(2, [X[i], Y[i], carry])
                X[i] += simplify_inplace(Y[i] + carry)
                carry = new_carry
        else:
            for i in range(0, self.nbits):
                sum_XY = simplify_inplace(X[i] + Y[i])
                new_carry = simplify_inplace(X[i] * Y[i] + (carry * sum_XY))
                X[i] = sum_XY + carry
                carry = new_carry
        return ret

    def add_Y(self, X, Y):
        """Ripple-carry addition; returns ``X + Y`` as a new Vector."""
        carry = imm(0)
        ret = Vector(self.nbits)
        if self.use_esf:
            for i in range(0, self.nbits):
                ret[i] = simplify_inplace(X[i] + Y[i] + carry)
                carry = esf(2, [X[i], Y[i], carry])
        else:
            for i in range(0, self.nbits):
                sum_XY = simplify_inplace(X[i] + Y[i])
                ret[i] = simplify_inplace(sum_XY + carry)
                carry = simplify_inplace(X[i] * Y[i] + (carry * sum_XY))
        return ret

    def add_n(self, X, n):
        """``X + n`` for an integer constant ``n`` (wrapped to nbits)."""
        n = n & self.max_uint
        if self.use_esf or not self.use_opt_mba:
            return self.add_Y(X, self.get_vector_from_cst(n))
        else:
            return self.add_n_mba(X, n)

    def add_n_mba(self, X, n):
        """Addition via the xor/and-carry MBA identity."""
        null = Vector(self.nbits)
        n = self.get_vector_from_cst(n)
        while n != null:
            # Carry is computed from the *old* X, before the xor result lands.
            new_X = simplify_inplace(self.xor_Y(X, n))
            n = simplify_inplace(
                self.and_Y(self.lshift_n(X, 1), self.lshift_n(n, 1))
            )
            X = new_X
        return X

    def sub_n_mba(self, X, n):
        """Subtraction counterpart of :meth:`add_n_mba`."""
        null = Vector(self.nbits)
        n = self.get_vector_from_cst(n)
        while n != null:
            X = simplify_inplace(self.xor_Y(X, n))
            n = simplify_inplace(
                self.and_Y(self.lshift_n(X, 1), self.lshift_n(n, 1))
            )
        return X

    def iadd_n(self, X, n):
        """In-place ``X += n`` for an integer constant."""
        n = n & self.max_uint
        if self.use_esf:
            return self.iadd_Y(X, self.get_vector_from_cst(n))
        return self.iadd_n_mba(X, n)

    def iadd_n_mba(self, X, n):
        null = Vector(self.nbits)
        n = self.get_vector_from_cst(n)
        while n != null:
            carry = simplify_inplace(self.and_Y(X, n))
            self.ixor_Y(X, n)
            simplify_inplace(X)
            n = self.lshift_n(carry, 1)
        return X

    def iadd_lshifted_Y(self, X, Y, offset):
        """In-place ``X += (Y << offset)`` without materialising the shift."""
        if self.use_esf:
            self.iadd_Y(X, self.lshift_n(Y, offset))
            simplify_inplace(X)
            return
        carry = imm(0)
        for i in range(0, self.nbits):
            Yi = imm(0) if i < offset else Y[i - offset]
            Xi = X[i]
            mul_XY = simplify_inplace(Xi * Yi)
            Xi += Yi
            simplify_inplace(Xi)
            carry_new = simplify_inplace(mul_XY + (carry * Xi))
            Xi += carry
            simplify_inplace(Xi)
            carry = carry_new

    def sub_Y(self, X, Y):
        """Ripple-borrow subtraction; returns ``X - Y`` as a new Vector."""
        carry = imm(0)
        ret = Vector(self.nbits)
        if self.use_esf:
            for i in range(0, self.nbits):
                ret[i] = simplify_inplace(X[i] + Y[i] + carry)
                carry = esf(2, [X[i] + imm(1), Y[i], carry])
        else:
            for i in range(0, self.nbits):
                sum_XY = simplify_inplace(X[i] + Y[i])
                ret[i] = simplify_inplace(sum_XY + carry)
                carry = simplify_inplace(
                    (X[i] + imm(1)) * Y[i] + (carry * (sum_XY + imm(1)))
                )
        return ret

    def sub_n(self, X, n):
        n = n & self.max_uint
        return self.sub_Y(X, self.get_vector_from_cst(n))

    def mul_Y(self, X, Y):
        """Shift-and-add schoolbook multiplication of two vectors."""
        ret = Vector(self.nbits)
        i = 0
        for i in range(0, self.nbits):
            Yi_vec = Vector(self.nbits, Y[i])
            self.iadd_Y(ret, self.lshift_n(X, i) * Yi_vec)
        return ret

    def mul_n_org(self, X, n):
        """Plain shift-and-add multiplication by a constant (reference)."""
        n = n & self.max_uint
        ret = Vector(self.nbits)
        i = 0
        while n > 0:
            if (n & 1) == 1:
                self.iadd_lshifted_Y(ret, X, i)
            n >>= 1
            i += 1
        return ret

    def mul_n(self, X, n):
        """Multiplication by a constant with Hacker's Delight run-of-ones
        optimisations (uses ``~X`` to collapse long runs of set bits)."""
        if n == 1:
            return X
        ret = Vector(self.nbits)
        if n == 0:
            return ret
        n = n & self.max_uint
        i = 0
        final_sum = 0
        not_x = None

        def compute_not_x(not_x):
            # Lazily compute ~X once, the first time it is needed.
            if not_x is None:
                not_x = self.not_X(X)
            return not_x

        while n > 0:
            # Optimisations from the Hacker's delight
            nz = next_zero_bit(n)
            if nz >= 3:
                # Run of >=3 ones: X*(2^nz - 1) == (X << nz) + ~X + 1.
                not_x = compute_not_x(not_x)
                self.iadd_lshifted_Y(ret, X, nz + i)
                self.iadd_lshifted_Y(ret, not_x, i)
                final_sum += 1 << i
                n >>= nz
                i += nz
            else:
                bits4 = n & 0b1111
                if bits4 == 0b1011:
                    not_x = compute_not_x(not_x)
                    self.iadd_lshifted_Y(ret, X, 4 + i)
                    self.iadd_lshifted_Y(ret, not_x, 2 + i)
                    self.iadd_lshifted_Y(ret, not_x, i)
                    final_sum += 1 << (i + 2)
                    final_sum += 1 << i
                    n >>= 4
                    i += 4
                elif bits4 == 0b1101:
                    not_x = compute_not_x(not_x)
                    self.iadd_lshifted_Y(ret, X, 4 + i)
                    self.iadd_lshifted_Y(ret, not_x, 1 + i)
                    self.iadd_lshifted_Y(ret, not_x, i)
                    final_sum += 1 << (i + 1)
                    final_sum += 1 << i
                    n >>= 4
                    i += 4
                else:
                    if (n & 1) == 1:
                        self.iadd_lshifted_Y(ret, X, i)
                    n >>= 1
                    i += 1
        if final_sum > 0:
            self.iadd_n(ret, final_sum & self.max_uint)
        return ret

    def div_n(self, X, n):
        """Unsigned division by constant ``n`` via multiply-and-shift
        (magic-number technique): temporarily widens to 2*nbits+1 bits."""
        ret = Vector(self.nbits * 2 + 1)
        for i in range(self.nbits):
            ret[i] = X[i]
        # BUG FIX: was true division (`/`), which produces an imprecise float
        # on Python 3; the magic-number computation needs exact integers.
        nc = (2 ** self.nbits // n) * n - 1
        for p in range(self.nbits, 2 * self.nbits + 1):
            if 2 ** p > nc * (n - 1 - ((2 ** p - 1) % n)):
                break
        else:
            raise RuntimeError("division: unable to find the shifting count")
        m = (2 ** p + n - 1 - ((2 ** p - 1) % n)) // n
        self.__set_nbits(2 * self.nbits + 1)
        ret = self.mul_n(ret, m)
        ret = self.rshift_n(ret, p)
        self.__set_nbits((self.nbits - 1) // 2)
        final_ret = Vector(self.nbits)
        for i in range(self.nbits):
            final_ret[i] = ret[i]
        return final_ret

    def phi_X(self, X):
        """Diagonal matrix with the bits of ``X`` on the diagonal."""
        def f(i, j):
            if i != j:
                return imm(0)
            return X[i]

        return Matrix(self.nbits, self.nbits, f)

    def and_Y(self, X, Y):
        return X * Y
        # return self.phi_X(Y)*X

    def and_n(self, X, n):
        if n < 0:
            n = n & self.max_uint
        ret = Vector(self.nbits)
        for i in range(self.nbits):
            if n & (1 << i):
                ret[i] = X[i]
        return ret
        # return self.phi_X(self.get_vector_from_cst(n))*X

    def and_exp(self, X, e):
        return X * e

    def not_X(self, X):
        return X + self.get_vector_from_cst(self.max_uint)

    def xor_n(self, X, n):
        if n < 0:
            # BUG FIX: was `n = n % self` (modulo by the MBAImpl instance,
            # a TypeError).  Wrap into the unsigned range like and_n does.
            n = n & self.max_uint
        ret = Vector(self.nbits)
        for i in range(self.nbits):
            if n & (1 << i):
                ret[i] = X[i] + imm(1)
            else:
                ret[i] = X[i]
        return ret

    def xor_exp(self, X, e):
        return X + e

    def xor_Y(self, X, Y):
        return X + Y

    def ixor_Y(self, X, Y):
        X += Y

    def ixor_exp(self, X, e):
        X += e

    def oppose_X(self, X):
        """Two's-complement negation: ``-X == ~X + 1``."""
        return self.add_n(self.not_X(X), 1)

    def notand_n(self, X, n):
        return self.not_X(self.and_n(X, n))

    def notand_Y(self, X, Y):
        return self.not_X(self.and_Y(X, Y))

    def notand_exp(self, X, e):
        return self.not_exp(self.and_exp(X, e))

    def or_Y(self, X, Y):
        if self.use_esf:
            return esf_vector(2, [X, Y]) + esf_vector(1, [X, Y])
        else:
            # a | b == (a & b) ^ (a ^ b)
            return self.xor_Y(self.and_Y(X, Y), self.xor_Y(X, Y))

    def or_exp(self, X, e):
        if self.use_esf:
            E = Vector(self.nbits, e)
            return self.or_Y(X, E)
        else:
            return self.xor_exp(self.and_exp(X, e), self.xor_exp(X, e))

    def or_n(self, X, n):
        ret = Vector(self.nbits)
        for i in range(self.nbits):
            if n & (1 << i):
                ret[i] = imm(1)
            else:
                ret[i] = X[i]
        return ret

    def lshift_n(self, X, n):
        # Inverted operator kept from the original (see class docstring).
        return X >> n

    def rshift_n(self, X, n):
        return X << n

    def arshift_n(self, X, n):
        """Arithmetic right shift: replicate the sign bit into the top."""
        n = min(n, self.nbits)
        ret = X << n
        last_bit = X[self.nbits - 1]
        for i in range(self.nbits - n, self.nbits):
            ret[i] = last_bit
        return ret

    def rshift_Y(self, X, Y):
        # Generate 2**Y and multiply X by this
        # BUG FIX: the original body was a stray identifier (`fds`) followed
        # by `pass`, raising NameError at call time.  Make the missing
        # implementation explicit instead.
        raise NotImplementedError("rshift_Y is not implemented")

    def rol_n(self, X, n):
        # rol(0b(d b c a), 1) = 0b(b c a d)
        # rol(vec(a,b,c,d), 1) = vec(d,a,c,b))
        ret = Vector(self.nbits)
        for i in range(self.nbits):
            ret[i] = X[(i - n) % self.nbits]
        return ret

    def ror_n(self, X, n):
        ret = Vector(self.nbits)
        for i in range(self.nbits):
            ret[i] = X[(i + n) % self.nbits]
        return ret

    def evaluate(self, E, values):
        return evaluate_expr(E, self.nbits, values)

    def vectorial_decomp(self, symbols, X):
        return analyses.vectorial_decomp(symbols, X)

    def permut2expr(self, P, X):
        """Convert a permutation table ``P`` applied to input ``X`` into a
        boolean expression vector (offset by P[0] to shrink the terms)."""
        ret = Vector(self.nbits)
        v0 = P[0]
        nbits_in = (len(P) - 1).bit_length()
        for k, v in enumerate(P[1:]):
            v ^= v0
            if v == 0:
                continue
            k += 1  # enumerate started at P[1]
            test = test_N(nbits_in, X, k)
            for i in range(self.nbits):
                if ((v >> i) & 1) == 1:
                    ret[i] += test
        for i in range(self.nbits):
            ret[i] += imm((v0 >> i) & 1)
        simplify_inplace(ret)
        return ret

    def symbpermut2expr(self, P, X):
        """Like :meth:`permut2expr` but table entries are symbolic vectors."""
        ret = Vector(self.nbits)
        nbits_in = (len(P) - 1).bit_length()
        for k, v in enumerate(P):
            test = test_N(nbits_in, X, k)
            for i in range(self.nbits):
                ret[i] += v[i] * test
        simplify_inplace(ret)
        return ret

    def add_n_matrix(self, n):
        """Matrix whose (i, j) entry is the carry-propagation indicator for
        adding the constant ``n``."""
        def matrix_v(i, j):
            if i == j:
                return imm(1)
            if i < j:
                return imm(0)
            if i > j:
                # 1 iff every bit of n between j and i-1 is set (carry chain).
                mask = (~((1 << (j)) - 1)) & self.max_uint
                mask2 = ((1 << (i)) - 1) & self.max_uint
                mask &= mask2
                return imm((n & mask) == mask)

        return Matrix(self.nbits, self.nbits, matrix_v)

    def from_bytes(self, s):
        """Little-endian-within-byte conversion: bytes -> bit Vector."""
        ret = Vector(self.nbits)
        for i, c in enumerate(six.iterbytes(s)):
            for j in range(8):
                ret[i * 8 + j] = imm((c >> j) & 1)
        return ret

    def to_bytes(self, vec):
        """Inverse of :meth:`from_bytes`; requires a fully concrete vector."""
        l = (self.nbits + 7) // 8
        ret = bytearray(l)
        for i, b in enumerate(vec):
            if not b.is_imm():
                raise ValueError("variable does not contain only immediates!")
            b = b.imm_value()
            if b:
                bit_idx = i & 7
                byte_idx = i >> 3
                ret[byte_idx] |= b << bit_idx
        return bytes(ret)
import ast
from typing import ClassVar

from typing_extensions import final

from wemake_python_styleguide.logic.nodes import get_parent
from wemake_python_styleguide.types import AnyNodes
from wemake_python_styleguide.violations.consistency import (
    IterableUnpackingViolation,
)
from wemake_python_styleguide.visitors import base


@final
class IterableUnpackingVisitor(base.BaseNodeVisitor):
    """Checks iterables unpacking."""

    # Literal container nodes inside which a lone ``*x`` is redundant.
    _unpackable_iterable_parent_types: ClassVar[AnyNodes] = (
        ast.List,
        ast.Set,
        ast.Tuple,
    )

    def visit_Starred(self, node: ast.Starred) -> None:
        """Checks iterable's unpacking."""
        self._check_unnecessary_iterable_unpacking(node)
        self.generic_visit(node)

    def _check_unnecessary_iterable_unpacking(self, node: ast.Starred) -> None:
        # Only flag a starred element that is the *sole* element of a
        # list/set/tuple literal, e.g. ``[*items]``.
        enclosing = get_parent(node)
        if not isinstance(enclosing, self._unpackable_iterable_parent_types):
            return
        if len(getattr(enclosing, 'elts', [])) == 1:
            self.add_violation(IterableUnpackingViolation(node))
import numpy as np
import pytest

import pygeos
from pygeos.testing import assert_geometries_equal

from .common import (
    empty_line_string,
    empty_point,
    line_string,
    linear_ring,
    multi_line_string,
    multi_point,
    multi_polygon,
    point,
    polygon,
)


def test_line_interpolate_point_geom_array():
    # Negative distance measures backwards from the end of each line.
    interpolated = pygeos.line_interpolate_point(
        [line_string, linear_ring, multi_line_string], -1
    )
    assert_geometries_equal(interpolated[0], pygeos.Geometry("POINT (1 0)"))
    assert_geometries_equal(interpolated[1], pygeos.Geometry("POINT (0 1)"))
    assert_geometries_equal(
        interpolated[2], pygeos.Geometry("POINT (0.5528 1.1056)"), tolerance=0.001
    )


def test_line_interpolate_point_geom_array_normalized():
    # normalized=True: 1.0 means the far end of each geometry.
    interpolated = pygeos.line_interpolate_point(
        [line_string, linear_ring, multi_line_string], 1, normalized=True
    )
    assert_geometries_equal(interpolated[0], pygeos.Geometry("POINT (1 1)"))
    assert_geometries_equal(interpolated[1], pygeos.Geometry("POINT (0 0)"))
    assert_geometries_equal(interpolated[2], pygeos.Geometry("POINT (1 2)"))


def test_line_interpolate_point_float_array():
    interpolated = pygeos.line_interpolate_point(line_string, [0.2, 1.5, -0.2])
    assert_geometries_equal(interpolated[0], pygeos.Geometry("POINT (0.2 0)"))
    assert_geometries_equal(interpolated[1], pygeos.Geometry("POINT (1 0.5)"))
    assert_geometries_equal(interpolated[2], pygeos.Geometry("POINT (1 0.8)"))


@pytest.mark.parametrize("normalized", [False, True])
@pytest.mark.parametrize(
    "geom",
    [
        pygeos.Geometry("LINESTRING EMPTY"),
        pygeos.Geometry("LINEARRING EMPTY"),
        pygeos.Geometry("MULTILINESTRING EMPTY"),
        pygeos.Geometry("MULTILINESTRING (EMPTY, (0 0, 1 1))"),
        pygeos.Geometry("GEOMETRYCOLLECTION EMPTY"),
        pygeos.Geometry("GEOMETRYCOLLECTION (LINESTRING EMPTY, POINT (1 1))"),
    ],
)
def test_line_interpolate_point_empty(geom, normalized):
    # These geometries segfault in some versions of GEOS (in 3.8.0, still
    # some of them segfault). Instead, we patched this to return POINT EMPTY.
    # This matches GEOS 3.8.0 behavior on simple empty geometries.
    assert_geometries_equal(
        pygeos.line_interpolate_point(geom, 0.2, normalized=normalized),
        empty_point,
    )


@pytest.mark.parametrize("normalized", [False, True])
@pytest.mark.parametrize(
    "geom",
    [
        empty_point,
        point,
        polygon,
        multi_point,
        multi_polygon,
        pygeos.geometrycollections([point]),
        pygeos.geometrycollections([polygon]),
        pygeos.geometrycollections([multi_line_string]),
        pygeos.geometrycollections([multi_point]),
        pygeos.geometrycollections([multi_polygon]),
    ],
)
def test_line_interpolate_point_invalid_type(geom, normalized):
    # Non-lineal geometries are rejected outright.
    with pytest.raises(TypeError):
        assert pygeos.line_interpolate_point(geom, 0.2, normalized=normalized)


def test_line_interpolate_point_none():
    assert pygeos.line_interpolate_point(None, 0.2) is None


def test_line_interpolate_point_nan():
    assert pygeos.line_interpolate_point(line_string, np.nan) is None


def test_line_locate_point_geom_array():
    target = pygeos.points(0, 1)
    distances = pygeos.line_locate_point([line_string, linear_ring], target)
    np.testing.assert_allclose(distances, [0.0, 3.0])


def test_line_locate_point_geom_array2():
    targets = pygeos.points([[0, 0], [1, 0]])
    distances = pygeos.line_locate_point(line_string, targets)
    np.testing.assert_allclose(distances, [0.0, 1.0])


@pytest.mark.parametrize("normalized", [False, True])
def test_line_locate_point_none(normalized):
    assert np.isnan(pygeos.line_locate_point(line_string, None, normalized=normalized))
    assert np.isnan(pygeos.line_locate_point(None, point, normalized=normalized))


@pytest.mark.parametrize("normalized", [False, True])
def test_line_locate_point_empty(normalized):
    assert np.isnan(
        pygeos.line_locate_point(line_string, empty_point, normalized=normalized)
    )
    assert np.isnan(
        pygeos.line_locate_point(empty_line_string, point, normalized=normalized)
    )


@pytest.mark.parametrize("normalized", [False, True])
def test_line_locate_point_invalid_geometry(normalized):
    with pytest.raises(pygeos.GEOSException):
        pygeos.line_locate_point(line_string, line_string, normalized=normalized)
    with pytest.raises(pygeos.GEOSException):
        pygeos.line_locate_point(polygon, point, normalized=normalized)


def test_line_merge_geom_array():
    merged = pygeos.line_merge([line_string, multi_line_string])
    assert_geometries_equal(merged[0], line_string)
    assert_geometries_equal(merged[1], pygeos.Geometry("LINESTRING (0 0, 1 2)"))


def test_shared_paths_linestring():
    g1 = pygeos.linestrings([(0, 0), (1, 0), (1, 1)])
    g2 = pygeos.linestrings([(0, 0), (1, 0)])
    shared = pygeos.shared_paths(g1, g2)
    assert_geometries_equal(
        pygeos.get_geometry(shared, 0), pygeos.multilinestrings([g2])
    )


def test_shared_paths_none():
    assert pygeos.shared_paths(line_string, None) is None
    assert pygeos.shared_paths(None, line_string) is None
    assert pygeos.shared_paths(None, None) is None


def test_shared_paths_non_linestring():
    g1 = pygeos.linestrings([(0, 0), (1, 0), (1, 1)])
    g2 = pygeos.points(0, 1)
    with pytest.raises(pygeos.GEOSException):
        pygeos.shared_paths(g1, g2)


def _prepare_input(geometry, prepare):
    """Prepare without modifying inplace"""
    if prepare:
        geometry = pygeos.apply(geometry, lambda x: x)  # makes a copy
        pygeos.prepare(geometry)
        return geometry
    else:
        return geometry


@pytest.mark.parametrize("prepare", [True, False])
def test_shortest_line(prepare):
    g1 = pygeos.linestrings([(0, 0), (1, 0), (1, 1)])
    g2 = pygeos.linestrings([(0, 3), (3, 0)])
    actual = pygeos.shortest_line(_prepare_input(g1, prepare), g2)
    expected = pygeos.linestrings([(1, 1), (1.5, 1.5)])
    assert pygeos.equals(actual, expected)


@pytest.mark.parametrize("prepare", [True, False])
def test_shortest_line_none(prepare):
    assert pygeos.shortest_line(_prepare_input(line_string, prepare), None) is None
    assert pygeos.shortest_line(None, line_string) is None
    assert pygeos.shortest_line(None, None) is None


@pytest.mark.parametrize("prepare", [True, False])
def test_shortest_line_empty(prepare):
    g1 = _prepare_input(line_string, prepare)
    assert pygeos.shortest_line(g1, empty_line_string) is None
    g1_empty = _prepare_input(empty_line_string, prepare)
    assert pygeos.shortest_line(g1_empty, line_string) is None
    assert pygeos.shortest_line(g1_empty, empty_line_string) is None
''' Test the cert_update plugin. '''

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ports

Test.Summary = ''' Test cert_update plugin. '''

Test.SkipUnless(
    Condition.HasProgram(
        "openssl",
        "Openssl need to be installed on system for this test to work"),
    Condition.PluginExists('cert_update.so'))

# Set up the origin server with a trivial canned transaction.
server = Test.MakeOriginServer("server")
request_header = {
    "headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n",
    "timestamp": "1469733493.993",
    "body": "",
}
response_header = {
    "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n",
    "timestamp": "1469733493.993",
    "body": "",
}
server.addResponse("sessionlog.json", request_header, response_header)

# Set up ATS with TLS enabled and cert_update debug logging.
ts = Test.MakeATSProcess("ts", command="traffic_manager", enable_tls=1)

# Stage the server/client certificate pairs used before and after the update.
ts.addSSLfile("ssl/server1.pem")
ts.addSSLfile("ssl/server2.pem")
ts.addSSLfile("ssl/client1.pem")
ts.addSSLfile("ssl/client2.pem")

# reserve port, attach it to 'ts' so it is released later
ports.get_port(ts, 's_server_port')

ts.Disk.records_config.update({
    'proxy.config.diags.debug.enabled': 1,
    'proxy.config.diags.debug.tags': 'cert_update',
    'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.url_remap.pristine_host_hdr': 1,
})

ts.Disk.ssl_multicert_config.AddLine(
    'dest_ip=* ssl_cert_name=server1.pem ssl_key_name=server1.pem')

ts.Disk.remap_config.AddLines([
    'map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port),
    'map https://foo.com https://127.0.0.1:{0}'.format(ts.Variables.s_server_port),
])

ts.Disk.sni_yaml.AddLines([
    'sni:',
    '- fqdn: "*foo.com"',
    '  client_cert: "client1.pem"',
])

# Set up plugin
Test.PrepareInstalledPlugin('cert_update.so', ts)

# Server-Cert-Pre
# curl should see that Traffic Server presents bar.com cert from alice
tr = Test.AddTestRun("Server-Cert-Pre")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Command = (
    'curl --verbose --insecure --ipv4 --resolve bar.com:{0}:127.0.0.1 https://bar.com:{0}'.format(ts.Variables.ssl_port))
tr.Processes.Default.Streams.stderr = "gold/server-cert-pre.gold"
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server

# Server-Cert-Update
tr = Test.AddTestRun("Server-Cert-Update")
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Command = (
    '{0}/traffic_ctl plugin msg cert_update.server {1}/server2.pem'.format(
        ts.Variables.BINDIR, ts.Variables.SSLDir))
# NOTE(review): gold/liveness checks below are attached to `ts`, not `tr`,
# unlike the other runs — presumably intentional so the ATS log is matched;
# confirm against the AuTest framework conventions.
ts.Streams.all = "gold/update.gold"
ts.StillRunningAfter = server

# Server-Cert-After
# after use traffic_ctl to update server cert, curl should see bar.com cert from bob
tr = Test.AddTestRun("Server-Cert-After")
tr.Processes.Default.Env = ts.Env
tr.Command = (
    'curl --verbose --insecure --ipv4 --resolve bar.com:{0}:127.0.0.1 https://bar.com:{0}'.format(ts.Variables.ssl_port))
tr.Processes.Default.Streams.stderr = "gold/server-cert-after.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server

# Client-Cert-Pre
# s_server should see client (Traffic Server) as alice.com
tr = Test.AddTestRun("Client-Cert-Pre")
s_server = tr.Processes.Process(
    "s_server",
    "openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format(
        ts.Variables.SSLDir, ts.Variables.s_server_port))
s_server.Ready = When.PortReady(ts.Variables.s_server_port)
tr.Command = 'curl --verbose --insecure --ipv4 --header "Host: foo.com" https://localhost:{}'.format(ts.Variables.ssl_port)
tr.Processes.Default.StartBefore(s_server)
s_server.Streams.all = "gold/client-cert-pre.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server

# Client-Cert-Update
tr = Test.AddTestRun("Client-Cert-Update")
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Command = (
    'mv {0}/client2.pem {0}/client1.pem && {1}/traffic_ctl plugin msg cert_update.client {0}/client1.pem'.format(
        ts.Variables.SSLDir, ts.Variables.BINDIR))
ts.Streams.all = "gold/update.gold"
ts.StillRunningAfter = server

# Client-Cert-After
# after use traffic_ctl to update client cert, s_server should see client (Traffic Server) as bob.com
tr = Test.AddTestRun("Client-Cert-After")
s_server = tr.Processes.Process(
    "s_server",
    "openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format(
        ts.Variables.SSLDir, ts.Variables.s_server_port))
s_server.Ready = When.PortReady(ts.Variables.s_server_port)
tr.Processes.Default.Env = ts.Env
# Move client2.pem to replace client1.pem since cert path matters in client context mapping
tr.Command = 'curl --verbose --insecure --ipv4 --header "Host: foo.com" https://localhost:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.StartBefore(s_server)
s_server.Streams.all = "gold/client-cert-after.gold"
tr.Processes.Default.ReturnCode = 0
ts.StillRunningAfter = server
""" Sum species """<line_sep>#***************************************************************************** # Copyright (C) 2008 <NAME> <<EMAIL>>, # # Distributed under the terms of the GNU General Public License (GPL) # # This code is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # The full text of the GPL is available at: # # http://www.gnu.org/licenses/ #***************************************************************************** <import_from_stmt>.species GenericCombinatorialSpecies<import_from_stmt>.structure SpeciesStructureWrapper<import_from_stmt>sage.structure.unique_representation UniqueRepresentation<class_stmt>SumSpeciesStructure(SpeciesStructureWrapper)<block_start><pass><block_end><class_stmt>SumSpecies(GenericCombinatorialSpecies UniqueRepresentation)<block_start><def_stmt>__init__ self F G min=<none> max=<none> weight=<none><block_start>""" Returns the sum of two species. EXAMPLES:: sage: S = species.PermutationSpecies() sage: A = S+S sage: A.generating_series().coefficients(5) [2, 2, 2, 2, 2] sage: P = species.PermutationSpecies() sage: F = P + P sage: F._check() True sage: F == loads(dumps(F)) True TESTS:: sage: A = species.SingletonSpecies() + species.SingletonSpecies() sage: B = species.SingletonSpecies() + species.SingletonSpecies() sage: C = species.SingletonSpecies() + species.SingletonSpecies(min=2) sage: A is B True sage: (A is C) or (A == C) False """<line_sep>self._F=F<line_sep>self._G=G<line_sep>self._state_info=[F G]<line_sep>GenericCombinatorialSpecies.__init__(self min=<none> max=<none> weight=<none>)<block_end>_default_structure_class=SumSpeciesStructure<def_stmt>left_summand self<block_start>""" Returns the left summand of this species. 
EXAMPLES:: sage: P = species.PermutationSpecies() sage: F = P + P*P sage: F.left_summand() Permutation species """<line_sep><return>self._F<block_end><def_stmt>right_summand self<block_start>""" Returns the right summand of this species. EXAMPLES:: sage: P = species.PermutationSpecies() sage: F = P + P*P sage: F.right_summand() Product of (Permutation species) and (Permutation species) """<line_sep><return>self._G<block_end><def_stmt>_name self<block_start>""" Note that we use a function to return the name of this species because we can't do it in the __init__ method due to it requiring that self.left_summand() and self.right_summand() already be unpickled. EXAMPLES:: sage: P = species.PermutationSpecies() sage: F = P + P sage: F._name() 'Sum of (Permutation species) and (Permutation species)' """<line_sep><return>"Sum of (%s) and (%s)"%(self.left_summand() self.right_summand())<block_end><def_stmt>_structures self structure_class labels<block_start>""" EXAMPLES:: sage: P = species.PermutationSpecies() sage: F = P + P sage: F.structures([1,2]).list() [[1, 2], [2, 1], [1, 2], [2, 1]] """<for_stmt>res self.left_summand().structures(labels)<block_start><yield>structure_class(self res tag="left")<block_end><for_stmt>res self.right_summand().structures(labels)<block_start><yield>structure_class(self res tag="right")<block_end><block_end><def_stmt>_isotypes self structure_class labels<block_start>""" EXAMPLES:: sage: P = species.PermutationSpecies() sage: F = P + P sage: F.isotypes([1,2]).list() [[2, 1], [1, 2], [2, 1], [1, 2]] """<for_stmt>res self._F.isotypes(labels)<block_start><yield>structure_class(self res tag="left")<block_end><for_stmt>res self._G.isotypes(labels)<block_start><yield>structure_class(self res tag="right")<block_end><block_end><def_stmt>_gs self series_ring base_ring<block_start>""" Returns the cycle index series of this species. 
EXAMPLES:: sage: P = species.PermutationSpecies() sage: F = P + P sage: F.generating_series().coefficients(5) [2, 2, 2, 2, 2] """<line_sep><return>(self.left_summand().generating_series(base_ring)+self.right_summand().generating_series(base_ring))<block_end><def_stmt>_itgs self series_ring base_ring<block_start>""" Returns the isomorphism type generating series of this species. EXAMPLES:: sage: P = species.PermutationSpecies() sage: F = P + P sage: F.isotype_generating_series().coefficients(5) [2, 2, 4, 6, 10] """<line_sep><return>(self.left_summand().isotype_generating_series(base_ring)+self.right_summand().isotype_generating_series(base_ring))<block_end><def_stmt>_cis self series_ring base_ring<block_start>""" Returns the generating series of this species. EXAMPLES:: sage: P = species.PermutationSpecies() sage: F = P + P sage: F.cycle_index_series().coefficients(5) [2*p[], 2*p[1], 2*p[1, 1] + 2*p[2], 2*p[1, 1, 1] + 2*p[2, 1] + 2*p[3], 2*p[1, 1, 1, 1] + 2*p[2, 1, 1] + 2*p[2, 2] + 2*p[3, 1] + 2*p[4]] """<line_sep><return>(self.left_summand().cycle_index_series(base_ring)+self.right_summand().cycle_index_series(base_ring))<block_end><def_stmt>weight_ring self<block_start>""" Returns the weight ring for this species. This is determined by asking Sage's coercion model what the result is when you add elements of the weight rings for each of the operands. EXAMPLES:: sage: S = species.SetSpecies() sage: C = S+S sage: C.weight_ring() Rational Field :: sage: S = species.SetSpecies(weight=QQ['t'].gen()) sage: C = S + S sage: C.weight_ring() Univariate Polynomial Ring in t over Rational Field """<line_sep><return>self._common_parent([self.left_summand().weight_ring() self.right_summand().weight_ring()])<block_end><def_stmt>_equation self var_mapping<block_start>""" Returns the right hand side of an algebraic equation satisfied by this species. This is a utility function called by the algebraic_equation_system method. 
EXAMPLES:: sage: X = species.SingletonSpecies() sage: S = X + X sage: S.algebraic_equation_system() [node1 + (-2*z)] """<line_sep><return>sum(var_mapping[operand]<for>operand self._state_info)<block_end><block_end>#Backward compatibility SumSpecies_class=SumSpecies<line_sep>
#   Licensed under the Apache License, Version 2.0 (the "License"); you may
#   not use this file except in compliance with the License. You may obtain
#   a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#   License for the specific language governing permissions and limitations
#   under the License.
#

"""Address scope action implementations"""

import logging

from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils

from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
from openstackclient.network import common

LOG = logging.getLogger(__name__)


def _get_columns(item):
    """Map SDK attribute names to display columns, hiding internals."""
    column_map = {
        'is_shared': 'shared',
        'tenant_id': 'project_id',
    }
    hidden_columns = ['location']
    return utils.get_osc_show_columns_for_sdk_resource(
        item, column_map, hidden_columns)


def _get_attrs(client_manager, parsed_args):
    """Build the attribute dict for an address-scope create request.

    Translates the parsed CLI arguments (``name``, ``ip_version``,
    ``--share``/``--no-share`` and an optional ``--project``) into the
    keyword arguments expected by the network SDK.
    """
    attrs = {
        'name': parsed_args.name,
        'ip_version': parsed_args.ip_version,
    }
    if parsed_args.share:
        attrs['shared'] = True
    if parsed_args.no_share:
        attrs['shared'] = False
    if 'project' in parsed_args and parsed_args.project is not None:
        # Resolve the project name/ID through the identity service.
        identity_client = client_manager.identity
        project_id = identity_common.find_project(
            identity_client,
            parsed_args.project,
            parsed_args.project_domain,
        ).id
        attrs['tenant_id'] = project_id
    return attrs


# TODO(rtheis): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class CreateAddressScope(command.ShowOne, common.NeutronCommandWithExtraArgs):
    _description = _("Create a new Address Scope")

    def get_parser(self, prog_name):
        parser = super(CreateAddressScope, self).get_parser(prog_name)
        parser.add_argument(
            'name',
            metavar="<name>",
            help=_("New address scope name"))
        parser.add_argument(
            '--ip-version',
            type=int,
            default=4,
            choices=[4, 6],
            help=_("IP version (default is 4)"))
        parser.add_argument(
            '--project',
            metavar="<project>",
            help=_("Owner's project (name or ID)"))
        identity_common.add_project_domain_option_to_parser(parser)
        # --share / --no-share are mutually exclusive; unset means "no-share".
        share_group = parser.add_mutually_exclusive_group()
        share_group.add_argument(
            '--share',
            action='store_true',
            help=_('Share the address scope between projects'))
        share_group.add_argument(
            '--no-share',
            action='store_true',
            help=_('Do not share the address scope between projects (default)'))
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network
        attrs = _get_attrs(self.app.client_manager, parsed_args)
        attrs.update(
            self._parse_extra_properties(parsed_args.extra_properties))
        obj = client.create_address_scope(**attrs)
        display_columns, columns = _get_columns(obj)
        data = utils.get_item_properties(obj, columns, formatters={})
        return (display_columns, data)


class DeleteAddressScope(command.Command):
    _description = _("Delete address scope(s)")

    def get_parser(self, prog_name):
        parser = super(DeleteAddressScope, self).get_parser(prog_name)
        parser.add_argument(
            'address_scope',
            metavar="<address-scope>",
            nargs='+',
            help=_("Address scope(s) to delete (name or ID)"))
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network
        # Best-effort: attempt every requested scope, then report failures.
        failures = 0
        for scope in parsed_args.address_scope:
            try:
                obj = client.find_address_scope(scope, ignore_missing=False)
                client.delete_address_scope(obj)
            except Exception as e:
                failures += 1
                LOG.error(_("Failed to delete address scope with "
                            "name or ID '%(scope)s': %(e)s"),
                          {'scope': scope, 'e': e})
        if failures > 0:
            total = len(parsed_args.address_scope)
            msg = (_("%(result)s of %(total)s address scopes failed "
                     "to delete.") % {'result': failures, 'total': total})
            raise exceptions.CommandError(msg)


# TODO(yanxing'an): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class ListAddressScope(command.Lister):
    _description = _("List address scopes")

    def get_parser(self, prog_name):
        parser = super(ListAddressScope, self).get_parser(prog_name)
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_("List only address scopes of given name in output"))
        parser.add_argument(
            '--ip-version',
            type=int,
            choices=[4, 6],
            metavar='<ip-version>',
            dest='ip_version',
            help=_("List address scopes of given IP version networks (4 or 6)"))
        parser.add_argument(
            '--project',
            metavar="<project>",
            help=_("List address scopes according to their project "
                   "(name or ID)"))
        identity_common.add_project_domain_option_to_parser(parser)
        shared_group = parser.add_mutually_exclusive_group()
        shared_group.add_argument(
            '--share',
            action='store_true',
            help=_("List address scopes shared between projects"))
        shared_group.add_argument(
            '--no-share',
            action='store_true',
            help=_("List address scopes not shared between projects"))
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network
        columns = (
            'id',
            'name',
            'ip_version',
            'is_shared',
            'project_id',
        )
        column_headers = (
            'ID',
            'Name',
            'IP Version',
            'Shared',
            'Project',
        )
        filters = {}
        if parsed_args.name:
            filters['name'] = parsed_args.name
        if parsed_args.ip_version:
            filters['ip_version'] = parsed_args.ip_version
        if parsed_args.share:
            filters['is_shared'] = True
        if parsed_args.no_share:
            filters['is_shared'] = False
        if 'project' in parsed_args and parsed_args.project is not None:
            identity_client = self.app.client_manager.identity
            project_id = identity_common.find_project(
                identity_client,
                parsed_args.project,
                parsed_args.project_domain,
            ).id
            # Both keys are supplied to cover old and new SDK filter names.
            filters['tenant_id'] = project_id
            filters['project_id'] = project_id
        data = client.address_scopes(**filters)
        return (column_headers,
                (utils.get_item_properties(s, columns, formatters={})
                 for s in data))


# TODO(rtheis): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class SetAddressScope(common.NeutronCommandWithExtraArgs):
    _description = _("Set address scope properties")

    def get_parser(self, prog_name):
        parser = super(SetAddressScope, self).get_parser(prog_name)
        parser.add_argument(
            'address_scope',
            metavar="<address-scope>",
            help=_("Address scope to modify (name or ID)"))
        parser.add_argument(
            '--name',
            metavar="<name>",
            help=_('Set address scope name'))
        share_group = parser.add_mutually_exclusive_group()
        share_group.add_argument(
            '--share',
            action='store_true',
            help=_('Share the address scope between projects'))
        share_group.add_argument(
            '--no-share',
            action='store_true',
            help=_('Do not share the address scope between projects'))
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network
        obj = client.find_address_scope(
            parsed_args.address_scope,
            ignore_missing=False)
        attrs = {}
        if parsed_args.name is not None:
            attrs['name'] = parsed_args.name
        if parsed_args.share:
            attrs['shared'] = True
        if parsed_args.no_share:
            attrs['shared'] = False
        attrs.update(
            self._parse_extra_properties(parsed_args.extra_properties))
        client.update_address_scope(obj, **attrs)


class ShowAddressScope(command.ShowOne):
    _description = _("Display address scope details")

    def get_parser(self, prog_name):
        parser = super(ShowAddressScope, self).get_parser(prog_name)
        parser.add_argument(
            'address_scope',
            metavar="<address-scope>",
            help=_("Address scope to display (name or ID)"))
        return parser

    def take_action(self, parsed_args):
        client = self.app.client_manager.network
        obj = client.find_address_scope(
            parsed_args.address_scope,
            ignore_missing=False)
        display_columns, columns = _get_columns(obj)
        data = utils.get_item_properties(obj, columns, formatters={})
        return (display_columns, data)
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import absolute_import

from pex.typing import TYPE_CHECKING

if TYPE_CHECKING:
    from typing import Dict, IO, List, Mapping, Optional, Tuple

    Value = Optional[str]
    Attributes = Mapping[str, Value]


class DiGraph(object):
    """Renders a dot digraph built up from nodes and edges."""

    @staticmethod
    def _render_ID(value):
        # type: (str) -> str
        # Quote the value and escape embedded double quotes; see
        # https://graphviz.org/doc/info/lang.html for the `ID` production.
        return '"{}"'.format(value.replace('"', '\\"'))

    @classmethod
    def _render_a_list(cls, attributes):
        # type: (Attributes) -> str
        # DOT `a_list` production: comma-separated name=value pairs.
        # A None value means "unset" and the attribute is skipped.
        pairs = (
            "{name}={value}".format(name=name, value=cls._render_ID(value))
            for name, value in attributes.items()
            if value is not None
        )
        return ", ".join(pairs)

    def __init__(
        self,
        name,  # type: str
        strict=True,  # type: bool
        **attributes  # type: Value
    ):
        # type: (...) -> None
        """
        :param name: A name for the graph.
        :param strict: Whether or not duplicate edges are collapsed into one edge.
        """
        self._name = name
        self._strict = strict
        self._attributes = attributes  # type: Attributes
        self._nodes = {}  # type: Dict[str, Attributes]
        self._edges = []  # type: List[Tuple[str, str, Attributes]]

    @property
    def name(self):
        return self._name

    def add_node(
        self,
        name,  # type: str
        **attributes  # type: Value
    ):
        # type: (...) -> None
        """Adds a node to the graph.

        This is done implicitly by add_edge for the nodes the edge connects, but may be useful when
        the node is either isolated or else needs to be decorated with attributes.

        :param name: The name of the node.
        """
        self._nodes[name] = attributes

    def add_edge(
        self,
        start,  # type: str
        end,  # type: str
        **attributes  # type: Value
    ):
        # type: (...) -> None
        """
        :param start: The name of the start node.
        :param end: The name of the end node.
        :param attributes: Any extra attributes for the edge connecting the start node to the end
                           node.
        """
        self._edges.append((start, end, attributes))

    def emit(self, out):
        # type: (IO[str]) -> None
        """Render the current state of this digraph to the given `out` stream.

        :param out: A stream to render this digraph to. N/B.: Will not be flushed or closed.
        """

        def write_attr_stmt(
            stmt,  # type: str
            attributes,  # type: Attributes
        ):
            # type: (...) -> None
            # DOT `attr_stmt` production: "<stmt> [<a_list>];".
            out.write(
                "{statement} [{a_list}];\n".format(
                    statement=stmt, a_list=self._render_a_list(attributes)
                )
            )

        if self._strict:
            out.write("strict ")
        out.write("digraph {name} {{\n".format(name=self._render_ID(self._name)))
        write_attr_stmt("graph", self._attributes)
        for node, attributes in self._nodes.items():
            write_attr_stmt(self._render_ID(node), attributes)
        for start, end, attributes in self._edges:
            write_attr_stmt(
                "{start} -> {end}".format(
                    start=self._render_ID(start), end=self._render_ID(end)
                ),
                attributes,
            )
        out.write("}\n")
""" Copyright Snap Inc. 2021. This sample code is made available by Snap Inc. for informational purposes only. No license, whether implied or otherwise, is granted in or to such code (including any rights to copy, modify, publish, distribute and/or commercialize such code), unless you have entered into a separate agreement for such rights. Such code is provided as-is, without warranty of any kind, express or implied, including any warranties of merchantability, title, fitness for a particular purpose, non-infringement, or that such code is free of defects, errors or viruses. In no event will Snap Inc. be liable for any damages or losses of any kind arising from the sample code or your use thereof. """<import_stmt>os<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>torch.nn.parallel DistributedDataParallel<as>DDP<import_from_stmt>.rnn RNNModule<import_from_stmt>models.stylegan2 model<def_stmt>load_checkpoints path gpu<block_start><if_stmt>gpu<is><none><block_start>ckpt=torch.load(path)<block_end><else_stmt><block_start>loc='cuda:{}'.format(gpu)<line_sep>ckpt=torch.load(path map_location=loc)<block_end><return>ckpt<block_end><def_stmt>model_to_gpu model opt<block_start><if_stmt>opt.isTrain<block_start><if_stmt>opt.gpu<is><not><none><block_start>model.cuda(opt.gpu)<line_sep>model=DDP(model device_ids=[opt.gpu] find_unused_parameters=<true>)<block_end><else_stmt><block_start>model.cuda()<line_sep>model=DDP(model find_unused_parameters=<true>)<block_end><block_end><else_stmt><block_start>model.cuda()<line_sep>model=nn.DataParallel(model)<block_end><return>model<block_end><def_stmt>create_model opt<block_start>ckpt=load_checkpoints(opt.img_g_weights opt.gpu)<line_sep>modelG=model.Generator(size=opt.style_gan_size style_dim=opt.latent_dimension n_mlp=opt.n_mlp)<line_sep>modelG.load_state_dict(ckpt['g_ema'] strict=<false>)<line_sep>modelG.eval()<for_stmt>p 
modelG.parameters()<block_start>p.requires_grad=<false><block_end><if_stmt>opt.isPCA<block_start>modelS=modelG.style<line_sep>modelS.eval()<if_stmt>opt.gpu<is><not><none><block_start>modelS.cuda(opt.gpu)<block_end><return>modelS<block_end>pca_com_path=os.path.join(opt.save_pca_path 'pca_comp.npy')<line_sep>pca_stdev_path=os.path.join(opt.save_pca_path 'pca_stdev.npy')<line_sep>modelR=RNNModule(pca_com_path pca_stdev_path z_dim=opt.latent_dimension h_dim=opt.h_dim n_pca=opt.n_pca w_residual=opt.w_residual)<if_stmt>opt.isTrain<block_start><import_from_stmt>.D_3d ModelD_3d<line_sep>modelR.init_optim(opt.lr opt.beta1 opt.beta2)<line_sep>modelG.modelR=modelR<line_sep>modelD_3d=ModelD_3d(opt)<if_stmt>opt.cross_domain<block_start><import_from_stmt>.D_img ModelD_img<block_end><else_stmt><block_start><import_from_stmt>.D ModelD_img<block_end>modelD_img=ModelD_img(opt)<line_sep>modelG=model_to_gpu(modelG opt)<line_sep>modelD_3d=model_to_gpu(modelD_3d opt)<line_sep>modelD_img=model_to_gpu(modelD_img opt)<if_stmt>opt.load_pretrain_path<ne>'None'<and>opt.load_pretrain_epoch<g>-1<block_start>opt.checkpoints_dir=opt.load_pretrain_path<line_sep>m_name='/modelR_epoch_%d.pth'%(opt.load_pretrain_epoch)<line_sep>ckpt=load_checkpoints(opt.load_pretrain_path+m_name opt.gpu)<line_sep>modelG.module.modelR.load_state_dict(ckpt)<line_sep>m_name='/modelD_img_epoch_%d.pth'%(opt.load_pretrain_epoch)<line_sep>ckpt=load_checkpoints(opt.load_pretrain_path+m_name opt.gpu)<line_sep>modelD_img.load_state_dict(ckpt)<line_sep>m_name='/modelD_3d_epoch_%d.pth'%(opt.load_pretrain_epoch)<line_sep>ckpt=load_checkpoints(opt.load_pretrain_path+m_name opt.gpu)<line_sep>modelD_3d.load_state_dict(ckpt)<block_end><return>[modelG modelD_img modelD_3d]<block_end><else_stmt><block_start>modelR.eval()<for_stmt>p modelR.parameters()<block_start>p.requires_grad=<false><block_end>modelG.modelR=modelR<line_sep>modelG=model_to_gpu(modelG 
opt)<if_stmt>opt.load_pretrain_path<ne>'None'<and>opt.load_pretrain_epoch<g>-1<block_start>m_name='/modelR_epoch_%d.pth'%(opt.load_pretrain_epoch)<line_sep>ckpt=load_checkpoints(opt.load_pretrain_path+m_name opt.gpu)<line_sep>modelG.module.modelR.load_state_dict(ckpt)<block_end><return>modelG<block_end><block_end>
# # DX Package # # Valuation Classes # # dx_valuation.py # # Python for Finance, 2nd ed. # (c) Dr. <NAME> # <import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>dx_simulation *<import_from_stmt>valuation_class valuation_class<import_from_stmt>valuation_mcs_european valuation_mcs_european<import_from_stmt>valuation_mcs_american valuation_mcs_american<line_sep>
# Copyright 2014-2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from collections import namedtuple
from shutil import rmtree
from stat import S_IFDIR, S_IFREG, S_IFLNK

from pygit2 import (clone_repository, Signature, GIT_SORT_TOPOLOGICAL,
                    GIT_FILEMODE_TREE, GIT_STATUS_CURRENT, GIT_FILEMODE_LINK,
                    GIT_FILEMODE_BLOB, GIT_BRANCH_REMOTE, GIT_BRANCH_LOCAL,
                    GIT_FILEMODE_BLOB_EXECUTABLE)
from six import iteritems

from gitfs.cache import CommitCache
from gitfs.log import log
from gitfs.utils.path import split_path_into_components
from gitfs.utils.commits import CommitsList

# Result of a branch comparison: the commit both branches share, plus the
# commits each branch added on its own side of the split point.
DivergeCommits = namedtuple("DivergeCommits",
                            ["common_parent", "first_commits",
                             "second_commits"])


class Repository(object):
    """High-level wrapper over a `pygit2.Repository`.

    Unknown attribute/item lookups are proxied to the wrapped pygit2
    object, so instances can be used wherever a `pygit2.Repository` is
    expected while adding gitfs-specific helpers: divergence detection,
    path-based git object lookup, and a per-date commit cache.
    """

    def __init__(self, repository, commits=None):
        # the wrapped pygit2.Repository instance
        self._repo = repository
        # cache of commits keyed by date; built lazily against this repo
        self.commits = commits or CommitCache(self)
        # set by fetch(): True when the local branch is behind upstream
        self.behind = False

    def __getitem__(self, item):
        """Proxy method for pygit2.Repository."""
        return self._repo[item]

    def __getattr__(self, attr):
        """Proxy method for pygit2.Repository.

        NOTE: __getattr__ only fires after normal attribute lookup has
        already failed, so the __dict__ branch is a defensive fallback.
        """
        if attr not in self.__dict__:
            return getattr(self._repo, attr)
        else:
            return self.__dict__[attr]

    def ahead(self, upstream, branch):
        """Return True if `branch` has commits missing from `upstream`."""
        ahead, _ = self.diverge(upstream, branch)
        return ahead

    def diverge(self, upstream, branch):
        """Compare a local branch with its remote counterpart.

        :param upstream: name of the remote (e.g. "origin")
        :param branch: name of the local branch
        :returns: an (ahead, behind) pair of booleans
        """
        reference = "{}/{}".format(upstream, branch)
        remote_branch = self._repo.branches.remote.get(reference)
        local_branch = self._repo.branches.local.get(branch)

        # TODO: check for missing branches
        if remote_branch.target == local_branch.target:
            return False, False

        diverge_commits = self.find_diverge_commits(local_branch,
                                                    remote_branch)

        behind = len(diverge_commits.second_commits) > 0
        ahead = len(diverge_commits.first_commits) > 0

        return ahead, behind

    def checkout(self, ref, *args, **kwargs):
        """Checkout `ref`, then reconcile the working tree with the index.

        After the checkout, untracked files left behind are removed
        (unless ignored; directories are wiped with rmtree) and paths
        whose mode differs from the tree are re-added to the index.
        """
        result = self._repo.checkout(ref, *args, **kwargs)

        # update ignore cache after a checkout
        self.ignore.update()

        status = self._repo.status()
        for path, status in iteritems(status):
            # path is in current status, move on
            if status == GIT_STATUS_CURRENT:
                continue

            # check if file exists or not
            full_path = self._full_path(path)
            if path not in self._repo.index:
                if path not in self.ignore:
                    try:
                        os.unlink(full_path)
                    except OSError:
                        # path points to a directory containing untracked
                        # files
                        rmtree(
                            full_path,
                            onerror=lambda function, fpath, excinfo: log.info(
                                "Repository: Checkout couldn't delete %s",
                                fpath))
                continue

            # check files stats
            stats = self.get_git_object_default_stats(ref, path)
            current_stat = os.lstat(full_path)

            if stats["st_mode"] != current_stat.st_mode:
                # NOTE(review): chmod re-applies the *current* on-disk mode,
                # not the mode from the tree -- presumably intentional to
                # keep local permissions; confirm before changing.
                try:
                    os.chmod(full_path, current_stat.st_mode)
                except OSError:
                    log.info("Repository: Checkout couldn't chmod %s",
                             full_path)
                self._repo.index.add(self._sanitize(path))

        return result

    def _sanitize(self, path):
        """Strip a leading '/' so the path is index-relative."""
        if path is not None and path.startswith("/"):
            path = path[1:]
        return path

    def push(self, upstream, branch, credentials):
        """Push changes from a branch to a remote

        Examples::

                repo.push("origin", "master")
        """
        remote = self.get_remote(upstream)
        remote.push(["refs/heads/%s" % branch], callbacks=credentials)

    def fetch(self, upstream, branch_name, credentials):
        """
        Fetch from remote and return True if we are behind or False otherwise
        """
        remote = self.get_remote(upstream)
        remote.fetch(callbacks=credentials)

        _, behind = self.diverge(upstream, branch_name)
        self.behind = behind

        return behind

    def commit(self, message, author, commiter, parents=None, ref="HEAD"):
        """
        Wrapper for create_commit. It creates a commit from a given ref
        (default is HEAD).

        :param message: commit message
        :param author: (name, email) pair for the author signature
        :param commiter: (name, email) pair for the committer signature
        :param parents: optional list of parent commit ids; defaults to
            the commit that `ref` currently points at
        :returns: the new commit id, or None when there is nothing to commit
        """
        status = self._repo.status()
        if status == {}:
            return None

        # sign the author
        author = Signature(author[0], author[1])
        commiter = Signature(commiter[0], commiter[1])

        # write index localy
        tree = self._repo.index.write_tree()
        self._repo.index.write()

        # get parent
        if parents is None:
            parents = [self._repo.revparse_single(ref).id]

        return self._repo.create_commit(ref, author, commiter, message,
                                        tree, parents)

    @classmethod
    def clone(cls, remote_url, path, branch=None, credentials=None):
        """Clone a repo in a give path and update the working directory with
        a checkout to head (GIT_CHECKOUT_SAFE_CREATE)

        :param str remote_url: URL of the repository to clone
        :param str path: Local path to clone into
        :param str branch: Branch to checkout after the
            clone. The default is to use the remote's default branch.
        :raises: whatever `pygit2.clone_repository` raises on failure
        """
        try:
            repo = clone_repository(remote_url, path, checkout_branch=branch,
                                    callbacks=credentials)
        except Exception:
            log.error("Error on cloning the repository: ", exc_info=True)
            # BUGFIX: previously the exception was swallowed and the unbound
            # `repo` name was used below, masking the real error with a
            # NameError. Re-raise so callers see the actual failure.
            raise

        repo.checkout_head()
        return cls(repo)

    def _is_searched_entry(self, entry_name, searched_entry, path_components):
        """
        Checks if a tree entry is the one that is being searched for. For
        that, the name has to correspond and it has to be the last element
        in the path_components list (this means that the path corresponds
        exactly).

        :param entry_name: the name of the tree entry
        :param searched_entry: the name of the object that is being
            searched for
        :type searched_entry: str
        :param path_components: the path of the object being searched for
        :type path_components: list
        """
        return (entry_name == searched_entry and
                len(path_components) == 1 and
                entry_name == path_components[0])

    def _get_git_object(self, tree, obj_name, path_components, modifier):
        """
        It recursively searches for the object in the repository.

        To declare an object as found, the name and the relative path have
        to correspond. It also includes the relative path as a condition
        for success, to avoid finding an object with the correct name but
        with a wrong location.

        :param tree: a `pygit2.Tree` instance
        :param obj_name: the name of the object
        :type obj_name: str
        :param path_components: the path of the object being searched for
            as a list (e.g: for '/a/b/c/file.txt' =>
            ['a', 'b', 'c', 'file.txt'])
        :type path_components: list
        :param modifier: a function used to retrieve some specific
            characteristic of the git object
        :type modifier: function
        :returns: an instance corresponding to the object that is being
            searched for in case of success, or None otherwise.
        :rtype: one of the following:
            an instance of `pygit2.Tree`
            an instance of `pygit2.Blob`
            None
        """
        git_obj = None
        for entry in tree:
            if self._is_searched_entry(entry.name, obj_name, path_components):
                return modifier(entry)
            elif entry.filemode == GIT_FILEMODE_TREE:
                # descend one level; the remaining components are matched
                # against the subtree
                git_obj = self._get_git_object(self._repo[entry.id],
                                               obj_name,
                                               path_components[1:],
                                               modifier)
                if git_obj:
                    return git_obj

        return git_obj

    def get_git_object_type(self, tree, path):
        """
        Returns the filemode of the git object with the relative path
        <path>.

        :param tree: a `pygit2.Tree` instance
        :param path: the relative path of the object
        :type path: str
        :returns: the filemode for the entry in case of success (which can
            be one of the following) or None otherwise.

             0     (0000000)  GIT_FILEMODE_NEW
             16384 (0040000)  GIT_FILEMODE_TREE
             33188 (0100644)  GIT_FILEMODE_BLOB
             33261 (0100755)  GIT_FILEMODE_BLOB_EXECUTABLE
             40960 (0120000)  GIT_FILEMODE_LINK
             57344 (0160000)  GIT_FILEMODE_COMMIT
        :rtype: int, None
        """
        path_components = split_path_into_components(path)

        try:
            return self._get_git_object(tree, path_components[-1],
                                        path_components,
                                        lambda entry: entry.filemode)
        except:
            # NOTE(review): bare except kept for behavior parity; the empty
            # path case (path_components == []) raises IndexError above and
            # is treated as the tree root -- confirm no other errors should
            # propagate instead.
            return GIT_FILEMODE_TREE

    def get_git_object(self, tree, path):
        """
        Returns the git object with the relative path <path>.

        :param tree: a `pygit2.Tree` instance
        :param path: the relative path of the object
        :type path: str
        :returns: an instance corresponding to the object that is being
            searched for in case of success, or None else.
        :rtype: one of the following:
            an intance of `pygit2.Tree`
            an intance of `pygit2.Blob`
            None
        """
        # It acts as a proxy for the _get_git_object method, which
        # does the actual searching.
        path_components = split_path_into_components(path)

        return self._get_git_object(tree, path_components[-1],
                                    path_components,
                                    lambda entry: self._repo[entry.id])

    def get_git_object_default_stats(self, ref, path):
        """Return default filesystem stats for the git object at `path`.

        Directories are read-only 0555, links 0444, blobs 0444 (0555 when
        executable); blob entries also get their size filled in.
        """
        types = {
            GIT_FILEMODE_LINK: {"st_mode": S_IFLNK | 0o444},
            GIT_FILEMODE_TREE: {"st_mode": S_IFDIR | 0o555, "st_nlink": 2},
            GIT_FILEMODE_BLOB: {"st_mode": S_IFREG | 0o444},
            GIT_FILEMODE_BLOB_EXECUTABLE: {"st_mode": S_IFREG | 0o555},
        }

        if path == "/":
            return types[GIT_FILEMODE_TREE]

        obj_type = self.get_git_object_type(ref, path)
        if obj_type is None:
            return obj_type

        stats = types[obj_type]
        if obj_type in [GIT_FILEMODE_BLOB, GIT_FILEMODE_BLOB_EXECUTABLE]:
            stats["st_size"] = self.get_blob_size(ref, path)

        return stats

    def get_blob_size(self, tree, path):
        """
        Returns the size of a the data contained by a blob object
        with the relative path <path>.

        :param tree: a `pygit2.Tree` instance
        :param path: the relative path of the object
        :type path: str
        :returns: the size of data contained by the blob object.
        :rtype: int
        """
        return self.get_git_object(tree, path).size

    def get_blob_data(self, tree, path):
        """
        Returns the data contained by a blob object with the relative
        path <path>.

        :param tree: a `pygit2.Tree` instance
        :param path: the relative path of the object
        :type path: str
        :returns: the data contained by the blob object.
        :rtype: str
        """
        return self.get_git_object(tree, path).data

    def get_commit_dates(self):
        """
        Walk through all commits from current repo in order to compose the
        _history_ directory.
        """
        return list(self.commits.keys())

    def get_commits_by_date(self, date):
        """
        Retrieves all the commits from a particular date.

        :param date: date with the format: yyyy-mm-dd
        :type date: str
        :returns: a list containg the commits for that day. Each list item
            will have the format: hh:mm:ss-<short_sha1>, where short_sha1
            is the short sha1 of the commit (first 10 characters).
        :rtype: list
        """
        return list(map(str, self.commits[date]))

    def walk_branches(self, sort, *branches):
        """
        Simple iterator which take a sorting strategy and some branch and
        iterates through those branches one commit at a time, yielding a
        list of commits

        :param sort: a sorting option `GIT_SORT_NONE, GIT_SORT_TOPOLOGICAL,
            GIT_SORT_TIME, GIT_SORT_REVERSE`. Default is
            'GIT_SORT_TOPOLOGICAL'
        :param branches: branch to iterate through
        :type branches: list
        :returns: yields a list of commits corresponding to given branches
        :rtype: list
        """
        iterators = [iter(self._repo.walk(branch.target, sort))
                     for branch in branches]
        stop_iteration = [False for branch in branches]

        # prime every walker with its first commit (None when exhausted)
        commits = []
        for iterator in iterators:
            try:
                commit = next(iterator)
            except StopIteration:
                commit = None
            commits.append(commit)

        yield (commit for commit in commits)

        # advance all walkers in lockstep until every one is exhausted
        while not all(stop_iteration):
            for index, iterator in enumerate(iterators):
                try:
                    commit = next(iterator)
                    commits[index] = commit
                except StopIteration:
                    stop_iteration[index] = True

            if not all(stop_iteration):
                yield (commit for commit in commits)

    def remote_head(self, upstream, branch):
        """Return the commit at the tip of `upstream/branch`."""
        ref = "%s/%s" % (upstream, branch)
        remote = self._repo.lookup_branch(ref, GIT_BRANCH_REMOTE)
        return remote.get_object()

    def get_remote(self, name):
        """
        Retrieve a remote by name. Raise a ValueError if the remote was not
        added to repo

        Examples::

                repo.get_remote("fork")
        """
        remote = [remote for remote in self._repo.remotes
                  if remote.name == name]
        if not remote:
            raise ValueError("Missing remote")

        return remote[0]

    def _full_path(self, partial):
        """Join an index-relative path onto the repository's workdir."""
        if partial.startswith("/"):
            partial = partial[1:]
        return os.path.join(self._repo.workdir, partial)

    def find_diverge_commits(self, first_branch, second_branch):
        """
        Take two branches and find diverge commits.

             2--3--4--5
            /
        1--+                    Return:
            \                       - common parent: 1
             6                      - first list of commits: (2, 3, 4, 5)
                                    - second list of commits: (6)

        :param first_branch: first branch to look for common parent
        :type first_branch: `pygit2.Branch`
        :param second_branch: second branch to look for common parent
        :type second_branch: `pygit2.Branch`
        :returns: a namedtuple with common parent, a list of first's
            branch commits and another list with second's branch commits
        :rtype: DivergeCommits (namedtuple)
        """
        common_parent = None
        first_commits = CommitsList()
        second_commits = CommitsList()

        walker = self.walk_branches(GIT_SORT_TOPOLOGICAL,
                                    first_branch, second_branch)

        # walk both branches in lockstep until they cross paths
        for first_commit, second_commit in walker:
            if (first_commit in second_commits or
                    second_commit in first_commits):
                break

            if first_commit not in first_commits:
                first_commits.append(first_commit)
            if second_commit not in second_commits:
                second_commits.append(second_commit)

            if second_commit.hex == first_commit.hex:
                break

        # trim each list at the point where the other branch's last commit
        # appears -- that shared commit is the common parent
        try:
            index = second_commits.index(first_commit)
        except ValueError:
            pass
        else:
            second_commits = second_commits[:index]
            common_parent = first_commit

        try:
            index = first_commits.index(second_commit)
        except ValueError:
            pass
        else:
            first_commits = first_commits[:index]
            common_parent = second_commit

        return DivergeCommits(common_parent, first_commits, second_commits)
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<line_sep>range=getattr(__builtins__ 'xrange' range)<line_sep># end of py2 compatability boilerplate <import_stmt>logging<import_stmt>numpy<as>np<import_from_stmt>matrixprofile core<line_sep>logger=logging.getLogger(__name__)<line_sep>_EPS=1e-14<def_stmt>_batch_compute args<block_start>""" Internal function to compute a batch of the time series in parallel. Parameters ---------- args : tuple Various attributes used for computing the batch. ( batch_start : int The starting index for this batch. batch_end : int The ending index for this batch. ts : array_like The time series to compute the matrix profile for. query : array_like The query. window_size : int The size of the window to compute the profile over. data_length : int The number of elements in the time series. profile_length : int The number of elements that will be in the final matrix profile. exclusion_zone : int Used to exclude trivial matches. data_mu : array_like The moving average over the time series for the given window size. data_sig : array_like The moving standard deviation over the time series for the given window size. first_product : array_like The first sliding dot product for the time series over index 0 to window_size. skip_locs : array_like Indices that should be skipped for distance profile calculation due to a nan or inf. ) Returns ------- dict : profile The matrix profile, left and right matrix profiles and their respective profile indices. 
>>> { >>> 'mp': The matrix profile, >>> 'pi': The matrix profile 1NN indices, >>> 'rmp': The right matrix profile, >>> 'rpi': The right matrix profile 1NN indices, >>> 'lmp': The left matrix profile, >>> 'lpi': The left matrix profile 1NN indices, >>> } """<line_sep>num_dim,batch_start,batch_end,ts,query,window_size,data_length,profile_length,exclusion_zone,data_mu,data_sig,first_product,skip_locs,profile_dimension,return_dimension=args<line_sep># initialize matrices matrix_profile=np.full((num_dim profile_length) np.inf)<line_sep>profile_index=np.full((num_dim profile_length) 0)<line_sep>left_matrix_profile=<none><line_sep>right_matrix_profile=<none><line_sep>left_profile_index=<none><line_sep>right_profile_index=<none><line_sep>left_matrix_profile=np.copy(matrix_profile)<line_sep>right_matrix_profile=np.copy(matrix_profile)<line_sep>left_profile_index=np.copy(profile_index)<line_sep>right_profile_index=np.copy(profile_index)<line_sep># with batch 0 we do not need to recompute the dot product # however with other batch windows, we need the previous iterations sliding # dot product last_product=np.copy(first_product)<if_stmt>batch_start<is>0<block_start>first_window=query[: batch_start:batch_start+window_size]<block_end><else_stmt><block_start>first_window=query[: batch_start-1:batch_start+window_size-1]<for_stmt>i range(num_dim)<block_start>last_product[i :]=core.fft_convolve(ts[i :] first_window[i :])<block_end><block_end>query_sum=np.sum(first_window axis=1)<line_sep>query_2sum=np.sum(first_window<power>2 axis=1)<line_sep>query_mu,query_sig=np.empty(num_dim) np.empty(num_dim)<for_stmt>i range(num_dim)<block_start>query_mu[i],query_sig[i]=core.moving_avg_std(first_window[i :] window_size)<block_end>drop_value=np.empty(num_dim)<for_stmt>i range(num_dim)<block_start>drop_value[i]=first_window[i 0]<block_end>distance_profile=np.empty((num_dim profile_length))<line_sep># make sure to compute inclusively from batch start to batch end # otherwise there are gaps in the 
profile <if_stmt>batch_end<l>profile_length<block_start>batch_end<augadd>1<block_end># iteratively compute distance profile and update with element-wise mins <for_stmt>i range(batch_start batch_end)# check for nan or inf and skip <block_start><if_stmt>skip_locs[i]<block_start><continue><block_end><for_stmt>j range(num_dim)<block_start><if_stmt>i<eq>0<block_start>query_window=query[j i:i+window_size]<line_sep>distance_profile[j :]=core.distance_profile(last_product[j :] window_size data_mu[j :] data_sig[j :] query_mu[j] query_sig[j])<line_sep># apply exclusion zone distance_profile[j :]=core.apply_exclusion_zone(exclusion_zone 0 window_size data_length 0 distance_profile[j :])<block_end><else_stmt><block_start>query_window=query[j i:i+window_size]<line_sep>query_sum[j]=query_sum[j]-drop_value[j]+query_window[-1]<line_sep>query_2sum[j]=query_2sum[j]-drop_value[j]<power>2+query_window[-1]<power>2<line_sep>query_mu[j]=query_sum[j]/window_size<line_sep>query_sig2=query_2sum[j]/window_size-query_mu[j]<power>2<if_stmt>query_sig2<l>_EPS<block_start>query_sig2=_EPS<block_end>query_sig[j]=np.sqrt(query_sig2)<line_sep>last_product[j 1:]=last_product[j 0:data_length-window_size]-ts[j 0:data_length-window_size]<times>drop_value[j]+ts[j window_size:]<times>query_window[-1]<line_sep>last_product[j 0]=first_product[j i]<line_sep>distance_profile[j :]=core.distance_profile(last_product[j :] window_size data_mu[j :] data_sig[j :] query_mu[j] query_sig[j])<line_sep># apply the exclusion zone distance_profile[j :]=core.apply_exclusion_zone(exclusion_zone 0 window_size data_length i distance_profile[j :])<block_end>distance_profile[j distance_profile[j :]<l>_EPS]=0<line_sep>drop_value[j]=query_window[0]<block_end><if_stmt>np.any(query_sig<l>_EPS)<block_start><continue><block_end>distance_profile[: skip_locs]=np.inf<line_sep>distance_profile[data_sig<l>np.sqrt(_EPS)]=np.inf<line_sep>distance_profile_dim=np.argsort(distance_profile 
axis=0)<line_sep>distance_profile_sort=np.sort(distance_profile axis=0)<line_sep>distance_profile_cumsum=np.zeros(profile_length)<for_stmt>j range(num_dim)<block_start>distance_profile_cumsum<augadd>distance_profile_sort[j :]<line_sep>distance_profile_mean=distance_profile_cumsum/(j+1)<line_sep># update the matrix profile indices=(distance_profile_mean<l>matrix_profile[j :])<line_sep>matrix_profile[j indices]=distance_profile_mean[indices]<line_sep>profile_index[j indices]=i<if_stmt>return_dimension<block_start>profile_dimension[j][: indices]=distance_profile_dim[:j+1 indices]<block_end># update the left and right matrix profiles # find differences, shift left and update indices=distance_profile_mean[i:]<l>left_matrix_profile[j i:]<line_sep>falses=np.zeros(i).astype('bool')<line_sep>indices=np.append(falses indices)<line_sep>left_matrix_profile[j indices]=distance_profile_mean[indices]<line_sep>left_profile_index[j np.argwhere(indices)]=i<line_sep># find differences, shift right and update indices=distance_profile_mean[0:i]<l>right_matrix_profile[j 0:i]<line_sep>falses=np.zeros(profile_length-i).astype('bool')<line_sep>indices=np.append(indices falses)<line_sep>right_matrix_profile[j indices]=distance_profile_mean[indices]<line_sep>right_profile_index[j np.argwhere(indices)]=i<block_end><block_end><return>{'mp':matrix_profile 'pi':profile_index 'pd':profile_dimension 'rmp':right_matrix_profile 'rpi':right_profile_index 'lmp':left_matrix_profile 'lpi':left_profile_index }<block_end><def_stmt>mstomp ts window_size return_dimension=<false> n_jobs=1<block_start>""" Computes multidimensional matrix profile with mSTAMP (stomp based). Ray or Python's multiprocessing library may be used. When you have initialized Ray on your machine, it takes priority over using Python's multiprocessing. Parameters ---------- ts : array_like, shape (n_dim, seq_len) The multidimensional time series to compute the multidimensional matrix profile for. 
window_size: int The size of the window to compute the matrix profile over. return_dimension : bool if True, also return the matrix profile dimension. It takses O(d^2 n) to store and O(d^2 n^2) to compute. (default is False) n_jobs : int, Default = 1 Number of cpu cores to use. Returns ------- dict : profile A MatrixProfile data structure. >>> { >>> 'mp': The matrix profile, >>> 'pi': The matrix profile 1NN indices, >>> 'rmp': The right matrix profile, >>> 'rpi': The right matrix profile 1NN indices, >>> 'lmp': The left matrix profile, >>> 'lpi': The left matrix profile 1NN indices, >>> 'metric': The distance metric computed for the mp, >>> 'w': The window size used to compute the matrix profile, >>> 'ez': The exclusion zone used, >>> 'sample_pct': Percentage of samples used in computing the MP, >>> 'data': { >>> 'ts': Time series data, >>> 'query': Query data if supplied >>> } >>> 'class': "MatrixProfile" >>> 'algorithm': "stomp_based_mstamp" >>> } Raises ------ ValueError If window_size < 4. If window_size > time series length / 2. If ts is not a list or np.array. """<line_sep>query=ts<line_sep># data conversion to np.array ts=core.to_np_array(ts)<line_sep>query=core.to_np_array(query)<if_stmt>window_size<l>4<block_start>error="window size must be at least 4."<line_sep><raise>ValueError(error)<block_end><if_stmt>ts.ndim<eq>1<block_start>ts=np.expand_dims(ts axis=0)<line_sep>query=np.expand_dims(query axis=0)<block_end><if_stmt>window_size<g>query.shape[1]/2<block_start>error="Time series is too short relative to desired window size"<line_sep><raise>ValueError(error)<block_end># multiprocessing or single threaded approach <if_stmt>n_jobs<eq>1<block_start><pass><block_end><else_stmt><block_start>n_jobs=core.valid_n_jobs(n_jobs)<block_end># precompute some common values - profile length, query length etc. 
profile_length=core.get_profile_length(ts query window_size)<line_sep>data_length=ts.shape[1]<line_sep>query_length=query.shape[1]<line_sep>num_queries=query_length-window_size+1<line_sep>exclusion_zone=int(np.ceil(window_size/2.0))<line_sep>num_dim=ts.shape[0]<line_sep># find skip locations, clean up nan and inf in the ts and query skip_locs=core.find_multid_skip_locations(ts profile_length window_size)<line_sep>ts=core.clean_nan_inf(ts)<line_sep>query=core.clean_nan_inf(query)<line_sep># initialize matrices matrix_profile=np.full((num_dim profile_length) np.inf)<line_sep>profile_index=np.full((num_dim profile_length) 0)<line_sep># profile_index = np.full((num_dim, profile_length), -1) # compute left and right matrix profile when similarity join does not happen left_matrix_profile=np.copy(matrix_profile)<line_sep>right_matrix_profile=np.copy(matrix_profile)<line_sep>left_profile_index=np.copy(profile_index)<line_sep>right_profile_index=np.copy(profile_index)<line_sep>profile_dimension=[]<if_stmt>return_dimension<block_start>n_jobs=1<for_stmt>i range(num_dim)<block_start>profile_dimension.append(np.empty((i+1 profile_length) dtype=int))<block_end><block_end># precompute some statistics on ts data_mu,data_sig,first_product=np.empty((num_dim profile_length)) np.empty((num_dim profile_length)) np.empty((num_dim profile_length))<for_stmt>i range(num_dim)<block_start>data_mu[i :],data_sig[i :]=core.moving_avg_std(ts[i :] window_size)<line_sep>first_window=query[i 0:window_size]<line_sep>first_product[i :]=core.fft_convolve(ts[i :] first_window)<block_end>batch_windows=[]<line_sep>results=[]<line_sep># batch compute with multiprocessing args=[]<for_stmt>start,end core.generate_batch_jobs(num_queries n_jobs)<block_start>args.append((num_dim start end ts query window_size data_length profile_length exclusion_zone data_mu data_sig first_product skip_locs profile_dimension return_dimension))<line_sep>batch_windows.append((start end))<block_end># we are running single 
threaded stomp - no need to initialize any # parallel environments. <if_stmt>n_jobs<eq>1<or>len(args)<eq>1<block_start>results.append(_batch_compute(args[0]))<block_end><else_stmt># parallelize <block_start><with_stmt>core.mp_pool()(n_jobs)<as>pool<block_start>results=pool.map(_batch_compute args)<block_end><block_end># now we combine the batch results <if_stmt>len(results)<eq>1<block_start>result=results[0]<line_sep>matrix_profile=result['mp']<line_sep>profile_index=result['pi']<line_sep>profile_dimension=result['pd']<line_sep>left_matrix_profile=result['lmp']<line_sep>left_profile_index=result['lpi']<line_sep>right_matrix_profile=result['rmp']<line_sep>right_profile_index=result['rpi']<block_end><else_stmt><block_start><for_stmt>index,result enumerate(results)<block_start>start=batch_windows[index][0]<line_sep>end=batch_windows[index][1]<line_sep># update the matrix profile indices=result['mp']<l>matrix_profile<line_sep>matrix_profile[indices]=result['mp'][indices]<line_sep>profile_index[indices]=result['pi'][indices]<line_sep># update the left and right matrix profiles indices=result['lmp']<l>left_matrix_profile<line_sep>left_matrix_profile[indices]=result['lmp'][indices]<line_sep>left_profile_index[indices]=result['lpi'][indices]<line_sep>indices=result['rmp']<l>right_matrix_profile<line_sep>right_matrix_profile[indices]=result['rmp'][indices]<line_sep>right_profile_index[indices]=result['rpi'][indices]<block_end><block_end><return>{'mp':matrix_profile 'pi':profile_index 'pd':profile_dimension 'rmp':right_matrix_profile 'rpi':right_profile_index 'lmp':left_matrix_profile 'lpi':left_profile_index 'metric':'euclidean' 'w':window_size 'ez':exclusion_zone 'sample_pct':1 'data':{'ts':ts 'query':query} 'class':"MatrixProfile" 'algorithm':"stomp_based_mstamp"}<block_end>
"""
Copyright 2019 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Convert sail sim log to standard riscv instruction trace format
"""

import argparse
import os
import re
import sys
import logging

sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))

from riscv_trace_csv import *

# Marker of the first instruction of interest in the sail log.
START_RE = re.compile(r"\[4\] \[M\]: 0x.*00001010")
# Trace processing stops at the first ecall.
END_RE = re.compile(r"ecall")
# One executed instruction: privilege level, PC, encoding and mnemonic.
INSTR_RE = re.compile(r"\[[0-9].*\] \[(?P<pri>.)\]: 0x(?P<addr>[A-F0-9]+?)"
                      " \(0x(?P<bin>[A-F0-9]+?)\) (?P<instr>.+?$)")
# A register write-back line: destination register and value.
RD_RE = re.compile(r"x(?P<reg>[0-9]+?) <- 0x(?P<val>[A-F0-9]*)")


def process_sail_sim_log(sail_log, csv):
    """Process SAIL RISCV simulation log.

    Extract instruction and affected register information from the sail
    simulation log and write it to `csv` in the standard RISC-V
    instruction trace format. An instruction entry is emitted only when
    its register write-back line is seen.

    :param sail_log: path of the input sail simulation log
    :param csv: path of the output trace CSV file
    """
    logging.info("Processing sail log : %s" % sail_log)
    instr_cnt = 0
    sail_instr = ""

    with open(sail_log, "r") as f, open(csv, "w") as csv_fd:
        search_start = 0
        instr_start = 0
        trace_csv = RiscvInstructionTraceCsv(csv_fd)
        trace_csv.start_new_trace()
        instr = None
        for line in f:
            # Extract instruction information
            m = START_RE.search(line)
            if m:
                search_start = 1
                continue
            m = END_RE.search(line)
            if m:
                break
            if search_start:
                instr = INSTR_RE.search(line)
                if instr:
                    # remember this instruction; the CSV entry is written
                    # once the matching register write-back appears
                    instr_start = 1
                    pri = instr.group("pri")
                    addr = instr.group("addr").lower()
                    binary = instr.group("bin").lower()
                    instr_str = instr.group("instr")
                    continue
                if instr_start:
                    m = RD_RE.search(line)
                    if m:
                        # Write the extracted instruction to a csvcol
                        # buffer file
                        instr_cnt += 1
                        rv_instr_trace = RiscvInstructionTraceEntry()
                        rv_instr_trace.rd = gpr_to_abi("x%0s" % m.group("reg"))
                        rv_instr_trace.rd_val = m.group("val").lower()
                        rv_instr_trace.privileged_mode = pri
                        rv_instr_trace.addr = addr
                        rv_instr_trace.binary = binary
                        rv_instr_trace.instr_str = instr_str
                        trace_csv.write_trace_entry(rv_instr_trace)
                        instr_start = 0
    logging.info("Processed instruction count : %d" % instr_cnt)


def main():
    """Parse CLI arguments and run the log conversion."""
    # NOTE: the previously declared, never-used `instr_trace` list was
    # removed.
    # Parse input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--log", type=str, help="Input sail simulation log")
    parser.add_argument("--csv", type=str, help="Output trace csv_buf file")
    args = parser.parse_args()

    # Process sail log
    process_sail_sim_log(args.log, args.csv)


if __name__ == "__main__":
    main()
"""fauxmo.plugins :: Provide ABC for Fauxmo plugins."""

import abc
from typing import Callable


class FauxmoPlugin(abc.ABC):
    """Provide ABC for Fauxmo plugins.

    An instance of a concrete subclass becomes the `plugin` attribute of a
    `Fauxmo` device; Alexa's "turn on" / "turn off" requests are routed to
    its `on` and `off` methods.

    Configuration keys beyond the `DEVICES` list are forwarded to the
    plugin as keyword arguments at construction time. Plugins that accept
    custom keys must therefore override `__init__` and either set the
    `name` and private `_port` attributes themselves or delegate the
    standard ones to `super().__init__()`.
    """

    def __init__(self, *, name: str, port: int) -> None:
        """Initialize FauxmoPlugin.

        Keyword Args:
            name: Required, device name
            port: Required, port that the Fauxmo associated with this
                  plugin should run on

        The `port` value is filled in by `fauxmo.fauxmo` with an
        apparently free port when the config omits it. The base class does
        nothing with it beyond storing it for user code (logging,
        debugging); a plugin is free to ignore it and pick its own port,
        since the Fauxmo device reads the port back from this attribute.

        The `_latest_action` attribute records the most recent successful
        action; it is maintained by the `__getattribute__` interception of
        successful `.on()` / `.off()` calls.
        """
        self._name = name
        self._port = port
        self._latest_action = "off"

    def __getattribute__(self, name: str) -> Callable:
        """Intercept `.on()` and `.off()` to set `_latest_action` attribute."""
        if name not in ("on", "off"):
            return object.__getattribute__(self, name)

        # Run the real on/off immediately; record it only when it reports
        # success, then hand the caller a callable that replays the result.
        outcome = object.__getattribute__(self, name)()
        if outcome is True:
            self._latest_action = name
        return lambda: outcome

    @property
    def port(self) -> int:
        """Return port attribute in read-only manner."""
        return self._port

    @property
    def name(self) -> str:
        """Return name attribute in read-only manner."""
        return self._name

    @abc.abstractmethod
    def on(self) -> bool:
        """Run function when Alexa turns this Fauxmo device on."""

    @abc.abstractmethod
    def off(self) -> bool:
        """Run function when Alexa turns this Fauxmo device off."""

    @abc.abstractmethod
    def get_state(self) -> str:
        """Run function when Alexa requests device state.

        Should return "on" or "off" if it can be determined, or "unknown"
        if there is no mechanism for determining the device state, in
        which case Alexa will complain that the device is not responding.

        When the real state is unknowable, a plugin may opt into this base
        implementation, which reports the `_latest_action` instead. The
        method stays abstract on purpose: every plugin must spell out its
        choice with an explicit `return super().get_state()` rather than
        silently inheriting it.
        """
        return self.latest_action

    def close(self) -> None:
        """Run when shutting down; allows plugin to clean up state."""

    @property
    def latest_action(self) -> str:
        """Return latest action in read-only manner.

        Must be a function instead of e.g. property because it overrides
        `get_state`, and therefore must be callable.
        """
        return self._latest_action

    def __repr__(self) -> str:
        """Provide a default human-readable representation of the plugin."""
        pairs = (f"{key}={value!r}" for key, value in self.__dict__.items())
        return f"{self.__class__.__name__}({', '.join(pairs)})"
# encoding: utf-8

"""Exceptions used by marrow.mailer to report common errors."""

# BUGFIX: DeliveryException, DeliveryFailedException and MailerNotRunning
# are public classes defined below but were missing from __all__, so they
# were silently excluded from `from ... import *`. Adding them is
# backward compatible.
__all__ = [
    'MailException',
    'DeliveryException',
    'DeliveryFailedException',
    'MailerNotRunning',
    'MailConfigurationException',
    'TransportException',
    'TransportFailedException',
    'MessageFailedException',
    'TransportExhaustedException',
    'ManagerException',
]


class MailException(Exception):
    """The base for all marrow.mailer exceptions."""

    pass


# Application Exceptions

class DeliveryException(MailException):
    """The base class for all public-facing exceptions."""

    pass


class DeliveryFailedException(DeliveryException):
    """The message stored in args[0] could not be delivered for the reason
    given in args[1].  (These can be accessed as e.msg and e.reason.)"""

    def __init__(self, message, reason):
        # keep both the attribute access (e.msg / e.reason) and the plain
        # args tuple of a standard Exception
        self.msg = message
        self.reason = reason

        super(DeliveryFailedException, self).__init__(message, reason)


# Internal Exceptions

class MailerNotRunning(MailException):
    """Raised when attempting to deliver messages using a dead interface."""

    pass


class MailConfigurationException(MailException):
    """There was an error in the configuration of marrow.mailer."""

    pass


class TransportException(MailException):
    """The base for all marrow.mailer Transport exceptions."""

    pass


class TransportFailedException(TransportException):
    """The transport has failed to deliver the message due to an internal
    error; a new instance of the transport should be used to retry."""

    pass


class MessageFailedException(TransportException):
    """The transport has failed to deliver the message due to a problem with
    the message itself, and no attempt should be made to retry delivery of
    this message.  The transport may still be re-used, however.

    The reason for the failure should be the first argument.
    """

    pass


class TransportExhaustedException(TransportException):
    """The transport has successfully delivered the message, but can no
    longer be used for future message delivery; a new instance should be
    used on the next request."""

    pass


class ManagerException(MailException):
    """The base for all marrow.mailer Manager exceptions."""

    pass
# Copyright (c) OpenMMLab. All rights reserved. <import_stmt>torch<import_from_stmt>mmdeploy.codebase.mmdet.deploy clip_bboxes<def_stmt>distance2bbox points distance max_shape=<none><block_start>"""Rewrite `mmdet.core.bbox.transforms.distance2bbox` Decode distance prediction to bounding box. Args: points (Tensor): Shape (B, N, 2) or (N, 2). distance (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. Returns: Tensor: Boxes with shape (N, 4) or (B, N, 4) """<line_sep>x1=points[<ellipsis> 0]-distance[<ellipsis> 0]<line_sep>y1=points[<ellipsis> 1]-distance[<ellipsis> 1]<line_sep>x2=points[<ellipsis> 0]+distance[<ellipsis> 2]<line_sep>y2=points[<ellipsis> 1]+distance[<ellipsis> 3]<line_sep>bboxes=torch.stack([x1 y1 x2 y2] -1)<if_stmt>max_shape<is><not><none># clip bboxes with dynamic `min` and `max` <block_start>x1,y1,x2,y2=clip_bboxes(x1 y1 x2 y2 max_shape)<line_sep>bboxes=torch.stack([x1 y1 x2 y2] dim=-1)<line_sep><return>bboxes<block_end><return>bboxes<block_end>
"""All global configurations for this project."""<import_stmt>os<class_stmt>GlobalConfig<block_start>"""The global configuration of alpa."""<def_stmt>__init__ self########## Options of device mesh ########## <block_start>self.xla_client_mem_fraction=float(os.environ.get("XLA_PYTHON_CLIENT_MEM_FRACTION" 0.9))<line_sep>self.xla_gpu_autotune_level=4<line_sep>self.delete_remote_buffers_threshold=200<line_sep># use AWS EFA network interface self.use_aws_efa=os.environ.get("ALPA_USE_AWS_EFA" "").lower()<in>["true" "1"]<line_sep>########## Options of shard_parallel ########## self.shard_parallel_sync_for_timer=<false><line_sep>########## Options of pipeline_parallel ########## # Whether to debug with local runtime. The local runtime checks # correctness of stage construction and other graph level operations. self.debug_with_local_runtime=<false><line_sep># Whether to debug with pipeshard runtime. If turned on, no physical resource # is required until launching PipeshardExecutable. # TODO(yonghao): deprecate it later. self.debug_with_pipeshard_runtime=<false><line_sep># Whether to use the whole cluster for stage profiling. If not, only use the given mesh. self.profile_with_whole_ray_cluster=<true><line_sep># Stage construction profiling time threshold. self.profile_timeout=500<line_sep># Stage construction profiling retry threshold. # Some communication patterns may meet deadlock, so it needs retry. self.profile_maximum_retry=2<line_sep># Whether to forcely set stage construction's submesh choices self.overwrite_submesh_choices=<none><line_sep>########## Options of pipeline runtime ########## self.pipeline_check_alive=<true><line_sep># Whether to sync before and after the executable for accurate internal timer self.pipeline_sync_for_timer=<false><line_sep># Whether to use distributed compilation in pipeline parallel for # each stage. Disabling it helps debug. 
self.pipeline_distributed_compile=<true><line_sep>self.pipeline_use_signal_send_recv=<false><line_sep>self.precompile_resharding_tasks=<true><line_sep>self.use_scatter_gather=<true><line_sep>self.eagerly_create_communicators=<true><line_sep>self.use_memzero_for_gradient_accumulation=<false><line_sep># Cross mesh resharding mode. Possible choices: {"send_recv", "broadcast"} self.resharding_mode="send_recv"<line_sep>########## Options of XLA compilation ########## self.build_random_seed=42<line_sep># Whether to use xla while instruction for preventing CSE in rematerialization self.remat_using_while=<false><line_sep>########## Options of benchmark ########## # If true, the system is allowed to use dummy values during # tensor creation and copy to reduce the initialization and copy time. # This will produce wrong results but is acceptable for # data-independent benchmarks. self.use_dummy_value_for_benchmarking=<false><line_sep>########## Options of logging ########## self.print_compilation_time=<false><line_sep>########## Options of ray namespace ########## self.default_ray_namespace_prefix="alpa-train"<line_sep>self.unittest_ray_namespace_prefix="alpa-unittest"<block_end><block_end>global_config=GlobalConfig()<line_sep># Other environment setup is_worker=os.environ.get("ALPA_IS_WORKER" "False")<eq>"True"<line_sep>os.environ["XLA_FLAGS"]=os.environ.get("XLA_FLAGS" "")+" --xla_gpu_enable_async_all_reduce=false"<line_sep>
# -*- coding: utf-8 -*- """ flaskbb.core.exceptions ~~~~~~~~~~~~~~~~~~~~~~~ Exceptions raised by flaskbb.core, forms the root of all exceptions in FlaskBB. :copyright: (c) 2014-2018 the FlaskBB Team :license: BSD, see LICENSE for more details """<class_stmt>BaseFlaskBBError(Exception)<block_start>""" Root exception for FlaskBB. """<block_end><class_stmt>ValidationError(BaseFlaskBBError)<block_start>""" Used to signal validation errors for things such as token verification, user registration, etc. :param str attribute: The attribute the validation error applies to, if the validation error applies to multiple attributes or to the entire object, this should be set to None :param str reason: Why the attribute, collection of attributes or object is invalid. """<def_stmt>__init__ self attribute reason<block_start>self.attribute=attribute<line_sep>self.reason=reason<line_sep>super(ValidationError self).__init__((attribute reason))<block_end><block_end><class_stmt>StopValidation(BaseFlaskBBError)<block_start>""" Raised from validation handlers to signal that validation should end immediately and no further processing should be done. Can also be used to communicate all errors raised during a validation run. :param reasons: A sequence of `(attribute, reason)` pairs explaining why the object is invalid. """<def_stmt>__init__ self reasons<block_start>self.reasons=reasons<line_sep>super(StopValidation self).__init__(reasons)<block_end><block_end><class_stmt>PersistenceError(BaseFlaskBBError)<block_start>""" Used to catch down errors when persisting models to the database instead of letting all issues percolate up, this should be raised from those exceptions without smashing their tracebacks. 
Example:: try: db.session.add(new_user) db.session.commit() except Exception: raise PersistenceError("Couldn't save user account") """<block_end><def_stmt>accumulate_errors caller validators throw=<true><block_start>errors=[]<for_stmt>validator validators<block_start><try_stmt><block_start>caller(validator)<block_end><except_stmt>ValidationError<as>e<block_start>errors.append((e.attribute e.reason))<block_end><block_end><if_stmt>len(errors)<and>throw<block_start><raise>StopValidation(errors)<block_end><return>errors<block_end>
# This code is based on ufoProcessor code, which is licensed as follows: # Copyright (c) 2017-2018 LettError and <NAME> # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """Module for generating static font instances. It is an alternative to mutatorMath (used internally by fontmake) and ufoProcessor. The aim is to be a minimal implementation that is focussed on using ufoLib2 for font data abstraction, varLib for instance computation and fontMath as a font data shell for instance computation directly and exclusively. At the time of this writing, varLib lacks support for anisotropic (x, y) locations and extrapolation. 
"""<import_stmt>copy<import_stmt>logging<import_stmt>typing<import_from_stmt>typing Any Dict List Mapping Set Tuple Union<import_stmt>attr<import_stmt>fontMath<import_stmt>fontTools.designspaceLib<as>designspaceLib<import_stmt>fontTools.misc.fixedTools<import_stmt>fontTools.varLib<as>varLib<import_stmt>ufoLib2<line_sep>logger=logging.getLogger(__name__)<line_sep># Use the same rounding function used by varLib to round things for the variable font # to reduce differences between the variable and static instances. fontMath.mathFunctions.setRoundIntegerFunction(fontTools.misc.fixedTools.otRound)<line_sep># Stand-in type for any of the fontMath classes we use. FontMathObject=Union[fontMath.MathGlyph fontMath.MathInfo fontMath.MathKerning]<line_sep># MutatorMath-style location mapping type, i.e. # `{"wght": 1.0, "wdth": 0.0, "bleep": 0.5}`. # LocationKey is a Location turned into a tuple so we can use it as a dict key. Location=Mapping[str float]<line_sep>LocationKey=Tuple[Tuple[str float] <ellipsis>]<line_sep># Type of mapping of axes to their minimum, default and maximum values, i.e. # `{"wght": (100.0, 400.0, 900.0), "wdth": (75.0, 100.0, 100.0)}`. AxisBounds=Dict[str Tuple[float float float]]<line_sep># For mapping `wdth` axis user values to the OS2 table's width class field. WDTH_VALUE_TO_OS2_WIDTH_CLASS={50:1 62.5:2 75:3 87.5:4 100:5 112.5:6 125:7 150:8 200:9 }<line_sep># Font info fields that are not interpolated and should be copied from the # default font to the instance. 
# # fontMath at the time of this writing handles the following attributes: # https://github.com/robotools/fontMath/blob/0.5.0/Lib/fontMath/mathInfo.py#L360-L422 # # From the attributes that are left, we skip instance-specific ones on purpose: # - guidelines # - postscriptFontName # - styleMapFamilyName # - styleMapStyleName # - styleName # - openTypeNameCompatibleFullName # - openTypeNamePreferredFamilyName # - openTypeNamePreferredSubfamilyName # - openTypeNameUniqueID # - openTypeNameWWSFamilyName # - openTypeNameWWSSubfamilyName # - openTypeOS2Panose # - postscriptFullName # - postscriptUniqueID # - woffMetadataUniqueID # # Some, we skip because they are deprecated: # - macintoshFONDFamilyID # - macintoshFONDName # - year # # This means we implicitly require the `stylename` attribute in the Designspace # `<instance>` element. UFO_INFO_ATTRIBUTES_TO_COPY_TO_INSTANCES={"copyright" "familyName" "note" "openTypeGaspRangeRecords" "openTypeHeadCreated" "openTypeHeadFlags" "openTypeNameDescription" "openTypeNameDesigner" "openTypeNameDesignerURL" "openTypeNameLicense" "openTypeNameLicenseURL" "openTypeNameManufacturer" "openTypeNameManufacturerURL" "openTypeNameRecords" "openTypeNameSampleText" "openTypeNameVersion" "openTypeOS2CodePageRanges" "openTypeOS2FamilyClass" "openTypeOS2Selection" "openTypeOS2Type" "openTypeOS2UnicodeRanges" "openTypeOS2VendorID" "postscriptDefaultCharacter" "postscriptForceBold" "postscriptIsFixedPitch" "postscriptWindowsCharacterSet" "trademark" "versionMajor" "versionMinor" "woffMajorVersion" "woffMetadataCopyright" "woffMetadataCredits" "woffMetadataDescription" "woffMetadataExtensions" "woffMetadataLicense" "woffMetadataLicensee" "woffMetadataTrademark" "woffMetadataVendor" "woffMinorVersion" }<line_sep># Custom exception for this module <class_stmt>InstantiatorError(Exception)<block_start><pass><block_end><def_stmt>process_rules_swaps rules location glyphNames<block_start>"""Apply these rules at this location to these glyphnames - rule 
order matters Return a list of (oldName, newName) in the same order as the rules. """<line_sep>swaps=[]<line_sep>glyphNames=set(glyphNames)<for_stmt>rule rules<block_start><if_stmt>designspaceLib.evaluateRule(rule location)<block_start><for_stmt>oldName,newName rule.subs# Here I don't check if the new name is also in glyphNames... # I guess it should be, so that we can swap, and if it isn't, # then it's better to error out later when we try to swap, # instead of silently ignoring the rule here. <block_start><if_stmt>oldName<in>glyphNames<block_start>swaps.append((oldName newName))<block_end><block_end><block_end><block_end><return>swaps<block_end>@attr.s(auto_attribs=<true> frozen=<true> slots=<true>)<class_stmt>Instantiator<block_start>"""Data class that holds all necessary information to generate a static font instance object at an arbitary location within the design space."""<line_sep>axis_bounds:AxisBounds# Design space! copy_feature_text:str<line_sep>copy_nonkerning_groups:Mapping[str List[str]]<line_sep>copy_info:ufoLib2.objects.Info<line_sep>copy_lib:Mapping[str Any]<line_sep>default_design_location:Location<line_sep>designspace_rules:List[designspaceLib.RuleDescriptor]<line_sep>glyph_mutators:Mapping[str "Variator"]<line_sep>glyph_name_to_unicodes:Dict[str List[int]]<line_sep>info_mutator:"Variator"<line_sep>kerning_mutator:"Variator"<line_sep>round_geometry:bool<line_sep>skip_export_glyphs:List[str]<line_sep>special_axes:Mapping[str designspaceLib.AxisDescriptor]<line_sep>@classmethod<def_stmt>from_designspace cls designspace:designspaceLib.DesignSpaceDocument round_geometry:bool=<true> <block_start>"""Instantiates a new data class from a Designspace object."""<if_stmt>designspace.default<is><none><block_start><raise>InstantiatorError(_error_msg_no_default(designspace))<block_end><if_stmt>any(hasattr(axis "values")<for>axis designspace.axes)<block_start><raise>InstantiatorError("The given designspace has one or more discrete (= non-interpolating) 
"<concat>"axes. You should split this designspace into smaller interpolating "<concat>"spaces and use the Instantiator on each. See the method "<concat>"`fontTools.designspaceLib.split.splitInterpolable()`")<block_end><if_stmt>any(anisotropic(instance.location)<for>instance designspace.instances)<block_start><raise>InstantiatorError("The Designspace contains anisotropic instance locations, which are "<concat>"not supported by varLib. Look for and remove all 'yvalue=\"...\"' or "<concat>"use MutatorMath instead.")<block_end>designspace.loadSourceFonts(ufoLib2.Font.open)<line_sep># The default font (default layer) determines which glyphs are interpolated, # because the math behind varLib and MutatorMath uses the default font as the # point of reference for all data. default_font=designspace.default.font<line_sep>glyph_names:Set[str]=set(default_font.keys())<for_stmt>source designspace.sources<block_start>other_names=set(source.font.keys())<line_sep>diff_names=other_names-glyph_names<if_stmt>diff_names<block_start>logger.warning("The source %s (%s) contains glyphs that are missing from the "<concat>"default source, which will be ignored: %s. If this is unintended, "<concat>"check that these glyphs have the exact same name as the "<concat>"corresponding glyphs in the default source." source.name source.filename ", ".join(sorted(diff_names)) )<block_end><block_end># Construct Variators axis_bounds:AxisBounds={}# Design space! axis_order:List[str]=[]<line_sep>special_axes={}<for_stmt>axis designspace.axes<block_start>axis_order.append(axis.name)<line_sep>axis_bounds[axis.name]=(axis.map_forward(axis.minimum) axis.map_forward(axis.default) axis.map_forward(axis.maximum) )<line_sep># Some axes relate to existing OpenType fields and get special attention. 
<if_stmt>axis.tag<in>{"wght" "wdth" "slnt"}<block_start>special_axes[axis.tag]=axis<block_end><block_end>masters_info=collect_info_masters(designspace axis_bounds)<try_stmt><block_start>info_mutator=Variator.from_masters(masters_info axis_order)<block_end><except_stmt>varLib.errors.VarLibError<as>e<block_start><raise>InstantiatorError(f"Cannot set up fontinfo for interpolation: {e}'")<from>e<block_end>masters_kerning=collect_kerning_masters(designspace axis_bounds)<try_stmt><block_start>kerning_mutator=Variator.from_masters(masters_kerning axis_order)<block_end><except_stmt>varLib.errors.VarLibError<as>e<block_start><raise>InstantiatorError(f"Cannot set up kerning for interpolation: {e}'")<from>e<block_end>glyph_mutators:Dict[str Variator]={}<line_sep>glyph_name_to_unicodes:Dict[str List[int]]={}<for_stmt>glyph_name glyph_names<block_start>items=collect_glyph_masters(designspace glyph_name axis_bounds)<try_stmt><block_start>glyph_mutators[glyph_name]=Variator.from_masters(items axis_order)<block_end><except_stmt>varLib.errors.VarLibError<as>e<block_start><raise>InstantiatorError(f"Cannot set up glyph '{glyph_name}' for interpolation: {e}'")<from>e<block_end>glyph_name_to_unicodes[glyph_name]=default_font[glyph_name].unicodes<block_end># Construct defaults to copy over copy_feature_text:str=default_font.features.text<line_sep>copy_nonkerning_groups:Mapping[str List[str]]={key:glyph_names<for>key,glyph_names default_font.groups.items()<if><not>key.startswith(("public.kern1." "public.kern2."))}<line_sep># Kerning groups are taken care of by the kerning Variator. copy_info:ufoLib2.objects.Info=default_font.info<line_sep>copy_lib:Mapping[str Any]=default_font.lib<line_sep># The list of glyphs-not-to-export-and-decompose-where-used-as-a-component is # supposed to be taken from the Designspace when a Designspace is used as the # starting point of the compilation process. It should be exported to all # instance libs, where the ufo2ft compilation functions will pick it up. 
skip_export_glyphs=designspace.lib.get("public.skipExportGlyphs" [])<line_sep><return>cls(axis_bounds copy_feature_text copy_nonkerning_groups copy_info copy_lib designspace.default.location designspace.rules glyph_mutators glyph_name_to_unicodes info_mutator kerning_mutator round_geometry skip_export_glyphs special_axes )<block_end><def_stmt>generate_instance self instance:designspaceLib.InstanceDescriptor<arrow>ufoLib2.Font<block_start>"""Generate an interpolated instance font object for an InstanceDescriptor."""<if_stmt>anisotropic(instance.location)<block_start><raise>InstantiatorError(f"Instance {instance.familyName}-"<concat>f"{instance.styleName}: Anisotropic location "<concat>f"{instance.location} not supported by varLib.")<block_end>font=ufoLib2.Font()<line_sep># Instances may leave out locations that match the default source, so merge # default location with the instance's location. location={**self.default_design_location **instance.location}<line_sep>location_normalized=varLib.models.normalizeLocation(location self.axis_bounds)<line_sep># Kerning kerning_instance=self.kerning_mutator.instance_at(location_normalized)<if_stmt>self.round_geometry<block_start>kerning_instance.round()<block_end>kerning_instance.extractKerning(font)<line_sep># Info self._generate_instance_info(instance location_normalized location font)<line_sep># Non-kerning groups. Kerning groups have been taken care of by the kerning # instance. <for_stmt>key,glyph_names self.copy_nonkerning_groups.items()<block_start>font.groups[key]=[name<for>name glyph_names]<block_end># Features font.features.text=self.copy_feature_text<line_sep># Lib # 1. Copy the default lib to the instance. font.lib=typing.cast(dict copy.deepcopy(self.copy_lib))<line_sep># 2. Copy the Designspace's skipExportGlyphs list over to the UFO to # make sure it wins over the default UFO one. font.lib["public.skipExportGlyphs"]=[name<for>name self.skip_export_glyphs]<line_sep># 3. Write _design_ location to instance's lib. 
font.lib["designspace.location"]=[loc<for>loc location.items()]<line_sep># Glyphs <for_stmt>glyph_name,glyph_mutator self.glyph_mutators.items()<block_start>glyph=font.newGlyph(glyph_name)<try_stmt><block_start>glyph_instance=glyph_mutator.instance_at(location_normalized)<if_stmt>self.round_geometry<block_start>glyph_instance=glyph_instance.round()<block_end># onlyGeometry=True does not set name and unicodes, in ufoLib2 we can't # modify a glyph's name. Copy unicodes from default font. glyph_instance.extractGlyph(glyph onlyGeometry=<true>)<block_end><except_stmt>Exception<as>e# TODO: Figure out what exceptions fontMath/varLib can throw. # By default, explode if we cannot generate a glyph instance for # whatever reason (usually outline incompatibility)... <block_start><if_stmt>glyph_name<not><in>self.skip_export_glyphs<block_start><raise>InstantiatorError(f"Failed to generate instance of glyph '{glyph_name}': "<concat>f"{str(e)}. (Note: the most common cause for an error here is "<concat>"that the glyph outlines are not point-for-point compatible or "<concat>"have the same starting point or are in the same order in all "<concat>"masters.)")<from>e<block_end># ...except if the glyph is in public.skipExportGlyphs and would # therefore be removed from the compiled font anyway. There's not much # we can do except leave it empty in the instance and tell the user. logger.warning("Failed to generate instance of glyph '%s', which is marked as "<concat>"non-exportable. Glyph will be left empty. Failure reason: %s" glyph_name e )<block_end>glyph.unicodes=[uv<for>uv self.glyph_name_to_unicodes[glyph_name]]<block_end># Process rules glyph_names_list=self.glyph_mutators.keys()<line_sep># The order of the swaps below is independent of the order of glyph names. # It depends on the order of the <sub>s in the designspace rules. 
swaps=process_rules_swaps(self.designspace_rules location glyph_names_list)<for_stmt>name_old,name_new swaps<block_start><if_stmt>name_old<ne>name_new<block_start>swap_glyph_names(font name_old name_new)<block_end><block_end><return>font<block_end><def_stmt>_generate_instance_info self instance:designspaceLib.InstanceDescriptor location_normalized:Location location:Location font:ufoLib2.Font <arrow><none><block_start>"""Generate fontinfo related attributes. Separate, as fontinfo treatment is more extensive than the rest. """<line_sep>info_instance=self.info_mutator.instance_at(location_normalized)<if_stmt>self.round_geometry<block_start>info_instance=info_instance.round()<block_end>info_instance.extractInfo(font.info)<line_sep># Copy non-interpolating metadata from the default font. <for_stmt>attribute UFO_INFO_ATTRIBUTES_TO_COPY_TO_INSTANCES<block_start><if_stmt>hasattr(self.copy_info attribute)<block_start>setattr(font.info attribute copy.deepcopy(getattr(self.copy_info attribute)) )<block_end><block_end># TODO: multilingual names to replace possibly existing name records. <if_stmt>instance.familyName<block_start>font.info.familyName=instance.familyName<block_end><if_stmt>instance.styleName<is><none><block_start>logger.warning("The given instance or instance at location %s is missing the "<concat>"stylename attribute, which is required. Copying over the styleName "<concat>"from the default font, which is probably wrong." 
location )<line_sep>font.info.styleName=self.copy_info.styleName<block_end><else_stmt><block_start>font.info.styleName=instance.styleName<block_end><if_stmt>instance.postScriptFontName<block_start>font.info.postscriptFontName=instance.postScriptFontName<block_end><if_stmt>instance.styleMapFamilyName<block_start>font.info.styleMapFamilyName=instance.styleMapFamilyName<block_end><if_stmt>instance.styleMapStyleName<block_start>font.info.styleMapStyleName=instance.styleMapStyleName<block_end># If the masters haven't set the OS/2 weight and width class, use the # user-space values ("input") of the axis mapping in the Designspace file for # weight and width axes, if they exist. The slnt axis' value maps 1:1 to # italicAngle. Clamp the values to the valid ranges. <if_stmt>info_instance.openTypeOS2WeightClass<is><none><and>"wght"<in>self.special_axes<block_start>weight_axis=self.special_axes["wght"]<line_sep>font.info.openTypeOS2WeightClass=weight_class_from_wght_value(weight_axis.map_backward(location[weight_axis.name]))<block_end><if_stmt>info_instance.openTypeOS2WidthClass<is><none><and>"wdth"<in>self.special_axes<block_start>width_axis=self.special_axes["wdth"]<line_sep>font.info.openTypeOS2WidthClass=width_class_from_wdth_value(width_axis.map_backward(location[width_axis.name]))<block_end><if_stmt>info_instance.italicAngle<is><none><and>"slnt"<in>self.special_axes<block_start>slant_axis=self.special_axes["slnt"]<line_sep>font.info.italicAngle=italic_angle_from_slnt_value(slant_axis.map_backward(location[slant_axis.name]))<block_end><block_end><block_end><def_stmt>_error_msg_no_default designspace:designspaceLib.DesignSpaceDocument<arrow>str<block_start><if_stmt>any(axis.map<for>axis designspace.axes)<block_start>bonus_msg=("For axes with a mapping, the 'default' values should have an "<concat>"'input=\"...\"' map value, where the corresponding 'output=\"...\"' "<concat>"value then points to the master 
source.")<block_end><else_stmt><block_start>bonus_msg=""<block_end>default_location=", ".join(f"{k}: {v}"<for>k,v designspace.newDefaultLocation().items())<line_sep><return>("Can't generate UFOs from this Designspace because there is no default "<concat>f"master source at location '{default_location}'. Check that all 'default' "<concat>"values of all axes together point to a single actual master source. "<concat>f"{bonus_msg}")<block_end><def_stmt>location_to_key location:Location<arrow>LocationKey<block_start>"""Converts a Location into a sorted tuple so it can be used as a dict key."""<line_sep><return>tuple(sorted(location.items()))<block_end><def_stmt>anisotropic location:Location<arrow>bool<block_start>"""Tests if any single location value is a MutatorMath-style anisotropic value, i.e. is a tuple of (x, y)."""<line_sep><return>any(isinstance(v tuple)<for>v location.values())<block_end><def_stmt>collect_info_masters designspace:designspaceLib.DesignSpaceDocument axis_bounds:AxisBounds<arrow>List[Tuple[Location FontMathObject]]<block_start>"""Return master Info objects wrapped by MathInfo."""<line_sep>locations_and_masters=[]<for_stmt>source designspace.sources<block_start><if_stmt>source.layerName<is><not><none><block_start><continue><block_end># No font info in source layers. normalized_location=varLib.models.normalizeLocation(source.location axis_bounds)<line_sep>locations_and_masters.append((normalized_location fontMath.MathInfo(source.font.info)))<block_end><return>locations_and_masters<block_end><def_stmt>collect_kerning_masters designspace:designspaceLib.DesignSpaceDocument axis_bounds:AxisBounds<arrow>List[Tuple[Location FontMathObject]]<block_start>"""Return master kerning objects wrapped by MathKerning."""<line_sep># Always take the groups from the default source. This also avoids fontMath # making a union of all groups it is given. 
groups=designspace.default.font.groups<line_sep>locations_and_masters=[]<for_stmt>source designspace.sources<block_start><if_stmt>source.layerName<is><not><none><block_start><continue><block_end># No kerning in source layers. # If a source has groups, they should match the default's. <if_stmt>source.font.groups<and>source.font.groups<ne>groups<block_start>logger.warning("The source %s (%s) contains different groups than the default source. "<concat>"The default source's groups will be used for the instances." source.name source.filename )<block_end># This assumes that groups of all sources are the same. normalized_location=varLib.models.normalizeLocation(source.location axis_bounds)<line_sep>locations_and_masters.append((normalized_location fontMath.MathKerning(source.font.kerning groups)))<block_end><return>locations_and_masters<block_end><def_stmt>collect_glyph_masters designspace:designspaceLib.DesignSpaceDocument glyph_name:str axis_bounds:AxisBounds <arrow>List[Tuple[Location FontMathObject]]<block_start>"""Return master glyph objects for glyph_name wrapped by MathGlyph. Note: skips empty source glyphs if the default glyph is not empty to almost match what ufoProcessor is doing. In e.g. Mutator Sans, the 'S.closed' glyph is left empty in one source layer. One could treat this as a source error, but ufoProcessor specifically has code to skip that empty glyph and carry on. """<line_sep>locations_and_masters=[]<line_sep>default_glyph_empty=<false><line_sep>other_glyph_empty=<false><for_stmt>source designspace.sources<block_start><if_stmt>source.layerName<is><none># Source font. <block_start>source_layer=source.font.layers.defaultLayer<block_end><else_stmt># Source layer. <block_start>source_layer=source.font.layers[source.layerName]<block_end># Sparse fonts do not and layers may not contain every glyph. 
<if_stmt>glyph_name<not><in>source_layer<block_start><continue><block_end>source_glyph=source_layer[glyph_name]<if_stmt><not>(source_glyph.contours<or>source_glyph.components)<block_start><if_stmt>source<is>designspace.findDefault()<block_start>default_glyph_empty=<true><block_end><else_stmt><block_start>other_glyph_empty=<true><block_end><block_end>normalized_location=varLib.models.normalizeLocation(source.location axis_bounds)<line_sep>locations_and_masters.append((normalized_location fontMath.MathGlyph(source_glyph)))<block_end># Filter out empty glyphs if the default glyph is not empty. <if_stmt><not>default_glyph_empty<and>other_glyph_empty<block_start>locations_and_masters=[(loc master)<for>loc,master locations_and_masters<if>master.contours<or>master.components]<block_end><return>locations_and_masters<block_end><def_stmt>width_class_from_wdth_value wdth_user_value<arrow>int<block_start>"""Return the OS/2 width class from the wdth axis user value. The OpenType 1.8.3 specification states: When mapping from 'wdth' values to usWidthClass, interpolate fractional values between the mapped values and then round, and clamp to the range 1 to 9. "Mapped values" probably means the in-percent numbers layed out for the OS/2 width class, so we are forcing these numerical semantics on the user values of the wdth axis. 
"""<line_sep>width_user_value=min(max(wdth_user_value 50) 200)<line_sep>width_user_value_mapped=varLib.models.piecewiseLinearMap(width_user_value WDTH_VALUE_TO_OS2_WIDTH_CLASS)<line_sep><return>fontTools.misc.fixedTools.otRound(width_user_value_mapped)<block_end><def_stmt>weight_class_from_wght_value wght_user_value<arrow>int<block_start>"""Return the OS/2 weight class from the wght axis user value."""<line_sep>weight_user_value=min(max(wght_user_value 1) 1000)<line_sep><return>fontTools.misc.fixedTools.otRound(weight_user_value)<block_end><def_stmt>italic_angle_from_slnt_value slnt_user_value<arrow>Union[int float]<block_start>"""Return the italic angle from the slnt axis user value."""<line_sep>slant_user_value=min(max(slnt_user_value -90) 90)<line_sep><return>slant_user_value<block_end><def_stmt>swap_glyph_names font:ufoLib2.Font name_old:str name_new:str<block_start>"""Swap two existing glyphs in the default layer of a font (outlines, width, component references, kerning references, group membership). The idea behind swapping instead of overwriting is explained in https://github.com/fonttools/fonttools/tree/main/Doc/source/designspaceLib#ufo-instances. We need to keep the old glyph around in case any other glyph references it; glyphs that are not explicitly substituted by rules should not be affected by the rule application. The .unicodes are not swapped. The rules mechanism is supposed to swap glyphs, not characters. """<if_stmt>name_old<not><in>font<or>name_new<not><in>font<block_start><raise>InstantiatorError(f"Cannot swap glyphs '{name_old}' and '{name_new}', as either or both are "<concat>"missing.")<block_end># 1. Swap outlines and glyph width. Ignore lib content and other properties. 
glyph_swap=ufoLib2.objects.Glyph(name="temporary_swap_glyph")<line_sep>glyph_old=font[name_old]<line_sep>glyph_new=font[name_new]<line_sep>p=glyph_swap.getPointPen()<line_sep>glyph_old.drawPoints(p)<line_sep>glyph_swap.width=glyph_old.width<line_sep>glyph_old.clearContours()<line_sep>glyph_old.clearComponents()<line_sep>p=glyph_old.getPointPen()<line_sep>glyph_new.drawPoints(p)<line_sep>glyph_old.width=glyph_new.width<line_sep>glyph_new.clearContours()<line_sep>glyph_new.clearComponents()<line_sep>p=glyph_new.getPointPen()<line_sep>glyph_swap.drawPoints(p)<line_sep>glyph_new.width=glyph_swap.width<line_sep># 2. Swap anchors. glyph_swap.anchors=glyph_old.anchors<line_sep>glyph_old.anchors=glyph_new.anchors<line_sep>glyph_new.anchors=glyph_swap.anchors<line_sep># 3. Remap components. <for_stmt>g font<block_start><for_stmt>c g.components<block_start><if_stmt>c.baseGlyph<eq>name_old<block_start>c.baseGlyph=name_new<block_end><elif_stmt>c.baseGlyph<eq>name_new<block_start>c.baseGlyph=name_old<block_end><block_end><block_end># 4. Swap literal names in kerning. kerning_new={}<for_stmt>first,second font.kerning.keys()<block_start>value=font.kerning[(first second)]<if_stmt>first<eq>name_old<block_start>first=name_new<block_end><elif_stmt>first<eq>name_new<block_start>first=name_old<block_end><if_stmt>second<eq>name_old<block_start>second=name_new<block_end><elif_stmt>second<eq>name_new<block_start>second=name_old<block_end>kerning_new[(first second)]=value<block_end>font.kerning=kerning_new<line_sep># 5. Swap names in groups. 
<for_stmt>group_name,group_members font.groups.items()<block_start>group_members_new=[]<for_stmt>name group_members<block_start><if_stmt>name<eq>name_old<block_start>group_members_new.append(name_new)<block_end><elif_stmt>name<eq>name_new<block_start>group_members_new.append(name_old)<block_end><else_stmt><block_start>group_members_new.append(name)<block_end><block_end>font.groups[group_name]=group_members_new<block_end><block_end>@attr.s(auto_attribs=<true> frozen=<true> slots=<true>)<class_stmt>Variator<block_start>"""A middle-man class that ingests a mapping of normalized locations to masters plus axis definitions and uses varLib to spit out interpolated instances at specified normalized locations. fontMath objects stand in for the actual master objects from the UFO. Upon generating an instance, these objects have to be extracted into an actual UFO object. """<line_sep>masters:List[FontMathObject]<line_sep>location_to_master:Mapping[LocationKey FontMathObject]<line_sep>model:varLib.models.VariationModel<line_sep>@classmethod<def_stmt>from_masters cls items:List[Tuple[Location FontMathObject]] axis_order:List[str]<block_start>masters=[]<line_sep>master_locations=[]<line_sep>location_to_master={}<for_stmt>normalized_location,master items<block_start>master_locations.append(normalized_location)<line_sep>masters.append(master)<line_sep>location_to_master[location_to_key(normalized_location)]=master<block_end>model=varLib.models.VariationModel(master_locations axis_order)<line_sep><return>cls(masters location_to_master model)<block_end><def_stmt>instance_at self normalized_location:Location<arrow>FontMathObject<block_start>"""Return a FontMathObject for the specified location ready to be inflated. If an instance location matches a master location, this method returns the master data instead of running through varLib. This is both an optimization _and_ it enables having a Designspace with instances matching their masters without requiring them to be compatible. 
Glyphs.app works this way; it will only generate a font from an instance, but compatibility is only required if there is actual interpolation to be done. This enables us to store incompatible bare masters in one Designspace and having arbitrary instance data applied to them. """<line_sep>normalized_location_key=location_to_key(normalized_location)<if_stmt>normalized_location_key<in>self.location_to_master<block_start><return>copy.deepcopy(self.location_to_master[normalized_location_key])<block_end><return>self.model.interpolateFromMasters(normalized_location self.masters)<block_end><block_end>
<import_stmt>os<line_sep>train_src="../dynet_nmt/data/train.de-en.de.wmixerprep"<line_sep>train_tgt="../dynet_nmt/data/train.de-en.en.wmixerprep"<line_sep>dev_src="../dynet_nmt/data/valid.de-en.de"<line_sep>dev_tgt="../dynet_nmt/data/valid.de-en.en"<line_sep>test_src="../dynet_nmt/data/test.de-en.de"<line_sep>test_tgt="../dynet_nmt/data/test.de-en.en"<for_stmt>temp [0.5]<block_start>job_name='iwslt14.raml.corrupt_ngram.t%.3f'%temp<line_sep>train_log='train.'+job_name+'.log'<line_sep>model_name='model.'+job_name<line_sep>decode_file='iwslt14.test.en.raml.corrupt_ngram.t%.3f'%temp<line_sep>job_file='scripts/train.%s.sh'%job_name<with_stmt>open(job_file 'w')<as>f<block_start>f.write("""#!/bin/sh python nmt.py \ --cuda \ --mode test \ --load_model models/{model_name}.bin \ --beam_size 5 \ --decode_max_time_step 100 \ --save_to_file decode/{decode_file} \ --test_src {test_src} \ --test_tgt {test_tgt} echo "test result" >> logs/{train_log} perl multi-bleu.perl {test_tgt} < decode/{decode_file} >> logs/{train_log} """.format(model_name=model_name temp=temp train_src=train_src train_tgt=train_tgt dev_src=dev_src dev_tgt=dev_tgt test_src=test_src test_tgt=test_tgt train_log=train_log decode_file=decode_file))<block_end>os.system('bash submit_job.sh %s'%job_file)<block_end>
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making GameAISDK available. This source code file is licensed under the GNU General Public License Version 3. For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package. Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. """<import_stmt>os<import_stmt>sys<import_stmt>logging<import_from_stmt>logging.handlers RotatingFileHandler<import_stmt>cv2<import_from_stmt>.androidDeviceAPI AndroidDeviceAPI<import_from_stmt>.APIDefine LOG_DEBUG LOG_DEFAULT TOUCH_CMD_LIST DEVICE_CMD_LIST TOUCH_KEY TOUCH_CLICK TOUCH_UP TOUCH_MOVE TOUCH_DOWN TOUCH_SWIPE TOUCH_SWIPEMOVE TOUCH_RESET DEVICE_KEY DEVICE_CLICK DEVICE_CLEARAPP DEVICE_CURAPP DEVICE_EXIT DEVICE_INSTALL DEVICE_START DEVICE_TEXT DEVICE_SCREENORI DEVICE_SCREENSHOT DEVICE_MAXCONTACT DEVICE_PARAM DEVICE_SLEEP DEVICE_SWIPE DEVICE_WAKE DEVICE_WMSIZE LOG_LIST LOG_FORMAT<import_from_stmt>...iDevice IDevice<line_sep>cur_dir=os.path.dirname(os.path.abspath(__file__))<line_sep>sys.path.append(cur_dir)<line_sep>PP_RET_OK=0<class_stmt>AndroidDevice(IDevice)<block_start><def_stmt>__init__ self platform_type<block_start>super(AndroidDevice self).__init__(platform_type)<line_sep>self.__deviceApi=AndroidDeviceAPI(platform_type)<line_sep>self.__height=-1<line_sep>self.__width=-1<line_sep>self.__pid=os.getpid()<line_sep>self.__serial='*'<line_sep>self.__showScreen=<false><line_sep>self.__maxContact=10<line_sep>self.__logger=<none><block_end><def_stmt>initialize self log_dir **kwargs<block_start>""" :param device_serial: str, 手机序列号,默认为None,当接入一个设备时可不指定序列号,当接入多个设备时需要指定 :param long_edge: int, 长边的长度 :param log_dir: str, 日志存放目录 :param level: enum, 指定日志级别, 取值为[LOG_DEBUG, LOG_INFO, LOG_WARNING, LOG_ERROR, LOG_CRITICAL],默认为LOG_DEBUG :param show_raw_screen: bool, 是否显示手机图片 :param kwargs: dict, 一些组件需要的参数,可以自己定义,例如端口号等等 
"""<line_sep>level=kwargs.pop('level')<if>'level'<in>kwargs<else>logging.DEBUG<line_sep>long_edge=kwargs.pop('long_edge')<if>'long_edge'<in>kwargs<else>1280<line_sep>device_serial=kwargs.pop('device_serial')<if>'device_serial'<in>kwargs<else><none><line_sep>show_raw_screen=kwargs.pop('show_raw_screen')<if>'show_raw_screen'<in>kwargs<else><false><if_stmt>device_serial<is><not><none><block_start>log_dir=os.path.join(log_dir device_serial.replace(':' "_"))+os.path.sep<line_sep>self.__serial=device_serial<if_stmt><not>self._LogInit(log_dir level device_serial)<block_start><raise>RuntimeError("init log failed")<block_end><block_end><else_stmt><block_start>log_dir=os.path.join(log_dir LOG_DEFAULT)+os.path.sep<if_stmt><not>self._LogInit(log_dir level LOG_DEFAULT)<block_start><raise>RuntimeError("init log failed")<block_end><block_end>kwargs['standalone']=0<if>os.environ.get("PLATFORM_IP")<else>1<if_stmt><not>self.__deviceApi.Initialize(device_serial long_edge **kwargs)<block_start>self.__logger.error('DeviceAPI initial failed')<line_sep><raise>RuntimeError("DeviceAPI initial failed")<block_end>self.__showScreen=show_raw_screen<line_sep>self.__maxContact=self.__deviceApi.GetMaxContact()<line_sep>self.__height,self.__width,strError=self.__deviceApi.GetScreenResolution()<if_stmt>self.__height<eq>-1<and>self.__width<eq>-1<block_start>self.__logger.error(strError)<line_sep><raise>RuntimeError(strError)<block_end>height=long_edge<line_sep>width=self.__width<times>height/self.__height<line_sep>self.__width=width<line_sep>self.__height=height<line_sep>self.__logger.info("init successful")<line_sep><return><true><block_end><def_stmt>deInitialize self<block_start><return>self.__deviceApi.DeInitialize()<block_end><def_stmt>getScreen self **kwargs<block_start>""" :return: Mat类型的图像/None """<line_sep>err,image=self.__deviceApi.GetFrame()<if_stmt>err<ne>PP_RET_OK<block_start>self.__logger.error('failed to get 
frame')<line_sep><return><none><block_end><if_stmt>image<is><not><none><and>self.__showScreen<block_start>self.__logger.info("get image")<line_sep>cv2.imshow('pid:'+str(self.__pid)+' serial:'+str(self.__serial) image)<line_sep>cv2.waitKey(1)<block_end><return>image<block_end><def_stmt>doAction self **kwargs<block_start>aType=kwargs['aType']<if_stmt>aType<in>TOUCH_CMD_LIST<block_start><return>self.TouchCMD(**kwargs)<block_end><if_stmt>aType<in>DEVICE_CMD_LIST<block_start><return>self.DeviceCMD(**kwargs)<block_end><raise>Exception("unknown action type: %s, %s" aType kwargs)<block_end><def_stmt>TouchCMD self **kwargs<block_start>""" 执行操作 :kwargs: dict, aType参数表示动作类型[TOUCH_CLICK, TOUCH_DOWN, TOUCH_UP, TOUCH_SWIPE, TOUCH_MOVE] sx为x坐标,当aType为[TOUCH_CLICK, TOUCH_DOWN]时表示按压点的x坐标, 当aType为[TOUCH_SWIPE, TOUCH_MOVE]时表示起始点的x坐标 sy为y坐标,当aType为[TOUCH_CLICK, TOUCH_DOWN]时表示按压点的y坐标, 当aType为[TOUCH_SWIPE, TOUCH_MOVE]时表示起始点的y坐标 ex为x坐标,当aType为[TOUCH_SWIPE, TOUCH_MOVE]时表示结束点的x坐标 ex为y坐标,当aType为[TOUCH_SWIPE, TOUCH_MOVE]时表示结束点的y坐标 DaType为执行该操作的方式,有minitouch方式和ADB命令方式,分别表示为[DACT_TOUCH, DACT_ADB],默认为DACT_TOUCH contact为触点,默认为0 durationMS为执行一次动作持续的时间,在aType为[TOUCH_CLICK, TOUCH_SWIPE]时使用, 当aType为TOUCH_CLICK时默认为-1,当aType为TOUCH_SWIPE时默认为50 needUp仅在aType为TOUCH_SWIPE时使用,表示滑动后是否需要抬起,默认为True :return: True or False """<for_stmt>key kwargs<block_start><if_stmt>key<not><in>TOUCH_KEY<block_start>self.__logger.error('wrong key of kwargs: %s' key)<line_sep><return><false><block_end><block_end>actionType=kwargs.get('aType')<if_stmt><not>actionType<block_start>self.__logger.error('aType is needed when exec TouchCommand')<line_sep><return><false><block_end>px=sx=kwargs.get('sx' <none>)<line_sep>py=sy=kwargs.get('sy' <none>)<line_sep>ex=kwargs.get('ex' <none>)<line_sep>ey=kwargs.get('ey' <none>)<line_sep>contact=kwargs.get('contact' 0)<line_sep>durationMS=kwargs.get('durationMS' 0)<line_sep>needUp=kwargs.get('needUp' <true>)<line_sep>wait_time=kwargs.get('wait_time' 
0)<if_stmt>actionType<eq>TOUCH_CLICK<block_start>self.__logger.info("platform Click, x: %s, y: %s, contact: %s, durationMS: %s, waitTime: %s" px py contact durationMS wait_time)<line_sep>self.__deviceApi.Click(px py contact durationMS wait_time)<block_end><elif_stmt>actionType<eq>TOUCH_DOWN<block_start>self.__logger.info("platform Down, x: %s, y: %s, contact: %s, waitTime: %s" px py contact wait_time)<line_sep>self.__deviceApi.Down(px py contact wait_time)<block_end><elif_stmt>actionType<eq>TOUCH_UP<block_start>self.__logger.info("platform Up, contact: %s, waitTime: %s" contact wait_time)<line_sep>self.__deviceApi.Up(contact wait_time)<block_end><elif_stmt>actionType<eq>TOUCH_SWIPE<block_start><if_stmt>durationMS<le>0<block_start>durationMS=50<block_end>self.__logger.info("platform Swipe, sx: %s, sy: %s, ex: %s, ey: %s, "<concat>"contact: %s, durationMS: %s, waitTime: %s" sx sy ex ey contact durationMS wait_time)<line_sep>self.__deviceApi.Swipe(sx sy ex ey contact durationMS needUp wait_time)<block_end><elif_stmt>actionType<eq>TOUCH_MOVE<block_start>self.__logger.info("platform Move, x: %s, y: %s, contact: %s, waitTime: %s" px py contact wait_time)<line_sep>self.__deviceApi.Move(px py contact wait_time)<block_end><elif_stmt>actionType<eq>TOUCH_SWIPEMOVE<block_start><if_stmt>durationMS<le>0<block_start>durationMS=50<block_end>self.__logger.info("platform SwipeMove, px: %s, py: %s, contact: %s, durationMS: %s waitTime: %s" px py contact durationMS wait_time)<line_sep>self.__deviceApi.SwipeMove(px py contact durationMS wait_time)<block_end><elif_stmt>actionType<eq>TOUCH_RESET<block_start>self.__logger.info("platform Reset, waitTime: %s" wait_time)<line_sep>self.__deviceApi.Reset(wait_time=wait_time)<block_end><else_stmt><block_start>self.__logger.error('Wrong aType when TouchCommand, aType:%s' actionType)<line_sep><return><false><block_end><return><true><block_end><def_stmt>DeviceCMD self **kwargs<block_start>""" 执行设备相关的操作 aType:操作类型[DEVICE_INSTALL, DEVICE_START, 
DEVICE_EXIT, DEVICE_CURAPP, DEVICE_CLEARAPP, DEVICE_KEY, DEVICE_TEXT, DEVICE_SLEEP, DEVICE_WAKE, DEVICE_WMSIZE, DEVICE_BINDRO, DEVICE_SCREENSHOT, DEVICE_SCREENORI, DEVICE_PARAM] APKPath:安装包路径 PKGName:包名 ActivityName:包的activity key:字母 text:键盘输入的字符串 """<line_sep>actionType=kwargs.get('aType')<if_stmt><not>actionType<block_start>self.__logger.error('aType is needed when exec DeviceCommand')<line_sep><return><false><block_end><if_stmt>actionType<eq>DEVICE_INSTALL<block_start>APKPath=kwargs.get('APKPath' <none>)<if_stmt><not>self.__deviceApi.InstallAPP(APKPath)<block_start>self.__logger.error('install app failed: %s' APKPath)<line_sep><return><false><block_end><block_end><elif_stmt>actionType<eq>DEVICE_START<block_start>PKGName=kwargs.get('PKGName' <none>)<line_sep>ActivityName=kwargs.get('ActivityName' <none>)<line_sep>self.__deviceApi.LaunchAPP(PKGName ActivityName)<block_end><elif_stmt>actionType<eq>DEVICE_EXIT<block_start>PKGName=kwargs.get('PKGName' <none>)<line_sep>self.__deviceApi.ExitAPP(PKGName)<block_end><elif_stmt>actionType<eq>DEVICE_CURAPP<block_start><return>self.__deviceApi.CurrentApp()<block_end><elif_stmt>actionType<eq>DEVICE_CLEARAPP<block_start>PKGName=kwargs.get('PKGName' <none>)<line_sep>self.__deviceApi.ClearAppData(PKGName)<block_end><elif_stmt>actionType<eq>DEVICE_KEY<block_start>key=kwargs.get('key' <none>)<line_sep>self.__deviceApi.Key(key)<block_end><elif_stmt>actionType<eq>DEVICE_TEXT<block_start>text=kwargs.get('text' <none>)<line_sep>self.__deviceApi.Text(text)<block_end><elif_stmt>actionType<eq>DEVICE_SLEEP<block_start>self.__deviceApi.Sleep()<block_end><elif_stmt>actionType<eq>DEVICE_WAKE<block_start>self.__deviceApi.Wake()<block_end><elif_stmt>actionType<eq>DEVICE_WMSIZE<block_start><return>self.__deviceApi.WMSize()<block_end><elif_stmt>actionType<eq>DEVICE_SCREENSHOT<block_start>targetPath=kwargs.get('targetPath' 
<none>)<line_sep>self.__deviceApi.TakeScreenshot(targetPath)<block_end><elif_stmt>actionType<eq>DEVICE_SCREENORI<block_start><return>self.__deviceApi.GetScreenOri()<block_end><elif_stmt>actionType<eq>DEVICE_MAXCONTACT<block_start><return>self.__maxContact<block_end><elif_stmt>actionType<eq>DEVICE_CLICK<block_start>px=kwargs.get('sx' <none>)<line_sep>py=kwargs.get('sy' <none>)<line_sep>self.__deviceApi.ADBClick(px py)<block_end><elif_stmt>actionType<eq>DEVICE_SWIPE<block_start>sx=kwargs.get('sx' <none>)<line_sep>sy=kwargs.get('sy' <none>)<line_sep>ex=kwargs.get('ex' <none>)<line_sep>ey=kwargs.get('ey' <none>)<line_sep>durationMS=kwargs.get('durationMS' 50)<line_sep>self.__deviceApi.ADBSwipe(sx sy ex ey durationMS=durationMS)<block_end><elif_stmt>actionType<eq>DEVICE_PARAM<block_start>packageName=kwargs.get('PKGName' <none>)<line_sep><return>self.__deviceApi.GetDeviceParame(packageName)<block_end><else_stmt><block_start>self.__logger.error('wrong aType when exec DeviceCommand, aType:%s' actionType)<line_sep><return><false><block_end><return><true><block_end># def _GetValuesInkwargs(self, key, isNessesary, defaultValue, kwargs): # try: # if not isNessesary: # if key not in kwargs: # return True, defaultValue # else: # return True, kwargs[key] # else: # return True, kwargs[key] # except KeyError as e: # self.__logger.error(e) # return False, 'key error' <def_stmt>_LogInit self log_dir level device_serial<block_start><if_stmt><not>isinstance(log_dir str)<block_start>logging.error('wrong log_dir when init LOG, log_dir:%s' log_dir)<line_sep><return><false><block_end><if_stmt>level<not><in>LOG_LIST<block_start>logging.warning('wrong level when init LOG, level:%s, use default level: DEBUG' 
level)<line_sep>level=LOG_DEBUG<block_end><if_stmt><not>os.path.exists(log_dir)<block_start>os.makedirs(log_dir)<block_end>self.__logger=logging.getLogger(device_serial)<if_stmt><not>self.__logger.handlers<block_start>console=logging.StreamHandler()<line_sep>formatter=logging.Formatter(LOG_FORMAT)<line_sep>console.setFormatter(formatter)<line_sep>fileHandler=RotatingFileHandler(filename=os.path.join(log_dir 'DeviceAPI.log') maxBytes=2048000 backupCount=10)<line_sep>fileHandler.setFormatter(formatter)<line_sep>self.__logger.addHandler(fileHandler)<line_sep>self.__logger.addHandler(console)<line_sep>self.__logger.setLevel(level)<block_end>loggerWeTest=logging.getLogger('PlatformWeTest')<if_stmt><not>loggerWeTest.handlers<block_start>fileHandler=RotatingFileHandler(filename=os.path.join(log_dir 'PlatformWeTest.log') maxBytes=2048000 backupCount=10)<line_sep>fileHandler.setFormatter(formatter)<line_sep>loggerWeTest.addHandler(fileHandler)<line_sep>loggerWeTest.setLevel(level)<block_end><return><true><block_end># def _CheckException(self): # if exceptionQueue.empty() is False: # errorStr = exceptionQueue.get() # while exceptionQueue.empty() is False: # errorStr = exceptionQueue.get() # raise Exception(errorStr) <block_end>
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for sharded_mutable_dense_hashtable.py."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.python.platform googletest<import_from_stmt>tensorflow_estimator.python.estimator.canned.linear_optimizer.python.utils.sharded_mutable_dense_hashtable _ShardedMutableDenseHashTable<class_stmt>_ShardedMutableDenseHashTableTest(tf.test.TestCase)<block_start>"""Tests for the ShardedMutableHashTable class."""<def_stmt>testShardedMutableHashTable self<block_start><for_stmt>num_shards [1 3 10]<block_start><with_stmt>self.cached_session()<block_start>default_val=-1<line_sep>empty_key=0<line_sep>deleted_key=-1<line_sep>keys=tf.constant([11 12 13] tf.dtypes.int64)<line_sep>values=tf.constant([0 1 2] tf.dtypes.int64)<line_sep>table=_ShardedMutableDenseHashTable(tf.dtypes.int64 tf.dtypes.int64 default_val empty_key deleted_key num_shards=num_shards)<line_sep>self.assertAllEqual(0 self.evaluate(table.size()))<line_sep>self.evaluate(table.insert(keys values))<line_sep>self.assertAllEqual(3 self.evaluate(table.size()))<line_sep>input_string=tf.constant([11 12 14] tf.dtypes.int64)<line_sep>output=table.lookup(input_string)<line_sep>self.assertAllEqual([3] 
output.get_shape())<line_sep>self.assertAllEqual([0 1 -1] self.evaluate(output))<block_end><block_end><block_end><def_stmt>testShardedMutableHashTableVectors self<block_start><for_stmt>num_shards [1 3 10]<block_start><with_stmt>self.cached_session()<block_start>default_val=[-0.1 0.2]<line_sep>empty_key=[0 1]<line_sep>deleted_key=[1 0]<line_sep>keys=tf.constant([[11 12] [13 14] [15 16]] tf.dtypes.int64)<line_sep>values=tf.constant([[0.5 0.6] [1.5 1.6] [2.5 2.6]] tf.dtypes.float32)<line_sep>table=_ShardedMutableDenseHashTable(tf.dtypes.int64 tf.dtypes.float32 default_val empty_key deleted_key num_shards=num_shards)<line_sep>self.assertAllEqual(0 self.evaluate(table.size()))<line_sep>self.evaluate(table.insert(keys values))<line_sep>self.assertAllEqual(3 self.evaluate(table.size()))<line_sep>input_string=tf.constant([[11 12] [13 14] [11 14]] tf.dtypes.int64)<line_sep>output=table.lookup(input_string)<line_sep>self.assertAllEqual([3 2] output.get_shape())<line_sep>self.assertAllClose([[0.5 0.6] [1.5 1.6] [-0.1 0.2]] self.evaluate(output))<block_end><block_end><block_end><def_stmt>testExportSharded self<block_start><with_stmt>self.cached_session()<block_start>empty_key=-2<line_sep>deleted_key=-3<line_sep>default_val=-1<line_sep>num_shards=2<line_sep>keys=tf.constant([10 11 12] tf.dtypes.int64)<line_sep>values=tf.constant([2 3 4] tf.dtypes.int64)<line_sep>table=_ShardedMutableDenseHashTable(tf.dtypes.int64 tf.dtypes.int64 default_val empty_key deleted_key num_shards=num_shards)<line_sep>self.assertAllEqual(0 self.evaluate(table.size()))<line_sep>self.evaluate(table.insert(keys values))<line_sep>self.assertAllEqual(3 self.evaluate(table.size()))<line_sep>keys_list,values_list=table.export_sharded()<line_sep>self.assertAllEqual(num_shards len(keys_list))<line_sep>self.assertAllEqual(num_shards len(values_list))<line_sep># Exported keys include empty key buckets set to the empty_key self.assertAllEqual(set([-2 10 12]) 
set(self.evaluate(keys_list[0]).flatten()))<line_sep>self.assertAllEqual(set([-2 11]) set(self.evaluate(keys_list[1]).flatten()))<line_sep># Exported values include empty value buckets set to 0 self.assertAllEqual(set([0 2 4]) set(self.evaluate(values_list[0]).flatten()))<line_sep>self.assertAllEqual(set([0 3]) set(self.evaluate(values_list[1]).flatten()))<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>googletest.main()<block_end>
# Generated by Django 1.11.11 on 2018-03-20 18:38 <import_stmt>django.db.models.deletion<import_stmt>django_countries.fields<import_stmt>stdimage.models<import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<import_stmt>grandchallenge.core.storage<class_stmt>Migration(migrations.Migration)<block_start>initial=<true><line_sep>dependencies=[migrations.swappable_dependency(settings.AUTH_USER_MODEL)]<line_sep>operations=[migrations.CreateModel(name="UserProfile" fields=[("id" models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name="ID" ) ) ("mugshot" stdimage.models.JPEGField(blank=<true> help_text="A personal image displayed in your profile." upload_to=grandchallenge.core.storage.get_mugshot_path verbose_name="mugshot" ) ) ("privacy" models.CharField(choices=[("open" "Open") ("registered" "Registered") ("closed" "Closed") ] default="open" help_text="Designates who can view your profile." max_length=15 verbose_name="privacy" ) ) ("institution" models.CharField(max_length=100)) ("department" models.CharField(max_length=100)) ("country" django_countries.fields.CountryField(max_length=2) ) ("website" models.CharField(blank=<true> max_length=150)) ("user" models.OneToOneField(on_delete=django.db.models.deletion.CASCADE related_name="user_profile" to=settings.AUTH_USER_MODEL verbose_name="user" ) ) ] options={"permissions":(("view_profile" "Can view profile") ) "abstract":<false> } )]<block_end>
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- <import_from_stmt>typing Any TYPE_CHECKING<import_from_stmt>azure.core AsyncPipelineClient<import_from_stmt>azure.core.pipeline.transport AsyncHttpResponse HttpRequest<import_from_stmt>msrest Deserializer Serializer<if_stmt>TYPE_CHECKING# pylint: disable=unused-import,ungrouped-imports <block_start><import_from_stmt>azure.core.credentials_async AsyncTokenCredential<block_end><import_from_stmt>._configuration FarmBeatsClientConfiguration<import_from_stmt>.operations ApplicationDataOperations<import_from_stmt>.operations AttachmentsOperations<import_from_stmt>.operations BoundariesOperations<import_from_stmt>.operations CropsOperations<import_from_stmt>.operations CropVarietiesOperations<import_from_stmt>.operations FarmersOperations<import_from_stmt>.operations FarmOperationsOperations<import_from_stmt>.operations FarmsOperations<import_from_stmt>.operations FieldsOperations<import_from_stmt>.operations HarvestDataOperations<import_from_stmt>.operations ImageProcessingOperations<import_from_stmt>.operations OAuthProvidersOperations<import_from_stmt>.operations OAuthTokensOperations<import_from_stmt>.operations PlantingDataOperations<import_from_stmt>.operations ScenesOperations<import_from_stmt>.operations SeasonalFieldsOperations<import_from_stmt>.operations SeasonsOperations<import_from_stmt>.operations TillageDataOperations<import_from_stmt>.operations WeatherOperations<import_from_stmt>.. models<class_stmt>FarmBeatsClient(object)<block_start>"""APIs documentation for Azure AgPlatform DataPlane Service. 
:ivar application_data: ApplicationDataOperations operations :vartype application_data: azure.agrifood.farming.aio.operations.ApplicationDataOperations :ivar attachments: AttachmentsOperations operations :vartype attachments: azure.agrifood.farming.aio.operations.AttachmentsOperations :ivar boundaries: BoundariesOperations operations :vartype boundaries: azure.agrifood.farming.aio.operations.BoundariesOperations :ivar crops: CropsOperations operations :vartype crops: azure.agrifood.farming.aio.operations.CropsOperations :ivar crop_varieties: CropVarietiesOperations operations :vartype crop_varieties: azure.agrifood.farming.aio.operations.CropVarietiesOperations :ivar farmers: FarmersOperations operations :vartype farmers: azure.agrifood.farming.aio.operations.FarmersOperations :ivar farm_operations: FarmOperationsOperations operations :vartype farm_operations: azure.agrifood.farming.aio.operations.FarmOperationsOperations :ivar farms: FarmsOperations operations :vartype farms: azure.agrifood.farming.aio.operations.FarmsOperations :ivar fields: FieldsOperations operations :vartype fields: azure.agrifood.farming.aio.operations.FieldsOperations :ivar harvest_data: HarvestDataOperations operations :vartype harvest_data: azure.agrifood.farming.aio.operations.HarvestDataOperations :ivar image_processing: ImageProcessingOperations operations :vartype image_processing: azure.agrifood.farming.aio.operations.ImageProcessingOperations :ivar oauth_providers: OAuthProvidersOperations operations :vartype oauth_providers: azure.agrifood.farming.aio.operations.OAuthProvidersOperations :ivar oauth_tokens: OAuthTokensOperations operations :vartype oauth_tokens: azure.agrifood.farming.aio.operations.OAuthTokensOperations :ivar planting_data: PlantingDataOperations operations :vartype planting_data: azure.agrifood.farming.aio.operations.PlantingDataOperations :ivar scenes: ScenesOperations operations :vartype scenes: azure.agrifood.farming.aio.operations.ScenesOperations :ivar 
seasonal_fields: SeasonalFieldsOperations operations :vartype seasonal_fields: azure.agrifood.farming.aio.operations.SeasonalFieldsOperations :ivar seasons: SeasonsOperations operations :vartype seasons: azure.agrifood.farming.aio.operations.SeasonsOperations :ivar tillage_data: TillageDataOperations operations :vartype tillage_data: azure.agrifood.farming.aio.operations.TillageDataOperations :ivar weather: WeatherOperations operations :vartype weather: azure.agrifood.farming.aio.operations.WeatherOperations :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param endpoint: The endpoint of your FarmBeats resource (protocol and hostname, for example: https://{resourceName}.farmbeats.azure.net). :type endpoint: str :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. """<def_stmt>__init__ self credential:"AsyncTokenCredential" endpoint:str **kwargs:Any<arrow><none><block_start>base_url='{Endpoint}'<line_sep>self._config=FarmBeatsClientConfiguration(credential endpoint **kwargs)<line_sep>self._client=AsyncPipelineClient(base_url=base_url config=self._config **kwargs)<line_sep>client_models={k:v<for>k,v models.__dict__.items()<if>isinstance(v type)}<line_sep>self._serialize=Serializer(client_models)<line_sep>self._serialize.client_side_validation=<false><line_sep>self._deserialize=Deserializer(client_models)<line_sep>self.application_data=ApplicationDataOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.attachments=AttachmentsOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.boundaries=BoundariesOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.crops=CropsOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.crop_varieties=CropVarietiesOperations(self._client 
self._config self._serialize self._deserialize)<line_sep>self.farmers=FarmersOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.farm_operations=FarmOperationsOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.farms=FarmsOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.fields=FieldsOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.harvest_data=HarvestDataOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.image_processing=ImageProcessingOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.oauth_providers=OAuthProvidersOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.oauth_tokens=OAuthTokensOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.planting_data=PlantingDataOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.scenes=ScenesOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.seasonal_fields=SeasonalFieldsOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.seasons=SeasonsOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.tillage_data=TillageDataOperations(self._client self._config self._serialize self._deserialize)<line_sep>self.weather=WeatherOperations(self._client self._config self._serialize self._deserialize)<block_end><async_keyword><def_stmt>_send_request self http_request:HttpRequest **kwargs:Any<arrow>AsyncHttpResponse<block_start>"""Runs the network request through the client's chained policies. :param http_request: The network request you want to make. Required. :type http_request: ~azure.core.pipeline.transport.HttpRequest :keyword bool stream: Whether the response payload will be streamed. Defaults to True. 
:return: The response of your network call. Does not do error handling on your response. :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse """<line_sep>path_format_arguments={'Endpoint':self._serialize.url("self._config.endpoint" self._config.endpoint 'str' skip_quote=<true>) }<line_sep>http_request.url=self._client.format_url(http_request.url **path_format_arguments)<line_sep>stream=kwargs.pop("stream" <true>)<line_sep>pipeline_response=<await>self._client._pipeline.run(http_request stream=stream **kwargs)<line_sep><return>pipeline_response.http_response<block_end><async_keyword><def_stmt>close self<arrow><none><block_start><await>self._client.close()<block_end><async_keyword><def_stmt>__aenter__ self<arrow>"FarmBeatsClient"<block_start><await>self._client.__aenter__()<line_sep><return>self<block_end><async_keyword><def_stmt>__aexit__ self *exc_details<arrow><none><block_start><await>self._client.__aexit__(*exc_details)<block_end><block_end>
<import_stmt>synapse.glob<as>s_glob<import_stmt>synapse.tests.utils<as>s_t_utils<class_stmt>GlobTest(s_t_utils.SynTest)<block_start><def_stmt>test_glob_sync self<block_start><async_keyword><def_stmt>afoo <block_start><return>42<block_end>retn=s_glob.sync(afoo())<line_sep>self.eq(retn 42)<block_end><block_end>
# Copyright 2021 Google, Inc. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
<import_stmt>argparse<import_stmt>importlib<import_stmt>os.path<import_stmt>sys<import_stmt>importer<import_from_stmt>code_formatter code_formatter<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('modpath' help='module the simobject belongs to')<line_sep>parser.add_argument('param_cc' help='parameter cc file to generate')<line_sep>parser.add_argument('use_python' help='whether python is enabled in gem5 (True or False)')<line_sep>args=parser.parse_args()<line_sep>use_python=args.use_python.lower()<if_stmt>use_python<eq>'true'<block_start>use_python=<true><block_end><elif_stmt>use_python<eq>'false'<block_start>use_python=<false><block_end><else_stmt><block_start>print(f'Unrecognized "use_python" value {use_python}' file=sys.stderr)<line_sep>sys.exit(1)<block_end>basename=os.path.basename(args.param_cc)<line_sep>no_ext=os.path.splitext(basename)[0]<line_sep>sim_object_name='_'.join(no_ext.split('_')[1:])<line_sep>importer.install()<line_sep>module=importlib.import_module(args.modpath)<line_sep>sim_object=getattr(module sim_object_name)<line_sep>code=code_formatter()<line_sep>sim_object.params_create_decl(code use_python)<line_sep>code.write(args.param_cc)<line_sep>
<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("auditlog" "0005_logentry_additional_data_verbose_name") ]<line_sep>operations=[migrations.AlterField(model_name="logentry" name="object_pk" field=models.CharField(verbose_name="object pk" max_length=255 db_index=<true>) ) ]<block_end>
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See LICENSE in the project root # for license information. <import_from_future_stmt> absolute_import division print_function unicode_literals<import_stmt>ctypes<import_from_stmt>ctypes.wintypes BOOL DWORD HANDLE LARGE_INTEGER LPCSTR UINT<import_from_stmt>debugpy.common log<line_sep>JOBOBJECTCLASS=ctypes.c_int<line_sep>LPDWORD=ctypes.POINTER(DWORD)<line_sep>LPVOID=ctypes.c_void_p<line_sep>SIZE_T=ctypes.c_size_t<line_sep>ULONGLONG=ctypes.c_ulonglong<class_stmt>IO_COUNTERS(ctypes.Structure)<block_start>_fields_=[("ReadOperationCount" ULONGLONG) ("WriteOperationCount" ULONGLONG) ("OtherOperationCount" ULONGLONG) ("ReadTransferCount" ULONGLONG) ("WriteTransferCount" ULONGLONG) ("OtherTransferCount" ULONGLONG) ]<block_end><class_stmt>JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure)<block_start>_fields_=[("PerProcessUserTimeLimit" LARGE_INTEGER) ("PerJobUserTimeLimit" LARGE_INTEGER) ("LimitFlags" DWORD) ("MinimumWorkingSetSize" SIZE_T) ("MaximumWorkingSetSize" SIZE_T) ("ActiveProcessLimit" DWORD) ("Affinity" SIZE_T) ("PriorityClass" DWORD) ("SchedulingClass" DWORD) ]<block_end><class_stmt>JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure)<block_start>_fields_=[("BasicLimitInformation" JOBOBJECT_BASIC_LIMIT_INFORMATION) ("IoInfo" IO_COUNTERS) ("ProcessMemoryLimit" SIZE_T) ("JobMemoryLimit" SIZE_T) ("PeakProcessMemoryUsed" SIZE_T) ("PeakJobMemoryUsed" SIZE_T) ]<block_end>JobObjectExtendedLimitInformation=JOBOBJECTCLASS(9)<line_sep>JOB_OBJECT_LIMIT_BREAKAWAY_OK=0x00000800<line_sep>JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE=0x00002000<line_sep>PROCESS_TERMINATE=0x0001<line_sep>PROCESS_SET_QUOTA=0x0100<def_stmt>_errcheck is_error_result=(<lambda>result:<not>result)<block_start><def_stmt>impl result func args<block_start><if_stmt>is_error_result(result)<block_start>log.debug("{0} returned {1}" func.__name__ 
result)<line_sep><raise>ctypes.WinError()<block_end><else_stmt><block_start><return>result<block_end><block_end><return>impl<block_end>kernel32=ctypes.windll.kernel32<line_sep>kernel32.AssignProcessToJobObject.errcheck=_errcheck()<line_sep>kernel32.AssignProcessToJobObject.restype=BOOL<line_sep>kernel32.AssignProcessToJobObject.argtypes=(HANDLE HANDLE)<line_sep>kernel32.CreateJobObjectA.errcheck=_errcheck(<lambda>result:result<eq>0)<line_sep>kernel32.CreateJobObjectA.restype=HANDLE<line_sep>kernel32.CreateJobObjectA.argtypes=(LPVOID LPCSTR)<line_sep>kernel32.OpenProcess.errcheck=_errcheck(<lambda>result:result<eq>0)<line_sep>kernel32.OpenProcess.restype=HANDLE<line_sep>kernel32.OpenProcess.argtypes=(DWORD BOOL DWORD)<line_sep>kernel32.QueryInformationJobObject.errcheck=_errcheck()<line_sep>kernel32.QueryInformationJobObject.restype=BOOL<line_sep>kernel32.QueryInformationJobObject.argtypes=(HANDLE JOBOBJECTCLASS LPVOID DWORD LPDWORD )<line_sep>kernel32.SetInformationJobObject.errcheck=_errcheck()<line_sep>kernel32.SetInformationJobObject.restype=BOOL<line_sep>kernel32.SetInformationJobObject.argtypes=(HANDLE JOBOBJECTCLASS LPVOID DWORD)<line_sep>kernel32.TerminateJobObject.errcheck=_errcheck()<line_sep>kernel32.TerminateJobObject.restype=BOOL<line_sep>kernel32.TerminateJobObject.argtypes=(HANDLE UINT)<line_sep>
<import_stmt>pytest<import_stmt>salt.exceptions<import_stmt>salt.utils.stringutils<import_stmt>salt.utils.thin<import_from_stmt>tests.support.mock MagicMock patch<def_stmt>_mock_popen return_value=<none> side_effect=<none> returncode=0<block_start>proc=MagicMock()<line_sep>proc.communicate=MagicMock(return_value=return_value side_effect=side_effect)<line_sep>proc.returncode=returncode<line_sep>popen=MagicMock(return_value=proc)<line_sep><return>popen<block_end>@pytest.mark.parametrize("version" [[2 7] [3 0] [3 7]])<def_stmt>test_get_tops_python version<block_start>""" Tests 'distro' is only included when targeting python 3 in get_tops_python """<line_sep>python3=<false><if_stmt>tuple(version)<ge>(3 0)<block_start>python3=<true><block_end>mods=["jinja2"]<if_stmt>python3<block_start>mods.append("distro")<block_end>popen_ret=tuple(salt.utils.stringutils.to_bytes(x)<for>x ("" ""))<line_sep>mock_popen=_mock_popen(return_value=popen_ret)<line_sep>patch_proc=patch("salt.utils.thin.subprocess.Popen" mock_popen)<line_sep>patch_which=patch("salt.utils.path.which" return_value=<true>)<with_stmt>patch_proc patch_which<block_start>salt.utils.thin.get_tops_python("python2" ext_py_ver=version)<line_sep>cmds=[x[0][0]<for>x mock_popen.call_args_list]<assert_stmt>[x<for>x cmds<if>"jinja2"<in>x[2]]<if_stmt>python3<block_start><assert_stmt>[x<for>x cmds<if>"distro"<in>x[2]]<block_end><else_stmt><block_start><assert_stmt><not>[x<for>x cmds<if>"distro"<in>x[2]]<block_end><block_end><block_end>@pytest.mark.parametrize("version" [[2 7] [3 0] [3 7]])<def_stmt>test_get_ext_tops version<block_start>""" Tests 'distro' is only included when targeting python 3 in get_ext_tops """<line_sep>python3=<false><if_stmt>tuple(version)<ge>(3 0)<block_start>python3=<true><block_end>cfg={"namespace":{"path":"/foo" "py-version":version "dependencies":{"jinja2":"/jinja/foo.py" "yaml":"/yaml/" "tornado":"/tornado/tornado.py" "msgpack":"msgpack.py" } }}<with_stmt>patch("salt.utils.thin.os.path.isfile" 
MagicMock(return_value=<true>))<block_start><if_stmt>python3<block_start><with_stmt>pytest.raises(salt.exceptions.SaltSystemExit)<as>err<block_start>salt.utils.thin.get_ext_tops(cfg)<block_end><block_end><else_stmt><block_start>ret=salt.utils.thin.get_ext_tops(cfg)<block_end><block_end><if_stmt>python3<block_start><assert_stmt>"distro"<in>err.value.code<block_end><else_stmt><block_start><assert_stmt><not>[x<for>x ret["namespace"]["dependencies"]<if>"distro"<in>x]<assert_stmt>[x<for>x ret["namespace"]["dependencies"]<if>"msgpack"<in>x]<block_end><block_end>
<def_stmt>extractMyFirstTimeTranslating item<block_start>""" 'My First Time Translating' """<line_sep>vol,chp,frag,postfix=extractVolChapterFragmentPostfix(item['title'])<if_stmt><not>(chp<or>vol<or>frag)<or>'preview'<in>item['title'].lower()<block_start><return><none><block_end><return><false><block_end>
# # This source file is part of the EdgeDB open source project. # # Copyright 2016-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_from_future_stmt> annotations<import_from_stmt>typing *<import_stmt>re<import_stmt>hashlib<import_from_stmt>edb._edgeql_rust tokenize<as>_tokenize TokenizerError Token<import_from_stmt>edb._edgeql_rust normalize<as>_normalize Entry<import_from_stmt>edb errors<line_sep>TRAILING_WS_IN_CONTINUATION=re.compile(r'\\ \s+\n')<class_stmt>Source<block_start><def_stmt>__init__ self text:str tokens:List[Token]<arrow><none><block_start>self._cache_key=hashlib.blake2b(text.encode('utf-8')).digest()<line_sep>self._text=text<line_sep>self._tokens=tokens<block_end><def_stmt>text self<arrow>str<block_start><return>self._text<block_end><def_stmt>cache_key self<arrow>bytes<block_start><return>self._cache_key<block_end><def_stmt>variables self<arrow>Dict[str Any]<block_start><return>{}<block_end><def_stmt>tokens self<arrow>List[Token]<block_start><return>self._tokens<block_end><def_stmt>first_extra self<arrow>Optional[int]<block_start><return><none><block_end><def_stmt>extra_count self<arrow>int<block_start><return>0<block_end><def_stmt>extra_blob self<arrow>bytes<block_start><return>b''<block_end>@classmethod<def_stmt>from_string cls text:str<arrow>Source<block_start><return>cls(text=text tokens=tokenize(text))<block_end><def_stmt>__repr__ self<block_start><return>f'<edgeql.Source 
text={self._text!r}>'<block_end><block_end><class_stmt>NormalizedSource(Source)<block_start><def_stmt>__init__ self normalized:Entry text:str<arrow><none><block_start>self._text=text<line_sep>self._cache_key=normalized.key()<line_sep>self._tokens=normalized.tokens()<line_sep>self._variables=normalized.variables()<line_sep>self._first_extra=normalized.first_extra()<line_sep>self._extra_count=normalized.extra_count()<line_sep>self._extra_blob=normalized.extra_blob()<block_end><def_stmt>text self<arrow>str<block_start><return>self._text<block_end><def_stmt>cache_key self<arrow>bytes<block_start><return>self._cache_key<block_end><def_stmt>variables self<arrow>Dict[str Any]<block_start><return>self._variables<block_end><def_stmt>tokens self<arrow>List[Token]<block_start><return>self._tokens<block_end><def_stmt>first_extra self<arrow>Optional[int]<block_start><return>self._first_extra<block_end><def_stmt>extra_count self<arrow>int<block_start><return>self._extra_count<block_end><def_stmt>extra_blob self<arrow>bytes<block_start><return>self._extra_blob<block_end>@classmethod<def_stmt>from_string cls text:str<arrow>NormalizedSource<block_start><return>cls(normalize(text) text)<block_end><block_end><def_stmt>tokenize eql:str<arrow>List[Token]<block_start><try_stmt><block_start><return>_tokenize(eql)<block_end><except_stmt>TokenizerError<as>e<block_start>message,position=e.args<line_sep>hint=_derive_hint(eql message position)<line_sep><raise>errors.EdgeQLSyntaxError(message position=position hint=hint)<from>e<block_end><block_end><def_stmt>normalize eql:str<arrow>Entry<block_start><try_stmt><block_start><return>_normalize(eql)<block_end><except_stmt>TokenizerError<as>e<block_start>message,position=e.args<line_sep>hint=_derive_hint(eql message position)<line_sep><raise>errors.EdgeQLSyntaxError(message position=position hint=hint)<from>e<block_end><block_end><def_stmt>_derive_hint input:str message:str position:Tuple[int int int] 
<arrow>Optional[str]<block_start>_,_,off=position<if_stmt>message<eq>r"invalid string literal: invalid escape sequence '\ '"<block_start><if_stmt>TRAILING_WS_IN_CONTINUATION.search(input[off:])<block_start><return>"consider removing trailing whitespace"<block_end><block_end><return><none><block_end>
# Copyright (c) OpenMMLab. All rights reserved. <import_stmt>torch.nn<as>nn<import_from_stmt>mmcv.cnn trunc_normal_init<import_from_stmt>..builder HEADS<import_from_stmt>.base BaseHead<line_sep>@HEADS.register_module()<class_stmt>TimeSformerHead(BaseHead)<block_start>"""Classification head for TimeSformer. Args: num_classes (int): Number of classes to be classified. in_channels (int): Number of channels in input feature. loss_cls (dict): Config for building loss. Defaults to `dict(type='CrossEntropyLoss')`. init_std (float): Std value for Initiation. Defaults to 0.02. kwargs (dict, optional): Any keyword argument to be used to initialize the head. """<def_stmt>__init__ self num_classes in_channels loss_cls=dict(type='CrossEntropyLoss') init_std=0.02 **kwargs<block_start>super().__init__(num_classes in_channels loss_cls **kwargs)<line_sep>self.init_std=init_std<line_sep>self.fc_cls=nn.Linear(self.in_channels self.num_classes)<block_end><def_stmt>init_weights self<block_start>"""Initiate the parameters from scratch."""<line_sep>trunc_normal_init(self.fc_cls std=self.init_std)<block_end><def_stmt>forward self x# [N, in_channels] <block_start>cls_score=self.fc_cls(x)<line_sep># [N, num_classes] <return>cls_score<block_end><block_end>
# pylint: disable=too-many-lines """Test for certbot_apache._internal.configurator AutoHSTS functionality"""<import_stmt>re<import_stmt>unittest<try_stmt><block_start><import_stmt>mock<block_end><except_stmt>ImportError# pragma: no cover <block_start><import_from_stmt>unittest mock<block_end># type: ignore <import_from_stmt>certbot errors<import_from_stmt>certbot_apache._internal constants<import_stmt>util<class_stmt>AutoHSTSTest(util.ApacheTest)<block_start>"""Tests for AutoHSTS feature"""<line_sep># pylint: disable=protected-access <def_stmt>setUp self# pylint: disable=arguments-differ <block_start>super().setUp()<line_sep>self.config=util.get_apache_configurator(self.config_path self.vhost_path self.config_dir self.work_dir)<line_sep>self.config.parser.modules["headers_module"]=<none><line_sep>self.config.parser.modules["mod_headers.c"]=<none><line_sep>self.config.parser.modules["ssl_module"]=<none><line_sep>self.config.parser.modules["mod_ssl.c"]=<none><line_sep>self.vh_truth=util.get_vh_truth(self.temp_dir "debian_apache_2_4/multiple_vhosts")<block_end><def_stmt>get_autohsts_value self vh_path<block_start>""" Get value from Strict-Transport-Security header """<line_sep>header_path=self.config.parser.find_dir("Header" <none> vh_path)<if_stmt>header_path<block_start>pat='(?:[ "]|^)(strict-transport-security)(?:[ "]|$)'<for_stmt>head header_path<block_start><if_stmt>re.search(pat self.config.parser.aug.get(head).lower())<block_start><return>self.config.parser.aug.get(head.replace("arg[3]" "arg[4]"))<block_end><block_end><block_end><return><none><block_end># pragma: no cover @mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.enable_mod")<def_stmt>test_autohsts_enable_headers_mod self mock_enable _restart<block_start>self.config.parser.modules.pop("headers_module" <none>)<line_sep>self.config.parser.modules.pop("mod_header.c" 
<none>)<line_sep>self.config.enable_autohsts(mock.MagicMock() ["ocspvhost.com"])<line_sep>self.assertTrue(mock_enable.called)<block_end>@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")<def_stmt>test_autohsts_deploy_already_exists self _restart<block_start>self.config.enable_autohsts(mock.MagicMock() ["ocspvhost.com"])<line_sep>self.assertRaises(errors.PluginEnhancementAlreadyPresent self.config.enable_autohsts mock.MagicMock() ["ocspvhost.com"])<block_end>@mock.patch("certbot_apache._internal.constants.AUTOHSTS_FREQ" 0)@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.prepare")<def_stmt>test_autohsts_increase self mock_prepare _mock_restart<block_start>self.config._prepared=<false><line_sep>maxage="\"max-age={0}\""<line_sep>initial_val=maxage.format(constants.AUTOHSTS_STEPS[0])<line_sep>inc_val=maxage.format(constants.AUTOHSTS_STEPS[1])<line_sep>self.config.enable_autohsts(mock.MagicMock() ["ocspvhost.com"])<line_sep># Verify initial value self.assertEqual(self.get_autohsts_value(self.vh_truth[7].path) initial_val)<line_sep># Increase self.config.update_autohsts(mock.MagicMock())<line_sep># Verify increased value self.assertEqual(self.get_autohsts_value(self.vh_truth[7].path) inc_val)<line_sep>self.assertTrue(mock_prepare.called)<block_end>@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator._autohsts_increase")<def_stmt>test_autohsts_increase_noop self mock_increase _restart<block_start>maxage="\"max-age={0}\""<line_sep>initial_val=maxage.format(constants.AUTOHSTS_STEPS[0])<line_sep>self.config.enable_autohsts(mock.MagicMock() ["ocspvhost.com"])<line_sep># Verify initial value self.assertEqual(self.get_autohsts_value(self.vh_truth[7].path) initial_val)<line_sep>self.config.update_autohsts(mock.MagicMock())<line_sep># Freq not patched, so 
value shouldn't increase self.assertFalse(mock_increase.called)<block_end>@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")@mock.patch("certbot_apache._internal.constants.AUTOHSTS_FREQ" 0)<def_stmt>test_autohsts_increase_no_header self _restart<block_start>self.config.enable_autohsts(mock.MagicMock() ["ocspvhost.com"])<line_sep># Remove the header dir_locs=self.config.parser.find_dir("Header" <none> self.vh_truth[7].path)<line_sep>dir_loc="/".join(dir_locs[0].split("/")[:-1])<line_sep>self.config.parser.aug.remove(dir_loc)<line_sep>self.assertRaises(errors.PluginError self.config.update_autohsts mock.MagicMock())<block_end>@mock.patch("certbot_apache._internal.constants.AUTOHSTS_FREQ" 0)@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")<def_stmt>test_autohsts_increase_and_make_permanent self _mock_restart<block_start>maxage="\"max-age={0}\""<line_sep>max_val=maxage.format(constants.AUTOHSTS_PERMANENT)<line_sep>mock_lineage=mock.MagicMock()<line_sep>mock_lineage.key_path="/etc/apache2/ssl/key-certbot_15.pem"<line_sep>self.config.enable_autohsts(mock.MagicMock() ["ocspvhost.com"])<for_stmt>i range(len(constants.AUTOHSTS_STEPS)-1)# Ensure that value is not made permanent prematurely <block_start>self.config.deploy_autohsts(mock_lineage)<line_sep>self.assertNotEqual(self.get_autohsts_value(self.vh_truth[7].path) max_val)<line_sep>self.config.update_autohsts(mock.MagicMock())<line_sep># Value should match pre-permanent increment step cur_val=maxage.format(constants.AUTOHSTS_STEPS[i+1])<line_sep>self.assertEqual(self.get_autohsts_value(self.vh_truth[7].path) cur_val)<block_end># Ensure that the value is raised to max self.assertEqual(self.get_autohsts_value(self.vh_truth[7].path) maxage.format(constants.AUTOHSTS_STEPS[-1]))<line_sep># Make permanent self.config.deploy_autohsts(mock_lineage)<line_sep>self.assertEqual(self.get_autohsts_value(self.vh_truth[7].path) 
max_val)<block_end><def_stmt>test_autohsts_update_noop self<block_start><with_stmt>mock.patch("time.time")<as>mock_time# Time mock is used to make sure that the execution does not # continue when no autohsts entries exist in pluginstorage <block_start>self.config.update_autohsts(mock.MagicMock())<line_sep>self.assertFalse(mock_time.called)<block_end><block_end><def_stmt>test_autohsts_make_permanent_noop self<block_start>self.config.storage.put=mock.MagicMock()<line_sep>self.config.deploy_autohsts(mock.MagicMock())<line_sep># Make sure that the execution does not continue when no entries in store self.assertFalse(self.config.storage.put.called)<block_end>@mock.patch("certbot_apache._internal.display_ops.select_vhost")<def_stmt>test_autohsts_no_ssl_vhost self mock_select<block_start>mock_select.return_value=self.vh_truth[0]<with_stmt>mock.patch("certbot_apache._internal.configurator.logger.error")<as>mock_log<block_start>self.assertRaises(errors.PluginError self.config.enable_autohsts mock.MagicMock() "invalid.example.com")<line_sep>self.assertTrue("Certbot was not able to find SSL"<in>mock_log.call_args[0][0])<block_end><block_end>@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.add_vhost_id")<def_stmt>test_autohsts_dont_enhance_twice self mock_id _restart<block_start>mock_id.return_value="1234567"<line_sep>self.config.enable_autohsts(mock.MagicMock() ["ocspvhost.com" "ocspvhost.com"])<line_sep>self.assertEqual(mock_id.call_count 1)<block_end><def_stmt>test_autohsts_remove_orphaned self# pylint: disable=protected-access <block_start>self.config._autohsts_fetch_state()<line_sep>self.config._autohsts["orphan_id"]={"laststep":0 "timestamp":0}<line_sep>self.config._autohsts_save_state()<line_sep>self.config.update_autohsts(mock.MagicMock())<line_sep>self.assertFalse("orphan_id"<in>self.config._autohsts)<line_sep># Make sure it's removed from the pluginstorage file as well 
self.config._autohsts=<none><line_sep>self.config._autohsts_fetch_state()<line_sep>self.assertFalse(self.config._autohsts)<block_end><def_stmt>test_autohsts_make_permanent_vhost_not_found self# pylint: disable=protected-access <block_start>self.config._autohsts_fetch_state()<line_sep>self.config._autohsts["orphan_id"]={"laststep":999 "timestamp":0}<line_sep>self.config._autohsts_save_state()<with_stmt>mock.patch("certbot_apache._internal.configurator.logger.error")<as>mock_log<block_start>self.config.deploy_autohsts(mock.MagicMock())<line_sep>self.assertTrue(mock_log.called)<line_sep>self.assertTrue("VirtualHost with id orphan_id was not"<in>mock_log.call_args[0][0])<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end># pragma: no cover
<import_from_stmt>functools wraps<import_from_stmt>flask redirect url_for<import_from_stmt>urlobject URLObject<import_from_stmt>requests_oauthlib OAuth1Session<as>BaseOAuth1Session<import_from_stmt>requests_oauthlib OAuth2Session<as>BaseOAuth2Session<import_from_stmt>oauthlib.common to_unicode<import_from_stmt>werkzeug.utils cached_property<import_from_stmt>flask_dance.utils invalidate_cached_property<class_stmt>OAuth1Session(BaseOAuth1Session)<block_start>""" A :class:`requests.Session` subclass that can do some special things: * lazy-loads OAuth1 tokens from the storage via the blueprint * handles OAuth1 authentication (from :class:`requests_oauthlib.OAuth1Session` superclass) * has a ``base_url`` property used for relative URL resolution Note that this is a session between the consumer (your website) and the provider (e.g. Twitter), and *not* a session between a user of your website and your website. """<def_stmt>__init__ self blueprint=<none> base_url=<none> *args **kwargs<block_start>super().__init__(*args **kwargs)<line_sep>self.blueprint=blueprint<line_sep>self.base_url=URLObject(base_url)<block_end>@cached_property<def_stmt>token self<block_start>""" Get and set the values in the OAuth token, structured as a dictionary. """<line_sep><return>self.blueprint.token<block_end><def_stmt>load_token self<block_start>t=self.token<if_stmt>t<and>"oauth_token"<in>t<and>"oauth_token_secret"<in>t# This really, really violates the Law of Demeter, but # I don't see a better way to set these parameters. :( <block_start>self.auth.client.resource_owner_key=to_unicode(t["oauth_token"])<line_sep>self.auth.client.resource_owner_secret=to_unicode(t["oauth_token_secret"])<line_sep><return><true><block_end><return><false><block_end>@property<def_stmt>authorized self<block_start>"""This is the property used when you have a statement in your code that reads "if <provider>.authorized:", e.g. "if twitter.authorized:". 
The way it works is kind of complicated: this function just tries to load the token, and then the 'super()' statement basically just tests if the token exists (see BaseOAuth1Session.authorized). To load the token, it calls the load_token() function within this class, which in turn checks the 'token' property of this class (another function), which in turn checks the 'token' property of the blueprint (see base.py), which calls 'storage.get()' to actually try to load the token from the cache/db (see the 'get()' function in storage/sqla.py). """<line_sep>self.load_token()<line_sep><return>super().authorized<block_end>@property<def_stmt>authorization_required self<block_start>""" .. versionadded:: 1.3.0 This is a decorator for a view function. If the current user does not have an OAuth token, then they will be redirected to the :meth:`~flask_dance.consumer.oauth1.OAuth1ConsumerBlueprint.login` view to obtain one. """<def_stmt>wrapper func<block_start>@wraps(func)<def_stmt>check_authorization *args **kwargs<block_start><if_stmt><not>self.authorized<block_start>endpoint=f"{self.blueprint.name}.login"<line_sep><return>redirect(url_for(endpoint))<block_end><return>func(*args **kwargs)<block_end><return>check_authorization<block_end><return>wrapper<block_end><def_stmt>prepare_request self request<block_start><if_stmt>self.base_url<block_start>request.url=self.base_url.relative(request.url)<block_end><return>super().prepare_request(request)<block_end><def_stmt>request self method url data=<none> headers=<none> should_load_token=<true> **kwargs<block_start><if_stmt>should_load_token<block_start>self.load_token()<block_end><return>super().request(method=method url=url data=data headers=headers **kwargs)<block_end><block_end><class_stmt>OAuth2Session(BaseOAuth2Session)<block_start>""" A :class:`requests.Session` subclass that can do some special things: * lazy-loads OAuth2 tokens from the storage via the blueprint * handles OAuth2 authentication (from 
:class:`requests_oauthlib.OAuth2Session` superclass) * has a ``base_url`` property used for relative URL resolution Note that this is a session between the consumer (your website) and the provider (e.g. Twitter), and *not* a session between a user of your website and your website. """<def_stmt>__init__ self blueprint=<none> base_url=<none> *args **kwargs<block_start>super().__init__(*args **kwargs)<line_sep>self.blueprint=blueprint<line_sep>self.base_url=URLObject(base_url)<line_sep>invalidate_cached_property(self "token")<block_end>@cached_property<def_stmt>token self<block_start>""" Get and set the values in the OAuth token, structured as a dictionary. """<line_sep><return>self.blueprint.token<block_end><def_stmt>load_token self<block_start>self._client.token=self.token<if_stmt>self.token<block_start>self._client.populate_token_attributes(self.token)<line_sep><return><true><block_end><return><false><block_end>@property<def_stmt>access_token self<block_start>""" Returns the ``access_token`` from the OAuth token. """<line_sep><return>self.token<and>self.token.get("access_token")<block_end>@property<def_stmt>authorized self<block_start>"""This is the property used when you have a statement in your code that reads "if <provider>.authorized:", e.g. "if twitter.authorized:". The way it works is kind of complicated: this function just tries to load the token, and then the 'super()' statement basically just tests if the token exists (see BaseOAuth1Session.authorized). To load the token, it calls the load_token() function within this class, which in turn checks the 'token' property of this class (another function), which in turn checks the 'token' property of the blueprint (see base.py), which calls 'storage.get()' to actually try to load the token from the cache/db (see the 'get()' function in storage/sqla.py). """<line_sep>self.load_token()<line_sep><return>super().authorized<block_end>@property<def_stmt>authorization_required self<block_start>""" .. 
versionadded:: 1.3.0 This is a decorator for a view function. If the current user does not have an OAuth token, then they will be redirected to the :meth:`~flask_dance.consumer.oauth2.OAuth2ConsumerBlueprint.login` view to obtain one. """<def_stmt>wrapper func<block_start>@wraps(func)<def_stmt>check_authorization *args **kwargs<block_start><if_stmt><not>self.authorized<block_start>endpoint=f"{self.blueprint.name}.login"<line_sep><return>redirect(url_for(endpoint))<block_end><return>func(*args **kwargs)<block_end><return>check_authorization<block_end><return>wrapper<block_end><def_stmt>request self method url data=<none> headers=<none> **kwargs<block_start><if_stmt>self.base_url<block_start>url=self.base_url.relative(url)<block_end>self.load_token()<line_sep><return>super().request(method=method url=url data=data headers=headers client_id=self.blueprint.client_id client_secret=self.blueprint.client_secret **kwargs )<block_end><block_end>
#! /usr/bin/env python <import_stmt>os<import_stmt>platform<import_stmt>argparse<import_stmt>sys<import_stmt>shutil<line_sep>print(sys.argv)<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--target' dest='target' action='store')<line_sep>args=parser.parse_args()<line_sep>mypath=os.path.dirname(sys.argv[0])<line_sep>os.chdir(mypath)<line_sep>print(os.getcwd())<line_sep>target=args.target<line_sep>cur_os=platform.system()<line_sep>arch=platform.architecture()<line_sep>path=''<line_sep>magic='0xefefefef'<if_stmt>cur_os<eq>'Linux'<block_start><if_stmt>'64bit'<in>arch<block_start>path='linux64'<block_end><else_stmt><block_start>path='linux32'<block_end><block_end><elif_stmt>cur_os<eq>'Darwin'<block_start>path='osx'<block_end><elif_stmt>cur_os<eq>'Windows'<block_start>path='win32'<block_end><if_stmt>path<block_start>path=os.path.join("tools" path "xz")<block_end>hw_module=0<line_sep>cmd_str="python haas1000_genbin.py %d \"%s\""%(hw_module target)<line_sep>os.system(cmd_str)<line_sep>bin_path=os.path.join(".." "write_flash_gui" "ota_bin")<line_sep>shutil.copy(os.path.join(bin_path "ota_rtos.bin") os.path.join(bin_path "ota_rtos_ota.bin"))<line_sep>cmd_str="\"%s\" -f --lzma2=dict=32KiB --check=crc32 -k %s"%(os.path.abspath(path) os.path.join(bin_path "ota_rtos_ota.bin"))<line_sep>os.system(cmd_str)<line_sep>cmd_str="python ota_gen_md5_bin.py \"%s\" -m %s"%(os.path.join(bin_path "ota_rtos_ota.bin") magic)<line_sep>os.system(cmd_str)<line_sep>cmd_str="python ota_gen_md5_bin.py \"%s\" -m %s"%(os.path.join(bin_path "ota_rtos_ota.bin.xz") magic)<line_sep>os.system(cmd_str)<line_sep>print("run external script success")<line_sep>
import unittest


class Test_test_one_1(unittest.TestCase):
    """Demo test case: one passing, one failing, and one skipped test."""

    def test_1_1_1(self):
        """Passes: 1 == 1."""
        self.assertEqual(1, 1, 'Not equal')

    def test_1_1_2(self):
        """Fails on purpose: 1 != 2."""
        self.assertEqual(1, 2, 'Not equal')

    @unittest.skip("demonstrating skipping")
    def test_1_1_3(self):
        """Would fail, but is skipped to demonstrate @unittest.skip."""
        self.assertEqual(1, 2, 'Not equal')


class Test_test_one_2(unittest.TestCase):
    """Second demo case with a single passing test."""

    def test_1_2_1(self):
        """Passes: 1 == 1."""
        self.assertEqual(1, 1, 'Not equal')


if __name__ == '__main__':
    unittest.main()
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Learning rate utilities for vision tasks."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>typing Any Mapping Optional<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<line_sep>BASE_LEARNING_RATE=0.1<class_stmt>WarmupDecaySchedule(tf.keras.optimizers.schedules.LearningRateSchedule)<block_start>"""A wrapper for LearningRateSchedule that includes warmup steps."""<def_stmt>__init__ self lr_schedule:tf.keras.optimizers.schedules.LearningRateSchedule warmup_steps:int warmup_lr:Optional[float]=<none><block_start>"""Add warmup decay to a learning rate schedule. Args: lr_schedule: base learning rate scheduler warmup_steps: number of warmup steps warmup_lr: an optional field for the final warmup learning rate. This should be provided if the base `lr_schedule` does not contain this field. 
"""<line_sep>super(WarmupDecaySchedule self).__init__()<line_sep>self._lr_schedule=lr_schedule<line_sep>self._warmup_steps=warmup_steps<line_sep>self._warmup_lr=warmup_lr<block_end><def_stmt>__call__ self step:int<block_start>lr=self._lr_schedule(step)<if_stmt>self._warmup_steps<block_start><if_stmt>self._warmup_lr<is><not><none><block_start>initial_learning_rate=tf.convert_to_tensor(self._warmup_lr name="initial_learning_rate")<block_end><else_stmt><block_start>initial_learning_rate=tf.convert_to_tensor(self._lr_schedule.initial_learning_rate name="initial_learning_rate")<block_end>dtype=initial_learning_rate.dtype<line_sep>global_step_recomp=tf.cast(step dtype)<line_sep>warmup_steps=tf.cast(self._warmup_steps dtype)<line_sep>warmup_lr=initial_learning_rate<times>global_step_recomp/warmup_steps<line_sep>lr=tf.cond(global_step_recomp<l>warmup_steps <lambda>:warmup_lr <lambda>:lr)<block_end><return>lr<block_end><def_stmt>get_config self<arrow>Mapping[str Any]<block_start>config=self._lr_schedule.get_config()<line_sep>config.update({"warmup_steps":self._warmup_steps "warmup_lr":self._warmup_lr })<line_sep><return>config<block_end><block_end><class_stmt>CosineDecayWithWarmup(tf.keras.optimizers.schedules.LearningRateSchedule)<block_start>"""Class to generate learning rate tensor."""<def_stmt>__init__ self batch_size:int total_steps:int warmup_steps:int<block_start>"""Creates the consine learning rate tensor with linear warmup. Args: batch_size: The training batch size used in the experiment. total_steps: Total training steps. warmup_steps: Steps for the warm up period. 
"""<line_sep>super(CosineDecayWithWarmup self).__init__()<line_sep>base_lr_batch_size=256<line_sep>self._total_steps=total_steps<line_sep>self._init_learning_rate=BASE_LEARNING_RATE<times>batch_size/base_lr_batch_size<line_sep>self._warmup_steps=warmup_steps<block_end><def_stmt>__call__ self global_step:int<block_start>global_step=tf.cast(global_step dtype=tf.float32)<line_sep>warmup_steps=self._warmup_steps<line_sep>init_lr=self._init_learning_rate<line_sep>total_steps=self._total_steps<line_sep>linear_warmup=global_step/warmup_steps<times>init_lr<line_sep>cosine_learning_rate=init_lr<times>(tf.cos(np.pi<times>(global_step-warmup_steps)/(total_steps-warmup_steps))+1.0)/2.0<line_sep>learning_rate=tf.where(global_step<l>warmup_steps linear_warmup cosine_learning_rate)<line_sep><return>learning_rate<block_end><def_stmt>get_config self<block_start><return>{"total_steps":self._total_steps "warmup_learning_rate":self._warmup_learning_rate "warmup_steps":self._warmup_steps "init_learning_rate":self._init_learning_rate }<block_end><block_end>
<import_from_stmt>.data Batcher TokenBatcher<import_from_stmt>.model BidirectionalLanguageModel dump_token_embeddings dump_bilm_embeddings<import_from_stmt>.elmo weight_layers<line_sep>
# Generated by Django 3.2.7 on 2021-10-03 07:09

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add per-user (and realm-default) typing-notification toggles.

    Adds the same pair of boolean fields, both defaulting to True, to
    RealmUserDefault and UserProfile.
    """

    dependencies = [
        ("zerver", "0361_realm_create_web_public_stream_policy"),
    ]

    # Same four AddField operations as a literal list would give, in the
    # same order: both fields for realmuserdefault, then both for userprofile.
    operations = [
        migrations.AddField(
            model_name=model_name,
            name=field_name,
            field=models.BooleanField(default=True),
        )
        for model_name in ("realmuserdefault", "userprofile")
        for field_name in (
            "send_private_typing_notifications",
            "send_stream_typing_notifications",
        )
    ]
<import_from_future_stmt> absolute_import<line_sep># flake8: noqa # import apis into api package <import_from_stmt>hubspot.cms.performance.api.public_performance_api PublicPerformanceApi<line_sep>
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for loading op libraries. @@load_op_library """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os<import_stmt>re<import_from_stmt>tensorflow.python.framework load_library<import_from_stmt>tensorflow.python.platform resource_loader<def_stmt>load_op_library path<block_start>"""Loads a contrib op library from the given path. NOTE(mrry): On Windows, we currently assume that some contrib op libraries are statically linked into the main TensorFlow Python extension DLL - use dynamically linked ops if the .so is present. Args: path: An absolute path to a shared object file. Returns: A Python module containing the Python wrappers for Ops defined in the plugin. """<if_stmt>os.name<eq>'nt'# To avoid makeing every user_ops aware of windows, re-write # the file extension from .so to .dll. <block_start>path=re.sub(r'\.so$' '.dll' path)<line_sep># Currently we have only some user_ops as dlls on windows - don't try # to load them if the dll is not found. # TODO(mrry): Once we have all of them this check should be removed. 
<if_stmt><not>os.path.exists(path)<block_start><return><none><block_end><block_end>path=resource_loader.get_path_to_datafile(path)<line_sep>ret=load_library.load_op_library(path)<assert_stmt>ret 'Could not load %s'%path<line_sep><return>ret<block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import unicode_literals<import_stmt>mock<import_stmt>logging<import_from_stmt>django.test TestCase<import_from_stmt>django.db IntegrityError<import_from_stmt>drivers base<import_from_stmt>maintenance.tests factory<as>maintenance_factory<import_from_stmt>physical.tests factory<as>physical_factory<import_from_stmt>physical.models DatabaseInfra<import_from_stmt>logical.tests factory<import_from_stmt>notification.tests.factory TaskHistoryFactory<import_from_stmt>notification.models TaskHistory<import_from_stmt>logical.models Database DatabaseHistory<line_sep>LOG=logging.getLogger(__name__)<line_sep>ERROR_CLONE_WITHOUT_PERSISTENCE="Database does not have persistence cannot be cloned"<line_sep>ERROR_CLONE_IN_QUARANTINE="Database in quarantine cannot be cloned"<line_sep>ERROR_CLONE_NOT_ALIVE="Database is not alive and cannot be cloned"<line_sep>ERROR_DELETE_PROTECTED="Database {} is protected and cannot be deleted"<line_sep>ERROR_DELETE_DEAD="Database {} is not alive and cannot be deleted"<line_sep>ERROR_UPGRADE_MONGO24="MongoDB 2.4 cannot be upgraded by this task."<line_sep>ERROR_UPGRADE_IN_QUARANTINE="Database in quarantine and cannot be upgraded."<line_sep>ERROR_UPGRADE_IS_DEAD="Database is dead and cannot be upgraded."<line_sep>ERROR_UPGRADE_NO_EQUIVALENT_PLAN="Source plan do not has equivalent plan to upgrade."<line_sep>UPGRADE_URL="/admin/logical/database/{}/upgrade/"<line_sep>UPGRADE_RETRY_URL="/admin/logical/database/{}/upgrade_retry/"<class_stmt>FakeDriver(base.BaseDriver)<block_start><def_stmt>get_connection self<block_start><return>'connection-url'<block_end><block_end><class_stmt>DatabaseTestCase(TestCase)<block_start><def_stmt>setUp 
self<block_start>self.instance=physical_factory.InstanceFactory()<line_sep>self.databaseinfra=self.instance.databaseinfra<line_sep>self.engine=FakeDriver(databaseinfra=self.databaseinfra)<line_sep>self.environment=physical_factory.EnvironmentFactory()<line_sep>self.plan_upgrade=physical_factory.PlanFactory()<block_end><def_stmt>tearDown self<block_start>self.engine=<none><block_end><def_stmt>test_create_database self<block_start>database=Database(name="blabla" databaseinfra=self.databaseinfra environment=self.environment)<line_sep>database.save()<line_sep>self.assertTrue(database.pk)<block_end><def_stmt>test_create_duplicate_database_error self<block_start>database=Database(name="bleble" databaseinfra=self.databaseinfra environment=self.environment)<line_sep>database.save()<line_sep>self.assertTrue(database.pk)<line_sep>self.assertRaises(IntegrityError Database(name="bleble" databaseinfra=self.databaseinfra environment=self.environment).save)<block_end><def_stmt>test_slugify_database_name_with_spaces self<block_start>database=factory.DatabaseFactory.build(name="w h a t" databaseinfra=self.databaseinfra environment=self.environment)<line_sep>database.full_clean()<line_sep>database.save()<line_sep>self.assertTrue(database.id)<line_sep>self.assertEqual(database.name 'w_h_a_t')<block_end><def_stmt>test_slugify_database_name_with_dots self<block_start>database=factory.DatabaseFactory.build(name="w.h.e.r.e" databaseinfra=self.databaseinfra environment=self.environment)<line_sep>database.full_clean()<line_sep>database.save()<line_sep>self.assertTrue(database.id)<line_sep>self.assertEqual(database.name 'w_h_e_r_e')<block_end><def_stmt>test_cannot_edit_database_name self<block_start>database=factory.DatabaseFactory(name="w h a t" databaseinfra=self.databaseinfra environment=self.environment)<line_sep>self.assertTrue(database.id)<line_sep>database.name="super3"<line_sep>self.assertRaises(AttributeError database.save)<block_end>@mock.patch.object(DatabaseInfra 
'get_info')<def_stmt>test_new_database_bypass_datainfra_info_cache self get_info<block_start><def_stmt>side_effect_get_info force_refresh=<false><block_start>m=mock.Mock()<if_stmt><not>force_refresh<block_start>m.get_database_status.return_value=<none><line_sep><return>m<block_end>m.get_database_status.return_value=object()<line_sep><return>m<block_end>get_info.side_effect=side_effect_get_info<line_sep>database=factory.DatabaseFactory(name="db1cache" databaseinfra=self.databaseinfra environment=self.environment)<line_sep>self.assertIsNotNone(database.database_status)<line_sep>self.assertEqual([mock.call() mock.call(force_refresh=<true>)] get_info.call_args_list)<block_end><def_stmt>test_can_update_volume_used_disk_size self<block_start>database=factory.DatabaseFactory()<line_sep>database.databaseinfra=self.databaseinfra<line_sep>volume=physical_factory.VolumeFactory()<line_sep>volume.host=self.instance.hostname<line_sep>volume.save()<line_sep>old_used_size=volume.used_size_kb<line_sep>volume=database.update_host_disk_used_size(host_address=self.instance.address used_size_kb=300)<line_sep>self.assertNotEqual(volume.used_size_kb old_used_size)<line_sep>self.assertEqual(volume.used_size_kb 300)<line_sep>old_used_size=volume.used_size_kb<line_sep>volume=database.update_host_disk_used_size(host_address=self.instance.address used_size_kb=500)<line_sep>self.assertNotEqual(volume.used_size_kb old_used_size)<line_sep>self.assertEqual(volume.used_size_kb 500)<block_end><def_stmt>test_cannot_update_volume_used_disk_size_host_not_volume self<block_start>database=factory.DatabaseFactory()<line_sep>database.databaseinfra=self.databaseinfra<line_sep>volume=database.update_host_disk_used_size(host_address=self.instance.address used_size_kb=300)<line_sep>self.assertIsNone(volume)<block_end><def_stmt>test_can_clone 
self<block_start>database=factory.DatabaseFactory()<line_sep>database.status=database.ALIVE<line_sep>can_be_cloned,error=database.can_be_cloned()<line_sep>self.assertTrue(can_be_cloned)<line_sep>self.assertIsNone(error)<block_end><def_stmt>test_cannot_clone_no_persistence self<block_start>database=factory.DatabaseFactory()<line_sep>database.status=database.ALIVE<line_sep>database.plan.has_persistence=<false><line_sep>can_be_cloned,error=database.can_be_cloned()<line_sep>self.assertFalse(can_be_cloned)<line_sep>self.assertEqual(error ERROR_CLONE_WITHOUT_PERSISTENCE)<block_end><def_stmt>test_cannot_clone_in_quarantine self<block_start>database=factory.DatabaseFactory()<line_sep>database.status=database.ALIVE<line_sep>database.is_in_quarantine=<true><line_sep>can_be_cloned,error=database.can_be_cloned()<line_sep>self.assertFalse(can_be_cloned)<line_sep>self.assertEqual(error ERROR_CLONE_IN_QUARANTINE)<block_end><def_stmt>test_cannot_clone_dead self<block_start>database=factory.DatabaseFactory()<line_sep>database.status=database.DEAD<line_sep>database.database_status=<none><line_sep>can_be_cloned,error=database.can_be_cloned()<line_sep>self.assertFalse(can_be_cloned)<line_sep>self.assertEqual(error ERROR_CLONE_NOT_ALIVE)<block_end><def_stmt>test_can_delete self<block_start>database=factory.DatabaseFactory()<line_sep>database.status=database.ALIVE<line_sep>can_be_deleted,error=database.can_be_deleted()<line_sep>self.assertTrue(can_be_deleted)<line_sep>self.assertIsNone(error)<block_end>@mock.patch('logical.models.factory_for')@mock.patch('logical.models.Database.automatic_create_first_credential')<def_stmt>test_insert_on_database_history_when_delete self cred_mock factory_mock<block_start>database=factory.DatabaseFactory(name='test_fake_name' description='__test__ fake 
desc')<line_sep>database_id=database.id<line_sep>database.is_in_quarantine=<true><line_sep>database.is_protected=<false><line_sep>database.status=database.ALIVE<line_sep>database.environment.name='__test__ fake env'<line_sep>database.project.name='__test__ proj name'<line_sep>database.team.name='__test__ team name'<line_sep>database.plan.name='__test__ plan name'<line_sep>database.databaseinfra.name='__test__ infra name'<line_sep>database.databaseinfra.engine.version='v1.2.3'<line_sep>database.databaseinfra.plan.has_persistence=<false><line_sep>database.databaseinfra.engine.engine_type.name='__test__ fake engine type'<line_sep>database.databaseinfra.disk_offering.size_kb=1234<line_sep>database.delete()<line_sep>deleted_databases=DatabaseHistory.objects.filter(database_id=database_id)<line_sep>self.assertEqual(len(deleted_databases) 1)<line_sep>deleted_database=deleted_databases[0]<line_sep>self.assertEqual(deleted_database.database_id database_id)<line_sep>self.assertEqual(deleted_database.name 'test_fake_name')<line_sep>self.assertEqual(deleted_database.description '__test__ fake desc')<line_sep>self.assertEqual(deleted_database.engine '__test__ fake engine type v1.2.3')<line_sep>self.assertEqual(deleted_database.project '__test__ proj name')<line_sep>self.assertEqual(deleted_database.team '__test__ team name')<line_sep>self.assertEqual(deleted_database.databaseinfra_name '__test__ infra name')<line_sep>self.assertEqual(deleted_database.plan '__test__ plan name')<line_sep>self.assertEqual(deleted_database.disk_size_kb 1234)<line_sep>self.assertFalse(deleted_database.has_persistence)<line_sep>self.assertEqual(deleted_database.environment '__test__ fake env')<block_end><def_stmt>test_cannot_delete_protected self<block_start>database=factory.DatabaseFactory()<line_sep>database.status=database.ALIVE<line_sep>database.is_protected=<true><line_sep>can_be_deleted,error=database.can_be_deleted()<line_sep>self.assertFalse(can_be_deleted)<line_sep>self.assertEqual(error 
ERROR_DELETE_PROTECTED.format(database.name))<block_end><def_stmt>test_can_delete_protected_in_quarantine self<block_start>database=factory.DatabaseFactory()<line_sep>database.status=database.ALIVE<line_sep>database.is_protected=<true><line_sep>database.is_in_quarantine=<true><line_sep>can_be_deleted,error=database.can_be_deleted()<line_sep>self.assertTrue(can_be_deleted)<line_sep>self.assertIsNone(error)<block_end><def_stmt>test_can_delete_in_quarantine self<block_start>database=factory.DatabaseFactory()<line_sep>database.status=database.ALIVE<line_sep>database.is_in_quarantine=<true><line_sep>can_be_deleted,error=database.can_be_deleted()<line_sep>self.assertTrue(can_be_deleted)<line_sep>self.assertIsNone(error)<block_end><def_stmt>test_can_upgrade self<block_start>database=factory.DatabaseFactory()<line_sep>database.status=database.ALIVE<line_sep>database.databaseinfra.plan.engine_equivalent_plan=self.plan_upgrade<line_sep>can_do_upgrade,error=database.can_do_upgrade()<line_sep>self.assertTrue(can_do_upgrade)<line_sep>self.assertIsNone(error)<block_end><def_stmt>test_cannot_upgrade_mongo24 self<block_start>mongo=physical_factory.EngineTypeFactory()<line_sep>mongo.name='mongodb'<line_sep>mongo24=physical_factory.EngineFactory()<line_sep>mongo24.engine_type=mongo<line_sep>mongo24.version='2.4.xxx'<line_sep>database=factory.DatabaseFactory()<line_sep>database.status=database.ALIVE<line_sep>infra=database.databaseinfra<line_sep>infra.engine=mongo24<line_sep>database.databaseinfra=infra<line_sep>can_do_upgrade,error=database.can_do_upgrade()<line_sep>self.assertFalse(can_do_upgrade)<line_sep>self.assertEqual(error ERROR_UPGRADE_MONGO24)<block_end><def_stmt>test_cannot_upgrade_in_quarantine self<block_start>database=factory.DatabaseFactory()<line_sep>database.status=database.ALIVE<line_sep>database.is_in_quarantine=<true><line_sep>can_do_upgrade,error=database.can_do_upgrade()<line_sep>self.assertFalse(can_do_upgrade)<line_sep>self.assertEqual(error 
ERROR_UPGRADE_IN_QUARANTINE)<block_end><def_stmt>test_cannot_upgrade_dead self<block_start>database=factory.DatabaseFactory()<line_sep>database.databaseinfra.plan.engine_equivalent_plan=self.plan_upgrade<line_sep>database.status=database.DEAD<line_sep>can_do_upgrade,error=database.can_do_upgrade()<line_sep>self.assertFalse(can_do_upgrade)<line_sep>self.assertEqual(error ERROR_UPGRADE_IS_DEAD)<block_end><def_stmt>test_cannot_upgrade_no_equivalent_plan self<block_start>database=factory.DatabaseFactory()<line_sep>database.status=database.ALIVE<line_sep>can_do_upgrade,error=database.can_do_upgrade()<line_sep>self.assertFalse(can_do_upgrade)<line_sep>self.assertEqual(error ERROR_UPGRADE_NO_EQUIVALENT_PLAN)<block_end><def_stmt>test_get_upgrade_url self<block_start>database=factory.DatabaseFactory()<line_sep>expected_url=UPGRADE_URL.format(database.id)<line_sep>returned_url=database.get_upgrade_url()<line_sep>self.assertEqual(returned_url expected_url)<block_end><def_stmt>test_get_upgrade_retry_url self<block_start>database=factory.DatabaseFactory()<line_sep>expected_url=UPGRADE_RETRY_URL.format(database.id)<line_sep>returned_url=database.get_upgrade_retry_url()<line_sep>self.assertEqual(returned_url expected_url)<block_end><def_stmt>test_last_successful_upgrade self<block_start>database=factory.DatabaseFactory()<line_sep>self.assertIsNone(database.last_successful_upgrade)<line_sep>upgrade=maintenance_factory.DatabaseUpgradeFactory()<line_sep>upgrade.database=database<line_sep>upgrade.save()<line_sep>self.assertIsNone(database.last_successful_upgrade)<line_sep>upgrade.set_success()<line_sep>self.assertEqual(database.last_successful_upgrade upgrade)<block_end><def_stmt>test_last_successful_upgrade_with_error 
self<block_start>database=factory.DatabaseFactory()<line_sep>upgrade=maintenance_factory.DatabaseUpgradeFactory()<line_sep>upgrade.database=database<line_sep>upgrade.set_error()<line_sep>self.assertIsNone(database.last_successful_upgrade)<block_end><def_stmt>test_current_task_lock self<block_start>database=factory.DatabaseFactory()<line_sep>task1=TaskHistoryFactory()<line_sep>task2=TaskHistoryFactory()<line_sep>database.pin_task(task1)<line_sep>self.assertFalse(database.pin_task(task2))<line_sep>database.unpin_task()<line_sep>self.assertTrue(database.pin_task(task2))<block_end><def_stmt>test_lock_retry self<block_start>database=factory.DatabaseFactory()<line_sep>task1=TaskHistoryFactory()<line_sep>task2=TaskHistoryFactory()<line_sep>task3=TaskHistoryFactory()<line_sep>task1.task_status=TaskHistory.STATUS_ERROR<line_sep>task1.save()<line_sep>task2.task_name=task1.task_name<line_sep>task2.save()<line_sep>database.pin_task(task1)<line_sep>self.assertFalse(database.update_task(task3))<line_sep>self.assertTrue(database.update_task(task2))<line_sep>self.assertFalse(database.update_task(task2))<line_sep>database.unpin_task()<line_sep>self.assertTrue(database.pin_task(task3))<block_end><block_end>
import unittest

import numpy
import six

import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr


@testing.parameterize(*testing.product_dict(
    [{'dtype': numpy.float16,
      'forward_options': {'rtol': 3e-3, 'atol': 3e-3},
      'backward_options': {'rtol': 1e-1, 'atol': 1e-1}},
     {'dtype': numpy.float32,
      'forward_options': {},
      'backward_options': {'rtol': 1e-1, 'atol': 1e-1}},
     {'dtype': numpy.float64,
      'forward_options': {},
      'backward_options': {'rtol': 1e-1, 'atol': 1e-1}},
     ],
    [{'reduce': 'no'},
     {'reduce': 'mean'},
     ],
    [{'norm': 'L1'},
     {'norm': 'L2'},
     ],
    [{'label_dtype': numpy.int8},
     {'label_dtype': numpy.int16},
     {'label_dtype': numpy.int32},
     {'label_dtype': numpy.int64},
     ],
))
class TestHinge(unittest.TestCase):
    """Forward/backward tests for functions.hinge across dtypes and options."""

    def setUp(self):
        self._config_user = chainer.using_config('dtype', self.dtype)
        self._config_user.__enter__()

        shape = (10, 5)
        self.x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        # Avoid values around -1.0 for stability
        self.x[numpy.logical_and(-1.01 < self.x, self.x < -0.99)] = 0.5
        self.t = numpy.random.randint(
            0, shape[1], shape[:1]).astype(self.label_dtype)
        if self.reduce == 'no':
            self.gy = numpy.random.uniform(
                -1, 1, self.x.shape).astype(self.dtype)

    def tearDown(self):
        self._config_user.__exit__(None, None, None)

    def check_forward(self, x_data, t_data):
        x_val = chainer.Variable(x_data)
        t_val = chainer.Variable(t_data, requires_grad=False)
        loss = functions.hinge(x_val, t_val, self.norm, self.reduce)
        if self.reduce == 'mean':
            self.assertEqual(loss.data.shape, ())
        else:
            self.assertEqual(loss.data.shape, self.x.shape)
        self.assertEqual(loss.data.dtype, self.dtype)
        loss_value = cuda.to_cpu(loss.data)

        # Compute the expected value by hand: negate the margin of the true
        # class, clip at the hinge point, then apply the chosen norm.
        for i in six.moves.range(self.x.shape[0]):
            self.x[i, self.t[i]] *= -1
        for i in six.moves.range(self.x.shape[0]):
            for j in six.moves.range(self.x.shape[1]):
                self.x[i, j] = max(0, 1.0 + self.x[i, j])
        if self.norm == 'L1':
            loss_expect = self.x
        elif self.norm == 'L2':
            loss_expect = self.x ** 2
        if self.reduce == 'mean':
            loss_expect = numpy.sum(loss_expect) / self.x.shape[0]

        testing.assert_allclose(
            loss_expect, loss_value, **self.forward_options)

    def test_forward_cpu(self):
        self.check_forward(self.x, self.t)

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))

    @attr.chainerx
    def test_forward_chainerx_native(self):
        self.check_forward(backend.to_chx(self.x), backend.to_chx(self.t))

    @attr.gpu
    @attr.chainerx
    def test_forward_chainerx_cuda(self):
        self.check_forward(
            backend.to_chx(cuda.to_gpu(self.x)),
            backend.to_chx(cuda.to_gpu(self.t)))

    def check_backward(self, x_data, t_data):
        def f(x, t):
            return functions.hinge(x, t, self.norm)

        gradient_check.check_backward(
            f, (x_data, t_data), None, dtype='d', **self.backward_options)

    def check_backward_chainerx(self, x_data, t_data):
        # TODO(niboshi): gradient_check does not support integer input
        # (no_grads) for ChainerX. Support it and merge this method with
        # `self.check_backward`.
        def f(x):
            return functions.hinge(x, t_data, self.norm)

        gradient_check.check_backward(
            f, (x_data,), None, dtype='d', **self.backward_options)

    def test_backward_cpu(self):
        self.check_backward(self.x, self.t)

    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))

    @attr.chainerx
    def test_backward_chainerx_native(self):
        self.check_backward_chainerx(
            backend.to_chx(self.x), backend.to_chx(self.t))

    @attr.gpu
    @attr.chainerx
    def test_backward_chainerx_cuda(self):
        self.check_backward_chainerx(
            backend.to_chx(cuda.to_gpu(self.x)),
            backend.to_chx(cuda.to_gpu(self.t)))


class TestHingeInvalidOption(unittest.TestCase):
    """functions.hinge must reject unknown `norm` and `reduce` options."""

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32)
        self.t = numpy.random.randint(0, 5, (10,)).astype(numpy.int32)

    def check_invalid_norm_option(self, xp):
        x = xp.asarray(self.x)
        t = xp.asarray(self.t)
        with self.assertRaises(NotImplementedError):
            functions.hinge(x, t, 'invalid_norm', 'mean')

    def test_invalid_norm_option_cpu(self):
        self.check_invalid_norm_option(numpy)

    @attr.gpu
    def test_invalid_norm_option_gpu(self):
        self.check_invalid_norm_option(cuda.cupy)

    def check_invalid_reduce_option(self, xp):
        x = xp.asarray(self.x)
        t = xp.asarray(self.t)
        with self.assertRaises(ValueError):
            functions.hinge(x, t, 'L1', 'invalid_option')

    def test_invalid_reduce_option_cpu(self):
        self.check_invalid_reduce_option(numpy)

    @attr.gpu
    def test_invalid_reduce_option_gpu(self):
        self.check_invalid_reduce_option(cuda.cupy)


testing.run_module(__name__, __file__)
<import_from_future_stmt> print_function<import_stmt>codecs<import_stmt>logging<import_stmt>os<import_stmt>sys<import_from_stmt>optparse OptionParser<import_from_stmt>pypugjs.utils process<def_stmt>convert_file <block_start>support_compilers_list=['django' 'jinja' 'underscore' 'mako' 'tornado' 'html' ]<line_sep>available_compilers={}<for_stmt>i support_compilers_list<block_start><try_stmt><block_start>compiler_class=__import__('pypugjs.ext.%s'%i fromlist=['pypugjs']).Compiler<block_end><except_stmt>ImportError<as>e<block_start>logging.warning(e)<block_end><else_stmt><block_start>available_compilers[i]=compiler_class<block_end><block_end>usage="usage: %prog [options] [file [output]]"<line_sep>parser=OptionParser(usage)<line_sep>parser.add_option("-o" "--output" dest="output" help="Write output to FILE" metavar="FILE")<line_sep># use a default compiler here to sidestep making a particular # compiler absolutely necessary (ex. django) default_compiler=sorted(available_compilers.keys())[0]<line_sep>parser.add_option("-c" "--compiler" dest="compiler" choices=list(available_compilers.keys()) default=default_compiler type="choice" help=("COMPILER must be one of %s, default is %s"%(', '.join(list(available_compilers.keys())) default_compiler)) )<line_sep>parser.add_option("-e" "--ext" dest="extension" help="Set import/extends default file extension" metavar="FILE" )<line_sep>options,args=parser.parse_args()<line_sep>file_output=options.output<or>(args[1]<if>len(args)<g>1<else><none>)<line_sep>compiler=options.compiler<if_stmt>options.extension<block_start>extension='.%s'%options.extension<block_end><elif_stmt>options.output<block_start>extension=os.path.splitext(options.output)[1]<block_end><else_stmt><block_start>extension=<none><block_end><if_stmt>compiler<in>available_compilers<block_start><import_stmt>six<if_stmt>len(args)<ge>1<block_start>template=codecs.open(args[0] 'r' 
encoding='utf-8').read()<block_end><elif_stmt>six.PY3<block_start>template=sys.stdin.read()<block_end><else_stmt><block_start>template=codecs.getreader('utf-8')(sys.stdin).read()<block_end>output=process(template compiler=available_compilers[compiler] staticAttrs=<true> extension=extension )<if_stmt>file_output<block_start>outfile=codecs.open(file_output 'w' encoding='utf-8')<line_sep>outfile.write(output)<block_end><elif_stmt>six.PY3<block_start>sys.stdout.write(output)<block_end><else_stmt><block_start>codecs.getwriter('utf-8')(sys.stdout).write(output)<block_end><block_end><else_stmt><block_start><raise>Exception('You must have %s installed!'%compiler)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>convert_file()<block_end>
<import_from_stmt>hachoir_parser.game.zsnes ZSNESFile<import_from_stmt>hachoir_parser.game.spider_man_video SpiderManVideoFile<import_from_stmt>hachoir_parser.game.laf LafFile<import_from_stmt>hachoir_parser.game.blp BLP1File BLP2File<import_from_stmt>hachoir_parser.game.uasset UAssetFile<line_sep>
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from math import ceil
from constants import ENV_NAMES
import seaborn  # noqa: F401 -- imported for its side effect of styling matplotlib

# Color cycle (RGB tuples), one entry per curve family (i.e. per suffix).
COLORS = [(57, 106, 177), (218, 124, 48)]


def switch_to_outer_plot(fig):
    """Return an invisible axes spanning the whole figure (useful for shared axis labels)."""
    ax0 = fig.add_subplot(111, frame_on=False)
    ax0.set_xticks([])
    ax0.set_yticks([])
    return ax0


def ema(data_in, smoothing=0):
    """Exponential moving average of a 1-D array.

    Leading NaNs are skipped until the first finite value seeds the average.
    ``smoothing`` is the decay factor; 0 returns the input unchanged.
    """
    data_out = np.zeros_like(data_in)
    curr = np.nan
    for i in range(len(data_in)):
        x = data_in[i]
        if np.isnan(curr):
            curr = x
        else:
            curr = (1 - smoothing) * x + smoothing * curr
        data_out[i] = curr
    return data_out


def plot_data_mean_std(ax, data_y, color_idx=0, data_x=None, x_scale=1,
                       smoothing=0, first_valid=0, label=None):
    """Plot the across-run mean of ``data_y`` with a +/- 1 sample-std band.

    Args:
        ax: matplotlib axes to draw on.
        data_y: 2-D array of shape (num_runs, num_datapoints).
        color_idx: index into COLORS.
        data_x: explicit x values; derived from the datapoint index when None.
        x_scale: multiplier applied to the derived x values.
        smoothing: EMA decay applied per run before averaging.
        first_valid: number of leading datapoints to drop.
        label: legend label for the mean curve.
    """
    color = COLORS[color_idx]
    hexcolor = '#%02x%02x%02x' % color
    # Copy before smoothing: a plain slice would be a writable view, and the
    # in-place EMA below would silently mutate the caller's array.
    data_y = np.array(data_y)[:, first_valid:]
    nx, num_datapoint = np.shape(data_y)
    if smoothing > 0:
        for i in range(nx):
            data_y[i, ...] = ema(data_y[i, ...], smoothing)
    if data_x is None:
        data_x = (np.arange(num_datapoint) + first_valid) * x_scale
    data_mean = np.mean(data_y, axis=0)
    data_std = np.std(data_y, axis=0, ddof=1)  # sample std across runs
    ax.plot(data_x, data_mean, color=hexcolor, label=label, linestyle='solid',
            alpha=1, rasterized=True)
    ax.fill_between(data_x, data_mean - data_std, data_mean + data_std,
                    color=hexcolor, alpha=.25, linewidth=0.0, rasterized=True)


def read_csv(filename, key_name):
    """Read one column of a CSV (selected by lower-cased header name) as float32.

    Raises:
        ValueError: if ``key_name`` does not appear in the header row.
    """
    with open(filename) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        key_index = -1
        values = []
        for line_num, row in enumerate(csv_reader):
            row = [x.lower() for x in row]
            if line_num == 0:
                idxs = [i for i, val in enumerate(row) if val == key_name]
                if not idxs:
                    # Previously this fell through to a bare IndexError.
                    raise ValueError(f'column {key_name!r} not found in {filename}')
                key_index = idxs[0]
            else:
                values.append(row[key_index])
    return np.array(values, dtype=np.float32)


def plot_values(ax, all_values, title=None, max_x=0, label=None, **kwargs):
    """Optionally truncate ``all_values`` to ``max_x`` datapoints, plot it when
    ``ax`` is given, and return the (possibly truncated) array."""
    if max_x > 0:
        all_values = all_values[..., :max_x]
    if ax is not None:
        plot_data_mean_std(ax, all_values, label=label, **kwargs)
        ax.set_title(title)
    return all_values


def plot_experiment(run_directory_prefix, titles=None, suffixes=None,
                    normalization_ranges=None, key_name='eprewmean', **kwargs):
    """Plot training curves for all environments, averaged over 3 runs.

    CSVs are read from ``results/{prefix}{run}/progress-{env}[-{suffix}].csv``
    for run in 0..2.

    Args:
        run_directory_prefix: prefix of the three run folders under results/.
        titles: unused; kept for interface compatibility.
        suffixes: per-curve-family CSV name suffixes; defaults to [''].
        normalization_ranges: optional dict env_name -> (min, max). When given,
            per-env scores are normalized into [0, 1] and reduced to a single
            mean-score plot instead of one subplot per environment.
        key_name: CSV column to read.
        **kwargs: forwarded to plot_data_mean_std via plot_values.

    Returns:
        (figure, axes) from matplotlib.
    """
    # None-sentinel instead of a mutable default argument.
    suffixes = [''] if suffixes is None else suffixes
    run_folders = [f'{run_directory_prefix}{x}' for x in range(3)]
    num_envs = len(ENV_NAMES)
    will_normalize_and_reduce = normalization_ranges is not None
    if will_normalize_and_reduce:
        num_visible_plots = 1
        f, axarr = plt.subplots()
    else:
        num_visible_plots = num_envs
        dimx = dimy = ceil(np.sqrt(num_visible_plots))
        f, axarr = plt.subplots(dimx, dimy, sharex=True)
    for suffix_idx, suffix in enumerate(suffixes):
        all_values = []
        game_weights = [1] * num_envs
        for env_idx in range(num_envs):
            env_name = ENV_NAMES[env_idx]
            label = suffix if env_idx == 0 else None  # only label the first graph to avoid legend duplicates
            print(f'loading results from {env_name}...')
            if num_visible_plots == 1:
                ax = axarr
            else:
                dimy = len(axarr[0])
                ax = axarr[env_idx // dimy][env_idx % dimy]
            csv_files = [f"results/{resid}/progress-{env_name}{'-' if len(suffix) > 0 else ''}{suffix}.csv"
                         for resid in run_folders]
            # In reduce mode we only collect values; nothing is drawn per-env.
            curr_ax = None if will_normalize_and_reduce else ax
            raw_data = np.array([read_csv(file, key_name) for file in csv_files])
            values = plot_values(curr_ax, raw_data, title=env_name,
                                 color_idx=suffix_idx, label=label, **kwargs)
            if will_normalize_and_reduce:
                game_range = normalization_ranges[env_name]
                game_min, game_max = game_range[0], game_range[1]
                game_delta = game_max - game_min
                sub_values = game_weights[env_idx] * (np.array(values) - game_min) / game_delta
                all_values.append(sub_values)
        if will_normalize_and_reduce:
            normalized_data = np.sum(all_values, axis=0)
            normalized_data = normalized_data / np.sum(game_weights)
            # NOTE(review): the original assigned title='Mean Normalized Score'
            # here but still passed title=None below; behavior kept as-is.
            plot_values(ax, normalized_data, title=None, color_idx=suffix_idx,
                        label=suffix, **kwargs)
    if len(suffixes) > 1:
        if num_visible_plots == 1:
            ax.legend(loc='lower right')
        else:
            f.legend(loc='lower right', bbox_to_anchor=(.5, 0, .5, 1))
    return f, axarr
import torch
from torch import nn, Tensor
from torch.nn import functional as F


class BasicBlock(nn.Module):
    """3x3 + 3x3 residual block. ``no_relu=True`` skips the final activation so
    the caller can fuse branches before applying ReLU."""
    expansion = 1

    def __init__(self, c1, c2, s=1, downsample=None, no_relu=False) -> None:
        super().__init__()
        self.conv1 = nn.Conv2d(c1, c2, 3, s, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(c2)
        self.conv2 = nn.Conv2d(c2, c2, 3, 1, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(c2)
        self.downsample = downsample
        self.no_relu = no_relu

    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        return out if self.no_relu else F.relu(out)


class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck with channel expansion 2."""
    expansion = 2

    def __init__(self, c1, c2, s=1, downsample=None, no_relu=False) -> None:
        super().__init__()
        self.conv1 = nn.Conv2d(c1, c2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(c2)
        self.conv2 = nn.Conv2d(c2, c2, 3, s, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(c2)
        self.conv3 = nn.Conv2d(c2, c2 * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(c2 * self.expansion)
        self.downsample = downsample
        self.no_relu = no_relu

    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        return out if self.no_relu else F.relu(out)


class ConvBN(nn.Sequential):
    """Conv2d (no bias) followed by BatchNorm, no activation."""

    def __init__(self, c1, c2, k, s=1, p=0):
        super().__init__(
            nn.Conv2d(c1, c2, k, s, p, bias=False),
            nn.BatchNorm2d(c2)
        )


class Conv2BN(nn.Sequential):
    """Two Conv-BN stages (ReLU in between); both convs share k/s/p."""

    def __init__(self, c1, ch, c2, k, s=1, p=0):
        super().__init__(
            nn.Conv2d(c1, ch, k, s, p, bias=False),
            nn.BatchNorm2d(ch),
            nn.ReLU(),
            nn.Conv2d(ch, c2, k, s, p, bias=False),
            nn.BatchNorm2d(c2)
        )


class Stem(nn.Sequential):
    """Input stem: two stride-2 3x3 convs -> 1/4 spatial resolution."""

    def __init__(self, c1, c2):
        super().__init__(
            nn.Conv2d(c1, c2, 3, 2, 1),
            nn.BatchNorm2d(c2),
            nn.ReLU(),
            nn.Conv2d(c2, c2, 3, 2, 1),
            nn.BatchNorm2d(c2),
            nn.ReLU()
        )


class Scale(nn.Sequential):
    """AvgPool -> BN -> ReLU -> 1x1 conv; one pyramid branch of DAPPM."""

    def __init__(self, c1, c2, k, s=None, p=0):
        super().__init__(
            nn.AvgPool2d(k, s, p),
            nn.BatchNorm2d(c1),
            nn.ReLU(),
            nn.Conv2d(c1, c2, 1, bias=False)
        )


class ConvModule(nn.Sequential):
    """Pre-activation conv: BN -> ReLU -> Conv2d (no bias)."""

    def __init__(self, c1, c2, k, s=1, p=0):
        super().__init__(
            nn.BatchNorm2d(c1),
            nn.ReLU(),
            nn.Conv2d(c1, c2, k, s, p, bias=False)
        )


class DAPPM(nn.Module):
    """Deep Aggregation Pyramid Pooling Module: multi-scale pooled branches are
    upsampled back to the input size, cascaded, concatenated, and compressed."""

    def __init__(self, c1, ch, c2):
        super().__init__()
        self.scale1 = Scale(c1, ch, 5, 2, 2)
        self.scale2 = Scale(c1, ch, 9, 4, 4)
        self.scale3 = Scale(c1, ch, 17, 8, 8)
        # NOTE(review): a 1x1 average pool is an identity; upstream DDRNet uses
        # global pooling for this branch -- confirm this variant is intended.
        self.scale4 = Scale(c1, ch, 1)
        self.scale0 = ConvModule(c1, ch, 1)
        self.process1 = ConvModule(ch, ch, 3, 1, 1)
        self.process2 = ConvModule(ch, ch, 3, 1, 1)
        self.process3 = ConvModule(ch, ch, 3, 1, 1)
        self.process4 = ConvModule(ch, ch, 3, 1, 1)
        self.compression = ConvModule(ch * 5, c2, 1)
        self.shortcut = ConvModule(c1, c2, 1)

    def forward(self, x: Tensor) -> Tensor:
        # Each branch is upsampled to x's size and fused with the previous one.
        outs = [self.scale0(x)]
        outs.append(self.process1((F.interpolate(self.scale1(x), size=x.shape[-2:],
                                                 mode='bilinear', align_corners=True) + outs[-1])))
        outs.append(self.process2((F.interpolate(self.scale2(x), size=x.shape[-2:],
                                                 mode='bilinear', align_corners=True) + outs[-1])))
        outs.append(self.process3((F.interpolate(self.scale3(x), size=x.shape[-2:],
                                                 mode='bilinear', align_corners=True) + outs[-1])))
        outs.append(self.process4((F.interpolate(self.scale4(x), size=x.shape[-2:],
                                                 mode='bilinear', align_corners=True) + outs[-1])))
        out = self.compression(torch.cat(outs, dim=1)) + self.shortcut(x)
        return out


class SegHead(nn.Module):
    """Segmentation head: BN-ReLU-Conv3x3 -> BN-ReLU-Conv1x1, with optional
    bilinear upsampling by ``scale_factor``."""

    def __init__(self, c1, ch, c2, scale_factor=None):
        super().__init__()
        self.bn1 = nn.BatchNorm2d(c1)
        self.conv1 = nn.Conv2d(c1, ch, 3, 1, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(ch)
        self.conv2 = nn.Conv2d(ch, c2, 1)
        self.scale_factor = scale_factor

    def forward(self, x: Tensor) -> Tensor:
        x = self.conv1(F.relu(self.bn1(x)))
        x = self.conv2(F.relu(self.bn2(x)))
        if self.scale_factor is not None:
            H, W = x.shape[-2] * self.scale_factor, x.shape[-1] * self.scale_factor
            x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)
        return x


class DDRNet(nn.Module):
    """Dual-resolution network for semantic segmentation (DDRNet).

    A high-resolution branch (layer3_/4_/5_) and a low-resolution branch
    (layer3/4/5) exchange information via compression/down modules; a DAPPM
    enriches the low-resolution features before the final head.

    In training mode ``forward`` returns (main_logits, aux_logits); in eval
    mode only the main logits, both upsampled 8x by the seg heads.
    """

    def __init__(self, backbone: str = None, num_classes: int = 19) -> None:
        super().__init__()
        # ``backbone`` is unused here; kept for interface compatibility.
        planes, spp_planes, head_planes = [32, 64, 128, 256, 512], 128, 64
        self.conv1 = Stem(3, planes[0])
        self.layer1 = self._make_layer(BasicBlock, planes[0], planes[0], 2)
        self.layer2 = self._make_layer(BasicBlock, planes[0], planes[1], 2, 2)
        self.layer3 = self._make_layer(BasicBlock, planes[1], planes[2], 2, 2)
        self.layer4 = self._make_layer(BasicBlock, planes[2], planes[3], 2, 2)
        self.layer5 = self._make_layer(Bottleneck, planes[3], planes[3], 1, 2)
        # High-resolution branch.
        self.layer3_ = self._make_layer(BasicBlock, planes[1], planes[1], 2)
        self.layer4_ = self._make_layer(BasicBlock, planes[1], planes[1], 2)
        self.layer5_ = self._make_layer(Bottleneck, planes[1], planes[1], 1)
        # Low->high (compression) and high->low (down) fusion modules.
        self.compression3 = ConvBN(planes[2], planes[1], 1)
        self.compression4 = ConvBN(planes[3], planes[1], 1)
        self.down3 = ConvBN(planes[1], planes[2], 3, 2, 1)
        self.down4 = Conv2BN(planes[1], planes[2], planes[3], 3, 2, 1)
        self.spp = DAPPM(planes[-1], spp_planes, planes[2])
        self.seghead_extra = SegHead(planes[1], head_planes, num_classes, 8)
        self.final_layer = SegHead(planes[2], head_planes, num_classes, 8)

        self.apply(self._init_weights)

    def _init_weights(self, m: nn.Module) -> None:
        """Kaiming init for convs; unit-gamma/zero-beta for batch norms."""
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)

    def init_pretrained(self, pretrained: str = None) -> None:
        """Load (non-strict) weights from a checkpoint path, if given."""
        if pretrained:
            self.load_state_dict(torch.load(pretrained, map_location='cpu'), strict=False)

    def _make_layer(self, block, inplanes, planes, depths, s=1) -> nn.Sequential:
        """Stack ``depths`` residual blocks; the last is built with no_relu=True
        so the caller controls the activation at branch-fusion points."""
        downsample = None
        if inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion, 1, s, bias=False),
                nn.BatchNorm2d(planes * block.expansion)
            )
        layers = [block(inplanes, planes, s, downsample)]
        inplanes = planes * block.expansion
        for i in range(1, depths):
            if i == depths - 1:
                layers.append(block(inplanes, planes, no_relu=True))
            else:
                # Fixed: was ``layers.appned`` -- an AttributeError triggered
                # whenever depths > 2.
                layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x: Tensor) -> Tensor:
        H, W = x.shape[-2] // 8, x.shape[-1] // 8  # 1/8 resolution of the input
        layers = []

        x = self.conv1(x)
        x = self.layer1(x)
        layers.append(x)
        x = self.layer2(F.relu(x))
        layers.append(x)
        x = self.layer3(F.relu(x))
        layers.append(x)

        # Bilateral fusion at stage 3.
        x_ = self.layer3_(F.relu(layers[1]))
        x = x + self.down3(F.relu(x_))
        x_ = x_ + F.interpolate(self.compression3(F.relu(layers[2])), size=(H, W),
                                mode='bilinear', align_corners=True)
        if self.training:
            x_aux = self.seghead_extra(x_)

        x = self.layer4(F.relu(x))
        layers.append(x)

        # Bilateral fusion at stage 4.
        x_ = self.layer4_(F.relu(x_))
        x = x + self.down4(F.relu(x_))
        x_ = x_ + F.interpolate(self.compression4(F.relu(layers[3])), size=(H, W),
                                mode='bilinear', align_corners=True)

        x_ = self.layer5_(F.relu(x_))
        x = F.interpolate(self.spp(self.layer5(F.relu(x))), size=(H, W),
                          mode='bilinear', align_corners=True)

        x_ = self.final_layer(x + x_)
        return (x_, x_aux) if self.training else x_


if __name__ == '__main__':
    model = DDRNet()
    # model.init_pretrained('checkpoints/backbones/ddrnet/ddrnet_23slim.pth')
    # model.load_state_dict(torch.load('checkpoints/pretrained/ddrnet/ddrnet_23slim_city.pth', map_location='cpu'))
    x = torch.zeros(2, 3, 224, 224)
    outs = model(x)
    for y in outs:
        print(y.shape)
"""
While *nussl* does not come with any data sets, it does have the capability
to interface with many common source separation data sets used within the
MIR and speech separation communities. These data set "hooks" subclass
BaseDataset and by default return AudioSignal objects in labeled
dictionaries for ease of use. Transforms can be applied to these datasets
for use in machine learning pipelines.
"""
import os

import jams

from .. import musdb
from ..core import constants, utils
from .base_dataset import BaseDataset, DataSetException


class MUSDB18(BaseDataset):
    """
    Hook for MUSDB18. Uses the musdb.DB object to access the dataset.
    If ``download=True``, then the 7s snippets of each track are downloaded
    to ``self.folder``. If no folder is given, then the tracks are
    downloaded to ~/.nussl/musdb18.

    Getting an item from this dataset with no transforms returns the
    following dictionary:

    .. code-block:: none

        {
            'mix': [AudioSignal object containing mix audio],
            'sources': {
                'bass': [AudioSignal object containing bass],
                'drums': [AudioSignal object containing drums],
                'other': [AudioSignal object containing other],
                'vocals': [AudioSignal object containing vocals],
            }
            'metadata': {
                'labels': ['bass', 'drums', 'other', 'vocals']
            }
        }

    Args:
        folder (str, optional): Location that should be processed to produce
            the list of files. Defaults to None.
        is_wav (bool, optional): Expect subfolder with wav files for each
            source instead of stems. Defaults to False.
        download (bool, optional): Download sample version of MUSDB18 which
            includes 7s excerpts. Defaults to False.
        subsets (list, optional): Select a musdb subset, train or test.
            Defaults to ['train', 'test'] (all tracks).
        split (str, optional): When subset train is loaded, split selects the
            train/validation split. split='train' loads the training split,
            split='valid' loads the validation split. split=None applies no
            splitting. Defaults to None.
        **kwargs: Any additional arguments that are passed up to BaseDataset
            (see ``nussl.datasets.BaseDataset``).
    """
    DATASET_HASHES = {
        "musdb": "56777516ad56fe6a8590badf877e6be013ff932c010e0fbdb0aba03ef878d4cd",
    }

    def __init__(self, folder=None, is_wav=False, download=False,
                 subsets=None, split=None, **kwargs):
        subsets = ['train', 'test'] if subsets is None else subsets
        if folder is None:
            folder = os.path.join(constants.DEFAULT_DOWNLOAD_DIRECTORY, 'musdb18')
        self.musdb = musdb.DB(root=folder, is_wav=is_wav, download=download,
                              subsets=subsets, split=split)
        super().__init__(folder, **kwargs)
        self.metadata['subsets'] = subsets
        self.metadata['split'] = split

    def get_items(self, folder):
        # Items are simply indices into the underlying musdb.DB track list.
        items = range(len(self.musdb))
        return list(items)

    def process_item(self, item):
        """Convert one musdb track into the mix/sources/metadata dictionary."""
        track = self.musdb[item]
        mix, sources = utils.musdb_track_to_audio_signals(track)
        self._setup_audio_signal(mix)
        for source in list(sources.values()):
            self._setup_audio_signal(source)
        output = {
            'mix': mix,
            'sources': sources,
            'metadata': {
                'labels': ['bass', 'drums', 'other', 'vocals']
            }
        }
        return output


class MixSourceFolder(BaseDataset):
    """
    This dataset expects your data to be formatted in the following way:

    .. code-block:: none

        data/
            mix/
                [file0].wav
                [file1].wav
                ...
            [label0]/
                [file0].wav
                [file1].wav
                ...
            [label1]/
                [file0].wav
                [file1].wav
                ...
            ...

    Note that the filenames match between the mix folder and each source
    folder. The source folder names can be whatever you want. Given a file
    in the ``self.mix_folder`` folder, this dataset will look up the
    corresponding files with the same name in the source folders. These are
    the source audio files. The sum of the sources should equal the mixture.
    Each source will be labeled according to the folder name it comes from.

    Getting an item from this dataset with no transforms returns the
    following dictionary:

    .. code-block:: none

        {
            'mix': [AudioSignal object containing mix audio],
            'sources': {
                '[label0]': [AudioSignal object containing label0 audio],
                '[label1]': [AudioSignal object containing label1 audio],
                ...
            }
            'metadata': {
                'labels': ['label0', 'label1', ...]
            }
        }

    Args:
        folder (str, optional): Location that should be processed to produce
            the list of files. Defaults to None.
        mix_folder (str, optional): Folder to look in for mixtures.
            Defaults to 'mix'.
        source_folders (list, optional): List of folders to look in for
            sources. Path is defined relative to folder. If None, all folders
            other than mix_folder are treated as the source folders.
            Defaults to None.
        ext (list, optional): Audio extensions to look for in mix_folder.
            Defaults to ['.wav', '.flac', '.mp3'].
        make_mix (bool, optional): If True, the mixture is built on the fly by
            summing the sources, and the first source folder is used only to
            enumerate item names. Defaults to False.
        **kwargs: Any additional arguments that are passed up to BaseDataset
            (see ``nussl.datasets.BaseDataset``).
    """

    def __init__(self, folder, mix_folder='mix', source_folders=None,
                 ext=None, make_mix=False, **kwargs):
        self.mix_folder = mix_folder
        self.source_folders = source_folders
        self.ext = ['.wav', '.flac', '.mp3'] if ext is None else ext
        self.make_mix = make_mix
        super().__init__(folder, **kwargs)

    def get_items(self, folder):
        if self.source_folders is None:
            # Every subfolder except the mix folder is a source folder.
            self.source_folders = sorted([
                f for f in os.listdir(folder)
                if os.path.isdir(os.path.join(folder, f)) and f != self.mix_folder
            ])
        if self.make_mix:
            # No mix on disk; enumerate item names from the first source folder.
            mix_folder = os.path.join(folder, self.source_folders[0])
        else:
            mix_folder = os.path.join(folder, self.mix_folder)
        items = sorted([
            x for x in os.listdir(mix_folder)
            if os.path.splitext(x)[1] in self.ext
        ])
        return items

    def get_mix_and_sources(self, item):
        """Load the sources for ``item``; load (or sum up) the mixture."""
        sources = {}
        for k in self.source_folders:
            source_path = os.path.join(self.folder, k, item)
            # A source may be absent for a given item; skip it silently.
            if os.path.exists(source_path):
                sources[k] = self._load_audio_file(source_path)
        if self.make_mix:
            mix = sum(list(sources.values()))
        else:
            mix_path = os.path.join(self.folder, self.mix_folder, item)
            mix = self._load_audio_file(mix_path)
        return mix, sources

    def process_item(self, item):
        mix, sources = self.get_mix_and_sources(item)
        output = {
            'mix': mix,
            'sources': sources,
            'metadata': {
                'labels': self.source_folders
            }
        }
        return output


class Scaper(BaseDataset):
    """
    Source separation datasets can be generated using Scaper, a library for
    automatic soundscape generation. Datasets that are generated with Scaper
    can be fed into this class easily. Scaper generates a large list of JAMS
    files which specify the parameters of the soundscape. If the soundscape
    is generated with ``save_isolated_events=True``, then the audio
    corresponding to each event in the soundscape will be saved as well.

    The sources are output in a dictionary keyed by ``'{label}::{count}'``,
    e.g. ``'siren::0'``, ``'siren::1'``, ``'car_horn::0'``.

    Getting an item from this dataset with no transforms returns the
    following dictionary:

    .. code-block:: none

        {
            'mix': [AudioSignal object containing mix audio],
            'sources': {
                '[label0::count]': [AudioSignal object containing label0 audio],
                '[label1::count]': [AudioSignal object containing label1 audio],
                ...
            }
            'metadata': {
                'scaper': [the jams object for the soundscape]
                'labels': [foreground labels used by the Scaper object]
            }
        }

    Example of generating a Scaper dataset and then loading it with nussl:

    >>> for i in range(n_mixtures):
    >>>     sc = scaper.Scaper(duration, fg_path, fg_path, random_state=i)
    >>>     sc.ref_db = ref_db
    >>>     sc.sr = 16000
    >>>     for j in range(n_sources):
    >>>         sc.add_event(
    >>>             label=('choose', []), source_file=('choose', []),
    >>>             source_time=('const', 0), event_time=('const', 0),
    >>>             event_duration=('const', duration), snr=('const', 0),
    >>>             pitch_shift=None, time_stretch=None)
    >>>     audio_path = os.path.join(output_dir, f'{i}.wav')
    >>>     jams_path = os.path.join(output_dir, f'{i}.jams')
    >>>     sc.generate(audio_path, jams_path, save_isolated_events=True)
    >>> dataset = nussl.datasets.Scaper(output_dir)
    >>> dataset[0]  # contains mix, sources, and metadata for 0.jams.

    Raises:
        DataSetException: if Scaper dataset wasn't saved with isolated event
            audio.
    """

    def get_items(self, folder):
        # One item per JAMS file in the folder.
        items = sorted([
            x for x in os.listdir(folder)
            if os.path.splitext(x)[1] in ['.jams']
        ])
        return items

    def _get_info_from_item(self, item):
        """Return (jam, scaper annotation, mix path, isolated-event paths)."""
        jam = jams.load(os.path.join(self.folder, item))
        ann = jam.annotations.search(namespace='scaper')[0]
        mix_path = ann.sandbox.scaper['soundscape_audio_path']
        source_paths = ann.sandbox.scaper['isolated_events_audio_path']
        return jam, ann, mix_path, source_paths

    def process_item(self, item):
        jam, ann, mix_path, source_paths = self._get_info_from_item(item)
        if not source_paths:
            raise DataSetException(
                "No paths to isolated events found! Did you generate "
                "the soundscape with save_isolated_events=True?")
        mix = self._load_audio_file(mix_path)
        sources = {}
        for event_spec, event_audio_path in zip(ann, source_paths):
            label = event_spec.value['label']
            # Keys look like 'label::count'; count existing sources with this
            # exact label. The previous substring test (``label in k``)
            # miscounted when one label was a prefix of another
            # (e.g. 'car' vs 'car_horn').
            label_count = sum(1 for k in sources if k.split('::')[0] == label)
            key = f"{label}::{label_count}"
            sources[key] = self._load_audio_file(event_audio_path)
        output = {
            'mix': mix,
            'sources': sources,
            'metadata': {
                'scaper': jam,
                'labels': ann.sandbox.scaper['fg_labels'],
            }
        }
        return output


class OnTheFly(BaseDataset):
    """
    Hook for a dataset that creates mixtures on the fly from source data.
    The function that creates the mixture is a closure which is defined by
    the end-user, as is the number of mixtures in the dataset.

    The mix closure takes two arguments - the dataset object and the index
    of the item being processed - and must return a dictionary containing at
    least 'mix' and 'sources' keys (plus optionally 'metadata' or any other
    keys you need).

    Example closure, configured via variable scoping:

    >>> def make_mix(dataset, i):
    >>>     sources = {}
    >>>     for j in range(n_sources):
    >>>         freq = np.random.randint(min_freq, max_freq)
    >>>         source_data = make_sine_wave(freq, sample_rate, duration)
    >>>         source_signal = dataset._load_audio_from_array(
    >>>             audio_data=source_data, sample_rate=sample_rate)
    >>>         sources[f'sine{j}'] = source_signal * 1 / n_sources
    >>>     return {'mix': sum(sources.values()), 'sources': sources}
    >>> dataset = nussl.datasets.OnTheFly(make_mix, 10)

    Args:
        mix_closure (function): A closure that determines how to create a
            single mixture, given the index. It has a strict input signature
            (the dataset and an int index) and a strict output signature (a
            dictionary containing 'mix' and 'sources' keys).
        num_mixtures (int): Number of mixtures that will be created on the
            fly. This determines one 'run' through the dataset, or an epoch.
        kwargs: Keyword arguments to BaseDataset.
    """

    def __init__(self, mix_closure, num_mixtures, **kwargs):
        self.num_mixtures = num_mixtures
        self.mix_closure = mix_closure
        # There is no backing folder; 'none' is a placeholder path.
        super().__init__('none', **kwargs)
        self.metadata['num_mixtures'] = num_mixtures

    def get_items(self, folder):
        return list(range(self.num_mixtures))

    def process_item(self, item):
        output = self.mix_closure(self, item)
        if not isinstance(output, dict):
            raise DataSetException("output of mix_closure must be a dict!")
        if 'mix' not in output or 'sources' not in output:
            raise DataSetException(
                "output of mix_closure must be a dict containing "
                "'mix', 'sources' as keys!")
        return output


class FUSS(Scaper):
    """
    The Free Universal Sound Separation (FUSS) Dataset is a database of
    arbitrary sound mixtures and source-level references, for use in
    experiments on arbitrary sound separation. This is the official sound
    separation data for the DCASE2020 Challenge Task 4: Sound Event
    Detection and Separation in Domestic Environments.

    This is a hook for reading in this dataset, and making sure that the mix
    and source paths recorded in the JAMS files are rewritten as paths
    relative to ``root``.

    References:

    [1] What's All the FUSS About Free Universal Sound Separation Data?,
        2020, in preparation.

    [2] Freesound Datasets: A Platform for the Creation of Open Audio
        Datasets. International Society for Music Information Retrieval
        Conference (ISMIR), pp. 486-493. Suzhou, China, 2017.

    Args:
        root (str): Folder where the FUSS data is. Either points to ssdata
            or ssdata_reverb.
        split (str): Either the ``train``, ``validation``, or ``eval`` split.
        kwargs: Additional keyword arguments to BaseDataset.
    """

    def __init__(self, root, split='train', **kwargs):
        if split not in ['train', 'validation', 'eval']:
            raise DataSetException(
                f"split '{split}' not one of the accepted splits: "
                f"'train', 'validation', 'eval'.")
        folder = os.path.join(root, split)
        super().__init__(folder, sample_rate=16000, strict_sample_rate=True, **kwargs)
        self.metadata['split'] = split

    def _get_info_from_item(self, item):
        """Like Scaper._get_info_from_item, but rebases the absolute audio
        paths stored in the JAMS file onto ``self.folder``."""
        path_to_item = os.path.join(self.folder, item)
        item_base_name = os.path.splitext(item)[0]

        jam = jams.load(path_to_item)
        ann = jam.annotations.search(namespace='scaper')[0]
        mix_path = ann.sandbox.scaper['soundscape_audio_path']
        source_paths = ann.sandbox.scaper['isolated_events_audio_path']

        # Keep everything from the item's base name onward, rooted at folder.
        mix_path = os.path.join(
            self.folder, item_base_name + mix_path.split(item_base_name)[-1])
        for i, source_path in enumerate(source_paths):
            source_paths[i] = os.path.join(
                self.folder, item_base_name + source_path.split(item_base_name)[-1])

        return jam, ann, mix_path, source_paths


class WHAM(MixSourceFolder):
    """
    Hook for the WHAM dataset. Essentially subclasses MixSourceFolder but
    with presets that are helpful for WHAM, which has the following
    directory structure:

    .. code-block:: none

        [wav8k, wav16k]/
            [min, max]/
                [tr, cv, tt]/
                    mix_both/
                    mix_clean/
                    mix_single/
                    noise/
                    s1/
                    s2/
        wham_noise/
            tr/
            cv/
            tt/
            metadata/

    Args:
        root (str): Root of WHAM directory.
        mix_folder (str): Which folder is the mix? Either 'mix_clean',
            'mix_both', or 'mix_single'.
        mode (str): Either 'min' or 'max' mode.
        split (str): Split to use (tr, cv, or tt).
        sample_rate (int): Sample rate of audio, either 8000 or 16000.
    """
    MIX_TO_SOURCE_MAP = {
        'mix_clean': ['s1', 's2'],
        'mix_both': ['s1', 's2', 'noise'],
        'mix_single': ['s1'],
    }
    DATASET_HASHES = {
        "wav8k": "acd49e0dae066e16040c983d71cc5a8adb903abff6e5cbb92b3785a1997b7547",
        "wav16k": "5691d6a35382f2408a99594f21d820b58371b5ea061841db37d548c0b8d6ec7f",
    }

    def __init__(self, root, mix_folder='mix_clean', mode='min', split='tr',
                 sample_rate=8000, **kwargs):
        # Validate every preset before building the folder path.
        if mix_folder not in self.MIX_TO_SOURCE_MAP.keys():
            raise DataSetException(
                f"{mix_folder} must be in {list(self.MIX_TO_SOURCE_MAP.keys())}")
        if sample_rate not in [8000, 16000]:
            raise DataSetException(
                f"{sample_rate} not available for WHAM (only 8000 and 16000 Hz allowed)")
        if mode not in ['min', 'max']:
            raise DataSetException(
                f"{mode} not available, only 'min' or 'max' allowed.")
        if split not in ['tr', 'cv', 'tt']:
            raise DataSetException(
                f"{split} not available, must be one of 'tr' (train), "
                f"'cv' (validation), and 'tt' (test)")

        wav_folder = 'wav8k' if sample_rate == 8000 else 'wav16k'
        folder = os.path.join(root, wav_folder, mode, split)
        source_folders = self.MIX_TO_SOURCE_MAP[mix_folder]

        super().__init__(folder, mix_folder=mix_folder,
                         source_folders=source_folders, sample_rate=sample_rate,
                         strict_sample_rate=True, **kwargs)
        self.metadata.update({
            'mix_folder': mix_folder,
            'mode': mode,
            'split': split,
            'wav_folder': wav_folder
        })
# -*- coding: utf-8 -*- <import_from_future_stmt> division print_function absolute_import<import_stmt>os<import_stmt>tensorflow<as>tf<import_stmt>math<import_from_stmt>dataloader.pretrained_weights.pretrain_zoo PretrainModelZoo<line_sep>""" RetinaNet-H + gwd fix bug + sqrt + tau=2 + train set FLOPs: 860451163; Trainable params: 33002916 iou threshold: 0.5 classname: plane npos num: 2450 ap: 0.8948394008103565 classname: baseball-diamond npos num: 209 ap: 0.6580467157774382 classname: bridge npos num: 424 ap: 0.388917639526009 classname: ground-track-field npos num: 131 ap: 0.582799082808811 classname: small-vehicle npos num: 5090 ap: 0.6058372268499183 classname: large-vehicle npos num: 4293 ap: 0.6297220646782561 classname: ship npos num: 8861 ap: 0.8143495259256781 classname: tennis-court npos num: 739 ap: 0.897082428301694 classname: basketball-court npos num: 124 ap: 0.6194974348503025 classname: storage-tank npos num: 1869 ap: 0.7888520103937031 classname: soccer-ball-field npos num: 87 ap: 0.6721727619016967 classname: roundabout npos num: 164 ap: 0.6740140076462648 classname: harbor npos num: 2065 ap: 0.6030928319524497 classname: swimming-pool npos num: 366 ap: 0.532690992577956 classname: helicopter npos num: 72 ap: 0.45393048522054874 map: 0.6543896406147388 {'0.65': {'mAP': 0.5531255908346647, 'ground-track-field': 0.46874541967164557, 'small-vehicle': 0.5254805842312422, 'soccer-ball-field': 0.49674069740653076, 'harbor': 0.3325998985859663, 'large-vehicle': 0.49237446722103323, 'swimming-pool': 0.3786694115862947, 'roundabout': 0.6127737951332743, 'tennis-court': 0.8955950695702153, 'basketball-court': 0.5642336574393851, 'helicopter': 0.4095234559651532, 'storage-tank': 0.768350569402555, 'bridge': 0.229887299838382, 'baseball-diamond': 0.5172297968073052, 'ship': 0.718831628735693, 'plane': 0.885848110925295}, '0.5': {'mAP': 0.6543896406147388, 'ground-track-field': 0.582799082808811, 'small-vehicle': 0.6058372268499183, 'soccer-ball-field': 
0.6721727619016967, 'harbor': 0.6030928319524497, 'large-vehicle': 0.6297220646782561, 'swimming-pool': 0.532690992577956, 'roundabout': 0.6740140076462648, 'tennis-court': 0.897082428301694, 'basketball-court': 0.6194974348503025, 'helicopter': 0.45393048522054874, 'storage-tank': 0.7888520103937031, 'bridge': 0.388917639526009, 'baseball-diamond': 0.6580467157774382, 'ship': 0.8143495259256781, 'plane': 0.8948394008103565}, '0.8': {'mAP': 0.28292248169049333, 'ground-track-field': 0.2325775080634852, 'small-vehicle': 0.1979511661753693, 'soccer-ball-field': 0.29786281543794524, 'harbor': 0.11494252873563218, 'large-vehicle': 0.16034195972421744, 'swimming-pool': 0.10212121212121213, 'roundabout': 0.29187883858274505, 'tennis-court': 0.8003975003061949, 'basketball-court': 0.47053242084058733, 'helicopter': 0.08282828282828283, 'storage-tank': 0.4630236938472425, 'bridge': 0.045454545454545456, 'baseball-diamond': 0.0980392156862745, 'ship': 0.3419243781838527, 'plane': 0.5439611593698137}, '0.85': {'mAP': 0.17732891599288997, 'ground-track-field': 0.13084951639168507, 'small-vehicle': 0.06282073067119796, 'soccer-ball-field': 0.18311688311688312, 'harbor': 0.09090909090909091, 'large-vehicle': 0.05997549072961212, 'swimming-pool': 0.01515151515151515, 'roundabout': 0.1523809523809524, 'tennis-court': 0.777850986366134, 'basketball-court': 0.27146743865010114, 'helicopter': 0.025974025974025972, 'storage-tank': 0.3194857000235097, 'bridge': 0.025974025974025972, 'baseball-diamond': 0.07032306536438768, 'ship': 0.09238611869237975, 'plane': 0.38126819949784874}, '0.9': {'mAP': 0.09261312239028942, 'ground-track-field': 0.045454545454545456, 'small-vehicle': 0.007575757575757575, 'soccer-ball-field': 0.08787878787878788, 'harbor': 0.09090909090909091, 'large-vehicle': 0.006888231631382316, 'swimming-pool': 0.01515151515151515, 'roundabout': 0.05694896083698572, 'tennis-court': 0.6190068314484273, 'basketball-court': 0.1277056277056277, 'helicopter': 
0.018181818181818184, 'storage-tank': 0.10310064772905649, 'bridge': 0.012987012987012986, 'baseball-diamond': 0.05454545454545454, 'ship': 0.00899621212121212, 'plane': 0.133866341697667}, '0.6': {'mAP': 0.602003225559061, 'ground-track-field': 0.5117731722941454, 'small-vehicle': 0.5692796674261347, 'soccer-ball-field': 0.591601532425069, 'harbor': 0.42439117183385383, 'large-vehicle': 0.5379528999441402, 'swimming-pool': 0.4552774282858074, 'roundabout': 0.6590275695186874, 'tennis-court': 0.8967502975397331, 'basketball-court': 0.6163602294422292, 'helicopter': 0.42175379721391987, 'storage-tank': 0.7814590420239126, 'bridge': 0.30900189391187255, 'baseball-diamond': 0.6270284107602824, 'ship': 0.7357085211727478, 'plane': 0.892682749593379}, '0.7': {'mAP': 0.47209699491529994, 'ground-track-field': 0.37315990473910204, 'small-vehicle': 0.4462857945106512, 'soccer-ball-field': 0.43301958208470137, 'harbor': 0.24212265985665615, 'large-vehicle': 0.41707228898274396, 'swimming-pool': 0.2672845272755605, 'roundabout': 0.4752231061636024, 'tennis-court': 0.8954629342636613, 'basketball-court': 0.5565887540061711, 'helicopter': 0.3137137929820856, 'storage-tank': 0.6891634802537836, 'bridge': 0.16824841824841824, 'baseball-diamond': 0.3967626112242669, 'ship': 0.6233882592021442, 'plane': 0.7839588099359523}, '0.75': {'mAP': 0.38682933856456475, 'ground-track-field': 0.3505001362890805, 'small-vehicle': 0.32936925454926796, 'soccer-ball-field': 0.35644113950565565, 'harbor': 0.16082435022158342, 'large-vehicle': 0.312014321085313, 'swimming-pool': 0.15053744756715054, 'roundabout': 0.421342806894755, 'tennis-court': 0.8933998458347037, 'basketball-court': 0.5018426096266209, 'helicopter': 0.17586580086580086, 'storage-tank': 0.6481067305855587, 'bridge': 0.11431682090364725, 'baseball-diamond': 0.21312574893137554, 'ship': 0.5086325250920672, 'plane': 0.6661205405158923}, 'mmAP': 0.38707336824937255, '0.95': {'mAP': 0.020635306242343165, 'ground-track-field': 
0.045454545454545456, 'small-vehicle': 0.0005790387955993052, 'soccer-ball-field': 0.0, 'harbor': 0.0004434589800443459, 'large-vehicle': 0.00036638424547744445, 'swimming-pool': 0.0, 'roundabout': 0.0053475935828877, 'tennis-court': 0.2304241077310939, 'basketball-court': 0.003189792663476874, 'helicopter': 0.0, 'storage-tank': 0.012987012987012986, 'bridge': 0.0, 'baseball-diamond': 0.0, 'ship': 0.0009404388714733542, 'plane': 0.009797220323536112}, '0.55': {'mAP': 0.6287890656893798, 'ground-track-field': 0.5643322633863954, 'small-vehicle': 0.5913067741856398, 'soccer-ball-field': 0.6335613572261539, 'harbor': 0.5190220297608497, 'large-vehicle': 0.5649195362143626, 'swimming-pool': 0.49227487366542605, 'roundabout': 0.667984152802187, 'tennis-court': 0.897082428301694, 'basketball-court': 0.6163602294422292, 'helicopter': 0.44399239228256077, 'storage-tank': 0.7862921590716214, 'bridge': 0.35810648582284893, 'baseball-diamond': 0.6568440654367499, 'ship': 0.7454706366368675, 'plane': 0.8942866011051104}} """<line_sep># ------------------------------------------------ VERSION='RetinaNet_DOTA_2x_20210124'<line_sep>NET_NAME='resnet50_v1d'# 'MobilenetV2' # ---------------------------------------- System ROOT_PATH=os.path.abspath('../../')<line_sep>print(20<times>"++--")<line_sep>print(ROOT_PATH)<line_sep>GPU_GROUP="0,1,3"<line_sep>NUM_GPU=len(GPU_GROUP.strip().split(','))<line_sep>SHOW_TRAIN_INFO_INTE=20<line_sep>SMRY_ITER=200<line_sep>SAVE_WEIGHTS_INTE=20673<times>2<line_sep>SUMMARY_PATH=os.path.join(ROOT_PATH 'output/summary')<line_sep>TEST_SAVE_PATH=os.path.join(ROOT_PATH 'tools/test_result')<line_sep>pretrain_zoo=PretrainModelZoo()<line_sep>PRETRAINED_CKPT=pretrain_zoo.pretrain_weight_path(NET_NAME ROOT_PATH)<line_sep>TRAINED_CKPT=os.path.join(ROOT_PATH 'output/trained_weights')<line_sep>EVALUATE_R_DIR=os.path.join(ROOT_PATH 'output/evaluate_result_pickle/')<line_sep># ------------------------------------------ Train and Test 
RESTORE_FROM_RPN=<false><line_sep>FIXED_BLOCKS=1# allow 0~3 FREEZE_BLOCKS=[<true> <false> <false> <false> <false>]# for gluoncv backbone USE_07_METRIC=<true><line_sep>ADD_BOX_IN_TENSORBOARD=<true><line_sep>MUTILPY_BIAS_GRADIENT=2.0# if None, will not multipy GRADIENT_CLIPPING_BY_NORM=10.0# if None, will not clip CLS_WEIGHT=1.0<line_sep>REG_WEIGHT=2.0<line_sep>REG_LOSS_MODE=2<line_sep>ALPHA=1.0<line_sep>BETA=1.0<line_sep>BATCH_SIZE=1<line_sep>EPSILON=1e-5<line_sep>MOMENTUM=0.9<line_sep>LR=1e-3<line_sep>DECAY_STEP=[SAVE_WEIGHTS_INTE<times>12 SAVE_WEIGHTS_INTE<times>16 SAVE_WEIGHTS_INTE<times>20]<line_sep>MAX_ITERATION=SAVE_WEIGHTS_INTE<times>20<line_sep>WARM_SETP=int(1.0/8.0<times>SAVE_WEIGHTS_INTE)<line_sep># -------------------------------------------- Dataset DATASET_NAME='DOTATrain'# 'pascal', 'coco' PIXEL_MEAN=[123.68 116.779 103.939]# R, G, B. In tf, channel is RGB. In openCV, channel is BGR PIXEL_MEAN_=[0.485 0.456 0.406]<line_sep>PIXEL_STD=[0.229 0.224 0.225]# R, G, B. In tf, channel is RGB. 
In openCV, channel is BGR IMG_SHORT_SIDE_LEN=800<line_sep>IMG_MAX_LENGTH=800<line_sep>CLASS_NUM=15<line_sep>IMG_ROTATE=<false><line_sep>RGB2GRAY=<false><line_sep>VERTICAL_FLIP=<false><line_sep>HORIZONTAL_FLIP=<true><line_sep>IMAGE_PYRAMID=<false><line_sep># --------------------------------------------- Network SUBNETS_WEIGHTS_INITIALIZER=tf.random_normal_initializer(mean=0.0 stddev=0.01 seed=<none>)<line_sep>SUBNETS_BIAS_INITIALIZER=tf.constant_initializer(value=0.0)<line_sep>PROBABILITY=0.01<line_sep>FINAL_CONV_BIAS_INITIALIZER=tf.constant_initializer(value=-math.log((1.0-PROBABILITY)/PROBABILITY))<line_sep>WEIGHT_DECAY=1e-4<line_sep>USE_GN=<false><line_sep>FPN_CHANNEL=256<line_sep>NUM_SUBNET_CONV=4<line_sep>FPN_MODE='fpn'<line_sep># --------------------------------------------- Anchor LEVEL=['P3' 'P4' 'P5' 'P6' 'P7']<line_sep>BASE_ANCHOR_SIZE_LIST=[32 64 128 256 512]<line_sep>ANCHOR_STRIDE=[8 16 32 64 128]<line_sep>ANCHOR_SCALES=[2<power>0 2<power>(1.0/3.0) 2<power>(2.0/3.0)]<line_sep>ANCHOR_RATIOS=[1 1/2 2. 1/3. 3. 5. 1/5.]<line_sep>ANCHOR_ANGLES=[-90 -75 -60 -45 -30 -15]<line_sep>ANCHOR_SCALE_FACTORS=<none><line_sep>USE_CENTER_OFFSET=<true><line_sep>METHOD='H'<line_sep>USE_ANGLE_COND=<false><line_sep>ANGLE_RANGE=90# or 180 # -------------------------------------------- Head SHARE_NET=<true><line_sep>USE_P5=<true><line_sep>IOU_POSITIVE_THRESHOLD=0.5<line_sep>IOU_NEGATIVE_THRESHOLD=0.4<line_sep>NMS=<true><line_sep>NMS_IOU_THRESHOLD=0.1<line_sep>MAXIMUM_DETECTIONS=100<line_sep>FILTERED_SCORE=0.05<line_sep>VIS_SCORE=0.4<line_sep># -------------------------------------------- GWD GWD_TAU=2.0<line_sep>GWD_FUNC=tf.sqrt<line_sep>
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This will attempt to import the actual App Engine modules, and if it fails,
# they will be replaced with fake modules. This is useful during testing.
try:
  import google.appengine.api.memcache as memcache
except ImportError:
  class _RPC(object):
    """Minimal stand-in for an App Engine async RPC: the result is computed
    eagerly and merely wrapped so callers can use the async interface."""

    def __init__(self, result=None):
      self.result = result

    def get_result(self):
      return self.result

    def wait(self):
      # Real RPCs block here; the fake result is already available.
      pass

  class InMemoryMemcache(object):
    """An in-memory memcache implementation.

    Values are stored in a dict-of-dicts keyed by namespace, then by key.
    The `time` (expiry) arguments are accepted for API compatibility but
    ignored - entries never expire.
    """

    def __init__(self):
      self._namespaces = {}

    class Client(object):
      """Fake of memcache.Client; delegates to the module-level `memcache`
      instance created below."""

      def set_multi_async(self, mapping, namespace='', time=0):
        # FIX: was mapping.iteritems(), which only exists on Python 2;
        # items() behaves identically on both Python 2 and 3.
        return _RPC(result=dict(
            (k, memcache.set(k, v, namespace=namespace, time=time))
            for k, v in mapping.items()))

      def get_multi_async(self, keys, namespace='', time=0):
        return _RPC(result=dict(
            (k, memcache.get(k, namespace=namespace, time=time))
            for k in keys))

    def set(self, key, value, namespace='', time=0):
      self._GetNamespace(namespace)[key] = value

    def get(self, key, namespace='', time=0):
      # Returns None for missing keys, matching the real memcache API.
      return self._GetNamespace(namespace).get(key)

    def delete(self, key, namespace=''):
      # pop() with a default so deleting a missing key is a no-op.
      self._GetNamespace(namespace).pop(key, None)

    def delete_multi(self, keys, namespace=''):
      for k in keys:
        self.delete(k, namespace=namespace)

    def _GetNamespace(self, namespace):
      # Lazily create the per-namespace dict on first use.
      if namespace not in self._namespaces:
        self._namespaces[namespace] = {}
      return self._namespaces[namespace]

    def flush_all(self):
      self._namespaces = {}
      # The real API returns True on success; this fake deliberately
      # reports False (behavior preserved from the original).
      return False

  memcache = InMemoryMemcache()
from collections import defaultdict

from insights.core import filters
from insights.parsers.ps import PsAux, PsAuxcww
from insights.specs import Specs
from insights.specs.default import DefaultSpecs

import pytest
import sys


def setup_function(func):
    # pytest module-level hook: pre-register the filters each test expects.
    # Identity comparison against the test function selects the fixture data.
    if func is test_get_filter:
        filters.add_filter(Specs.ps_aux, "COMMAND")
    if func is test_get_filter_registry_point:
        filters.add_filter(Specs.ps_aux, "COMMAND")
        filters.add_filter(DefaultSpecs.ps_aux, "MEM")
    if func is test_filter_dumps_loads:
        filters.add_filter(Specs.ps_aux, "COMMAND")


def teardown_function(func):
    # Undo whatever setup_function (or the test itself) registered, so the
    # process-global filters.FILTERS registry does not leak between tests.
    if func is test_get_filter:
        del filters.FILTERS[Specs.ps_aux]
    if func is test_get_filter_registry_point:
        del filters.FILTERS[Specs.ps_aux]
        del filters.FILTERS[DefaultSpecs.ps_aux]
    if func is test_filter_dumps_loads:
        del filters.FILTERS[Specs.ps_aux]
    if func is test_add_filter_to_parser:
        del filters.FILTERS[Specs.ps_aux]
    if func is test_add_filter_to_parser_patterns_list:
        del filters.FILTERS[Specs.ps_aux]


@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier code uses oyaml library which is incompatable with this test')
def test_filter_dumps_loads():
    # dumps() must serialize the registry and loads() must round-trip it
    # into a freshly reset FILTERS mapping.
    r = filters.dumps()
    assert r is not None

    filters.FILTERS = defaultdict(set)
    filters.loads(r)

    assert Specs.ps_aux in filters.FILTERS
    assert filters.FILTERS[Specs.ps_aux] == set(["COMMAND"])


def test_get_filter():
    # A filter registered on the abstract spec is also visible through the
    # concrete (default) spec.
    f = filters.get_filters(Specs.ps_aux)
    assert "COMMAND" in f

    f = filters.get_filters(DefaultSpecs.ps_aux)
    assert "COMMAND" in f


def test_get_filter_registry_point():
    # Filters added on the registry point (DefaultSpecs) are NOT propagated
    # back up to the abstract spec.
    s = set(["COMMAND", "MEM"])
    f = filters.get_filters(DefaultSpecs.ps_aux)
    assert f & s == s

    f = filters.get_filters(Specs.ps_aux)
    assert "COMMAND" in f
    assert "MEM" not in f


def test_add_filter_to_parser():
    # Adding a filter via a parser class registers it on the parser's
    # underlying spec, not on the parser itself.
    filter_string = "bash"
    filters.add_filter(PsAux, filter_string)

    spec_filters = filters.get_filters(Specs.ps_aux)
    assert filter_string in spec_filters

    parser_filters = filters.get_filters(PsAux)
    assert not parser_filters


def test_add_filter_to_parser_patterns_list():
    # add_filter also accepts a list of patterns; all of them end up on the
    # underlying spec.
    filters_list = ["bash", "systemd", "Network"]
    filters.add_filter(PsAux, filters_list)

    spec_filters = filters.get_filters(Specs.ps_aux)
    assert all(f in spec_filters for f in filters_list)

    parser_filters = filters.get_filters(PsAux)
    assert not parser_filters


def test_add_filter_to_parser_non_filterable():
    # PsAuxcww's spec is not filterable: adding via the parser is a no-op.
    filter_string = "bash"
    filters.add_filter(PsAuxcww, filter_string)

    spec_filters = filters.get_filters(Specs.ps_auxcww)
    assert not spec_filters

    parser_filters = filters.get_filters(PsAuxcww)
    assert not parser_filters


def test_add_filter_exception_not_filterable():
    # Adding directly to a non-filterable spec raises.
    with pytest.raises(Exception):
        filters.add_filter(Specs.ps_auxcww, "bash")


def test_add_filter_exception_raw():
    # Raw (unparsed) specs cannot be filtered.
    with pytest.raises(Exception):
        filters.add_filter(Specs.metadata_json, "[]")


def test_add_filter_exception_empty():
    # An empty filter pattern is rejected.
    with pytest.raises(Exception):
        filters.add_filter(Specs.ps_aux, "")
<import_from_stmt>matplotlib.offsetbox AnchoredOffsetbox AuxTransformBox VPacker TextArea AnchoredText DrawingArea AnnotationBbox<import_from_stmt>mpl_toolkits.axes_grid1.anchored_artists AnchoredDrawingArea AnchoredAuxTransformBox AnchoredEllipse AnchoredSizeBar<line_sep>
# Copyright 2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np


class _LifeSpan:
    """Half-open-free interval of function indices during which one buffer
    variable must stay alive. -1 means "not determined yet"."""

    def __init__(self):
        self.begin_func_idx = -1
        self.end_func_idx = -1

    def needed_at(self, func_idx):
        """Return True if the buffer must be alive while function
        `func_idx` executes (begin <= func_idx <= end)."""
        needed = self.begin_func_idx <= func_idx
        needed &= self.end_func_idx >= func_idx
        return needed


def __make_buf_var_lives(info):
    """Compute, for every Buffer variable, the span of function indices over
    which it must be kept alive.

    Returns a list of _LifeSpan indexed by buffer index."""
    buf_var_num = len(info._variable_buffer_index)
    buf_var_lives = [_LifeSpan() for _ in range(buf_var_num)]
    name_to_vidx = {v.name: i for i, v in enumerate(info._network.variable)}
    name_to_var = {v.name: v for v in info._network.variable}

    # set _LifeSpan.begin_func_idx and .end_func_idx along info._network
    final_func_idx = len(info._network.function)
    for func_idx, func in enumerate(info._network.function):
        for var_name in list(func.input) + list(func.output):
            if var_name in info._generator_variables:
                # no need to assign buffer for generator data
                # NOTE(review): this branch is a `pass`, not a `continue`,
                # so a generator variable of type 'Buffer' would still be
                # processed below - confirm generator variables are never
                # 'Buffer' typed.
                pass
            if name_to_var[var_name].type == 'Buffer':
                var_idx = name_to_vidx[var_name]
                buf_idx = info._buffer_ids[var_idx]
                buf_var_life = buf_var_lives[buf_idx]

                if buf_var_life.begin_func_idx < 0:
                    if var_name in info._input_variables:
                        # network inputs must exist before the first function
                        buf_var_life.begin_func_idx = 0
                    else:
                        buf_var_life.begin_func_idx = func_idx
                else:
                    # only identify a Function which first refers to the Variable
                    pass

                if var_name in info._output_variables:
                    # network outputs must survive until after the last function
                    buf_var_life.end_func_idx = final_func_idx
                else:
                    # keep extending the end as later functions reference it
                    buf_var_life.end_func_idx = func_idx
            else:
                pass  # ignore 'Parameter'

    return buf_var_lives


def __count_actual_buf(info, buf_var_lives):
    """Return the maximum number of buffers that are simultaneously alive
    at any single function, i.e. how many physical buffers are required."""
    actual_buf_num = 0
    for func_idx, _ in enumerate(info._network.function):
        buf_num = 0
        for buf_idx, buf_var_life in enumerate(buf_var_lives):
            buf_num += int(buf_var_life.needed_at(func_idx))
        actual_buf_num = max(actual_buf_num, buf_num)
    return actual_buf_num


def __make_buf_var_refs(info, buf_var_lives):
    """Build a (num_functions, actual_buf_num) int32 array whose row f lists
    the buffer indices alive at function f, padded with -1."""
    actual_buf_num = __count_actual_buf(info, buf_var_lives)
    shape = (len(info._network.function), actual_buf_num)
    buf_var_refs = np.empty(shape, dtype=np.int32)
    buf_var_refs[:] = -1

    # fill buf_var_refs based on buf_var_lives
    for func_idx, _ in enumerate(info._network.function):
        crsr = 0
        for buf_idx, buf_var_life in enumerate(buf_var_lives):
            if buf_var_life.needed_at(func_idx):
                buf_var_refs[func_idx][crsr] = buf_idx
                crsr += 1
            else:
                pass  # only focus on buffers used in this func

    return buf_var_refs


def __compute_actual_buf_sizes(info, buf_var_lives):
    """Return an int32 array of the sizes of each physical buffer, large
    enough to cover the worst-case set of live buffers at any function."""
    actual_buf_num = __count_actual_buf(info, buf_var_lives)
    buf_size_array = np.zeros(actual_buf_num, dtype=np.int32)

    # tmp_size_array holds the sizes needed when focusing on a single Function
    tmp_size_array = np.empty_like(buf_size_array, dtype=np.int32)
    for func_idx, _ in enumerate(info._network.function):
        tmp_size_array[:] = -1
        crsr = 0
        for buf_idx, buf_var_life in enumerate(buf_var_lives):
            if buf_var_life.needed_at(func_idx):
                tmp_size_array[crsr] = info._variable_buffer_size[buf_idx]
                crsr += 1
            else:
                pass  # only focus on buffers used in this func

        # update sizes of actual buffers; sorting aligns the k-th smallest
        # requirement with the k-th smallest physical buffer so the total
        # allocated size stays minimal
        tmp_size_array = np.sort(tmp_size_array)
        for i in range(actual_buf_num):
            buf_size_array[i] = max(buf_size_array[i], tmp_size_array[i])

    return buf_size_array


def __assign_actual_buf_to_variable(info, actual_buf_sizes, buf_var_refs):
    """Greedily assign each Variable to a physical buffer.

    Returns vidx_to_abidx: {variable index -> actual buffer index}.
    May grow entries of `actual_buf_sizes` in place when no vacant buffer
    is large enough."""
    vidx_to_abidx = {}

    # actual_assigned_flags remembers which physical buffers are taken
    # while processing one function.
    actual_buf_num = len(actual_buf_sizes)
    # FIX: dtype=np.bool used the deprecated alias removed in NumPy 1.24;
    # the builtin bool is the documented replacement.
    actual_assigned_flags = np.empty(actual_buf_num, dtype=bool)
    for func_idx, _ in enumerate(info._network.function):
        actual_assigned_flags[:] = False

        # restore assignments determined at previous functions
        for ref_crsr in range(actual_buf_num):
            # minus buf_idx means the corresponding buffer is not needed
            buf_idx = buf_var_refs[func_idx][ref_crsr]
            if buf_idx < 0:
                continue

            vidx = info._variable_buffer_index[buf_idx][0]
            if vidx in vidx_to_abidx:
                abidx = vidx_to_abidx[vidx]
                actual_assigned_flags[abidx] = True
            else:
                pass  # determine assignment for this vidx in the loop below

        # determine new assignments of actual buffers to Variables
        for ref_crsr in range(actual_buf_num):
            # minus buf_idx means the corresponding buffer is not needed
            buf_idx = buf_var_refs[func_idx][ref_crsr]
            if buf_idx < 0:
                continue

            # skip Variables to which an actual buffer is already assigned
            vidx = info._variable_buffer_index[buf_idx][0]
            if vidx in vidx_to_abidx:
                continue

            # search for actual buffers vacant and large enough
            needed_size = info._variable_buffer_size[buf_idx]
            abidx = 0
            while abidx != actual_buf_num:
                cond = not actual_assigned_flags[abidx]
                cond &= needed_size <= actual_buf_sizes[abidx]
                if cond:
                    actual_assigned_flags[abidx] = True
                    vidx_to_abidx[vidx] = abidx
                    break
                else:
                    abidx += 1

            # increase size if no buffer large enough was found
            if abidx == actual_buf_num:
                for abidx in range(actual_buf_num):
                    if not actual_assigned_flags[abidx]:
                        actual_buf_sizes[abidx] = needed_size
                        actual_assigned_flags[abidx] = True
                        vidx_to_abidx[vidx] = abidx
                        break

    return vidx_to_abidx


def save_variable_buffer(info):
    """Plan minimal memory for Variable Buffers.

    Returns a tuple of:
    - actual_buf_sizes (list): sizes of the physical buffers that back
      Variable Buffers; indices into this list are called
      'actual buffer index'
    - vidx_to_abidx (dict): assignment of physical buffers to Variables;
      key is a Variable index, value is an actual buffer index
    """
    buf_var_lives = __make_buf_var_lives(info)
    actual_buf_sizes = __compute_actual_buf_sizes(info, buf_var_lives)
    buf_var_refs = __make_buf_var_refs(info, buf_var_lives)
    vidx_to_abidx = __assign_actual_buf_to_variable(info, actual_buf_sizes,
                                                    buf_var_refs)
    return list(actual_buf_sizes), vidx_to_abidx
# Time:  O(n)
# Space: O(1)

class Solution(object):
    def isSelfCrossing(self, x):
        """Return True if the path traced by moves x (N, W, S, E, N, ...)
        crosses itself.

        :type x: List[int]
        :rtype: bool
        """
        # Special case: the 5th move exactly retraces onto the start point
        # (move 3 equals move 1 and moves 4+0 reach back to line 2).
        if len(x) >= 5 and x[3] == x[1] and x[4] + x[0] >= x[2]:
            # Crossing in a loop:
            #     2
            #  3 ┌────┐
            #    └─══>┘1
            #    4  0  (overlapped)
            return True

        # FIX: xrange is Python-2-only (NameError on Python 3);
        # range is correct on both.
        for i in range(3, len(x)):
            if x[i] >= x[i - 2] and x[i - 3] >= x[i - 1]:
                # Case 1: the i-th line crosses the (i-3)-th line.
                #    i-2
                # i-1┌─┐
                #    └─┼─>i
                #     i-3
                return True
            elif i >= 5 and x[i - 4] <= x[i - 2] and x[i] + x[i - 4] >= x[i - 2] and \
                    x[i - 1] <= x[i - 3] and x[i - 5] + x[i - 1] >= x[i - 3]:
                # Case 2: the i-th line crosses the (i-5)-th line.
                #    i-4
                #    ┌──┐
                #    │i<┼─┐
                # i-3│ i-5│i-1
                #    └────┘
                #      i-2
                return True
        return False
import platform

from pypykatz import logger
from minidump.minidumpfile import MinidumpFile
from pypykatz.commons.common import KatzSystemInfo
from pypykatz.rdp.packages.creds.templates import RDPCredsTemplate
from pypykatz.rdp.packages.creds.decryptor import RDPCredentialDecryptor


class RDPCredParser:
    """Extracts RDP credentials from a process memory reader.

    Instances are created either from live Windows processes (go_live) or
    from a minidump file (parse_minidump_file); results accumulate in
    self.credentials after start() is called.
    """

    def __init__(self, process, reader, sysinfo):
        # process may be None when parsing a minidump (no live handle).
        self.process = process
        self.reader = reader
        self.sysinfo = sysinfo
        self.credentials = []

    @staticmethod
    def go_live(pid=None, all_rdp=False):
        """Attach to live processes and parse RDP credentials.

        pid: attach only to this process; otherwise scan the TermService
        service process, and - when all_rdp is True - every process that has
        mstscax.dll loaded. Windows only; returns the list of parsers, each
        already start()-ed.
        """
        if platform.system() != 'Windows':
            raise Exception('Live parsing will only work on Windows')
        # Imports are local because these modules pull in Windows-only
        # ctypes bindings that would fail at module import time elsewhere.
        from pypykatz.commons.readers.local.common.live_reader_ctypes import OpenProcess, PROCESS_ALL_ACCESS
        from pypykatz.commons.winapi.machine import LiveMachine
        from pypykatz.commons.winapi.constants import PROCESS_VM_READ, PROCESS_VM_WRITE, PROCESS_VM_OPERATION, PROCESS_QUERY_INFORMATION, PROCESS_CREATE_THREAD
        from pypykatz.commons.readers.local.common.privileges import enable_debug_privilege
        from pypykatz.commons.readers.local.live_reader import LiveReader
        from pypykatz.commons.readers.local.process import Process

        req_access_rights = PROCESS_VM_READ | PROCESS_VM_WRITE | PROCESS_VM_OPERATION | PROCESS_QUERY_INFORMATION | PROCESS_CREATE_THREAD
        enable_debug_privilege()
        targets = []

        if pid is not None:
            # Explicit target process.
            process = Process(pid=pid, access=req_access_rights)
            process.list_modules()
            reader = LiveReader(process_handle=process.phandle)
            sysinfo = KatzSystemInfo.from_live_reader(reader)
            targets.append(RDPCredParser(process, reader.get_buffered_reader(), sysinfo))
        else:
            machine = LiveMachine()
            # The Terminal Services service hosts server-side RDP state.
            for service_name, display_name, pid in machine.list_services():
                if service_name == 'TermService':
                    process = Process(pid=pid, access=req_access_rights)
                    reader = LiveReader(process_handle=process.phandle)
                    sysinfo = KatzSystemInfo.from_live_reader(reader)
                    targets.append(RDPCredParser(process, reader.get_buffered_reader(), sysinfo))

            if all_rdp is True:
                # Also scan every process with the RDP client ActiveX
                # control (mstscax.dll) loaded, i.e. mstsc-like clients.
                for pid in machine.list_all_pids():
                    try:
                        process = Process(pid=pid, access=req_access_rights)
                        for module in process.list_modules():
                            if module.name.lower().find("mstscax.dll") != -1:
                                reader = LiveReader(process_handle=process.phandle)
                                sysinfo = KatzSystemInfo.from_live_reader(reader)
                                targets.append(RDPCredParser(process, reader.get_buffered_reader(), sysinfo))
                                break
                    except Exception as e:
                        # Best effort: many system processes cannot be
                        # opened with these rights; report and continue.
                        #import traceback
                        #traceback.print_exc()
                        print(e)

        for target in targets:
            target.start()
        return targets

    @staticmethod
    def parse_minidump_file(filename, chunksize=10 * 1024):
        """Parse RDP credentials from a minidump file on disk.

        chunksize is the buffered-reader segment size in bytes.
        Returns a one-element list containing the start()-ed parser.
        """
        try:
            minidump = MinidumpFile.parse(filename)
            reader = minidump.get_reader().get_buffered_reader(segment_chunk_size=chunksize)
            sysinfo = KatzSystemInfo.from_minidump(minidump)
        except Exception as e:
            logger.exception('Minidump parsing error!')
            raise e
        try:
            mimi = RDPCredParser(None, reader, sysinfo)
            mimi.start()
        except Exception as e:
            logger.info('Credentials parsing error!')
            raise e
        return [mimi]

    def rdpcreds(self):
        # Pick the struct template matching the target's OS build, run the
        # decryptor over process memory, and collect its findings.
        decryptor_template = RDPCredsTemplate.get_template(self.sysinfo)
        decryptor = RDPCredentialDecryptor(self.process, self.reader, decryptor_template, self.sysinfo)
        decryptor.start()
        for cred in decryptor.credentials:
            self.credentials.append(cred)

    def start(self):
        """Run all parsers (currently only the RDP credential scan)."""
        self.rdpcreds()
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Get pretrained model file: wget https://zenodo.org/record/2535873/files/resnet50_v1.pb

import time
from argparse import ArgumentParser

import numpy as np
import tensorflow as tf
from tensorflow.python.tools.optimize_for_inference_lib import optimize_for_inference
from tensorflow.python.framework import dtypes
import ngraph_bridge

# Tensor names in the frozen ResNet50 v1 graph.
INPUTS = 'input_tensor'
OUTPUTS = 'softmax_tensor'
RESNET_IMAGE_SIZE = 224


class RN50Graph:
    """Evaluate image classifier with optimized TensorFlow graph"""

    def __init__(self):
        # All knobs come from the command line; defaults give a short run
        # against ./resnet50_v1.pb.
        arg_parser = ArgumentParser(description='Parse arguments')
        arg_parser.add_argument("--batch-size", dest="batch_size", type=int, default=8)
        arg_parser.add_argument("--num-images", dest='num_images', type=int, default=512)
        arg_parser.add_argument("--num-inter-threads", dest='num_inter_threads', type=int, default=0)
        arg_parser.add_argument("--num-intra-threads", dest='num_intra_threads', type=int, default=0)
        arg_parser.add_argument("--input-graph", dest='input_graph', type=str, default="resnet50_v1.pb")
        arg_parser.add_argument("--warmup-iters", dest='warmup_iters', type=int, default=8)
        self.args = arg_parser.parse_args()

    def run(self):
        """run benchmark with optimized graph

        Runs the same synthetic-data inference loop twice - once with the
        nGraph bridge disabled (plain TF) and once enabled - then compares
        throughput and asserts the predicted labels match.
        """
        print("Run inference with dummy data")

        config = tf.compat.v1.ConfigProto()
        config.intra_op_parallelism_threads = self.args.num_intra_threads
        config.inter_op_parallelism_threads = self.args.num_inter_threads
        config.use_per_session_threads = True

        # Synthetic input pipeline: fixed-seed uniform noise images, so both
        # runs see an identical (deterministic) input stream.
        data_graph = tf.Graph()
        with data_graph.as_default():
            input_shape = [self.args.batch_size, RESNET_IMAGE_SIZE, RESNET_IMAGE_SIZE, 3]
            images = tf.random.uniform(input_shape, 0.0, 255.0, dtype=tf.float32, seed=42, name='synthetic_images')

        # Load the frozen graph and strip training-only nodes.
        infer_graph = tf.Graph()
        with infer_graph.as_default():
            graph_def = tf.compat.v1.GraphDef()
            with tf.io.gfile.GFile(self.args.input_graph, 'rb') as input_file:
                input_graph_content = input_file.read()
                graph_def.ParseFromString(input_graph_content)
            print("Optimizing graph %s for inference..." % self.args.input_graph)
            output_graph = optimize_for_inference(graph_def, [INPUTS], [OUTPUTS], dtypes.float32.as_datatype_enum, False)
            tf.import_graph_def(output_graph, name='')

        input_tensor = infer_graph.get_tensor_by_name('input_tensor:0')
        output_tensor = infer_graph.get_tensor_by_name('softmax_tensor:0')

        # Run without nGraph first
        print("Run inference (without nGraph)")
        ngraph_bridge.disable()
        data_sess = tf.compat.v1.Session(graph=data_graph, config=config)
        infer_sess = tf.compat.v1.Session(graph=infer_graph, config=config)

        iteration = 0
        num_processed_images = 0
        num_remaining_images = self.args.num_images
        tf_time = 0.0
        tf_labels = np.array([], dtype=np.int32)
        # Image/time counters only advance after the warmup iterations, so
        # warmup batches are excluded from the throughput numbers.
        # NOTE(review): the strict `>` means warmup_iters + 1 iterations are
        # treated as warmup, not warmup_iters - confirm this is intended.
        while num_remaining_images >= self.args.batch_size:
            np_images = data_sess.run(images)
            if iteration > self.args.warmup_iters:
                num_processed_images += self.args.batch_size
                num_remaining_images -= self.args.batch_size
            tf_start_time = time.time()
            predictions = infer_sess.run(output_tensor, {input_tensor: np_images})
            tf_elapsed_time = time.time() - tf_start_time
            if iteration > self.args.warmup_iters:
                tf_time += tf_elapsed_time
                tf_labels = np.append(tf_labels, np.argmax(predictions, axis=-1))
            iteration += 1
        print("Total execution time (TF): ", tf_time)

        # Run with nGraph now
        print("Run inference (with nGraph)")
        ngraph_bridge.enable()
        data_sess = tf.compat.v1.Session(graph=data_graph, config=config)
        infer_sess = tf.compat.v1.Session(graph=infer_graph, config=config)

        iteration = 0
        num_processed_images = 0
        num_remaining_images = self.args.num_images
        ngtf_time = 0.0
        ngtf_labels = np.array([], dtype=np.int32)
        # Same loop as above, with the nGraph bridge enabled.
        while num_remaining_images >= self.args.batch_size:
            np_images = data_sess.run(images)
            if iteration > self.args.warmup_iters:
                num_processed_images += self.args.batch_size
                num_remaining_images -= self.args.batch_size
            ngtf_start_time = time.time()
            predictions = infer_sess.run(output_tensor, {input_tensor: np_images})
            ngtf_elapsed_time = time.time() - ngtf_start_time
            if iteration > self.args.warmup_iters:
                ngtf_time += ngtf_elapsed_time
                ngtf_labels = np.append(ngtf_labels, np.argmax(predictions, axis=-1))
            iteration += 1
        print("Total execution time (NGTF): ", ngtf_time)

        print("Processed %d images. Batch size = %d" % (num_processed_images, self.args.batch_size))
        print("Avg throughput (TF): %0.4f img/s" % (num_processed_images / tf_time))
        print("Avg throughput (NGTF): %0.4f img/s" % (num_processed_images / ngtf_time))

        # Both runs consumed identical deterministic inputs, so the argmax
        # labels must agree; a mismatch indicates an nGraph numerical bug.
        assert ((tf_labels == ngtf_labels).all())


if __name__ == "__main__":
    graph = RN50Graph()
    graph.run()
""" Transform video =============== In this example, we use ``torchio.Resample((2, 2, 1))`` to divide the spatial size of the clip (height and width) by two and ``RandomAffine(degrees=(0, 0, 20))`` to rotate a maximum of 20 degrees around the time axis. """<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_stmt>matplotlib.animation<as>animation<import_stmt>torch<import_stmt>torchio<as>tio<import_from_stmt>PIL Image<def_stmt>read_clip path undersample=4<block_start>"""Read a GIF a return an array of shape (C, W, H, T)."""<line_sep>gif=Image.open(path)<line_sep>frames=[]<for_stmt>i range(gif.n_frames)<block_start>gif.seek(i)<line_sep>frames.append(np.array(gif.convert('RGB')))<block_end>frames=frames[::undersample]<line_sep>array=np.stack(frames).transpose(3 1 2 0)<line_sep>delay=gif.info['duration']<line_sep><return>array delay<block_end><def_stmt>plot_gif image<block_start><def_stmt>_update_frame num<block_start>frame=get_frame(image num)<line_sep>im.set_data(frame)<line_sep><return><block_end><def_stmt>get_frame image i<block_start><return>image.data[<ellipsis> i].permute(1 2 0).byte()<block_end>plt.rcParams['animation.embed_limit']=25<line_sep>fig,ax=plt.subplots()<line_sep>im=ax.imshow(get_frame(image 0))<line_sep><return>animation.FuncAnimation(fig _update_frame repeat_delay=image['delay'] frames=image.shape[-1] )<block_end># Source: https://thehigherlearning.wordpress.com/2014/06/25/watching-a-cell-divide-under-an-electron-microscope-is-mesmerizing-gif/ # noqa: E501 array,delay=read_clip('nBTu3oi.gif')<line_sep>plt.imshow(array[<ellipsis> 0].transpose(1 2 0))<line_sep>plt.plot()<line_sep>image=tio.ScalarImage(tensor=array delay=delay)<line_sep>original_animation=plot_gif(image)<line_sep>transform=tio.Compose((tio.Resample((2 2 1)) tio.RandomAffine(degrees=(0 0 20)) ))<line_sep>torch.manual_seed(0)<line_sep>transformed=transform(image)<line_sep>transformed_animation=plot_gif(transformed)<line_sep>
""" <NAME>. 2014. The curse of the excluded middle. Commun. ACM 57, 6 (June 2014), 50-55. DOI=10.1145/2605176 http://doi.acm.org/10.1145/2605176 """<with_stmt>open('citation.txt' encoding='ascii')<as>fp<block_start>get_contents=<lambda>:fp.read()<block_end>print(get_contents())<line_sep>
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for FFN model definition."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from . import optimizer


class FFNModel(object):
  """Base class for FFN models.

  Subclasses are expected to set ``dim`` and implement
  ``define_tf_graph``; the remaining attributes below describe the
  geometry of the network's field of view (FOV).
  """

  # Dimensionality of the model (2 or 3).
  dim = None

  ############################################################################
  # (x, y, z) tuples defining various properties of the network.
  # Note that 3-tuples should be used even for 2D networks, in which case
  # the third (z) value is ignored.

  # How far to move the field of view in the respective directions.
  deltas = None

  # Size of the input image and seed subvolumes to be used during inference.
  # This is enough information to execute a single prediction step, without
  # moving the field of view.
  input_image_size = None
  input_seed_size = None

  # Size of the predicted patch as returned by the model.
  pred_mask_size = None
  ###########################################################################

  # TF op to compute loss optimized during training. This should include all
  # loss components in case more than just the pixelwise loss is used.
  loss = None

  # TF op to call to perform loss optimization on the model.
  train_op = None

  def __init__(self, deltas, batch_size=None, define_global_step=True):
    """Initializes the model skeleton (not the TF graph itself).

    Args:
      deltas: (x, y, z) FOV movement deltas; stored on ``self.deltas``.
      batch_size: optional fixed batch size used for shape inference.
      define_global_step: whether to create the ``global_step`` variable.
    """
    # Subclasses must declare the model dimensionality before construction.
    assert self.dim is not None

    self.deltas = deltas
    self.batch_size = batch_size

    # Initialize the shift collection. This is used during training with the
    # fixed step size policy. Every combination of -delta/0/+delta per axis
    # is included, except for the null shift (0, 0, 0).
    self.shifts = []
    for dx in (-self.deltas[0], 0, self.deltas[0]):
      for dy in (-self.deltas[1], 0, self.deltas[1]):
        for dz in (-self.deltas[2], 0, self.deltas[2]):
          if dx == 0 and dy == 0 and dz == 0:
            continue
          self.shifts.append((dx, dy, dz))

    if define_global_step:
      self.global_step = tf.Variable(0, name='global_step', trainable=False)

    # The seed is always a placeholder which is fed externally from the
    # training/inference drivers.
    self.input_seed = tf.placeholder(tf.float32, name='seed')
    self.input_patches = tf.placeholder(tf.float32, name='patches')

    # For training, labels should be defined as a TF object.
    self.labels = None

    # Optional. Provides per-pixel weights with which the loss is multiplied.
    # If specified, should have the same shape as self.labels.
    self.loss_weights = None

    self.logits = None  # type: tf.Operation

    # List of image tensors to save in summaries. The images are concatenated
    # along the X axis.
    self._images = []

  def set_uniform_io_size(self, patch_size):
    """Initializes unset input/output sizes to 'patch_size', sets input shapes.

    This assumes that the inputs and outputs are of equal size, and that
    exactly one step is executed in every direction during training.

    Args:
      patch_size: (x, y, z) specifying the input/output patch size

    Returns:
      None
    """
    # Only fill in sizes the subclass has not already fixed.
    if self.pred_mask_size is None:
      self.pred_mask_size = patch_size
    if self.input_seed_size is None:
      self.input_seed_size = patch_size
    if self.input_image_size is None:
      self.input_image_size = patch_size
    self.set_input_shapes()

  def set_input_shapes(self):
    """Sets the shape inference for input_seed and input_patches.

    Assumes input_seed_size and input_image_size are already set.
    """
    # Sizes are stored as (x, y, z); reversing yields tensors shaped
    # [batch, z, y, x, 1] (single channel appended).
    self.input_seed.set_shape([self.batch_size] +
                              list(self.input_seed_size[::-1]) + [1])
    self.input_patches.set_shape([self.batch_size] +
                                 list(self.input_image_size[::-1]) + [1])

  def set_up_sigmoid_pixelwise_loss(self, logits):
    """Sets up the loss function of the model."""
    # Trainer must have provided labels and per-pixel weights first.
    assert self.labels is not None
    assert self.loss_weights is not None

    pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                         labels=self.labels)
    pixel_loss *= self.loss_weights
    self.loss = tf.reduce_mean(pixel_loss)
    tf.summary.scalar('pixel_loss', self.loss)
    # Fail fast on NaN/Inf losses instead of silently corrupting training.
    self.loss = tf.verify_tensor_all_finite(self.loss, 'Invalid loss detected')

  def set_up_optimizer(self, loss=None, max_gradient_entry_mag=0.7):
    """Sets up the training op for the model.

    Args:
      loss: loss tensor to optimize; defaults to ``self.loss``.
      max_gradient_entry_mag: elementwise gradient clipping bound;
        <= 0 disables clipping.
    """
    if loss is None:
      loss = self.loss
    tf.summary.scalar('optimizer_loss', self.loss)

    opt = optimizer.optimizer_from_flags()
    grads_and_vars = opt.compute_gradients(loss)
    # Surface variables that receive no gradient (usually a graph bug).
    for g, v in grads_and_vars:
      if g is None:
        tf.logging.error('Gradient is None: %s', v.op.name)

    # Clip every gradient entry to [-mag, +mag] to limit update magnitude.
    if max_gradient_entry_mag > 0.0:
      grads_and_vars = [(tf.clip_by_value(g,
                                          -max_gradient_entry_mag,
                                          +max_gradient_entry_mag), v)
                        for g, v, in grads_and_vars]

    # Histogram summaries for all trainable variables and their gradients;
    # ':0' is stripped because summary names may not contain ':'.
    trainables = tf.trainable_variables()
    if trainables:
      for var in trainables:
        tf.summary.histogram(var.name.replace(':0', ''), var)
    for grad, var in grads_and_vars:
      tf.summary.histogram('gradients/%s' % var.name.replace(':0', ''), grad)

    # Run pending UPDATE_OPS (ops registered by layers, e.g. moving-average
    # updates — confirm against the subclass graph) before applying gradients.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      self.train_op = opt.apply_gradients(grads_and_vars,
                                          global_step=self.global_step,
                                          name='train')

  def show_center_slice(self, image, sigmoid=True):
    """Records the middle slice (along axis 1) of a 5-D tensor for summaries.

    Args:
      image: 5-D tensor; axis 1 is presumably the z axis — TODO confirm.
      sigmoid: whether to squash values through a sigmoid first (use True
        for logits).
    """
    image = image[:, image.get_shape().dims[1] // 2, :, :, :]
    if sigmoid:
      image = tf.sigmoid(image)
    self._images.append(image)

  def add_summaries(self):
    # Hook for subclasses; the base model adds no extra summaries.
    pass

  def update_seed(self, seed, update):
    """Updates the initial 'seed' with 'update'.

    If the predicted patch is smaller than the seed, the update is
    zero-padded so that it is applied to the center of the seed.
    """
    dx = self.input_seed_size[0] - self.pred_mask_size[0]
    dy = self.input_seed_size[1] - self.pred_mask_size[1]
    dz = self.input_seed_size[2] - self.pred_mask_size[2]

    if dx == 0 and dy == 0 and dz == 0:
      seed += update
    else:
      # Pad in (z, y, x) tensor order; batch and channel axes untouched.
      seed += tf.pad(update, [[0, 0],
                              [dz // 2, dz - dz // 2],
                              [dy // 2, dy - dy // 2],
                              [dx // 2, dx - dx // 2],
                              [0, 0]])
    return seed

  def define_tf_graph(self):
    """Creates the TensorFlow graph representing the model.

    If self.labels is not None, the graph should include operations for
    computing and optimizing the loss.
    """
    raise NotImplementedError(
        'DefineTFGraph needs to be defined by a subclass.')
"""Setup helpers for building the torchtext C++ extension.

Builds the vendored third-party dependencies (sentencepiece, re2,
double-conversion) with CMake and defines the ``torchtext._torchtext``
CppExtension that links against them.
"""
import os
import platform
import subprocess
from pathlib import Path

from torch.utils.cpp_extension import CppExtension, BuildExtension as TorchBuildExtension

__all__ = [
    'get_ext_modules',
    'BuildExtension',
]

# Repository layout: <root>/torchtext/csrc holds the extension sources,
# <root>/third_party the vendored dependencies; the dependencies are
# installed into third_party/build.
_ROOT_DIR = Path(__file__).parent.parent.parent.resolve()
_CSRC_DIR = _ROOT_DIR / 'torchtext' / 'csrc'
_TP_BASE_DIR = _ROOT_DIR / 'third_party'
_TP_INSTALL_DIR = _TP_BASE_DIR / 'build'


def _get_eca(debug):
    """Return extra compile args for the extension."""
    eca = []
    if platform.system() == "Windows":
        # /MT: link the static multithreaded CRT.
        eca += ['/MT']
    if debug:
        eca += ["-O0", "-g"]
    else:
        if platform.system() == "Windows":
            eca += ['-O2']
        else:
            # Hide symbols by default so only explicit exports are visible
            # from the resulting shared object.
            eca += ["-O3", "-fvisibility=hidden"]
    return eca


def _get_ela(debug):
    """Return extra link args for the extension."""
    ela = []
    if debug:
        if platform.system() == "Windows":
            ela += ["/DEBUG:FULL"]
        else:
            ela += ["-O0", "-g"]
    else:
        if platform.system() != "Windows":
            ela += ["-O3"]
    return ela


def _get_srcs():
    # All C++ sources under torchtext/csrc, recursively.
    return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]


def _get_include_dirs():
    return [
        str(_CSRC_DIR),
        str(_TP_INSTALL_DIR / 'include'),
    ]


def _get_library_dirs():
    # Some CMake generators install into lib64 instead of lib.
    return [
        str(_TP_INSTALL_DIR / 'lib'),
        str(_TP_INSTALL_DIR / 'lib64')
    ]


def _get_libraries():
    # NOTE: The order of the library listed bellow matters.
    #
    # For example, the symbol `sentencepiece::unigram::Model` is
    # defined in sentencepiece but UNDEFINED in sentencepiece_train.
    # GCC only remembers the last encountered symbol.
    # Therefore placing 'sentencepiece_train' after 'sentencepiece' cause runtime error.
    #
    # $ nm third_party/build/lib/libsentencepiece_train.a | grep _ZTIN13sentencepiece7unigram5ModelE
    #                  U _ZTIN13sentencepiece7unigram5ModelE
    # $ nm third_party/build/lib/libsentencepiece.a       | grep _ZTIN13sentencepiece7unigram5ModelE
    # 0000000000000000 V _ZTIN13sentencepiece7unigram5ModelE
    return ['sentencepiece_train', 'sentencepiece', 're2', 'double-conversion']


def _get_cxx11_abi():
    """Return the -D_GLIBCXX_USE_CXX11_ABI flag matching the installed torch.

    Defaults to the pre-C++11 ABI (0) when torch cannot be imported, so the
    vendored libraries stay link-compatible with the extension.
    """
    try:
        import torch
        value = int(torch._C._GLIBCXX_USE_CXX11_ABI)
    except ImportError:
        value = 0
    return '-D_GLIBCXX_USE_CXX11_ABI=' + str(value)


def _build_third_party(debug):
    """Configure, build and install the third_party superproject via CMake."""
    build_dir = _TP_BASE_DIR / 'build'
    build_dir.mkdir(exist_ok=True)
    build_env = os.environ.copy()
    config = 'Debug' if debug else 'Release'
    if platform.system() == 'Windows':
        # Use Ninja with MSVC's cl as the compiler driver.
        extra_args = [
            '-GNinja',
        ]
        build_env.setdefault('CC', 'cl')
        build_env.setdefault('CXX', 'cl')
    else:
        # -fPIC: the static libs get linked into a shared extension.
        extra_args = ['-DCMAKE_CXX_FLAGS=-fPIC ' + _get_cxx11_abi()]
    subprocess.run(
        args=[
            'cmake',
            '-DBUILD_SHARED_LIBS=OFF',
            '-DRE2_BUILD_TESTING=OFF',
            '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
            f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',
            f'-DCMAKE_BUILD_TYPE={config}',
            '-DCMAKE_CXX_VISIBILITY_PRESET=hidden',
            # CMP0063=NEW: honor visibility settings for all target types.
            '-DCMAKE_POLICY_DEFAULT_CMP0063=NEW',
        ] + extra_args + ['..'],
        cwd=str(build_dir),
        check=True,
        env=build_env,
    )
    # Dump the generated compile commands for build debugging.
    print('*** Command list Thirdparty ***')
    with open(build_dir / 'compile_commands.json', 'r') as fileobj:
        print(fileobj.read())
    print('running cmake --build', flush=True)
    subprocess.run(
        args=[
            'cmake',
            '--build', '.',
            '--target', 'install',
            '--config', config,
        ],
        cwd=str(build_dir),
        check=True,
        env=build_env,
    )


def _build_sentence_piece(debug):
    """Configure, build and install sentencepiece via CMake."""
    build_dir = _TP_BASE_DIR / 'sentencepiece' / 'build'
    build_dir.mkdir(exist_ok=True)
    build_env = os.environ.copy()
    config = 'Debug' if debug else 'Release'
    if platform.system() == 'Windows':
        extra_args = ['-GNinja']
        build_env.setdefault('CC', 'cl')
        build_env.setdefault('CXX', 'cl')
    else:
        extra_args = []
    subprocess.run(
        args=[
            'cmake',
            '-DSPM_ENABLE_SHARED=OFF',
            f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',
            '-DCMAKE_CXX_VISIBILITY_PRESET=hidden',
            '-DCMAKE_CXX_FLAGS=' + _get_cxx11_abi(),
            '-DCMAKE_POLICY_DEFAULT_CMP0063=NEW',
            f'-DCMAKE_BUILD_TYPE={config}',
        ] + extra_args + ['..'],
        cwd=str(build_dir),
        check=True,
        env=build_env,
    )
    subprocess.run(
        args=['cmake', '--build', '.', '--target', 'install',
              '--config', config],
        cwd=str(build_dir),
        check=True,
        env=build_env,
    )


def _configure_third_party(debug):
    # Build order: superproject first, then sentencepiece.
    _build_third_party(debug)
    _build_sentence_piece(debug)


_EXT_NAME = 'torchtext._torchtext'


def get_ext_modules(debug=False):
    """Return the list of setuptools extension modules to build."""
    return [
        CppExtension(
            _EXT_NAME,
            _get_srcs(),
            libraries=_get_libraries(),
            include_dirs=_get_include_dirs(),
            library_dirs=_get_library_dirs(),
            extra_compile_args=_get_eca(debug),
            extra_link_args=_get_ela(debug),
        ),
    ]


class BuildExtension(TorchBuildExtension):
    """build_ext command that compiles the vendored deps before our extension."""

    def build_extension(self, ext):
        # Build third-party libraries lazily, only when our extension is
        # actually being compiled.
        if ext.name == _EXT_NAME:
            _configure_third_party(self.debug)
        super().build_extension(ext)
# Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for chord_inference."""

from absl.testing import absltest
from note_seq import chord_inference
from note_seq import sequences_lib
from note_seq import testing_lib
from note_seq.protobuf import music_pb2

# Shorthand for the chord-symbol annotation type, used when filtering text
# annotations in the beat-based test below.
CHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL


class ChordInferenceTest(absltest.TestCase):
  """Tests for pitch-vector extraction and chord inference.

  Note tuples passed to ``add_track_to_sequence`` are
  (pitch, velocity, start_time, end_time).
  """

  def testSequenceNotePitchVectors(self):
    sequence = music_pb2.NoteSequence()
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 100, 0.0, 0.0), (62, 100, 0.0, 0.5), (60, 100, 1.5, 2.5),
         (64, 100, 2.0, 2.5), (67, 100, 2.25, 2.75), (70, 100, 2.5, 4.5),
         (60, 100, 6.0, 6.0)])
    note_pitch_vectors = chord_inference.sequence_note_pitch_vectors(
        sequence, seconds_per_frame=1.0)
    # One 12-dimensional pitch-class vector per one-second frame; rows appear
    # to be unit-normalized (e.g. four simultaneous classes -> 0.5 each).
    expected_note_pitch_vectors = [
        [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.5, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    ]
    self.assertEqual(expected_note_pitch_vectors, note_pitch_vectors.tolist())

  def testSequenceNotePitchVectorsVariableLengthFrames(self):
    # Same track as above, but seconds_per_frame is a list of explicit frame
    # boundary times, yielding len(boundaries) + 1 variable-length frames.
    sequence = music_pb2.NoteSequence()
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 100, 0.0, 0.0), (62, 100, 0.0, 0.5), (60, 100, 1.5, 2.5),
         (64, 100, 2.0, 2.5), (67, 100, 2.25, 2.75), (70, 100, 2.5, 4.5),
         (60, 100, 6.0, 6.0)])
    note_pitch_vectors = chord_inference.sequence_note_pitch_vectors(
        sequence, seconds_per_frame=[1.5, 2.0, 3.0, 5.0])
    expected_note_pitch_vectors = [
        [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.5, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    ]
    self.assertEqual(expected_note_pitch_vectors, note_pitch_vectors.tolist())

  def testInferChordsForSequence(self):
    # Four one-second block chords: C, Dm, F, G.
    sequence = music_pb2.NoteSequence()
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 100, 0.0, 1.0), (64, 100, 0.0, 1.0), (67, 100, 0.0, 1.0),   # C
         (62, 100, 1.0, 2.0), (65, 100, 1.0, 2.0), (69, 100, 1.0, 2.0),   # Dm
         (60, 100, 2.0, 3.0), (65, 100, 2.0, 3.0), (69, 100, 2.0, 3.0),   # F
         (59, 100, 3.0, 4.0), (62, 100, 3.0, 4.0), (67, 100, 3.0, 4.0)])  # G
    quantized_sequence = sequences_lib.quantize_note_sequence(
        sequence, steps_per_quarter=4)
    chord_inference.infer_chords_for_sequence(
        quantized_sequence, chords_per_bar=2)
    # Inferred chords are stored as text annotations on the sequence.
    expected_chords = [('C', 0.0), ('Dm', 1.0), ('F', 2.0), ('G', 3.0)]
    chords = [(ta.text, ta.time) for ta in quantized_sequence.text_annotations]
    self.assertEqual(expected_chords, chords)

  def testInferChordsForSequenceAddKeySignatures(self):
    # First four chords are diatonic in C major, last four in F# major; with
    # add_key_signatures=True the inferred keys are also written back.
    sequence = music_pb2.NoteSequence()
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 100, 0.0, 1.0), (64, 100, 0.0, 1.0), (67, 100, 0.0, 1.0),   # C
         (62, 100, 1.0, 2.0), (65, 100, 1.0, 2.0), (69, 100, 1.0, 2.0),   # Dm
         (60, 100, 2.0, 3.0), (65, 100, 2.0, 3.0), (69, 100, 2.0, 3.0),   # F
         (59, 100, 3.0, 4.0), (62, 100, 3.0, 4.0), (67, 100, 3.0, 4.0),   # G
         (66, 100, 4.0, 5.0), (70, 100, 4.0, 5.0), (73, 100, 4.0, 5.0),   # F#
         (68, 100, 5.0, 6.0), (71, 100, 5.0, 6.0), (75, 100, 5.0, 6.0),   # G#m
         (66, 100, 6.0, 7.0), (71, 100, 6.0, 7.0), (75, 100, 6.0, 7.0),   # B
         (65, 100, 7.0, 8.0), (68, 100, 7.0, 8.0), (73, 100, 7.0, 8.0)])  # C#
    quantized_sequence = sequences_lib.quantize_note_sequence(
        sequence, steps_per_quarter=4)
    chord_inference.infer_chords_for_sequence(
        quantized_sequence, chords_per_bar=2, add_key_signatures=True)
    # Keys are encoded as pitch classes: 0 = C, 6 = F#.
    expected_key_signatures = [(0, 0.0), (6, 4.0)]
    key_signatures = [(ks.key, ks.time)
                      for ks in quantized_sequence.key_signatures]
    self.assertEqual(expected_key_signatures, key_signatures)

  def testInferChordsForSequenceWithBeats(self):
    # Unquantized sequence: chord boundaries come from the annotated beats
    # (note the deliberately repeated beat at 1.9).
    sequence = music_pb2.NoteSequence()
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 100, 0.0, 1.1), (64, 100, 0.0, 1.1), (67, 100, 0.0, 1.1),   # C
         (62, 100, 1.1, 1.9), (65, 100, 1.1, 1.9), (69, 100, 1.1, 1.9),   # Dm
         (60, 100, 1.9, 3.0), (65, 100, 1.9, 3.0), (69, 100, 1.9, 3.0),   # F
         (59, 100, 3.0, 4.5), (62, 100, 3.0, 4.5), (67, 100, 3.0, 4.5)])  # G
    testing_lib.add_beats_to_sequence(sequence, [0.0, 1.1, 1.9, 1.9, 3.0])
    chord_inference.infer_chords_for_sequence(sequence)
    expected_chords = [('C', 0.0), ('Dm', 1.1), ('F', 1.9), ('G', 3.0)]
    # Beats are text annotations too, so filter to chord symbols only.
    chords = [(ta.text, ta.time) for ta in sequence.text_annotations
              if ta.annotation_type == CHORD_SYMBOL]
    self.assertEqual(expected_chords, chords)


if __name__ == '__main__':
  absltest.main()
from .cifar import Cifar10DataProvider, Cifar100DataProvider, \
    Cifar10AugmentedDataProvider, Cifar100AugmentedDataProvider
from .svhn import SVHNDataProvider


def get_data_provider_by_name(name, train_params):
    """Return required data provider class"""
    # Map each dataset short name to its provider class; a '+' suffix
    # selects the augmented variant of the same dataset.
    providers = {
        'C10': Cifar10DataProvider,
        'C10+': Cifar10AugmentedDataProvider,
        'C100': Cifar100DataProvider,
        'C100+': Cifar100AugmentedDataProvider,
        'SVHN': SVHNDataProvider,
    }
    if name not in providers:
        # Unknown dataset name: report and terminate, as before.
        print("Sorry, data provider for `%s` dataset "
              "was not implemented yet" % name)
        exit()
    return providers[name](**train_params)
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Clear out our local testing directories."""
import argparse
import os
import shutil

# Relative paths (under the supplied testing path) whose subdirectories
# should be wiped between runs.
TESTING_DIRS = [
    "local_providers/aws_local",
    "local_providers/aws_local_0",
    "local_providers/aws_local_1",
    "local_providers/aws_local_2",
    "local_providers/aws_local_3",
    "local_providers/aws_local_4",
    "local_providers/aws_local_5",
    "local_providers/azure_local",
    "local_providers/gcp_local",
    "local_providers/gcp_local_0",
    "local_providers/gcp_local_1",
    "local_providers/gcp_local_2",
    "local_providers/gcp_local_3",
    "local_providers/insights_local",
    "pvc_dir/insights_local",
    "pvc_dir/processing",
    "parquet_data",
]


def main(*args, **kwargs):
    """Delete every subdirectory of each known testing directory.

    Only directories are removed; regular files inside the testing
    directories are left in place. Missing paths are reported and skipped.
    """
    base = kwargs["testing_path"]
    for relative in TESTING_DIRS:
        target = f"{base}/{relative}"
        try:
            print(f"Checking {target}")
            subdirs = [entry.path for entry in os.scandir(target) if entry.is_dir()]
            for subdir in subdirs:
                print(f"Removing {subdir}")
                shutil.rmtree(subdir)
        except FileNotFoundError as err:
            # Nonexistent testing dir (or a racing delete): report and move on.
            print(err)


if __name__ == "__main__":
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument("-p", "--path", dest="testing_path", help="The path to the testing directory", required=True)
    ARGS = vars(PARSER.parse_args())
    main(**ARGS)
import logging
import platform
from unittest.mock import Mock

from sanic import __version__
from sanic.application.logo import BASE_LOGO
from sanic.application.motd import MOTDTTY

# NOTE: ``app`` and ``run_startup`` are pytest fixtures supplied by the test
# suite's conftest; ``run_startup`` returns captured log records as
# (logger, level, message) tuples — presumably; confirm against conftest.


def test_logo_base(app, run_startup):
    # With default config the base logo is the first (DEBUG) log record.
    logs = run_startup(app)

    assert logs[0][1] == logging.DEBUG
    assert logs[0][2] == BASE_LOGO


def test_logo_false(app, run_startup):
    # Disabling the logo: the first record is the INFO "Goin' Fast" banner
    # instead; the bound port is ephemeral, so only its positivity is checked.
    app.config.LOGO = False

    logs = run_startup(app)

    banner, port = logs[1][2].rsplit(":", 1)
    assert logs[0][1] == logging.INFO
    assert banner == "Goin' Fast @ http://127.0.0.1"
    assert int(port) > 0


def test_logo_true(app, run_startup):
    # Explicitly enabling the logo behaves like the default.
    app.config.LOGO = True

    logs = run_startup(app)

    assert logs[0][1] == logging.DEBUG
    assert logs[0][2] == BASE_LOGO


def test_logo_custom(app, run_startup):
    # A string LOGO value is emitted verbatim.
    app.config.LOGO = "My Custom Logo"

    logs = run_startup(app)

    assert logs[0][1] == logging.DEBUG
    assert logs[0][2] == "My Custom Logo"


def test_motd_with_expected_info(app, run_startup):
    # The MOTD lines report version, mode, server, python and platform.
    logs = run_startup(app)

    assert logs[1][2] == f"Sanic v{__version__}"
    assert logs[3][2] == "mode: debug, single worker"
    assert logs[4][2] == "server: sanic"
    assert logs[5][2] == f"python: {platform.python_version()}"
    assert logs[6][2] == f"platform: {platform.platform()}"


def test_motd_init():
    # Patch set_variables at the class level to verify the constructor calls
    # it exactly once, then restore the original to avoid cross-test leakage.
    _orig = MOTDTTY.set_variables
    MOTDTTY.set_variables = Mock()

    motd = MOTDTTY(None, "", {}, {})

    motd.set_variables.assert_called_once()
    MOTDTTY.set_variables = _orig


def test_motd_display(caplog):
    # Render a MOTD box with one key/value pair on each side and compare the
    # captured log output against the fully drawn box.
    motd = MOTDTTY(" foobar ", "", {"one": "1"}, {"two": "2"})

    with caplog.at_level(logging.INFO):
        motd.display()

    version_line = f"Sanic v{__version__}".center(motd.centering_length)
    assert (
        "".join(caplog.messages)
        == f"""
 ┌────────────────────────────────┐
 │ {version_line} │
 │                                │
 ├───────────────────────┬────────┤
 │ foobar                │ one: 1 │
 |                       ├────────┤
 │                       │ two: 2 │
 └───────────────────────┴────────┘
"""
    )