content
stringlengths
0
1.55M
# -*- encoding: utf-8 -*- <import_from_stmt>django.utils timezone<import_from_stmt>django.contrib.sites.models get_current_site<import_from_stmt>django.conf settings<import_from_stmt>haystack.query SearchQuerySet<import_from_stmt>opps.views.generic.list ListView<import_from_stmt>opps.containers.models Container<import_from_stmt>opps.channels.models Channel<import_from_stmt>.models Tag<line_sep>USE_HAYSTACK=getattr(settings 'OPPS_TAGS_USE_HAYSTACK' <false>)<class_stmt>TagList(ListView)<block_start>model=Container<def_stmt>get_template_list self domain_folder="containers"<block_start>templates=[]<line_sep>list_name='list_tags'<if_stmt>self.request.GET.get('page')<and>self.__class__.__name__<not><in>settings.OPPS_PAGINATE_NOT_APP<block_start>templates.append('{0}/{1}_paginated.html'.format(domain_folder list_name))<block_end>templates.append('{0}/{1}.html'.format(domain_folder list_name))<line_sep><return>templates<block_end><def_stmt>get_context_data self **kwargs<block_start>context=super(TagList self).get_context_data(**kwargs)<line_sep>context['tag']=self.kwargs['tag']<line_sep>site=get_current_site(self.request)<line_sep>context['channel']=Channel.objects.get_homepage(site)<line_sep><return>context<block_end><def_stmt>get_queryset self<block_start>self.site=get_current_site(self.request)<line_sep># without the long_slug, the queryset will cause an error self.long_slug='tags'<line_sep>self.tag=self.kwargs['tag']<if_stmt>USE_HAYSTACK<block_start><return>self.get_queryset_from_haystack()<block_end><return>self.get_queryset_from_db()<block_end><def_stmt>get_queryset_from_haystack self<block_start>models=Container.get_children_models()<line_sep>sqs=SearchQuerySet().models(*models).filter(tags=self.tag).order_by('-date_available')<line_sep>sqs.model=Container<line_sep><return>sqs<block_end><def_stmt>get_queryset_from_db 
self<block_start>tags=Tag.objects.filter(slug=self.tag).values_list('name')<or>[]<line_sep>tags_names=[]<if_stmt>tags<block_start>tags_names=[i[0]<for>i tags]<block_end>ids=[]<for_stmt>tag tags_names<block_start>result=self.containers=self.model.objects.filter(site_domain=self.site tags__contains=tag date_available__lte=timezone.now() published=<true>)<if_stmt>result.exists()<block_start>ids.extend([i.id<for>i result])<block_end><block_end># remove the repeated ids=list(set(ids))<line_sep># grab the containers self.containers=self.model.objects.filter(id__in=ids)<line_sep><return>self.containers<block_end><block_end>
# -*- coding: utf-8 -*- """Module with custom implementations of :class:`click.Group`."""<line_sep># AUTO-GENERATED # yapf: disable # pylint: disable=wildcard-import <import_from_stmt>.dynamic *<import_from_stmt>.verdi *<line_sep>__all__=('DynamicEntryPointCommandGroup' 'VerdiCommandGroup' )<line_sep># yapf: enable
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- <import_stmt>os<import_from_stmt>azure.cli.testsdk ResourceGroupPreparer StorageAccountPreparer JMESPathCheck ScenarioTest <import_from_stmt>..storage_test_util StorageScenarioMixin<class_stmt>StorageFileShareScenarios(StorageScenarioMixin ScenarioTest)<block_start>@ResourceGroupPreparer()@StorageAccountPreparer()<def_stmt>test_storage_file_upload_small_file_v2 self resource_group storage_account_info<block_start>account_info=storage_account_info<line_sep>share_name=self.create_share(account_info)<line_sep>curr_dir=os.path.dirname(os.path.realpath(__file__))<line_sep>local_file=os.path.join(curr_dir 'upload_file').replace('\\' '\\\\')<line_sep>local_file_name='upload_file'<line_sep>self.storage_cmd('storage file upload -s {} --source "{}" '<concat>'--content-cache-control no-cache '<concat>'--content-disposition attachment '<concat>'--content-encoding compress '<concat>'--content-language en-US '<concat>'--content-type "multipart/form-data;" '<concat>'--metadata key=val ' account_info share_name local_file)<line_sep>self.storage_cmd('storage file show -s {} -p "{}"' account_info share_name local_file_name).assert_with_checks(JMESPathCheck('name' local_file_name) JMESPathCheck('properties.contentSettings.cacheControl' 'no-cache') JMESPathCheck('properties.contentSettings.contentDisposition' 'attachment') JMESPathCheck('properties.contentSettings.contentEncoding' 'compress') JMESPathCheck('properties.contentSettings.contentLanguage' 'en-US') JMESPathCheck('properties.contentSettings.contentType' 'multipart/form-data;') JMESPathCheck('metadata' {'key':'val'}))<line_sep>dest_dir='dest_dir'<import_from_stmt>azure.core.exceptions 
ResourceNotFoundError<with_stmt>self.assertRaises(ResourceNotFoundError)<block_start>self.storage_cmd('storage file upload -s {} --source "{}" -p {}' account_info share_name local_file dest_dir)<block_end>self.storage_cmd('storage directory create -s {} -n {}' account_info share_name dest_dir)<line_sep>self.storage_cmd('storage file upload -s {} --source "{}" -p {}' account_info share_name local_file dest_dir)<line_sep>self.storage_cmd('storage file show -s {} -p "{}"' account_info share_name dest_dir+'/'+local_file_name).assert_with_checks(JMESPathCheck('name' local_file_name))<line_sep>dest_file='dest_file.json'<line_sep>self.storage_cmd('storage file upload -s {} --source "{}" -p {}' account_info share_name local_file dest_file)<line_sep>self.storage_cmd('storage file show -s {} -p "{}"' account_info share_name dest_file).assert_with_checks(JMESPathCheck('name' dest_file))<line_sep>dest_path=dest_dir+'/'+dest_file<line_sep>self.storage_cmd('storage file upload -s {} --source "{}" -p {}' account_info share_name local_file dest_path)<line_sep>self.storage_cmd('storage file show -s {} -p "{}"' account_info share_name dest_path).assert_with_checks(JMESPathCheck('name' dest_file))<line_sep>sub_deep_path=dest_dir+'/'+'sub_dir'<line_sep>self.storage_cmd('storage directory create -s {} -n {}' account_info share_name sub_deep_path)<line_sep>self.storage_cmd('storage file upload -s {} --source "{}" -p {}' account_info share_name local_file sub_deep_path)<line_sep>self.storage_cmd('storage file show -s {} -p "{}"' account_info share_name sub_deep_path+'/'+local_file_name).assert_with_checks(JMESPathCheck('name' local_file_name))<line_sep>sub_deep_file=sub_deep_path+'/'+dest_file<line_sep>self.storage_cmd('storage file upload -s {} --source "{}" -p {}' account_info share_name local_file sub_deep_file)<line_sep>self.storage_cmd('storage file show -s {} -p "{}"' account_info share_name sub_deep_file).assert_with_checks(JMESPathCheck('name' dest_file))<block_end><block_end>
<import_stmt>ctypes<import_stmt>ida_ida<import_stmt>ida_funcs<import_stmt>ida_graph<import_stmt>ida_idaapi<import_stmt>ida_kernwin<import_stmt>ida_hexrays<import_from_stmt>PyQt5 QtWidgets QtGui QtCore sip<import_from_stmt>lucid.ui.sync MicroCursorHighlight<import_from_stmt>lucid.ui.subtree MicroSubtreeView<import_from_stmt>lucid.util.python register_callback notify_callback<import_from_stmt>lucid.util.hexrays get_microcode get_mmat get_mmat_name get_mmat_levels<import_from_stmt>lucid.microtext MicrocodeText MicroInstructionToken MicroOperandToken AddressToken BlockNumberToken translate_mtext_position remap_mtext_position<line_sep>#------------------------------------------------------------------------------ # Microcode Explorer #------------------------------------------------------------------------------ # # The Microcode Explorer UI is mostly implemented following a standard # Model-View-Controller pattern. This is a little abnormal for Qt, but # I've come to appreciate it more for its portability and testability. # <class_stmt>MicrocodeExplorer(object)<block_start>""" The controller component of the microcode explorer. The role of the controller is to handle user gestures, map user actions to model updates, and change views based on controls. In theory, the controller should be able to drive the 'view' headlessly or simulate user UI interaction. """<def_stmt>__init__ self<block_start>self.model=MicrocodeExplorerModel()<line_sep>self.view=MicrocodeExplorerView(self self.model)<line_sep>self.view._code_sync.enable_sync(<true>)<block_end># XXX/HACK <def_stmt>show self address=<none><block_start>""" Show the microcode explorer. """<if_stmt>address<is><none><block_start>address=ida_kernwin.get_screen_ea()<block_end>self.select_function(address)<line_sep>self.view.show()<block_end><def_stmt>show_subtree self insn_token<block_start>""" Show the sub-instruction graph for the given instruction token. 
"""<line_sep>graph=MicroSubtreeView(insn_token.insn)<line_sep>graph.show()<line_sep># TODO/HACK: this is dumb, but moving it breaks my centering code so # i'll figure it out later... gv=ida_graph.get_graph_viewer(graph.GetWidget())<line_sep>ida_graph.viewer_set_titlebar_height(gv 15)<block_end>#------------------------------------------------------------------------- # View Toggles #------------------------------------------------------------------------- <def_stmt>set_highlight_mutual self status<block_start>""" Toggle the highlighting of lines containing the same active address. """<if_stmt>status<block_start>self.view._code_sync.hook()<block_end><else_stmt><block_start>self.view._code_sync.unhook()<block_end>ida_kernwin.refresh_idaview_anyway()<block_end><def_stmt>set_verbose self status<block_start>""" Toggle the verbosity of the printed microcode text. """<line_sep>self.model.verbose=status<line_sep>ida_kernwin.refresh_idaview_anyway()<block_end>#------------------------------------------------------------------------- # View Controls #------------------------------------------------------------------------- <def_stmt>select_function self address<block_start>""" Switch the microcode view to the specified function. """<line_sep>func=ida_funcs.get_func(address)<if_stmt><not>func<block_start><return><false><block_end><for_stmt>maturity get_mmat_levels()<block_start>mba=get_microcode(func maturity)<line_sep>mtext=MicrocodeText(mba self.model.verbose)<line_sep>self.model.update_mtext(mtext maturity)<block_end>self.view.refresh()<line_sep>ida_kernwin.refresh_idaview_anyway()<line_sep><return><true><block_end><def_stmt>select_maturity self maturity_name<block_start>""" Switch the microcode view to the specified maturity level. """<line_sep>self.model.active_maturity=get_mmat(maturity_name)<line_sep>#self.view.refresh() <block_end><def_stmt>select_address self address<block_start>""" Select a token in the microcode view matching the given address. 
"""<line_sep>tokens=self.model.mtext.get_tokens_for_address(address)<if_stmt><not>tokens<block_start><return><none><block_end>token_line_num,token_x=self.model.mtext.get_pos_of_token(tokens[0])<line_sep>rel_y=self.model.current_position[2]<if_stmt>self.model.current_position[2]<eq>0<block_start>rel_y=30<block_end>self.model.current_position=(token_line_num token_x rel_y)<line_sep><return>tokens[0]<block_end><def_stmt>select_position self line_num x y<block_start>""" Select the given text position in the microcode view. """<line_sep>self.model.current_position=(line_num x y)<line_sep>#print(" - hovered token: %s" % self.model.current_token.text) #print(" - hovered taddr: 0x%08X" % self.model.current_token.address) #print(" - hovered laddr: 0x%08X" % self.model.current_address) <block_end><def_stmt>activate_position self line_num x y<block_start>""" Activate (eg. double click) the given text position in the microcode view. """<line_sep>token=self.model.mtext.get_token_at_position(line_num x)<if_stmt>isinstance(token AddressToken)<block_start>ida_kernwin.jumpto(token.target_address -1 0)<line_sep><return><block_end><if_stmt>isinstance(token BlockNumberToken)<or>(isinstance(token MicroOperandToken)<and>token.mop.t<eq>ida_hexrays.mop_b)<block_start>blk_idx=token.blk_idx<if>isinstance(token BlockNumberToken)<else>token.mop.b<line_sep>blk_token=self.model.mtext.blks[blk_idx]<line_sep>blk_line_num,_=self.model.mtext.get_pos_of_token(blk_token.lines[0])<line_sep>self.model.current_position=(blk_line_num 0 y)<line_sep>self.view._code_view.Jump(*self.model.current_position)<line_sep><return><block_end><block_end><block_end><class_stmt>MicrocodeExplorerModel(object)<block_start>""" The model component of the microcode explorer. The role of the model is to encapsulate application state, respond to state queries, and notify views of changes. Ideally, the model could be serialized / unserialized to save and restore state. 
"""<def_stmt>__init__ self# # 'mtext' is short for MicrocodeText objects (see microtext.py) # # this dictionary will contain a mtext object (the renderable text # mapping of a given hexrays mba_t) for each microcode maturity level # of the current function. # # at any given time, one mtext will be 'active' in the model, and # therefore visible in the UI/Views # <block_start>self._mtext={x:<none><for>x get_mmat_levels()}<line_sep># # there is a 'cursor' (ViewCursor) for each microcode maturity level / # mtext object. cursors don't actually contain the 'position' in the # rendered text (line_num, x), but also information to position the # cursor within the line view (y) # self._view_cursors={x:<none><for>x get_mmat_levels()}<line_sep># # the currently active / selected maturity level of the model. this # determines which mtext is currently visible / active in the # microcode view, and which cursor will be used # self._active_maturity=ida_hexrays.MMAT_GENERATED<line_sep># this flag tracks the verbosity toggle state self._verbose=<false><line_sep>#---------------------------------------------------------------------- # Callbacks #---------------------------------------------------------------------- self._mtext_refreshed_callbacks=[]<line_sep>self._position_changed_callbacks=[]<line_sep>self._maturity_changed_callbacks=[]<block_end>#------------------------------------------------------------------------- # Read-Only Properties #------------------------------------------------------------------------- @property<def_stmt>mtext self<block_start>""" Return the microcode text mapping for the current maturity level. """<line_sep><return>self._mtext[self._active_maturity]<block_end>@property<def_stmt>current_line self<block_start>""" Return the line token at the current viewport cursor position. 
"""<if_stmt><not>self.mtext<block_start><return><none><block_end>line_num,_,_=self.current_position<line_sep><return>self.mtext.lines[line_num]<block_end>@property<def_stmt>current_function self<block_start>""" Return the current function address. """<if_stmt><not>self.mtext<block_start><return>ida_idaapi.BADADDR<block_end><return>self.mtext.mba.entry_ea<block_end>@property<def_stmt>current_token self<block_start>""" Return the token at the current viewport cursor position. """<line_sep><return>self.mtext.get_token_at_position(*self.current_position[:2])<block_end>@property<def_stmt>current_address self<block_start>""" Return the address at the current viewport cursor position. """<line_sep><return>self.mtext.get_address_at_position(*self.current_position[:2])<block_end>@property<def_stmt>current_cursor self<block_start>""" Return the current viewport cursor. """<line_sep><return>self._view_cursors[self._active_maturity]<block_end>#------------------------------------------------------------------------- # Mutable Properties #------------------------------------------------------------------------- @property<def_stmt>current_position self<block_start>""" Return the current viewport cursor position (line_num, view_x, view_y). """<line_sep><return>self.current_cursor.viewport_position<block_end>@current_position.setter<def_stmt>current_position self value<block_start>""" Set the cursor position of the viewport. """<line_sep>self._gen_cursors(value self.active_maturity)<line_sep>self._notify_position_changed()<block_end>@property<def_stmt>verbose self<block_start>""" Return the microcode verbosity status of the viewport. """<line_sep><return>self._verbose<block_end>@verbose.setter<def_stmt>verbose self value<block_start>""" Set the verbosity of the microcode displayed by the viewport. 
"""<if_stmt>self._verbose<eq>value<block_start><return><block_end># update the active verbosity setting self._verbose=value<line_sep># verbosity must have changed, so force a mtext refresh self.refresh_mtext()<block_end>@property<def_stmt>active_maturity self<block_start>""" Return the active microcode maturity level. """<line_sep><return>self._active_maturity<block_end>@active_maturity.setter<def_stmt>active_maturity self new_maturity<block_start>""" Set the active microcode maturity level. """<line_sep>self._active_maturity=new_maturity<line_sep>self._notify_maturity_changed()<block_end>#---------------------------------------------------------------------- # Misc #---------------------------------------------------------------------- <def_stmt>update_mtext self mtext maturity<block_start>""" Set the mtext for a given microcode maturity level. """<line_sep>self._mtext[maturity]=mtext<line_sep>self._view_cursors[maturity]=ViewCursor(0 0 0)<block_end><def_stmt>refresh_mtext self<block_start>""" Regenerate the rendered text for all microcode maturity levels. TODO: This is a bit sloppy, and is basically only used for the verbosity toggle. """<for_stmt>maturity,mtext self._mtext.items()<block_start><if_stmt>maturity<eq>self.active_maturity<block_start>new_mtext=MicrocodeText(mtext.mba self.verbose)<line_sep>self._mtext[maturity]=new_mtext<line_sep>self.current_position=translate_mtext_position(self.current_position mtext new_mtext)<line_sep><continue><block_end>mtext.refresh(self.verbose)<block_end>self._notify_mtext_refreshed()<block_end><def_stmt>_gen_cursors self position mmat_src<block_start>""" Generate the cursors for all levels from a source position and maturity. 
"""<line_sep>mmat_levels=get_mmat_levels()<line_sep>mmat_first,mmat_final=mmat_levels[0] mmat_levels[-1]<line_sep># clear out all the existing cursor mappings self._view_cursors={x:<none><for>x mmat_levels}<line_sep># save the starting cursor line_num,x,y=position<line_sep>self._view_cursors[mmat_src]=ViewCursor(line_num x y <true>)<line_sep># map the cursor backwards from the source maturity mmat_lower=range(mmat_first mmat_src)[::-1]<line_sep>current_maturity=mmat_src<for_stmt>next_maturity mmat_lower<block_start>self._transfer_cursor(current_maturity next_maturity)<line_sep>current_maturity=next_maturity<block_end># map the cursor forward from the source maturity mmat_higher=range(mmat_src+1 mmat_final+1)<line_sep>current_maturity=mmat_src<for_stmt>next_maturity mmat_higher<block_start>self._transfer_cursor(current_maturity next_maturity)<line_sep>current_maturity=next_maturity<block_end><block_end><def_stmt>_transfer_cursor self mmat_src mmat_dst<block_start>""" Translate the cursor position from one maturity to the next. """<line_sep>position=self._view_cursors[mmat_src].viewport_position<line_sep>mapped=self._view_cursors[mmat_src].mapped<line_sep># attempt to translate the position in one mtext to another projection=translate_mtext_position(position self._mtext[mmat_src] self._mtext[mmat_dst])<line_sep># if translation failed, we will generate an approximate cursor <if_stmt><not>projection<block_start>mapped=<false><line_sep>projection=remap_mtext_position(position self._mtext[mmat_src] self._mtext[mmat_dst])<block_end># save the generated cursor line_num,x,y=projection<line_sep>self._view_cursors[mmat_dst]=ViewCursor(line_num x y mapped)<block_end>#---------------------------------------------------------------------- # Callbacks #---------------------------------------------------------------------- <def_stmt>mtext_refreshed self callback<block_start>""" Subscribe a callback for mtext refresh events. 
"""<line_sep>register_callback(self._mtext_refreshed_callbacks callback)<block_end><def_stmt>_notify_mtext_refreshed self<block_start>""" Notify listeners of a mtext refresh event. """<line_sep>notify_callback(self._mtext_refreshed_callbacks)<block_end><def_stmt>position_changed self callback<block_start>""" Subscribe a callback for cursor position changed events. """<line_sep>register_callback(self._position_changed_callbacks callback)<block_end><def_stmt>_notify_position_changed self<block_start>""" Notify listeners of a cursor position changed event. """<line_sep>notify_callback(self._position_changed_callbacks)<block_end><def_stmt>maturity_changed self callback<block_start>""" Subscribe a callback for maturity changed events. """<line_sep>register_callback(self._maturity_changed_callbacks callback)<block_end><def_stmt>_notify_maturity_changed self<block_start>""" Notify listeners of a maturity changed event. """<line_sep>notify_callback(self._maturity_changed_callbacks)<block_end><block_end>#----------------------------------------------------------------------------- # UI Components #----------------------------------------------------------------------------- <class_stmt>MicrocodeExplorerView(QtWidgets.QWidget)<block_start>""" The view component of the Microcode Explorer. 
"""<line_sep>WINDOW_TITLE="Microcode Explorer"<def_stmt>__init__ self controller model<block_start>super(MicrocodeExplorerView self).__init__()<line_sep>self.visible=<false><line_sep># the backing model, and controller for this view (eg, mvc pattern) self.model=model<line_sep>self.controller=controller<line_sep># initialize the plugin UI self._ui_init()<line_sep>self._ui_init_signals()<block_end>#-------------------------------------------------------------------------- # Pseudo Widget Functions #-------------------------------------------------------------------------- <def_stmt>show self<block_start>self.refresh()<line_sep># show the dockable widget flags=ida_kernwin.PluginForm.WOPN_DP_RIGHT|0x200# WOPN_SZHINT ida_kernwin.display_widget(self._twidget flags)<line_sep>ida_kernwin.set_dock_pos(self.WINDOW_TITLE "IDATopLevelDockArea" ida_kernwin.DP_RIGHT)<line_sep>self._code_sync.hook()<block_end><def_stmt>_cleanup self<block_start>self.visible=<false><line_sep>self._twidget=<none><line_sep>self.widget=<none><line_sep>self._code_sync.unhook()<line_sep>self._ui_hooks.unhook()<line_sep># TODO cleanup controller / model <block_end>#-------------------------------------------------------------------------- # Initialization - UI #-------------------------------------------------------------------------- <def_stmt>_ui_init self<block_start>""" Initialize UI elements. """<line_sep>self._ui_init_widget()<line_sep># initialize our ui elements self._ui_init_list()<line_sep>self._ui_init_code()<line_sep>self._ui_init_settings()<line_sep># layout the populated ui just before showing it self._ui_layout()<block_end><def_stmt>_ui_init_widget self<block_start>""" Initialize an IDA widget for this UI control. 
"""<line_sep># create a dockable widget, and save a reference to it for later use self._twidget=ida_kernwin.create_empty_widget(self.WINDOW_TITLE)<line_sep># cast the IDA 'twidget' to a less opaque QWidget object self.widget=ida_kernwin.PluginForm.TWidgetToPyQtWidget(self._twidget)<line_sep># hooks to help track the container/widget lifetime <class_stmt>ExplorerUIHooks(ida_kernwin.UI_Hooks)<block_start><def_stmt>widget_invisible _ twidget<block_start><if_stmt>twidget<eq>self._twidget<block_start>self.visible=<false><line_sep>self._cleanup()<block_end><block_end><def_stmt>widget_visible _ twidget<block_start><if_stmt>twidget<eq>self._twidget<block_start>self.visible=<true><block_end><block_end><block_end># install the widget lifetime hooks self._ui_hooks=ExplorerUIHooks()<line_sep>self._ui_hooks.hook()<block_end><def_stmt>_ui_init_list self<block_start>""" Initialize the microcode maturity list. """<line_sep>self._maturity_list=LayerListWidget()<block_end><def_stmt>_ui_init_code self<block_start>""" Initialize the microcode view(s). """<line_sep>self._code_view=MicrocodeView(self.model)<line_sep>self._code_sync=MicroCursorHighlight(self.controller self.model)<line_sep>self._code_sync.track_view(self._code_view.widget)<block_end><def_stmt>_ui_init_settings self<block_start>""" Initialize the explorer settings groupbox. 
"""<line_sep>self._checkbox_cursor=QtWidgets.QCheckBox("Highlight mutual")<line_sep>self._checkbox_cursor.setCheckState(QtCore.Qt.Checked)<line_sep>self._checkbox_verbose=QtWidgets.QCheckBox("Show use/def")<line_sep>self._checkbox_sync=QtWidgets.QCheckBox("Sync hexrays")<line_sep>self._checkbox_sync.setCheckState(QtCore.Qt.Checked)<line_sep>self._groupbox_settings=QtWidgets.QGroupBox("Settings")<line_sep>layout=QtWidgets.QVBoxLayout()<line_sep>layout.addWidget(self._checkbox_cursor)<line_sep>layout.addWidget(self._checkbox_verbose)<line_sep>layout.addWidget(self._checkbox_sync)<line_sep>self._groupbox_settings.setLayout(layout)<block_end><def_stmt>_ui_layout self<block_start>""" Layout the major UI elements of the widget. """<line_sep>layout=QtWidgets.QGridLayout()<line_sep># arrange the widgets in a 'grid' row col row span col span layout.addWidget(self._code_view.widget 0 0 0 1)<line_sep>layout.addWidget(self._maturity_list 0 1 1 1)<line_sep>layout.addWidget(self._groupbox_settings 1 1 1 1)<line_sep># apply the layout to the widget self.widget.setLayout(layout)<block_end><def_stmt>_ui_init_signals self<block_start>""" Connect UI signals. 
"""<line_sep>self._maturity_list.currentItemChanged.connect(<lambda>x y:self.controller.select_maturity(x.text()))<line_sep>self._code_view.connect_signals(self.controller)<line_sep>self._code_view.OnClose=self.hide# HACK # checkboxes self._checkbox_cursor.stateChanged.connect(<lambda>x:self.controller.set_highlight_mutual(bool(x)))<line_sep>self._checkbox_verbose.stateChanged.connect(<lambda>x:self.controller.set_verbose(bool(x)))<line_sep>self._checkbox_sync.stateChanged.connect(<lambda>x:self._code_sync.enable_sync(bool(x)))<line_sep># model signals self.model.mtext_refreshed(self.refresh)<line_sep>self.model.maturity_changed(self.refresh)<block_end>#-------------------------------------------------------------------------- # Misc #-------------------------------------------------------------------------- <def_stmt>refresh self<block_start>""" Refresh the microcode explorer UI based on the model state. """<line_sep>self._maturity_list.setCurrentRow(self.model.active_maturity-1)<line_sep>self._code_view.refresh()<block_end><block_end><class_stmt>LayerListWidget(QtWidgets.QListWidget)<block_start>""" The microcode maturity list widget """<def_stmt>__init__ self<block_start>super(LayerListWidget self).__init__()<line_sep># populate the list widget with the microcode maturity levels self.addItems([get_mmat_name(x)<for>x get_mmat_levels()])<line_sep># select the first maturity level, by default self.setCurrentRow(0)<line_sep># make the list widget a fixed size, slightly wider than it needs to be width=self.sizeHintForColumn(0)<line_sep>self.setMaximumWidth(int(width+width<times>0.10))<block_end><def_stmt>wheelEvent self event<block_start>""" Handle mouse wheel scroll events. 
"""<line_sep>y=event.angleDelta().y()<line_sep># scrolling down, clamp to last row <if_stmt>y<l>0<block_start>next_row=min(self.currentRow()+1 self.count()-1)<block_end># scrolling up, clamp to first row (0) <elif_stmt>y<g>0<block_start>next_row=max(self.currentRow()-1 0)<block_end># horizontal scroll ? nothing to do.. <else_stmt><block_start><return><block_end>self.setCurrentRow(next_row)<block_end><block_end><class_stmt>MicrocodeView(ida_kernwin.simplecustviewer_t)<block_start>""" An IDA-based text area that will render the Hex-Rays microcode. TODO: I'll probably rip this out in the future, as I'll have finer control over the interaction / implementation if I just roll my own microcode text widget. For that reason, excuse its hacky-ness / lack of comments. """<def_stmt>__init__ self model<block_start>super(MicrocodeView self).__init__()<line_sep>self.model=model<line_sep>self.Create()<block_end><def_stmt>connect_signals self controller<block_start>self.controller=controller<line_sep>self.OnCursorPosChanged=<lambda>:controller.select_position(*self.GetPos())<line_sep>self.OnDblClick=<lambda>_:controller.activate_position(*self.GetPos())<line_sep>self.model.position_changed(self.refresh_cursor)<block_end><def_stmt>refresh self<block_start>self.ClearLines()<for_stmt>line self.model.mtext.lines<block_start>self.AddLine(line.tagged_text)<block_end>self.refresh_cursor()<block_end><def_stmt>refresh_cursor self<block_start><if_stmt><not>self.model.current_position<block_start><return><block_end>self.Jump(*self.model.current_position)<block_end><def_stmt>Create self<block_start><if_stmt><not>super(MicrocodeView self).Create(<none>)<block_start><return><false><block_end>self._twidget=self.GetWidget()<line_sep>self.widget=ida_kernwin.PluginForm.TWidgetToPyQtWidget(self._twidget)<line_sep><return><true><block_end><def_stmt>OnClose self<block_start><pass><block_end><def_stmt>OnCursorPosChanged self<block_start><pass><block_end><def_stmt>OnDblClick self 
shift<block_start><pass><block_end><def_stmt>OnPopup self form popup_handle<block_start>controller=self.controller<line_sep># # so, i'm pretty picky about my UI / interactions. IDA puts items in # the right click context menus of custom (code) viewers. # # these items aren't really relevant (imo) to the microcode viewer, # so I do some dirty stuff here to filter them out and ensure only # my items will appear in the context menu. # # there's only one right click context item right now, but in the # future i'm sure there will be more. # <class_stmt>FilterMenu(QtCore.QObject)<block_start><def_stmt>__init__ self qmenu<block_start>super(QtCore.QObject self).__init__()<line_sep>self.qmenu=qmenu<block_end><def_stmt>eventFilter self obj event<block_start><if_stmt>event.type()<ne>QtCore.QEvent.Polish<block_start><return><false><block_end><for_stmt>action self.qmenu.actions()<block_start><if_stmt>action.text()<in>["&Font..." "&Synchronize with"]# lol.. <block_start>qmenu.removeAction(action)<block_end><block_end>self.qmenu.removeEventFilter(self)<line_sep>self.qmenu=<none><line_sep><return><true><block_end><block_end>p_qmenu=ctypes.cast(int(popup_handle) ctypes.POINTER(ctypes.c_void_p))[0]<line_sep>qmenu=sip.wrapinstance(int(p_qmenu) QtWidgets.QMenu)<line_sep>self.filter=FilterMenu(qmenu)<line_sep>qmenu.installEventFilter(self.filter)<line_sep># only handle right clicks on lines containing micro instructions ins_token=self.model.mtext.get_ins_for_line(self.model.current_line)<if_stmt><not>ins_token<block_start><return><false><block_end><class_stmt>MyHandler(ida_kernwin.action_handler_t)<block_start><def_stmt>activate self ctx<block_start>controller.show_subtree(ins_token)<block_end><def_stmt>update self ctx<block_start><return>ida_kernwin.AST_ENABLE_ALWAYS<block_end><block_end># inject the 'View subtree' action into the right click context menu desc=ida_kernwin.action_desc_t(<none> 'View subtree' MyHandler())<line_sep>ida_kernwin.attach_dynamic_action_to_popup(form 
popup_handle desc <none>)<line_sep><return><true><block_end><block_end>#----------------------------------------------------------------------------- # Util #----------------------------------------------------------------------------- <class_stmt>ViewCursor(object)<block_start>""" TODO """<def_stmt>__init__ self line_num x y mapped=<true><block_start>self.line_num=line_num<line_sep>self.x=x<line_sep>self.y=y<line_sep>self.mapped=mapped<block_end>@property<def_stmt>text_position self<block_start><return>(self.line_num self.x)<block_end>@property<def_stmt>viewport_position self<block_start><return>(self.line_num self.x self.y)<block_end><block_end>
""" @license: MIT @repository: https://github.com/semontesdeoca/MNPR _ _ __ ___ _ __ _ __ _ __ ___ _ _ ___| |_ ___ _ __ ___ | '_ ` _ \| '_ \| '_ \| '__| / __| | | / __| __/ _ \ '_ ` _ \ | | | | | | | | | |_) | | \__ \ |_| \__ \ || __/ | | | | | |_| |_| |_|_| |_| .__/|_| |___/\__, |___/\__\___|_| |_| |_| |_| |___/ @summary: MNPR related functions """<import_from_future_stmt> print_function<import_stmt>os<import_stmt>traceback<import_stmt>maya.cmds<as>cmds<import_stmt>maya.mel<as>mel<import_stmt>coopLib<as>lib<import_stmt>mnpr_info<import_stmt>mnpr_runner<import_stmt>mnpr_matPresets<line_sep>mnpr_info.loadPlugin()<line_sep>dx2sfxAttr={"xUseColorTexture":"Albedo_Texture" "xColorTint":"Color_Tint" "xUseNormalTexture":"Normal_Map" "xFlipU":"Invert_U" "xFlipV":"Invert_V" "xBumpDepth":"Bump_Depth" "xUseSpecularTexture":"Specular_Map" "xSpecular":"Specular_Roll_Off" "xSpecDiffusion":"Specular_Diffusion" "xSpecTransparency":"Specular_Transparency" "xUseShadows":"" "xShadowDepthBias":"" "xDiffuseFactor":"Diffuse_Factor" "xShadeColor":"Shade_Color" "xShadeWrap":"Shade_Wrap" "xUseOverrideShade":"Shade_Override" "xDilute":"Dilute_Paint" "xCangiante":"Cangiante" "xDiluteArea":"Dilute_Area" "xHighArea":"Highlight_Roll_Off" "xHighTransparency":"Highlight_Transparency" "xAtmosphereColor":"" "xRangeStart":"" "xRangeEnd":"" "xDarkEdges":"" "xMainTex":"Albedo_Texture_File" "xNormalTex":"Normal_Map_File" "xSpecTex":"Specular_Map_File"}<def_stmt>check <block_start>"""Makes sure everything is running right"""<line_sep>print("SYSTEM CHECK FOR {0}".format(mnpr_info.prototype))<line_sep># check viewport viewport=lib.getActiveModelPanel()<line_sep>cmds.modelEditor(viewport dtx=<true> e=<true>)# display textures # plugin needs to be loaded mnpr_info.loadRenderer()<line_sep># 3rd party plugins must be loaded cmds.loadPlugin('shaderFXPlugin' quiet=<true>)<if_stmt>cmds.about(nt=<true> q=<true>)<block_start>cmds.loadPlugin('dx11Shader' quiet=<true>)# deprecated (only shadeFXPlugin in the future) 
<block_end>cmds.loadPlugin('glslShader' quiet=<true>)# deprecated (only shaderFXPlugin in the future) # viewport renderer must be set mel.eval("setRendererAndOverrideInModelPanel vp2Renderer {0} {1};".format(mnpr_info.prototype viewport))<line_sep># modify color of heads up display cmds.displayColor("headsUpDisplayLabels" 2 dormant=<true>)<line_sep>cmds.displayColor("headsUpDisplayValues" 2 dormant=<true>)<line_sep># make sure a config node exists <if_stmt><not>cmds.objExists(mnpr_info.configNode)<block_start>selected=cmds.ls(sl=<true> l=<true>)<line_sep>selectConfig()<line_sep>cmds.select(selected r=<true>)<block_end>lib.printInfo("-> SYSTEM CHECK SUCCESSFUL")<block_end><def_stmt>changeStyle <block_start>"""Resets MNPR to load a new style"""<line_sep># reset stylization cmds.mnpr(resetStylization=<true>)<line_sep># delete old config node <if_stmt>cmds.objExists(mnpr_info.configNode)<block_start>cmds.delete(mnpr_info.configNode)<block_end># flush undo cmds.flushUndo()<line_sep>print("style deleted")<line_sep># deregister node cmds.mnpr(rn=<false>)<line_sep># register node cmds.mnpr(rn=<true>)<line_sep># create new config node selectConfig()<line_sep># refresh AETemplate mnpr_runner.reloadConfig()<line_sep># set new media type mnpr_info.media=cmds.mnpr(style=<true> q=<true>)<line_sep># rebuild opened UI's <import_stmt>mnpr_UIs<if_stmt>cmds.window(mnpr_UIs.BreakdownUI.windowTitle exists=<true>)<block_start>mnpr_runner.openOverrideSettings(rebuild=<true>)<block_end><import_stmt>mnpr_FX<if_stmt>cmds.window(mnpr_FX.MNPR_FX_UI.windowTitle exists=<true>)<block_start>mnpr_runner.openPaintFX(rebuild=<true>)<block_end>lib.printInfo("Style changed")<block_end><def_stmt>togglePlugin force=""<block_start>""" Toggles active or forces desired plugin prototype Args: force (str): plugin name to force """<if_stmt>force<block_start>unloadPlugin(mnpr_info.prototype)<line_sep>mnpr_info.prototype=force<line_sep>check()<block_end><else_stmt># toggle loaded prototype 
<block_start><if_stmt>cmds.pluginInfo(mnpr_info.prototype loaded=<true> q=<true>)<block_start>unloadPlugin(mnpr_info.prototype)<block_end><else_stmt><block_start>check()<block_end><block_end><block_end><def_stmt>unloadPlugin plugin<block_start>""" Unloads plugin and cleans scene from plugin traces Args: plugin (str): name of plugin to be unloaded """<line_sep># check which prototype is active <if_stmt>cmds.pluginInfo(plugin loaded=<true> q=<true>)# remove traces and unload <block_start><if_stmt>cmds.objExists(mnpr_info.configNode)<block_start>cmds.delete(mnpr_info.configNode)# delete config node <block_end>cmds.flushUndo()# clear undo queue cmds.unloadPlugin(plugin)# unload plugin lib.printInfo("->PLUGIN SUCCESSFULLY UNLOADED")<block_end><block_end><def_stmt>showShaderAttr <block_start>""" Select material and show in attribute editor """<if_stmt>cmds.ls(sl=<true>)<block_start>cmds.hyperShade(smn=<true>)<line_sep>mel.eval("openAEWindow")<block_end><else_stmt><block_start>cmds.warning("Select object with shader")<block_end><block_end><def_stmt>refreshShaders <block_start>""" Refreshes object-space plugin shaders """<line_sep>shaderDir=systemDir("shaders")<if_stmt>os.name<eq>'nt'<and>mnpr_info.backend<eq>'dx11'<block_start>shaderFile=os.path.join(shaderDir "PrototypeC.fx")<if_stmt><not>os.path.isfile(shaderFile)<block_start>shaderFile=os.path.join(shaderDir "prototypeC.fxo")<block_end>shaders=cmds.ls(type="dx11Shader")<block_end><else_stmt><block_start>shaderFile=os.path.join(shaderDir "PrototypeC.ogsfx")<line_sep>shaders=cmds.ls(type="GLSLShader")<block_end><for_stmt>shader shaders<block_start>cmds.setAttr("{0}.shader".format(shader) shaderFile type="string")<block_end>lib.printInfo('Shaders refreshed')<line_sep><return><true><block_end><def_stmt>updateShaderFX <block_start>""" Updates shaderFX shaders"""<line_sep>shaderDir=systemDir("shaders")<line_sep>materials=cmds.ls(type="ShaderfxShader")<line_sep>counter=0<for_stmt>mat 
materials<block_start>counter<augadd>1<line_sep># get materials attributes matAttrs={}<line_sep>mnpr_matPresets.getMaterialAttrs(mat matAttrs)<line_sep># load new graph shaderFile=os.path.join(shaderDir "{0}.sfx".format(matAttrs["graph"]))<line_sep>cmds.shaderfx(sfxnode=mat loadGraph=shaderFile)<line_sep># set attributes mnpr_matPresets.setMaterialAttrs(mat matAttrs)<line_sep>print("{0} has been updated to the latest version".format(mat))<line_sep>print("{0}/{1} materials updated".format(counter len(materials)))<block_end>lib.printInfo('Shaders updated')<block_end><def_stmt>dx112glsl <block_start>""" Converts dx11 materials to glsl materials """<line_sep>check()<line_sep>dx11Shaders=cmds.ls(type="dx11Shader")<line_sep>print(dx11Shaders)<for_stmt>dx11Shader dx11Shaders<block_start>print("Transfering {0} shader".format(dx11Shader))<line_sep># get all attributes attributes=cmds.listAttr(dx11Shader ud=<true> st="x*" k=<true>)<line_sep>print(attributes)<line_sep># get all connected nodes connectedNodes=cmds.listConnections(dx11Shader t="file" c=<true> p=<true>)<line_sep>print(connectedNodes)<line_sep># get all shapes cmds.select(dx11Shader r=<true>)<line_sep>cmds.hyperShade(objects="")<line_sep>shapes=cmds.ls(sl=<true>)<line_sep>print(shapes)<line_sep># create glsl shader shader=cmds.shadingNode('GLSLShader' asShader=<true> n="{0}_GL".format(dx11Shader))<line_sep>cmds.select(shapes r=<true>)<line_sep>cmds.hyperShade(assign=shader)<line_sep>print(">>> Shader {0} created".format(shader))<line_sep># assign attributes shaderFile=os.path.join(mnpr_info.environment "shaders" "PrototypeC.ogsfx")<line_sep>cmds.setAttr("{0}.shader".format(shader) shaderFile type="string")<line_sep>print("Setting attributes for {0}".format(shader))<for_stmt>attr attributes<block_start>value=cmds.getAttr("{0}.{1}".format(dx11Shader attr))<try_stmt><block_start><if_stmt>type(value)<eq>type([])<block_start>cmds.setAttr("{0}.{1}".format(shader attr) value[0][0] value[0][1] value[0][2] 
typ="double3")<block_end><else_stmt><block_start>cmds.setAttr("{0}.{1}".format(shader attr) value)<block_end><block_end><except_stmt><block_start>print("Found problemt when setting {0}.{1}, skipping for now".format(shader attr))<block_end><block_end># connect nodes <if_stmt>connectedNodes<block_start><for_stmt>i range(0 len(connectedNodes) 2)<block_start>inputAttr=connectedNodes[i].split(".")[1]<line_sep>cmds.connectAttr(connectedNodes[i+1] "{0}.{1}".format(shader inputAttr))<block_end><block_end># set control sets <if_stmt>cmds.attributeQuery("Color0_Source" node=shader ex=<true>)<block_start>cmds.setAttr("{0}.Color0_Source".format(shader) "color:controlSetA" type="string")<block_end><if_stmt>cmds.attributeQuery("Color1_Source" node=shader ex=<true>)<block_start>cmds.setAttr("{0}.Color1_Source".format(shader) "color:controlSetB" type="string")<block_end><if_stmt>cmds.attributeQuery("Color2_Source" node=shader ex=<true>)<block_start>cmds.setAttr("{0}.Color2_Source".format(shader) "color:controlSetC" type="string")<block_end># delete dx11 shader #cmds.delete(dx11Shader) <block_end><block_end><def_stmt>dx112sfx graph="mnpr_uber"<block_start>""" Converts dx11 materials to shaderFX materials Args: graph (str): ShaderFX graph name (filename) """<line_sep>check()<line_sep>dx11Shaders=cmds.ls(type="dx11Shader")<line_sep>prototypeCNodes=[]<for_stmt>dx11Shader dx11Shaders<block_start>shaderPath=cmds.getAttr("{0}.shader".format(dx11Shader))<if_stmt>"rototypeC"<not><in>shaderPath<block_start><continue><block_end>prototypeCNodes.append(dx11Shader)<line_sep>print("Converting {0} shader".format(dx11Shader))<line_sep># get all attributes attributes=cmds.listAttr(dx11Shader ud=<true> st="x*" k=<true>)<line_sep>print(attributes)<line_sep># get all connected nodes connectedNodes=cmds.listConnections(dx11Shader t="file" c=<true>)<line_sep>print(connectedNodes)<line_sep># get all shapes cmds.select(dx11Shader 
r=<true>)<line_sep>cmds.hyperShade(objects="")<line_sep>shapes=cmds.ls(sl=<true>)<line_sep>print(shapes)<line_sep># create shaderFX shader shader=cmds.shadingNode('ShaderfxShader' asShader=<true> name="{0}".format(dx11Shader.replace("_WC" "_SFX")))<line_sep>cmds.select(shapes r=<true>)<line_sep>cmds.hyperShade(assign=shader)<line_sep>shaderFile=os.path.join(mnpr_info.environment "shaders" "{0}.sfx".format(graph))<line_sep>cmds.shaderfx(sfxnode=shader loadGraph=shaderFile)<line_sep>print(">>> Shader {0} created".format(shader))<line_sep># assign settings vtxControl=bool(cmds.getAttr("{0}.{1}".format(dx11Shader "xUseControl")))<if_stmt>vtxControl<block_start>nodeId=cmds.shaderfx(sfxnode=shader getNodeIDByName="vtxControls")<line_sep>cmds.shaderfx(sfxnode=shader edit_bool=(nodeId "value" vtxControl))<block_end>shadows=bool(cmds.getAttr("{0}.{1}".format(dx11Shader "xUseShadows")))<if_stmt><not>shadows<block_start>nodeId=cmds.shaderfx(sfxnode=shader getNodeIDByName="Shadow")<line_sep>cmds.shaderfx(sfxnode=shader edit_bool=(nodeId "value" shadows))<block_end>specularity=bool(cmds.getAttr("{0}.{1}".format(dx11Shader "xSpecular")))<if_stmt>specularity<block_start>nodeId=cmds.shaderfx(sfxnode=shader getNodeIDByName="Specularity")<line_sep>cmds.shaderfx(sfxnode=shader edit_bool=(nodeId "value" specularity))<block_end># assign attributes print("Setting attributes for {0}".format(shader))<for_stmt>attr attributes<block_start>value=cmds.getAttr("{0}.{1}".format(dx11Shader attr))<if_stmt>attr<in>dx2sfxAttr<block_start>lib.setAttr(shader dx2sfxAttr[attr] value)<block_end><block_end># assign textures <if_stmt>connectedNodes<block_start><for_stmt>i range(0 len(connectedNodes) 2)<block_start>textureDir=cmds.getAttr("{0}.{1}".format(connectedNodes[i+1] "fileTextureName"))<line_sep>attr=connectedNodes[i].split(".")[1]<line_sep>lib.setAttr(shader dx2sfxAttr[attr] textureDir)<block_end><block_end><block_end># delete prototypeC shaders 
cmds.delete(prototypeCNodes)<block_end><def_stmt>systemDir folder=''<block_start>""" Returns the system directory Args: folder (str): folder to append to system directory Returns: (str): path to system directory """<line_sep>rootDir=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))<line_sep><return>os.path.join(rootDir folder)<block_end><def_stmt>selectConfig <block_start>"""Select configuration node and re-check connections"""<line_sep># delete old configuration nodes <if_stmt>cmds.objExists("NPRConfig")<block_start>cmds.delete("NPRConfig")<block_end><if_stmt><not>cmds.objExists(mnpr_info.configNode)<block_start>print(mnpr_info.configNode)<line_sep>cmds.createNode("mnprConfig" n=mnpr_info.configNode)<line_sep>cmds.connectAttr("{0}.evaluate".format(mnpr_info.configNode) "persp.visibility" f=<true>)<line_sep>mel.eval("AttributeEditor")<line_sep>lib.printInfo("-> CONFIG NODE CREATED AND CONNECTED")<block_end><else_stmt><block_start>cmds.select(mnpr_info.configNode)<line_sep>mel.eval("AttributeEditor")<line_sep>lib.printInfo("Selected {0} configuration node".format(mnpr_info.prototype))<block_end><block_end><def_stmt>optimizePerformance <block_start>"""Function to optimize performance by disabling some Maya functions"""<line_sep>cmds.evaluationManager(mode="off")<block_end># set up animation evaluation to DG <def_stmt>renderFrame saveDir width height renderSize=1 imgFormat=".jpg" override=mnpr_info.prototype<block_start>""" Renders current frame in the viewport Args: saveDir (str): save directory width (int): width in pixels height (int): height in pixels renderSize (float): render size (factor) imgFormat (str): .jpg, .exr, etc) override (str): name of desired override (if any) """<line_sep>check()# check that everything is in order renderSize=resolutionCheck(width height renderSize)# make sure resolution is reasonable # get working values to be changed 
workingRenderSize=cmds.getAttr("{0}.renderScale".format(mnpr_info.configNode))<line_sep>workingColorDepth=cmds.getAttr("{0}.colorDepth".format(mnpr_info.configNode))<line_sep># set desired attributes <if_stmt>workingColorDepth<ne>2<block_start>lib.setAttr(mnpr_info.configNode "colorDepth" 2)<block_end><if_stmt>renderSize<ne>workingRenderSize<block_start>lib.setAttr(mnpr_info.configNode "renderScale" renderSize)<block_end># prepare renderer cmds.mnpr(g=<true>)# enable mnprGamma mnprOperations=len(cmds.mnpr(lsO=<true>))<line_sep>cmds.mnpr(renderOperation=mnprOperations-1 s=0)# HUD cmds.mnpr(renderOperation=mnprOperations-2 s=0)# UI cmds.refresh()<line_sep># render frame <try_stmt><block_start>screenshotPath=lib.screenshot(saveDir width height format=imgFormat override=override)# render the frame <block_end><except_stmt>WindowsError<block_start>print("Screenshot saving has been canceled")<block_end><except_stmt><block_start>traceback.print_exc()<block_end><if_stmt>screenshotPath# bring everything back to normal <block_start>cmds.mnpr(renderOperation=mnprOperations-1 s=1)# HUD cmds.mnpr(renderOperation=mnprOperations-2 s=1)# UI lib.setAttr(mnpr_info.configNode "renderScale" workingRenderSize)<line_sep>lib.setAttr(mnpr_info.configNode "colorDepth" workingColorDepth)<line_sep>cmds.mnpr(g=<false>)<line_sep>cmds.refresh()<line_sep><return>screenshotPath<block_end><block_end><def_stmt>playblast saveDir width height renderCamera modelPanel renderSize=1<block_start>""" Playblasts the timeslider Args: saveDir (str): save directory with *.mov extension width (int): width in pixels height: height in pixels renderCamera: camera to playblast from modelPanel: modelPanel to playblast from renderSize: render size (factor) """<line_sep>check()# check that everything is in order renderSize=resolutionCheck(width height renderSize)# make sure resolution is reasonable aPlayBackSliderPython=mel.eval('$tmpVar=$gPlayBackSlider')<line_sep>audioNode=cmds.timeControl(aPlayBackSliderPython 
q=<true> s=<true>)# get audio node # get working values to be changed workingRenderSize=cmds.getAttr("{0}.renderScale".format(mnpr_info.configNode))<line_sep>workingColorDepth=cmds.getAttr("{0}.colorDepth".format(mnpr_info.configNode))<line_sep>workingCamera=cmds.modelEditor(modelPanel cam=<true> q=<true>)<line_sep>workingCameraShape=cmds.listRelatives(workingCamera s=<true>)<if_stmt>workingCameraShape<block_start>workingCameraShape=workingCameraShape[0]<block_end><else_stmt># we already have the shape <block_start>workingCameraShape=workingCamera<block_end># set desired attributes cmds.mnpr(g=<true>)<line_sep>mnprOperations=len(cmds.mnpr(lsO=<true>))<line_sep>cmds.mnpr(renderOperation=mnprOperations-1 s=0)# HUD cmds.mnpr(renderOperation=mnprOperations-2 s=0)# UI cmds.modelEditor(modelPanel cam=renderCamera e=<true>)# change modelPanel lib.setAttr(mnpr_info.configNode "renderScale" renderSize)<line_sep>lib.setAttr(mnpr_info.configNode "colorDepth" 2)# needs to be 32bit to avoid artefacts cmds.refresh()<line_sep># try playblasting <try_stmt><block_start>cmds.playblast(f=saveDir format="qt" w=width h=height percent=100 qlt=100 v=<true> fo=<true> os=<true> s=audioNode compression="PNG")<block_end><except_stmt>RuntimeError<block_start><try_stmt><block_start>cmds.playblast(f=saveDir format="avi" w=width h=height percent=100 qlt=100 v=<true> fo=<true> os=<true> s=audioNode)<block_end><except_stmt>RuntimeError<block_start>cmds.error("Video cannot be playblasted as qt or avi, please check the installed codecs.")<block_end><block_end># bring everything back to normal cmds.mnpr(renderOperation=mnprOperations-1 s=1)# HUD cmds.mnpr(renderOperation=mnprOperations-2 s=1)# UI cmds.modelEditor(modelPanel cam=workingCameraShape e=<true>)<line_sep>lib.setAttr(mnpr_info.configNode "renderScale" workingRenderSize)<line_sep>lib.setAttr(mnpr_info.configNode "colorDepth" workingColorDepth)<line_sep>cmds.mnpr(g=<false>)<line_sep>cmds.refresh()<line_sep>lib.printInfo("Video has been 
successfully playblasted to: {0}".format(saveDir))<block_end><def_stmt>resolutionCheck width height renderSize=1.0<block_start>""" Checks if resolution is between reasonable hardware limitations Args: width (int): viewport width height (int): viewport height renderSize (float): render size (factor) Returns: renderSize (int): viable render size (factor) """<if_stmt>(width<times>renderSize<g>16384)<or>(height<times>renderSize<g>16384)<block_start>cmds.warning("Resolution too high to supersample, reducing render size")<line_sep><return>resolutionCheck(width height renderSize/2.0)<block_end><else_stmt><block_start><if_stmt>(width<times>height<times>pow(renderSize 2))<g>150000000<block_start>confirm=cmds.confirmDialog(title='Crash Warning' message='Rendering a frame at such high resolutions might take long and even crash Maya\nWould you like to continue anyway?' icn="warning" button=['Yes' 'No'] defaultButton='Yes' cancelButton='No' dismissString='No' ma='center')<if_stmt>confirm<eq>'No'<block_start>cmds.error("Frame capture cancelled by user")<block_end><block_end><block_end><return>renderSize<block_end><def_stmt>updateAE <block_start>mel.eval("refreshEditorTemplates;")<line_sep><return><true><block_end>
<import_from_future_stmt> absolute_import<import_from_stmt>. knowledge_graph<as>knwlgrh<def_stmt>load_data dataset bfs_level=3 relabel=<false><block_start><if_stmt>dataset<in>['aifb' 'mutag' 'bgs' 'am']<block_start><return>knwlgrh.load_entity(dataset bfs_level relabel)<block_end><elif_stmt>dataset<in>['FB15k' 'wn18' 'FB15k-237']<block_start><return>knwlgrh.load_link(dataset)<block_end><else_stmt><block_start><raise>ValueError('Unknown dataset: {}'.format(dataset))<block_end><block_end>
<import_stmt>pytest<import_from_stmt>rdkit Chem<import_from_stmt>aizynthfinder.chem MoleculeException Molecule<def_stmt>test_no_input <block_start><with_stmt>pytest.raises(MoleculeException)<block_start>Molecule()<block_end><block_end><def_stmt>test_create_with_mol <block_start>rd_mol=Chem.MolFromSmiles("O")<line_sep>mol=Molecule(rd_mol=rd_mol)<assert_stmt>mol.smiles<eq>"O"<block_end><def_stmt>test_create_with_smiles <block_start>mol=Molecule(smiles="O")<assert_stmt>Chem.MolToSmiles(mol.rd_mol)<eq>"O"<block_end><def_stmt>test_inchi <block_start>mol=Molecule(smiles="O")<assert_stmt>mol.inchi<eq>"InChI=1S/H2O/h1H2"<block_end><def_stmt>test_inchi_key <block_start>mol=Molecule(smiles="O")<assert_stmt>mol.inchi_key<eq>"<KEY>"<block_end><def_stmt>test_fingerprint <block_start>mol=Molecule(smiles="O")<assert_stmt>sum(mol.fingerprint(2))<eq>1<assert_stmt>sum(mol.fingerprint(2 10))<eq>1<block_end><def_stmt>test_sanitize <block_start>mol=Molecule(smiles="O" sanitize=<true>)<assert_stmt>Chem.MolToSmiles(mol.rd_mol)<eq>"O"<line_sep>mol=Molecule(smiles="c1ccccc1(C)(C)")<with_stmt>pytest.raises(MoleculeException)<block_start>mol.sanitize()<block_end>mol.sanitize(raise_exception=<false>)<assert_stmt>mol.smiles<eq>"CC1(C)CCCCC1"<block_end><def_stmt>test_equality <block_start>mol1=Molecule(smiles="CCCCO")<line_sep>mol2=Molecule(smiles="OCCCC")<assert_stmt>mol1<eq>mol2<block_end><def_stmt>test_basic_equality <block_start>mol1=Molecule(smiles="CC[C@@H](C)O")# R-2-butanol mol2=Molecule(smiles="CC[C@H](C)O")# S-2-butanol <assert_stmt>mol1<ne>mol2<assert_stmt>mol1.basic_compare(mol2)<block_end><def_stmt>test_has_atom_mapping <block_start>mol1=Molecule(smiles="CCCCO")<line_sep>mol2=Molecule(smiles="C[C:5]CCO")<assert_stmt><not>mol1.has_atom_mapping()<assert_stmt>mol2.has_atom_mapping()<block_end><def_stmt>test_remove_atom_mapping 
<block_start>mol=Molecule(smiles="C[C:5]CCO")<assert_stmt>mol.has_atom_mapping()<line_sep>mol.remove_atom_mapping()<assert_stmt><not>mol.has_atom_mapping()<block_end>
<import_stmt>logging<import_stmt>os<import_from_stmt>scapy.all IP TCP<import_stmt>actions.tree<import_stmt>actions.drop<import_stmt>actions.tamper<import_stmt>actions.duplicate<import_stmt>actions.utils<import_stmt>layers.packet<def_stmt>test_init <block_start>""" Tests initialization """<line_sep>print(actions.action.Action.get_actions("out"))<block_end><def_stmt>test_count_leaves <block_start>""" Tests leaf count is correct. """<line_sep>a=actions.tree.ActionTree("out")<line_sep>logger=logging.getLogger("test")<assert_stmt><not>a.parse("TCP:reserved:0tamper{TCP:flags:replace:S}-|" logger) "Tree parsed malformed DNA"<line_sep>a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|" logger)<line_sep>duplicate=actions.duplicate.DuplicateAction()<line_sep>duplicate2=actions.duplicate.DuplicateAction()<line_sep>drop=actions.drop.DropAction()<assert_stmt>a.count_leaves()<eq>1<assert_stmt>a.remove_one()<line_sep>a.add_action(duplicate)<assert_stmt>a.count_leaves()<eq>1<line_sep>duplicate.left=duplicate2<assert_stmt>a.count_leaves()<eq>1<line_sep>duplicate.right=drop<assert_stmt>a.count_leaves()<eq>2<block_end><def_stmt>test_check <block_start>""" Tests action tree check function. """<line_sep>a=actions.tree.ActionTree("out")<line_sep>logger=logging.getLogger("test")<line_sep>a.parse("[TCP:flags:RA]-tamper{TCP:flags:replace:S}-|" logger)<line_sep>p=layers.packet.Packet(IP()/TCP(flags="A"))<assert_stmt><not>a.check(p logger)<line_sep>p=layers.packet.Packet(IP(ttl=64)/TCP(flags="RA"))<assert_stmt>a.check(p logger)<assert_stmt>a.remove_one()<assert_stmt>a.check(p logger)<line_sep>a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|" logger)<assert_stmt>a.check(p logger)<line_sep>a.parse("[IP:ttl:64]-tamper{TCP:flags:replace:S}-|" logger)<assert_stmt>a.check(p logger)<line_sep>p=layers.packet.Packet(IP(ttl=15)/TCP(flags="RA"))<assert_stmt><not>a.check(p logger)<block_end><def_stmt>test_scapy <block_start>""" Tests misc. scapy aspects relevant to strategies. 
"""<line_sep>a=actions.tree.ActionTree("out")<line_sep>logger=logging.getLogger("test")<line_sep>a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|" logger)<line_sep>p=layers.packet.Packet(IP()/TCP(flags="A"))<assert_stmt>a.check(p logger)<line_sep>packets=a.run(p logger)<assert_stmt>packets[0][TCP].flags<eq>"S"<line_sep>p=layers.packet.Packet(IP()/TCP(flags="A"))<assert_stmt>a.check(p logger)<line_sep>a.parse("[TCP:reserved:0]-tamper{TCP:chksum:corrupt}-|" logger)<line_sep>packets=a.run(p logger)<assert_stmt>packets[0][TCP].chksum<assert_stmt>a.check(p logger)<block_end><def_stmt>test_str <block_start>""" Tests string representation. """<line_sep>logger=logging.getLogger("test")<line_sep>t=actions.trigger.Trigger("field" "flags" "TCP")<line_sep>a=actions.tree.ActionTree("out" trigger=t)<assert_stmt>str(a).strip()<eq>"[%s]-|"%str(t)<line_sep>tamper=actions.tamper.TamperAction(field="flags" tamper_type="replace" tamper_value="S")<line_sep>tamper2=actions.tamper.TamperAction(field="flags" tamper_type="replace" tamper_value="R")<assert_stmt>a.add_action(tamper)<assert_stmt>str(a).strip()<eq>"[TCP:flags:0]-tamper{TCP:flags:replace:S}-|"<line_sep># Tree will not add a duplicate action 
<assert_stmt><not>a.add_action(tamper)<assert_stmt>str(a).strip()<eq>"[TCP:flags:0]-tamper{TCP:flags:replace:S}-|"<assert_stmt>a.add_action(tamper2)<assert_stmt>str(a).strip()<eq>"[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},)-|"<assert_stmt>a.add_action(actions.duplicate.DuplicateAction())<assert_stmt>str(a).strip()<eq>"[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"<line_sep>drop=actions.drop.DropAction()<assert_stmt>a.add_action(drop)<assert_stmt>str(a).strip()<eq>"[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate(drop,),),)-|"<or>str(a).strip()<eq>"[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate(,drop),),)-|"<assert_stmt>a.remove_action(drop)<assert_stmt>str(a).strip()<eq>"[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"<line_sep># Cannot remove action that is not present <assert_stmt><not>a.remove_action(drop)<assert_stmt>str(a).strip()<eq>"[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"<line_sep>a=actions.tree.ActionTree("out" trigger=t)<line_sep>orig="[TCP:urgptr:15963]-duplicate(,drop)-|"<line_sep>a.parse(orig logger)<assert_stmt>a.remove_one()<assert_stmt>orig<ne>str(a)<assert_stmt>str(a)<in>["[TCP:urgptr:15963]-drop-|" "[TCP:urgptr:15963]-duplicate-|"]<block_end><def_stmt>test_pretty_print_send <block_start>t=actions.trigger.Trigger("field" "flags" "TCP")<line_sep>a=actions.tree.ActionTree("out" trigger=t)<line_sep>duplicate=actions.duplicate.DuplicateAction()<line_sep>a.add_action(duplicate)<line_sep>correct_string="TCP:flags:0\nduplicate\n├── ===> \n└── ===> "<assert_stmt>a.pretty_print()<eq>correct_string<block_end><def_stmt>test_pretty_print logger<block_start>""" Print complex tree, although difficult to test """<line_sep>t=actions.trigger.Trigger("field" "flags" "TCP")<line_sep>a=actions.tree.ActionTree("out" 
trigger=t)<line_sep>tamper=actions.tamper.TamperAction(field="flags" tamper_type="replace" tamper_value="S")<line_sep>tamper2=actions.tamper.TamperAction(field="flags" tamper_type="replace" tamper_value="R")<line_sep>duplicate=actions.duplicate.DuplicateAction()<line_sep>duplicate2=actions.duplicate.DuplicateAction()<line_sep>duplicate3=actions.duplicate.DuplicateAction()<line_sep>duplicate4=actions.duplicate.DuplicateAction()<line_sep>duplicate5=actions.duplicate.DuplicateAction()<line_sep>drop=actions.drop.DropAction()<line_sep>drop2=actions.drop.DropAction()<line_sep>drop3=actions.drop.DropAction()<line_sep>drop4=actions.drop.DropAction()<line_sep>duplicate.left=duplicate2<line_sep>duplicate.right=duplicate3<line_sep>duplicate2.left=tamper<line_sep>duplicate2.right=drop<line_sep>duplicate3.left=duplicate4<line_sep>duplicate3.right=drop2<line_sep>duplicate4.left=duplicate5<line_sep>duplicate4.right=drop3<line_sep>duplicate5.left=drop4<line_sep>duplicate5.right=tamper2<line_sep>a.add_action(duplicate)<line_sep>correct_string="TCP:flags:0\nduplicate\n├── duplicate\n│ ├── tamper{TCP:flags:replace:S}\n│ │ └── ===> \n│ └── drop\n└── duplicate\n ├── duplicate\n │ ├── duplicate\n │ │ ├── drop\n │ │ └── tamper{TCP:flags:replace:R}\n │ │ └── ===> \n │ └── drop\n └── drop"<assert_stmt>a.pretty_print()<eq>correct_string<assert_stmt>a.pretty_print(visual=<true>)<assert_stmt>os.path.exists("tree.png")<line_sep>os.remove("tree.png")<line_sep>a.parse("[TCP:flags:0]-|" logger)<line_sep>a.pretty_print(visual=<true>)# Empty action tree <assert_stmt><not>os.path.exists("tree.png")<block_end><def_stmt>test_pretty_print_order <block_start>""" Tests the left/right ordering by reading in a new tree """<line_sep>logger=logging.getLogger("test")<line_sep>a=actions.tree.ActionTree("out")<assert_stmt>a.parse("[TCP:flags:A]-duplicate(tamper{TCP:flags:replace:R}(tamper{TCP:chksum:replace:14239},),duplicate(tamper{TCP:flags:replace:S}(tamper{TCP:chksum:replace:14239},),))-|" 
logger)<line_sep>correct_pretty_print="TCP:flags:A\nduplicate\n├── tamper{TCP:flags:replace:R}\n│ └── tamper{TCP:chksum:replace:14239}\n│ └── ===> \n└── duplicate\n ├── tamper{TCP:flags:replace:S}\n │ └── tamper{TCP:chksum:replace:14239}\n │ └── ===> \n └── ===> "<assert_stmt>a.pretty_print()<eq>correct_pretty_print<block_end><def_stmt>test_parse <block_start>""" Tests string parsing. """<line_sep>logger=logging.getLogger("test")<line_sep>t=actions.trigger.Trigger("field" "flags" "TCP")<line_sep>a=actions.tree.ActionTree("out" trigger=t)<line_sep>base_t=actions.trigger.Trigger("field" "flags" "TCP")<line_sep>base_a=actions.tree.ActionTree("out" trigger=base_t)<line_sep>tamper=actions.tamper.TamperAction(field="flags" tamper_type="replace" tamper_value="S")<line_sep>tamper2=actions.tamper.TamperAction(field="flags" tamper_type="replace" tamper_value="R")<line_sep>tamper3=actions.tamper.TamperAction(field="flags" tamper_type="replace" tamper_value="S")<line_sep>tamper4=actions.tamper.TamperAction(field="flags" tamper_type="replace" tamper_value="R")<line_sep>a.parse("[TCP:flags:0]-|" logger)<assert_stmt>str(a)<eq>str(base_a)<assert_stmt>len(a)<eq>0<line_sep>base_a.add_action(tamper)<assert_stmt>a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}-|" logger)<assert_stmt>str(a)<eq>str(base_a)<assert_stmt>len(a)<eq>1<assert_stmt>a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},)-|" logging.getLogger("test"))<line_sep>base_a.add_action(tamper2)<assert_stmt>str(a)<eq>str(base_a)<assert_stmt>len(a)<eq>2<line_sep>base_a.add_action(tamper3)<line_sep>base_a.add_action(tamper4)<assert_stmt>a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},),),)-|" logging.getLogger("test"))<assert_stmt>str(a)<eq>str(base_a)<assert_stmt>len(a)<eq>4<line_sep>base_t=actions.trigger.Trigger("field" "flags" "TCP")<line_sep>base_a=actions.tree.ActionTree("out" 
def test_tree():
    """
    Tests basic tree functionality: slot accounting, duplicate rejection,
    string representation, parsing, and random action selection.
    """
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()
    duplicate = actions.duplicate.DuplicateAction()

    # Adding None must be a no-op; non-branching actions keep one open slot
    a.add_action(None)
    a.add_action(tamper)
    assert a.get_slots() == 1
    a.add_action(tamper2)
    assert a.get_slots() == 1
    # A branching action opens a second slot
    a.add_action(duplicate)
    assert a.get_slots() == 2

    # A terminal action (drop) leaves no slots and rejects children
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    drop = actions.drop.DropAction()
    a.add_action(drop)
    assert a.get_slots() == 0
    add_success = a.add_action(tamper)
    assert not add_success
    assert a.get_slots() == 0

    rep = ""
    for s in a.string_repr(a.action_root):
        rep += s
    assert rep == "drop"
    print(str(a))

    assert a.parse("[TCP:flags:A]-duplicate(tamper{TCP:seq:corrupt},)-|", logging.getLogger("test"))
    for act in a:
        # Bug fix: originally printed str(a) (the whole tree) each iteration;
        # print the iterated action itself, matching test_iter below.
        print(str(act))
    assert len(a) == 2
    assert a.get_slots() == 2

    # When a DropAction is requested, it must always be returned
    for _ in range(100):
        assert str(a.get_rand_action("out", request="DropAction")) == "drop"


def test_remove():
    """
    Tests remove: removing absent/present actions, child re-linking,
    and removal of a branching subtree.
    """
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()
    tamper3 = actions.tamper.TamperAction()

    assert not a.remove_action(tamper)
    a.add_action(tamper)
    assert a.remove_action(tamper)

    a.add_action(tamper)
    a.add_action(tamper2)
    a.add_action(tamper3)
    assert a.remove_action(tamper2)
    assert tamper2 not in a
    # tamper2's child must be promoted into its place
    assert tamper.left == tamper3
    assert not tamper.right
    assert len(a) == 2

    a = actions.tree.ActionTree("out", trigger=t)
    duplicate = actions.duplicate.DuplicateAction()
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()
    tamper3 = actions.tamper.TamperAction()
    a.add_action(tamper)
    assert a.action_root == tamper
    duplicate.left = tamper2
    duplicate.right = tamper3
    a.add_action(duplicate)
    assert len(a) == 4
    assert a.remove_action(duplicate)
    assert duplicate not in a
    # Removing a branching node promotes its left child
    assert tamper.left == tamper2
    assert not tamper.right
    assert len(a) == 2

    a.parse("[TCP:flags:A]-|", logging.getLogger("test"))
    assert not a.remove_one(), "Cannot remove one with no action root"


def test_len():
    """
    Tests length calculation, including duplicate adds not inflating the count.
    """
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()

    assert len(a) == 0, "__len__ returned wrong length"
    a.add_action(tamper)
    assert len(a) == 1, "__len__ returned wrong length"
    # Re-adding the same action must not change the length
    a.add_action(tamper)
    assert len(a) == 1, "__len__ returned wrong length"
    a.add_action(tamper2)
    assert len(a) == 2, "__len__ returned wrong length"
    duplicate = actions.duplicate.DuplicateAction()
    a.add_action(duplicate)
    assert len(a) == 3, "__len__ returned wrong length"


def test_contains():
    """
    Tests contains method across adds and removes.
    """
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()
    tamper3 = actions.tamper.TamperAction()

    assert not a.contains(tamper), "contains incorrect behavior"
    assert not a.contains(tamper2), "contains incorrect behavior"

    a.add_action(tamper)
    assert a.contains(tamper), "contains incorrect behavior"
    assert not a.contains(tamper2), "contains incorrect behavior"

    add_success = a.add_action(tamper)
    assert not add_success, "added duplicate action"
    assert a.contains(tamper), "contains incorrect behavior"
    assert not a.contains(tamper2), "contains incorrect behavior"

    a.add_action(tamper2)
    assert a.contains(tamper), "contains incorrect behavior"
    assert a.contains(tamper2), "contains incorrect behavior"

    a.remove_action(tamper2)
    assert a.contains(tamper), "contains incorrect behavior"
    assert not a.contains(tamper2), "contains incorrect behavior"

    a.add_action(tamper2)
    assert a.contains(tamper), "contains incorrect behavior"
    assert a.contains(tamper2), "contains incorrect behavior"

    remove_success = a.remove_action(tamper)
    assert remove_success
    assert not a.contains(tamper), "contains incorrect behavior"
    assert a.contains(tamper2), "contains incorrect behavior"

    a.add_action(tamper3)
    assert a.contains(tamper3), "contains incorrect behavior"
    assert len(a) == 2, "len incorrect return"
    remove_success = a.remove_action(tamper2)
    assert remove_success


def test_iter():
    """
    Tests iterator.
    """
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    assert a.add_action(tamper)
    assert a.add_action(tamper2)
    # Duplicate adds must be rejected
    assert not a.add_action(tamper)
    for node in a:
        print(node)


def test_run():
    """
    Tests running packets through the chain.
    """
    logger = logging.getLogger("test")
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()

    # Single tamper: one packet out, flags rewritten to "S"
    packet = layers.packet.Packet(IP()/TCP())
    a.add_action(tamper)
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 1
    assert None not in packets
    assert packets[0].get("TCP", "flags") == "S"

    # Chained tamper: the second tamper wins ("R")
    a.add_action(tamper2)
    print(str(a))
    packet = layers.packet.Packet(IP()/TCP())
    assert not a.add_action(tamper), "tree added duplicate action"
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 1
    assert None not in packets
    assert packets[0].get("TCP", "flags") == "R"
    print(str(a))

    # Bare duplicate: two identical packets out
    a.remove_action(tamper2)
    a.remove_action(tamper)
    a.add_action(duplicate)
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 2
    assert None not in packets
    assert packets[0][TCP].flags == "RA"
    assert packets[1][TCP].flags == "RA"
    print(str(a))

    # duplicate with tampers on each branch: "S" on the left copy, "R" on the right
    duplicate.left = tamper
    duplicate.right = tamper2
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    # Typo fix: was "ABUT TO RUN"
    print("ABOUT TO RUN")
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 2
    assert None not in packets
    print(str(a))
    print(str(packets[0]))
    print(str(packets[1]))
    assert packets[0][TCP].flags == "S"
    assert packets[1][TCP].flags == "R"
    print(str(a))

    # Nested duplicate under the left tamper: three packets out
    tamper.left = duplicate2
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 3
    assert None not in packets
    assert packets[0][TCP].flags == "S"
    assert packets[1][TCP].flags == "S"
    assert packets[2][TCP].flags == "R"
    print(str(a))

    # Drop under the right tamper: the "R" packet disappears
    tamper2.left = drop
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 2
    assert None not in packets
    assert packets[0][TCP].flags == "S"
    assert packets[1][TCP].flags == "S"
    print(str(a))

    # Drops on both branches: nothing survives
    assert a.remove_action(duplicate2)
    tamper.left = actions.drop.DropAction()
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logger)
    assert len(packets) == 0
    print(str(a))

    a.parse("[TCP:flags:A]-duplicate(tamper{TCP:flags:replace:R}(tamper{TCP:chksum:replace:14239},),duplicate(tamper{TCP:flags:replace:S},))-|", logger)
    packet = layers.packet.Packet(IP()/TCP(flags="A"))
    assert a.check(packet, logger)
    packets = a.run(packet, logger)
    assert len(packets) == 3
    assert packets[0][TCP].flags == "R"
    assert packets[1][TCP].flags == "S"
    assert packets[2][TCP].flags == "A"


def test_index():
    """
    Tests index access, including negative and out-of-range indices.
    """
    a = actions.tree.ActionTree("out")
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    tamper3 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="F")

    assert a.add_action(tamper)
    assert a[0] == tamper
    assert not a[1]
    assert a.add_action(tamper2)
    assert a[0] == tamper
    assert a[1] == tamper2
    assert a[-1] == tamper2
    assert not a[10]
    assert a.add_action(tamper3)
    assert a[-1] == tamper3
    assert not a[-11]


def test_mate():
    """
    Tests mate primitive: swapping subtrees between trees, mating root-only
    trees, mating with an empty tree, and ensuring no cross-tree aliasing.
    """
    logger = logging.getLogger("test")
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    assert not a.choose_one()
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    other_a = actions.tree.ActionTree("out", trigger=t)
    assert not a.mate(other_a), "Can't mate empty trees"

    assert a.add_action(tamper)
    assert other_a.add_action(tamper2)
    assert a.choose_one() == tamper
    assert other_a.choose_one() == tamper2
    assert a.get_parent(tamper) == (None, None)
    assert other_a.get_parent(tamper2) == (None, None)
    assert a.add_action(duplicate)
    assert a.get_parent(duplicate) == (tamper, "left")
    duplicate.right = drop
    assert a.get_parent(drop) == (duplicate, "right")
    assert other_a.add_action(duplicate2)

    # Test mating a full tree with a full tree
    assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate(,drop),)-|"
    assert str(other_a) == "[TCP:flags:0]-tamper{TCP:flags:replace:R}(duplicate,)-|"
    assert a.swap(duplicate, other_a, duplicate2)
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate,)-|"
    assert str(other_a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:R}(duplicate(,drop),)-|"
    assert len(a) == 2
    assert len(other_a) == 3
    assert duplicate2 not in other_a
    assert duplicate not in a
    assert tamper.left == duplicate2
    assert tamper2.left == duplicate
    assert other_a.get_parent(duplicate) == (tamper2, "left")
    assert a.get_parent(duplicate2) == (tamper, "left")
    assert other_a.get_parent(drop) == (duplicate, "right")
    assert a.get_parent(None) == (None, None)

    # Test mating two trees with just root nodes
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    assert not a.choose_one()
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    other_a = actions.tree.ActionTree("out", trigger=t)
    assert not a.mate(other_a)
    assert a.add_action(duplicate)
    assert other_a.add_action(duplicate2)
    assert a.mate(other_a)
    assert a.action_root == duplicate2
    assert other_a.action_root == duplicate
    assert not duplicate.left and not duplicate.right
    assert not duplicate2.left and not duplicate2.right
    # Confirm that no nodes have been aliased or connected between the trees
    for node in a:
        for other_node in other_a:
            assert not node.left == other_node
            assert not node.right == other_node

    # Test mating two trees where one is empty
    assert a.remove_action(duplicate2)
    # This should swap the duplicate action to be the action root of the other tree
    assert str(a) == "[TCP:flags:0]-|"
    assert str(other_a) == "[TCP:flags:0]-duplicate-|"
    assert a.mate(other_a)
    assert not other_a.action_root
    assert a.action_root == duplicate
    assert len(a) == 1
    assert len(other_a) == 0
    # Confirm that no nodes have been aliased or connected between the trees
    for node in a:
        for other_node in other_a:
            if other_node:
                assert not node.left == other_node
                assert not node.right == other_node

    assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate(,drop),)-|", logger)
    drop = a.action_root.left.right
    assert str(drop) == "drop"
    # Note that this will return a valid ActionTree, but because it is empty,
    # it is technically a False-y value, as it's length is 0
    assert other_a.parse("[TCP:flags:0]-|", logger) == other_a
    a.swap(drop, other_a, None)
    assert other_a.action_root == drop
    assert not a.action_root.left.right
    assert str(other_a) == "[TCP:flags:0]-drop-|"
    assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate,)-|"
    other_a.swap(drop, a, a.action_root.left)
    # Confirm that no nodes have been aliased or connected between the trees
    for node in a:
        for other_node in other_a:
            if other_node:
                assert not node.left == other_node
                assert not node.right == other_node
    assert str(other_a) == "[TCP:flags:0]-duplicate-|"
    assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(drop,)-|"

    a.parse("[TCP:flags:0]-drop-|", logger)
    other_a.parse("[TCP:flags:0]-duplicate(drop,drop)-|", logger)
    a_drop = a.action_root
    other_duplicate = other_a.action_root
    a.swap(a_drop, other_a, other_duplicate)
    print(str(a))
    print(str(other_a))
    assert str(other_a) == "[TCP:flags:0]-drop-|"
    assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"

    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    drop2 = actions.drop.DropAction()
    drop3 = actions.drop.DropAction()
    a = actions.tree.ActionTree("out", trigger=t)
    a.add_action(duplicate)
    a.add_action(drop)
    a.add_action(drop2)
    assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
    assert a.get_slots() == 0
    other_a = actions.tree.ActionTree("out", trigger=t)
    other_a.add_action(drop3)
    # Swapping structurally identical drops must leave the tree unchanged
    a.swap(drop, other_a, drop3)
    assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
    a.swap(drop3, other_a, drop)
    assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
    assert a.mate(other_a)


def test_choose_one():
    """
    Tests choose_one functionality, including that every node in the tree
    is eventually selected.
    """
    a = actions.tree.ActionTree("out")
    drop = actions.drop.DropAction()
    assert not a.choose_one()
    assert a.add_action(drop)
    assert a.choose_one() == drop
    assert a.remove_action(drop)
    assert not a.choose_one()
    duplicate = actions.duplicate.DuplicateAction()
    a.add_action(duplicate)
    assert a.choose_one() == duplicate
    duplicate.left = drop
    assert a.choose_one() in [duplicate, drop]
    # Make sure that both actions get chosen
    chosen = set()
    for i in range(0, 10000):
        act = a.choose_one()
        chosen.add(act)
    assert chosen == set([duplicate, drop])
""" These tests check basic operation of ide.tasks.archive.do_import_archive """<import_stmt>mock<import_from_stmt>django.core.exceptions ValidationError<import_from_stmt>ide.tasks.archive do_import_archive InvalidProjectArchiveException<import_from_stmt>ide.utils.cloudpebble_test CloudpebbleTestCase make_package make_appinfo build_bundle override_settings<import_from_stmt>ide.models.project Project<import_from_stmt>utils.fakes FakeS3<line_sep>__author__='joe'<line_sep>fake_s3=FakeS3()<line_sep>@mock.patch('ide.models.s3file.s3' fake_s3)<class_stmt>TestImportArchive(CloudpebbleTestCase)<block_start><def_stmt>setUp self<block_start>self.login()<block_end>@staticmethod<def_stmt>make_resource_spec name='IMAGE_BLAH'<block_start><return>{'resources':{'media':[{'file':'images/blah.png' 'name':name 'type':'bitmap'}]}}<block_end><def_stmt>test_import_basic_bundle_with_appinfo self<block_start>""" Check that a minimal bundle imports without error """<line_sep>bundle=build_bundle({'src/main.c':'' 'appinfo.json':make_appinfo()})<line_sep>do_import_archive(self.project_id bundle)<block_end><def_stmt>test_throws_with_invalid_appinfo self<block_start>""" Check that appinfo validation is performed with a few invalid values """<line_sep>invalid_things=[('projectType' 'invalid') ('sdkVersion' '1') ('versionLabel' '01.0') ]<for_stmt>k,v invalid_things<block_start>bundle=build_bundle({'src/main.c':'' 'appinfo.json':make_appinfo({k:v})})<with_stmt>self.assertRaises(ValidationError)<block_start>do_import_archive(self.project_id bundle)<block_end><block_end><block_end><def_stmt>test_import_basic_bundle_with_npm_manifest self<block_start>""" Check that archives with package.json can be imported """<line_sep>bundle=build_bundle({'src/main.c':'' 'package.json':make_package(package_options={'name':'myproject'})})<line_sep>do_import_archive(self.project_id bundle)<line_sep>project=Project.objects.get(pk=self.project_id)<line_sep>self.assertEqual(project.app_long_name 
'test')<line_sep>self.assertEqual(project.app_short_name 'myproject')<block_end><def_stmt>test_import_package_with_dependencies self<block_start>""" Check that dependencies in a package.json file are imported into the database """<line_sep>deps={'some_package':'3.14.15' 'another':'http://blah.com/package.git' }<line_sep>bundle=build_bundle({'src/main.c':'' 'package.json':make_package(package_options={'dependencies':deps})})<line_sep>do_import_archive(self.project_id bundle)<line_sep>project=Project.objects.get(pk=self.project_id)<line_sep>actual_deps={d.name:d.version<for>d project.dependencies.all()}<line_sep>self.assertDictEqual(actual_deps deps)<block_end><def_stmt>test_import_package_with_keywords self<block_start>""" Check that keywords in a package.json file are imported into the database """<line_sep>keywords=['pebbles' 'watch' 'bunnies']<line_sep>bundle=build_bundle({'src/main.c':'' 'package.json':make_package(package_options={'keywords':keywords})})<line_sep>do_import_archive(self.project_id bundle)<line_sep>project=Project.objects.get(pk=self.project_id)<line_sep>self.assertEqual(set(keywords) set(project.keywords))<block_end><def_stmt>test_import_appinfo_with_resources self<block_start>""" Check that a resource can be imported in an appinfo.json project """<line_sep>bundle=build_bundle({'src/main.c':'' 'resources/images/blah.png':'contents!' 'appinfo.json':make_appinfo(options=self.make_resource_spec())})<line_sep>do_import_archive(self.project_id bundle)<line_sep>project=Project.objects.get(pk=self.project_id)<line_sep>self.assertEqual(project.resources.get().variants.get().get_contents() 'contents!')<block_end><def_stmt>test_import_package_with_resources self<block_start>""" Check that a resource can be imported in an package.json project """<line_sep>bundle=build_bundle({'src/main.c':'' 'resources/images/blah.png':'contents!' 
'package.json':make_package(pebble_options=self.make_resource_spec())})<line_sep>do_import_archive(self.project_id bundle)<line_sep>project=Project.objects.get(pk=self.project_id)<line_sep>self.assertEqual(project.resources.get().variants.get().get_contents() 'contents!')<block_end><def_stmt>test_throws_with_local_file_dependencies self<block_start>""" Throw if any dependencies reference local files """<line_sep>bad_versions=['file:security/breach' '/security/breach' './security/breach' '../security/breach' '~/security/breach']<for_stmt>version bad_versions<block_start>bundle=build_bundle({'src/main.c':'' 'package.json':make_package(package_options={'dependencies':{'some_package':version}})})<with_stmt>self.assertRaises(ValidationError)<block_start>do_import_archive(self.project_id bundle)<block_end><block_end><block_end><def_stmt>test_throws_if_sdk2_project_has_array_appkeys self<block_start>""" Throw when trying to import an sdk 2 project with array appkeys """<line_sep>bundle=build_bundle({'src/main.c':'' 'appinfo.json':make_appinfo(options={'appKeys':[] 'sdkVersion':'2'})})<with_stmt>self.assertRaises(ValidationError)<block_start>do_import_archive(self.project_id bundle)<block_end><block_end><def_stmt>test_invalid_resource_id self<block_start>""" Check that invalid characters are banned from resource IDs """<line_sep>bundle=build_bundle({'src/main.c':'' 'resources/images/blah.png':'contents!' 
'package.json':make_package(pebble_options=self.make_resource_spec("<>"))})<with_stmt>self.assertRaises(ValidationError)<block_start>do_import_archive(self.project_id bundle)<block_end><block_end><def_stmt>test_import_json_file self<block_start>""" Check that json files are correctly imported """<line_sep>bundle=build_bundle({'src/js/test.json':'{}' 'src/main.c':'' 'package.json':make_package()})<line_sep>do_import_archive(self.project_id bundle)<line_sep>project=Project.objects.get(pk=self.project_id)<line_sep>self.assertEqual(project.source_files.filter(file_name='test.json').count() 1)<block_end><def_stmt>test_import_rocky self<block_start>""" Check that json files are correctly imported """<line_sep>bundle=build_bundle({'src/rocky/index.js':'' 'src/common/lib.js':'' 'src/pkjs/app.js':'' 'package.json':make_package(pebble_options={'projectType':'rocky'})})<line_sep>do_import_archive(self.project_id bundle)<line_sep>project=Project.objects.get(pk=self.project_id)<line_sep>self.assertEqual(project.source_files.filter(file_name='index.js' target='app').count() 1)<line_sep>self.assertEqual(project.source_files.filter(file_name='lib.js' target='common').count() 1)<line_sep>self.assertEqual(project.source_files.filter(file_name='app.js' target='pkjs').count() 1)<block_end><block_end>@mock.patch('ide.models.s3file.s3' fake_s3)<class_stmt>TestImportLibrary(CloudpebbleTestCase)<block_start><def_stmt>setUp self<block_start>self.login(type='package')<block_end><def_stmt>test_import_basic_library self<block_start>""" Try importing a basic library """<line_sep>bundle=build_bundle({'include/my-lib.h':'' 'package.json':make_package(pebble_options={'projectType':'package'}) 'src/c/my-lib.c':'' 'src/c/my-priv.h':'' })<line_sep>do_import_archive(self.project_id bundle)<line_sep>project=Project.objects.get(pk=self.project_id)<line_sep>files={f.file_name:f<for>f project.source_files.all()}<line_sep>self.assertSetEqual(set(files.keys()) {'my-lib.h' 'my-lib.c' 
'my-priv.h'})<line_sep>self.assertEqual(files['my-lib.h'].target 'public')<line_sep>self.assertEqual(files['my-lib.c'].target 'app')<line_sep>self.assertEqual(files['my-priv.h'].target 'app')<block_end><def_stmt>test_import_library_with_resources self<block_start>""" Try importing a basic library with resources """<line_sep>bundle=build_bundle({'package.json':make_package(pebble_options={'projectType':'package' 'resources':{'media':[{'type':'bitmap' 'name':'MY_RES1' 'file':'res1.png'} {'type':'bitmap' 'name':'MY_RES2' 'file':'res2.png'}]}}) 'src/resources/res1.png':'' 'src/resources/res2.png':'' })<line_sep>do_import_archive(self.project_id bundle)<line_sep>project=Project.objects.get(pk=self.project_id)<line_sep>self.assertSetEqual({f.file_name<for>f project.resources.all()} {'res1.png' 'res2.png'})<block_end><block_end>
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.

"""Functionality for computing metrics, with a particular focus on disaggregated metrics.

For our purpose, a metric is a function with signature
``f(y_true, y_pred, ....)``
where ``y_true`` are the set of true values and ``y_pred`` are
values predicted by a machine learning algorithm. Other
arguments may be present (most often sample weights), which will
affect how the metric is calculated.

This module provides the concept of a *disaggregated metric*.
This is a metric where in addition to ``y_true`` and ``y_pred``
values, the user provides information about group membership
for each sample.
For example, a user could provide a 'Gender' column, and the
disaggregated metric would contain separate results for the subgroups
'male', 'female' and 'nonbinary' indicated by that column.
The underlying metric function is evaluated for each of these three
subgroups.
This extends to multiple grouping columns, calculating the metric
for each combination of subgroups.
"""

import sys as _sys

from ._metric_frame import MetricFrame  # noqa: F401
from ._make_derived_metric import make_derived_metric  # noqa: F401
from ._generated_metrics import _generated_metric_dict
from ._disparities import (  # noqa: F401
    demographic_parity_difference,
    demographic_parity_ratio,
    equalized_odds_difference,
    equalized_odds_ratio,
)
from ._extra_metrics import (  # noqa: F401
    true_positive_rate,
    true_negative_rate,
    false_positive_rate,
    false_negative_rate,
    _balanced_root_mean_squared_error,
    mean_prediction,
    selection_rate,
    _mean_overprediction,
    _mean_underprediction,
    count,
)

# Expose the generated metrics of the form
# <metric>_{difference,ratio,group_min,group_max} as module attributes.
_module_obj = _sys.modules[__name__]
for _name, _func in _generated_metric_dict.items():
    setattr(_module_obj, _name, _func)

# ============================================
# Build list of items to be listed in the docs

_core = [
    "MetricFrame",
    "make_derived_metric",
]

_disparities = [
    "demographic_parity_difference",
    "demographic_parity_ratio",
    "equalized_odds_difference",
    "equalized_odds_ratio",
]

_extra_metrics = [
    "true_positive_rate",
    "true_negative_rate",
    "false_positive_rate",
    "false_negative_rate",
    "mean_prediction",
    "selection_rate",
    "count",
]

__all__ = _core + _disparities + _extra_metrics + list(sorted(_generated_metric_dict.keys()))
# Copyright (C) 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>
#
# This file is part of breast_cancer_classifier.
#
# breast_cancer_classifier is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# breast_cancer_classifier is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with breast_cancer_classifier.  If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
import cv2
import numpy as np

from src.constants import VIEWS


def shift_window_inside_image(start, end, image_axis_size, input_axis_size):
    """
    If the window goes outside the bound of the image, then shifts it to fit inside the image.
    """
    if start < 0:
        # Clamp to the leading edge, keeping the window length fixed
        start = 0
        end = start + input_axis_size
    elif end > image_axis_size:
        # Clamp to the trailing edge, keeping the window length fixed
        end = image_axis_size
        start = end - input_axis_size
    return start, end


def zero_pad_and_align_window(image_axis_size, input_axis_size, max_crop_and_size_noise, bidirectional):
    """
    Adds Zero padding to the image if cropped image is smaller than required window size.
    """
    # Total padding needed: window size plus noise margin (both sides if bidirectional)
    pad_width = input_axis_size - image_axis_size + max_crop_and_size_noise * (2 if bidirectional else 1)
    assert (pad_width >= 0)

    if bidirectional:
        pad_front = int(pad_width / 2)
        start = max_crop_and_size_noise
    else:
        start, pad_front = 0, 0

    pad_back = pad_width - pad_front
    end = start + input_axis_size

    return start, end, pad_front, pad_back


def simple_resize(image_to_resize, size):
    """
    Resizes image to the required size
    """
    image_resized = cv2.resize(image_to_resize, (size[1], size[0]), interpolation=cv2.INTER_CUBIC)
    # cv2.resize drops a trailing singleton channel axis; restore it
    if len(image_to_resize.shape) == 3 and len(image_resized.shape) == 2 and image_to_resize.shape[2] == 1:
        image_resized = np.expand_dims(image_resized, 2)
    return image_resized


def crop_image(image, input_size, borders):
    """
    Crops image to the required size using window location
    """
    cropped_image = image[borders[0]:borders[1], borders[2]:borders[3]]
    # Resize only if the window dimensions differ from the requested size
    if ((borders[1] - borders[0]) != input_size[0]) or ((borders[3] - borders[2]) != input_size[1]):
        cropped_image = simple_resize(cropped_image, input_size)
    return cropped_image


def window_location_at_center_point(input_size, center_y, center_x):
    """
    Calculates window location (top, bottom, left, right)
    given center point and size of augmentation window
    """
    half_height = input_size[0] // 2
    half_width = input_size[1] // 2
    top = center_y - half_height
    bottom = center_y + input_size[0] - half_height
    left = center_x - half_width
    right = center_x + input_size[1] - half_width
    return top, bottom, left, right


def sample_crop_best_center(image, input_size, random_number_generator, max_crop_noise,
                            max_crop_size_noise, best_center, view):
    """
    Crops using the best center point and ideal window size.
    Pads small images to have enough room for crop noise and size noise.
    Applies crop noise in location of the window borders.
    """
    max_crop_noise = np.array(max_crop_noise)
    crop_noise_multiplier = np.zeros(2, dtype=np.float32)

    if max_crop_noise.any():
        # there is no point in sampling crop_noise_multiplier if it's going to be multiplied by (0, 0)
        crop_noise_multiplier = random_number_generator.uniform(low=-1.0, high=1.0, size=2)

    center_y, center_x = best_center

    # get the window around the center point. The window might be outside of the image.
    top, bottom, left, right = window_location_at_center_point(input_size, center_y, center_x)

    pad_y_top, pad_y_bottom, pad_x_right = 0, 0, 0

    if VIEWS.is_cc(view):
        if image.shape[0] < input_size[0] + (max_crop_noise[0] + max_crop_size_noise) * 2:
            # Image is smaller than window size + noise margin in y direction.
            # CC view: pad at both top and bottom
            top, bottom, pad_y_top, pad_y_bottom = zero_pad_and_align_window(
                image.shape[0], input_size[0], max_crop_noise[0] + max_crop_size_noise, True)
    elif VIEWS.is_mlo(view):
        if image.shape[0] < input_size[0] + max_crop_noise[0] + max_crop_size_noise:
            # Image is smaller than window size + noise margin in y direction.
            # MLO view: only pad at the bottom
            top, bottom, _, pad_y_bottom = zero_pad_and_align_window(
                image.shape[0], input_size[0], max_crop_noise[0] + max_crop_size_noise, False)
    else:
        raise KeyError("Unknown view", view)

    if image.shape[1] < input_size[1] + max_crop_noise[1] + max_crop_size_noise:
        # Image is smaller than window size + noise margin in x direction.
        left, right, _, pad_x_right = zero_pad_and_align_window(
            image.shape[1], input_size[1], max_crop_noise[1] + max_crop_size_noise, False)

    # Pad image if necessary by allocating new memory and copying contents over
    if pad_y_top > 0 or pad_y_bottom > 0 or pad_x_right > 0:
        new_zero_array = np.zeros((
            image.shape[0] + pad_y_top + pad_y_bottom,
            image.shape[1] + pad_x_right,
            image.shape[2]), dtype=image.dtype)
        new_zero_array[pad_y_top:image.shape[0] + pad_y_top, 0:image.shape[1]] = image
        image = new_zero_array

    # if window is drawn outside of image, shift it to be inside the image.
    top, bottom = shift_window_inside_image(top, bottom, image.shape[0], input_size[0])
    left, right = shift_window_inside_image(left, right, image.shape[1], input_size[1])

    if top == 0:
        # there is nowhere to shift upwards, we only apply noise downwards
        crop_noise_multiplier[0] = np.abs(crop_noise_multiplier[0])
    elif bottom == image.shape[0]:
        # there is nowhere to shift down, we only apply noise upwards
        crop_noise_multiplier[0] = -np.abs(crop_noise_multiplier[0])
    # else: we do nothing to the noise multiplier

    if left == 0:
        # there is nowhere to shift left, we only apply noise to move right
        crop_noise_multiplier[1] = np.abs(crop_noise_multiplier[1])
    elif right == image.shape[1]:
        # there is nowhere to shift right, we only apply noise to move left
        crop_noise_multiplier[1] = -np.abs(crop_noise_multiplier[1])
    # else: we do nothing to the noise multiplier

    borders = np.array((top, bottom, left, right), dtype=np.int32)

    # Calculate maximum amount of how much the window can move for cropping noise
    top_margin = top
    bottom_margin = image.shape[0] - bottom
    left_margin = left
    right_margin = image.shape[1] - right

    if crop_noise_multiplier[0] >= 0:
        vertical_margin = bottom_margin
    else:
        vertical_margin = top_margin

    if crop_noise_multiplier[1] >= 0:
        horizontal_margin = right_margin
    else:
        horizontal_margin = left_margin

    if vertical_margin < max_crop_noise[0]:
        max_crop_noise[0] = vertical_margin

    if horizontal_margin < max_crop_noise[1]:
        max_crop_noise[1] = horizontal_margin

    crop_noise = np.round(max_crop_noise * crop_noise_multiplier)
    crop_noise = np.array((crop_noise[0], crop_noise[0], crop_noise[1], crop_noise[1]), dtype=np.int32)
    borders = borders + crop_noise

    # this is to make sure that the cropping window isn't outside of the image
    assert (borders[0] >= 0) and (borders[1] <= image.shape[0]) and (borders[2] >= 0) and (
        borders[3] <= image.shape[1]), "Centre of the crop area is sampled such that the borders are outside of the image. Borders: " + str(
        borders) + ', image shape: ' + str(image.shape)

    # return the padded image and cropping window information
    return image, borders
"""<line_sep>size_noise_multiplier=random_number_generator.uniform(low=-1.0 high=1.0 size=4)<line_sep>top_margin=borders[0]<line_sep>bottom_margin=image.shape[0]-borders[1]<line_sep>left_margin=borders[2]<line_sep>right_margin=image.shape[1]-borders[3]<line_sep>max_crop_size_noise=min(max_crop_size_noise top_margin bottom_margin left_margin right_margin)<if_stmt>input_size[0]<ge>input_size[1]<block_start>max_crop_size_vertical_noise=max_crop_size_noise<line_sep>max_crop_size_horizontal_noise=np.round(max_crop_size_noise<times>(input_size[1]/input_size[0]))<block_end><elif_stmt>input_size[0]<l>input_size[1]<block_start>max_crop_size_vertical_noise=np.round(max_crop_size_noise<times>(input_size[0]/input_size[1]))<line_sep>max_crop_size_horizontal_noise=max_crop_size_noise<block_end><else_stmt><block_start><raise>RuntimeError()<block_end>max_crop_size_noise=np.array((max_crop_size_vertical_noise max_crop_size_vertical_noise max_crop_size_horizontal_noise max_crop_size_horizontal_noise) dtype=np.int32)<line_sep>size_noise=np.round(max_crop_size_noise<times>size_noise_multiplier)<line_sep>size_noise=np.array(size_noise dtype=np.int32)<line_sep>borders=borders+size_noise<line_sep># this is to make sure that the cropping window isn't outside of the image <assert_stmt>(borders[0]<ge>0)<and>(borders[1]<le>image.shape[0])<and>(borders[2]<ge>0)<and>(borders[3]<le>image.shape[1]) "Center of the crop area is sampled such that the borders are outside of the image. Borders: "+str(borders)+', image shape: '+str(image.shape)<line_sep># Sanity check. make sure that the top is above the bottom <assert_stmt>borders[1]<g>borders[0] "Bottom above the top. Top: "+str(borders[0])+', bottom: '+str(borders[1])<line_sep># Sanity check. make sure that the left is left to the right <assert_stmt>borders[3]<g>borders[2] "Left on the right. 
Left: "+str(borders[2])+', right: '+str(borders[3])<line_sep><return>borders<block_end><def_stmt>random_augmentation_best_center image input_size random_number_generator max_crop_noise=(0 0) max_crop_size_noise=0 auxiliary_image=<none> best_center=<none> view=""<block_start>""" Crops augmentation window from a given image by applying noise in location and size of the window. """<line_sep>joint_image=np.expand_dims(image 2)<if_stmt>auxiliary_image<is><not><none><block_start>joint_image=np.concatenate([joint_image auxiliary_image] axis=2)<block_end>joint_image,borders=sample_crop_best_center(joint_image input_size random_number_generator max_crop_noise max_crop_size_noise best_center view)<line_sep>borders=sample_crop(joint_image input_size borders random_number_generator max_crop_size_noise)<line_sep>sampled_joint_image=crop_image(joint_image input_size borders)<if_stmt>auxiliary_image<is><none><block_start><return>sampled_joint_image[: : 0] <none><block_end><else_stmt><block_start><return>sampled_joint_image[: : 0] sampled_joint_image[: : 1:]<block_end><block_end>
# -*- coding: utf-8 -*- # Copyright 2016 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_future_stmt> absolute_import<import_from_future_stmt> unicode_literals<import_stmt>time<import_from_stmt>collections defaultdict<import_from_stmt>collections namedtuple<import_from_stmt>contextlib contextmanager<import_from_stmt>cached_property cached_property<import_from_stmt>kafka create_message<import_from_stmt>kafka KafkaClient<import_from_stmt>kafka.common ProduceRequest<import_from_stmt>data_pipeline._position_data_tracker PositionDataTracker<import_from_stmt>data_pipeline._producer_retry RetryHandler<import_from_stmt>data_pipeline._retry_util ExpBackoffPolicy<import_from_stmt>data_pipeline._retry_util MaxRetryError<import_from_stmt>data_pipeline._retry_util Predicate<import_from_stmt>data_pipeline._retry_util retry_on_condition<import_from_stmt>data_pipeline._retry_util RetryPolicy<import_from_stmt>data_pipeline.config get_config<import_from_stmt>data_pipeline.envelope Envelope<line_sep>_EnvelopeAndMessage=namedtuple("_EnvelopeAndMessage" ["envelope" "message"])<line_sep>logger=get_config().logger<line_sep># prepare needs to be in the module top level so it can be serialized for # multiprocessing <def_stmt>_prepare 
envelope_and_message<block_start><try_stmt><block_start>kwargs={}<if_stmt>envelope_and_message.message.keys<block_start>kwargs['key']=envelope_and_message.message.encoded_keys<block_end><return>create_message(envelope_and_message.envelope.pack(envelope_and_message.message) **kwargs)<block_end><except_stmt><block_start>logger.exception('Prepare failed')<line_sep><raise><block_end><block_end><class_stmt>KafkaProducer(object)<block_start>"""The KafkaProducer deals with buffering messages that need to be published into Kafka, preparing them for publication, and ultimately publishing them. Args: producer_position_callback (function): The producer position callback is called when the KafkaProducer is instantiated, and every time messages are published to notify the producer of current position information of successfully published messages. dry_run (Optional[bool]): When dry_run mode is on, the producer won't talk to real KafKa topic, nor to real Schematizer. Default to False. """<line_sep>@cached_property<def_stmt>envelope self<block_start><return>Envelope()<block_end><def_stmt>__init__ self producer_position_callback dry_run=<false><block_start>self.producer_position_callback=producer_position_callback<line_sep>self.dry_run=dry_run<line_sep>self.kafka_client=KafkaClient(get_config().cluster_config.broker_list)<line_sep>self.position_data_tracker=PositionDataTracker()<line_sep>self._reset_message_buffer()<line_sep>self.skip_messages_with_pii=get_config().skip_messages_with_pii<line_sep>self._publish_retry_policy=RetryPolicy(ExpBackoffPolicy(with_jitter=<true>) max_retry_count=get_config().producer_max_publish_retry_count)<line_sep>self._automatic_flush_enabled=<true><block_end>@contextmanager<def_stmt>disable_automatic_flushing self<block_start>"""Prevents the producer from flushing automatically (e.g. for timeouts or batch size) while the context manager is open. 
"""<try_stmt><block_start>self._automatic_flush_enabled=<false><line_sep><yield><block_end><finally_stmt><block_start>self._automatic_flush_enabled=<true><block_end><block_end><def_stmt>wake self<block_start>"""Should be called periodically if we're not otherwise waking up by publishing, to ensure that messages are actually published. """<line_sep># if we haven't woken up in a while, we may need to flush messages self._flush_if_necessary()<block_end><def_stmt>publish self message<block_start><if_stmt>message.contains_pii<and>self.skip_messages_with_pii<block_start>logger.info("Skipping a PII message - "<concat>"uuid hex: {0}, "<concat>"schema_id: {1}, "<concat>"timestamp: {2}, "<concat>"type: {3}".format(message.uuid_hex message.schema_id message.timestamp message.message_type.name))<line_sep><return><block_end>self._add_message_to_buffer(message)<line_sep>self.position_data_tracker.record_message_buffered(message)<line_sep>self._flush_if_necessary()<block_end><def_stmt>flush_buffered_messages self<block_start>produce_method=(self._publish_produce_requests_dry_run<if>self.dry_run<else>self._publish_produce_requests)<line_sep>produce_method(self._generate_produce_requests())<line_sep>self._reset_message_buffer()<block_end><def_stmt>close self<block_start>self.flush_buffered_messages()<line_sep>self.kafka_client.close()<block_end><def_stmt>_publish_produce_requests self requests<block_start>"""It will try to publish all the produce requests for topics, and retry a number of times until either all the requests are successfully published or it can no longer retry, in which case, the exception will be thrown. Each time the requests that are successfully published in the previous round will be removed from the requests and won't be published again. 
"""<line_sep>unpublished_requests=list(requests)<line_sep>retry_handler=RetryHandler(self.kafka_client unpublished_requests)<def_stmt>has_requests_to_be_sent <block_start><return>bool(retry_handler.requests_to_be_sent)<block_end>retry_handler=retry_on_condition(retry_policy=self._publish_retry_policy retry_conditions=[Predicate(has_requests_to_be_sent)] func_to_retry=self._publish_requests use_previous_result_as_param=<true> retry_handler=retry_handler)<if_stmt>retry_handler.has_unpublished_request<block_start><raise>MaxRetryError(last_result=retry_handler)<block_end><block_end><def_stmt>_publish_requests self retry_handler<block_start>"""Main function to publish message requests. This function is wrapped with retry function and will be retried based on specified retry policy Args: retry_handler: :class:`data_pipeline._producer_retry.RetryHandler` that determines which messages should be retried next time. """<if_stmt><not>retry_handler.requests_to_be_sent<block_start><return>retry_handler<block_end>responses=self._try_send_produce_requests(retry_handler.requests_to_be_sent)<line_sep>retry_handler.update_requests_to_be_sent(responses self.position_data_tracker.topic_to_kafka_offset_map)<line_sep>self._record_success_requests(retry_handler.success_topic_stats_map)<line_sep><return>retry_handler<block_end><def_stmt>_try_send_produce_requests self requests# Either it throws exceptions and none of them succeeds, or it returns # responses of all the requests (success or fail response). <block_start><try_stmt><block_start><return>self.kafka_client.send_produce_request(payloads=requests acks=get_config().kafka_client_ack_count fail_on_error=<false>)<block_end><except_stmt>Exception# Exceptions like KafkaUnavailableError, LeaderNotAvailableError, # UnknownTopicOrPartitionError, etc., are not controlled by # `fail_on_error` flag and could be thrown from the kafka client, # and fail all the requests. 
We will retry all the requests until # either all of them are successfully published or it exceeds the # maximum retry criteria. <block_start><return>[]<block_end><block_end><def_stmt>_record_success_requests self success_topic_stats_map<block_start><for_stmt>topic_partition,stats success_topic_stats_map.iteritems()<block_start>topic=topic_partition.topic_name<assert_stmt>stats.message_count<eq>len(self.message_buffer[topic])<line_sep>self.position_data_tracker.record_messages_published(topic=topic offset=stats.original_offset message_count=stats.message_count)<line_sep>self.message_buffer.pop(topic)<block_end><block_end><def_stmt>_publish_produce_requests_dry_run self requests<block_start><for_stmt>request requests<block_start>self._publish_single_request_dry_run(request)<block_end><block_end><def_stmt>_publish_single_request_dry_run self request<block_start>topic=request.topic<line_sep>message_count=len(request.messages)<line_sep>self.position_data_tracker.record_messages_published(topic -1 message_count)<block_end><def_stmt>_is_ready_to_flush self<block_start>time_limit=get_config().kafka_producer_flush_time_limit_seconds<line_sep><return>(self._automatic_flush_enabled<and>((time.time()-self.start_time)<ge>time_limit<or>self.message_buffer_size<ge>get_config().kafka_producer_buffer_size))<block_end><def_stmt>_flush_if_necessary self<block_start><if_stmt>self._is_ready_to_flush()<block_start>self.flush_buffered_messages()<block_end><block_end><def_stmt>_add_message_to_buffer self message<block_start>topic=message.topic<line_sep>message=self._prepare_message(message)<line_sep>self.message_buffer[topic].append(message)<line_sep>self.message_buffer_size<augadd>1<block_end><def_stmt>_generate_produce_requests self<block_start><return>[ProduceRequest(topic=topic partition=0 messages=messages)<for>topic,messages self._generate_prepared_topic_and_messages()]<block_end><def_stmt>_generate_prepared_topic_and_messages 
self<block_start><return>self.message_buffer.iteritems()<block_end><def_stmt>_prepare_message self message<block_start><return>_prepare(_EnvelopeAndMessage(envelope=self.envelope message=message))<block_end><def_stmt>_reset_message_buffer self<block_start><if_stmt><not>hasattr(self 'message_buffer_size')<or>self.message_buffer_size<g>0<block_start>self.producer_position_callback(self.position_data_tracker.get_position_data())<block_end>self.start_time=time.time()<line_sep>self.message_buffer=defaultdict(list)<line_sep>self.message_buffer_size=0<block_end><block_end><class_stmt>LoggingKafkaProducer(KafkaProducer)<block_start><def_stmt>_publish_produce_requests self requests<block_start>logger.info("Flushing buffered messages - requests={0}, messages={1}".format(len(requests) self.message_buffer_size))<try_stmt><block_start>super(LoggingKafkaProducer self)._publish_produce_requests(requests)<line_sep>logger.info("All messages published successfully")<block_end><except_stmt>MaxRetryError<as>e<block_start>logger.exception("Failed to publish all produce requests. {0}".format(repr(e)))<line_sep><raise><block_end><block_end><def_stmt>_reset_message_buffer self<block_start>logger.info("Resetting message buffer for success requests.")<line_sep>super(LoggingKafkaProducer self)._reset_message_buffer()<block_end><def_stmt>_publish_single_request_dry_run self request<block_start>super(LoggingKafkaProducer self)._publish_single_request_dry_run(request)<line_sep>logger.debug("dry_run mode: Would have published {0} messages to {1}".format(len(request.messages) request.topic))<block_end><block_end>
import time
import uuid
import base64
import hashlib


def millis():
    """Return the current Unix time in milliseconds (rounded to nearest int)."""
    now_ms = time.time() * 1000
    return int(round(now_ms))


def timestamp():
    """Return the current Unix time in whole seconds (truncated)."""
    return int(time.time())


def base64_encode(s):
    """Base64-encode a text string; returns the result as text (UTF-8)."""
    raw = s.encode('utf-8')
    return base64.b64encode(raw).decode('utf-8')


def base64_decode(b):
    """Decode a base64 payload back to a UTF-8 text string."""
    return base64.b64decode(b).decode('utf-8')


def generate_uuid():
    """Return a fresh random (version 4) UUID as its canonical string form."""
    return str(uuid.uuid4())


def generate_sha1(text):
    """Return the hex SHA-1 digest of *text* (encoded as UTF-8)."""
    return hashlib.sha1(text.encode('utf-8')).hexdigest()
from django.shortcuts import render

from app.forms import ExampleForm


def index(request):
    """Render the landing page with a blank ExampleForm bound into context."""
    context = {'form': ExampleForm()}
    return render(request, 'index.html', context)
"""Implementation of the 'array_getitem_wrap' operation."""

from ..lib import Slice, core, myia_static
from ..operations import array_getitem, reshape


def _dim_explicit(dim, dim_size):
    """Normalize a possibly-negative index to a non-negative one for a
    dimension of size *dim_size*."""
    if dim < 0:
        dim += dim_size
    assert dim >= 0
    return dim


@myia_static
def _build_slices(a_shp, item):
    """Translate an indexing tuple into explicit (begin, end, stride) tuples.

    For each axis of *a_shp*: a slice keeps the axis (filling in defaults),
    an integer index selects a single element and marks the axis for removal,
    and axes beyond len(item) are taken whole.  Returns a fourth tuple of
    booleans flagging which axes should be squeezed out afterwards.
    """
    begin, end, stride, remove_dims = (), (), (), ()
    for axis, size in enumerate(a_shp):
        if axis >= len(item):
            # No index given for this axis: take the full extent.
            begin += (0,)
            end += (size,)
            stride += (1,)
            remove_dims += (False,)
            continue
        idx = item[axis]
        if isinstance(idx, (slice, Slice)):
            begin += (0 if idx.start is None else _dim_explicit(idx.start, size),)
            end += (size if idx.stop is None else _dim_explicit(idx.stop, size),)
            stride += (1 if idx.step is None else idx.step,)
            remove_dims += (False,)
        else:
            # Integer index: a length-1 slice whose axis is dropped later.
            pos = _dim_explicit(idx, size)
            begin += (pos,)
            end += (pos + 1,)
            stride += (1,)
            remove_dims += (True,)
    return begin, end, stride, remove_dims


@core
def array_getitem_wrap(array, item):
    """Implementation of `array_getitem`."""
    if isinstance(item, tuple):
        begin, end, stride, remove_dims = _build_slices(array.shape, item)
    else:
        begin, end, stride, remove_dims = _build_slices(array.shape, (item,))
    ret = array_getitem(array, begin, end, stride)
    # Squeeze out the axes that were selected by an integer index.
    final_shape = ()
    for extent, removed in zip(ret.shape, remove_dims):
        if not removed:
            final_shape += (extent,)
    return reshape(ret, final_shape)


__operation_defaults__ = {
    "name": "array_getitem_wrap",
    "registered_name": "array_getitem_wrap",
    "mapping": array_getitem_wrap,
    "python_implementation": None,
}
from django.shortcuts import render


def view_404(request):
    """Render the custom "page not found" template with HTTP status 404."""
    template = 'django_tutorial/error_pages/page_404.html'
    return render(request, template, status=404)


def view_500(request):
    """Render the custom "server error" template with HTTP status 500."""
    template = 'django_tutorial/error_pages/page_500.html'
    return render(request, template, status=500)
import codecs

from solthiruthi.dictionary import *
from tamil import wordutils

# Build the TamilVU lexicon, then find every anagram group inside it.
TVU, TVU_size = DictionaryBuilder.create(TamilVU)
ag, ag2 = wordutils.anagrams_in_dictionary(TVU)

# Write one numbered line per anagram group, words joined by " | ".
with codecs.open("demo.txt", "w", "utf-8") as fp:
    for itr, (k, c) in enumerate(ag, start=1):
        v = ag2[k]
        fp.write("%03d) %s\n" % (itr, " | ".join(v)))
<import_from_future_stmt> division<import_stmt>numpy<as>np<import_from_stmt>path Path<import_from_stmt>imageio imread<import_from_stmt>skimage.transform resize<as>imresize<import_from_stmt>kitti_util pose_from_oxts_packet generate_depth_map read_calib_file transform_from_rot_trans<import_from_stmt>datetime datetime<class_stmt>KittiRawLoader(object)<block_start><def_stmt>__init__ self dataset_dir static_frames_file=<none> img_height=128 img_width=416 min_disp=0.2 get_depth=<false> get_pose=<false> depth_size_ratio=1<block_start>dir_path=Path(__file__).realpath().dirname()<line_sep>test_scene_file=dir_path/'test_scenes.txt'<line_sep>self.from_speed=static_frames_file<is><none><if_stmt>static_frames_file<is><not><none><block_start>self.collect_static_frames(static_frames_file)<block_end><with_stmt>open(test_scene_file 'r')<as>f<block_start>test_scenes=f.readlines()<block_end>self.test_scenes=[t[:-1]<for>t test_scenes]<line_sep>self.dataset_dir=dataset_dir<line_sep>self.img_height=img_height<line_sep>self.img_width=img_width<line_sep>self.cam_ids=['02' '03']<line_sep>self.date_list=['2011_09_26' '2011_09_28' '2011_09_29' '2011_09_30' '2011_10_03']<line_sep>self.min_disp=min_disp<line_sep>self.get_depth=get_depth<line_sep>self.get_pose=get_pose<line_sep>self.depth_size_ratio=depth_size_ratio<line_sep>self.collect_train_folders()<block_end><def_stmt>collect_static_frames self static_frames_file<block_start><with_stmt>open(static_frames_file 'r')<as>f<block_start>frames=f.readlines()<block_end>self.static_frames={}<for_stmt>fr frames<block_start><if_stmt>fr<eq>'\n'<block_start><continue><block_end>date,drive,frame_id=fr.split(' ')<line_sep>curr_fid='%.10d'%(np.int(frame_id[:-1]))<if_stmt>drive<not><in>self.static_frames.keys()<block_start>self.static_frames[drive]=[]<block_end>self.static_frames[drive].append(curr_fid)<block_end><block_end><def_stmt>collect_train_folders self<block_start>self.scenes=[]<for_stmt>date 
self.date_list<block_start>drive_set=(self.dataset_dir/date).dirs()<for_stmt>dr drive_set<block_start><if_stmt>dr.name[:-5]<not><in>self.test_scenes<block_start>self.scenes.append(dr)<block_end><block_end><block_end><block_end><def_stmt>collect_scenes self drive<block_start>train_scenes=[]<for_stmt>c self.cam_ids<block_start>oxts=sorted((drive/'oxts'/'data').files('*.txt'))<with_stmt>open(drive/'oxts'/'timestamps.txt' 'r')<as>f<block_start>times=[datetime.strptime(time_string[:-4] "%Y-%m-%d %H:%M:%S.%f")<for>time_string f.readlines()]<block_end>scene_data={'cid':c 'dir':drive 'speed':[] 'time':[t.timestamp()<for>t times] 'frame_id':[] 'pose':[] 'rel_path':drive.name+'_'+c}<line_sep>scale=<none><line_sep>origin=<none><line_sep>imu2velo=read_calib_file(drive.parent/'calib_imu_to_velo.txt')<line_sep>velo2cam=read_calib_file(drive.parent/'calib_velo_to_cam.txt')<line_sep>cam2cam=read_calib_file(drive.parent/'calib_cam_to_cam.txt')<line_sep>velo2cam_mat=transform_from_rot_trans(velo2cam['R'] velo2cam['T'])<line_sep>imu2velo_mat=transform_from_rot_trans(imu2velo['R'] imu2velo['T'])<line_sep>cam_2rect_mat=transform_from_rot_trans(cam2cam['R_rect_00'] np.zeros(3))<line_sep>imu2cam=cam_2rect_mat@velo2cam_mat@imu2velo_mat<for_stmt>n,f enumerate(oxts)<block_start>metadata=np.genfromtxt(f)<line_sep>speed=metadata[8:11]<line_sep>scene_data['speed'].append(speed)<line_sep>scene_data['frame_id'].append('{:010d}'.format(n))<line_sep>lat=metadata[0]<if_stmt>scale<is><none><block_start>scale=np.cos(lat<times>np.pi/180.)<block_end>pose_matrix=pose_from_oxts_packet(metadata[:6] scale)<if_stmt>origin<is><none><block_start>origin=pose_matrix<block_end>odo_pose=imu2cam@np.linalg.inv(origin)@pose_matrix@np.linalg.inv(imu2cam)<line_sep>scene_data['pose'].append(odo_pose[:3])<block_end>sample=self.load_image(scene_data 0)<if_stmt>sample<is><none><block_start><return>[]<block_end>scene_data['P_rect']=self.get_P_rect(scene_data sample[1] 
sample[2])<line_sep>scene_data['intrinsics']=scene_data['P_rect'][: :3]<line_sep>train_scenes.append(scene_data)<block_end><return>train_scenes<block_end><def_stmt>get_scene_imgs self scene_data<block_start><def_stmt>construct_sample scene_data i frame_id<block_start>sample={"img":self.load_image(scene_data i)[0] "id":frame_id}<if_stmt>self.get_depth<block_start>sample['depth']=self.get_depth_map(scene_data i)<block_end><if_stmt>self.get_pose<block_start>sample['pose']=scene_data['pose'][i]<block_end><return>sample<block_end><if_stmt>self.from_speed<block_start>cum_displacement=np.zeros(3)<for_stmt>i,(speed1 speed2 t1 t2) enumerate(zip(scene_data['speed'][1:] scene_data['speed'][:-1] scene_data['time'][1:] scene_data['time'][:-1]))<block_start>print(speed1 speed2 t1 t2)<line_sep>cum_displacement<augadd>0.5<times>(speed1+speed2)/(t2-t1)<line_sep>disp_mag=np.linalg.norm(cum_displacement)<if_stmt>disp_mag<g>self.min_disp<block_start>frame_id=scene_data['frame_id'][i]<line_sep><yield>construct_sample(scene_data i frame_id)<line_sep>cum_displacement<augmul>0<block_end><block_end><block_end><else_stmt># from static frame file <block_start>drive=str(scene_data['dir'].name)<for_stmt>(i frame_id) enumerate(scene_data['frame_id'])<block_start><if_stmt>(drive<not><in>self.static_frames.keys())<or>(frame_id<not><in>self.static_frames[drive])<block_start><yield>construct_sample(scene_data i frame_id)<block_end><block_end><block_end><block_end><def_stmt>get_P_rect self scene_data zoom_x zoom_y<block_start>calib_file=scene_data['dir'].parent/'calib_cam_to_cam.txt'<line_sep>filedata=read_calib_file(calib_file)<line_sep>P_rect=np.reshape(filedata['P_rect_'+scene_data['cid']] (3 4))<line_sep>P_rect[0]<augmul>zoom_x<line_sep>P_rect[1]<augmul>zoom_y<line_sep><return>P_rect<block_end><def_stmt>load_image self scene_data 
tgt_idx<block_start>img_file=scene_data['dir']/'image_{}'.format(scene_data['cid'])/'data'/scene_data['frame_id'][tgt_idx]+'.png'<if_stmt><not>img_file.isfile()<block_start><return><none><block_end>img=imread(img_file)<line_sep>zoom_y=self.img_height/img.shape[0]<line_sep>zoom_x=self.img_width/img.shape[1]<line_sep>img=imresize(img (self.img_height self.img_width))<line_sep># workaround for skimage (float [0 .. 1]) and imageio (uint8 [0 .. 255]) interoperability img=(img<times>255).astype(np.uint8)<line_sep><return>img zoom_x zoom_y<block_end><def_stmt>get_depth_map self scene_data tgt_idx# compute projection matrix velodyne->image plane <block_start>R_cam2rect=np.eye(4)<line_sep>calib_dir=scene_data['dir'].parent<line_sep>cam2cam=read_calib_file(calib_dir/'calib_cam_to_cam.txt')<line_sep>velo2cam=read_calib_file(calib_dir/'calib_velo_to_cam.txt')<line_sep>velo2cam=np.hstack((velo2cam['R'].reshape(3 3) velo2cam['T'][<ellipsis> np.newaxis]))<line_sep>velo2cam=np.vstack((velo2cam np.array([0 0 0 1.0])))<line_sep>R_cam2rect[:3 :3]=cam2cam['R_rect_00'].reshape(3 3)<line_sep>velo2cam=np.dot(R_cam2rect velo2cam)<line_sep>velo_file_name=scene_data['dir']/'velodyne_points'/'data'/'{}.bin'.format(scene_data['frame_id'][tgt_idx])<line_sep><return>generate_depth_map(velo_file_name scene_data['P_rect'] velo2cam self.img_width self.img_height self.depth_size_ratio)<block_end><block_end>
"""Asynchronous MongoDB and Redis connections."""
from functools import partial

import motor
import tornadoredis

from cloudtunes import settings

# Client factory pre-bound with the configured Redis connection settings;
# call RedisClient() wherever a dedicated connection is needed.
RedisClient = partial(tornadoredis.Client, **settings.REDIS)

# The `cloudtunes` database on the configured MongoDB server
# (motor is the non-blocking driver for Tornado).
mongo = motor.MotorClient(**settings.MONGODB).cloudtunes

# Shared default Redis client.
redis = RedisClient()
# add LDDMM shooting code into path
import sys
sys.path.append('../vectormomentum/Code/Python')
sys.path.append('../library')
from subprocess import call
import argparse
import os.path

# Add deep learning related libraries
from collections import Counter
import torch
import prediction_network
import util
import numpy as np
from skimage import exposure

# Add LDDMM registration related libraries
# pyca modules
import PyCA.Core as ca
import PyCA.Common as common
#import PyCA.Display as display
# vector momentum modules
# others
import logging
import copy
import math
import registration_methods

# parse command line input
parser = argparse.ArgumentParser(description='Deformation prediction given set of moving and target images.')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('--moving-image', nargs='+', required=True, metavar=('m1', 'm2, m3...'),
                           help='List of moving images, separated by space.')
requiredNamed.add_argument('--target-image', nargs='+', required=True, metavar=('t1', 't2, t3...'),
                           help='List of target images, separated by space.')
requiredNamed.add_argument('--output-prefix', nargs='+', required=True, metavar=('o1', 'o2, o3...'),
                           help='List of registration output prefixes for every moving/target image pair, separated by space. Preferred to be a directory (e.g. /some_path/output_dir/)')
# NOTE: help text previously claimed "(default: 64)" while the actual default is 50.
parser.add_argument('--samples', type=int, default=50, metavar='N',
                    help='number of times to sample the network (default: 50)')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                    help='input batch size for prediction network (default: 64)')
parser.add_argument('--n-GPU', type=int, default=1, metavar='N',
                    help='number of GPUs used for prediction (default: 1). For maximum efficiency please set the batch size divisible by the number of GPUs.')
parser.add_argument('--use-CPU-for-shooting', action='store_true', default=False,
                    help='Use CPU for geodesic shooting. Slow, but saves GPU memory.')
parser.add_argument('--shoot-steps', type=int, default=0, metavar='N',
                    help='time steps for geodesic shooting. Ignore this option to use the default step size used by the registration model.')
parser.add_argument('--affine-align', action='store_true', default=False,
                    help='Perform affine registration to align moving and target images to ICBM152 atlas space. Require niftireg.')
parser.add_argument('--histeq', action='store_true', default=False,
                    help='Perform histogram equalization to the moving and target images.')
parser.add_argument('--atlas', default="../data/atlas/icbm152.nii",
                    help="Atlas to use for (affine) pre-registration")
parser.add_argument('--prediction-parameter', default='../../network_configs/OASIS_predict_probabilistic.pth.tar',
                    help="network parameters for the prediction network")
args = parser.parse_args()


# check validity of input arguments from command line
def check_args(args):
    """Validate command-line arguments; exits with status 1 on fatal
    inconsistencies, and only warns for recoverable ones."""
    # number of input images/output prefix consistency check
    n_moving_images = len(args.moving_image)
    n_target_images = len(args.target_image)
    n_output_prefix = len(args.output_prefix)
    if (n_moving_images != n_target_images):
        print('The number of moving images is not consistent with the number of target images!')
        sys.exit(1)
    elif (n_moving_images != n_output_prefix):
        print('The number of output prefix is not consistent with the number of input images!')
        sys.exit(1)

    # number of GPU check (positive integers)
    if (args.n_GPU <= 0):
        print('Number of GPUs must be positive!')
        sys.exit(1)

    # geodesic shooting step check (positive integers)
    if (args.shoot_steps < 0):
        print('Shooting steps (--shoot-steps) is negative. Using model default step.')

    # number of samples check (positive integers)
    if (args.samples < 1):
        print('Number of samples (--samples) is smaller than 1. Using model default step.')
#enddef


def create_net(args, network_config):
    """Build the prediction network from a saved config, optionally wrapped
    in DataParallel over several GPUs.

    NOTE(review): the network is intentionally left in train() mode —
    presumably so dropout stays active for probabilistic sampling; confirm
    against the network definition.
    """
    net_single = prediction_network.net(network_config['network_feature']).cuda()
    net_single.load_state_dict(network_config['state_dict'])

    if (args.n_GPU > 1):
        device_ids = range(0, args.n_GPU)
        net = torch.nn.DataParallel(net_single, device_ids=device_ids).cuda()
    else:
        net = net_single

    net.train()
    return net
#enddef


def preprocess_image(image_pyca, histeq):
    """Convert a PyCA image to a numpy array, zero out NaNs, normalize to
    [0, 1] and optionally histogram-equalize the non-zero voxels."""
    image_np = common.AsNPCopy(image_pyca)
    nan_mask = np.isnan(image_np)
    image_np[nan_mask] = 0
    image_np /= np.amax(image_np)

    # perform histogram equalization if needed
    if histeq:
        image_np[image_np != 0] = exposure.equalize_hist(image_np[image_np != 0])

    return image_np


# perform deformation prediction
def predict_image(args):
    """For each moving/target pair: (optionally) affine-align to the atlas,
    sample the momentum-prediction network `args.samples` times, shoot each
    momentum geodesically, and save the warped image plus the mean and
    variance of the inverse deformation fields."""
    if (args.use_CPU_for_shooting):
        mType = ca.MEM_HOST
    else:
        mType = ca.MEM_DEVICE

    # load the prediction network
    predict_network_config = torch.load(args.prediction_parameter)
    prediction_net = create_net(args, predict_network_config)

    batch_size = args.batch_size
    patch_size = predict_network_config['patch_size']
    input_batch = torch.zeros(batch_size, 2, patch_size, patch_size, patch_size).cuda()

    # start prediction
    for i in range(0, len(args.moving_image)):
        common.Mkdir_p(os.path.dirname(args.output_prefix[i]))
        if (args.affine_align):
            # Perform affine registration to both moving and target image to
            # the ICBM152 atlas space. Registration is done using Niftireg.
            call(["reg_aladin",
                  "-noSym", "-speeeeed", "-ref", args.atlas,
                  "-flo", args.moving_image[i],
                  "-res", args.output_prefix[i]+"moving_affine.nii",
                  "-aff", args.output_prefix[i]+'moving_affine_transform.txt'])
            call(["reg_aladin",
                  "-noSym", "-speeeeed", "-ref", args.atlas,
                  "-flo", args.target_image[i],
                  "-res", args.output_prefix[i]+"target_affine.nii",
                  "-aff", args.output_prefix[i]+'target_affine_transform.txt'])
            moving_image = common.LoadITKImage(args.output_prefix[i]+"moving_affine.nii", mType)
            target_image = common.LoadITKImage(args.output_prefix[i]+"target_affine.nii", mType)
        else:
            moving_image = common.LoadITKImage(args.moving_image[i], mType)
            target_image = common.LoadITKImage(args.target_image[i], mType)

        # preprocessing of the image
        moving_image_np = preprocess_image(moving_image, args.histeq)
        target_image_np = preprocess_image(target_image, args.histeq)

        grid = moving_image.grid()
        moving_image_processed = common.ImFromNPArr(moving_image_np, mType)
        target_image_processed = common.ImFromNPArr(target_image_np, mType)
        moving_image.setGrid(grid)
        target_image.setGrid(grid)

        # networks trained from matlab-era data predict in a transformed space
        predict_transform_space = False
        if 'matlab_t7' in predict_network_config:
            predict_transform_space = True

        # run actual prediction (first sample)
        prediction_result = util.predict_momentum(moving_image_np, target_image_np, input_batch, batch_size, patch_size, prediction_net, predict_transform_space)
        m0 = prediction_result['image_space']
        m0_reg = common.FieldFromNPArr(prediction_result['image_space'], mType)
        registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
        phi = common.AsNPCopy(registration_result['phiinv'])
        phi_square = np.power(phi, 2)

        # remaining samples: accumulate momentum, phi and phi^2 for the
        # running mean/variance estimates
        for sample_iter in range(1, args.samples):
            print(sample_iter)
            prediction_result = util.predict_momentum(moving_image_np, target_image_np, input_batch, batch_size, patch_size, prediction_net, predict_transform_space)
            m0 += prediction_result['image_space']
            m0_reg = common.FieldFromNPArr(prediction_result['image_space'], mType)
            registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
            phi += common.AsNPCopy(registration_result['phiinv'])
            phi_square += np.power(common.AsNPCopy(registration_result['phiinv']), 2)

        # shoot once more with the mean momentum to obtain the mean warp
        m0_mean = np.divide(m0, args.samples)
        m0_reg = common.FieldFromNPArr(m0_mean, mType)
        registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
        phi_mean = registration_result['phiinv']
        # Var[phi] = E[phi^2] - E[phi]^2
        phi_var = np.divide(phi_square, args.samples) - np.power(np.divide(phi, args.samples), 2)

        # save result
        common.SaveITKImage(registration_result['I1'], args.output_prefix[i]+"I1.mhd")
        common.SaveITKField(phi_mean, args.output_prefix[i]+"phiinv_mean.mhd")
        common.SaveITKField(common.FieldFromNPArr(phi_var, mType), args.output_prefix[i]+"phiinv_var.mhd")
#enddef


if __name__ == '__main__':
    check_args(args)
    predict_image(args)
<import_stmt>numpy<as>np<import_from_stmt>..base BaseSKI<import_from_stmt>tods.detection_algorithm.PyodSOD SODPrimitive<class_stmt>SODSKI(BaseSKI)<block_start><def_stmt>__init__ self **hyperparams<block_start>super().__init__(primitive=SODPrimitive **hyperparams)<line_sep>self.fit_available=<true><line_sep>self.predict_available=<true><line_sep>self.produce_available=<false><block_end><block_end>
# Generated by Django 3.0.5 on 2020-07-26 15:45 <import_from_stmt>django.db migrations<def_stmt>update_show_featured apps schema_editor<block_start>Event=apps.get_model("event" "Event")<line_sep>EventSettings=apps.get_model("event" "Event_SettingsStore")<for_stmt>event Event.objects.all()<block_start>old_value=EventSettings.objects.filter(object=event key="show_sneak_peek").first()<if_stmt>old_value<and>old_value.value<eq>"False"<block_start>EventSettings.objects.create(object=event key="show_featured" value="never" )<block_end><block_end><block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("event" "0022_auto_20200124_1213") ]<line_sep>operations=[migrations.RunPython(update_show_featured migrations.RunPython.noop) ]<block_end>
<import_stmt>dash<import_stmt>dash_bio<as>dashbio<import_stmt>dash_html_components<as>html<import_stmt>dash_core_components<as>dcc<line_sep>external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css']<line_sep>app=dash.Dash(__name__ external_stylesheets=external_stylesheets)<line_sep>app.layout=html.Div(['Select which chromosomes to display on the ideogram below:' dcc.Dropdown(id='displayed-chromosomes' options=[{'label':str(i) 'value':str(i)}<for>i range(1 23)] multi=<true> value=[str(i)<for>i range(1 23)]) dashbio.Ideogram(id='my-dashbio-ideogram') html.Div(id='ideogram-rotated')])<line_sep>@app.callback(dash.dependencies.Output('my-dashbio-ideogram' 'chromosomes') [dash.dependencies.Input('displayed-chromosomes' 'value')])<def_stmt>update_ideogram value<block_start><return>value<block_end>@app.callback(dash.dependencies.Output('ideogram-rotated' 'children') [dash.dependencies.Input('my-dashbio-ideogram' 'rotated')])<def_stmt>update_ideogram_rotated rot<block_start><return>'You have {} selected a chromosome.'.format(''<if>rot<else>'not')<block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run_server(debug=<true>)<block_end>
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. <import_stmt>json<import_stmt>unittest<import_stmt>unittest.mock<as>mock<import_stmt>random<import_from_stmt>tests.common async_test<import_from_stmt>cdm.storage.github GithubAdapter<class_stmt>GithubStorageAdapterTestCase(unittest.TestCase)<block_start><def_stmt>test_make_corpus_path self<block_start>adapter=GithubAdapter()<line_sep>adapter.timeout=2000<line_sep>adapter.maximum_timeout=5000<line_sep>adapter.number_of_retries=0<line_sep># Valid path. self.assertEqual(adapter.create_corpus_path('https://raw.githubusercontent.com/Microsoft/CDM/master/schemaDocuments/dir1/dir2/file.json') '/dir1/dir2/file.json')<line_sep># Invalid path. self.assertIsNone(adapter.create_corpus_path('https://raw.githubusercontent.com/Microsoft/CDM/master/schemaDocument/dir1/dir2/file.json'))<block_end>@mock.patch('cdm.utilities.network.cdm_http_client.urllib.request.urlopen' new_callable=mock.mock_open read_data=json.dumps({'Ḽơᶉëᶆ':'ȋṕšᶙṁ'}).encode())@async_test<async_keyword><def_stmt>test_read self mock_urlopen<block_start>adapter=GithubAdapter()<line_sep>adapter.timeout=2000<line_sep>adapter.maximum_timeout=5000<line_sep>raw_data=<await>adapter.read_async('/dir1/dir2/file.json')<line_sep>data=json.loads(raw_data)<line_sep># Verify URL. self.assertEqual(mock_urlopen.call_args[0][0].full_url 'https://raw.githubusercontent.com/Microsoft/CDM/master/schemaDocuments/dir1/dir2/file.json')<line_sep>self.assertEqual(data {'Ḽơᶉëᶆ':'ȋṕšᶙṁ'})<block_end><block_end># Verify data. <if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
file1=open("./logs/pythonlog.txt" 'r+')<line_sep>avg1=0.0<line_sep>lines1=0.0<for_stmt>line file1<block_start>lines1=lines1+1.0<line_sep>avg1=(avg1+float(line))<block_end>avg1=avg1/lines1<line_sep>print(avg1 "for Python with" lines1 "lines")<line_sep>file2=open("./logs/clog.txt" 'r+')<line_sep>avg2=0.0<line_sep>lines2=0.0<for_stmt>line file2<block_start>lines2=lines2+1.0<line_sep>avg2=(avg2+float(line))<block_end>avg2=avg2/lines2<line_sep>print(avg2 "for C with" lines2 "lines")<line_sep>file3=open("./logs/cpplog.txt" 'r+')<line_sep>avg3=0.0<line_sep>lines3=0.0<for_stmt>line file3<block_start>lines3=lines3+1.0<line_sep>avg3=(avg3+float(line))<block_end>avg3=avg3/lines3<line_sep>print(avg3 "for C++ with" lines3 "lines")<line_sep>file4=open("./logs/javalog.txt" 'r+')<line_sep>avg4=0.0<line_sep>lines4=0.0<for_stmt>line file4<block_start>lines4=lines4+1.0<line_sep>avg4=(avg4+float(line))<block_end>avg4=avg4/lines4<line_sep>print(avg4 "for Java with" lines4 "lines")<line_sep>word=""<while_stmt>(word.lower()<ne>"y"<and>word.lower()<ne>"n")<block_start>word=input("Do you want to wipe the previous log? [Y/N]")<if_stmt>(word.lower()<eq>"y")<block_start>file1.truncate(0)<line_sep>file3.truncate(0)<line_sep>file2.truncate(0)<line_sep>file4.truncate(0)<block_end><block_end>print("Done.")<line_sep>file4.close()<line_sep>file3.close()<line_sep>file2.close()<line_sep>file1.close()<line_sep>
# Copyright 2021 The TensorFlow Ranking Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for Keras Estimator."""<import_stmt>os<import_from_stmt>absl.testing parameterized<import_stmt>tensorflow<as>tf<import_from_stmt>google.protobuf text_format<import_from_stmt>tensorflow_ranking.python data<import_from_stmt>tensorflow_ranking.python.keras estimator<as>estimator_lib<import_from_stmt>tensorflow_ranking.python.keras losses<import_from_stmt>tensorflow_ranking.python.keras metrics<import_from_stmt>tensorflow_ranking.python.keras model<import_from_stmt>tensorflow_ranking.python.keras network<import_from_stmt>tensorflow_serving.apis input_pb2<line_sep>_SIZE='example_list_size'<line_sep>_ELWC_PROTO=text_format.Parse(""" context { features { feature { key: "query_length" value { int64_list { value: 3 } } } } } examples { features { feature { key: "unigrams" value { bytes_list { value: "tensorflow" } } } feature { key: "utility" value { float_list { value: 0.0 } } } feature { key: "dense_feature" value { float_list { value: -0.5 value: 0.5 } } } feature { key: "doc_weight" value { float_list { value: 0.0 } } } } } examples { features { feature { key: "unigrams" value { bytes_list { value: ["learning", "to", "rank"] } } } feature { key: "utility" value { float_list { value: 1.0 } } } feature { key: "dense_feature" value { float_list { value: 0.5 value: 0.5 } } } feature { key: "doc_weight" value { float_list { value: 1.0 } } } } } """ 
input_pb2.ExampleListWithContext())<line_sep>_LABEL_FEATURE='utility'<line_sep>_PADDING_LABEL=-1.<line_sep>_EXAMPLE_WEIGHT_FEATURE='doc_weight'<def_stmt>_get_feature_columns <block_start><def_stmt>_normalizer_fn t<block_start><return>2<times>t<block_end>context_feature_columns={'query_length':tf.feature_column.numeric_column('query_length' shape=(1 ) default_value=0 dtype=tf.int64 normalizer_fn=_normalizer_fn)}<line_sep>example_feature_columns={'utility':tf.feature_column.numeric_column('utility' shape=(1 ) default_value=_PADDING_LABEL dtype=tf.float32) 'unigrams':tf.feature_column.embedding_column(tf.feature_column.categorical_column_with_vocabulary_list('unigrams' vocabulary_list=['ranking' 'regression' 'classification' 'ordinal']) dimension=10) 'dense_feature':tf.feature_column.numeric_column('dense_feature' shape=(2 ) default_value=0.0 dtype=tf.float32)}<line_sep>custom_objects={'_normalizer_fn':_normalizer_fn}<line_sep><return>context_feature_columns example_feature_columns custom_objects<block_end><def_stmt>_get_example_weight_feature_column <block_start><return>tf.feature_column.numeric_column(_EXAMPLE_WEIGHT_FEATURE dtype=tf.float32 default_value=1.)<block_end># This network needs actual layers, otherwise the estimator training fails. 
<class_stmt>_DummyUnivariateRankingNetwork(network.UnivariateRankingNetwork)<block_start>"""Dummy univariate ranking network with a simple scoring function."""<def_stmt>__init__ self context_feature_columns=<none> example_feature_columns=<none> name='dummy_ranking_network' **kwargs<block_start>super(_DummyUnivariateRankingNetwork self).__init__(context_feature_columns=context_feature_columns example_feature_columns=example_feature_columns name=name **kwargs)<line_sep>self._score_layer=tf.keras.layers.Dense(units=1)<block_end><def_stmt>score self context_features=<none> example_features=<none> training=<true><block_start>example_input=[tf.keras.layers.Flatten()(example_features[name])<for>name sorted(self.example_feature_columns)]<line_sep><return>self._score_layer(tf.concat(example_input axis=1))<block_end><block_end><class_stmt>KerasModelToEstimatorTest(tf.test.TestCase parameterized.TestCase)<block_start><def_stmt>setUp self<block_start>super(KerasModelToEstimatorTest self).setUp()<line_sep>(context_feature_columns example_feature_columns custom_objects)=_get_feature_columns()<line_sep>self._context_feature_columns=context_feature_columns<line_sep>self._example_feature_columns=example_feature_columns<line_sep># Remove label feature from example feature column. 
<del_stmt>self._example_feature_columns[_LABEL_FEATURE]<line_sep>self._custom_objects=custom_objects<line_sep>self._network=_DummyUnivariateRankingNetwork(context_feature_columns=self._context_feature_columns example_feature_columns=self._example_feature_columns)<line_sep>self._loss=losses.get(losses.RankingLossKey.SOFTMAX_LOSS reduction=tf.compat.v2.losses.Reduction.SUM_OVER_BATCH_SIZE)<line_sep>self._eval_metrics=metrics.default_keras_metrics()<line_sep>self._optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1)<line_sep>self._config=tf.estimator.RunConfig(keep_checkpoint_max=2 save_checkpoints_secs=2)<line_sep>self._data_file=os.path.join(tf.compat.v1.test.get_temp_dir() 'test_elwc.tfrecord')<line_sep>serialized_elwc_list=[_ELWC_PROTO.SerializeToString() ]<times>20<if_stmt>tf.io.gfile.exists(self._data_file)<block_start>tf.io.gfile.remove(self._data_file)<block_end><with_stmt>tf.io.TFRecordWriter(self._data_file)<as>writer<block_start><for_stmt>serialized_elwc serialized_elwc_list<block_start>writer.write(serialized_elwc)<block_end><block_end><block_end><def_stmt>tearDown self<block_start>super(KerasModelToEstimatorTest self).tearDown()<if_stmt>tf.io.gfile.exists(self._data_file)<block_start>tf.io.gfile.remove(self._data_file)<block_end>self._data_file=<none><block_end><def_stmt>_make_input_fn self weights_feature_name=<none><block_start>"""Return an input function, serves weights defined in weights_feature_name. Args: weights_feature_name: (str) A string defines the weights feature in dataset. None if no weights is used. Returns: A function serves features and labels. Weights will be served in features. 
"""<def_stmt>_input_fn <block_start>context_feature_columns,example_feature_columns,_=(_get_feature_columns())<line_sep>context_feature_spec=tf.feature_column.make_parse_example_spec(list(context_feature_columns.values()))<line_sep>label_column=tf.feature_column.numeric_column(_LABEL_FEATURE dtype=tf.float32 default_value=_PADDING_LABEL)<line_sep>weight_column=(_get_example_weight_feature_column()<if>weights_feature_name<eq>_EXAMPLE_WEIGHT_FEATURE<else><none>)<line_sep>example_fc_list=(list(example_feature_columns.values())+[label_column]+([weight_column]<if>weight_column<else>[]))<line_sep>example_feature_spec=tf.feature_column.make_parse_example_spec(example_fc_list)<line_sep>dataset=data.build_ranking_dataset(file_pattern=self._data_file data_format=data.ELWC batch_size=10 context_feature_spec=context_feature_spec example_feature_spec=example_feature_spec list_size=2 reader=tf.data.TFRecordDataset size_feature_name=_SIZE)<line_sep>features=tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()<line_sep>label=tf.squeeze(features.pop(_LABEL_FEATURE) axis=2)<line_sep><return>features label<block_end><return>_input_fn<block_end><def_stmt>test_model_to_estimator_missing_custom_objects self<block_start>keras_model=model.create_keras_model(network=self._network loss=self._loss metrics=self._eval_metrics optimizer=self._optimizer size_feature_name=_SIZE)<line_sep>estimator=estimator_lib.model_to_estimator(model=keras_model config=self._config custom_objects=<none>)<line_sep>self.assertIsInstance(estimator tf.compat.v1.estimator.Estimator)<line_sep># Train and export model. 
train_spec=tf.estimator.TrainSpec(input_fn=self._make_input_fn() max_steps=1)<line_sep>eval_spec=tf.estimator.EvalSpec(name='eval' input_fn=self._make_input_fn() steps=10)<with_stmt>self.assertRaises(AttributeError)<block_start>tf.estimator.train_and_evaluate(estimator train_spec eval_spec)<block_end><block_end>@parameterized.named_parameters(('without_weights' <none> 'predict') ('with_example_weights' _EXAMPLE_WEIGHT_FEATURE 'predict') ('pointwise_inference' <none> 'regress'))<def_stmt>test_model_to_estimator self weights_feature_name serving_default<block_start>keras_model=model.create_keras_model(network=self._network loss=self._loss metrics=self._eval_metrics optimizer=self._optimizer size_feature_name=_SIZE)<line_sep>estimator=estimator_lib.model_to_estimator(model=keras_model config=self._config weights_feature_name=weights_feature_name custom_objects=self._custom_objects serving_default=serving_default)<line_sep>self.assertIsInstance(estimator tf.compat.v1.estimator.Estimator)<line_sep># Train and export model. 
train_spec=tf.estimator.TrainSpec(input_fn=self._make_input_fn(weights_feature_name) max_steps=1)<line_sep>eval_spec=tf.estimator.EvalSpec(name='eval' input_fn=self._make_input_fn(weights_feature_name) steps=10)<line_sep>tf.estimator.train_and_evaluate(estimator train_spec eval_spec)<line_sep>context_feature_spec=tf.feature_column.make_parse_example_spec(self._context_feature_columns.values())<line_sep>example_feature_spec=tf.feature_column.make_parse_example_spec(self._example_feature_columns.values())<def_stmt>_make_serving_input_fn serving_default<block_start><if_stmt>serving_default<eq>'predict'<block_start><return>data.build_ranking_serving_input_receiver_fn(data.ELWC context_feature_spec=context_feature_spec example_feature_spec=example_feature_spec size_feature_name=_SIZE)<block_end><else_stmt><block_start><def_stmt>pointwise_serving_fn <block_start>serialized=tf.compat.v1.placeholder(dtype=tf.string shape=[<none>] name='input_ranking_tensor')<line_sep>receiver_tensors={'input_ranking_data':serialized}<line_sep>features=data.parse_from_tf_example(serialized context_feature_spec=context_feature_spec example_feature_spec=example_feature_spec size_feature_name=_SIZE)<line_sep><return>tf.estimator.export.ServingInputReceiver(features receiver_tensors)<block_end><return>pointwise_serving_fn<block_end><block_end>serving_input_receiver_fn=_make_serving_input_fn(serving_default)<line_sep>export_dir=os.path.join(tf.compat.v1.test.get_temp_dir() 'export')<line_sep>estimator.export_saved_model(export_dir serving_input_receiver_fn)<line_sep># Confirm model ran and created checkpoints and saved model. 
final_ckpt_path=os.path.join(estimator.model_dir 'model.ckpt-1.meta')<line_sep>self.assertTrue(tf.io.gfile.exists(final_ckpt_path))<line_sep>saved_model_pb=os.path.join(export_dir tf.io.gfile.listdir(export_dir)[0] 'saved_model.pb')<line_sep>self.assertTrue(tf.io.gfile.exists(saved_model_pb))<block_end><def_stmt>test_model_to_estimator_wrong_weights_name self<block_start>keras_model=model.create_keras_model(network=self._network loss=self._loss metrics=self._eval_metrics optimizer=self._optimizer size_feature_name=_SIZE)<line_sep>estimator=estimator_lib.model_to_estimator(model=keras_model config=self._config weights_feature_name='weights' custom_objects=self._custom_objects)<line_sep>self.assertIsInstance(estimator tf.compat.v1.estimator.Estimator)<line_sep># Train and export model. train_spec=tf.estimator.TrainSpec(input_fn=self._make_input_fn() max_steps=1)<line_sep>eval_spec=tf.estimator.EvalSpec(name='eval' input_fn=self._make_input_fn() steps=10)<with_stmt>self.assertRaises(ValueError)<block_start>tf.estimator.train_and_evaluate(estimator train_spec eval_spec)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
# # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>pytest<import_stmt>tensorrt<as>trt<import_from_stmt>polygraphy mod<import_from_stmt>polygraphy.backend.trt Profile network_from_onnx_bytes<import_from_stmt>tests.models.meta ONNX_MODELS<line_sep>@pytest.fixture(scope="session")<def_stmt>dynamic_identity_network <block_start>builder,network,parser=network_from_onnx_bytes(ONNX_MODELS["dynamic_identity"].loader)<with_stmt>builder network parser<block_start><yield>builder network parser<block_end><block_end><class_stmt>TestProfile(object)<block_start><def_stmt>test_can_add self<block_start>profile=Profile()<line_sep>min,opt,max=(1 1) (2 2) (4 4)<assert_stmt>profile.add("input" min=min opt=opt max=max)<is>profile<line_sep>shape_tuple=profile["input"]<assert_stmt>shape_tuple.min<eq>min<assert_stmt>shape_tuple.opt<eq>opt<assert_stmt>shape_tuple.max<eq>max<block_end>@pytest.mark.skipif(mod.version(trt.__version__)<l>mod.version("7.0") reason="Unsupported for TRT 6")<def_stmt>test_fill_defaults_does_not_overwrite self dynamic_identity_network<block_start>_,network,_=dynamic_identity_network<line_sep>profile=Profile().add("X" (1 1 1 1) (1 1 2 2) (1 1 3 3))<line_sep>profile.fill_defaults(network)<is>profile<assert_stmt>profile["X"].min<eq>(1 1 1 1)<assert_stmt>profile["X"].opt<eq>(1 1 2 2)<assert_stmt>profile["X"].max<eq>(1 1 3 3)<block_end>@pytest.mark.skipif(mod.version(trt.__version__)<l>mod.version("7.0") 
reason="Unsupported for TRT 6")<def_stmt>test_to_trt self dynamic_identity_network<block_start>builder,network,_=dynamic_identity_network<line_sep>profile=Profile().add("X" (1 2 1 1) (1 2 2 2) (1 2 4 4))<line_sep>trt_profile=profile.to_trt(builder network)<line_sep>trt_profile.get_shape("X")<eq>((1 2 1 1) (1 2 2 2) (1 2 4 4))<block_end><block_end>
# # Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/ # Written by <NAME> <<EMAIL>>, # <NAME> <<EMAIL>> # <import_stmt>unittest<import_stmt>torch<import_from_stmt>fast_transformers.attention.attention_layer AttentionLayer<class_stmt>TestAttentionLayer(unittest.TestCase)<block_start><def_stmt>_assert_sizes_attention self qshape kshape vshape<block_start><def_stmt>inner q k v m1 m2 m3<block_start>self.assertEqual(q.shape qshape)<line_sep>self.assertEqual(k.shape kshape)<line_sep>self.assertEqual(v.shape vshape)<line_sep>N,L,H,E=q.shape<line_sep>_,S,_,D=v.shape<line_sep><return>v.new_zeros((N L H D))<block_end><return>inner<block_end><def_stmt>test_forward self<block_start>att=AttentionLayer(self._assert_sizes_attention((10 5 4 25) (10 8 4 25) (10 8 4 25)) 100 4)<line_sep>v=att(torch.rand(10 5 100) torch.rand(10 8 100) torch.rand(10 8 100) <none> <none> <none>)<line_sep>self.assertEqual(v.shape (10 5 100))<line_sep>att=AttentionLayer(self._assert_sizes_attention((10 5 4 32) (10 8 4 32) (10 8 4 64)) 100 4 d_keys=32 d_values=64)<line_sep>v=att(torch.rand(10 5 100) torch.rand(10 8 100) torch.rand(10 8 100) <none> <none> <none>)<line_sep>self.assertEqual(v.shape (10 5 100))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
<if_stmt>__name__<eq>'__main__'<block_start><import_stmt>sys<import_stmt>pickle<import_from_stmt>multiprocessing current_process<import_from_stmt>multiprocessing.spawn import_main_path<line_sep>data=pickle.load(sys.stdin.buffer)<line_sep>current_process().authkey=data['authkey']<line_sep>sys.path=data['path']<line_sep>import_main_path(data['main'])<line_sep>impl=pickle.loads(data['impl'])<import_from_stmt>pulsar.async.concurrency run_actor<line_sep>run_actor(impl)<block_end>
<import_from_stmt>django template<import_from_stmt>django.utils.encoding force_str<import_from_stmt>django.utils.functional keep_lazy<import_from_stmt>django.utils.safestring SafeText mark_safe<import_from_stmt>django.template.defaultfilters stringfilter<line_sep>register=template.Library()<line_sep>_slack_escapes={ord('&'):u'&amp;' ord('<'):u'&lt;' ord('>'):u'&gt;' }<line_sep>@keep_lazy(str SafeText)@register.filter(is_safe=<true>)@stringfilter<def_stmt>escapeslack value<block_start>""" Returns the given text with ampersands and angle brackets encoded for use in the Slack API, per the Slack API documentation: <https://api.slack.com/docs/formatting#how_to_escape_characters> This is based on django.template.defaultfilters.escapejs. """<line_sep><return>mark_safe(force_str(value).translate(_slack_escapes))<block_end>
# Copyright 2018 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. """Tests TestEnvironmentDecorator."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os<import_stmt>tempfile<import_stmt>unittest<import_stmt>numpy<as>np<import_stmt>six<import_from_stmt>PIL Image<import_from_stmt>python.tests.utils test_environment_decorator<line_sep>_OBSERVATION_SPEC=[{'name':'RGB_INTERLEAVED' 'shape':[1 2 3]}]<class_stmt>EnvironmentStub(object)<block_start><def_stmt>__init__ self<block_start>self.test_observation_spec=_OBSERVATION_SPEC<line_sep>self.test_observations=[{'RGB_INTERLEAVED':np.array([[[255 0 0] [128 0 0] [0 0 255]] [[0 255 0] [128 0 0] [0 255 0]]] dtype=np.uint8)} {'RGB_INTERLEAVED':np.array([[[0 255 0] [0 128 0]]] dtype=np.uint8)} {'RGB_INTERLEAVED':np.array([[[0 0 255] [0 0 128]]] dtype=np.uint8)} ]<line_sep>self.test_rewards=[0 1 2 3]<line_sep>self._frame_index=0<line_sep>self.last_actions=<none><line_sep>self.last_steps=<none><line_sep>self.events_return=<none><line_sep>self.is_running_return=<none><line_sep>self.action_spec_return=<none><line_sep>self.reset_return=<none><block_end><def_stmt>step self actions 
steps<block_start>self.last_actions=actions<line_sep>self.last_steps=steps<line_sep>self._frame_index<augadd>1<line_sep><return>self.test_rewards[self._frame_index-1]<block_end><def_stmt>is_running self<block_start><return>self.is_running_return<block_end><def_stmt>observations self<block_start><return>self.test_observations[self._frame_index]<block_end><def_stmt>events self<block_start><return>self.events_return<block_end><def_stmt>action_spec self<block_start><return>self.action_spec_return<block_end><def_stmt>observation_spec self<block_start><return>self.test_observation_spec<block_end><def_stmt>reset self **_<block_start>self._frame_index=0<line_sep><return>self.reset_return<block_end><def_stmt>num_steps self<block_start><return>self._frame_index<block_end><block_end><class_stmt>TestEnvironmentDecoratorTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self._env=EnvironmentStub()<line_sep>self._decorator=test_environment_decorator.TestEnvironmentDecorator(self._env)<block_end><def_stmt>testStepIsCalled self<block_start>actions=object()<line_sep>steps=3<line_sep>self.assertEqual(self._decorator.step(actions steps) self._env.test_rewards[0])<line_sep>self.assertEqual(self._env.last_actions actions)<line_sep>self.assertEqual(self._env.last_steps steps)<block_end><def_stmt>testAccumulatedReward self<block_start>self._decorator.step(<none> 1)<line_sep>self._decorator.step(<none> 1)<line_sep>self.assertEqual(self._decorator.accumulated_reward() np.sum(self._env.test_rewards[0:2]))<block_end><def_stmt>testResetAccumulatedReward self<block_start>self._decorator.step(<none> 1)<line_sep>self._decorator.reset()<line_sep>self.assertEqual(self._decorator.accumulated_reward() 0)<block_end><def_stmt>testRewardHistory self<block_start>self._decorator.step(<none> 1)<line_sep>self._decorator.step(<none> 1)<line_sep>six.assertCountEqual(self self._decorator.reward_history() self._env.test_rewards[0:2])<block_end><def_stmt>testResetRewardHistory 
self<block_start>self._decorator.step(<none> 1)<line_sep>self._decorator.reset()<line_sep>six.assertCountEqual(self self._decorator.reward_history() [])<block_end><def_stmt>testAccumulatedEvents self<block_start>events=['event1' 'event2' 'event3']<line_sep>self._env.events_return=events[0]<line_sep>self._decorator.reset()<line_sep>self._env.events_return=events[1]<line_sep>self._decorator.step(<none> 1)<line_sep>self._env.events_return=events[2]<line_sep>self._decorator.step(<none> 1)<line_sep>six.assertCountEqual(self self._decorator.accumulated_events() events)<block_end><def_stmt>testResetAccumulatedEvents self<block_start>events=['event1' 'event2']<line_sep>self._env.events_return=events[0]<line_sep>self._decorator.step(<none> 1)<line_sep>self._env.events_return=events[1]<line_sep>self._decorator.reset()<line_sep>six.assertCountEqual(self self._decorator.accumulated_events() [events[1]])<block_end><def_stmt>testObservationDelegation self<block_start>self.assertEqual(self._env.test_observations[0] self._decorator.observations())<block_end><def_stmt>testObservationSpecDelegation self<block_start>self.assertEqual(self._env.test_observation_spec self._decorator.observation_spec())<block_end><def_stmt>testNumSteps self<block_start>self._decorator.reset()<line_sep>self.assertEqual(self._decorator.num_steps() 0)<line_sep>self._decorator.step(<none> <none>)<line_sep>self.assertEqual(self._decorator.num_steps() 1)<block_end><def_stmt>testMethodDelegation self<block_start>method_names=['is_running' 'events' 'action_spec' 'reset']<for_stmt>name method_names<block_start>result=object()<line_sep>setattr(self._env name+'_return' result)<line_sep>self.assertEqual(getattr(self._decorator name)() result)<block_end><block_end><def_stmt>testSavingFrames self<block_start>self._decorator.reset()<line_sep>self._decorator.step(<none> 1)<line_sep>self._decorator.step(<none> 
1)<line_sep>temp_dir=tempfile.mkdtemp()<line_sep>self._decorator.save_frames(temp_dir)<for_stmt>index,observation enumerate(self._env.test_observations)<block_start>expected_image=observation['RGB_INTERLEAVED']<line_sep>image_file_name=os.path.join(temp_dir 'frame{0}.png'.format(index))<line_sep>image=np.asarray(Image.open(image_file_name))<line_sep>self.assertTrue(np.array_equal(image expected_image))<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>torch<line_sep># import torch.nn as nn # import torch.nn.functional as F <import_stmt>numpy<as>np<import_from_stmt>torch.autograd Variable<line_sep># import torchwordemb <import_stmt>torch.optim<as>optim<import_stmt>sys<import_stmt>time<import_stmt>gc<import_stmt>pickle<import_stmt>os<import_stmt>models2<as>m<import_stmt>enc_model<as>m3<line_sep># import pandas as pd # from util import * <import_stmt>torch.utils.data<line_sep># from sklearn.metrics import auc # from sklearn import metrics ''' General Training Script for PyTorch Models -- Modified to accommodate more flexible LSTM structure '''<if_stmt>__name__<eq>'__main__'<block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--nClassGender" type=int default=2)# Number of classes in gender variable parser.add_argument("--nClassRace" type=int default=25)# Number of classes in race variable parser.add_argument("--nClassEthnic" type=int default=29)# Number of classes in ethnic variable parser.add_argument("--modelName" default="Enc_CNN_LSTM")<line_sep>parser.add_argument("--dimLSTM" type=int default=128)# LSTM dimension parser.add_argument("--dimLSTM_num" type=int default=128)# LSTM dimension for numericals parser.add_argument("--p_dropOut" type=float default=.5)<line_sep>parser.add_argument("--batch_norm" action='store_true')<line_sep>parser.add_argument("--bidir" action='store_true')<line_sep>parser.add_argument("--train_embed" action='store_true')<line_sep>parser.add_argument("--rnnType" default="GRU")<line_sep>parser.add_argument("--enc_len" type=int default=20)<line_sep>parser.add_argument("--doc_len" type=int default=1000)<line_sep>parser.add_argument("--n_iter" type=int default=10)<line_sep>parser.add_argument("--lr" type=float default=0.001)<line_sep>parser.add_argument("--lr_decay3" type=int default=10)# Decay learning rate every lr_decay3 epochs parser.add_argument("--i" type=int default=1)# Index of the element in the parameter set to be 
tuned parser.add_argument("--batchSizePos" type=int default=16)<line_sep>parser.add_argument("--batchSizeNeg" type=int default=0)<line_sep>parser.add_argument("--num_workers" type=int default=4)<line_sep>parser.add_argument("--flg_cuda" action='store_true')<line_sep>parser.add_argument("--emb_dim" type=int default=300)# Embedding dimension parser.add_argument("--logInterval" type=int default=1)# Print test accuracy every n epochs parser.add_argument("--flgSave" action='store_true')<line_sep>parser.add_argument("--savePath" default='./')<line_sep>parser.add_argument("--filters" type=int default=128)<line_sep>parser.add_argument("--nK" type=int default=3)# Number of kernels parser.add_argument("--randSeed" type=int default=42)<line_sep>parser.add_argument("--posThres" type=float default=0.5)<line_sep>parser.add_argument("--inputPath" default="/ifs/data/razavianlab/encSeq_input/dim50/")<line_sep>parser.add_argument("--alpha_L1" type=float default=0.0)<line_sep>parser.add_argument("--randn_std" type=float default=<none>)<line_sep>parser.add_argument("--flgBias" action='store_true')<line_sep>parser.add_argument("--flg_gradClip" action='store_true')<line_sep>parser.add_argument("--flg_AllLSTM" action='store_true')<line_sep>parser.add_argument("--flg_useNum" action='store_true')<line_sep>args=parser.parse_args()<line_sep>torch.manual_seed(args.randSeed)# For reproducible results <if_stmt>args.flgSave<block_start><if_stmt><not>os.path.isdir(args.savePath)<block_start>os.mkdir(args.savePath)<block_end><block_end># args.d = ['chf', 'kf', 'str'][args.i -1] # lsAlpha = [0.0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1] # args.alpha_L1 = lsAlpha[args.i -1] <if_stmt>args.flg_AllLSTM<block_start>dimLSTM=args.dimLSTM<times>args.enc_len<block_end><else_stmt><block_start>dimLSTM=args.dimLSTM<block_end>lsDim=[[dimLSTM 256 3] [dimLSTM 512 256 3]][args.i-1]<line_sep>print('General parameters: ' args)<line_sep>unique=<false><line_sep>print("Loading Data")<line_sep># if args.modelName in 
['Enc_SumLSTM', 'Enc_CNN_LSTM']: embedding=pickle.load(open(args.inputPath+'embedding.p' 'rb'))<line_sep>embedding=torch.from_numpy(embedding).float()<if_stmt>args.modelName<in>['Enc_SumLSTM' 'Enc_CNN_LSTM' 'DemoLab' 'Enc_CNN_LSTM_DemoLab']<block_start>trainset_pos=m3.encDataset(args.inputPath 'dfTrainPos.json' args.nClassGender args.nClassRace args.nClassEthnic transform=m3.padOrTruncateToTensor(args.enc_len args.doc_len))<line_sep>trainset_neg=m3.encDataset(args.inputPath 'dfTrainNeg.json' args.nClassGender args.nClassRace args.nClassEthnic transform=m3.padOrTruncateToTensor(args.enc_len args.doc_len))<line_sep>testset=m3.encDataset(args.inputPath 'dfDev.json' args.nClassGender args.nClassRace args.nClassEthnic transform=m3.padOrTruncateToTensor(args.enc_len args.doc_len))<block_end><else_stmt><block_start>trainset_pos=m3.staticDataset(args.inputPath 'dfTrainPos.json' args.nClassGender args.nClassRace args.nClassEthnic transform=m3.padOrTruncateToTensor(args.enc_len args.doc_len))<line_sep>trainset_neg=m3.staticDataset(args.inputPath 'dfTrainNeg.json' args.nClassGender args.nClassRace args.nClassEthnic transform=m3.padOrTruncateToTensor(args.enc_len args.doc_len))<line_sep>testset=m3.staticDataset(args.inputPath 'dfDev.json' args.nClassGender args.nClassRace args.nClassEthnic transform=m3.padOrTruncateToTensor(args.enc_len args.doc_len))<block_end>print('To Loader')<if_stmt>args.flg_cuda<block_start>train_loader_pos=torch.utils.data.DataLoader(trainset_pos batch_size=args.batchSizePos shuffle=<true> pin_memory=<true>)<line_sep>test_loader=torch.utils.data.DataLoader(testset batch_size=args.batchSizePos+args.batchSizeNeg shuffle=<false> pin_memory=<true>)<if_stmt>trainset_neg<is><not><none><block_start>train_loader_neg=torch.utils.data.DataLoader(trainset_neg batch_size=args.batchSizeNeg shuffle=<true> pin_memory=<true>)<block_end><block_end><else_stmt><block_start>train_loader_pos=torch.utils.data.DataLoader(trainset_pos batch_size=args.batchSizePos 
shuffle=<true> pin_memory=<false>)<line_sep>test_loader=torch.utils.data.DataLoader(testset batch_size=args.batchSizePos+args.batchSizeNeg shuffle=<false> pin_memory=<false>)<if_stmt>trainset_neg<is><not><none><block_start>train_loader_neg=torch.utils.data.DataLoader(trainset_neg batch_size=args.batchSizeNeg shuffle=<true> pin_memory=<false>)<block_end><block_end>model_paras={'enc_len':args.enc_len 'doc_len':args.doc_len 'flg_updateEmb':args.train_embed 'flg_bn':args.batch_norm 'rnnType':args.rnnType 'bidir':args.bidir 'p_dropOut':args.p_dropOut 'lsDim':lsDim 'dimLSTM':args.dimLSTM 'flg_cuda':args.flg_cuda 'filters':args.filters 'Ks':[i+1<for>i range(args.nK)] 'randn_std':args.randn_std 'lastRelu':<true> 'flgBias':args.flgBias 'flg_AllLSTM':args.flg_AllLSTM 'flg_useNum':args.flg_useNum 'dimLSTM_num':args.dimLSTM_num}<line_sep>print('Model parameters: ' model_paras)<if_stmt>args.modelName<in>['Enc_SumLSTM' 'Enc_CNN_LSTM' 'DemoLab' 'Enc_CNN_LSTM_DemoLab']<block_start>model=getattr(m3 args.modelName)(model_paras embedding)<block_end><else_stmt><block_start><import_from_stmt>argparse Namespace<line_sep>static_model_args=Namespace()<line_sep>static_model_args.dropout=args.p_dropOut<line_sep>static_model_args.batch_norm=args.batch_norm<line_sep>static_model_args.kernels=args.nK<line_sep>static_model_args.bidir=args.bidir<line_sep>static_model_args.train_embed=args.train_embed<line_sep>static_model_args.max_len=2000<line_sep>static_model_args.n_out=3<line_sep>static_model_args.h=args.filters<line_sep>static_model_args.n_demo_feat=208<line_sep>model=getattr(m args.modelName)(embedding static_model_args)<block_end><if_stmt>args.flg_cuda<block_start>model=model.cuda()<block_end>print(model)<line_sep>opt=optim.Adam(model.params lr=args.lr)<line_sep>print("Beginning Training")<line_sep>train_paras={'n_iter':args.n_iter 'log_interval':[args.logInterval 1000] 'flg_cuda':args.flg_cuda 'lr_decay':[args.lr 0.9 args.lr_decay3 1e-5] 'flgSave':args.flgSave 'savePath':args.savePath 
'posThres':args.posThres 'alpha_L1':args.alpha_L1 'flg_gradClip':args.flg_gradClip}<line_sep>m=m3.trainModel(train_paras train_loader_pos test_loader model opt train_loader_neg=train_loader_neg)<line_sep>_,lsTrainAccuracy,lsTestAccuracy=m.run()<line_sep>testAuc=[np.mean(x[1])<for>x lsTestAccuracy]<line_sep>print('Test AUC max: %.3f'%(max(testAuc)))<line_sep>print('Test AUC final: %.3f'%(testAuc[-1]))<line_sep>stopIdx=min(testAuc.index(max(testAuc))<times>args.logInterval args.n_iter)<line_sep>print('Stop at: %d'%(stopIdx))<block_end>
<import_stmt>logging<import_stmt>uuid<import_from_stmt>typing Any<import_stmt>pytest<import_stmt>requests<import_stmt>test_helpers<import_from_stmt>dcos_test_utils marathon<import_from_stmt>dcos_test_utils.dcos_api DcosApiSession<line_sep>__maintainer__='kensipe'<line_sep>__contact__='<EMAIL>'<line_sep>log=logging.getLogger(__name__)<def_stmt>deploy_test_app_and_check dcos_api_session:DcosApiSession app:dict test_uuid:str<arrow><none><block_start>"""This method deploys the test server app and then pings its /operating_environment endpoint to retrieve the container user running the task. In a mesos container, this will be the marathon user In a docker container this user comes from the USER setting from the app's Dockerfile, which, for the test application is the default, root """<line_sep>expanded_config=test_helpers.get_expanded_config()<line_sep>default_os_user='nobody'<if>expanded_config.get('security')<eq>'strict'<else>'root'<if_stmt>'container'<in>app<and>app['container']['type']<eq>'DOCKER'<block_start>marathon_user='root'<block_end><else_stmt><block_start>marathon_user=app.get('user' default_os_user)<block_end><with_stmt>dcos_api_session.marathon.deploy_and_cleanup(app)<block_start>service_points=dcos_api_session.marathon.get_app_service_endpoints(app['id'])<line_sep>r=requests.get('http://{}:{}/test_uuid'.format(service_points[0].host service_points[0].port))<if_stmt>r.status_code<ne>200<block_start>msg="Test server replied with non-200 reply: '{0} {1}. "<line_sep>msg<augadd>"Detailed explanation of the problem: {2}"<line_sep><raise>Exception(msg.format(r.status_code r.reason r.text))<block_end>r_data=r.json()<assert_stmt>r_data['test_uuid']<eq>test_uuid<line_sep>r=requests.get('http://{}:{}/operating_environment'.format(service_points[0].host service_points[0].port))<if_stmt>r.status_code<ne>200<block_start>msg="Test server replied with non-200 reply: '{0} {1}. 
"<line_sep>msg<augadd>"Detailed explanation of the problem: {2}"<line_sep><raise>Exception(msg.format(r.status_code r.reason r.text))<block_end>json_uid=r.json()['uid']<if_stmt>marathon_user<eq>'root'<block_start><assert_stmt>json_uid<eq>0 "App running as root should have uid 0."<block_end><else_stmt><block_start><assert_stmt>json_uid<ne>0 ("App running as {} should not have uid 0.".format(marathon_user))<block_end><block_end><block_end>@pytest.mark.first<def_stmt>test_docker_image_availablity <arrow><none><block_start><assert_stmt>test_helpers.docker_pull_image("debian:stretch-slim") "docker pull failed for image used in the test"<block_end><def_stmt>test_if_marathon_app_can_be_deployed dcos_api_session:DcosApiSession<arrow><none><block_start>"""Marathon app deployment integration test This test verifies that marathon app can be deployed, and that service points returned by Marathon indeed point to the app that was deployed. The application being deployed is a simple http server written in python. Please test_server.py for more details. This is done by assigning an unique UUID to each app and passing it to the docker container as an env variable. After successful deployment, the "GET /test_uuid" request is issued to the app. If the returned UUID matches the one assigned to test - test succeeds. """<line_sep>deploy_test_app_and_check(dcos_api_session *test_helpers.marathon_test_app())<block_end><def_stmt>test_if_docker_app_can_be_deployed dcos_api_session:DcosApiSession<arrow><none><block_start>"""Marathon app inside docker deployment integration test. Verifies that a marathon app inside of a docker daemon container can be deployed and accessed as expected. 
"""<line_sep>deploy_test_app_and_check(dcos_api_session *test_helpers.marathon_test_app(network=marathon.Network.BRIDGE container_type=marathon.Container.DOCKER container_port=9080))<block_end>@pytest.mark.parametrize('healthcheck' [marathon.Healthcheck.HTTP marathon.Healthcheck.MESOS_HTTP ])<def_stmt>test_if_ucr_app_can_be_deployed dcos_api_session:DcosApiSession healthcheck:Any<arrow><none><block_start>"""Marathon app inside ucr deployment integration test. Verifies that a marathon docker app inside of a ucr container can be deployed and accessed as expected. """<line_sep>deploy_test_app_and_check(dcos_api_session *test_helpers.marathon_test_app(container_type=marathon.Container.MESOS healthcheck_protocol=healthcheck))<block_end><def_stmt>test_if_marathon_app_can_be_deployed_with_mesos_containerizer dcos_api_session:DcosApiSession<arrow><none><block_start>"""Marathon app deployment integration test using the Mesos Containerizer This test verifies that a Marathon app using the Mesos containerizer with a Docker image can be deployed. This is done by assigning an unique UUID to each app and passing it to the docker container as an env variable. After successfull deployment, the "GET /test_uuid" request is issued to the app. If the returned UUID matches the one assigned to test - test succeds. When port mapping is available (MESOS-4777), this test should be updated to reflect that. """<line_sep>deploy_test_app_and_check(dcos_api_session *test_helpers.marathon_test_app(container_type=marathon.Container.MESOS))<block_end><def_stmt>test_if_marathon_app_can_be_deployed_with_nfs_csi_volume dcos_api_session:DcosApiSession<arrow><none><block_start>"""Marathon app deployment integration test using an NFS CSI volume. This test verifies that a Marathon app can be deployed which attaches to an NFS volume provided by the NFS CSI plugin. In order to accomplish this, we must first set up an NFS share on one agent. 
"""<line_sep># We will run an NFS server on one agent and an app on another agent to # verify CSI volume functionality. <if_stmt>len(dcos_api_session.slaves)<l>2<block_start>pytest.skip("CSI Volume Tests require a minimum of two agents.")<block_end>expanded_config=test_helpers.get_expanded_config()<if_stmt>expanded_config.get('security')<eq>'strict'<block_start>pytest.skip('Cannot setup NFS server as root user with EE strict mode enabled')<block_end>test_uuid=uuid.uuid4().hex<line_sep>hosts=dcos_api_session.slaves[0] dcos_api_session.slaves[1]<line_sep># A helper to run a Metronome job as root to clean up the NFS share on an agent. # We define this here so that it can be used during error handling. <def_stmt>cleanup_nfs <arrow><none><block_start>cleanup_command=""" sudo systemctl stop nfs-server && \ echo '' | sudo tee /etc/exports && \ sudo systemctl restart nfs-utils && \ sudo exportfs -arv && \ sudo rm -rf /var/lib/dcos-nfs-shares/test-volume-001 """<line_sep>cleanup_job={'description':'Clean up NFS share' 'id':'nfs-share-cleanup-{}'.format(test_uuid) 'run':{'cmd':cleanup_command 'cpus':0.5 'mem':256 'disk':32 'user':'root' 'restart':{'policy':'ON_FAILURE'} 'placement':{'constraints':[{'attribute':'@hostname' 'operator':'LIKE' 'value':hosts[0]}]}}}<line_sep>dcos_api_session.metronome_one_off(cleanup_job)<block_end># Run a Metronome job as root to set up the NFS share on an agent. 
command="""sudo mkdir -p /var/lib/dcos-nfs-shares/test-volume-001 && \ sudo chown -R nobody: /var/lib/dcos-nfs-shares/test-volume-001 && \ sudo chmod 777 /var/lib/dcos-nfs-shares/test-volume-001 && \ echo '/var/lib/dcos-nfs-shares/test-volume-001 *(rw,sync)' | sudo tee /etc/exports && \ sudo systemctl restart nfs-utils && \ sudo exportfs -arv && \ sudo systemctl start nfs-server && \ sudo systemctl enable nfs-server """<line_sep>setup_job={'description':'Set up NFS share' 'id':'nfs-share-setup-{}'.format(test_uuid) 'run':{'cmd':command 'cpus':0.5 'mem':256 'disk':32 'user':'root' 'restart':{'policy':'ON_FAILURE'} 'placement':{'constraints':[{'attribute':'@hostname' 'operator':'LIKE' 'value':hosts[0]}]}}}<line_sep>dcos_api_session.metronome_one_off(setup_job)<line_sep># Create an app which writes to the NFS volume. app={'id':'csi-nfs-write-app-{}'.format(test_uuid) 'instances':1 'cpus':0.5 'mem':256 'cmd':'echo some-stuff > test-volume-dir/output && sleep 999999' 'user':'root' 'container':{'type':'MESOS' 'volumes':[{'mode':'rw' 'containerPath':'test-volume-dir' 'external':{'provider':'csi' 'name':'test-volume-001' 'options':{'pluginName':'nfs.csi.k8s.io' 'capability':{'accessType':'mount' 'accessMode':'MULTI_NODE_MULTI_WRITER' 'fsType':'nfs'} 'volumeContext':{'server':hosts[0] 'share':'/var/lib/dcos-nfs-shares/test-volume-001'}}}}]} 'constraints':[['hostname' 'LIKE' hosts[1]]] 'healthChecks':[{'protocol':'COMMAND' 'command':{'value':'test `cat test-volume-dir/output` = some-stuff'} 'gracePeriodSeconds':5 'intervalSeconds':10 'timeoutSeconds':10 'maxConsecutiveFailures':3}]}<try_stmt><block_start><with_stmt>dcos_api_session.marathon.deploy_and_cleanup(app)# Trivial app if it deploys, there is nothing else to check <block_start><pass><block_end><block_end><except_stmt>Exception<as>error<block_start><raise>(error)<block_end><finally_stmt><block_start>cleanup_nfs()<block_end><block_end><def_stmt>test_if_marathon_pods_can_be_deployed_with_mesos_containerizer 
dcos_api_session:DcosApiSession<arrow><none><block_start>"""Marathon pods deployment integration test using the Mesos Containerizer This test verifies that a Marathon pods can be deployed. """<line_sep>test_uuid=uuid.uuid4().hex<line_sep># create pod with trivial apps that function as long running processes pod_definition={'id':'/integration-test-pods-{}'.format(test_uuid) 'scaling':{'kind':'fixed' 'instances':1} 'environment':{'PING':'PONG'} 'containers':[{'name':'ct1' 'resources':{'cpus':0.1 'mem':32} 'image':{'kind':'DOCKER' 'id':'debian:stretch-slim'} 'exec':{'command':{'shell':'touch foo; while true; do sleep 1; done'}} 'healthcheck':{'command':{'shell':'test -f foo'}}} {'name':'ct2' 'resources':{'cpus':0.1 'mem':32} 'exec':{'command':{'shell':'echo $PING > foo; while true; do sleep 1; done'}} 'healthcheck':{'command':{'shell':'test $PING = `cat foo`'}}}] 'networks':[{'mode':'host'}]}<with_stmt>dcos_api_session.marathon.deploy_pod_and_cleanup(pod_definition)# Trivial app if it deploys, there is nothing else to check <block_start><pass><block_end><block_end>
<import_stmt>torch.utils.data<as>data<import_stmt>pdb<import_from_stmt>PIL Image<import_stmt>os<import_stmt>os.path<import_stmt>numpy<as>np<line_sep>IMG_EXTENSIONS=['.jpg' '.JPG' '.jpeg' '.JPEG' '.png' '.PNG' '.ppm' '.PPM' '.bmp' '.BMP' ]<def_stmt>is_image_file filename<block_start><return>any(filename.endswith(extension)<for>extension IMG_EXTENSIONS)<block_end><def_stmt>dataloader filepath typ='train'<block_start>left_fold='image_2/'<line_sep>right_fold='image_3/'<line_sep>disp_L='disp_occ_0/'<line_sep>disp_R='disp_occ_1/'<line_sep>image=[img<for>img os.listdir(filepath+left_fold)<if>img.find('_10')<g>-1]<line_sep>image=sorted(image)<line_sep>imglist=[1 3 6 20 26 35 38 41 43 44 49 60 67 70 81 84 89 97 109 119 122 123 129 130 132 134 141 144 152 158 159 165 171 174 179 182 184 186 187 196]<if_stmt>typ<eq>'train'<block_start>train=[image[i]<for>i range(200)<if>i<not><in>imglist]<block_end><elif_stmt>typ<eq>'trainval'<block_start>train=[image[i]<for>i range(200)]<block_end>val=[image[i]<for>i imglist]<line_sep>left_train=[filepath+left_fold+img<for>img train]<line_sep>right_train=[filepath+right_fold+img<for>img train]<line_sep>disp_train_L=[filepath+disp_L+img<for>img train]<line_sep>#disp_train_R = [filepath+disp_R+img for img in train] left_val=[filepath+left_fold+img<for>img val]<line_sep>right_val=[filepath+right_fold+img<for>img val]<line_sep>disp_val_L=[filepath+disp_L+img<for>img val]<line_sep>#disp_val_R = [filepath+disp_R+img for img in val] <return>left_train right_train disp_train_L left_val right_val disp_val_L<block_end>
""" Keyterms -------- :mod:`textacy.extract.keyterms`: Extract keyterms from documents using a variety of rule-based algorithms. """<import_from_stmt>.scake scake<import_from_stmt>.sgrank sgrank<import_from_stmt>.textrank textrank<import_from_stmt>.yake yake<line_sep>
<import_from_stmt>.default *<line_sep>THUMBNAIL_ENGINE='sorl.thumbnail.engines.convert_engine.Engine'<line_sep>THUMBNAIL_CONVERT='convert'<line_sep>
<import_from_stmt>django.test TestCase<line_sep># Create your tests here. # TODO add test to validate application scopes are enforced when creating # TODO add test to validate that application redirect and refresh URL match the site's base url # TODO add unicode handling tests # TODO test large attachments # TODO basic tests for getting and deleting messages, attachments
<import_from_future_stmt> absolute_import<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>logging<import_stmt>mimetypes<import_stmt>os<import_stmt>re<import_stmt>time<import_stmt>zipfile<import_stmt>html5lib<import_from_stmt>cheroot wsgi<import_from_stmt>django.core.cache cache<import_from_stmt>django.core.handlers.wsgi WSGIRequest<import_from_stmt>django.http HttpResponse<import_from_stmt>django.http HttpResponseNotAllowed<import_from_stmt>django.http HttpResponseNotFound<import_from_stmt>django.http HttpResponseNotModified<import_from_stmt>django.http.response FileResponse<import_from_stmt>django.http.response StreamingHttpResponse<import_from_stmt>django.utils.cache patch_response_headers<import_from_stmt>django.utils.encoding force_str<import_from_stmt>django.utils.http http_date<import_from_stmt>kolibri.core.content.errors InvalidStorageFilenameError<import_from_stmt>kolibri.core.content.utils.paths get_content_storage_file_path<import_from_stmt>kolibri.core.content.utils.paths get_zip_content_base_path<line_sep>logger=logging.getLogger(__name__)<def_stmt>add_security_headers request response<block_start>response["Access-Control-Allow-Origin"]="*"<line_sep>response["Access-Control-Allow-Methods"]="GET, OPTIONS"<line_sep>requested_headers=request.META.get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS" "")<if_stmt>requested_headers<block_start>response["Access-Control-Allow-Headers"]=requested_headers<block_end># restrict CSP to only allow resources to be loaded from self, to prevent info leakage # (e.g. 
via passing user info out as GET parameters to an attacker's server), or inadvertent data usage response["Content-Security-Policy"]="default-src 'self' 'unsafe-inline' 'unsafe-eval' data: blob:"<line_sep><return>response<block_end><def_stmt>django_response_to_wsgi response environ start_response<block_start>status="%d %s"%(response.status_code response.reason_phrase)<line_sep>response_headers=[(str(k) str(v))<for>k,v response.items()]<for_stmt>c response.cookies.values()<block_start>response_headers.append((str("Set-Cookie") str(c.output(header=""))))<block_end>start_response(force_str(status) response_headers)<if_stmt>getattr(response "file_to_stream" <none>)<is><not><none><and>environ.get("wsgi.file_wrapper")<block_start>response=environ["wsgi.file_wrapper"](response.file_to_stream)<block_end><return>response<block_end>allowed_methods=set(["GET" "OPTIONS"])<line_sep># This is also included in packages/hashi/src/h5p.html # ideally, we should never ever update this code # but if we do we should update it there. INITIALIZE_HASHI_FROM_IFRAME="if (window.parent && window.parent.hashi) {try {window.parent.hashi.initializeIframe(window);} catch (e) {}}"<def_stmt>parse_html content<block_start><try_stmt><block_start>document=html5lib.parse(content namespaceHTMLElements=<false>)<if_stmt><not>document# Could not parse <block_start><return>content<block_end># Because html5lib parses like a browser, it will # always create head and body tags if they are missing. head=document.find("head")<line_sep># Use the makeelement method of the head tag here to ensure that we use the same # Element class for both. Depending on the system and python version we are on, # we may be using the C implementation or the pure python and a mismatch will cause an error. 
script_tag=head.makeelement("script" {"type":"text/javascript"})<line_sep>script_tag.text=INITIALIZE_HASHI_FROM_IFRAME<line_sep>head.insert(0 script_tag)<line_sep># Currently, html5lib strips the doctype, but it's important for correct rendering, so check the original # content for the doctype and, if found, prepend it to the content serialized by html5lib doctype=<none><try_stmt># Now parse the content as a dom tree instead, so that we capture # any doctype node as a dom node that we can read. <block_start>tree_builder_dom=html5lib.treebuilders.getTreeBuilder("dom")<line_sep>parser_dom=html5lib.HTMLParser(tree_builder_dom namespaceHTMLElements=<false>)<line_sep>tree=parser_dom.parse(content)<line_sep># By HTML Spec if doctype is included, it must be the first thing # in the document, so it has to be the first child node of the document doctype_node=tree.childNodes[0]<line_sep># Check that this node is in fact a doctype node <if_stmt>doctype_node.nodeType<eq>doctype_node.DOCUMENT_TYPE_NODE# render to a string by calling the toxml method # toxml uses single quotes by default, replace with "" <block_start>doctype=doctype_node.toxml().replace("'" '"')<block_end><block_end><except_stmt>Exception<as>e<block_start>logger.warn("Error in HTML5 parsing to determine doctype {}".format(e))<block_end>html=html5lib.serialize(document quote_attr_values="always" omit_optional_tags=<false> minimize_boolean_attributes=<false> use_trailing_solidus=<true> space_before_trailing_solidus=<false> )<if_stmt>doctype<block_start>html=doctype+html<block_end><return>html<block_end><except_stmt>html5lib.html5parser.ParseError<block_start><return>content<block_end><block_end><def_stmt>get_embedded_file zipped_path zipped_filename embedded_filepath<block_start><with_stmt>zipfile.ZipFile(zipped_path)<as>zf# if no path, or a directory, is being referenced, look for an index.html file 
<block_start><if_stmt><not>embedded_filepath<or>embedded_filepath.endswith("/")<block_start>embedded_filepath<augadd>"index.html"<block_end># get the details about the embedded file, and ensure it exists <try_stmt><block_start>info=zf.getinfo(embedded_filepath)<block_end><except_stmt>KeyError<block_start><return>HttpResponseNotFound('"{}" does not exist inside "{}"'.format(embedded_filepath zipped_filename))<block_end># file size file_size=0<line_sep># try to guess the MIME type of the embedded file being referenced content_type=(mimetypes.guess_type(embedded_filepath)[0]<or>"application/octet-stream")<if_stmt>embedded_filepath.endswith("htm")<or>embedded_filepath.endswith("html")<block_start>content=zf.open(info).read()<line_sep>html=parse_html(content)<line_sep>response=HttpResponse(html content_type=content_type)<line_sep>file_size=len(response.content)<block_end><else_stmt># generate a streaming response object, pulling data from within the zip file <block_start>response=FileResponse(zf.open(info) content_type=content_type)<line_sep>file_size=info.file_size<block_end># set the content-length header to the size of the embedded file <if_stmt>file_size<block_start>response["Content-Length"]=file_size<block_end><return>response<block_end><block_end>path_regex=re.compile("/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)")<line_sep>YEAR_IN_SECONDS=60<times>60<times>24<times>365<def_stmt>_zip_content_from_request request# noqa: C901 <block_start><if_stmt>request.method<not><in>allowed_methods<block_start><return>HttpResponseNotAllowed(allowed_methods)<block_end>match=path_regex.match(request.path_info)<if_stmt>match<is><none><block_start><return>HttpResponseNotFound("Path not found")<block_end><if_stmt>request.method<eq>"OPTIONS"<block_start><return>HttpResponse()<block_end>zipped_filename,embedded_filepath=match.groups()<try_stmt># calculate the local file path to the zip file 
<block_start>zipped_path=get_content_storage_file_path(zipped_filename)<block_end><except_stmt>InvalidStorageFilenameError<block_start><return>HttpResponseNotFound('"%(filename)s" is not a valid file name'%{"filename":zipped_filename})<block_end># if the zipfile does not exist on disk, return a 404 <if_stmt><not>os.path.exists(zipped_path)<block_start><return>HttpResponseNotFound('"%(filename)s" is not a valid zip file'%{"filename":zipped_filename})<block_end># Sometimes due to URL concatenation, we get URLs with double-slashes in them, like //path/to/file.html. # the zipped_filename and embedded_filepath are defined by the regex capturing groups in the URL defined # in urls.py in the same folder as this file: # r"^zipcontent/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)" # If the embedded_filepath contains a leading slash because of an input URL like: # /zipcontent/filename.zip//file.html # then the embedded_filepath will have a value of "/file.html" # we detect this leading slash in embedded_filepath and remove it. <if_stmt>embedded_filepath.startswith("/")<block_start>embedded_filepath=embedded_filepath[1:]<block_end># Any double-slashes later in the URL will be present as double-slashes, such as: # /zipcontent/filename.zip/path//file.html # giving an embedded_filepath value of "path//file.html" # Normalize the path by converting double-slashes occurring later in the path to a single slash. # This would change our example embedded_filepath to "path/file.html" which will resolve properly. 
embedded_filepath=embedded_filepath.replace("//" "/")<line_sep># if client has a cached version, use that (we can safely assume nothing has changed, due to MD5) <if_stmt>request.META.get("HTTP_IF_MODIFIED_SINCE")<block_start><return>HttpResponseNotModified()<block_end>CACHE_KEY="ZIPCONTENT_VIEW_RESPONSE_{}/{}".format(zipped_filename embedded_filepath)<line_sep>cached_response=cache.get(CACHE_KEY)<if_stmt>cached_response<is><not><none><block_start><return>cached_response<block_end>response=get_embedded_file(zipped_path zipped_filename embedded_filepath)<line_sep># ensure the browser knows not to try byte-range requests, as we don't support them here response["Accept-Ranges"]="none"<line_sep>response["Last-Modified"]=http_date(time.time())<line_sep>patch_response_headers(response cache_timeout=YEAR_IN_SECONDS)<if_stmt><not>isinstance(response StreamingHttpResponse)<block_start>cache.set(CACHE_KEY response YEAR_IN_SECONDS)<block_end><return>response<block_end><def_stmt>generate_zip_content_response environ<block_start>request=WSGIRequest(environ)<line_sep>response=_zip_content_from_request(request)<line_sep>add_security_headers(request response)<line_sep><return>response<block_end><def_stmt>zip_content_view environ start_response<block_start>""" Handles GET requests and serves a static file from within the zip file. """<line_sep>response=generate_zip_content_response(environ)<line_sep><return>django_response_to_wsgi(response environ start_response)<block_end><def_stmt>get_application <block_start>path_map={get_zip_content_base_path():zip_content_view }<line_sep><return>wsgi.PathInfoDispatcher(path_map)<block_end>
# -*- coding: utf-8 -*- <import_stmt>re<import_stmt>sys<import_stmt>mamonsu.lib.platform<as>platform<class_stmt>color(object)<block_start>mapping={'BOLD':'\033[0;0m\033[1;1m' 'RED':'\033[1;31m' 'GRAY':'\033[1;30m' 'PURPLE':'\033[1;35m' 'BLUE':'\033[1;34m' 'END':'\033[1;m'}<def_stmt>__init__ self<block_start>self.color=sys.stdout.isatty()<block_end><def_stmt>disable self<block_start>self.color=<false><block_end><def_stmt>__getattr__ self name<block_start><if_stmt>self.color<block_start><return>self.mapping[name]<block_end><else_stmt><block_start><return>''<block_end><block_end><block_end>TermColor=color()<line_sep># int (bytes) => str (human readable) <def_stmt>humansize_bytes nbytes<block_start>fmt='{0:>6} {1}'<if_stmt><not>isinstance(nbytes platform.INTEGER_TYPES)<block_start><return>'ERROR'<block_end><if_stmt>nbytes<eq>0<block_start><return>fmt.format(0 'B')<block_end>i,suffixes,=0 ['B' 'KB' 'MB' 'GB' 'TB' 'PB']<while_stmt>nbytes<ge>1024<and>i<l>len(suffixes)-1<block_start>nbytes<augdiv>1024.<line_sep>i<augadd>1<block_end>f=('%.2f'%nbytes).rstrip('0').rstrip('.')<line_sep><return>fmt.format(f suffixes[i])<block_end># str (some formates) => str (human readable) <def_stmt>humansize value<block_start>m=re.search('(\d+) (\S+)' value)<if_stmt>m<is><none><block_start><return>value<block_end>val,suff=m.group(1) m.group(2)<line_sep>val,suff=int(val) 
suff.upper()<if_stmt>suff<eq>'S'<block_start><return>value<block_end><if_stmt>suff<eq>'MS'<block_start><return>value<block_end><if_stmt>suff<eq>'B'<block_start><return>humansize_bytes(val)<block_end><if_stmt>suff<eq>'KB'<block_start><return>humansize_bytes(val<times>1024)<block_end><if_stmt>suff<eq>'4KB'<block_start><return>humansize_bytes(val<times>1024<times>4)<block_end><if_stmt>suff<eq>'8KB'<block_start><return>humansize_bytes(val<times>1024<times>8)<block_end><if_stmt>suff<eq>'16KB'<block_start><return>humansize_bytes(val<times>1024<times>16)<block_end><if_stmt>suff<eq>'MB'<block_start><return>humansize_bytes(val<times>1024<times>1024)<block_end><if_stmt>suff<eq>'GB'<block_start><return>humansize_bytes(val<times>1024<times>1024<times>1024)<block_end><if_stmt>suff<eq>'TB'<block_start><return>humansize_bytes(val<times>1024<times>1024<times>1024<times>1024)<block_end><return>value<block_end><def_stmt>header_h1 info<block_start><return>"\n{0}{1}{2}{3}\n".format(TermColor.BOLD TermColor.RED info.upper() TermColor.END)<block_end><def_stmt>key_val_h1 key val spaces=12<block_start>fmt=" {0}{1}{2:"+str(spaces)+"}{3}: {4}\n"<line_sep><return>fmt.format(TermColor.BOLD TermColor.PURPLE key TermColor.END val)<block_end><def_stmt>header_h2 info<block_start><return>" {0}{1}{2}{3}\n".format(TermColor.BOLD TermColor.PURPLE info TermColor.END)<block_end><def_stmt>key_val_h2 key val delim=': '<block_start><return>" {0}{1}{2:4}{3}{4}{5}\n".format(TermColor.BOLD TermColor.BLUE key TermColor.END delim val)<block_end><def_stmt>topline_h1 arr=<none> delim=" \t"<block_start><if_stmt>arr<is><none><block_start>arr=[]<block_end>result="{0}{1}".format(TermColor.BOLD TermColor.BLUE)<for_stmt>x arr<block_start>result="{0}{1}{2}".format(result delim x)<block_end><return>"{0}{1}\n".format(result TermColor.END)<block_end><def_stmt>format_raw_h1 raw=""<block_start>result=[]<for_stmt>i,line enumerate(raw.split("\n"))<block_start><if_stmt>i<eq>0<block_start>result.append(" 
{0}{1}{2}{3}".format(TermColor.BOLD TermColor.BLUE line TermColor.END))<block_end><else_stmt><block_start>result.append(" {0}".format(line))<block_end><block_end><return>"\n".join(result)+"\n"<block_end>
<import_from_stmt>.report_builder ReportBuilder<import_from_stmt>.latex_report_builder LatexReportBuilder<import_from_stmt>.markdown_report_builder MarkdownReportBuilder<import_from_stmt>.json_report_builder JsonReportBuilder<import_from_stmt>.html_report_builder JinjaHtmlReportBuilder<line_sep>
# # Author: <EMAIL> # Date: 01/25/2019 # """ Utils for training and optimization """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>utils<line_sep>logger=utils.get_logger()<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>bert.optimization BertAdam<def_stmt>zero_grad model optimizer_param<block_start>model.zero_grad()<for_stmt>n,p optimizer_param<block_start>p.grad=<none><block_end><block_end><def_stmt>dump_parameter_names model path<block_start><with_stmt>open(path 'w' encoding='utf8')<as>fs<block_start>fs.write('{}\n'.format('\n'.join([n<for>n,p model.named_parameters()])))<block_end><block_end><def_stmt>copy_optimizer_params_to_model named_params_model named_params_optimizer<block_start>""" Utility function for optimize_on_cpu and 16-bits training. Copy the parameters optimized on CPU/RAM back to the model on GPU """<for_stmt>(name_opti param_opti),(name_model param_model) zip(named_params_optimizer named_params_model)<block_start><if_stmt>name_opti<ne>name_model<block_start>logger.error("name_opti != name_model: {} {}".format(name_opti name_model))<line_sep><raise>ValueError<block_end>param_model.data.copy_(param_opti.data)<block_end><block_end><def_stmt>set_optimizer_params_grad named_params_optimizer named_params_model test_nan=<false><block_start>""" Utility function for optimize_on_cpu and 16-bits training. 
Copy the gradient of the GPU parameters to the CPU/RAMM copy of the model """<line_sep>is_nan=<false><for_stmt>(name_opti param_opti),(name_model param_model) zip(named_params_optimizer named_params_model)<block_start><if_stmt>name_opti<ne>name_model<block_start>logger.error("name_opti != name_model: {} {}".format(name_opti name_model))<line_sep><raise>ValueError<block_end><if_stmt>param_model.grad<is><not><none><block_start>norm=param_model.grad.norm()<if_stmt>test_nan<and>(torch.isnan(norm)<or>torch.isinf(norm))<block_start>is_nan=<true><block_end><if_stmt>param_opti.grad<is><none><block_start>param_opti.grad=torch.nn.Parameter(param_opti.data.new().resize_(*param_opti.data.size()))<block_end>param_opti.grad.data.copy_(param_model.grad.data)<block_end><else_stmt><block_start>param_opti.grad=<none><block_end><block_end><return>is_nan<block_end><def_stmt>create_optimizer model args num_train_steps=<none> init_spec=<none> no_decay=['bias' 'LayerNorm.weight']# Prepare optimizer <block_start><if_stmt>args.fp16<block_start>dcnt=torch.cuda.device_count()<if_stmt>args.no_even_grad<block_start>param_optimizer=[(n param.detach().clone().type(torch.cuda.FloatTensor).requires_grad_())<for>i,(n param) enumerate(model.named_parameters())]<block_end><else_stmt><block_start>total_size=sum(np.prod(p.size())<for>p model.parameters())<line_sep>quota={i:0<for>i range(dcnt)}<line_sep>quota[0]=total_size<floordiv>(dcnt<times>2)<line_sep>param_optimizer=[]<for_stmt>i,(n param) enumerate(model.named_parameters())<block_start>ps=np.prod(param.size())<line_sep>index=list(sorted(quota.items() key=<lambda>x:x[1]))[0][0]<line_sep>quota[index]<augadd>ps<line_sep>cp=param.clone().type(torch.cuda.FloatTensor).detach().to('cuda:{}'.format(index)).requires_grad_()<line_sep>param_optimizer<augadd>[(n cp)]<block_end><block_end><block_end><elif_stmt>args.optimize_on_cpu<block_start>param_optimizer=[(n param.clone().detach().to('cpu').requires_grad_())<for>n,param 
model.named_parameters()]<block_end><else_stmt><block_start>param_optimizer=[(n p)<for>n,p model.named_parameters()]<block_end>group0=dict(params=[] weight_decay_rate=args.weight_decay names=[])<line_sep>group1=dict(params=[] weight_decay_rate=0.00 names=[])<for_stmt>(n p) param_optimizer<block_start><if_stmt><not>any(nd<in>n<for>nd no_decay)<block_start>group0['params'].append(p)<line_sep>group0['names'].append(n)<block_end><else_stmt><block_start>group1['params'].append(p)<line_sep>group1['names'].append(n)<block_end><block_end>optimizer_grouped_parameters=[group0 group1]<line_sep>t_total=num_train_steps<line_sep>optimizer=<none><if_stmt>t_total<block_start><if_stmt>args.local_rank<ne>-1<block_start>t_total=t_total<floordiv>torch.distributed.get_world_size()<block_end>optimizer=BertAdam(optimizer_grouped_parameters lr=args.learning_rate b1=args.adam_beta1 b2=args.adam_beta2 v1=args.qhadam_v1 v2=args.qhadam_v2 lr_ends=args.lr_schedule_ends e=args.epsilon warmup=args.warmup_proportion<if>args.warmup_proportion<l>1<else>args.warmup_proportion/t_total t_total=t_total schedule=args.lr_schedule max_grad_norm=args.max_grad_norm global_grad_norm=args.global_grad_norm init_spec=init_spec weight_decay_rate=args.weight_decay)<block_end><return>optimizer param_optimizer t_total<block_end>
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild <import_from_stmt>pkg_resources parse_version<import_stmt>kaitaistruct<import_from_stmt>kaitaistruct KaitaiStruct KaitaiStream BytesIO<import_stmt>collections<if_stmt>parse_version(kaitaistruct.__version__)<l>parse_version('0.9')<block_start><raise>Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s"%(kaitaistruct.__version__))<block_end><class_stmt>MifareClassic(KaitaiStruct)<block_start>"""You can get a dump for testing by the link: https://github.com/zhovner/mfdread/raw/master/dump.mfd .. seealso:: Source - https://github.com/nfc-tools/libnfc https://www.nxp.com/docs/en/data-sheet/MF1S70YYX_V1.pdf """<line_sep>SEQ_FIELDS=["sectors"]<def_stmt>__init__ self _io _parent=<none> _root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start>self._debug['sectors']['start']=self._io.pos()<line_sep>self._raw_sectors=[]<line_sep>self.sectors=[]<line_sep>i=0<while_stmt><not>self._io.is_eof()<block_start><if_stmt><not>'arr'<in>self._debug['sectors']<block_start>self._debug['sectors']['arr']=[]<block_end>self._debug['sectors']['arr'].append({'start':self._io.pos()})<line_sep>self._raw_sectors.append(self._io.read_bytes((((4<if>i<ge>32<else>1)<times>4)<times>16)))<line_sep>_io__raw_sectors=KaitaiStream(BytesIO(self._raw_sectors[-1]))<line_sep>_t_sectors=MifareClassic.Sector(i<eq>0 _io__raw_sectors self self._root)<line_sep>_t_sectors._read()<line_sep>self.sectors.append(_t_sectors)<line_sep>self._debug['sectors']['arr'][len(self.sectors)-1]['end']=self._io.pos()<line_sep>i<augadd>1<block_end>self._debug['sectors']['end']=self._io.pos()<block_end><class_stmt>Key(KaitaiStruct)<block_start>SEQ_FIELDS=["key"]<def_stmt>__init__ self _io _parent=<none> 
_root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start>self._debug['key']['start']=self._io.pos()<line_sep>self.key=self._io.read_bytes(6)<line_sep>self._debug['key']['end']=self._io.pos()<block_end><block_end><class_stmt>Sector(KaitaiStruct)<block_start>SEQ_FIELDS=["manufacturer" "data_filler" "trailer"]<def_stmt>__init__ self has_manufacturer _io _parent=<none> _root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self.has_manufacturer=has_manufacturer<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start><if_stmt>self.has_manufacturer<block_start>self._debug['manufacturer']['start']=self._io.pos()<line_sep>self.manufacturer=MifareClassic.Manufacturer(self._io self self._root)<line_sep>self.manufacturer._read()<line_sep>self._debug['manufacturer']['end']=self._io.pos()<block_end>self._debug['data_filler']['start']=self._io.pos()<line_sep>self._raw_data_filler=self._io.read_bytes(((self._io.size()-self._io.pos())-16))<line_sep>_io__raw_data_filler=KaitaiStream(BytesIO(self._raw_data_filler))<line_sep>self.data_filler=MifareClassic.Sector.Filler(_io__raw_data_filler self self._root)<line_sep>self.data_filler._read()<line_sep>self._debug['data_filler']['end']=self._io.pos()<line_sep>self._debug['trailer']['start']=self._io.pos()<line_sep>self.trailer=MifareClassic.Trailer(self._io self self._root)<line_sep>self.trailer._read()<line_sep>self._debug['trailer']['end']=self._io.pos()<block_end><class_stmt>Values(KaitaiStruct)<block_start>SEQ_FIELDS=["values"]<def_stmt>__init__ self _io _parent=<none> _root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read 
self<block_start>self._debug['values']['start']=self._io.pos()<line_sep>self.values=[]<line_sep>i=0<while_stmt><not>self._io.is_eof()<block_start><if_stmt><not>'arr'<in>self._debug['values']<block_start>self._debug['values']['arr']=[]<block_end>self._debug['values']['arr'].append({'start':self._io.pos()})<line_sep>_t_values=MifareClassic.Sector.Values.ValueBlock(self._io self self._root)<line_sep>_t_values._read()<line_sep>self.values.append(_t_values)<line_sep>self._debug['values']['arr'][len(self.values)-1]['end']=self._io.pos()<line_sep>i<augadd>1<block_end>self._debug['values']['end']=self._io.pos()<block_end><class_stmt>ValueBlock(KaitaiStruct)<block_start>SEQ_FIELDS=["valuez" "addrz"]<def_stmt>__init__ self _io _parent=<none> _root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start>self._debug['valuez']['start']=self._io.pos()<line_sep>self.valuez=[<none>]<times>(3)<for_stmt>i range(3)<block_start><if_stmt><not>'arr'<in>self._debug['valuez']<block_start>self._debug['valuez']['arr']=[]<block_end>self._debug['valuez']['arr'].append({'start':self._io.pos()})<line_sep>self.valuez[i]=self._io.read_u4le()<line_sep>self._debug['valuez']['arr'][i]['end']=self._io.pos()<block_end>self._debug['valuez']['end']=self._io.pos()<line_sep>self._debug['addrz']['start']=self._io.pos()<line_sep>self.addrz=[<none>]<times>(4)<for_stmt>i range(4)<block_start><if_stmt><not>'arr'<in>self._debug['addrz']<block_start>self._debug['addrz']['arr']=[]<block_end>self._debug['addrz']['arr'].append({'start':self._io.pos()})<line_sep>self.addrz[i]=self._io.read_u1()<line_sep>self._debug['addrz']['arr'][i]['end']=self._io.pos()<block_end>self._debug['addrz']['end']=self._io.pos()<block_end>@property<def_stmt>addr self<block_start><if_stmt>hasattr(self '_m_addr')<block_start><return>self._m_addr<if>hasattr(self 
'_m_addr')<else><none><block_end><if_stmt>self.valid<block_start>self._m_addr=self.addrz[0]<block_end><return>self._m_addr<if>hasattr(self '_m_addr')<else><none><block_end>@property<def_stmt>addr_valid self<block_start><if_stmt>hasattr(self '_m_addr_valid')<block_start><return>self._m_addr_valid<if>hasattr(self '_m_addr_valid')<else><none><block_end>self._m_addr_valid=((self.addrz[0]<eq>~(self.addrz[1]))<and>(self.addrz[0]<eq>self.addrz[2])<and>(self.addrz[1]<eq>self.addrz[3]))<line_sep><return>self._m_addr_valid<if>hasattr(self '_m_addr_valid')<else><none><block_end>@property<def_stmt>valid self<block_start><if_stmt>hasattr(self '_m_valid')<block_start><return>self._m_valid<if>hasattr(self '_m_valid')<else><none><block_end>self._m_valid=((self.value_valid)<and>(self.addr_valid))<line_sep><return>self._m_valid<if>hasattr(self '_m_valid')<else><none><block_end>@property<def_stmt>value_valid self<block_start><if_stmt>hasattr(self '_m_value_valid')<block_start><return>self._m_value_valid<if>hasattr(self '_m_value_valid')<else><none><block_end>self._m_value_valid=((self.valuez[0]<eq>~(self.valuez[1]))<and>(self.valuez[0]<eq>self.valuez[2]))<line_sep><return>self._m_value_valid<if>hasattr(self '_m_value_valid')<else><none><block_end>@property<def_stmt>value self<block_start><if_stmt>hasattr(self '_m_value')<block_start><return>self._m_value<if>hasattr(self '_m_value')<else><none><block_end><if_stmt>self.valid<block_start>self._m_value=self.valuez[0]<block_end><return>self._m_value<if>hasattr(self '_m_value')<else><none><block_end><block_end><block_end><class_stmt>Filler(KaitaiStruct)<block_start>"""only to create _io."""<line_sep>SEQ_FIELDS=["data"]<def_stmt>__init__ self _io _parent=<none> _root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read 
self<block_start>self._debug['data']['start']=self._io.pos()<line_sep>self.data=self._io.read_bytes(self._io.size())<line_sep>self._debug['data']['end']=self._io.pos()<block_end><block_end>@property<def_stmt>block_size self<block_start><if_stmt>hasattr(self '_m_block_size')<block_start><return>self._m_block_size<if>hasattr(self '_m_block_size')<else><none><block_end>self._m_block_size=16<line_sep><return>self._m_block_size<if>hasattr(self '_m_block_size')<else><none><block_end>@property<def_stmt>data self<block_start><if_stmt>hasattr(self '_m_data')<block_start><return>self._m_data<if>hasattr(self '_m_data')<else><none><block_end>self._m_data=self.data_filler.data<line_sep><return>self._m_data<if>hasattr(self '_m_data')<else><none><block_end>@property<def_stmt>blocks self<block_start><if_stmt>hasattr(self '_m_blocks')<block_start><return>self._m_blocks<if>hasattr(self '_m_blocks')<else><none><block_end>io=self.data_filler._io<line_sep>_pos=io.pos()<line_sep>io.seek(0)<line_sep>self._debug['_m_blocks']['start']=io.pos()<line_sep>self._m_blocks=[]<line_sep>i=0<while_stmt><not>io.is_eof()<block_start><if_stmt><not>'arr'<in>self._debug['_m_blocks']<block_start>self._debug['_m_blocks']['arr']=[]<block_end>self._debug['_m_blocks']['arr'].append({'start':io.pos()})<line_sep>self._m_blocks.append(io.read_bytes(self.block_size))<line_sep>self._debug['_m_blocks']['arr'][len(self._m_blocks)-1]['end']=io.pos()<line_sep>i<augadd>1<block_end>self._debug['_m_blocks']['end']=io.pos()<line_sep>io.seek(_pos)<line_sep><return>self._m_blocks<if>hasattr(self '_m_blocks')<else><none><block_end>@property<def_stmt>values self<block_start><if_stmt>hasattr(self '_m_values')<block_start><return>self._m_values<if>hasattr(self '_m_values')<else><none><block_end>io=self.data_filler._io<line_sep>_pos=io.pos()<line_sep>io.seek(0)<line_sep>self._debug['_m_values']['start']=io.pos()<line_sep>self._m_values=MifareClassic.Sector.Values(io self 
self._root)<line_sep>self._m_values._read()<line_sep>self._debug['_m_values']['end']=io.pos()<line_sep>io.seek(_pos)<line_sep><return>self._m_values<if>hasattr(self '_m_values')<else><none><block_end><block_end><class_stmt>Manufacturer(KaitaiStruct)<block_start>SEQ_FIELDS=["nuid" "bcc" "sak" "atqa" "manufacturer"]<def_stmt>__init__ self _io _parent=<none> _root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start>self._debug['nuid']['start']=self._io.pos()<line_sep>self.nuid=self._io.read_u4le()<line_sep>self._debug['nuid']['end']=self._io.pos()<line_sep>self._debug['bcc']['start']=self._io.pos()<line_sep>self.bcc=self._io.read_u1()<line_sep>self._debug['bcc']['end']=self._io.pos()<line_sep>self._debug['sak']['start']=self._io.pos()<line_sep>self.sak=self._io.read_u1()<line_sep>self._debug['sak']['end']=self._io.pos()<line_sep>self._debug['atqa']['start']=self._io.pos()<line_sep>self.atqa=self._io.read_u2le()<line_sep>self._debug['atqa']['end']=self._io.pos()<line_sep>self._debug['manufacturer']['start']=self._io.pos()<line_sep>self.manufacturer=self._io.read_bytes(8)<line_sep>self._debug['manufacturer']['end']=self._io.pos()<block_end><block_end><class_stmt>Trailer(KaitaiStruct)<block_start>SEQ_FIELDS=["key_a" "access_bits" "user_byte" "key_b"]<def_stmt>__init__ self _io _parent=<none> _root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start>self._debug['key_a']['start']=self._io.pos()<line_sep>self.key_a=MifareClassic.Key(self._io self 
self._root)<line_sep>self.key_a._read()<line_sep>self._debug['key_a']['end']=self._io.pos()<line_sep>self._debug['access_bits']['start']=self._io.pos()<line_sep>self._raw_access_bits=self._io.read_bytes(3)<line_sep>_io__raw_access_bits=KaitaiStream(BytesIO(self._raw_access_bits))<line_sep>self.access_bits=MifareClassic.Trailer.AccessConditions(_io__raw_access_bits self self._root)<line_sep>self.access_bits._read()<line_sep>self._debug['access_bits']['end']=self._io.pos()<line_sep>self._debug['user_byte']['start']=self._io.pos()<line_sep>self.user_byte=self._io.read_u1()<line_sep>self._debug['user_byte']['end']=self._io.pos()<line_sep>self._debug['key_b']['start']=self._io.pos()<line_sep>self.key_b=MifareClassic.Key(self._io self self._root)<line_sep>self.key_b._read()<line_sep>self._debug['key_b']['end']=self._io.pos()<block_end><class_stmt>AccessConditions(KaitaiStruct)<block_start>SEQ_FIELDS=["raw_chunks"]<def_stmt>__init__ self _io _parent=<none> _root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start>self._debug['raw_chunks']['start']=self._io.pos()<line_sep>self.raw_chunks=[<none>]<times>(self._parent.ac_count_of_chunks)<for_stmt>i range(self._parent.ac_count_of_chunks)<block_start><if_stmt><not>'arr'<in>self._debug['raw_chunks']<block_start>self._debug['raw_chunks']['arr']=[]<block_end>self._debug['raw_chunks']['arr'].append({'start':self._io.pos()})<line_sep>self.raw_chunks[i]=self._io.read_bits_int_be(4)<line_sep>self._debug['raw_chunks']['arr'][i]['end']=self._io.pos()<block_end>self._debug['raw_chunks']['end']=self._io.pos()<block_end><class_stmt>TrailerAc(KaitaiStruct)<block_start>SEQ_FIELDS=[]<def_stmt>__init__ self ac _io _parent=<none> 
_root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self.ac=ac<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start><pass><block_end>@property<def_stmt>can_read_key_b self<block_start>"""key A is required."""<if_stmt>hasattr(self '_m_can_read_key_b')<block_start><return>self._m_can_read_key_b<if>hasattr(self '_m_can_read_key_b')<else><none><block_end>self._m_can_read_key_b=self.ac.inv_shift_val<le>2<line_sep><return>self._m_can_read_key_b<if>hasattr(self '_m_can_read_key_b')<else><none><block_end>@property<def_stmt>can_write_keys self<block_start><if_stmt>hasattr(self '_m_can_write_keys')<block_start><return>self._m_can_write_keys<if>hasattr(self '_m_can_write_keys')<else><none><block_end>self._m_can_write_keys=((((self.ac.inv_shift_val+1)%3)<ne>0)<and>(self.ac.inv_shift_val<l>6))<line_sep><return>self._m_can_write_keys<if>hasattr(self '_m_can_write_keys')<else><none><block_end>@property<def_stmt>can_write_access_bits self<block_start><if_stmt>hasattr(self '_m_can_write_access_bits')<block_start><return>self._m_can_write_access_bits<if>hasattr(self '_m_can_write_access_bits')<else><none><block_end>self._m_can_write_access_bits=self.ac.bits[2].b<line_sep><return>self._m_can_write_access_bits<if>hasattr(self '_m_can_write_access_bits')<else><none><block_end>@property<def_stmt>key_b_controls_write self<block_start><if_stmt>hasattr(self '_m_key_b_controls_write')<block_start><return>self._m_key_b_controls_write<if>hasattr(self '_m_key_b_controls_write')<else><none><block_end>self._m_key_b_controls_write=<not>(self.can_read_key_b)<line_sep><return>self._m_key_b_controls_write<if>hasattr(self '_m_key_b_controls_write')<else><none><block_end><block_end><class_stmt>ChunkBitRemap(KaitaiStruct)<block_start>SEQ_FIELDS=[]<def_stmt>__init__ self bit_no _io _parent=<none> 
_root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self.bit_no=bit_no<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start><pass><block_end>@property<def_stmt>shift_value self<block_start><if_stmt>hasattr(self '_m_shift_value')<block_start><return>self._m_shift_value<if>hasattr(self '_m_shift_value')<else><none><block_end>self._m_shift_value=(-1<if>self.bit_no<eq>1<else>1)<line_sep><return>self._m_shift_value<if>hasattr(self '_m_shift_value')<else><none><block_end>@property<def_stmt>chunk_no self<block_start><if_stmt>hasattr(self '_m_chunk_no')<block_start><return>self._m_chunk_no<if>hasattr(self '_m_chunk_no')<else><none><block_end>self._m_chunk_no=(((self.inv_chunk_no+self.shift_value)+self._parent._parent.ac_count_of_chunks)%self._parent._parent.ac_count_of_chunks)<line_sep><return>self._m_chunk_no<if>hasattr(self '_m_chunk_no')<else><none><block_end>@property<def_stmt>inv_chunk_no self<block_start><if_stmt>hasattr(self '_m_inv_chunk_no')<block_start><return>self._m_inv_chunk_no<if>hasattr(self '_m_inv_chunk_no')<else><none><block_end>self._m_inv_chunk_no=(self.bit_no+self.shift_value)<line_sep><return>self._m_inv_chunk_no<if>hasattr(self '_m_inv_chunk_no')<else><none><block_end><block_end><class_stmt>DataAc(KaitaiStruct)<block_start>SEQ_FIELDS=[]<def_stmt>__init__ self ac _io _parent=<none> _root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self.ac=ac<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start><pass><block_end>@property<def_stmt>read_key_a_required self<block_start><if_stmt>hasattr(self '_m_read_key_a_required')<block_start><return>self._m_read_key_a_required<if>hasattr(self '_m_read_key_a_required')<else><none><block_end>self._m_read_key_a_required=self.ac.val<le>4<line_sep><return>self._m_read_key_a_required<if>hasattr(self 
'_m_read_key_a_required')<else><none><block_end>@property<def_stmt>write_key_b_required self<block_start><if_stmt>hasattr(self '_m_write_key_b_required')<block_start><return>self._m_write_key_b_required<if>hasattr(self '_m_write_key_b_required')<else><none><block_end>self._m_write_key_b_required=((((<not>(self.read_key_a_required))<or>(self.read_key_b_required)))<and>(<not>(self.ac.bits[0].b)))<line_sep><return>self._m_write_key_b_required<if>hasattr(self '_m_write_key_b_required')<else><none><block_end>@property<def_stmt>write_key_a_required self<block_start><if_stmt>hasattr(self '_m_write_key_a_required')<block_start><return>self._m_write_key_a_required<if>hasattr(self '_m_write_key_a_required')<else><none><block_end>self._m_write_key_a_required=self.ac.val<eq>0<line_sep><return>self._m_write_key_a_required<if>hasattr(self '_m_write_key_a_required')<else><none><block_end>@property<def_stmt>read_key_b_required self<block_start><if_stmt>hasattr(self '_m_read_key_b_required')<block_start><return>self._m_read_key_b_required<if>hasattr(self '_m_read_key_b_required')<else><none><block_end>self._m_read_key_b_required=self.ac.val<le>6<line_sep><return>self._m_read_key_b_required<if>hasattr(self '_m_read_key_b_required')<else><none><block_end>@property<def_stmt>decrement_available self<block_start><if_stmt>hasattr(self '_m_decrement_available')<block_start><return>self._m_decrement_available<if>hasattr(self '_m_decrement_available')<else><none><block_end>self._m_decrement_available=((((self.ac.bits[1].b)<or>(<not>(self.ac.bits[0].b))))<and>(<not>(self.ac.bits[2].b)))<line_sep><return>self._m_decrement_available<if>hasattr(self '_m_decrement_available')<else><none><block_end>@property<def_stmt>increment_available self<block_start><if_stmt>hasattr(self '_m_increment_available')<block_start><return>self._m_increment_available<if>hasattr(self 
'_m_increment_available')<else><none><block_end>self._m_increment_available=((((<not>(self.ac.bits[0].b))<and>(<not>(self.read_key_a_required))<and>(<not>(self.read_key_b_required))))<or>(((<not>(self.ac.bits[0].b))<and>(self.read_key_a_required)<and>(self.read_key_b_required))))<line_sep><return>self._m_increment_available<if>hasattr(self '_m_increment_available')<else><none><block_end><block_end><class_stmt>Ac(KaitaiStruct)<block_start>SEQ_FIELDS=[]<def_stmt>__init__ self index _io _parent=<none> _root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self.index=index<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start><pass><block_end><class_stmt>AcBit(KaitaiStruct)<block_start>SEQ_FIELDS=[]<def_stmt>__init__ self i chunk _io _parent=<none> _root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self.i=i<line_sep>self.chunk=chunk<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start><pass><block_end>@property<def_stmt>n self<block_start><if_stmt>hasattr(self '_m_n')<block_start><return>self._m_n<if>hasattr(self '_m_n')<else><none><block_end>self._m_n=((self.chunk<rshift>self.i)&1)<line_sep><return>self._m_n<if>hasattr(self '_m_n')<else><none><block_end>@property<def_stmt>b self<block_start><if_stmt>hasattr(self '_m_b')<block_start><return>self._m_b<if>hasattr(self '_m_b')<else><none><block_end>self._m_b=self.n<eq>1<line_sep><return>self._m_b<if>hasattr(self '_m_b')<else><none><block_end><block_end>@property<def_stmt>bits self<block_start><if_stmt>hasattr(self '_m_bits')<block_start><return>self._m_bits<if>hasattr(self '_m_bits')<else><none><block_end>_pos=self._io.pos()<line_sep>self._io.seek(0)<line_sep>self._debug['_m_bits']['start']=self._io.pos()<line_sep>self._m_bits=[<none>]<times>(self._parent._parent.ac_bits)<for_stmt>i 
range(self._parent._parent.ac_bits)<block_start><if_stmt><not>'arr'<in>self._debug['_m_bits']<block_start>self._debug['_m_bits']['arr']=[]<block_end>self._debug['_m_bits']['arr'].append({'start':self._io.pos()})<line_sep>_t__m_bits=MifareClassic.Trailer.AccessConditions.Ac.AcBit(self.index self._parent.chunks[i].chunk self._io self self._root)<line_sep>_t__m_bits._read()<line_sep>self._m_bits[i]=_t__m_bits<line_sep>self._debug['_m_bits']['arr'][i]['end']=self._io.pos()<block_end>self._debug['_m_bits']['end']=self._io.pos()<line_sep>self._io.seek(_pos)<line_sep><return>self._m_bits<if>hasattr(self '_m_bits')<else><none><block_end>@property<def_stmt>val self<block_start>"""c3 c2 c1."""<if_stmt>hasattr(self '_m_val')<block_start><return>self._m_val<if>hasattr(self '_m_val')<else><none><block_end>self._m_val=(((self.bits[2].n<lshift>2)|(self.bits[1].n<lshift>1))|self.bits[0].n)<line_sep><return>self._m_val<if>hasattr(self '_m_val')<else><none><block_end>@property<def_stmt>inv_shift_val self<block_start><if_stmt>hasattr(self '_m_inv_shift_val')<block_start><return>self._m_inv_shift_val<if>hasattr(self '_m_inv_shift_val')<else><none><block_end>self._m_inv_shift_val=(((self.bits[0].n<lshift>2)|(self.bits[1].n<lshift>1))|self.bits[2].n)<line_sep><return>self._m_inv_shift_val<if>hasattr(self '_m_inv_shift_val')<else><none><block_end><block_end><class_stmt>ValidChunk(KaitaiStruct)<block_start>SEQ_FIELDS=[]<def_stmt>__init__ self inv_chunk chunk _io _parent=<none> _root=<none><block_start>self._io=_io<line_sep>self._parent=_parent<line_sep>self._root=_root<if>_root<else>self<line_sep>self.inv_chunk=inv_chunk<line_sep>self.chunk=chunk<line_sep>self._debug=collections.defaultdict(dict)<block_end><def_stmt>_read self<block_start><pass><block_end>@property<def_stmt>valid self<block_start><if_stmt>hasattr(self '_m_valid')<block_start><return>self._m_valid<if>hasattr(self 
'_m_valid')<else><none><block_end>self._m_valid=(self.inv_chunk^self.chunk)<eq>15<line_sep><return>self._m_valid<if>hasattr(self '_m_valid')<else><none><block_end><block_end>@property<def_stmt>data_acs self<block_start><if_stmt>hasattr(self '_m_data_acs')<block_start><return>self._m_data_acs<if>hasattr(self '_m_data_acs')<else><none><block_end>_pos=self._io.pos()<line_sep>self._io.seek(0)<line_sep>self._debug['_m_data_acs']['start']=self._io.pos()<line_sep>self._m_data_acs=[<none>]<times>((self._parent.acs_in_sector-1))<for_stmt>i range((self._parent.acs_in_sector-1))<block_start><if_stmt><not>'arr'<in>self._debug['_m_data_acs']<block_start>self._debug['_m_data_acs']['arr']=[]<block_end>self._debug['_m_data_acs']['arr'].append({'start':self._io.pos()})<line_sep>_t__m_data_acs=MifareClassic.Trailer.AccessConditions.DataAc(self.acs_raw[i] self._io self self._root)<line_sep>_t__m_data_acs._read()<line_sep>self._m_data_acs[i]=_t__m_data_acs<line_sep>self._debug['_m_data_acs']['arr'][i]['end']=self._io.pos()<block_end>self._debug['_m_data_acs']['end']=self._io.pos()<line_sep>self._io.seek(_pos)<line_sep><return>self._m_data_acs<if>hasattr(self '_m_data_acs')<else><none><block_end>@property<def_stmt>remaps self<block_start><if_stmt>hasattr(self '_m_remaps')<block_start><return>self._m_remaps<if>hasattr(self '_m_remaps')<else><none><block_end>_pos=self._io.pos()<line_sep>self._io.seek(0)<line_sep>self._debug['_m_remaps']['start']=self._io.pos()<line_sep>self._m_remaps=[<none>]<times>(self._parent.ac_bits)<for_stmt>i range(self._parent.ac_bits)<block_start><if_stmt><not>'arr'<in>self._debug['_m_remaps']<block_start>self._debug['_m_remaps']['arr']=[]<block_end>self._debug['_m_remaps']['arr'].append({'start':self._io.pos()})<line_sep>_t__m_remaps=MifareClassic.Trailer.AccessConditions.ChunkBitRemap(i self._io self 
self._root)<line_sep>_t__m_remaps._read()<line_sep>self._m_remaps[i]=_t__m_remaps<line_sep>self._debug['_m_remaps']['arr'][i]['end']=self._io.pos()<block_end>self._debug['_m_remaps']['end']=self._io.pos()<line_sep>self._io.seek(_pos)<line_sep><return>self._m_remaps<if>hasattr(self '_m_remaps')<else><none><block_end>@property<def_stmt>acs_raw self<block_start><if_stmt>hasattr(self '_m_acs_raw')<block_start><return>self._m_acs_raw<if>hasattr(self '_m_acs_raw')<else><none><block_end>_pos=self._io.pos()<line_sep>self._io.seek(0)<line_sep>self._debug['_m_acs_raw']['start']=self._io.pos()<line_sep>self._m_acs_raw=[<none>]<times>(self._parent.acs_in_sector)<for_stmt>i range(self._parent.acs_in_sector)<block_start><if_stmt><not>'arr'<in>self._debug['_m_acs_raw']<block_start>self._debug['_m_acs_raw']['arr']=[]<block_end>self._debug['_m_acs_raw']['arr'].append({'start':self._io.pos()})<line_sep>_t__m_acs_raw=MifareClassic.Trailer.AccessConditions.Ac(i self._io self self._root)<line_sep>_t__m_acs_raw._read()<line_sep>self._m_acs_raw[i]=_t__m_acs_raw<line_sep>self._debug['_m_acs_raw']['arr'][i]['end']=self._io.pos()<block_end>self._debug['_m_acs_raw']['end']=self._io.pos()<line_sep>self._io.seek(_pos)<line_sep><return>self._m_acs_raw<if>hasattr(self '_m_acs_raw')<else><none><block_end>@property<def_stmt>trailer_ac self<block_start><if_stmt>hasattr(self '_m_trailer_ac')<block_start><return>self._m_trailer_ac<if>hasattr(self '_m_trailer_ac')<else><none><block_end>_pos=self._io.pos()<line_sep>self._io.seek(0)<line_sep>self._debug['_m_trailer_ac']['start']=self._io.pos()<line_sep>self._m_trailer_ac=MifareClassic.Trailer.AccessConditions.TrailerAc(self.acs_raw[(self._parent.acs_in_sector-1)] self._io self self._root)<line_sep>self._m_trailer_ac._read()<line_sep>self._debug['_m_trailer_ac']['end']=self._io.pos()<line_sep>self._io.seek(_pos)<line_sep><return>self._m_trailer_ac<if>hasattr(self '_m_trailer_ac')<else><none><block_end>@property<def_stmt>chunks 
self<block_start><if_stmt>hasattr(self '_m_chunks')<block_start><return>self._m_chunks<if>hasattr(self '_m_chunks')<else><none><block_end>_pos=self._io.pos()<line_sep>self._io.seek(0)<line_sep>self._debug['_m_chunks']['start']=self._io.pos()<line_sep>self._m_chunks=[<none>]<times>(self._parent.ac_bits)<for_stmt>i range(self._parent.ac_bits)<block_start><if_stmt><not>'arr'<in>self._debug['_m_chunks']<block_start>self._debug['_m_chunks']['arr']=[]<block_end>self._debug['_m_chunks']['arr'].append({'start':self._io.pos()})<line_sep>_t__m_chunks=MifareClassic.Trailer.AccessConditions.ValidChunk(self.raw_chunks[self.remaps[i].inv_chunk_no] self.raw_chunks[self.remaps[i].chunk_no] self._io self self._root)<line_sep>_t__m_chunks._read()<line_sep>self._m_chunks[i]=_t__m_chunks<line_sep>self._debug['_m_chunks']['arr'][i]['end']=self._io.pos()<block_end>self._debug['_m_chunks']['end']=self._io.pos()<line_sep>self._io.seek(_pos)<line_sep><return>self._m_chunks<if>hasattr(self '_m_chunks')<else><none><block_end><block_end>@property<def_stmt>ac_bits self<block_start><if_stmt>hasattr(self '_m_ac_bits')<block_start><return>self._m_ac_bits<if>hasattr(self '_m_ac_bits')<else><none><block_end>self._m_ac_bits=3<line_sep><return>self._m_ac_bits<if>hasattr(self '_m_ac_bits')<else><none><block_end>@property<def_stmt>acs_in_sector self<block_start><if_stmt>hasattr(self '_m_acs_in_sector')<block_start><return>self._m_acs_in_sector<if>hasattr(self '_m_acs_in_sector')<else><none><block_end>self._m_acs_in_sector=4<line_sep><return>self._m_acs_in_sector<if>hasattr(self '_m_acs_in_sector')<else><none><block_end>@property<def_stmt>ac_count_of_chunks self<block_start><if_stmt>hasattr(self '_m_ac_count_of_chunks')<block_start><return>self._m_ac_count_of_chunks<if>hasattr(self '_m_ac_count_of_chunks')<else><none><block_end>self._m_ac_count_of_chunks=(self.ac_bits<times>2)<line_sep><return>self._m_ac_count_of_chunks<if>hasattr(self '_m_ac_count_of_chunks')<else><none><block_end><block_end><block_end>
<import_stmt>os<import_stmt>subprocess<import_from_stmt>pathlib Path<import_from_stmt>..views.viewhelper delay_refresh_detail<import_from_stmt>..helper.config config<def_stmt>edit filepath:Path loop<block_start><if_stmt>isinstance(filepath str)<block_start>filepath=Path(filepath)<block_end>editor=os.environ.get('EDITOR' 'vi').lower()<line_sep># vim <if_stmt>editor<eq>'vi'<or>editor<eq>'vim'<block_start>cmd=editor+' '+str(filepath)<line_sep>current_directory=Path.cwd()<line_sep>os.chdir(filepath.parent)<if_stmt>config.tmux_support<and>is_inside_tmux()<block_start>open_in_new_tmux_window(cmd)<block_end><else_stmt><block_start>subprocess.call(cmd shell=<true>)<line_sep>delay_refresh_detail(loop)<block_end>os.chdir(current_directory)<block_end># sublime text <elif_stmt>editor<eq>'sublime'<block_start>cmd='subl '+str(filepath)<line_sep>subprocess.call(cmd shell=<true>)<block_end><block_end><def_stmt>is_inside_tmux <block_start><return>'TMUX'<in>os.environ<block_end><def_stmt>open_in_new_tmux_window edit_cmd# close other panes if exist, so that the detail pane is the only pane <block_start><try_stmt><block_start>output=subprocess.check_output("tmux list-panes | wc -l" shell=<true>)<line_sep>num_pane=int(output)<if_stmt>num_pane<g>1<block_start>subprocess.check_call("tmux kill-pane -a" shell=<true>)<block_end><block_end><except_stmt>Exception<block_start><pass><block_end>cmd="tmux split-window -h"<line_sep>os.system(cmd)<line_sep>cmd="tmux send-keys -t right '%s' C-m"%edit_cmd<line_sep>os.system(cmd)<block_end>
<import_from_stmt>setuptools setup find_packages<line_sep>requires=['PyYAML==5.1']<line_sep>setup(name='swagger_to_uml' version='0.1' description='swagger_to_uml' classifiers=["Programming Language :: Python"] author='<NAME>' author_email='<EMAIL>' license='MIT' url='http://nlohmann.me' keywords='swagger uml plantuml' packages=find_packages() include_package_data=<true> zip_safe=<false> install_requires=requires tests_require=requires scripts=['bin/swagger_to_uml'])<line_sep>
#this demo shows how to create multiple screens, load and unload them properly without causing memory leak <import_stmt>lvgl<as>lv<import_stmt>lvgl_helper<as>lv_h<import_stmt>lcd<import_stmt>time<import_from_stmt>machine Timer<import_from_stmt>machine I2C<import_from_stmt>touch Touch TouchLow<import_stmt>KPU<as>kpu<import_stmt>gc<line_sep>config_touchscreen_support=<true><line_sep>board_m1n=<false><line_sep>lcd.init()<line_sep>TOUCH=<none><def_stmt>read_cb drv ptr<block_start>data=lv.indev_data_t.cast(ptr)<line_sep>TOUCH.event()<line_sep>data.point=lv.point_t({'x':TOUCH.points[1][0] 'y':TOUCH.points[1][1]})<line_sep>data.state=lv.INDEV_STATE.PR<if>TOUCH.state<eq>1<else>lv.INDEV_STATE.REL<line_sep><return><false><block_end><if_stmt>config_touchscreen_support<block_start>i2c=I2C(I2C.I2C0 freq=1000<times>1000 scl=24 sda=27)# 24 27) devices=i2c.scan()<line_sep>print("devs" devices)# devs 0 [16, 38, 52, 56] TouchLow.config(i2c)<line_sep>TOUCH=Touch(480 320 200)<block_end>lv.init()<line_sep>disp_buf1=lv.disp_buf_t()<line_sep>buf1_1=bytearray(320<times>10)<line_sep>lv.disp_buf_init(disp_buf1 buf1_1 <none> len(buf1_1)<floordiv>4)<line_sep>disp_drv=lv.disp_drv_t()<line_sep>lv.disp_drv_init(disp_drv)<line_sep>disp_drv.buffer=disp_buf1<line_sep>disp_drv.flush_cb=lv_h.flush<if_stmt>board_m1n<block_start>disp_drv.hor_res=240<line_sep>disp_drv.ver_res=240<block_end><else_stmt><block_start>disp_drv.hor_res=480<line_sep>disp_drv.ver_res=320<block_end>lv.disp_drv_register(disp_drv)<if_stmt>config_touchscreen_support<block_start>indev_drv=lv.indev_drv_t()<line_sep>lv.indev_drv_init(indev_drv)<line_sep>indev_drv.type=lv.INDEV_TYPE.POINTER<line_sep>indev_drv.read_cb=read_cb<line_sep>lv.indev_drv_register(indev_drv)<block_end>lv.log_register_print_cb(<lambda>level path line msg:print('%s(%d): %s'%(path line msg)))<class_stmt>UI<block_start><def_stmt>__init__ self<block_start>self.scr1=self.create_scr1()<line_sep>self.scr2=self.create_scr2()<block_end><def_stmt>create_scr1 
self<block_start>scr1=lv.obj()<line_sep>btn1=lv.btn(scr1)<line_sep>btn1.align(scr1 lv.ALIGN.CENTER 0 0)<line_sep>label1=lv.label(btn1)<line_sep>label1.set_text("Button 1")<line_sep>label1.set_size(20 20)<line_sep><return>scr1<block_end><def_stmt>create_scr2 self<block_start>scr2=lv.obj()<line_sep>btn2=lv.btn(scr2)<line_sep>btn2.align(scr2 lv.ALIGN.CENTER 0 0)<line_sep>label2=lv.label(btn2)<line_sep>label2.set_text("Button 2")<line_sep>label2.set_size(20 20)<line_sep><return>scr2<block_end><block_end>ui=UI()<line_sep>kpu.memtest()<def_stmt>on_timer timer<block_start>lv.tick_inc(5)<line_sep>lv.task_handler()<line_sep>gc.collect()<block_end>timer=Timer(Timer.TIMER0 Timer.CHANNEL0 mode=Timer.MODE_PERIODIC period=5 unit=Timer.UNIT_MS callback=on_timer arg=<none>)<while_stmt><true><block_start>tim=time.ticks_ms()<while_stmt>time.ticks_ms()-tim<l>500<block_start><pass><block_end>lv.scr_load(ui.scr1)<line_sep>kpu.memtest()<line_sep>tim=time.ticks_ms()<while_stmt>time.ticks_ms()-tim<l>500<block_start><pass><block_end>lv.scr_load(ui.scr2)<line_sep>kpu.memtest()<block_end>
""" Utility routines for the markov submodule """<import_stmt>numpy<as>np<import_from_stmt>numba jit<line_sep>@jit(nopython=<true> cache=<true>)<def_stmt>sa_indices num_states num_actions<block_start>""" Generate `s_indices` and `a_indices` for `DiscreteDP`, for the case where all the actions are feasible at every state. Parameters ---------- num_states : scalar(int) Number of states. num_actions : scalar(int) Number of actions. Returns ------- s_indices : ndarray(int, ndim=1) Array containing the state indices. a_indices : ndarray(int, ndim=1) Array containing the action indices. Examples -------- >>> s_indices, a_indices = qe.markov.sa_indices(4, 3) >>> s_indices array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]) >>> a_indices array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2]) """<line_sep>L=num_states<times>num_actions<line_sep>dtype=np.int_<line_sep>s_indices=np.empty(L dtype=dtype)<line_sep>a_indices=np.empty(L dtype=dtype)<line_sep>i=0<for_stmt>s range(num_states)<block_start><for_stmt>a range(num_actions)<block_start>s_indices[i]=s<line_sep>a_indices[i]=a<line_sep>i<augadd>1<block_end><block_end><return>s_indices a_indices<block_end>@jit(nopython=<true> cache=<true>)<def_stmt>_fill_dense_Q s_indices a_indices Q_in Q_out<block_start>L=Q_in.shape[0]<for_stmt>i range(L)<block_start>Q_out[s_indices[i] a_indices[i] :]=Q_in[i :]<block_end><return>Q_out<block_end>@jit(nopython=<true> cache=<true>)<def_stmt>_s_wise_max_argmax a_indices a_indptr vals out_max out_argmax<block_start>n=len(out_max)<for_stmt>i range(n)<block_start><if_stmt>a_indptr[i]<ne>a_indptr[i+1]<block_start>m=a_indptr[i]<for_stmt>j range(a_indptr[i]+1 a_indptr[i+1])<block_start><if_stmt>vals[j]<g>vals[m]<block_start>m=j<block_end><block_end>out_max[i]=vals[m]<line_sep>out_argmax[i]=a_indices[m]<block_end><block_end><block_end>@jit(nopython=<true> cache=<true>)<def_stmt>_s_wise_max a_indices a_indptr vals out_max<block_start>n=len(out_max)<for_stmt>i 
range(n)<block_start><if_stmt>a_indptr[i]<ne>a_indptr[i+1]<block_start>m=a_indptr[i]<for_stmt>j range(a_indptr[i]+1 a_indptr[i+1])<block_start><if_stmt>vals[j]<g>vals[m]<block_start>m=j<block_end><block_end>out_max[i]=vals[m]<block_end><block_end><block_end>@jit(nopython=<true> cache=<true>)<def_stmt>_find_indices a_indices a_indptr sigma out<block_start>n=len(sigma)<for_stmt>i range(n)<block_start><for_stmt>j range(a_indptr[i] a_indptr[i+1])<block_start><if_stmt>sigma[i]<eq>a_indices[j]<block_start>out[i]=j<block_end><block_end><block_end><block_end>@jit(nopython=<true> cache=<true>)<def_stmt>_has_sorted_sa_indices s_indices a_indices<block_start>""" Check whether `s_indices` and `a_indices` are sorted in lexicographic order. Parameters ---------- s_indices, a_indices : ndarray(ndim=1) Returns ------- bool Whether `s_indices` and `a_indices` are sorted. """<line_sep>L=len(s_indices)<for_stmt>i range(L-1)<block_start><if_stmt>s_indices[i]<g>s_indices[i+1]<block_start><return><false><block_end><if_stmt>s_indices[i]<eq>s_indices[i+1]<block_start><if_stmt>a_indices[i]<ge>a_indices[i+1]<block_start><return><false><block_end><block_end><block_end><return><true><block_end>@jit(nopython=<true> cache=<true>)<def_stmt>_generate_a_indptr num_states s_indices out<block_start>""" Generate `a_indptr`; stored in `out`. `s_indices` is assumed to be in sorted order. Parameters ---------- num_states : scalar(int) s_indices : ndarray(int, ndim=1) out : ndarray(int, ndim=1) Length must be num_states+1. """<line_sep>idx=0<line_sep>out[0]=0<for_stmt>s range(num_states-1)<block_start><while_stmt>(s_indices[idx]<eq>s)<block_start>idx<augadd>1<block_end>out[s+1]=idx<block_end>out[num_states]=len(s_indices)<block_end>
# coding: utf-8 <import_from_stmt>sogou_mrc.data.vocabulary Vocabulary<import_from_stmt>sogou_mrc.dataset.squad SquadReader SquadEvaluator<import_from_stmt>sogou_mrc.dataset.cmrc CMRCReader CMRCEvaluator<import_from_stmt>sogou_mrc.model.bidaf BiDAF<import_stmt>tensorflow<as>tf<import_stmt>logging<import_from_stmt>sogou_mrc.data.batch_generator BatchGenerator<line_sep>tf.logging.set_verbosity(tf.logging.ERROR)<line_sep>logging.basicConfig(level=logging.INFO format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')<line_sep>data_folder=''<line_sep>embedding_folder=''<line_sep>train_file=data_folder+"cmrc2018_train.json"<line_sep>dev_file=data_folder+"cmrc2018_dev.json"<line_sep>reader=CMRCReader()<line_sep>train_data=reader.read(train_file)<line_sep>eval_data=reader.read(dev_file)<line_sep>evaluator=CMRCEvaluator(dev_file)<line_sep>vocab=Vocabulary(do_lowercase=<false>)<line_sep>vocab.build_vocab(train_data+eval_data min_word_count=3 min_char_count=10)<line_sep>word_embedding=vocab.make_word_embedding(embedding_folder)<line_sep>train_batch_generator=BatchGenerator(vocab train_data batch_size=32 training=<true>)<line_sep>eval_batch_generator=BatchGenerator(vocab eval_data batch_size=60)<line_sep>model=BiDAF(vocab pretrained_word_embedding=word_embedding word_embedding_size=300)<line_sep>model.compile(tf.train.AdamOptimizer 0.001)<line_sep>model.train_and_evaluate(train_batch_generator eval_batch_generator evaluator epochs=50 eposides=2)<line_sep>
<import_stmt>pynes<import_from_stmt>pynes.bitbag *<if_stmt>__name__<eq>"__main__"<block_start>pynes.press_start()<line_sep>exit()<block_end>palette=[0x22 0x29 0x1A 0x0F 0x22 0x36 0x17 0x0F 0x22 0x30 0x21 0x0F 0x22 0x27 0x17 0x0F 0x22 0x16 0x27 0x18 0x22 0x1A 0x30 0x27 0x22 0x16 0x30 0x27 0x22 0x0F 0x36 0x17]<line_sep>chr_asset=import_chr('mario.chr')<line_sep>tinymario=define_sprite(108 144 [50 51 52 53] 0)<line_sep>mario=define_sprite(128 128 [0 1 2 3 4 5 6 7] 0)<line_sep>firemario=define_sprite(164 128 [0 1 2 3 4 5 6 7] 0)<def_stmt>reset <block_start>wait_vblank()<line_sep>clearmem()<line_sep>wait_vblank()<line_sep>load_palette(palette)<line_sep>load_sprite(tinymario 0)<line_sep>load_sprite(mario 4)<line_sep>load_sprite(firemario 12)<block_end><def_stmt>joypad1_up <block_start>get_sprite(mario).y<augsub>1<block_end><def_stmt>joypad1_down <block_start>get_sprite(mario).y<augadd>1<block_end><def_stmt>joypad1_left <block_start>get_sprite(mario).x<augsub>1<block_end><def_stmt>joypad1_right <block_start>get_sprite(mario).x<augadd>1<block_end>
# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_stmt>asyncio<def_stmt>run_async t<block_start>""" A wrapper that ensures that a test is run in an asyncio context. :param t: The test case to wrap. """<def_stmt>async_wrapper *args **kwargs<block_start>asyncio.run(t(*args **kwargs) debug=<true>)<block_end><return>async_wrapper<block_end>
# pragma: no cover <import_stmt>sys<import_stmt>asyncio<import_stmt>synapse.lib.aha<as>s_aha<if_stmt>__name__<eq>'__main__'# pragma: no cover <block_start>asyncio.run(s_aha.AhaCell.execmain(sys.argv[1:]))<block_end>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=wrong-import-position,invalid-name """ Test the cascader in the compilation flow. """<import_stmt>pytest<line_sep>pytest.importorskip("ethosu.vela")<import_stmt>numpy<as>np<import_stmt>tvm<import_from_stmt>tvm relay<import_from_stmt>tvm.relay.backend.contrib.ethosu.codegen _create_cascader<import_from_stmt>tvm.relay.backend.contrib.ethosu.tir.compiler _lower_to_tir<import_from_stmt>tvm.contrib.ethosu.cascader MemoryRegion EthosuDeviceConfig<import_from_stmt>.. infra<as>test_infra<import_from_stmt>. 
infra<as>cascader_test_infra<def_stmt>_ethos_u55_cascader <block_start>sram=MemoryRegion(name="SRAM" size=10<power>6 read_bandwidth=16 write_bandwidth=16 read_latency=0 write_latency=0 burst_length=1 )<line_sep>flash=MemoryRegion(name="FLASH" size=10<power>7 read_bandwidth=4 write_bandwidth=4)<line_sep>device_config=EthosuDeviceConfig("ethos-u55-256")<line_sep>cascader_options=cascader_test_infra.make_options(cascade_region=sram max_proposals=64 stripe_factors=4 max_plan_size=10 max_open_plans=8 max_closed_plans=32 always_copy_size=1024 disable_pareto_plans=<false> disable_pareto_proposals=<false> enable_striping=<false> )<line_sep><return>_create_cascader(options=cascader_options io_region=sram constant_region=flash working_regions=[sram] device_config=device_config )<block_end><def_stmt>_compile_model relay_function<block_start>mod=tvm.IRModule()<line_sep>mod["main"]=relay_function<line_sep>mod=relay.transform.InferType()(mod)<line_sep>tir_mod=_lower_to_tir(mod["main"] _ethos_u55_cascader())[0]<line_sep><return>tir_mod["main"]<block_end><def_stmt>_create_single_conv2d <block_start>ifm=relay.var("x" shape=(1 8 8 4) dtype="int8")<line_sep>conv1=test_infra.make_ethosu_conv2d(ifm 4 4 (3 3) (1 1) (1 1) (1 1))<line_sep>func=relay.Function(relay.analysis.free_vars(conv1) conv1)<line_sep><return>func<block_end><def_stmt>_create_double_conv2d <block_start>ifm=relay.var("x" shape=(1 8 8 4) dtype="int8")<line_sep>conv1=test_infra.make_ethosu_conv2d(ifm 4 4 (3 3) (1 1) (1 1) (1 1))<line_sep>conv2=test_infra.make_ethosu_conv2d(conv1 4 4 (1 3) (1 1) (1 1) (1 1))<line_sep>func=relay.Function(relay.analysis.free_vars(conv2) conv2)<line_sep><return>func<block_end><def_stmt>_create_scalar_add <block_start>ifm=relay.var("x" shape=(1 5 4 3) dtype="int8")<line_sep>ifm2=relay.const(np.ones((1 1 1 1)) dtype="int8")<line_sep>add=test_infra.make_ethosu_binary_elementwise(ifm ifm2 ifm_channels=3 ifm2_channels=1 operator_type="ADD" 
ofm_dtype="int8")<line_sep>func=relay.Function(relay.analysis.free_vars(add) add)<line_sep><return>func<block_end><def_stmt>test_single_conv_compute_cycles_hint <block_start>""" Check the "compute_cycles_hint" annotation remains in the lowering flow for single convolution. """<line_sep>primfunc=_compile_model(_create_single_conv2d())<line_sep>ops=primfunc.body.body.body.seq<line_sep>compute_cycles_hints=[2304 640 320]<for_stmt>op,compute_cycle_hint zip(ops compute_cycles_hints)<block_start><assert_stmt>op.attr_key<eq>"pragma_compute_cycles_hint"<assert_stmt>op.value<eq>compute_cycle_hint<block_end><block_end><def_stmt>test_double_conv_compute_cycles_hint <block_start>""" Check the "compute_cycles_hint" annotation remains in the lowering flow for double convolution. """<line_sep>primfunc=_compile_model(_create_double_conv2d())<line_sep>ops=primfunc.body.body.body.body.body.body.seq<line_sep>compute_cycles_hints=[2304 640 768 640 320 240]<for_stmt>op,compute_cycle_hint zip(ops compute_cycles_hints)<block_start><assert_stmt>op.attr_key<eq>"pragma_compute_cycles_hint"<assert_stmt>op.value<eq>compute_cycle_hint<block_end><block_end><def_stmt>test_scalar_add_compute_cycles_hint <block_start>""" Check the "compute_cycles_hint" annotation remains in the lowering flow for add with scalar values. """<line_sep>primfunc=_compile_model(_create_scalar_add())<line_sep>ops=primfunc.body.body.seq<line_sep>compute_cycles_hints=[16 24]<for_stmt>op,compute_cycle_hint zip(ops compute_cycles_hints)<block_start><assert_stmt>op.attr_key<eq>"pragma_compute_cycles_hint"<assert_stmt>op.value<eq>compute_cycle_hint<block_end><block_end>
__key_mapping={'return':'enter' 'up_arrow':'up' 'down_arrow':'down' 'left_arrow':'left' 'right_arrow':'right' 'page_up':'pageup' 'page_down':'pagedown' }<def_stmt>translate_key e<block_start><if_stmt>len(e.key)<g>0<block_start><return>__key_mapping[e.key]<if>e.key<in>__key_mapping<else>e.key<block_end><else_stmt><block_start><if_stmt>e.char<eq>'\x08'<block_start><return>'backspace'<block_end><elif_stmt>e.char<eq>'\t'<block_start><return>'tab'<block_end><else_stmt><block_start><return>e.key<block_end><block_end><block_end>
"""Testing handling with CoreState."""<import_from_stmt>supervisor.const CoreState<import_from_stmt>supervisor.coresys CoreSys<def_stmt>test_write_state run_dir coresys:CoreSys<block_start>"""Test write corestate to /run/supervisor."""<line_sep>coresys.core.state=CoreState.RUNNING<assert_stmt>run_dir.read_text()<eq>CoreState.RUNNING.value<line_sep>coresys.core.state=CoreState.SHUTDOWN<assert_stmt>run_dir.read_text()<eq>CoreState.SHUTDOWN.value<block_end>
# -*- coding: utf-8 -*- <import_from_stmt>keras.models load_model<import_stmt>numpy<as>np<import_stmt>os<import_stmt>cv2<import_from_stmt>FaceQNet load_Qnet_model face_quality<line_sep># Loading the pretrained model model=load_Qnet_model()<line_sep>IMG_PATH='/home/sai/YANG/image/video/nanning/haha'<line_sep>dir=os.listdir(IMG_PATH)<line_sep>count=len(dir)<line_sep>print('count:' count)<for_stmt>i dir<block_start>count<augsub>1<if_stmt>count%1000<eq>0<block_start>print('count:' count)<block_end>dir_path=os.path.join(IMG_PATH i)<line_sep>imgs_dir=os.listdir(dir_path)<for_stmt>j imgs_dir<block_start>img_path=os.path.join(dir_path j)<line_sep>img=cv2.imread(img_path)<line_sep>score=face_quality(model img)<line_sep># img = [cv2.resize(cv2.imread(img_path, cv2.IMREAD_COLOR), (224, 224))] # test_data = np.array(img, copy=False, dtype=np.float32) # score = model.predict(test_data, batch_size=1, verbose=1) path1=str(score[0][0])+'@'<line_sep>rename=path1+j<line_sep>os.rename(img_path os.path.join(dir_path rename))<block_end><block_end>
<import_from_stmt>django forms<import_from_stmt>imagetagger.annotations.models AnnotationType<class_stmt>AnnotationTypeCreationForm(forms.ModelForm)<block_start><class_stmt>Meta<block_start>model=AnnotationType<line_sep>fields=['name' 'active' 'node_count' 'vector_type' 'enable_concealed' 'enable_blurred' ]<block_end><block_end><class_stmt>AnnotationTypeEditForm(forms.ModelForm)<block_start><class_stmt>Meta<block_start>model=AnnotationType<line_sep>fields=['name' 'active' 'enable_concealed' 'enable_blurred' ]<block_end><block_end>
"""! @brief Test templates for ROCK clustering module. @authors <NAME> (<EMAIL>) @date 2014-2020 @copyright BSD-3-Clause """<import_from_stmt>pyclustering.cluster.rock rock<import_from_stmt>pyclustering.utils read_sample<import_from_stmt>random random<class_stmt>RockTestTemplates<block_start>@staticmethod<def_stmt>templateLengthProcessData path_to_file radius cluster_numbers threshold expected_cluster_length ccore<block_start>sample=read_sample(path_to_file)<line_sep>rock_instance=rock(sample radius cluster_numbers threshold ccore)<line_sep>rock_instance.process()<line_sep>clusters=rock_instance.get_clusters()<line_sep>length=sum([len(cluster)<for>cluster clusters])<assert_stmt>len(sample)<eq>length<line_sep>obtained_cluster_sizes=[len(cluster)<for>cluster clusters]<line_sep>obtained_cluster_sizes.sort()<line_sep>expected_cluster_length.sort()<assert_stmt>obtained_cluster_sizes<eq>expected_cluster_length<line_sep><block_end>@staticmethod<def_stmt>templateClusterAllocationOneDimensionData ccore_flag<block_start>input_data=[[random()]<for>i range(10)]+[[random()+3]<for>i range(10)]+[[random()+5]<for>i range(10)]+[[random()+8]<for>i range(10)]<line_sep>rock_instance=rock(input_data 1 4 0.5 ccore_flag)<line_sep>rock_instance.process()<line_sep>clusters=rock_instance.get_clusters()<assert_stmt>len(clusters)<eq>4<for_stmt>cluster clusters<block_start><assert_stmt>len(cluster)<eq>10<line_sep><block_end><block_end><block_end>
<for_stmt>i range(int(input()))<block_start>fact=1<line_sep>a=int(input())<for_stmt>j range(1 a+1 1)<block_start>fact=fact<times>j<block_end>print(fact)<block_end><def_stmt>factorial n<block_start><return>1<if>(n<eq>1<or>n<eq>0)<else>n<times>factorial(n-1)<line_sep><block_end>num=int(input('Enter number'))<line_sep>print("Factorial of" num "is" factorial(num))<line_sep>
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy of this # software and associated documentation files (the "Software"), to deal in the Software # without restriction, including without limitation the rights to use, copy, modify, # merge, publish, distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A # PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' mpirun -np 8 --H localhost:8 \ -bind-to none -map-by slot -mca pml ob1 -mca -x TF_CUDNN_USE_AUTOTUNE=0 \ -x TF_ENABLE_NHWC=1 -x FI_OFI_RXR_INLINE_MR_ENABLE=1 -x NCCL_TREE_THRESHOLD=4294967296 \ -x PATH -x NCCL_SOCKET_IFNAME=^docker0,lo -x NCCL_MIN_NRINGS=13 -x NCCL_DEBUG=INFO \ -x HOROVOD_CYCLE_TIME=0.5 -x HOROVOD_FUSION_THRESHOLD=67108864 python new_resnet.py --synthetic source activate tensorflow2_p36 && \ mpirun -np 8 --H localhost:8 -mca plm_rsh_no_tree_spawn 1 \ -bind-to socket -map-by slot \ -x HOROVOD_HIERARCHICAL_ALLREDUCE=1 -x HOROVOD_FUSION_THRESHOLD=16777216 \ -x NCCL_MIN_NRINGS=4 -x LD_LIBRARY_PATH -x PATH -mca pml ob1 -mca btl ^openib \ -x NCCL_SOCKET_IFNAME=$INTERFACE -mca btl_tcp_if_exclude lo,docker0 \ -x TF_CPP_MIN_LOG_LEVEL=0 \ python -W ignore ~/new_resnet.py \ --synthetic --batch_size 128 --num_batches 100 --clear_log 2 > train.log '''<import_stmt>os<import_stmt>numpy<as>np<import_stmt>getpass<import_stmt>tensorflow<as>tf<import_stmt>horovod.tensorflow<as>hvd<import_from_stmt>tensorflow.python.util 
nest<import_stmt>argparse<import_from_stmt>time time sleep<line_sep>@tf.function<def_stmt>parse record<block_start>features={'image/encoded':tf.io.FixedLenFeature(() tf.string) 'image/class/label':tf.io.FixedLenFeature(() tf.int64)}<line_sep>parsed=tf.io.parse_single_example(record features)<line_sep>image=tf.image.decode_jpeg(parsed['image/encoded'])<line_sep>image=tf.image.resize(image (224 224))<line_sep>image=tf.image.random_brightness(image .1)<line_sep>image=tf.image.random_jpeg_quality(image 70 100)<line_sep>image=tf.image.random_flip_left_right(image)<line_sep>image=tf.cast(image tf.float32)<line_sep>label=tf.cast(parsed['image/class/label']-1 tf.int32)<line_sep><return>image label<block_end><def_stmt>data_gen <block_start>input_shape=[224 224 3]<while_stmt><true><block_start>image=tf.random.uniform(input_shape)<line_sep>label=tf.random.uniform(minval=0 maxval=999 shape=[1] dtype=tf.int32)<line_sep><yield>image label<block_end><block_end><def_stmt>create_data data_dir=<none> synthetic=<false> batch_size=256<block_start><if_stmt>synthetic<block_start>ds=tf.data.Dataset.from_generator(data_gen output_types=(tf.float32 tf.int32))<block_end><else_stmt><block_start>filenames=[os.path.join(data_dir i)<for>i os.listdir(data_dir)]<line_sep>ds=tf.data.Dataset.from_tensor_slices(filenames).shard(hvd.size() hvd.rank())<line_sep>ds=ds.shuffle(1000 seed=7<times>(1+hvd.rank()))<line_sep>ds=ds.interleave(tf.data.TFRecordDataset cycle_length=1 block_length=1)<line_sep>ds=ds.map(parse num_parallel_calls=10)<line_sep>ds=ds.apply(tf.data.experimental.shuffle_and_repeat(10000 seed=5<times>(1+hvd.rank())))<block_end>ds=ds.batch(batch_size)<line_sep><return>ds<block_end>@tf.function<def_stmt>train_step model opt loss_func images labels first_batch<block_start><with_stmt>tf.GradientTape()<as>tape<block_start>probs=model(images training=<true>)<line_sep>loss_value=loss_func(labels probs)<block_end>tape=hvd.DistributedGradientTape(tape 
compression=hvd.Compression.fp16)<line_sep>grads=tape.gradient(loss_value model.trainable_variables)<line_sep>opt.apply_gradients(zip(grads model.trainable_variables))<if_stmt>first_batch<block_start>hvd.broadcast_variables(model.variables root_rank=0)<line_sep>hvd.broadcast_variables(opt.variables() root_rank=0)<block_end><return>loss_value<block_end><def_stmt>add_bool_argument cmdline shortname longname=<none> default=<false> help=<none><block_start><if_stmt>longname<is><none><block_start>shortname,longname=<none> shortname<block_end><elif_stmt>default<eq><true><block_start><raise>ValueError("""Boolean arguments that are True by default should not have short names.""")<block_end>name=longname[2:]<line_sep>feature_parser=cmdline.add_mutually_exclusive_group(required=<false>)<if_stmt>shortname<is><not><none><block_start>feature_parser.add_argument(shortname '--'+name dest=name action='store_true' help=help default=default)<block_end><else_stmt><block_start>feature_parser.add_argument('--'+name dest=name action='store_true' help=help default=default)<block_end>feature_parser.add_argument('--no'+name dest=name action='store_false')<line_sep><return>cmdline<block_end><def_stmt>add_cli_args <block_start>cmdline=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)<line_sep>cmdline.add_argument('--data_dir' default='' help="""Path to dataset in TFRecord format (aka Example protobufs). Files should be named 'train-*' and 'validation-*'.""")<line_sep>cmdline.add_argument('-b' '--batch_size' default=128 type=int help="""Size of each minibatch per GPU""")<line_sep>cmdline.add_argument('--num_batches' default=100 type=int help="""Number of batches to run. 
Ignored during eval or if num epochs given""")<line_sep>cmdline.add_argument('-lr' '--learning_rate' default=0.01 type=float help="""Start learning rate""")<line_sep>cmdline.add_argument('--momentum' default=0.01 type=float help="""Start learning rate""")<line_sep>add_bool_argument(cmdline '--synthetic' help="""Whether to use synthetic data for training""")<line_sep><return>cmdline<block_end><def_stmt>main # setup horovod <block_start>start=time()<line_sep>hvd.init()<line_sep>gpus=tf.config.experimental.list_physical_devices('GPU')<for_stmt>gpu gpus<block_start>tf.config.experimental.set_memory_growth(gpu <true>)<block_end><if_stmt>gpus<block_start>tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()] 'GPU')<block_end>os.environ['TF_CUDNN_DETERMINISTIC']='1'<line_sep># get command line args cmdline=add_cli_args()<line_sep>FLAGS,unknown_args=cmdline.parse_known_args()<line_sep>ds=create_data(FLAGS.data_dir FLAGS.synthetic FLAGS.batch_size)<line_sep>model=tf.keras.applications.ResNet50(weights=<none> classes=1000)<line_sep>opt=tf.keras.optimizers.SGD(learning_rate=FLAGS.learning_rate<times>hvd.size() momentum=0.1)<line_sep>loss_func=tf.keras.losses.SparseCategoricalCrossentropy()<line_sep>loop_time=time()<if_stmt>hvd.local_rank()<eq>0<block_start>print("Step \t Throughput \t Loss")<block_end><for_stmt>batch,(images labels) enumerate(ds)<block_start>loss=train_step(model opt loss_func images labels batch<eq>0)<if_stmt>hvd.local_rank()<eq>0<block_start>duration=time()-loop_time<line_sep>loop_time=time()<line_sep>throughput=(hvd.size()<times>FLAGS.batch_size)/duration<line_sep>print("{} \t images/sec: {} \t {}".format(batch throughput loss))<block_end><if_stmt>batch<eq>FLAGS.num_batches<block_start><break><block_end><block_end><if_stmt>hvd.rank()<eq>0<block_start>print("\nFinished in {}".format(time()-start))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_stmt>time<import_stmt>pytest<line_sep>@pytest.mark.parametrize("index" range(7))<def_stmt>test_cat index<block_start>"""Perform several tests with varying execution times."""<line_sep>time.sleep(0.2+(index<times>0.1))<assert_stmt><true><block_end>
<import_stmt>networkx<as>nx<import_stmt>random<import_from_stmt>osp.common.utils query_bar<import_from_stmt>osp.graphs.graph Graph<import_from_stmt>osp.citations.models Text Citation Text_Index<import_from_stmt>osp.corpus.models Document<import_from_stmt>itertools combinations<import_from_stmt>peewee fn<import_from_stmt>clint.textui progress<class_stmt>OSP_Graph(Graph)<block_start><def_stmt>add_edges self max_texts=20<block_start>""" For each syllabus, register citation pairs as edges. Args: max_texts (int): Ignore docs with > than N citations. """<line_sep>text_ids=(fn.array_agg(Text.id).coerce(<false>).alias('text_ids'))<line_sep>docs=(Citation.select(Citation.document text_ids).join(Text).having(fn.count(Text.id)<le>max_texts).where(Text.display<eq><true>).where(Text.valid<eq><true>).group_by(Citation.document))<for_stmt>row query_bar(docs)<block_start><for_stmt>tid1,tid2 combinations(row.text_ids 2)# If the edge exists, increment the weight. <block_start><if_stmt>self.graph.has_edge(tid1 tid2)<block_start>self.graph[tid1][tid2]['weight']<augadd>1<block_end># Otherwise, initialize the edge. <else_stmt><block_start>self.graph.add_edge(tid1 tid2 weight=1)<block_end><block_end><block_end><block_end><def_stmt>add_nodes self<block_start>""" Register displayed texts. """<for_stmt>t progress.bar(Text_Index.rank_texts())<block_start>text=t['text']<line_sep>self.graph.add_node(text.id dict(label=text.pretty('title') author=text.pretty('surname') count=text.count score=t['score'] ))<block_end><block_end><def_stmt>trim_unconnected_components self<block_start>""" Remove all but the largest connected component. """<line_sep>subgraphs=sorted(nx.connected_component_subgraphs(self.graph) key=len reverse=<true>)<line_sep>self.graph=subgraphs[0]<block_end><def_stmt>trim_texts_by_count self min_count=100<block_start>""" Remove all texts with counts below a threshold. 
Args: min_count (int) """<for_stmt>tid,text self.graph.nodes(data=<true>)<block_start><if_stmt>text['count']<l>min_count<block_start>self.graph.remove_node(tid)<block_end><block_end><block_end><def_stmt>trim_edges self keep=0.5<block_start>""" Randomly prune a certain percentage of edges. Args: keey (float) """<for_stmt>tid1,tid2 self.graph.edges()<block_start><if_stmt>random.random()<g>keep<block_start>self.graph.remove_edge(tid1 tid2)<block_end><block_end><block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>RecoJets.JetProducers.PFClusterJetParameters_cfi *<import_from_stmt>RecoJets.JetProducers.AnomalousCellParameters_cfi *<line_sep>ak4PFClusterJets=cms.EDProducer("FastjetJetProducer" PFClusterJetParameters AnomalousCellParameters jetAlgorithm=cms.string("AntiKt") rParam=cms.double(0.4))<line_sep>
<class_stmt>ProjectListing(object)<block_start>@staticmethod<def_stmt>list_projects redis_connection<block_start>"""Returns a list of projects store in redis with their creation timestamps Arguments: redis_connection {RedisConnection} -- Redis connection to use as a provider for data Returns: list -- The list of project names and creation dates """<import_from_stmt>foundations_contrib.utils string_from_bytes<line_sep>projects=redis_connection.zrange('projects' 0 -1 withscores=<true>)<line_sep><return>[{'name':string_from_bytes(name) 'created_at':created_at}<for>name,created_at projects]<block_end>@staticmethod<def_stmt>find_project redis_connection project_name<block_start>"""Returns a single of projects store in redis with it's creation timestamp Arguments: redis_connection {RedisConnection} -- Redis connection to use as a provider for data project_name {str} -- Name of the project to find Returns: dict -- The dictionary of the 2 attribute from the description above or None if the project does not exist """<line_sep>created_at=redis_connection.zscore('projects' project_name)<if_stmt>created_at<is><none><block_start><return><none><block_end><return>{'name':project_name 'created_at':created_at}<block_end><block_end>
""" developed by Quantsbin - Jun'18 """<import_from_stmt>enum Enum<class_stmt>AssetClass(Enum)<block_start>EQOPTION='EqOption'<line_sep>FXOPTION='FXOption'<line_sep>FUTOPTION='FutOption'<line_sep>COMOPTION='ComOption'<block_end><class_stmt>DerivativeType(Enum)<block_start>VANILLA_OPTION='Vanilla Option'<block_end><class_stmt>PricingModel(Enum)<block_start>BLACKSCHOLESMERTON='BSM'<line_sep>BLACK76='B76'<line_sep>GK='GK'<line_sep>MC_GBM="MC_GBM"<line_sep>MC_GBM_LSM="MC_GBM_LSM"<line_sep>BINOMIAL="Binomial"<block_end><class_stmt>UnderlyingParameters(Enum)<block_start>SPOT="spot0"<line_sep>VOLATILITY="volatility"<line_sep>PRICEDATE="_pricing_date"<line_sep>RF_RATE="rf_rate"<line_sep>CNV_YIELD="cnv_yield"<line_sep>COST_YIELD="cost_yield"<line_sep>UNEXPLAINED="unexplained"<block_end><class_stmt>RiskParameter(Enum)<block_start>DELTA='delta'<line_sep>GAMMA='gamma'<line_sep>THETA='theta'<line_sep>VEGA='vega'<line_sep>RHO='rho'<line_sep>PHI='phi'<line_sep>RHO_FOREIGN='rho_foreign'<line_sep>RHO_CONV='rho_conv_yield'<block_end><class_stmt>VanillaOptionType(Enum)<block_start>CALL='Call'<line_sep>PUT='Put'<block_end><class_stmt>ExpiryType(Enum)<block_start>AMERICAN='American'<line_sep>EUROPEAN='European'<block_end><class_stmt>UdlType(Enum)<block_start>INDEX='Index'<line_sep>STOCK='Stock'<line_sep>FX='Currency'<line_sep>COMMODITY='Commodity'<line_sep>FUTURES='Futures'<block_end><class_stmt>DivType(Enum)<block_start>DISCRETE='Discrete'<line_sep>YIELD='Yield'<block_end>OBJECT_MODEL={UdlType.STOCK.value:{ExpiryType.EUROPEAN.value:[PricingModel.BLACKSCHOLESMERTON.value PricingModel.MC_GBM.value PricingModel.BINOMIAL.value] ExpiryType.AMERICAN.value:[PricingModel.MC_GBM.value PricingModel.BINOMIAL.value]} UdlType.FUTURES.value:{ExpiryType.EUROPEAN.value:[PricingModel.BLACK76.value PricingModel.MC_GBM.value PricingModel.BINOMIAL.value] ExpiryType.AMERICAN.value:[PricingModel.MC_GBM.value PricingModel.BINOMIAL.value]} UdlType.FX.value:{ExpiryType.EUROPEAN.value:[PricingModel.GK.value 
PricingModel.MC_GBM.value PricingModel.BINOMIAL.value] ExpiryType.AMERICAN.value:[PricingModel.MC_GBM.value PricingModel.BINOMIAL.value]} UdlType.COMMODITY.value:{ExpiryType.EUROPEAN.value:[PricingModel.GK.value PricingModel.MC_GBM.value PricingModel.BINOMIAL.value] ExpiryType.AMERICAN.value:[PricingModel.MC_GBM.value PricingModel.BINOMIAL.value]}}<line_sep>DEFAULT_MODEL={UdlType.STOCK.value:{DerivativeType.VANILLA_OPTION.value:{ExpiryType.EUROPEAN.value:PricingModel.BLACKSCHOLESMERTON.value ExpiryType.AMERICAN.value:PricingModel.BINOMIAL.value} } UdlType.FUTURES.value:{DerivativeType.VANILLA_OPTION.value:{ExpiryType.EUROPEAN.value:PricingModel.BLACK76.value ExpiryType.AMERICAN.value:PricingModel.BINOMIAL.value} } UdlType.FX.value:{DerivativeType.VANILLA_OPTION.value:{ExpiryType.EUROPEAN.value:PricingModel.GK.value ExpiryType.AMERICAN.value:PricingModel.BINOMIAL.value} } UdlType.COMMODITY.value:{DerivativeType.VANILLA_OPTION.value:{ExpiryType.EUROPEAN.value:PricingModel.GK.value ExpiryType.AMERICAN.value:PricingModel.BINOMIAL.value} }}<line_sep>IV_MODELS=[PricingModel.BLACKSCHOLESMERTON.value PricingModel.BLACK76.value PricingModel.GK.value]<line_sep>ANALYTICAL_GREEKS=[PricingModel.BLACKSCHOLESMERTON.value PricingModel.BLACK76.value PricingModel.GK.value]<import_from_stmt>. pricingmodels<as>pm<line_sep>MODEL_MAPPER={PricingModel.BLACKSCHOLESMERTON.value:pm.BSM PricingModel.BLACK76.value:pm.B76 PricingModel.GK.value:pm.GK PricingModel.MC_GBM.value:pm.MonteCarloGBM PricingModel.BINOMIAL.value:pm.BinomialModel}<line_sep>
<import_stmt>os<import_from_stmt>oic.utils.jwt JWT<import_from_stmt>oic.utils.keyio build_keyjar<import_from_stmt>oic.utils.keyio keybundle_from_local_file<line_sep>__author__="roland"<line_sep>BASE_PATH=os.path.abspath(os.path.join(os.path.dirname(__file__) "data/keys"))<line_sep>keys=[{"type":"RSA" "key":os.path.join(BASE_PATH "cert.key") "use":["enc" "sig"]} {"type":"EC" "crv":"P-256" "use":["sig"]} {"type":"EC" "crv":"P-256" "use":["enc"]} ]<line_sep>jwks,keyjar,kidd=build_keyjar(keys)<line_sep>issuer="https://fedop.example.org"<def_stmt>_eq l1 l2<block_start><return>set(l1)<eq>set(l2)<block_end><def_stmt>test_jwt_pack <block_start>_jwt=JWT(keyjar lifetime=3600 iss=issuer).pack()<assert_stmt>_jwt<assert_stmt>len(_jwt.split("."))<eq>3<block_end><def_stmt>test_jwt_pack_and_unpack <block_start>srv=JWT(keyjar iss=issuer)<line_sep>_jwt=srv.pack(sub="sub")<line_sep>info=srv.unpack(_jwt)<assert_stmt>_eq(info.keys() ["jti" "iat" "exp" "iss" "sub" "kid"])<block_end><class_stmt>TestJWT(object)<block_start>"""Tests for JWT."""<def_stmt>test_unpack_verify_key self<block_start>srv=JWT(keyjar iss=issuer)<line_sep>_jwt=srv.pack(sub="sub")<line_sep># Remove the signing key from keyjar keyjar.remove_key("" "RSA" "")<line_sep># And add it back as verify kb=keybundle_from_local_file(os.path.join(BASE_PATH "cert.key") "RSA" ["ver"])<line_sep># keybundle_from_local_file doesn'assign kid, so assign manually kb._keys[0].kid=kidd["sig"]["RSA"]<line_sep>keyjar.add_kb("" kb)<line_sep>info=srv.unpack(_jwt)<assert_stmt>info["sub"]<eq>"sub"<block_end><block_end>
""" Queries of issue queries """<def_stmt>gql_issues fragment<block_start>""" Return the GraphQL issues query """<line_sep><return>f''' query ($where: IssueWhere!, $first: PageSize!, $skip: Int!) {{ data: issues(where: $where, first: $first, skip: $skip) {{ {fragment} }} }} '''<block_end>GQL_ISSUES_COUNT=''' query($where: IssueWhere!) { data: countIssues(where: $where) } '''<line_sep>
<import_from_stmt>sportsdb_setup HGETestSetup HGETestSetupArgs<import_from_stmt>run_hge HGE<import_stmt>graphql<import_stmt>multiprocessing<import_stmt>json<import_stmt>os<import_stmt>docker<import_stmt>ruamel.yaml<as>yaml<import_stmt>cpuinfo<import_stmt>subprocess<import_stmt>threading<import_stmt>time<import_stmt>datetime<import_from_stmt>colorama Fore Style<import_from_stmt>plot run_dash_server<import_stmt>webbrowser<import_stmt>pathlib<import_from_stmt>urllib.parse urlparse urlunparse<import_stmt>boto3<line_sep>fileLoc=os.path.dirname(os.path.abspath(__file__))<def_stmt>uri_path_join uri *paths<block_start>p=urlparse(uri)<line_sep>new_path=os.path.join(p.path *paths)<line_sep><return>urlunparse(p._replace(path=new_path))<block_end><class_stmt>HGEWrkBench(HGETestSetup)<block_start>wrk_docker_image='hasura/wrk:v0.3'<line_sep># We'll bind mount the lua script dir to this directory within the wrk container: lua_dir='/tmp/bench_scripts'<line_sep>rps_steps=[10 20 50 100 200 500 1000 2000 5000]<def_stmt>__init__ self pg_url remote_pg_url pg_docker_image hge_url=<none> remote_hge_url=<none> hge_docker_image=<none> hge_args=[] skip_stack_build=<false> graphql_queries_file='queries.graphql' connections=50 duration=300 results_hge_url=<none> results_hge_admin_secret=<none><block_start>self.load_queries(graphql_queries_file)<line_sep>super().__init__(pg_url=pg_url remote_pg_url=remote_pg_url pg_docker_image=pg_docker_image hge_url=hge_url remote_hge_url=remote_hge_url hge_docker_image=hge_docker_image hge_args=hge_args skip_stack_build=skip_stack_build)<line_sep>self.connections=connections<line_sep>self.duration=duration<line_sep>self.results_hge_url=results_hge_url<line_sep>self.results_hge_admin_secret=results_hge_admin_secret<line_sep>self.extract_cpu_info()<line_sep># NOTE: we generally want to do this just once; otherwise if we happen # to be editing the tree while this script is running the shasum will # keep changing: 
self.server_shasum=self.get_server_shasum()<block_end><def_stmt>load_queries self graphql_queries_file<block_start>self.graphql_queries_file=graphql_queries_file<with_stmt>open(self.graphql_queries_file)<as>f<block_start>queries=f.read()<block_end>self.query_names=[]<line_sep>self.queries=[]<for_stmt>oper graphql.parse(queries).definitions<block_start>self.query_names.append(oper.name.value)<line_sep>self.queries.append(oper)<block_end><block_end><def_stmt>get_wrk2_params self<block_start>cpu_count=multiprocessing.cpu_count()<line_sep><return>{'threads':cpu_count 'connections':self.connections 'duration':self.duration}<block_end><def_stmt>get_current_user self<block_start><return>'{}:{}'.format(os.geteuid() os.getegid())<block_end><def_stmt>wrk2_test self query rps<block_start><def_stmt>upload_files files<block_start><if_stmt>self.upload_root_uri<block_start>p=urlparse(self.upload_root_uri)<if_stmt>p.scheme<eq>'s3'<block_start>bucket=p.netloc<line_sep>key=p.path.lstrip('/')<line_sep>s3_client=boto3.client('s3')<for_stmt>(f f_key) files<block_start>s3_client.upload_file(f bucket os.path.join(key f_key))<block_end><block_end><block_end><block_end>query_str=graphql.print_ast(query)<line_sep>params=self.get_wrk2_params()<line_sep>print(Fore.GREEN+"Running benchmark wrk2 for at {} req/s (duration: {}) for query\n".format(rps params['duration']) query_str+Style.RESET_ALL)<line_sep>bench_script=os.path.join(self.lua_dir 'bench-wrk2.lua')<line_sep>graphql_url=self.hge.url+'/v1/graphql'<line_sep>timestamp=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')<line_sep>results_dir=self.results_root_dir<line_sep>tests_path=[str(rps) timestamp]<line_sep>results_dir=os.path.join(results_dir *tests_path)<line_sep>os.makedirs(results_dir exist_ok=<true>)<line_sep>wrk2_command=['wrk2' '-R' str(rps) '-t' str(params['threads']) '-c' str(params['connections']) '-d' str(params['duration']) '--latency' '-s' bench_script graphql_url query_str 
results_dir]<line_sep>volumes=self.get_scripts_vol()<line_sep>volumes[results_dir]={'bind':results_dir 'mode':'rw'}<line_sep>self.docker_client=docker.from_env()<line_sep>result=self.docker_client.containers.run(self.wrk_docker_image detach=<false> stdout=<true> stderr=<false> command=wrk2_command network_mode='host' environment=self.get_lua_env() volumes=volumes remove=<true> user=self.get_current_user()).decode('ascii')<line_sep>histogram_file=os.path.join(results_dir 'latencies.hgrm')<line_sep>histogram=self.get_latency_histogram(result histogram_file)<line_sep>summary_file=os.path.join(results_dir 'summary.json')<with_stmt>open(summary_file)<as>f<block_start>summary=json.load(f)<block_end>latencies_file=os.path.join(results_dir 'latencies')<def_stmt>extract_data v<block_start><return>v['data']<if>isinstance(v dict)<and>'data'<in>v<else>v<block_end>tests_info={k:extract_data(v)<for>(k v) self.gen_test_info(query rps).items()}<line_sep>tests_setup_file=os.path.join(results_dir 'test_setup.json')<with_stmt>open(tests_setup_file 'w')<as>f<block_start>f.write(json.dumps(tests_info indent=2))<block_end>upload_files([(x os.path.join(*tests_path y))<for>(x y) [(summary_file 'summary.json') (latencies_file 'latencies') (histogram_file 'latencies.hgrm') (tests_setup_file 'test_setup.json')]])<if_stmt>self.upload_root_uri<block_start>latencies_uri=uri_path_join(self.upload_root_uri *tests_path 'latencies')<block_end><else_stmt><block_start>latencies_uri=pathlib.Path(latencies_file).as_uri()<block_end>self.insert_result(query rps summary histogram latencies_uri)<line_sep><return>(summary histogram)<block_end><def_stmt>get_latency_histogram self result write_histogram_file<block_start>const_true=<lambda>l:<true><line_sep>state_changes={'start':{(<lambda>l:'Detailed Percentile spectrum'<in>l):'histogram_start'} 'histogram_start':{(<lambda>l:'Value'<in>l<and>'Percentile'<in>l):'histogram_headers'} 'histogram_headers':{const_true:'histogram_empty_line'} 
'histogram_empty_line':{const_true:'histogram_values'} 'histogram_values':{(<lambda>l:l.strip().startswith('#')):'histogram_summary'} 'histogram_summary':{(<lambda>l:<not>l.strip().startswith('#')):'histogram_end'}}<line_sep>state='start'<line_sep>histogram=[]<line_sep>print(Fore.CYAN+"Latency histogram summary"+Style.RESET_ALL)<with_stmt>open(write_histogram_file 'w')<as>f<block_start><for_stmt>line result.splitlines()# Change the state <block_start><for_stmt>(check next_state) state_changes[state].items()<block_start><if_stmt>check(line)<block_start>state=next_state<line_sep><break><block_end><block_end><if_stmt>state<eq>'start'<block_start><continue><block_end><elif_stmt>state<eq>'histogram_end'<block_start><break><block_end><if_stmt>state<eq>'histogram_summary'<block_start>print(Fore.CYAN+line+Style.RESET_ALL)<block_end><if_stmt>state<in>['histogram_headers' 'histogram_values' 'histogram_summary']<block_start>f.write(line+'\n')<block_end><if_stmt>state<eq>'histogram_values'<block_start>(val percentile total_count _)=line.strip().split()<line_sep>histogram.append({'percentile':float(percentile) 'latency':float(val) 'total_count':float(total_count)})<block_end><block_end><block_end><return>histogram<block_end># The appropriate Lua env vars for execution within wrk container: <def_stmt>get_lua_env self<block_start><return>{'LUA_PATH':'/usr/share/lua/5.1/?.lua;'+os.path.join(self.lua_dir '?.lua')+';;' 'LUA_CPATH':'/usr/lib/lua/5.1/?.so;/usr/lib/x86_64-linux-gnu/lua/5.1/?.so;;'}<block_end><def_stmt>get_scripts_vol self<block_start><return>{os.path.join(fileLoc 'wrk-websocket-server' 'bench_scripts'):{'bind':self.lua_dir 'mode':'ro'}}<block_end><def_stmt>max_rps_test self query<block_start>query_str=graphql.print_ast(query)<line_sep>print(Fore.GREEN+"(Compute maximum Request per second) Running wrk benchmark for query\n" query_str+Style.RESET_ALL)<line_sep>self.hge.graphql_q(query_str)# Test query once for errors 
bench_script=os.path.join(self.lua_dir+'/bench-wrk.lua')<line_sep>graphql_url=self.hge.url+'/v1/graphql'<line_sep>params=self.get_wrk2_params()<line_sep>duration=30<line_sep>wrk_command=['wrk' '-t' str(params['threads']) '-c' str(params['connections']) '-d' str(duration) '--latency' '-s' bench_script graphql_url query_str]<line_sep>self.docker_client=docker.from_env()<line_sep>result=self.docker_client.containers.run(self.wrk_docker_image detach=<false> stdout=<false> stderr=<true> command=wrk_command network_mode='host' environment=self.get_lua_env() volumes=self.get_scripts_vol() remove=<true> user=self.get_current_user())<line_sep>summary=json.loads(result)['summary']<line_sep># TODO explain this calculation. Why aren't we using wrk's reported 'max'? Should we call this avg_sustained_rps or something? max_rps=round(summary['requests']/float(duration))<line_sep>self.insert_max_rps_result(query max_rps)<line_sep>print("Max RPS" max_rps)<line_sep><return>max_rps<block_end><def_stmt>get_version self<block_start>script=os.path.join(fileLoc 'gen-version.sh')<line_sep><return>subprocess.check_output([script]).decode('ascii').strip()<block_end><def_stmt>get_server_shasum self<block_start>script=os.path.join(fileLoc 'get-server-sha.sh')<line_sep><return>subprocess.check_output([script]).decode('ascii').strip()<block_end><def_stmt>extract_cpu_info self<block_start>self.cpu_info=cpuinfo.get_cpu_info()<for_stmt>k ['flags' 'python_version' 'hz_actual' 'hz_actual_raw']<block_start><if_stmt>self.cpu_info.get(k)<block_start><del_stmt>self.cpu_info[k]<block_end><block_end><block_end><def_stmt>get_results self<block_start>query=''' query results { latency: hge_bench_latest_results { query_name requests_per_sec docker_image version latencies_uri latency_histogram { percentile latency } } max_rps: hge_bench_avg_query_max_rps { query_name docker_image version max_rps } } 
'''<line_sep>output=self.results_hge.graphql_q(query)<line_sep><return>output['data']<block_end><def_stmt>set_cpu_info self insert_var<block_start>cpu_key=self.cpu_info['brand']+' vCPUs: '+str(self.cpu_info['count'])<line_sep>insert_var['cpu']={'data':{'info':self.cpu_info 'key':cpu_key} "on_conflict":{"constraint":"cpu_info_pkey" "update_columns":"key"}}<block_end><def_stmt>set_query_info self insert_var query<block_start>insert_var["query"]={"data":{"name":query.name.value "query":graphql.print_ast(query)} "on_conflict":{"constraint":"gql_query_query_key" "update_columns":"query"}}<block_end>#TODO add executable shasum also <def_stmt>set_version_info self insert_var<block_start><if_stmt>self.hge_docker_image<block_start>insert_var["docker_image"]=self.hge_docker_image<block_end><else_stmt><block_start>insert_var["version"]=self.get_version()<line_sep>insert_var["server_shasum"]=self.server_shasum<block_end>insert_var['postgres_version']=self.pg.get_server_version()<if_stmt>self.scenario_name<block_start>insert_var['scenario_name']=self.scenario_name<block_end><block_end><def_stmt>set_hge_args_env_vars self insert_var<block_start>to_hide_env=['HASURA_GRAPHQL_'+env<for>env ['ADMIN_SECRET' 'DATABASE_URL' 'JWT_SECRET']]<line_sep>env={k:v<for>(k v) self.hge.get_hge_env().items()<if>(k.startswith('HASURA_GRAPHQL')<and>k<not><in>to_hide_env)<or>k<in>['GHCRTS']}<line_sep>args=self.hge.args<line_sep>insert_var['hge_conf']={'env':env 'args':args}<block_end><def_stmt>gen_max_rps_insert_var self query max_rps<block_start>insert_var=dict()<line_sep>self.set_cpu_info(insert_var)<line_sep>self.set_query_info(insert_var query)<line_sep>self.set_version_info(insert_var)<line_sep>self.set_hge_args_env_vars(insert_var)<line_sep>insert_var['max_rps']=max_rps<line_sep>insert_var['wrk_parameters']=self.get_wrk2_params()<line_sep><return>insert_var<block_end><def_stmt>plot_results self<block_start><def_stmt>open_plot_in_browser 
<block_start>time.sleep(1)<line_sep>webbrowser.open_new_tab('http://127.0.0.1:8050/')<block_end>threading.Thread(target=open_plot_in_browser).start()<line_sep>run_dash_server(self.get_results())<block_end># Collect info about the test environment <def_stmt>gen_test_info self query rps<block_start>test_info=dict()<line_sep>self.set_cpu_info(test_info)<line_sep>self.set_query_info(test_info query)<line_sep>self.set_version_info(test_info)<line_sep>self.set_hge_args_env_vars(test_info)<line_sep>test_info["requests_per_sec"]=rps<line_sep>test_info['wrk2_parameters']=self.get_wrk2_params()<line_sep><return>test_info<block_end><def_stmt>gen_result_insert_var self query rps summary latency_histogram latencies_uri<block_start>insert_var=self.gen_test_info(query rps)<line_sep>insert_var["summary"]=summary<line_sep>insert_var['latency_histogram']={'data':latency_histogram}<line_sep>insert_var['latencies_uri']=latencies_uri<line_sep><return>insert_var<block_end><def_stmt>insert_result self query rps summary latency_histogram latencies_uri<block_start>result_var=self.gen_result_insert_var(query rps summary latency_histogram latencies_uri)<line_sep>insert_query=""" mutation insertResult($result: hge_bench_results_insert_input!) { insert_hge_bench_results(objects: [$result]){ affected_rows } }"""<line_sep>variables={'result':result_var}<line_sep>self.results_hge.graphql_q(insert_query variables)<block_end><def_stmt>insert_max_rps_result self query max_rps<block_start>result_var=self.gen_max_rps_insert_var(query max_rps)<line_sep>insert_query=""" mutation insertMaxRps($result: hge_bench_query_max_rps_insert_input!) 
{ insert_hge_bench_query_max_rps(objects: [$result]){ affected_rows } }"""<line_sep>variables={'result':result_var}<line_sep>self.results_hge.graphql_q(insert_query variables)<block_end><def_stmt>setup_results_schema self<block_start><if_stmt><not>self.results_hge_url<block_start>self.results_hge_url=self.hge.url<line_sep>self.results_hge_admin_secret=self.hge.admin_secret()<block_end><if_stmt>self.results_hge_admin_secret<block_start>results_hge_args=['--admin-secret' self.results_hge_admin_secret]<block_end><else_stmt><block_start>results_hge_args=[]<block_end>self.results_hge=HGE(<none> <none> args=results_hge_args log_file=<none> url=self.results_hge_url)<line_sep>results_table={'name':'results' 'schema':'hge_bench'}<if_stmt>results_table<in>self.results_hge.get_all_tracked_tables()<block_start><return><block_end>schema_file=os.path.join(fileLoc 'results_schema.yaml')<with_stmt>open(schema_file)<as>f<block_start>queries=yaml.safe_load(f)<block_end>self.results_hge.run_bulk(queries)<block_end><def_stmt>run_query_benchmarks self<block_start><def_stmt>get_results_root_dir query<block_start><if_stmt>self.hge_docker_image<block_start>ver_info='docker-tag-'+self.hge_docker_image.split(':')[1]<block_end><else_stmt><block_start>ver_info=self.get_version()<block_end>query_name=query.name.value<line_sep># Store versioned runs under e.g. 
test_output/benchmark_runs/<hge_version>/ results_root_dir=os.path.abspath(os.path.join(self.work_dir 'benchmark_runs'))<line_sep><return>os.path.join(results_root_dir ver_info query_name)<block_end><for_stmt>query self.queries<block_start><try_stmt><block_start>self.results_root_dir=get_results_root_dir(query)<line_sep>max_rps=self.max_rps_test(query)<line_sep># The tests should definitely not be running very close to or higher than maximum requests per second rps_steps=[r<for>r self.rps_steps<if>r<l>0.6<times>max_rps]<line_sep>print("Benchmarking queries with wrk2 for the following requests/sec" rps_steps)<for_stmt>rps rps_steps<block_start><if_stmt>rps<l>int(0.6<times>max_rps)<block_start>self.wrk2_test(query rps)<block_end><block_end><block_end><except_stmt>Exception<block_start>print(Fore.RED+"Benchmarking Graphql Query '"+query.name.value+"' failed"+Style.RESET_ALL)<line_sep><raise><block_end><block_end><block_end><def_stmt>run_tests self<block_start><with_stmt>self.graphql_engines_setup()<block_start>self.setup_results_schema()<if_stmt>self.run_benchmarks<block_start>self.run_query_benchmarks()<block_end><if_stmt><not>self.skip_plots<block_start>self.plot_results()<block_end><block_end><block_end><block_end><class_stmt>HGEWrkBenchArgs(HGETestSetupArgs)<block_start><def_stmt>__init__ self<block_start>self.set_arg_parse_options()<line_sep>self.parse_args()<block_end><def_stmt>set_arg_parse_options self<block_start>HGETestSetupArgs.set_arg_parse_options(self)<line_sep>self.set_wrk_options()<block_end><def_stmt>parse_args self<block_start>HGETestSetupArgs.parse_args(self)<line_sep>self.parse_wrk_options()<block_end><def_stmt>set_wrk_options self<block_start><def_stmt>boolean_string s<block_start>s=s.lower()<if_stmt>s<not><in>{'false' 'true'}<block_start><raise>ValueError('Not a valid boolean string')<block_end><return>s<eq>'true'<block_end>wrk_opts=self.arg_parser.add_argument_group('wrk')<line_sep>wrk_opts.add_argument('--queries-file' 
metavar='HASURA_BENCH_QUERIES_FILE' help='Queries file for benchmarks' default='queries.graphql')<line_sep>wrk_opts.add_argument('--connections' metavar='HASURA_BENCH_CONNECTIONS' help='Total number of open connections' default=50)<line_sep>wrk_opts.add_argument('--duration' metavar='HASURA_BENCH_DURATION' help='Duration of tests in seconds' default=300)<line_sep>wrk_opts.add_argument('--upload-root-uri' metavar='HASURA_BENCH_UPLOAD_ROOT_URI' help='The URI to which the latency results should be uploaded. Curently only s3 is supported' required=<false>)<line_sep>wrk_opts.add_argument('--set-scenario-name' metavar='HASURA_BENCH_SCENARIO_NAME' help='Set a name for the test scenario. This will be shown in logs' required=<false>)<line_sep>wrk_opts.add_argument('--results-hge-url' metavar='HASURA_BENCH_RESULTS_HGE_URL' help='The GraphQL engine to which the results should be uploaded' required=<false>)<line_sep>wrk_opts.add_argument('--results-hge-admin-secret' metavar='HASURA_BENCH_RESULTS_HGE_ADMIN_SECRET' help='Admin secret of the GraphQL engine to which the results should be uploaded' required=<false>)<line_sep>wrk_opts.add_argument('--skip-plots' help='Skip plotting' action='store_true' required=<false>)<line_sep>wrk_opts.add_argument('--run-benchmarks' metavar='HASURA_BENCH_RUN_BENCHMARKS' help='Whether benchmarks should be run or not' default=<true> type=boolean_string)<block_end><def_stmt>get_s3_caller_identity self<block_start><return>boto3.client('sts').get_caller_identity()<block_end><def_stmt>parse_wrk_options self<block_start>self.connections,self.duration,self.graphql_queries_file,self.res_hge_url,upload_root_uri,self.res_hge_admin_secret,self.run_benchmarks,self.scenario_name=self.get_params([('connections' 'HASURA_BENCH_CONNECTIONS') ('duration' 'HASURA_BENCH_DURATION') ('queries_file' 'HASURA_BENCH_QUERIES_FILE') ('results_hge_url' 'HASURA_BENCH_RESULTS_HGE_URL') ('upload_root_uri' 'HASURA_BENCH_UPLOAD_ROOT_URI') ('results_hge_admin_secret' 
'HASURA_BENCH_RESULTS_HGE_ADMIN_SECRET') ('run_benchmarks' 'HASURA_BENCH_RUN_BENCHMARKS') ('set_scenario_name' 'HASURA_BENCH_SCENARIO_NAME') ])<line_sep>self.upload_root_uri=<none><if_stmt>upload_root_uri<block_start>p=urlparse(upload_root_uri)<if_stmt>p.scheme<eq>'s3'# Check if aws credentials are set <block_start>self.get_s3_caller_identity()<block_end>self.upload_root_uri=upload_root_uri<block_end>self.skip_plots=self.parsed_args.skip_plots<block_end><block_end><class_stmt>HGEWrkBenchWithArgs(HGEWrkBenchArgs HGEWrkBench)<block_start><def_stmt>__init__ self<block_start>HGEWrkBenchArgs.__init__(self)<line_sep>HGEWrkBench.__init__(self pg_url=self.pg_url remote_pg_url=self.remote_pg_url pg_docker_image=self.pg_docker_image hge_url=self.hge_url remote_hge_url=self.remote_hge_url hge_docker_image=self.hge_docker_image hge_args=self.hge_args skip_stack_build=self.skip_stack_build graphql_queries_file=self.graphql_queries_file connections=self.connections duration=self.duration)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>bench=HGEWrkBenchWithArgs()<line_sep>bench.run_tests()<block_end>
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. # include parent path <import_stmt>numpy<as>np<import_stmt>math<import_stmt>warp<as>wp<import_from_stmt>warp.tests.test_base *<import_stmt>unittest<line_sep>wp.init()<line_sep>#from test_func import sqr <import_stmt>warp.tests.test_func<as>test_func<line_sep>@wp.kernel<def_stmt>test_import_func # test a cross-module function reference is resolved correctly <block_start>x=test_func.sqr(2.0)<line_sep>y=test_func.cube(2.0)<line_sep>wp.expect_eq(x 4.0)<line_sep>wp.expect_eq(y 8.0)<block_end><def_stmt>register parent<block_start>devices=wp.get_devices()<class_stmt>TestImport(parent)<block_start><pass><block_end>add_kernel_test(TestImport kernel=test_import_func name="test_import_func" dim=1 devices=devices)<line_sep><return>TestImport<block_end><if_stmt>__name__<eq>'__main__'<block_start>c=register(unittest.TestCase)<line_sep>#unittest.main(verbosity=2) wp.force_load()<line_sep>loader=unittest.defaultTestLoader<line_sep>testSuite=loader.loadTestsFromTestCase(c)<line_sep>testSuite.debug()<block_end>
# 9.3.2 # 11.2.1 # 12.4.3 <import_stmt>numpy<as>np<import_from_stmt>typing List Union Optional<import_from_stmt>.individual Individual<def_stmt>gaussian_mutation chromosome:np.ndarray prob_mutation:float mu:List[float]=<none> sigma:List[float]=<none> scale:Optional[float]=<none><arrow><none><block_start>""" Perform a gaussian mutation for each gene in an individual with probability, prob_mutation. If mu and sigma are defined then the gaussian distribution will be drawn from that, otherwise it will be drawn from N(0, 1) for the shape of the individual. """<line_sep># Determine which genes will be mutated mutation_array=np.random.random(chromosome.shape)<l>prob_mutation<line_sep># If mu and sigma are defined, create gaussian distribution around each one <if_stmt>mu<and>sigma<block_start>gaussian_mutation=np.random.normal(mu sigma)<block_end># Otherwise center around N(0,1) <else_stmt><block_start>gaussian_mutation=np.random.normal(size=chromosome.shape)<block_end><if_stmt>scale<block_start>gaussian_mutation[mutation_array]<augmul>scale<block_end># Update chromosome[mutation_array]<augadd>gaussian_mutation[mutation_array]<block_end><def_stmt>random_uniform_mutation chromosome:np.ndarray prob_mutation:float low:Union[List[float] float] high:Union[List[float] float]<arrow><none><block_start>""" Randomly mutate each gene in an individual with probability, prob_mutation. If a gene is selected for mutation it will be assigned a value with uniform probability between [low, high). 
@Note [low, high) is defined for each gene to help get the full range of possible values @TODO: Eq 11.4 """<assert_stmt>type(low)<eq>type(high) 'low and high must have the same type'<line_sep>mutation_array=np.random.random(chromosome.shape)<l>prob_mutation<if_stmt>isinstance(low list)<block_start>uniform_mutation=np.random.uniform(low high)<block_end><else_stmt><block_start>uniform_mutation=np.random.uniform(low high size=chromosome.shape)<block_end>chromosome[mutation_array]=uniform_mutation[mutation_array]<block_end><def_stmt>uniform_mutation_with_respect_to_best_individual chromosome:np.ndarray best_chromosome:np.ndarray prob_mutation:float<arrow><none><block_start>""" Ranomly mutate each gene in an individual with probability, prob_mutation. If a gene is selected for mutation it will nudged towards the gene from the best individual. @TODO: Eq 11.6 """<line_sep>mutation_array=np.random.random(chromosome.shape)<l>prob_mutation<line_sep>uniform_mutation=np.random.uniform(size=chromosome.shape)<line_sep>chromosome[mutation_array]<augadd>uniform_mutation[mutation_array]<times>(best_chromosome[mutation_array]-chromosome[mutation_array])<block_end><def_stmt>cauchy_mutation individual:np.ndarray scale:float<arrow>np.ndarray<block_start><pass><block_end><def_stmt>exponential_mutation chromosome:np.ndarray xi:Union[float np.ndarray] prob_mutation:float<arrow><none><block_start>mutation_array=np.random.random(chromosome.shape)<l>prob_mutation<line_sep># Fill xi if necessary <if_stmt><not>isinstance(xi np.ndarray)<block_start>xi_val=xi<line_sep>xi=np.empty(chromosome.shape)<line_sep>xi.fill(xi_val)<block_end># Change xi so we get E(0, 1), instead of E(0, xi) xi_div=1.0/xi<line_sep>xi.fill(1.0)<line_sep># Eq 11.17 y=np.random.uniform(size=chromosome.shape)<line_sep>x=np.empty(chromosome.shape)<line_sep>x[y<le>0.5]=(1.0/xi[y<le>0.5])<times>np.log(2<times>y[y<le>0.5])<line_sep>x[y<g>0.5]=-(1.0/xi[y<g>0.5])<times>np.log(2<times>(1-y[y<g>0.5]))<line_sep># Eq 11.16 
delta=np.empty(chromosome.shape)<line_sep>delta[mutation_array]=(xi[mutation_array]/2.0)<times>np.exp(-xi[mutation_array]<times>np.abs(x[mutation_array]))<line_sep># Update delta such that E(0, xi) = (1 / xi) * E(0 , 1) delta[mutation_array]=xi_div[mutation_array]<times>delta[mutation_array]<line_sep># Update individual chromosome[mutation_array]<augadd>delta[mutation_array]<block_end><def_stmt>mmo_mutation chromosome:np.ndarray prob_mutation:float<arrow><none><block_start><import_from_stmt>scipy stats<line_sep>mutation_array=np.random.random(chromosome.shape)<l>prob_mutation<line_sep>normal=np.random.normal(size=chromosome.shape)# Eq 11.21 cauchy=stats.cauchy.rvs(size=chromosome.shape)# Eq 11.22 # Eq 11.20 delta=np.empty(chromosome.shape)<line_sep>delta[mutation_array]=normal[mutation_array]+cauchy[mutation_array]<line_sep># Update individual chromosome[mutation_array]<augadd>delta[mutation_array]<block_end>
<import_stmt>pytest<import_from_stmt>backend.common.models.team Team<import_from_stmt>backend.common.models.tests.util CITY_STATE_COUNTRY_PARAMETERS LOCATION_PARAMETERS <line_sep>@pytest.mark.parametrize("key" ["frc177" "frc1"])<def_stmt>test_valid_key_names key:str<arrow><none><block_start><assert_stmt>Team.validate_key_name(key)<is><true><block_end>@pytest.mark.parametrize("key" ["bcr077" "frc 011" "frc711\\"])<def_stmt>test_invalid_key_names key:str<arrow><none><block_start><assert_stmt>Team.validate_key_name(key)<is><false><block_end><def_stmt>test_key_name <arrow><none><block_start>team=Team(id="frc254" team_number=254)<assert_stmt>team.key_name<eq>"frc254"<block_end>@pytest.mark.parametrize(LOCATION_PARAMETERS[0] LOCATION_PARAMETERS[1])<def_stmt>test_location city:str state:str country:str postalcode:str output:str<arrow><none><block_start>team=Team(city=city state_prov=state country=country postalcode=postalcode )<assert_stmt>team.location<eq>output<block_end>@pytest.mark.parametrize(CITY_STATE_COUNTRY_PARAMETERS[0] CITY_STATE_COUNTRY_PARAMETERS[1])<def_stmt>test_city_state_country city:str state:str country:str output:str<arrow><none><block_start>team=Team(city=city state_prov=state country=country )<assert_stmt>team.city_state_country<eq>output<block_end><def_stmt>test_details_url <arrow><none><block_start>team=Team(id="frc254" team_number=254 )<assert_stmt>team.details_url<eq>"/team/254"<block_end>
""" ========================================================================== bitstruct.py ========================================================================== APIs to generate a bitstruct type. Using decorators and type annotations to create bit struct is much inspired by python3 dataclass implementation. Note that the implementation (such as the _CAPITAL constants to add some private metadata) in this file is very similar to the **original python3 dataclass implementation**. The syntax of creating bit struct is very similar to that of python3 dataclass. https://github.com/python/cpython/blob/master/Lib/dataclasses.py For example, @bitstruct class Point: x : Bits4 y : Bits4 will automatically generate some methods, such as __init__, __str__, __repr__, for the Point class. Similar to the built-in dataclasses module, we also provide a mk_bitstruct function for user to dynamically generate bit struct types. For example, mk_bitstruct( 'Pixel',{ 'r' : Bits4, 'g' : Bits4, 'b' : Bits4, }, name_space = { '__str__' : lambda self: f'({self.r},{self.g},{self.b})' } ) is equivalent to: @bitstruct class Pixel: r : Bits4 g : Bits4 b : Bits4 def __str__( self ): return f'({self.r},{self.g},{self.b})' Author : <NAME>, <NAME> Date : Oct 19, 2019 """<import_stmt>functools<import_stmt>keyword<import_stmt>operator<import_stmt>types<import_stmt>warnings<import_stmt>py<import_from_stmt>pymtl3.extra.pypy custom_exec<import_from_stmt>.bits_import *<import_from_stmt>.helpers concat<line_sep>#------------------------------------------------------------------------- # Constants #------------------------------------------------------------------------- # Object with this attribute is considered as bit struct, as we assume # only the bitstruct decorator will stamp this attribute to a class. This # attribute also stores the field information and can be used for # translation. # # The original dataclass use hasattr( cls, _FIELDS ) to check dataclass. 
# We do this here as well _FIELDS='__bitstruct_fields__'<def_stmt>is_bitstruct_inst obj<block_start>"""Returns True if obj is an instance of a dataclass."""<line_sep><return>hasattr(type(obj) _FIELDS)<block_end><def_stmt>is_bitstruct_class cls<block_start>"""Returns True if obj is a dataclass ."""<line_sep><return>isinstance(cls type)<and>hasattr(cls _FIELDS)<block_end><def_stmt>get_bitstruct_inst_all_classes obj# list: put all types together <block_start><if_stmt>isinstance(obj list)<block_start><return>functools.reduce(operator.or_ [get_bitstruct_inst_all_classes(x)<for>x obj])<block_end>ret={obj.__class__}<line_sep># BitsN or int <if_stmt>isinstance(obj (Bits int))<block_start><return>ret<block_end># BitStruct <assert_stmt>is_bitstruct_inst(obj) f"{obj} is not a valid PyMTL Bitstruct!"<line_sep><return>ret|functools.reduce(operator.or_ [get_bitstruct_inst_all_classes(getattr(obj v))<for>v obj.__bitstruct_fields__.keys()])<block_end>_DEFAULT_SELF_NAME='s'<line_sep>_ANTI_CONFLICT_SELF_NAME='__bitstruct_self__'<line_sep>#------------------------------------------------------------------------- # _create_fn #------------------------------------------------------------------------- # A helper function that creates a function based on # - fn_name : name of the function # - args_lst : a list of arguments in string # - body_lst : a list of statement of the function body in string # Also note that this whole _create_fn thing is similar to the original # dataclass implementation! 
<def_stmt>_create_fn fn_name args_lst body_lst _globals=<none># Assemble argument string and body string <block_start>args=', '.join(args_lst)<line_sep>body='\n'.join(f' {statement}'<for>statement body_lst)<line_sep># Assemble the source code and execute it src=f'def {fn_name}({args}):\n{body}'<if_stmt>_globals<is><none><block_start>_globals={}<block_end>_locals={}<line_sep>custom_exec(py.code.Source(src).compile() _globals _locals)<line_sep><return>_locals[fn_name]<block_end>#------------------------------------------------------------------------- # _mk_init_arg #------------------------------------------------------------------------- # Creates a init argument string from a field. # # Shunning: I revamped the whole thing because they are indeed mutable # objects. <def_stmt>_mk_init_arg name type_# default is always None <block_start><if_stmt>isinstance(type_ list)<or>is_bitstruct_class(type_)<block_start><return>f'{name} = None'<block_end><return>f'{name} = 0'<block_end>#------------------------------------------------------------------------- # _mk_init_body #------------------------------------------------------------------------- # Creates one line of __init__ body from a field # to globals. <def_stmt>_mk_init_body self_name name type_<block_start><def_stmt>_recursive_generate_init x<block_start><if_stmt>isinstance(x list)<block_start><return>f"[{', '.join([_recursive_generate_init(x[0])]<times>len(x))}]"<block_end><return>f"_type_{name}()"<block_end><if_stmt>isinstance(type_ list)<or>is_bitstruct_class(type_)<block_start><return>f'{self_name}.{name} = {name} or {_recursive_generate_init(type_)}'<block_end><assert_stmt>issubclass(type_ Bits)<line_sep><return>f'{self_name}.{name} = _type_{name}({name})'<block_end>#------------------------------------------------------------------------- # _mk_tuple_str #------------------------------------------------------------------------- # Creates a tuple of string representations of each field. 
For example, # if the self_name is 'self' and fields is [ 'x', 'y' ], it will return # ('self.x', 'self.y'). This is used for creating the default __eq__ and # __hash__ function. <def_stmt>_mk_tuple_str self_name fields<block_start><return>f'({",".join([f"{self_name}.{name}"<for>name fields])},)'<block_end>#------------------------------------------------------------------------- # _mk_init_fn #------------------------------------------------------------------------- # Creates a __init__ function based on fields. For example, if fields # contains two field x (Bits4) and y (Bits4), _mk_init_fn will return a # function that looks like the following: # # def __init__( s, x = 0, y = 0, z = None, p = None ): # s.x = _type_x(x) # s.y = _type_y(y) # s.z = z or _type_z() # s.p = p or [ _type_p(), _type_p() ] # # NOTE: # _mk_init_fn also takes as argument the name of self in case there is a # field with name 's' or 'self'. # # TODO: should we provide a __post_init__ function like dataclass does? <def_stmt>_mk_init_fn self_name fields# Register necessary types in _globals <block_start>_globals={}<for_stmt>name,type_ fields.items()<block_start><if_stmt>isinstance(type_ list)<block_start>x=type_[0]<while_stmt>isinstance(x list)<block_start>x=x[0]<block_end>_globals[f"_type_{name}"]=x<block_end><else_stmt><block_start><assert_stmt>issubclass(type_ Bits)<or>is_bitstruct_class(type_)<line_sep>_globals[f"_type_{name}"]=type_<block_end><block_end><return>_create_fn('__init__' [self_name]+[_mk_init_arg(*field)<for>field fields.items()] [_mk_init_body(self_name *field)<for>field fields.items()] _globals=_globals )<block_end>#------------------------------------------------------------------------- # _mk_str_fn #------------------------------------------------------------------------- # Creates a __str__ function based on fields. 
For example, if fields # contains two field x (Bits4) and y (Bits4), _mk_str_fn will return a # function that looks like the following: # # def __str__( self ): # return f'{self.x}:{self.y}' <def_stmt>_mk_str_fn fields<block_start><return>_create_fn('__str__' ['self'] ['return f"'+':'.join([f'{{self.{name}}}'<for>name fields])+'"'])<block_end>#------------------------------------------------------------------------- # _mk_repr_fn #------------------------------------------------------------------------- # Creates a __repr__ function based on fields. For example, if fields # contains two field x (Bits4) and y (Bits4), _mk_repr_fn will return a # function that looks like the following: # # def __repr__( self ): # return self.__class__.__name__ + f'(x={self.x!r}, y={self.y!r})' <def_stmt>_mk_repr_fn fields<block_start><return>_create_fn('__repr__' ['self'] ['return self.__class__.__name__ + f"('+','.join([f'{{self.{name}!r}}'<for>name fields])+')"'])<block_end>#------------------------------------------------------------------------- # _mk_eq_fn #------------------------------------------------------------------------- # Creates a __eq__ function based on fields. By default it just compares # each field. For example, if fields contains two field x (Bits4) and y # (Bits4), _mk_eq_fn will return a function that looks like the # following: # # def __eq__( self, other ): # if other.__class__ is self.__class__: # return (self.x,self.y,) == (other.x,other.y,) # else: # raise NotImplemented <def_stmt>_mk_eq_fn fields<block_start>self_tuple=_mk_tuple_str('self' fields)<line_sep>other_tuple=_mk_tuple_str('other' fields)<line_sep><return>_create_fn('__eq__' ['self' 'other'] [f'return (other.__class__ is self.__class__) and {self_tuple} == {other_tuple}'])<block_end>#------------------------------------------------------------------------- # _mk_hash_fn #------------------------------------------------------------------------- # Creates a __hash__ function based on fields. 
By default it just hashes # all fields. For example, if fields contains two field x (Bits4) and y # (Bits4), _mk_hash_fn will return a function that looks like the # following: # # def __hash__( self ): # return hash((self.x,self.y,)) <def_stmt>_mk_hash_fn fields<block_start>self_tuple=_mk_tuple_str('self' fields)<line_sep><return>_create_fn('__hash__' ['self'] [f'return hash({self_tuple})'])<block_end>#--------------------------PyMTL3 specific-------------------------------- #------------------------------------------------------------------------- # _mk_ff_fn #------------------------------------------------------------------------- # Creates __ilshift__ and _flip functions that looks like the following: # # def __ilshift__( self, other ): # if self.__class__ is not other.__class__: # other = self.__class__.from_bits( other.to_bits() ) # self.x <<= other.x # self.y[0][0] <<= other.y[0][0] # # def _flip( self ): # self.x._flip() # self.y[i][j]._flip() <def_stmt>_mk_ff_fn fields<block_start><def_stmt>_gen_list_ilshift_strs type_ prefix=''<block_start><if_stmt>isinstance(type_ list)<block_start>ilshift_strs,flip_strs=[] []<for_stmt>i range(len(type_))<block_start>ils,fls=_gen_list_ilshift_strs(type_[0] f"{prefix}[{i}]")<line_sep>ilshift_strs.extend(ils)<line_sep>flip_strs.extend(fls)<block_end><return>ilshift_strs flip_strs<block_end><else_stmt><block_start><return>[f"self.{prefix} <<= other.{prefix}"] [f"self.{prefix}._flip()"]<block_end><block_end>ilshift_strs=['if self.__class__ is not other.__class__:' ' other = self.__class__.from_bits( other.to_bits() )']<line_sep>flip_strs=[]<for_stmt>name,type_ fields.items()<block_start>ils,fls=_gen_list_ilshift_strs(type_ name)<line_sep>ilshift_strs.extend(ils)<line_sep>flip_strs.extend(fls)<block_end><return>_create_fn('__ilshift__' ['self' 'other'] ilshift_strs+["return self"] ) _create_fn('_flip' ['self'] flip_strs ) <block_end>#------------------------------------------------------------------------- # _mk_clone_fn 
#------------------------------------------------------------------------- # Creates clone function that looks like the following: # Use this clone function in any place that you need to perform a # deepcopy on a bitstruct. # # def clone( self ): # return self.__class__( self.x.clone(), [ self.y[0].clone(), self.y[1].clone() ] ) <def_stmt>_gen_list_clone_strs type_ prefix=''<block_start><if_stmt>isinstance(type_ list)<block_start><return>"["+",".join([_gen_list_clone_strs(type_[0] f"{prefix}[{i}]")<for>i range(len(type_))])+"]"<block_end><else_stmt><block_start><return>f"{prefix}.clone()"<block_end><block_end><def_stmt>_mk_clone_fn fields<block_start>clone_strs=['return self.__class__(']<for_stmt>name,type_ fields.items()<block_start>clone_strs.append(" "+_gen_list_clone_strs(type_ f'self.{name}')+",")<block_end><return>_create_fn('clone' ['self'] clone_strs+[')'] )<block_end><def_stmt>_mk_deepcopy_fn fields<block_start>clone_strs=['return self.__class__(']<for_stmt>name,type_ fields.items()<block_start>clone_strs.append(" "+_gen_list_clone_strs(type_ f'self.{name}')+",")<block_end><return>_create_fn('__deepcopy__' ['self' 'memo'] clone_strs+[')'] )<block_end>#------------------------------------------------------------------------- # _mk_imatmul_fn #------------------------------------------------------------------------- # Creates @= function that copies the value over ... 
# TODO create individual from_bits for imatmul and ilshift # def __imatmul__( self, other ): # if self.__class__ is not other.__class__: # other = self.__class__.from_bits( other.to_bits() ) # self.x @= other.x # self.y[0] @= other.y[0] # self.y[1] @= other.y[1] <def_stmt>_mk_imatmul_fn fields<block_start><def_stmt>_gen_list_imatmul_strs type_ prefix=''<block_start><if_stmt>isinstance(type_ list)<block_start>ret=[]<for_stmt>i range(len(type_))<block_start>ret.extend(_gen_list_imatmul_strs(type_[0] f"{prefix}[{i}]"))<block_end><return>ret<block_end><else_stmt><block_start><return>[f"self.{prefix} @= other.{prefix}"]<block_end><block_end>imatmul_strs=['if self.__class__ is not other.__class__:' ' other = self.__class__.from_bits( other.to_bits() )']<for_stmt>name,type_ fields.items()<block_start>imatmul_strs.extend(_gen_list_imatmul_strs(type_ name))<block_end><return>_create_fn('__imatmul__' ['self' 'other'] imatmul_strs+["return self"] )<block_end>#------------------------------------------------------------------------- # _mk_nbits_to_bits_fn #------------------------------------------------------------------------- # Creates nbits, to_bits function that copies the value over ... # # def to_bits( self ): # return concat( self.x, self.y[0], self.y[1] ) # # TODO packing order of array? 
x[0] is LSB or MSB of a list # current we do LSB <def_stmt>_mk_nbits_to_bits_fn fields<block_start><def_stmt>_gen_to_bits_strs type_ prefix start_bit<block_start><if_stmt>isinstance(type_ list)<block_start>to_strs=[]<line_sep># The packing order is LSB, so we need to reverse the list to make x[-1] higher bits <for_stmt>i reversed(range(len(type_)))<block_start>start_bit,tos=_gen_to_bits_strs(type_[0] f"{prefix}[{i}]" start_bit)<line_sep>to_strs.extend(tos)<block_end><return>start_bit to_strs<block_end><elif_stmt>is_bitstruct_class(type_)<block_start>to_strs=[]<for_stmt>name,typ getattr(type_ _FIELDS).items()<block_start>start_bit,tos=_gen_to_bits_strs(typ f"{prefix}.{name}" start_bit)<line_sep>to_strs.extend(tos)<block_end><return>start_bit to_strs<block_end><else_stmt><block_start>end_bit=start_bit+type_.nbits<line_sep><return>end_bit [f"self.{prefix}"]<block_end><block_end>to_bits_strs=[]<line_sep>total_nbits=0<for_stmt>name,type_ fields.items()<block_start>total_nbits,tos=_gen_to_bits_strs(type_ name total_nbits)<line_sep>to_bits_strs.extend(tos)<block_end><return>total_nbits _create_fn('to_bits' ['self'] [f"return concat({', '.join(to_bits_strs)})"] _globals={'concat':concat})<block_end>#------------------------------------------------------------------------- # _mk_from_bits_fn #------------------------------------------------------------------------- # Creates static method from_bits that creates a new bitstruct based on Bits # and instance method _from_bits that copies the value over # # @staticmethod # def from_bits( other ): # return self.__class__( other[16:32], other[0:16] ) <def_stmt>_mk_from_bits_fns fields total_nbits<block_start><def_stmt>_gen_from_bits_strs type_ end_bit<block_start><if_stmt>isinstance(type_ list)<block_start>from_strs=[]<line_sep># Since we are doing LSB for x[0], we need to unpack from the last # element of the list, and then reverse it again to construct a list ... 
<for_stmt>i range(len(type_))<block_start>end_bit,fs=_gen_from_bits_strs(type_[0] end_bit)<line_sep>from_strs.extend(fs)<block_end><return>end_bit [f"[{','.join(reversed(from_strs))}]"]<block_end><elif_stmt>is_bitstruct_class(type_)<block_start><if_stmt>type_<in>type_name_mapping<block_start>type_name=type_name_mapping[type_]<block_end><else_stmt><block_start>type_name=f"_type{len(type_name_mapping)}"<line_sep>type_name_mapping[type_]=type_name<block_end>from_strs=[]<for_stmt>name,typ getattr(type_ _FIELDS).items()<block_start>end_bit,fs=_gen_from_bits_strs(typ end_bit)<line_sep>from_strs.extend(fs)<block_end><return>end_bit [f"{type_name}({','.join(from_strs)})"]<block_end><else_stmt><block_start><if_stmt>type_<not><in>type_name_mapping<block_start>type_name_mapping[type_]=type_.__name__<block_end><else_stmt><block_start><assert_stmt>type_name_mapping[type_]<eq>type_.__name__<block_end>start_bit=end_bit-type_.nbits<line_sep><return>start_bit [f"other[{start_bit}:{end_bit}]"]<block_end><block_end>from_bits_strs=[]<line_sep>end_bit=total_nbits<line_sep># This is to make sure we capture two types with the same name but different # attributes type_name_mapping={}<line_sep>type_count=0<for_stmt>_,type_ fields.items()<block_start>end_bit,fs=_gen_from_bits_strs(type_ end_bit)<line_sep>from_bits_strs.extend(fs)<block_end><assert_stmt>end_bit<eq>0<line_sep>_globals={y:x<for>x,y type_name_mapping.items()}<assert_stmt>len(_globals)<eq>len(type_name_mapping)<line_sep># TODO add assertion in bits <return>_create_fn('from_bits' ['cls' 'other'] ["assert cls.nbits == other.nbits, f'LHS bitstruct {cls.nbits}-bit <> RHS other {other.nbits}-bit'" "other = other.to_bits()" f"return cls({','.join(from_bits_strs)})"] _globals)<block_end>#------------------------------------------------------------------------- # _check_valid_array #------------------------------------------------------------------------- <def_stmt>_recursive_check_array_types 
current<block_start>x=current[0]<if_stmt>isinstance(x list)<block_start>x_len=len(x)<line_sep>x_type=_recursive_check_array_types(x)<for_stmt>y current[1:]<block_start><assert_stmt>isinstance(y list)<and>len(y)<eq>x_len<line_sep>y_type=_recursive_check_array_types(y)<assert_stmt>y_type<is>x_type<block_end><return>x_type<block_end><assert_stmt>issubclass(x Bits)<or>is_bitstruct_class(x)<for_stmt>y current[1:]<block_start><assert_stmt>y<is>x<block_end><return>x<block_end><def_stmt>_check_valid_array_of_types arr# Check if the provided list is a strict multidimensional array <block_start><try_stmt><block_start><return>_recursive_check_array_types(arr)<block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep><return><none><block_end><block_end>#------------------------------------------------------------------------- # _check_field_annotation #------------------------------------------------------------------------- <def_stmt>_check_field_annotation cls name type_# Make sure not default is annotated <block_start><if_stmt>hasattr(cls name)<block_start>default=getattr(cls name)<line_sep><raise>TypeError("We don't allow subfields to have default value:\n"<concat>f"- Field '{name}' of BitStruct {cls.__name__} has default value {default!r}.")<block_end># Special case if the type is an instance of list <if_stmt>isinstance(type_ list)<block_start><if_stmt>_check_valid_array_of_types(type_)<is><none><block_start><raise>TypeError("The provided list spec should be a strict multidimensional ARRAY "<concat>"with no varying sizes or types. 
All non-list elements should be VALID types.")<block_end><block_end><else_stmt># Now we work with types <block_start><if_stmt><not>isinstance(type_ type)<block_start><raise>TypeError(f"{type_} is not a type\n"<concat>f"- Field '{name}' of BitStruct {cls.__name__} is annotated as {type_}.")<block_end># More specifically, Bits and BitStruct <if_stmt><not>issubclass(type_ Bits)<and><not>is_bitstruct_class(type_)<block_start><raise>TypeError("We currently only support BitsN, list, or another BitStruct as BitStruct field:\n"<concat>f"- Field '{name}' of BitStruct {cls.__name__} is annotated as {type_}.")<block_end><block_end><block_end>#------------------------------------------------------------------------- # _get_self_name #------------------------------------------------------------------------- # Return a self name based on fields. <def_stmt>_get_self_name fields<block_start><return>(_ANTI_CONFLICT_SELF_NAME<if>_DEFAULT_SELF_NAME<in>fields<else>_DEFAULT_SELF_NAME)<block_end>#------------------------------------------------------------------------- # _process_cls #------------------------------------------------------------------------- # Process the input cls and add methods to it. 
_bitstruct_hash_cache={}<def_stmt>_process_class cls add_init=<true> add_str=<true> add_repr=<true> add_hash=<true># Get annotations of the class <block_start>cls_annotations=cls.__dict__.get('__annotations__' {})<if_stmt><not>cls_annotations<block_start><raise>AttributeError("No field is declared in the bit struct definition.\n"<concat>f"Suggestion: check the definition of {cls.__name__} to"<concat>" make sure it only contains 'field_name(string): Type(type).'")<block_end># Get field information from the annotation and prepare for hashing fields={}<line_sep>hashable_fields={}<def_stmt>_convert_list_to_tuple x<block_start><if_stmt>isinstance(x list)<block_start><return>tuple([_convert_list_to_tuple(y)<for>y x])<block_end><return>x<block_end>reserved_fields=['to_bits' 'from_bits' 'nbits']<for_stmt>x reserved_fields<block_start><assert_stmt>x<not><in>cls.__dict__ f"Currently a bitstruct cannot have {reserved_fields}, but "<concat>f"{x} is provided as {cls.__dict__[x]}"<block_end><for_stmt>a_name,a_type cls_annotations.items()<block_start><assert_stmt>a_name<not><in>reserved_fields f"Currently a bitstruct cannot have {reserved_fields}, but "<concat>f"{a_name} is annotated as {a_type}"<line_sep>_check_field_annotation(cls a_name a_type)<line_sep>fields[a_name]=a_type<line_sep>hashable_fields[a_name]=_convert_list_to_tuple(a_type)<block_end>cls._hash=_hash=hash((cls.__name__ *tuple(hashable_fields.items()) add_init add_str add_repr add_hash))<if_stmt>_hash<in>_bitstruct_hash_cache<block_start><return>_bitstruct_hash_cache[_hash]<block_end>_bitstruct_hash_cache[_hash]=cls<line_sep># Stamp the special attribute so that translation pass can identify it # as bit struct. setattr(cls _FIELDS fields)<line_sep># Add methods to the class # Create __init__. Here I follow the dataclass convention that we only # add our generated __init__ function when add_init is true and user # did not define their own init. 
<if_stmt>add_init<block_start><if_stmt><not>'__init__'<in>cls.__dict__<block_start>cls.__init__=_mk_init_fn(_get_self_name(fields) fields)<block_end><block_end># Create __str__ <if_stmt>add_str<block_start><if_stmt><not>'__str__'<in>cls.__dict__<block_start>cls.__str__=_mk_str_fn(fields)<block_end><block_end># Create __repr__ <if_stmt>add_repr<block_start><if_stmt><not>'__repr__'<in>cls.__dict__<block_start>cls.__repr__=_mk_repr_fn(fields)<block_end><block_end># Create __eq__. There is no need for a __ne__ method as python will # call __eq__ and negate it. # NOTE: if user overwrites __eq__ it may lead to different behavior for # the translated verilog as in the verilog world two bit structs are # equal only if all the fields are equal. We always try to add __eq__ <if_stmt><not>'__eq__'<in>cls.__dict__<block_start>cls.__eq__=_mk_eq_fn(fields)<block_end><else_stmt><block_start>w_msg=(f'Overwriting {cls.__qualname__}\'s __eq__ may cause the '<concat>'translated verilog behaves differently from PyMTL '<concat>'simulation.')<line_sep>warnings.warn(w_msg)<block_end># Create __hash__. 
<if_stmt>add_hash<block_start><if_stmt><not>'__hash__'<in>cls.__dict__<block_start>cls.__hash__=_mk_hash_fn(fields)<block_end><block_end># Shunning: add __ilshift__ and _flip for update_ff <assert_stmt><not>'__ilshift__'<in>cls.__dict__<and><not>'_flip'<in>cls.__dict__<line_sep>cls.__ilshift__,cls._flip=_mk_ff_fn(fields)<line_sep># Shunning: add clone <assert_stmt><not>'clone'<in>cls.__dict__<and><not>'__deepcopy__'<in>cls.__dict__<line_sep>cls.clone=_mk_clone_fn(fields)<line_sep>cls.__deepcopy__=_mk_deepcopy_fn(fields)<line_sep># Shunning: add imatmul for assignment, as well as nbits/to_bits/from_bits <assert_stmt>'__imatmul__'<not><in>cls.__dict__<and>'to_bits'<not><in>cls.__dict__<and>'nbits'<not><in>cls.__dict__<and>'from_bits'<not><in>cls.__dict__<line_sep>cls.__imatmul__=_mk_imatmul_fn(fields)<line_sep>cls.nbits,cls.to_bits=_mk_nbits_to_bits_fn(fields)<line_sep>from_bits=_mk_from_bits_fns(fields cls.nbits)<line_sep>cls.from_bits=classmethod(from_bits)<assert_stmt><not>'get_field_type'<in>cls.__dict__<def_stmt>get_field_type cls name<block_start><if_stmt>name<in>cls.__bitstruct_fields__<block_start><return>cls.__bitstruct_fields__[name]<block_end><raise>AttributeError(f"{cls} has no field '{name}'")<block_end>cls.get_field_type=classmethod(get_field_type)<line_sep># TODO: maybe add a to_bits and from bits function. <return>cls<block_end>#------------------------------------------------------------------------- # bitstruct #------------------------------------------------------------------------- # The actual class decorator. We add a * in the argument list so that the # following argument can only be used as keyword arguments. <def_stmt>bitstruct _cls=<none> * add_init=<true> add_str=<true> add_repr=<true> add_hash=<true><block_start><def_stmt>wrap cls<block_start><return>_process_class(cls add_init add_str add_repr)<block_end># Called as @bitstruct(...) <if_stmt>_cls<is><none><block_start><return>wrap<block_end># Called as @bitstruct without parens. 
<return>wrap(_cls)<block_end>#------------------------------------------------------------------------- # mk_bitstruct #------------------------------------------------------------------------- # Dynamically generate a bit struct class. # TODO: should we add base parameters to support inheritence? <def_stmt>mk_bitstruct cls_name fields * namespace=<none> add_init=<true> add_str=<true> add_repr=<true> add_hash=<true># copy namespace since will mutate it <block_start>namespace={}<if>namespace<is><none><else>namespace.copy()<line_sep># We assume fields is a dictionary and thus there won't be duplicate # field names. So we only check if the field names are indeed strings # and that they are not keywords. annos={}<for_stmt>name,f fields.items()<block_start><if_stmt><not>isinstance(name str)<or><not>name.isidentifier()<block_start><raise>TypeError(f'Field name {name!r} is not a valid identifier!')<block_end><if_stmt>keyword.iskeyword(name)<block_start><raise>TypeError(f'Field name {name!r} is a keyword!')<block_end>annos[name]=f<block_end>namespace['__annotations__']=annos<line_sep>cls=types.new_class(cls_name () {} <lambda>ns:ns.update(namespace))<line_sep><return>bitstruct(cls add_init=add_init add_str=add_str add_repr=add_repr add_hash=add_hash)<block_end>
<import_stmt>os<import_from_stmt>pathlib Path<def_stmt>menpo3d_src_dir_path <block_start>r"""The path to the top of the menpo3d Python package. Useful for locating where the data folder is stored. Returns ------- path : str The full path to the top of the Menpo3d package """<line_sep><return>Path(os.path.abspath(__file__)).parent<block_end>
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ORQA ops."""
from language.orqa import ops as orqa_ops
import tensorflow.compat.v1 as tf


class OrqaOpsTest(tf.test.TestCase):
    # Exercises the custom ORQA TensorFlow ops; assumes eager execution
    # (results are read back via .numpy()).

    def test_reader_inputs(self):
        # Builds reader inputs for two blocks: each row is
        # [CLS] question [SEP] block [SEP] padded to max_sequence_len=10.
        concat_inputs = orqa_ops.reader_inputs(
            question_token_ids=[0, 1],
            block_token_ids=[[2, 3, 4], [5, 6, 0]],
            block_lengths=[3, 2],
            block_token_map=[[1, 2, 5], [1, 3, 4]],
            answer_token_ids=[[3, 4], [7, 0]],
            answer_lengths=[2, 1],
            cls_token_id=10,
            sep_token_id=11,
            max_sequence_len=10)

        # Token layout: CLS(10), question, SEP(11), block, SEP(11), zero pad.
        self.assertAllEqual(concat_inputs.token_ids.numpy(),
                            [[10, 0, 1, 11, 2, 3, 4, 11, 0, 0],
                             [10, 0, 1, 11, 5, 6, 11, 0, 0, 0]])
        # 1 for every real token (including separators), 0 for padding.
        self.assertAllEqual(concat_inputs.mask.numpy(),
                            [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                             [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]])
        # Segment 0 = CLS + question + first SEP, segment 1 = block + SEP.
        self.assertAllEqual(concat_inputs.segment_ids.numpy(),
                            [[0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
                             [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]])
        # Marks block tokens only (excludes separators and padding).
        self.assertAllEqual(concat_inputs.block_mask.numpy(),
                            [[0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
                             [0, 0, 0, 0, 1, 1, 0, 0, 0, 0]])
        # Maps concatenated positions back to original block positions
        # (-1 where no block token is present).
        self.assertAllEqual(concat_inputs.token_map.numpy(),
                            [[-1, -1, -1, -1, 1, 2, 5, -1, -1, -1],
                             [-1, -1, -1, -1, 1, 3, -1, -1, -1, -1]])
        # Gold answer span boundaries in the concatenated sequence;
        # -1 means the answer does not occur in that block.
        self.assertAllEqual(concat_inputs.gold_starts.numpy(), [[5], [-1]])
        self.assertAllEqual(concat_inputs.gold_ends.numpy(), [[6], [-1]])

    def test_has_answer(self):
        # Substring containment check per block: "hij" occurs only in the
        # second block.
        result = orqa_ops.has_answer(blocks=["abcdefg", "hijklmn"],
                                     answers=["hij"])
        self.assertAllEqual(result.numpy(), [False, True])


if __name__ == "__main__":
    tf.test.main()
import numpy as np
import matplotlib.pyplot as plt

# (ADC code, occurrence count) pairs, recorded out of order.
adchist = [(0, 137477), (1, 98524), (2, 71744), (3, 60967), (4, 44372),
           (5, 46348), (6, 19944), (7, 10092), (8, 13713), (9, 11182),
           (10, 6903), (11, 4072), (12, 2642), (13, 968), (14, 296),
           (15, 166), (16, 17), (17, 2), (-1, 39662), (-2, 43502),
           (-3, 57596), (-4, 33915), (-5, 25611), (-6, 10880), (-7, 8237),
           (-8, 3518), (-9, 4789), (-10, 4689), (-11, 6345), (-12, 3901),
           (-13, 5781), (-14, 4803), (-15, 6428), (-16, 3563), (-17, 4478),
           (-18, 976), (-19, 491)]

# Order by ADC code and split into code/count columns for plotting.
hist = np.array(sorted(adchist))
codes, counts = hist[:, 0], hist[:, 1]

# Bar chart of the ADC value histogram.
plt.figure()
plt.bar(codes, counts)
plt.show()
# coding=utf-8

# Regression script: round-trips a molecule whose Data S-group contains
# non-ASCII (UTF-8) characters through molfile and CML serialization and
# prints the results for comparison against reference output.
import sys

sys.path.append('../../common')
from env_indigo import *

indigo = Indigo()
# Skip the timestamp in saved molfiles so output is stable across runs.
indigo.setOption("molfile-saving-skip-date", "1")

print("****** Load molfile with UTF-8 characters in Data S-group ********")

m = indigo.loadMoleculeFromFile(joinPathPy("molecules/sgroups_utf8.mol", __file__))
indigo.setOption("molfile-saving-mode", "2000")
res = m.molfile()
# Reload from the serialized text to verify the UTF-8 data survives a
# save/load round trip.
m = indigo.loadMolecule(res)

# TODO: Fails on IronPython 2.7.9:
# - M SED 1 single-value-бензол
# + M SED 1 single-value-������
if isIronPython():
    # .NET console output path; the commented experiments below document
    # prior attempts at fixing IronPython's console encoding.
    from System.Text import Encoding
    from System import Console

    print(m.molfile())
    print(res)
    print(m.cml())
    # reload(sys)
    # sys.setdefaultencoding('utf-8')
    # sys.stdout = codecs.getwriter('utf8')(sys.stdout)
    # Console.WriteLine(m.molfile().encode("utf-8-sig"))
    # print(Encoding.UTF8.GetString(Encoding.Default.GetBytes(m.molfile().encode("utf-8-sig"))))
    # Console.Write(Encoding.UTF8.GetString(Encoding.UTF8.GetBytes(m.molfile().encode("utf-8"))))
    # Console.Write(Encoding.UTF8.GetString(Encoding.UTF8.GetBytes(res.encode("utf-8"))))
    # Console.Write(Encoding.UTF8.GetString(Encoding.UTF8.GetBytes(m.cml().encode("utf-8"))))
    # m.saveMolfile("test.mol")
    # with codecs.open(joinPathPy("test.mol", __file__), "r", "utf-8-sig") as temp:
    #     print(temp.read()[510:])
    # with codecs.open('test', 'w', "utf-8") as f:
    #     f.write(m.molfile())
    # Console.WriteLine(m.molfile())
    # f.write(repr(Encoding.UTF8.GetString(Encoding.Default.GetBytes(m.molfile()))))
    # f.write(temp.read())
    # f.write(Encoding.UTF8.GetString(Encoding.Default.GetBytes(m.molfile().encode('utf-8'))))
    # Console.Write(str(temp.read()).encode('utf-8'))
else:
    # CPython 2 prints bytes, so encode explicitly; CPython 3 handles
    # unicode natively.
    if sys.version_info[0] < 3:
        print(m.molfile().encode("utf-8"))
        print(res.encode("utf-8"))
        print(m.cml().encode("utf-8"))
    else:
        print(m.molfile())
        print(res)
        print(m.cml())
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep># Define EcalSelectiveReadoutProducer module as "simEcalDigis" with default settings <import_from_stmt>SimCalorimetry.EcalSelectiveReadoutProducers.ecalDigis_cfi *<line_sep>
# Build-and-smoke-test script: compiles the Cython extension in place and
# immediately imports and exercises it.
from distutils.core import setup
from Cython.Build import cythonize

# NOTE(review): distutils is deprecated since Python 3.10 and removed in
# 3.12 — consider migrating to setuptools; confirm supported Python versions.
setup(name="tax",
      ext_modules=cythonize('tax.pyx'),
      # Hard-coding script_name/script_args makes the build run as
      # `build_ext --inplace` even when this file is invoked with no
      # command-line arguments.
      script_name='setup.py',
      script_args=['build_ext', '--inplace'])

# Import the freshly built extension and run a quick sanity check on it.
import tax
import numpy as np

print(tax.tax(np.ones(10)))
from rest_framework.permissions import SAFE_METHODS, BasePermission


class ApiPermission(BasePermission):
    """Grant API access based on per-view read/write permission attributes."""

    def _has_permission(self, view, obj, request):
        # No event on the request is only true for the root API view,
        # which is always accessible.
        if not getattr(request, "event", None):
            return True

        if request.method in SAFE_METHODS:
            # Reads: allowed unless the view declares a read permission,
            # in which case the user must hold it for the given object.
            required = getattr(view, "read_permission_required", None)
            return request.user.has_perm(required, obj) if required else True

        # Writes: denied unless the view declares a write permission the
        # user holds for the given object.
        required = getattr(view, "write_permission_required", None)
        return request.user.has_perm(required, obj) if required else False

    def has_permission(self, request, view):
        # View-level check: the event itself serves as the permission object.
        return self._has_permission(view, getattr(request, "event", None), request)

    def has_object_permission(self, request, view, obj):
        # Object-level check against the concrete instance.
        return self._has_permission(view, obj, request)
"""Download and unpack datasets from the ODinW-35 benchmark."""
import argparse
import os
import subprocess

argparser = argparse.ArgumentParser()
# "all" or names joined by comma
argparser.add_argument("--dataset_names", default="all", type=str)
argparser.add_argument("--dataset_path", default="DATASET/odinw", type=str)
args = argparser.parse_args()

# Base URL hosting one zip archive per dataset.
root = "https://vlpdatasets.blob.core.windows.net/odinw/odinw/odinw_35"
all_datasets = ["AerialMaritimeDrone", "AmericanSignLanguageLetters", "Aquarium",
                "BCCD", "ChessPieces", "CottontailRabbits", "DroneControl",
                "EgoHands", "HardHatWorkers", "MaskWearing",
                "MountainDewCommercial", "NorthAmericaMushrooms", "OxfordPets",
                "PKLot", "Packages", "PascalVOC", "Raccoon",
                "ShellfishOpenImages", "ThermalCheetah", "UnoCards",
                "VehiclesOpenImages", "WildfireSmoke", "boggleBoards",
                "brackishUnderwater", "dice", "openPoetryVision", "pistols",
                "plantdoc", "pothole", "selfdrivingCar",
                "thermalDogsAndPeople", "vector", "websiteScreenshots"]

if args.dataset_names == "all":
    datasets_to_download = all_datasets
else:
    datasets_to_download = args.dataset_names.split(",")

# Ensure the target directory exists before downloading into it.
os.makedirs(args.dataset_path, exist_ok=True)

for dataset in datasets_to_download:
    if dataset in all_datasets:
        print("Downloading dataset: ", dataset)
        zip_path = os.path.join(args.dataset_path, dataset + ".zip")
        # Argument lists with shell=False avoid shell injection through
        # --dataset_path; check=True stops on a failed download instead of
        # silently unzipping a partial/empty archive.
        subprocess.run(["wget", root + "/" + dataset + ".zip", "-O", zip_path],
                       check=True)
        subprocess.run(["unzip", zip_path, "-d", args.dataset_path], check=True)
        # os.remove is portable, unlike shelling out to `rm`.
        os.remove(zip_path)
    else:
        print("Dataset not found: ", dataset)
"""Tests the experiment module of pyexperiment Written by <NAME> """<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_from_future_stmt> division<import_from_future_stmt> absolute_import<import_stmt>unittest<import_stmt>argparse<import_stmt>io<import_stmt>mock<import_stmt>tempfile<import_stmt>logging<import_stmt>multiprocessing<import_from_stmt>pyexperiment experiment<import_from_stmt>pyexperiment.utils.stdout_redirector stdout_redirector<import_from_stmt>pyexperiment state<import_from_stmt>pyexperiment conf<import_from_stmt>pyexperiment Logger<import_from_stmt>pyexperiment log<class_stmt>TestExperimentBasic(unittest.TestCase)<block_start>"""Test the experiment module's basic functions """<def_stmt>test_main_runs_function self<block_start>"""Test running main calls function """<line_sep>run=[<false>]<def_stmt>custom_function <block_start>"""User function """<line_sep>run[0]=<true><block_end># Monkey patch arg parser here argparse._sys.argv=[# pylint: disable=W0212 "test" "custom_function"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(commands=[custom_function])<block_end>self.assertTrue(run[0])<line_sep>self.assertEqual(len(buf.getvalue()) 0)<block_end><def_stmt>test_main_prints_result self<block_start>"""Test running main prints the result of a function """<def_stmt>custom_function <block_start>"""User function """<line_sep><return>"Foo"<block_end># Monkey patch arg parser here argparse._sys.argv=[# pylint: disable=W0212 "test" "custom_function"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(commands=[custom_function])<block_end>self.assertNotEqual(len(buf.getvalue()) 0)<line_sep>self.assertRegexpMatches(buf.getvalue() r'Foo')<block_end><def_stmt>test_main_shows_commands self<block_start>"""Test running main shows commands """<def_stmt>default_function <block_start>"""Default function """<line_sep><pass><block_end><def_stmt>custom_function1 
<block_start>"""User function """<line_sep><pass><block_end><def_stmt>custom_function2 <block_start>"""User function """<line_sep><pass><block_end># Monkey patch arg parser here argparse._sys.argv=[# pylint: disable=W0212 "test" "show_commands"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(default=default_function commands=[custom_function1 custom_function2])<block_end>self.assertNotEqual(len(buf.getvalue()) 0)<line_sep>self.assertRegexpMatches(buf.getvalue() r"default_function")<line_sep>self.assertRegexpMatches(buf.getvalue() r"custom_function1")<line_sep>self.assertRegexpMatches(buf.getvalue() r"custom_function2")<block_end><def_stmt>test_main_not_enough_arguments self<block_start>"""Test running main without command """<line_sep># Monkey patch arg parser here argparse._sys.argv=[# pylint: disable=W0212 "test"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main()<block_end>self.assertNotEqual(len(buf.getvalue()) 0)<line_sep>self.assertRegexpMatches(buf.getvalue() r"[Nn]ot enough arguments")<block_end><def_stmt>test_main_runs_default self<block_start>"""Test running main with default command """<line_sep># Monkey patch arg parser here argparse._sys.argv=[# pylint: disable=W0212 "test"]<line_sep>run=[<false>]<def_stmt>custom_function <block_start>"""User function """<line_sep>run[0]=<true><block_end>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(default=custom_function)<block_end>self.assertEqual(len(buf.getvalue()) 0)<line_sep>self.assertTrue(run[0])<block_end><def_stmt>test_main_complains_default self<block_start>"""Test running main with default command taking an argument """<line_sep># Monkey patch arg parser here argparse._sys.argv=[# pylint: disable=W0212 "test"]<def_stmt>custom_function _argument<block_start>"""User function that takes an argument """<line_sep><pass><block_end>self.assertRaises(TypeError experiment.main 
default=custom_function)<block_end><def_stmt>test_main_runs_other_function self<block_start>"""Test running main with default command and other function """<line_sep># Monkey patch arg parser here argparse._sys.argv=[# pylint: disable=W0212 "test" "custom_function2"]<line_sep>run=[<false> <false>]<def_stmt>custom_function <block_start>"""User function """<line_sep>run[0]=<true><block_end><def_stmt>custom_function2 <block_start>"""User function2 """<line_sep>run[1]=<true><block_end>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(default=custom_function commands=[custom_function2])<block_end>self.assertEqual(len(buf.getvalue()) 0)<line_sep>self.assertFalse(run[0])<line_sep>self.assertTrue(run[1])<block_end><def_stmt>test_main_does_not_run_function self<block_start>"""Test running main does not call unnecessary function but complains """<line_sep>run=[<false>]<def_stmt>custom_function <block_start>"""User function """<line_sep>run[0]=<true><block_end># Monkey patch arg parser here argparse._sys.argv=[# pylint: disable=W0212 "test" "help"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(commands=[custom_function])<block_end>self.assertFalse(run[0])<line_sep>self.assertNotEqual(len(buf.getvalue()) 0)<block_end><def_stmt>test_main_gives_help self<block_start>"""Test running help shows docstring """<line_sep>run=[<false>]<def_stmt>custom_function <block_start>"""This should be printed!! """<line_sep>run[0]=<true><block_end># Monkey patch arg parser here argparse._sys.argv=[# pylint: disable=W0212 "test" "help" "custom_function"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(commands=[custom_function])<block_end>self.assertFalse(run[0])<line_sep>self.assertIn("This should be printed!!" 
buf.getvalue())<block_end><def_stmt>test_main_complains_on_help self<block_start>"""Test running help complains on help for wrong command """<def_stmt>custom_function <block_start>"""Foo function """<line_sep><pass><block_end># Monkey patch arg parser here argparse._sys.argv=[# pylint: disable=W0212 "test" "help" "foo"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(commands=[custom_function])<block_end>self.assertRegexpMatches(buf.getvalue() r"[cC]ommand")<line_sep>self.assertRegexpMatches(buf.getvalue() r"not")<line_sep>self.assertRegexpMatches(buf.getvalue() r"foo")<block_end><def_stmt>test_main_runs_test self<block_start>"""Test running main calls tests when needed """<class_stmt>ExampleTest(unittest.TestCase)<block_start>"""Test case for the test """<line_sep><pass><block_end># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "test"]<with_stmt>mock.patch.object(unittest 'TextTestRunner')<as>mock_method<block_start>experiment.main(commands=[] tests=[ExampleTest])<block_end>self.assertEqual(mock_method.call_count 1)<block_end><def_stmt>test_main_shows_test self<block_start>"""Test running main shows tests when needed """<class_stmt>ExampleTest(unittest.TestCase)<block_start>"""Test case for the test """<line_sep><pass><block_end># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "show_tests"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(tests=[ExampleTest])<block_end>self.assertRegexpMatches(buf.getvalue() r"ExampleTest")<block_end><def_stmt>test_main_shows_no_test self<block_start>"""Test running main complains if there are no tests """<line_sep># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "show_tests"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(tests=[])<block_end>self.assertRegexpMatches(buf.getvalue() r"No tests 
available")<block_end><def_stmt>test_main_doesnt_test_on_help self<block_start>"""Test running main does not call tests when not needed """<class_stmt>ExampleTest(unittest.TestCase)<block_start>"""Test case for the test """<line_sep><pass><block_end># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "-h"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start><with_stmt>mock.patch.object(unittest 'TextTestRunner')<as>mock_method<block_start><try_stmt><block_start>experiment.main(commands=[] tests=[ExampleTest])<line_sep>self.assertEqual(mock_method.call_count 0)<block_end><except_stmt>SystemExit<block_start><pass><block_end><block_end><block_end><block_end>@mock.patch('pyexperiment.experiment.embed_interactive')<def_stmt>test_main_runs_interactive self mock_interactive<block_start>"""Test running main runs interactive session """<line_sep>argparse._sys.argv=[# pylint: disable=W0212 "test" "--interactive"]<line_sep>experiment.main(commands=[] tests=[])<line_sep>self.assertTrue(mock_interactive.call_count<eq>1)<block_end><def_stmt>test_main_shows_empty_state self<block_start>"""Test running main shows empty state """<with_stmt>tempfile.NamedTemporaryFile()<as>temp<block_start>state['bla']=12<del_stmt>state['bla']<line_sep>state.save(temp.name)<line_sep>spec=('[pyexperiment]\n'<concat>'state_filename = string(default=%s)'%temp.name)<line_sep># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "show_state"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(config_spec=spec)<block_end>self.assertRegexpMatches(buf.getvalue() r"[Ss]tate empty")<block_end><block_end><def_stmt>test_main_shows_default_state self<block_start>"""Test running main shows the default state """<with_stmt>tempfile.NamedTemporaryFile()<as>temp<block_start>state['bla']=12<line_sep>state.save(temp.name)<line_sep>spec=('[pyexperiment]\n'<concat>'state_filename = 
string(default=%s)'%temp.name)<line_sep># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "show_state"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(config_spec=spec)<block_end>self.assertRegexpMatches(buf.getvalue() r"bla")<line_sep>self.assertRegexpMatches(buf.getvalue() r"12")<block_end><block_end><def_stmt>test_main_shows_other_state self<block_start>"""Test running main shows state from file """<with_stmt>tempfile.NamedTemporaryFile()<as>temp<block_start>state['foo']=42<line_sep>state.save(temp.name)<line_sep># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "show_state" temp.name]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main()<block_end>self.assertRegexpMatches(buf.getvalue() r"foo")<line_sep>self.assertRegexpMatches(buf.getvalue() r"42")<block_end><block_end><def_stmt>test_main_shows_config self<block_start>"""Test running main shows the configuration """<line_sep># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "show_config"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main()<block_end>self.assertRegexpMatches(buf.getvalue() r"\[pyexperiment\]")<line_sep>self.assertRegexpMatches(buf.getvalue() r"n_processes")<block_end><def_stmt>test_main_saves_config self<block_start>"""Test running main saves the configuration """<with_stmt>tempfile.NamedTemporaryFile()<as>temp# Monkey patch arg parser <block_start>argparse._sys.argv=[# pylint: disable=W0212 "test" "save_config" temp.name]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main()<block_end>lines=open(temp.name).readlines()<line_sep>self.assertNotEqual(len(lines) 0)<line_sep>self.assertRegexpMatches("".join(lines) r"\[pyexperiment\]")<line_sep>self.assertRegexpMatches("".join(lines) r"n_processes")<line_sep>self.assertRegexpMatches(buf.getvalue() r'Wrote 
configuration')<block_end><block_end><block_end><class_stmt>TestExperimentOverrides(unittest.TestCase)<block_start>"""Test the experiment module's option overriding """<def_stmt>test_main_overrides_option self<block_start>"""Test running main called with -o works as expected """<line_sep>called=[<false>]<def_stmt>foo_fun <block_start>"""Foo function """<line_sep>called[0]=<true><line_sep>self.assertEqual(conf['bla'] 'foo')<block_end>conf['bla']='bla'<line_sep># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "-o" "bla" "foo" "foo_fun"]<line_sep>self.assertFalse(called[0])<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(commands=[foo_fun])<block_end>self.assertTrue(called[0])<line_sep>self.assertEqual(conf['bla'] 'foo')<block_end><def_stmt>test_main_no_processes_default self<block_start>"""Test running main called without -j works as expected """<line_sep>called=[<false>]<def_stmt>foo_fun <block_start>"""Foo function """<line_sep>called[0]=<true><block_end># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "foo_fun"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(commands=[foo_fun])<block_end>self.assertTrue(called[0])<line_sep>self.assertEqual(conf['pyexperiment.n_processes'] multiprocessing.cpu_count())<block_end><def_stmt>test_main_no_processes_simple self<block_start>"""Test running main called with -j works as expected """<line_sep>called=[<false>]<def_stmt>foo_fun <block_start>"""Foo function """<line_sep>called[0]=<true><block_end># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "-j" "42" "foo_fun"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(commands=[foo_fun])<block_end>self.assertTrue(called[0])<line_sep>self.assertEqual(conf['pyexperiment.n_processes'] 42)<block_end><def_stmt>test_main_no_processes_long self<block_start>"""Test running main called with 
--processes works as expected """<line_sep>called=[<false>]<def_stmt>foo_fun <block_start>"""Foo function """<line_sep>called[0]=<true><block_end># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "--processes" "44" "foo_fun"]<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(commands=[foo_fun])<block_end>self.assertTrue(called[0])<line_sep>self.assertEqual(conf['pyexperiment.n_processes'] 44)<block_end><block_end><class_stmt>TestExperimentLogging(unittest.TestCase)<block_start>"""Test the experiment's logging context """<def_stmt>setUp self<block_start>"""Set up the test """<line_sep>self.log_stream=io.StringIO()<line_sep>Logger.CONSOLE_STREAM_HANDLER=logging.StreamHandler(self.log_stream)<line_sep>log.reset_instance()<line_sep>conf.reset_instance()<block_end><def_stmt>test_main_logs_console self<block_start>"""Test running main logs as expected """<line_sep>argparse._sys.argv=[# pylint: disable=W0212 "test"]<def_stmt>hello <block_start>"""Logs a message """<line_sep>log.fatal("Hello")<block_end>experiment.main(default=hello)<line_sep>self.assertNotEqual(len(self.log_stream.getvalue()) 0)<line_sep>self.assertRegexpMatches(self.log_stream.getvalue() r'Hello')<block_end><def_stmt>test_main_prints_timings self<block_start>"""Test running main logs timings as expected """<line_sep>argparse._sys.argv=[# pylint: disable=W0212 "test" "-o" "pyexperiment.print_timings" "True"]<def_stmt>hello <block_start>"""Logs a message """<with_stmt>log.timed("bla")<block_start><pass><block_end><block_end>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(default=hello)<block_end>self.assertNotEqual(len(buf.getvalue()) 0)<line_sep>self.assertRegexpMatches(buf.getvalue() r'bla')<block_end><def_stmt>test_main_prints_timings_simple self<block_start>"""Test running main logs timings as expected with --print_timings """<line_sep>argparse._sys.argv=[# pylint: disable=W0212 "test" 
"--print-timings"]<def_stmt>hello <block_start>"""Logs a message """<with_stmt>log.timed("bla")<block_start><pass><block_end><block_end>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(default=hello)<block_end>self.assertNotEqual(len(buf.getvalue()) 0)<line_sep>self.assertRegexpMatches(buf.getvalue() r'bla')<block_end><def_stmt>test_main_logs_file self<block_start>"""Test running main logs as expected """<line_sep>conf['pyexperiment.rotate_n_logs']=0<line_sep>argparse._sys.argv=[# pylint: disable=W0212 "test"]<def_stmt>hello <block_start>"""Logs a message """<line_sep>log.debug("Hello")<block_end><with_stmt>tempfile.NamedTemporaryFile()<as>temp<block_start>conf['pyexperiment.log_filename']=temp.name<line_sep>conf['pyexperiment.log_to_file']=<true><line_sep>experiment.main(default=hello)<line_sep>lines=open(temp.name).readlines()<line_sep>self.assertNotEqual(len(lines) 0)<line_sep>self.assertRegexpMatches("".join(lines) r'Hello')<block_end>self.assertEqual(len(self.log_stream.getvalue()) 0)<block_end><def_stmt>test_main_verbosity_debug self<block_start>"""Test running main called with -v works as expected """<line_sep>called=[<false>]<def_stmt>foo_fun <block_start>"""Foo function """<line_sep>called[0]=<true><block_end># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "-v" "foo_fun"]<line_sep>self.assertFalse(called[0])<line_sep>experiment.main(commands=[foo_fun])<line_sep>self.assertTrue(called[0])<line_sep>self.assertEqual(conf['pyexperiment.verbosity'] 'DEBUG')<block_end><def_stmt>test_main_overrides_verbosity self<block_start>"""Test running main called with --verbosity works as expected """<line_sep>called=[<false>]<def_stmt>foo_fun <block_start>"""Foo function """<line_sep>called[0]=<true><block_end># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "--verbosity" "DEBUG" 
"foo_fun"]<line_sep>self.assertFalse(called[0])<line_sep>buf=io.StringIO()<with_stmt>stdout_redirector(buf)<block_start>experiment.main(commands=[foo_fun])<block_end>self.assertTrue(called[0])<line_sep>self.assertEqual(conf['pyexperiment.verbosity'] 'DEBUG')<line_sep>called[0]=<false><line_sep># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "--verbosity" "WARNING" "foo_fun"]<line_sep>self.assertFalse(called[0])<line_sep>experiment.main(commands=[foo_fun])<line_sep>self.assertTrue(called[0])<line_sep>self.assertEqual(conf['pyexperiment.verbosity'] 'WARNING')<block_end><def_stmt>test_logger_after_exception self<block_start>"""Test logger closing correctly after exception """<line_sep># Monkey patch log closing close_old=Logger.Logger.close<line_sep>called=[<false>]<def_stmt>close self<block_start>"""Close the logger and record it"""<line_sep>close_old(self)<line_sep>called[0]=<true><block_end>Logger.Logger.close=close<def_stmt>foo_fun <block_start>"""Foo function """<line_sep><raise>RuntimeError<block_end># Monkey patch arg parser argparse._sys.argv=[# pylint: disable=W0212 "test" "foo_fun"]<try_stmt><block_start>experiment.main(commands=[foo_fun])<block_end><except_stmt>RuntimeError<block_start><pass><block_end><else_stmt><block_start><raise>AssertionError("RuntimeError not raised")<block_end># Make sure logger is closed self.assertTrue(called[0])<line_sep>Logger.Logger.close=close_old<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
"""Small ID/naming/HTML-rendering helpers."""
import json
import uuid


def gen_unique_id(prefix='', length=5):
    """Return *prefix* followed by ``length`` uppercase hex chars of a UUID4.

    Note: short lengths increase the chance of collisions; 5 hex chars give
    only ~1M distinct values.
    """
    return prefix + str(uuid.uuid4()).upper().replace('-', '')[:length]


def get_class_name(obj):
    """Return the class name of *obj*, or ``obj.__name__`` for functions.

    Plain functions report the uninformative class name ``'function'``, in
    which case the function's own name is returned instead.
    """
    invalid_class_names = ['function']
    classname = obj.__class__.__name__
    if classname is None or classname in invalid_class_names:
        classname = obj.__name__
    return classname


def dict_to_html(dd, level=0):
    """Convert dict to html using basic html tags.

    Nested dicts are rendered recursively with ``&nbsp;`` indentation;
    lists are JSON-encoded; other values are interpolated as-is.
    """
    # stdlib json replaces the former third-party simplejson dependency;
    # output is identical for JSON-serializable lists.
    text = ''
    for k, v in dd.items():
        text += '<br>' + '&nbsp;' * (4 * level) + '<b>%s</b>: %s' % (
            k,
            dict_to_html(v, level + 1) if isinstance(v, dict)
            else (json.dumps(v) if isinstance(v, list) else v))
    return text


def dict_to_html_ul(dd, level=0):
    """Convert dict to html using ul/li tags.

    Same value handling as :func:`dict_to_html`, but each mapping becomes a
    nested ``<ul>`` list.
    """
    text = '<ul>'
    for k, v in dd.items():
        text += '<li><b>%s</b>: %s</li>' % (
            k,
            dict_to_html_ul(v, level + 1) if isinstance(v, dict)
            else (json.dumps(v) if isinstance(v, list) else v))
    text += '</ul>'
    return text
"""Packaging script for pytorch-adapt (src-layout setuptools project)."""
import sys

import setuptools

# Make the in-tree package importable so its __version__ can be read below
# without installing it first.
sys.path.insert(0, "src")
import pytorch_adapt

# The PyPI long description comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Optional dependency groups, installable as pytorch-adapt[<name>].
extras_require_ignite = ["pytorch-ignite == 0.5.0.dev20220221"]
extras_require_lightning = ["pytorch-lightning"]
extras_require_record_keeper = ["record-keeper >= 0.9.31"]
extras_require_timm = ["timm"]
extras_require_docs = [
    "mkdocs-material",
    "mkdocstrings[python]",
    "griffe",
    "mkdocs-gen-files",
    "mkdocs-section-index",
    "mkdocs-literate-nav",
]
extras_require_dev = ["black", "isort", "nbqa", "flake8"]

setuptools.setup(
    name="pytorch-adapt",
    version=pytorch_adapt.__version__,
    author="<NAME>",
    description="Domain adaptation made easy. Fully featured, modular, and customizable.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/KevinMusgrave/pytorch-adapt",
    # src layout: packages live under src/.
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.0",
    install_requires=[
        "numpy",
        "torch",
        "torchvision",
        "torchmetrics",
        "pytorch-metric-learning >= 1.3.1.dev0",
    ],
    extras_require={
        "ignite": extras_require_ignite,
        "lightning": extras_require_lightning,
        "record-keeper": extras_require_record_keeper,
        "timm": extras_require_timm,
        "docs": extras_require_docs,
        "dev": extras_require_dev,
    },
)
""" BracketHighlighter. Copyright (c) 2013 - 2016 <NAME> <<EMAIL>> License: MIT """<import_from_stmt>BracketHighlighter.bh_plugin import_module<line_sep>lowercase=import_module("bh_modules.lowercase")<def_stmt>validate *args<block_start>"""Check if bracket is lowercase."""<line_sep><return>lowercase.validate(*args)<block_end>
# Demonstration of structural pattern matching (PEP 634) on a dict.
d = {'a': 1, 'c': 3}

match d:
    # Matches only if BOTH keys 'a' and 'b' are present; binds d['a'].
    case {'a': chave_a, 'b': _}:
        print(f'chave A {chave_a=} + chave B')
    # Matches if key 'a' OR key 'c' is present — this branch fires for d.
    case {'a': _} | {'c': _}:
        print('chave A ou C')
    # An empty mapping pattern matches ANY mapping, so this effectively
    # means "any other dict".
    case {}:
        print('vazio')
    # Fallback for non-mapping subjects.
    case _:
        print('Não sei')
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Layer for applying channel responses to channel inputs in the time domain"""

import tensorflow as tf
import numpy as np
import scipy

from sionna.utils import insert_dims
from .awgn import AWGN


class ApplyTimeChannel(tf.keras.layers.Layer):
    # pylint: disable=line-too-long
    r"""ApplyTimeChannel(num_time_samples, l_tot, add_awgn=True, dtype=tf.complex64, **kwargs)

    Apply time domain channel responses ``h_time`` to channel inputs ``x``,
    by filtering the channel inputs with time-variant channel responses.

    This class inherits from the Keras `Layer` class and can be used as layer
    in a Keras model.

    For each batch example, ``num_time_samples`` + ``l_tot`` - 1 time steps of a
    channel realization are required to filter the channel inputs.

    The channel output consists of ``num_time_samples`` + ``l_tot`` - 1
    time samples, as it is the result of filtering the channel input of length
    ``num_time_samples`` with the time-variant channel filter of length
    ``l_tot``.

    In the case of a single-input single-output link and given a sequence of
    channel inputs :math:`x_0,\cdots,x_{N_B}`, where :math:`N_B` is
    ``num_time_samples``, this layer outputs

    .. math::
        y_b = \sum_{\ell = 0}^{L_{\text{tot}}} x_{b-\ell} \bar{h}_{b,\ell} + w_b

    where :math:`L_{\text{tot}}` corresponds ``l_tot``, :math:`w_b` to the
    additive noise, and :math:`\bar{h}_{b,\ell}` to the :math:`\ell^{th}` tap
    of the :math:`b^{th}` channel sample.
    This layer outputs :math:`y_b` for :math:`b` ranging from 0 to
    :math:`N_B + L_{\text{tot}} - 1`, and :math:`x_{b}` is set to 0 for
    :math:`b \geq N_B`.

    For multiple-input multiple-output (MIMO) links, the channel output is
    computed for each antenna of each receiver and by summing over all the
    antennas of all transmitters.

    Parameters
    ----------

    num_time_samples : int
        Number of time samples forming the channel input (:math:`N_B`)

    l_tot : int
        Length of the channel filter (:math:`L_{\text{tot}} = L_{\text{max}} - L_{\text{min}} + 1`)

    add_awgn : bool
        If set to `False`, no white Gaussian noise is added.
        Defaults to `True`.

    dtype : tf.DType
        Complex datatype to use for internal processing and output.
        Defaults to `tf.complex64`.

    Input
    -----

    (x, h_time, no) or (x, h_time):
        Tuple:

    x : [batch size, num_tx, num_tx_ant, num_time_samples], tf.complex
        Channel inputs

    h_time : [batch size, num_rx, num_rx_ant, num_tx, num_tx_ant, num_time_samples + l_tot - 1, l_tot], tf.complex
        Channel responses.
        For each batch example, ``num_time_samples`` + ``l_tot`` - 1 time
        steps of a channel realization are required to filter the channel
        inputs.

    no : Scalar or Tensor, tf.float
        Scalar or tensor whose shape can be broadcast to the shape of the
        channel outputs:
        [batch size, num_rx, num_rx_ant, num_time_samples + l_tot - 1].
        Only required if ``add_awgn`` is set to `True`.
        The noise power ``no`` is per complex dimension. If ``no`` is a
        scalar, noise of the same variance will be added to the outputs. If
        ``no`` is a tensor, it must have a shape that can be broadcast to the
        shape of the channel outputs. This allows, e.g., adding noise of
        different variance to each example in a batch. If ``no`` has a lower
        rank than the channel outputs, then ``no`` will be broadcast to the
        shape of the channel outputs by adding dummy dimensions after the
        last axis.

    Output
    -------

    y : [batch size, num_rx, num_rx_ant, num_time_samples + l_tot - 1], tf.complex
        Channel outputs.
        The channel output consists of ``num_time_samples`` + ``l_tot`` - 1
        time samples, as it is the result of filtering the channel input of
        length ``num_time_samples`` with the time-variant channel filter of
        length ``l_tot``.
    """

    def __init__(self, num_time_samples, l_tot, add_awgn=True,
                 dtype=tf.complex64, **kwargs):
        super().__init__(trainable=False, dtype=dtype, **kwargs)

        self._add_awgn = add_awgn

        # The channel transfer function is implemented by first gathering from
        # the vector of transmitted baseband symbols
        # x = [x_0,...,x_{num_time_samples-1}]^T the symbols that are then
        # multiplied by the channel tap coefficients.
        # We build here the matrix of indices G, with size
        # `num_time_samples + l_tot - 1` x `l_tot` that is used to perform this
        # gathering.
        # For example, if there are 4 channel taps
        # h = [h_0, h_1, h_2, h_3]^T
        # and `num_time_samples` = 10 time steps then G would be
        #       [[0, 10, 10, 10]
        #        [1,  0, 10, 10]
        #        [2,  1,  0, 10]
        #        [3,  2,  1,  0]
        #        [4,  3,  2,  1]
        #        [5,  4,  3,  2]
        #        [6,  5,  4,  3]
        #        [7,  6,  5,  4]
        #        [8,  7,  6,  5]
        #        [9,  8,  7,  6]
        #        [10, 9,  8,  7]
        #        [10,10,  9,  8]
        #        [10,10, 10,  9]
        # Note that G is a Toeplitz matrix.
        # In this example, the index `num_time_samples`=10 corresponds to the
        # zero symbol. The vector of transmitted symbols is padded with one
        # zero at the end.
        first_colum = np.concatenate([np.arange(0, num_time_samples),
                                      np.full([l_tot-1], num_time_samples)])
        first_row = np.concatenate([[0],
                                    np.full([l_tot-1], num_time_samples)])
        # NOTE(review): relies on `scipy.linalg` being importable as an
        # attribute after a bare `import scipy` — confirm with the pinned
        # scipy version (newer scipy requires `import scipy.linalg`).
        self._g = scipy.linalg.toeplitz(first_colum, first_row)

    def build(self, input_shape):  # pylint: disable=unused-argument
        # Instantiate the noise layer lazily, only when AWGN is requested.
        if self._add_awgn:
            self._awgn = AWGN(dtype=self.dtype)

    def call(self, inputs):
        # The noise power `no` is only part of the input tuple when AWGN is
        # enabled.
        if self._add_awgn:
            x, h_time, no = inputs
        else:
            x, h_time = inputs

        # Preparing the channel input for broadcasting and matrix
        # multiplication: pad one zero symbol at the end (index
        # `num_time_samples` in the gather matrix G maps to it), add two
        # dummy dimensions for rx/rx_ant broadcasting, then gather the
        # delayed symbol taps according to G.
        x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, 1]])
        x = insert_dims(x, 2, axis=1)
        x = tf.gather(x, self._g, axis=-1)

        # Apply the channel response: multiply-accumulate over the taps, then
        # sum over all transmit antennas of all transmitters.
        y = tf.reduce_sum(h_time*x, axis=-1)
        y = tf.reduce_sum(tf.reduce_sum(y, axis=4), axis=3)

        # Add AWGN if requested
        if self._add_awgn:
            y = self._awgn((y, no))

        return y
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. <import_stmt>unittest<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>os<import_stmt>cProfile<import_from_stmt>CountingGridsPy.models CountingGridModel CountingGridModelWithGPU<class_stmt>TimeGPUvsCPU(object)<block_start><def_stmt>__init__ self<block_start>SEED="03071994"<line_sep>np.random.seed(int(SEED))<line_sep>M,N=[5000 1000]<line_sep>extentSize=40<line_sep>self.data=np.round(np.random.random((M N))<times>10)<line_sep>self.extent=np.array([extentSize extentSize])<line_sep>self.window=np.array([5 5])<line_sep>self.pi_init=np.ones([extentSize]<times>2+[N])/N<line_sep>self.cpuModel=CountingGridModel(self.extent self.window)<line_sep>self.gpuModel=CountingGridModelWithGPU(self.extent self.window)<block_end><def_stmt>run_nolayers self<block_start>numIters=50<line_sep>device=torch.device("cuda:0")<line_sep>outfileForGPU="gpuProfile.txt"<line_sep>gpuJob='''self.gpuModel.fit( self.data, max_iter=numIters, pi=torch.tensor(self.pi_init, device=device, dtype=torch.double), layers=1 ) '''<line_sep>cProfile.runctx(gpuJob globals() locals() outfileForGPU)<line_sep>outfileForCPU="cpuProfile.txt"<line_sep>cpuJob='''self.cpuModel.fit( self.data, max_iter=numIters, returnSumSquareDifferencesOfPi=False, pi=np.copy(self.pi_init), layers=1 ) '''<line_sep>cProfile.runctx(cpuJob globals() locals() outfileForCPU)<block_end><def_stmt>run_withlayers self<block_start>numIters=50<line_sep>device=torch.device("cuda:0")<line_sep>outfileForGPU="gpu2LayersProfile.txt"<line_sep>gpuJob='''self.gpuModel.fit( self.data, max_iter=numIters, pi=torch.tensor(self.pi_init, device=device, dtype=torch.double), layers=2, writeOutput=False ) '''<line_sep>cProfile.runctx(gpuJob globals() locals() outfileForGPU)<line_sep>outfileForCPU="cpu2LayersProfile.txt"<line_sep>cpuJob='''self.cpuModel.fit( self.data, max_iter=numIters, returnSumSquareDifferencesOfPi=False, pi=np.copy(self.pi_init), layers=2, 
writeOutput=False ) '''<line_sep>cProfile.runctx(cpuJob globals() locals() outfileForCPU)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>o=TimeGPUvsCPU()<line_sep>o.run_withlayers()<block_end>
# Copyright 2017 The TensorFlow Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example configurations using the PPO algorithm."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<line_sep># pylint: disable=unused-variable <import_from_stmt>pybullet_envs.minitaur.agents ppo<import_from_stmt>pybullet_envs.minitaur.agents.scripts networks<def_stmt>default <block_start>"""Default configuration for PPO."""<line_sep># General algorithm=ppo.PPOAlgorithm<line_sep>num_agents=10<line_sep>eval_episodes=25<line_sep>use_gpu=<false><line_sep># Network network=networks.ForwardGaussianPolicy<line_sep>weight_summaries=dict(all=r'.*' policy=r'.*/policy/.*' value=r'.*/value/.*')<line_sep>policy_layers=200 100<line_sep>value_layers=200 100<line_sep>init_mean_factor=0.05<line_sep>init_logstd=-1<line_sep># Optimization update_every=25<line_sep>policy_optimizer='AdamOptimizer'<line_sep>value_optimizer='AdamOptimizer'<line_sep>update_epochs_policy=50<line_sep>update_epochs_value=50<line_sep>policy_lr=1e-4<line_sep>value_lr=3e-4<line_sep># Losses discount=0.985<line_sep>kl_target=1e-2<line_sep>kl_cutoff_factor=2<line_sep>kl_cutoff_coef=1000<line_sep>kl_init_penalty=1<line_sep><return>locals()<block_end><def_stmt>pendulum <block_start>"""Configuration for the pendulum classic control task."""<line_sep>locals().update(default())<line_sep># Environment env='Pendulum-v0'<line_sep>max_length=200<line_sep>steps=1e6# 1M 
<return>locals()<block_end><def_stmt>cheetah <block_start>"""Configuration for MuJoCo's half cheetah task."""<line_sep>locals().update(default())<line_sep># Environment env='HalfCheetah-v1'<line_sep>max_length=1000<line_sep>steps=1e7# 10M <return>locals()<block_end><def_stmt>walker <block_start>"""Configuration for MuJoCo's walker task."""<line_sep>locals().update(default())<line_sep># Environment env='Walker2d-v1'<line_sep>max_length=1000<line_sep>steps=1e7# 10M <return>locals()<block_end><def_stmt>reacher <block_start>"""Configuration for MuJoCo's reacher task."""<line_sep>locals().update(default())<line_sep># Environment env='Reacher-v1'<line_sep>max_length=1000<line_sep>steps=1e7# 10M <return>locals()<block_end><def_stmt>hopper <block_start>"""Configuration for MuJoCo's hopper task."""<line_sep>locals().update(default())<line_sep># Environment env='Hopper-v1'<line_sep>max_length=1000<line_sep>steps=2e7# 20M <return>locals()<block_end><def_stmt>ant <block_start>"""Configuration for MuJoCo's ant task."""<line_sep>locals().update(default())<line_sep># Environment env='Ant-v1'<line_sep>max_length=1000<line_sep>steps=5e7# 50M <return>locals()<block_end><def_stmt>humanoid <block_start>"""Configuration for MuJoCo's humanoid task."""<line_sep>locals().update(default())<line_sep># Environment env='Humanoid-v1'<line_sep>max_length=1000<line_sep>steps=5e7# 50M <return>locals()<block_end>
<import_stmt>json<class_stmt>TestUtils<block_start>@staticmethod<def_stmt>read_json_file file_path:str<block_start>file=open("./test/data/{0}".format(file_path))<line_sep>data=json.load(file)<line_sep><return>json.dumps(data indent=2 sort_keys=<true>)<block_end><block_end>
<import_stmt>pickle<import_stmt>inflect<line_sep>p=inflect.engine()<line_sep>words=set(['first' 'second' 'third' 'fourth' 'fifth' 'sixth' 'seventh' 'eighth' 'ninth' 'tenth' 'eleventh' 'twelfth' 'thirteenth' 'fourteenth' 'fifteenth' 'sixteenth' 'seventeenth' 'eighteenth' 'nineteenth' 'twentieth' 'twenty-first' 'twenty-second' 'twenty-third' 'twenty-fourth' 'twenty-fifth'])<line_sep>pickle.dump(words open("../data/constants/word_ordinals.p" "wb"))<line_sep>
<import_from_stmt>plenum.common.util hexToFriendly randomString<import_from_stmt>stp_core.common.log getlogger<import_from_stmt>plenum.test.node_catchup.helper waitNodeDataEquality<import_from_stmt>plenum.test.node_request.helper sdk_ensure_pool_functional<import_from_stmt>plenum.test.pool_transactions.helper sdk_send_update_node sdk_pool_refresh sdk_add_new_steward_and_node<import_from_stmt>plenum.test.test_node TestNode checkNodesConnected<import_from_stmt>stp_core.network.port_dispenser genHa<import_from_stmt>plenum.common.config_helper PNodeConfigHelper<line_sep>logger=getlogger()<def_stmt>testChangeHaPersistsPostNodesRestart looper txnPoolNodeSet tdir tconf sdk_pool_handle sdk_wallet_client sdk_wallet_steward<block_start>new_steward_wallet,new_node=sdk_add_new_steward_and_node(looper sdk_pool_handle sdk_wallet_steward 'AnotherSteward'+randomString(4) 'AnotherNode'+randomString(4) tdir tconf)<line_sep>txnPoolNodeSet.append(new_node)<line_sep>looper.run(checkNodesConnected(txnPoolNodeSet))<line_sep>sdk_pool_refresh(looper sdk_pool_handle)<line_sep>node_new_ha,client_new_ha=genHa(2)<line_sep>logger.debug("{} changing HAs to {} {}".format(new_node node_new_ha client_new_ha))<line_sep># Making the change HA txn an confirming its succeeded node_dest=hexToFriendly(new_node.nodestack.verhex)<line_sep>sdk_send_update_node(looper new_steward_wallet sdk_pool_handle node_dest new_node.name node_new_ha.host node_new_ha.port client_new_ha.host client_new_ha.port)<line_sep># Stopping existing nodes <for_stmt>node txnPoolNodeSet<block_start>node.stop()<line_sep>looper.removeProdable(node)<block_end># Starting nodes again by creating `Node` objects since that simulates # what happens when starting the node with script restartedNodes=[]<for_stmt>node txnPoolNodeSet[:-1]<block_start>config_helper=PNodeConfigHelper(node.name tconf chroot=tdir)<line_sep>restartedNode=TestNode(node.name config_helper=config_helper config=tconf ha=node.nodestack.ha 
cliha=node.clientstack.ha)<line_sep>looper.add(restartedNode)<line_sep>restartedNodes.append(restartedNode)<block_end># Starting the node whose HA was changed config_helper=PNodeConfigHelper(new_node.name tconf chroot=tdir)<line_sep>node=TestNode(new_node.name config_helper=config_helper config=tconf ha=node_new_ha cliha=client_new_ha)<line_sep>looper.add(node)<line_sep>restartedNodes.append(node)<line_sep>looper.run(checkNodesConnected(restartedNodes))<line_sep>waitNodeDataEquality(looper node *restartedNodes[:-1])<line_sep>sdk_pool_refresh(looper sdk_pool_handle)<line_sep>sdk_ensure_pool_functional(looper restartedNodes sdk_wallet_client sdk_pool_handle)<block_end>
# -*- coding: utf-8 -*- # # Author: <NAME> <<EMAIL>> # # Setup the SMRT module <import_from_future_stmt> print_function absolute_import division<import_from_stmt>distutils.command.clean clean<line_sep># from setuptools import setup # DO NOT use setuptools!!!!!! <import_stmt>shutil<import_stmt>os<import_stmt>sys<if_stmt>sys.version_info[0]<l>3<block_start><import_stmt>__builtin__<as>builtins<block_end><else_stmt><block_start><import_stmt>builtins<block_end># Hacky, adopted from sklearn. This sets a global variable # so smrt __init__ can detect if it's being loaded in the setup # routine, so it won't load submodules that haven't yet been built. builtins.__SMRT_SETUP__=<true><line_sep># metadata DISTNAME='smrt'<line_sep>DESCRIPTION='Handle class imbalance intelligently by using Variational Autoencoders '<concat>'to generate synthetic observations of your minority class.'<line_sep>MAINTAINER='<NAME>'<line_sep>MAINTAINER_EMAIL='<EMAIL>'<line_sep>LICENSE='new BSD'<line_sep># import restricted version <import_stmt>smrt<line_sep>VERSION=smrt.__version__<line_sep># get the installation requirements: <with_stmt>open('requirements.txt')<as>req<block_start>REQUIREMENTS=req.read().split(os.linesep)<block_end># Custom clean command to remove build artifacts -- adopted from sklearn <class_stmt>CleanCommand(clean)<block_start>description="Remove build artifacts from the source tree"<line_sep># this is mostly in case we ever add a Cython module to SMRT <def_stmt>run self<block_start>clean.run(self)<line_sep># Remove c files if we are not within a sdist package cwd=os.path.abspath(os.path.dirname(__file__))<line_sep>remove_c_files=<not>os.path.exists(os.path.join(cwd 'PKG-INFO'))<if_stmt>remove_c_files<block_start>cython_hash_file=os.path.join(cwd 'cythonize.dat')<if_stmt>os.path.exists(cython_hash_file)<block_start>os.unlink(cython_hash_file)<block_end>print('Will remove generated .c & .so 
files')<block_end><if_stmt>os.path.exists('build')<block_start>shutil.rmtree('build')<block_end><for_stmt>dirpath,dirnames,filenames os.walk(DISTNAME)<block_start><for_stmt>filename filenames<block_start><if_stmt>any(filename.endswith(suffix)<for>suffix (".so" ".pyd" ".dll" ".pyc"))<block_start>print('Removing file: %s'%filename)<line_sep>os.unlink(os.path.join(dirpath filename))<line_sep><continue><block_end>extension=os.path.splitext(filename)[1]<if_stmt>remove_c_files<and>extension<in>['.c' '.cpp']<block_start>pyx_file=str.replace(filename extension '.pyx')<if_stmt>os.path.exists(os.path.join(dirpath pyx_file))<block_start>os.unlink(os.path.join(dirpath filename))<block_end><block_end><block_end># this is for FORTRAN modules, which some of my other packages have used in the past... <for_stmt>dirname dirnames<block_start><if_stmt>dirname<eq>'__pycache__'<or>dirname.endswith('.so.dSYM')<block_start>print('Removing directory: %s'%dirname)<line_sep>shutil.rmtree(os.path.join(dirpath dirname))<block_end><block_end><block_end><block_end><block_end>cmdclass={'clean':CleanCommand}<def_stmt>configuration parent_package='' top_path=<none># we know numpy is a valid import now <block_start><import_from_stmt>numpy.distutils.misc_util Configuration<line_sep>config=Configuration(<none> parent_package top_path)<line_sep># Avoid non-useful msg # "Ignoring attempt to set 'name' (from ... 
" config.set_options(ignore_setup_xxx_py=<true> assume_default_configuration=<true> delegate_options_to_subpackages=<true> quiet=<true>)<line_sep>config.add_subpackage(DISTNAME)<line_sep><return>config<block_end><def_stmt>do_setup # setup the config <block_start>metadata=dict(name=DISTNAME maintainer=MAINTAINER maintainer_email=MAINTAINER_EMAIL description=DESCRIPTION license=LICENSE version=VERSION classifiers=['Intended Audience :: Science/Research' 'Intended Audience :: Developers' 'Intended Audience :: Scikit-learn users' 'Programming Language :: Python' 'Topic :: Machine Learning' 'Topic :: Software Development' 'Topic :: Scientific/Engineering' 'Operating System :: Microsoft :: Windows' 'Operating System :: POSIX' 'Operating System :: Unix' 'Operating System :: MacOS' 'Programming Language :: Python :: 2.7'] keywords='sklearn scikit-learn tensorflow auto-encoders neural-networks class-imbalance' # packages=[DISTNAME], # install_requires=REQUIREMENTS, cmdclass=cmdclass)<if_stmt>len(sys.argv)<eq>1<or>(len(sys.argv)<ge>2<and>('--help'<in>sys.argv[1:]<or>sys.argv[1]<in>('--help-commands' 'egg-info' '--version' 'clean')))# For these actions, NumPy is not required <block_start><try_stmt><block_start><import_from_stmt>setuptools setup<block_end><except_stmt>ImportError<block_start><import_from_stmt>distutils.core setup<block_end><block_end><else_stmt># we DO need numpy <block_start><try_stmt><block_start><import_from_stmt>numpy.distutils.core setup<block_end><except_stmt>ImportError<block_start><raise>RuntimeError('Need numpy to build %s'%DISTNAME)<block_end># add the config to the metadata metadata['configuration']=configuration<block_end># call setup on the dict setup(**metadata)<block_end><if_stmt>__name__<eq>'__main__'<block_start>do_setup()<block_end>
<import_stmt>KratosMultiphysics<as>KratosMultiphysics<import_stmt>KratosMultiphysics.CableNetApplication<as>CableNetApplication<import_from_stmt>KratosMultiphysics Logger<def_stmt>Factory settings Model<block_start><if_stmt>(type(settings)<ne>KratosMultiphysics.Parameters)<block_start><raise>Exception("expected input shall be a Parameters object, encapsulating a json string")<block_end><return>EdgeCableElementProcess(Model settings["Parameters"])<block_end><class_stmt>custom_node<block_start><def_stmt>__init__ self start_distance kratos_node<block_start>self.start_distance=start_distance<line_sep>self.kratos_node=kratos_node<block_end><def_stmt>return_distance_to_line_start self<block_start><return>self.start_distance<block_end><block_end><def_stmt>return_node_distance_to_line_start node<block_start><return>node.return_distance_to_line_start()<block_end><class_stmt>EdgeCableElementProcess(KratosMultiphysics.Process)<block_start><def_stmt>__init__ self Model settings<block_start>KratosMultiphysics.Process.__init__(self)<line_sep>default_settings=KratosMultiphysics.Parameters(""" { "edge_sub_model_part_name" : "Structure.example_part", "element_type" : "cable", "node_id_order" : [1,2,3], "element_id" : 1, "property_id" : 1 } """)<line_sep>default_settings.ValidateAndAssignDefaults(settings)<line_sep>self.edge_model_part=Model[settings["edge_sub_model_part_name"].GetString()]<line_sep>node_list=settings["node_id_order"].GetVector()<if_stmt>len(node_list)<eq>0<block_start>node_list=self.CreateCorrectNodeOrder()<line_sep>settings["node_id_order"].SetVector(node_list)<block_end>self.edge_cable_element_process=CableNetApplication.EdgeCableElementProcess(self.edge_model_part settings)<block_end><def_stmt>ExecuteInitialize self<block_start>self.edge_cable_element_process.ExecuteInitialize()<line_sep>Logger.PrintInfo("Initialized" "EdgeCableElementProcess")<block_end><def_stmt>CreateCorrectNodeOrder self## find start/end nodes and calculate total distance 
<block_start>max_distance,end_points=0 []<for_stmt>node_i self.edge_model_part.Nodes<block_start><for_stmt>node_j self.edge_model_part.Nodes<block_start>distance_i=(node_i.X0-node_j.X0)<times>(node_i.X0-node_j.X0)<line_sep>distance_i<augadd>(node_i.Y0-node_j.Y0)<times>(node_i.Y0-node_j.Y0)<line_sep>distance_i<augadd>(node_i.Z0-node_j.Z0)<times>(node_i.Z0-node_j.Z0)<line_sep>distance_i=distance_i<power>0.5<if_stmt>distance_i<g>max_distance<block_start>max_distance=distance_i<line_sep>end_points=[node_i node_j]<block_end><block_end><block_end>## create sorted node_list custom_node_list=[]<for_stmt>node_i self.edge_model_part.Nodes<block_start>distance_i=(node_i.X0-end_points[0].X0)<times>(node_i.X0-end_points[0].X0)<line_sep>distance_i<augadd>(node_i.Y0-end_points[0].Y0)<times>(node_i.Y0-end_points[0].Y0)<line_sep>distance_i<augadd>(node_i.Z0-end_points[0].Z0)<times>(node_i.Z0-end_points[0].Z0)<line_sep>distance_i=distance_i<power>0.5<line_sep>custom_node_i=custom_node(distance_i node_i)<line_sep>custom_node_list.append(custom_node_i)<block_end>sorted_node_list=sorted(custom_node_list key=return_node_distance_to_line_start)<line_sep><return>[node.kratos_node.Id<for>node sorted_node_list]<block_end><block_end>
""" Copyright 2020 ETH Zurich, Secure, Reliable, and Intelligent Systems Lab Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_stmt>math<import_from_stmt>itertools product<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>mpmath polyroots<import_from_stmt>spatial.t_inf_norm_transformer TInfNormTransformer<import_from_stmt>spatial.t_norm_transformer TNormTransformer<import_from_stmt>spatial.interpolation interpolate<class_stmt>T2NormTransformer(TNormTransformer)<block_start><def_stmt>add_norm_constraints self model vx vy<block_start>model.addConstr(vx+vy<le>math.sqrt(2)<times>self.delta)<line_sep>model.addConstr(vx-vy<le>math.sqrt(2)<times>self.delta)<line_sep>model.addConstr(-vx+vy<le>math.sqrt(2)<times>self.delta)<line_sep>model.addConstr(-vx-vy<le>math.sqrt(2)<times>self.delta)<block_end><def_stmt>compute_candidates self<block_start>delta_sqr=self.delta<power>2<line_sep>radius=math.ceil(self.delta)<for_stmt>row,col product(range(-radius radius) repeat=2)<block_start>lb_row,ub_row=row row+1<line_sep>lb_col,ub_col=col col+1<line_sep>interpolation_region=[[lb_col ub_col] [lb_row ub_row]]<line_sep>distances_row=sorted((abs(lb_row) abs(ub_row)))<line_sep>distances_col=sorted((abs(lb_col) abs(ub_col)))<line_sep># no overlap with adversarial region <if_stmt>distances_row[0]<power>2+distances_col[0]<power>2<ge>delta_sqr<block_start><continue><block_end>flows=list()<line_sep>flows_by_channel=list()<line_sep># full overlap with interpolation region 
<if_stmt>distances_row[1]<power>2+distances_col[1]<power>2<le>delta_sqr<block_start>flows=[torch.tensor([lb_col lb_row]).repeat(self.batch_size self.height self.width 1).float().to(self.device) torch.tensor([ub_col lb_row]).repeat(self.batch_size self.height self.width 1).float().to(self.device) torch.tensor([lb_col ub_row]).repeat(self.batch_size self.height self.width 1).float().to(self.device) torch.tensor([ub_col ub_row]).repeat(self.batch_size self.height self.width 1).float().to(self.device)]<block_end><else_stmt><block_start><if_stmt>lb_col<power>2+lb_row<power>2<le>delta_sqr<block_start>flows.append(torch.tensor([lb_col lb_row]).repeat(self.batch_size self.height self.width 1).float().to(self.device))<block_end><if_stmt>ub_col<power>2+lb_row<power>2<le>delta_sqr<block_start>flows.append(torch.tensor([ub_col lb_row]).repeat(self.batch_size self.height self.width 1).float().to(self.device))<block_end><if_stmt>lb_col<power>2+ub_row<power>2<le>delta_sqr<block_start>flows.append(torch.tensor([lb_col ub_row]).repeat(self.batch_size self.height self.width 1).float().to(self.device))<block_end><if_stmt>ub_col<power>2+ub_row<power>2<le>delta_sqr<block_start>flows.append(torch.tensor([ub_col ub_row]).repeat(self.batch_size self.height self.width 1).float().to(self.device))<block_end>box_row=sorted((lb_row ub_row) key=abs)<line_sep>box_col=sorted((lb_col ub_col) key=abs)<line_sep>candidates=list()<line_sep>row_sign=-1<if>row<l>0<else>1<line_sep>col_sign=-1<if>col<l>0<else>1<if_stmt>box_col[0]<power>2<le>delta_sqr<block_start>candidates.append([box_col[0] row_sign<times>math.sqrt(delta_sqr-box_col[0]<power>2)])<block_end><if_stmt>box_col[1]<power>2<le>delta_sqr<block_start>candidates.append([box_col[1] row_sign<times>math.sqrt(delta_sqr-box_col[1]<power>2)])<block_end><if_stmt>box_row[0]<power>2<le>delta_sqr<block_start>candidates.append([col_sign<times>math.sqrt(delta_sqr-box_row[0]<power>2) 
box_row[0]])<block_end><if_stmt>box_row[1]<power>2<le>delta_sqr<block_start>candidates.append([col_sign<times>math.sqrt(delta_sqr-box_row[1]<power>2) box_row[1]])<block_end>endpoints=[candidate<for>candidate candidates<if>self.in_box(candidate lb_col ub_col lb_row ub_row)]<for_stmt>endpoint endpoints<block_start>flows.append(torch.tensor(endpoint).repeat(self.batch_size self.height self.width 1).float().to(self.device))<block_end>flows_by_channel=self.compute_extremum_on_arc(col=col row=row endpoints=endpoints interpolation_region=interpolation_region)<block_end><for_stmt>flow flows<block_start>candidate=interpolate(self.images flow)<for_stmt>channel range(self.channels)<block_start>self.candidates[channel].append(candidate[: channel])<line_sep>self.candidate_flows[channel].append(flow)<block_end><block_end><for_stmt>channel,flows enumerate(flows_by_channel)<block_start><for_stmt>flow flows<block_start>self.candidates[channel].append(interpolate(self.images flow)[: channel])<line_sep>self.candidate_flows[channel].append(flow)<block_end><block_end><block_end><block_end><def_stmt>in_box self point lb_x ub_x lb_y ub_y<block_start><return>(lb_x<le>point[0]<le>ub_x)<and>(lb_y<le>point[1]<le>ub_y)<block_end><def_stmt>compute_extremum_on_arc self col row endpoints interpolation_region<block_start>(lb_col ub_col),(lb_row ub_row)=interpolation_region<line_sep>alpha=interpolate(self.images.double() torch.tensor([lb_col lb_row]).double().to(self.device))<line_sep>beta=interpolate(self.images.double() torch.tensor([ub_col lb_row]).double().to(self.device))<line_sep>gamma=interpolate(self.images.double() torch.tensor([lb_col ub_row]).double().to(self.device))<line_sep>delta=interpolate(self.images.double() torch.tensor([ub_col ub_row]).double().to(self.device))<line_sep># a = torch.add( # alpha * ub_col * ub_row - beta * lb_col * ub_row, # delta * lb_col * lb_row - gamma * ub_col * lb_row # ) 
b=(beta-alpha)<times>ub_row+(gamma-delta)<times>lb_row<line_sep>c=(gamma-alpha)<times>ub_col+(beta-delta)<times>lb_col<line_sep>d=alpha-beta-gamma+delta<line_sep>e=-b/(2<times>d)<line_sep>f=b<times>b/(4<times>d<times>d)<line_sep>g=c/d<line_sep>h=e<times>e+f<line_sep>j=(self.delta<power>2-h)<power>2-4<times>f<times>e<times>e<line_sep>k=-2<times>g<times>((self.delta<power>2-h)+2<times>e<times>e)<line_sep>l=g<times>g-4<times>((self.delta<power>2-h)+e<times>e)<line_sep>m=4<times>g<line_sep>n=torch.full_like(m 4).double().to(self.device)<line_sep>flows=[[torch.zeros(self.batch_size self.height self.width 2).float().to(self.device)<for>_ range(16)]<for>channel range(self.channels)]<for_stmt>batch range(self.batch_size)<block_start><for_stmt>channel range(self.channels)<block_start><for_stmt>height range(self.height)<block_start><for_stmt>width range(self.width)<block_start>b_val=b[batch channel height width].item()<line_sep>c_val=c[batch channel height width].item()<line_sep>d_val=d[batch channel height width].item()<if_stmt>math.isclose(d_val 0 abs_tol=1e-6)<block_start><if_stmt>(c_val<eq>0)<or>(b_val<eq>0)<block_start><continue><block_end>denominator=math.sqrt(b_val<power>2+c_val<power>2)<line_sep>x=b_val<times>self.delta/denominator<line_sep>y=c_val<times>self.delta/denominator<line_sep>flows[channel][0][batch height width 0]=x<line_sep>flows[channel][0][batch height width 1]=y<line_sep>flows[channel][1][batch height width 0]=x<line_sep>flows[channel][1][batch height width 1]=-y<line_sep>flows[channel][2][batch height width 0]=-x<line_sep>flows[channel][2][batch height width 1]=y<line_sep>flows[channel][3][batch height width 0]=-x<line_sep>flows[channel][3][batch height width 1]=-y<line_sep><continue><block_end>coeffs=[n[batch channel height width].item() m[batch channel height width].item() l[batch channel height width].item() k[batch channel height width].item() j[batch channel height width].item()]<line_sep>roots=polyroots(coeffs maxsteps=500 
extraprec=100)<for_stmt>idx,root enumerate(roots)<block_start>root=complex(root)<if_stmt><not>math.isclose(root.imag 0 abs_tol=1e-7)<block_start><continue><block_end>x=float(root.real)<if_stmt>self.delta<power>2<l>x<power>2<block_start><continue><block_end>y=math.sqrt(self.delta<power>2-x<power>2)<line_sep>i=4<times>idx<line_sep>flows[channel][i+0][batch height width 0]=x<line_sep>flows[channel][i+0][batch height width 1]=y<line_sep>flows[channel][i+1][batch height width 0]=x<line_sep>flows[channel][i+1][batch height width 1]=-y<line_sep>flows[channel][i+2][batch height width 0]=-x<line_sep>flows[channel][i+2][batch height width 1]=y<line_sep>flows[channel][i+3][batch height width 0]=-x<line_sep>flows[channel][i+3][batch height width 1]=-y<block_end><block_end><block_end><block_end><block_end><for_stmt>channel range(self.channels)<block_start><for_stmt>idx range(16)<block_start>vx=flows[channel][idx][: : : 0]<line_sep>vy=flows[channel][idx][: : : 1]<line_sep>box_col_constraint=(lb_col<le>vx)&(vx<le>ub_col)<line_sep>box_row_constraint=(lb_row<le>vy)&(vy<le>ub_row)<line_sep>box_constraint=box_col_constraint&box_row_constraint<line_sep>flows[channel][idx][: : : 0]=torch.where(box_constraint vx torch.zeros_like(vx))<line_sep>flows[channel][idx][: : : 1]=torch.where(box_constraint vy torch.zeros_like(vy))<block_end><block_end><return>flows<block_end><def_stmt>linear_constraints self<block_start><return>TInfNormTransformer(self.images self.delta).linear_constraints()<block_end><block_end>
r""" batch 模块实现了 fastNLP 所需的 :class:`~fastNLP.core.batch.DataSetIter` 类。 """<line_sep>__all__=["BatchIter" "DataSetIter" "TorchLoaderIter" ]<import_stmt>atexit<import_stmt>abc<import_from_stmt>numbers Number<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.utils.data<import_from_stmt>collections defaultdict<import_from_stmt>.dataset DataSet<import_from_stmt>.sampler SequentialSampler Sampler<import_from_stmt>._logger logger<line_sep>_python_is_exit=<false><def_stmt>_set_python_is_exit <block_start><global>_python_is_exit<line_sep>_python_is_exit=<true><block_end>atexit.register(_set_python_is_exit)<def_stmt>_pad batch_dict dataset as_numpy<block_start>result={}<for_stmt>n,vlist batch_dict.items()<block_start>f=dataset.field_arrays[n]<if_stmt>f.padder<is><none><block_start>result[n]=np.array(vlist)<block_end><else_stmt><block_start>res=f.pad(vlist)<if_stmt><not>as_numpy<block_start>res,_=_to_tensor(res field_dtype=f.dtype)<block_end>result[n]=res<block_end><block_end><return>result<block_end><class_stmt>DataSetGetter<block_start>r""" 传递给torch.utils.data.DataLoader获取数据,DataLoder会传入int的idx获取数据(调用这里的__getitem__()函数)。 """<def_stmt>__init__ self dataset:DataSet as_numpy=<false><block_start>self.dataset=dataset<line_sep>self.as_numpy=as_numpy<line_sep>self.idx_list=list(range(len(dataset)))<line_sep>self.x_names={n<for>n,f dataset.get_all_fields().items()<if>f.is_input}<line_sep>self.y_names={n<for>n,f dataset.get_all_fields().items()<if>f.is_target}<block_end><def_stmt>__getitem__ self idx:int# mapping idx to sampled idx <block_start>idx=self.idx_list[idx]<line_sep>ins=self.dataset[idx]<line_sep><return>idx ins<block_end><def_stmt>__len__ self<block_start><return>len(self.dataset)<block_end><def_stmt>collate_fn self ins_list:list<block_start>r""" :param batch: [[idx1, x_dict1, y_dict1], [idx2, x_dict2, y_dict2], [xx, xx, xx]] :return: """<line_sep>indices=[]<line_sep>sin_x,sin_y=defaultdict(list) defaultdict(list)<line_sep># 收集需要关注的field的数据 <for_stmt>idx,ins 
ins_list<block_start>indices.append(idx)<for_stmt>n,v ins.items()<block_start><if_stmt>n<in>self.x_names<block_start>sin_x[n].append(v)<block_end><if_stmt>n<in>self.y_names<block_start>sin_y[n].append(v)<block_end><block_end><block_end># 根据情况,进行pad sin_x=_pad(sin_x dataset=self.dataset as_numpy=self.as_numpy)<line_sep>sin_y=_pad(sin_y dataset=self.dataset as_numpy=self.as_numpy)<if_stmt><not>self.dataset.collater.is_empty()<block_start>bx,by=self.dataset._collate_batch(ins_list)<line_sep>sin_x.update(bx)<line_sep>sin_y.update(by)<block_end><return>indices sin_x sin_y<block_end><def_stmt>__getattr__ self item<block_start><if_stmt>hasattr(self.dataset item)<block_start><return>getattr(self.dataset item)<block_end><else_stmt><block_start><raise>AttributeError("'DataSetGetter' object has no attribute '{}'".format(item))<block_end><block_end><block_end><class_stmt>SamplerAdapter(torch.utils.data.Sampler)<block_start>r""" 用于传入torch.utils.data.DataLoader中,DataLoader会调用__iter__()方法获取index(一次只取一个int) """<def_stmt>__init__ self sampler dataset<block_start>super().__init__(dataset)<line_sep>self.sampler=sampler<line_sep>self.dataset=dataset<block_end><def_stmt>__len__ self<block_start><return>len(self.dataset)<block_end><def_stmt>__iter__ self<block_start><return>iter(self.sampler(self.dataset))<block_end><block_end><class_stmt>BatchIter<block_start>r""" Trainer用于迭代数据的类。继承该类,并实现get_num_batches(), get_batch_indices(), num_batches(), __iter__()方法以及dataset属性。 """<def_stmt>__init__ self dataset batch_size=1 sampler=<none> num_workers=0 pin_memory=<false> drop_last=<false> timeout=0 worker_init_fn=<none> collate_fn=<none> batch_sampler=<none><block_start><if_stmt>isinstance(sampler Sampler)# 如果时fastNLP的sampler需要adapt一下 <block_start>sampler=SamplerAdapter(sampler=sampler<or>SequentialSampler() dataset=dataset)<block_end>self.sampler=sampler<line_sep>self.batch_sampler=batch_sampler<line_sep># DataLoader的collate_fn输入是List[],里面的元素是dataset[index]返回的结果 <if_stmt>collate_fn<is><none># 
pytoch <= 1.1 中不能设置collate_fn=None <block_start>self.dataiter=torch.utils.data.DataLoader(dataset=dataset batch_size=batch_size sampler=self.sampler num_workers=num_workers pin_memory=pin_memory drop_last=drop_last timeout=timeout worker_init_fn=worker_init_fn batch_sampler=batch_sampler)<block_end><else_stmt><block_start>self.dataiter=torch.utils.data.DataLoader(dataset=dataset batch_size=batch_size sampler=self.sampler collate_fn=collate_fn num_workers=num_workers pin_memory=pin_memory drop_last=drop_last timeout=timeout worker_init_fn=worker_init_fn batch_sampler=batch_sampler)<block_end># 以sampler的数量为准,因为DistributedSampler的时候每个进程上并不是所有的数据都用上了 <if_stmt>self.batch_sampler<is><none><block_start>self._num_batches=self.get_num_batches(len(self.dataiter.sampler) batch_size drop_last)<block_end><else_stmt><block_start>self._num_batches=len(self.batch_sampler)<block_end>self.batch_size=batch_size<line_sep>self.cur_batch_indices=<none><block_end>@property<def_stmt>num_batches self<block_start><return>self._num_batches<block_end>@num_batches.setter<def_stmt>num_batches self value<block_start>self._num_batches=value<block_end><def_stmt>init_iter self<block_start><pass><block_end>@staticmethod<def_stmt>get_num_batches num_samples batch_size drop_last<block_start>r""" 计算batch的数量。用于前端显示进度 :param int num_samples: :param int batch_size: :param bool drop_last: 如果最后一个batch没有batch_size这么多,是否就丢掉。 :return: """<line_sep>num_batches=num_samples<floordiv>batch_size<if_stmt><not>drop_last<and>(num_samples%batch_size<g>0)<block_start>num_batches<augadd>1<block_end><return>num_batches<block_end><def_stmt>get_batch_indices self<block_start>r""" 获取最近输出的batch的index。用于溯源当前batch的数据 :return: """<line_sep><return>self.cur_batch_indices<block_end><def_stmt>__len__ self<block_start><return>self.num_batches<block_end>@property<def_stmt>dataset self<block_start>r""" 获取正在参与iterate的dataset :return: """<line_sep><return>self.dataiter.dataset<block_end>@abc.abstractmethod<def_stmt>__iter__ 
self<block_start>r""" 用于实际数据循环的类,返回值需要为两个dict, 第一个dict中的内容会认为是input, 第二个dict中的内容会认为是target :return: """<line_sep><raise>NotImplemented<block_end><block_end><class_stmt>DataSetIter(BatchIter)<block_start>r""" DataSetIter 用于从 `DataSet` 中按一定的顺序, 依次按 ``batch_size`` 的大小将数据取出,通过使用DataSetIter,可以不需要考虑 输入的padding(由DataSet中每列的Padder决定了)以及不需要考虑将数据转为tensor。 组成 `x` 和 `y`:: batch = DataSetIter(data_set, batch_size=16, sampler=SequentialSampler()) num_batch = len(batch) for batch_x, batch_y in batch: # do stuff ... """<def_stmt>__init__ self dataset batch_size=1 sampler=<none> as_numpy=<false> num_workers=0 pin_memory=<false> drop_last=<false> timeout=0 worker_init_fn=<none> batch_sampler=<none><block_start>r""" :param dataset: :class:`~fastNLP.DataSet` 对象, 数据集 :param int batch_size: 取出的batch大小 :param sampler: 规定使用的 :class:`~fastNLP.Sampler` 方式. 若为 ``None`` , 使用 :class:`~fastNLP.SequentialSampler`. Default: ``None`` :param bool as_numpy: 若为 ``True`` , 输出batch为 numpy.array. 否则为 :class:`torch.Tensor`. Default: ``False`` :param int num_workers: 使用多少个进程来预处理数据 :param bool pin_memory: 是否将产生的tensor使用pin memory, 可能会加快速度。 :param bool drop_last: 如果最后一个batch没有batch_size这么多sample,就扔掉最后一个 :param timeout: 生成一个batch的timeout值 :param worker_init_fn: 在每个worker启动时调用该函数,会传入一个值,该值是worker的index。 :param batch_sampler: 当每次batch取出的数据数量不一致时,可以使用该sampler。batch_sampler每次iter应该输出一个list的index。 当batch_sampler不为None时,参数batch_size, sampler, drop_last会被忽略。 """<assert_stmt>isinstance(dataset DataSet)<line_sep>dataset=DataSetGetter(dataset as_numpy)<line_sep>collate_fn=dataset.collate_fn<if_stmt>batch_sampler<is><not><none><block_start>batch_size=1<line_sep>sampler=<none><line_sep>drop_last=<false><block_end>super().__init__(dataset=dataset batch_size=batch_size sampler=sampler num_workers=num_workers pin_memory=pin_memory drop_last=drop_last timeout=timeout worker_init_fn=worker_init_fn collate_fn=collate_fn batch_sampler=batch_sampler)<block_end><def_stmt>__iter__ 
self<block_start>self.init_iter()<for_stmt>indices,batch_x,batch_y self.dataiter<block_start>self.cur_batch_indices=indices<line_sep><yield>batch_x batch_y<block_end><block_end><block_end><class_stmt>TorchLoaderIter(BatchIter)<block_start>r""" 与DataSetIter类似,但可以用于非fastNLP的数据容器对象,以及可以实现完全自定义的生成batch的方式,然后与Trainer,Tester可以实现 与DataSetIter一样的对接。 需要保证传入的数据容器实现了实现了以下的方法 Example:: import random from fastNLP import TorchLoaderIter import torch class UdfDataSet: def __init__(self, num_samples): self.num_samples = num_samples def __getitem__(self, idx): # 必须实现的方法,输入参数是一个int,范围为[0, len(self)) x = [random.random() for _ in range(3)] y = random.random() return x,y def __len__(self): # 需要实现该方法返回值需要是一个int数据 return self.num_samples # 需要实现collact_fn将数据转换为tensor def collate_fn(data_list): # [(x1,y1), (x2,y2), ...], 这里的输入实际上是将UdfDataSet的__getitem__输入结合为list xs, ys = [], [] for l in data_list: x, y = l xs.append(x) ys.append(y) # 不需要转移到gpu,Trainer或Tester会将其转移到model所在的device x,y = torch.FloatTensor(xs), torch.FloatTensor(ys) return {'x':x, 'y':y}, {'y':y} # 第一个dict中内容类似于DataSet中的input列,第二个dict的内容类似于target列 udf_dataset = UdfDataSet(10) dataset = TorchLoaderIter(udf_dataset, collate_fn=collate_fn) class Model(nn.Module): def __init__(self): super().__init__() self.fc = nn.Linear(3, 1) def forward(self, x, y): return {'loss':torch.pow(self.fc(x).squeeze(-1)-y, 2).sum()} def predict(self, x): return {'pred':self.fc(x).squeeze(0)} model = Model() trainer = Trainer(train_data=dataset, model=model, loss=None, print_every=2, dev_data=dataset, metrics=AccuracyMetric(target='y'), use_tqdm=False) trainer.train(load_best_model=False) 除此之外,还可以通过该方法实现OnTheFly的训练,如下面的代码所示 Example:: import tempfile import random import torch tmp_file_handler, tmp_file_path = tempfile.mkstemp(text=True) try: num_samples, data = 10, [] for _ in range(num_samples): x, y = [random.random() for _ in range(3)], random.random() data.append(x + [y]) with open(tmp_file_path, 'w') as f: for d in data: f.write(' '.join(map(str, 
d)) + '\n') class FileDataSet: def __init__(self, tmp_file): num_samples = 0 line_pos = [0] # 对应idx是某一行对应的位置 self.tmp_file_handler = open(tmp_file, 'r', encoding='utf-8') line = self.tmp_file_handler.readline() while line: if line.strip(): num_samples += 1 line_pos.append(self.tmp_file_handler.tell()) line = self.tmp_file_handler.readline() self.tmp_file_handler.seek(0) self.num_samples = num_samples self.line_pos = line_pos def __getitem__(self, idx): line_start, line_end = self.line_pos[idx], self.line_pos[idx + 1] self.tmp_file_handler.seek(line_start) line = self.tmp_file_handler.read(line_end - line_start).strip() values = list(map(float, line.split())) x, y = values[:3], values[-1] return x, y def __len__(self): return self.num_samples def collate_fn(data_list): # [(x1,y1), (x2,y2), ...], 这里的输入实际上是将UdfDataSet的__getitem__输入结合为list xs, ys = [], [] for l in data_list: x, y = l xs.append(x) ys.append(y) x, y = torch.FloatTensor(xs), torch.FloatTensor(ys) return {'x': x, 'y': y}, {'y': y} # 第一个dict中内容类似于DataSet中的input列,第二个dict的内容类似于target列 file_data = FileDataSet(tmp_file_path) dataset = TorchLoaderIter(file_data, collate_fn=collate_fn) class Model(nn.Module): def __init__(self): super().__init__() self.fc = nn.Linear(3, 1) def forward(self, x, y): return {'loss': torch.pow(self.fc(x).squeeze(-1) - y, 2).sum()} def predict(self, x): return {'pred': self.fc(x).squeeze(0)} model = Model() trainer = Trainer(train_data=dataset, model=model, loss=None, print_every=2, dev_data=dataset, metrics=AccuracyMetric(target='y'), use_tqdm=False, n_epochs=2) trainer.train(load_best_model=False) finally: import os if os.path.exists(tmp_file_path): os.remove(tmp_file_path) """<def_stmt>__init__ self dataset collate_fn batch_size=1 sampler=<none> num_workers=0 pin_memory=<false> drop_last=<false> timeout=0 worker_init_fn=<none> batch_sampler=<none><block_start>r""" :param dataset: 实现了__getitem__和__len__方法的数据容器。 :param callable collate_fn: 用于将样本组合成batch的函数。输入为[dataset[idx1], 
dataset[idx2], ...], 即dataset中 __getitem__返回值组成的list,返回值必须为两个dict,其中第一个dict会被认为是input,第二个dict中的内容被认为是target。 需要转换为tensor的数据,需要在collate_fn中转化,但不需要转移到对应device。 :param int batch_size: 取出的batch大小 :param sampler: 规定使用的 :class:`~fastNLP.Sampler` 方式. 若为 ``None`` , 使用 :class:`~fastNLP.SequentialSampler`. Default: ``None`` :param int num_workers: 使用多少个进程来预处理数据 :param bool pin_memory: 是否将产生的tensor使用pin memory, 可能会加快速度。 :param bool drop_last: 如果最后一个batch没有batch_size这么多sample,就扔掉最后一个 :param timeout: 生成一个batch的timeout值 :param worker_init_fn: 在每个worker启动时调用该函数,会传入一个值,该值是worker的index。 :param batch_sampler: 当每次batch取出的数据数量不一致时,可以使用该sampler。batch_sampler每次iter应该输出一个list的index。 当batch_sampler不为None时,参数batch_size, sampler, drop_last会被忽略。 """<assert_stmt>len(dataset)<g>0<assert_stmt>collate_fn<is><not><none> "You must pass collate_fn to pad the batch."<if_stmt>batch_sampler<is><not><none><block_start>batch_size=1<line_sep>sampler=<none><line_sep>drop_last=<false><block_end>super().__init__(dataset=dataset batch_size=batch_size sampler=sampler num_workers=num_workers pin_memory=pin_memory drop_last=drop_last timeout=timeout worker_init_fn=worker_init_fn collate_fn=collate_fn batch_sampler=batch_sampler)<block_end><def_stmt>__iter__ self<block_start>self.init_iter()<for_stmt>batch_x,batch_y self.dataiter<block_start>self.cur_batch_indices=<none><line_sep><yield>batch_x batch_y<block_end><block_end><block_end><def_stmt>_to_tensor batch field_dtype<block_start>r""" :param batch: np.array() :param field_dtype: 数据类型 :return: batch, flag. 
如果传入的数据支持转为tensor,返回的batch就是tensor,且flag为True;如果传入的数据不支持转为tensor, 返回的batch就是原来的数据,且flag为False """<try_stmt><block_start><if_stmt>field_dtype<is><not><none><and>isinstance(field_dtype type)<and>issubclass(field_dtype Number)<and><not>isinstance(batch torch.Tensor)<block_start>new_batch=torch.as_tensor(batch)<line_sep>flag=<true><block_end><else_stmt><block_start>new_batch=batch<line_sep>flag=<false><block_end><if_stmt>torch.is_tensor(new_batch)<block_start><if_stmt>'float'<in>new_batch.dtype.__repr__()<block_start>new_batch=new_batch.float()<block_end><elif_stmt>'int'<in>new_batch.dtype.__repr__()<block_start>new_batch=new_batch.long()<block_end><block_end><return>new_batch flag<block_end><except_stmt>Exception<as>e<block_start><raise>e<block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>unittest<line_sep>hungarian_module=tf.load_op_library("hungarian.so")<class_stmt>HungarianTests(unittest.TestCase)<block_start><def_stmt>test_min_weighted_bp_cover_1 self<block_start>W=np.array([[3 2 2] [1 2 0] [2 2 1]])<line_sep>M,c_0,c_1=hungarian_module.hungarian(W)<with_stmt>tf.Session()<as>sess<block_start>M=M.eval()<line_sep>c_0=c_0.eval()<line_sep>c_1=c_1.eval()<block_end>c_0_t=np.array([2 1 1])<line_sep>c_1_t=np.array([1 1 0])<line_sep>M_t=np.array([[1 0 0] [0 1 0] [0 0 1]])<line_sep>self.assertTrue((c_0.flatten()<eq>c_0_t.flatten()).all())<line_sep>self.assertTrue((c_1.flatten()<eq>c_1_t.flatten()).all())<line_sep>self.assertTrue((M<eq>M_t).all())<line_sep><pass><block_end><def_stmt>test_min_weighted_bp_cover_2 self<block_start>W=np.array([[5 0 4 0] [0 4 6 8] [4 0 5 7]])<line_sep>M,c_0,c_1=hungarian_module.hungarian(W)<with_stmt>tf.Session()<as>sess<block_start>M=M.eval()<line_sep>c_0=c_0.eval()<line_sep>c_1=c_1.eval()<block_end>c_0_t=np.array([5 6 5])<line_sep>c_1_t=np.array([0 0 0 2])<line_sep>M_t=np.array([[1 0 0 0] [0 0 1 0] [0 0 0 1]])<line_sep>self.assertTrue((c_0.flatten()<eq>c_0_t.flatten()).all())<line_sep>self.assertTrue((c_1.flatten()<eq>c_1_t.flatten()).all())<line_sep>self.assertTrue((M<eq>M_t).all())<block_end><def_stmt>test_min_weighted_bp_cover_3 self<block_start>W=np.array([[5 0 2] [3 1 0] [0 5 0]])<line_sep>M,c_0,c_1=hungarian_module.hungarian(W)<with_stmt>tf.Session()<as>sess<block_start>M=M.eval()<line_sep>c_0=c_0.eval()<line_sep>c_1=c_1.eval()<block_end>c_0_t=np.array([2 0 4])<line_sep>c_1_t=np.array([3 1 0])<line_sep>M_t=np.array([[0 0 1] [1 0 0] [0 1 0]])<line_sep>self.assertTrue((c_0.flatten()<eq>c_0_t.flatten()).all())<line_sep>self.assertTrue((c_1.flatten()<eq>c_1_t.flatten()).all())<line_sep>self.assertTrue((M<eq>M_t).all())<block_end><def_stmt>test_min_weighted_bp_cover_4 self<block_start>W=np.array([[[5 0 2] [3 1 0] [0 5 0]] [[3 2 2] [1 2 0] [2 2 
1]]])<line_sep>M,c_0,c_1=hungarian_module.hungarian(W)<with_stmt>tf.Session()<as>sess<block_start>M=M.eval()<line_sep>c_0=c_0.eval()<line_sep>c_1=c_1.eval()<block_end>c_0_t=np.array([[2 0 4] [2 1 1]])<line_sep>c_1_t=np.array([[3 1 0] [1 1 0]])<line_sep>M_t=np.array([[[0 0 1] [1 0 0] [0 1 0]] [[1 0 0] [0 1 0] [0 0 1]]])<line_sep>self.assertTrue((c_0.flatten()<eq>c_0_t.flatten()).all())<line_sep>self.assertTrue((c_1.flatten()<eq>c_1_t.flatten()).all())<line_sep>self.assertTrue((M<eq>M_t).all())<block_end><def_stmt>test_real_values_1 self# Test the while loop terminates with real values. <block_start>W=np.array([[0.90 0.70 0.30 0.20 0.40 0.001 0.001 0.001 0.001 0.001] [0.80 0.75 0.92 0.10 0.15 0.001 0.001 0.001 0.001 0.001] [0.78 0.85 0.66 0.29 0.21 0.001 0.001 0.001 0.001 0.001] [0.42 0.55 0.23 0.43 0.33 0.002 0.001 0.001 0.001 0.001] [0.64 0.44 0.33 0.33 0.34 0.001 0.002 0.001 0.001 0.001] [0.22 0.55 0.43 0.43 0.14 0.001 0.001 0.002 0.001 0.001] [0.43 0.33 0.34 0.22 0.14 0.001 0.001 0.001 0.002 0.001] [0.33 0.42 0.23 0.13 0.43 0.001 0.001 0.001 0.001 0.002] [0.39 0.24 0.53 0.56 0.89 0.001 0.001 0.001 0.001 0.001] [0.12 0.34 0.82 0.82 0.77 0.001 0.001 0.001 0.001 0.001]])<line_sep>M,c_0,c_1=hungarian_module.hungarian(W)<with_stmt>tf.Session()<as>sess<block_start>M=M.eval()<block_end>M_t=np.array([[1 0 0 0 0 0 0 0 0 0] [0 0 1 0 0 0 0 0 0 0] [0 1 0 0 0 0 0 0 0 0] [0 0 0 0 0 1 0 0 0 0] [0 0 0 0 0 0 1 0 0 0] [0 0 0 0 0 0 0 1 0 0] [0 0 0 0 0 0 0 0 1 0] [0 0 0 0 0 0 0 0 0 1] [0 0 0 0 1 0 0 0 0 0] [0 0 0 1 0 0 0 0 0 0]])<line_sep>self.assertTrue((M<eq>M_t).all())<block_end><def_stmt>test_real_values_2 self<block_start>W=np.array([[0.00604139 0.0126045 0.0117373 0.01245 0.00808836 0.0162662 0.0137996 0.00403898 0.0123786 1e-05] [0.00604229 0.0126071 0.0117400 0.0124528 0.00808971 0.0162703 0.0138028 0.00403935 0.0123812 1e-05] [0.00604234 0.0126073 0.0117402 0.012453 0.00808980 0.0162706 0.0138030 0.00403937 0.0123814 1e-05] [0.00604235 0.0126073 0.0117402 0.012453 
0.00808981 0.0162706 0.0138030 0.00403938 0.0123814 1e-05] [0.00604235 0.0126073 0.0117402 0.012453 0.00808981 0.0162706 0.0138030 0.00403938 0.0123814 1e-05] [0.00604235 0.0126073 0.0117402 0.012453 0.00808981 0.0162706 0.0138030 0.00403938 0.0123814 1e-05] [0.00604235 0.0126073 0.0117402 0.012453 0.00808981 0.0162706 0.0138030 0.00403938 0.0123814 1e-05] [0.00604235 0.0126073 0.0117402 0.012453 0.00808981 0.0162706 0.0138030 0.00403938 0.0123814 1e-05] [0.00604235 0.0126073 0.0117402 0.012453 0.00808981 0.0162706 0.0138030 0.00403938 0.0123814 1e-05] [0.00604235 0.0126073 0.0117402 0.012453 0.00808981 0.0162706 0.0138030 0.00403938 0.0123814 1e-05]])<line_sep>M,c_0,c_1=hungarian_module.hungarian(W)<with_stmt>tf.Session()<as>sess<block_start>M=M.eval()<block_end><block_end><def_stmt>test_real_values_3 self<block_start>W=np.array([[0.00302646 0.00321431 0.0217552 0.00836773 0.0256353 0.0177026 0.0289461 0.0214768 0.0101898 1e-05] [0.00302875 0.003217 0.0217628 0.00836405 0.0256229 0.0177137 0.0289468 0.0214719 0.0101904 1e-05] [0.00302897 0.00321726 0.0217636 0.00836369 0.0256217 0.0177148 0.0289468 0.0214714 0.0101905 1e-05] [0.003029 0.0032173 0.0217637 0.00836364 0.0256216 0.0177149 0.0289468 0.0214713 0.0101905 1e-05] [0.003029 0.0032173 0.0217637 0.00836364 0.0256216 0.0177149 0.0289468 0.0214713 0.0101905 1e-05] [0.003029 0.0032173 0.0217637 0.00836364 0.0256216 0.017715 0.0289468 0.0214713 0.0101905 1e-05] [0.003029 0.0032173 0.0217637 0.00836364 0.0256216 0.017715 0.0289468 0.0214713 0.0101905 1e-05] [0.003029 0.0032173 0.0217637 0.00836364 0.0256216 0.017715 0.0289468 0.0214713 0.0101905 1e-05] [0.003029 0.0032173 0.0217637 0.00836364 0.0256216 0.017715 0.0289468 0.0214713 0.0101905 1e-05] [0.003029 0.0032173 0.0217637 0.00836364 0.0256216 0.017715 0.0289468 0.0214713 0.0101905 1e-05]])<line_sep>M,c_0,c_1=hungarian_module.hungarian(W)<with_stmt>tf.Session()<as>sess<block_start>M=M.eval()<block_end><block_end><def_stmt>test_real_values_4 
self<block_start>W=np.array([[1e-05 0.0634311 1e-05 4.76687e-05 1.00079e-05 1.00378e-05 1e-05 1e-05 1e-05 3.9034e-05] [1e-05 3.42696e-05 1e-05 1e-05 1e-05 1e-05 1e-05 1.0122e-05 3.43236e-05 1e-05] [1e-05 0.0426792 0.031155 1.0008e-05 0.00483961 0.0228187 1e-05 1e-05 1e-05 0.102463] [1e-05 1e-05 1e-05 1.07065e-05 1e-05 1.00185e-05 1e-05 1e-05 1e-05 1.00007e-05] [1e-05 4.22947e-05 0.00062168 0.623917 1.03468e-05 0.00588984 1.00004e-05 1.44433e-05 1.00014e-05 0.000213425] [1e-05 1.01764e-05 1e-05 0.000667249 1e-05 0.000485082 1e-05 1e-05 1.00002e-05 1e-05] [1e-05 1e-05 1.50331e-05 1e-05 0.11269 1e-05 1e-05 1e-05 1e-05 1.13251e-05] [1.0001e-05 1e-05 1e-05 1e-05 1e-05 1e-05 0.0246974 1e-05 1e-05 1e-05] [1e-05 2.89144e-05 1e-05 1.05147e-05 1e-05 0.000894762 1.03587e-05 0.150301 1e-05 1.00045e-05] [1e-05 3.97901e-05 1e-05 1.11641e-05 1e-05 2.34249e-05 1.0007e-05 2.42828e-05 1e-05 1.10529e-05]])<line_sep>p=1e6<line_sep>W=np.round(W<times>p)/p<line_sep>M,c_0,c_1=hungarian_module.hungarian(W)<with_stmt>tf.Session()<as>sess<block_start>M=M.eval()<block_end><block_end><def_stmt>test_real_values_5 self<block_start>W=np.array([[1.4e-05 1e-05 1e-05 0.053306 0.044139 1e-05 1.2e-05 1e-05 1e-05 1e-05] [0.001234 1e-05 1e-05 2.1e-05 1e-05 0.001535 0.019553 1e-05 1e-05 1e-05] [0.002148 1e-05 1e-05 1.6e-05 0.651536 2e-05 7.4e-05 0.002359 1e-05 1e-05] [3.8e-05 1e-05 0.000592 4.7e-05 0.09173 1e-05 1e-05 1e-05 1e-05 1e-05] [1e-05 1e-05 1e-05 0.213736 1e-05 4.5e-05 0.000768 1e-05 1e-05 1e-05] [1e-05 1e-05 1e-05 0.317609 1e-05 1e-05 0.002151 1e-05 1e-05 1e-05] [0.002802 1e-05 1.2e-05 1e-05 1e-05 0.002999 4.8e-05 1.1e-05 0.000919 1e-05] [1e-05 1e-05 1e-05 1e-05 1e-05 1e-05 1e-05 1e-05 0.028816 1e-05] [1e-05 1e-05 0.047335 1e-05 1.2e-05 1e-05 1e-05 1e-05 1e-05 1e-05] [1e-05 1e-05 1e-05 1e-05 1e-05 1e-05 1e-05 1e-05 1e-05 
1e-05]])<line_sep>p=1e6<line_sep>W=np.round(W<times>p)/p<line_sep>M,c_0,c_1=hungarian_module.hungarian(W)<with_stmt>tf.Session()<as>sess<block_start>M=M.eval()<block_end><block_end><def_stmt>test_real_values_6 self<block_start>W=np.array([[0.003408 0.010531 0.002795 1e-05 0.019786 0.010435 0.002743 0.023617 0.010436 0.003116] [0.003408 0.010531 0.002795 1e-05 0.019786 0.010435 0.002743 0.023617 0.010436 0.003116] [0.003408 0.010531 0.002795 1e-05 0.019786 0.010435 0.002743 0.023617 0.010436 0.003116] [0.003408 0.010531 0.002795 1e-05 0.019786 0.010435 0.002743 0.023617 0.010436 0.003116] [0.003408 0.010531 0.002795 1e-05 0.019786 0.010435 0.002743 0.023617 0.010436 0.003116] [0.003408 0.010531 0.002795 1e-05 0.019786 0.010435 0.002743 0.023617 0.010436 0.003116] [0.003408 0.010531 0.002795 1e-05 0.019786 0.010435 0.002743 0.023617 0.010436 0.003116] [0.003408 0.010531 0.002795 1e-05 0.019786 0.010435 0.002743 0.023617 0.010436 0.003116] [0.003408 0.010531 0.002795 1e-05 0.019786 0.010435 0.002743 0.023617 0.010436 0.003116] [0.003408 0.010531 0.002795 1e-05 0.019786 0.010435 0.002743 0.023617 0.010436 0.003116]])<line_sep>p=1e6<line_sep>W=np.round(W<times>p)/p<line_sep>M,c_0,c_1=hungarian_module.hungarian(W)<with_stmt>tf.Session()<as>sess<block_start>M=M.eval()<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>suite=unittest.TestLoader().loadTestsFromTestCase(HungarianTests)<line_sep>unittest.TextTestRunner(verbosity=2).run(suite)<block_end>
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_from_stmt>backend.templatesets.legacy_apps.instance.funutils update_nested_dict<import_from_stmt>..constants FILE_DIR_PATTERN KRESOURCE_NAMES NUM_VAR_PATTERN<line_sep># 资源名称 K8S_RES_NAME_PATTERN="^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"<line_sep># 挂载卷名称限制 VOLUMR_NAME_PATTERN="^[a-zA-Z{]{1}[a-zA-Z0-9-_{}]{0,254}$"<line_sep># TODO 验证变量的情况 PORT_NAME_PATTERN="^[a-zA-Z{]{1}[a-zA-Z0-9-{}_]{0,254}$"<line_sep># configmap/secret key 名称限制 KEY_NAME_PATTERN="^[.a-zA-Z{]{1}[a-zA-Z0-9-_.{}]{0,254}$"<line_sep># 亲和性验证 AFFINITY_MATCH_EXPRESSION_SCHEMA={"type":"array" "items":{"type":"object" "required":["key" "operator"] "properties":{"key":{"type":"string" "minLength":1} "operator":{"type":"string" "enum":["In" "NotIn" "Exists" "DoesNotExist" "Gt" "Lt"]} "values":{"type":"array" "items":{"type":"string" "minLength":1}} } "additionalProperties":<false> } }<line_sep>POD_AFFINITY_TERM_SCHEMA={"type":"object" "properties":{"labelSelector":{"type":"object" "properties":{"matchExpressions":AFFINITY_MATCH_EXPRESSION_SCHEMA}} "namespaces":{"type":"array" "items":{"type":"string"}} "topologyKey":{"type":"string"} } "additionalProperties":<false> }<line_sep>POD_AFFINITY_SCHEMA={"type":"object" 
"properties":{"requiredDuringSchedulingIgnoredDuringExecution":{"type":"array" "items":POD_AFFINITY_TERM_SCHEMA} "preferredDuringSchedulingIgnoredDuringExecution":{"type":"array" "items":{"type":"object" "required":["podAffinityTerm"] "properties":{"weight":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":1 "maximum":100} ]} "podAffinityTerm":POD_AFFINITY_TERM_SCHEMA } } } } "additionalProperties":<false> }<line_sep># 健康检查 & 就绪检查 K8S_CHECK_SCHEMA={"type":"object" "required":["initialDelaySeconds" "periodSeconds" "timeoutSeconds" "failureThreshold" "successThreshold"] "properties":{"initialDelaySeconds":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":0}]} "periodSeconds":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":1}]} "timeoutSeconds":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":1}]} "failureThreshold":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":1}]} "successThreshold":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":1}]} "exec":{"type":"object" "properties":{"command":{"type":"string"}}} "tcpSocket":{"type":"object" "properties":{"port":{"oneOf":[{"type":"number"} {"type":"string"}]}}} "httpGet":{"type":"object" "properties":{"port":{"oneOf":[{"type":"number"} {"type":"string"}]} "path":{"type":"string"} "httpHeaders":{"type":"array" "items":{"type":"object" "properties":{"name":{"type":"string"} "value":{"type":"string"}} } } } } } }<line_sep>INIT_CONTAINER_SCHEMA={"type":"array" "items":{"type":"object" "required":["name" "image" "imagePullPolicy" "volumeMounts" "ports" "resources"] "properties":{"name":{"type":"string" "minLength":1} "image":{"type":"string" "minLength":1} "imagePullPolicy":{"type":"string" "enum":["Always" "IfNotPresent" "Never"]} "volumeMounts":{"type":"array" "items":{"type":"object" "required":["name" "mountPath" "readOnly"] 
"properties":{"name":{"type":"string" "pattern":VOLUMR_NAME_PATTERN} "mountPath":{"type":"string" "pattern":FILE_DIR_PATTERN} "readOnly":{"type":"boolean"} } } } "ports":{"type":"array" "items":{"type":"object" "required":["name" "containerPort"] "properties":{"name":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"string" "pattern":PORT_NAME_PATTERN} ]} "containerPort":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":1 "maximum":65535} ]} } } } "command":{"type":"string"} "args":{"type":"string"} # 环境变量前端统一存放在 webCache.env_list 中,有后台组装为 env & envFrom "env":{"type":"array" "items":{"type":"object" "required":["name"] "properties":{"name":{"type":"string" "minLength":1} "value":{"type":"string"} "valueFrom":{"type":"object" "properties":{"fieldRef":{"type":"object" "required":["fieldPath"] "properties":{"fieldPath":{"type":"string"}} } "configMapKeyRef":{"type":"object" "required":["name" "key"] "properties":{"name":{"type":"string" "minLength":1} "key":{"type":"string" "minLength":1} } } "secretKeyRef":{"type":"object" "required":["name" "key"] "properties":{"name":{"type":"string" "minLength":1} "key":{"type":"string" "minLength":1} } } } } } } } "envFrom":{"type":"array" "items":{"type":"object" "properties":{"configMapRef":{"type":"object" "properties":{"name":{"type":"string" "minLength":1}} } "secretRef":{"type":"object" "properties":{"name":{"type":"string" "minLength":1}}} } } } "resources":{"type":"object" "properties":{"limits":{"type":"object" "properties":{"cpu":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":0} ]} "memory":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"number" "minimum":0} {"type":"string" "pattern":NUM_VAR_PATTERN} ]} } } "requests":{"type":"object" "properties":{"cpu":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"number" "minimum":0} {"type":"string" "pattern":NUM_VAR_PATTERN} ]} 
"memory":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"number" "minimum":0} {"type":"string" "pattern":NUM_VAR_PATTERN} ]} } } } } } } }<line_sep>CONTAINER_SCHEMA={"type":"array" "items":{"type":"object" "required":["name" "image" "imagePullPolicy" "volumeMounts" "ports" "resources" "livenessProbe" "readinessProbe" "lifecycle" ] "properties":{"name":{"type":"string" "minLength":1} "image":{"type":"string" "minLength":1} "imagePullPolicy":{"type":"string" "enum":["Always" "IfNotPresent" "Never"]} "volumeMounts":{"type":"array" "items":{"type":"object" "required":["name" "mountPath" "readOnly"] "properties":{"name":{"type":"string" "pattern":VOLUMR_NAME_PATTERN} "mountPath":{"type":"string" "pattern":FILE_DIR_PATTERN} "readOnly":{"type":"boolean"} } } } "ports":{"type":"array" "items":{"type":"object" "required":["name" "containerPort"] "properties":{"name":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"string" "pattern":PORT_NAME_PATTERN} ]} "containerPort":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":1 "maximum":65535} ]} } } } "command":{"type":"string"} "args":{"type":"string"} # 环境变量前端统一存放在 webCache.env_list 中,有后台组装为 env & envFrom "env":{"type":"array" "items":{"type":"object" "required":["name"] "properties":{"name":{"type":"string" "minLength":1} "value":{"type":"string"} "valueFrom":{"type":"object" "properties":{"fieldRef":{"type":"object" "required":["fieldPath"] "properties":{"fieldPath":{"type":"string"}} } "configMapKeyRef":{"type":"object" "required":["name" "key"] "properties":{"name":{"type":"string" "minLength":1} "key":{"type":"string" "minLength":1} } } "secretKeyRef":{"type":"object" "required":["name" "key"] "properties":{"name":{"type":"string" "minLength":1} "key":{"type":"string" "minLength":1} } } } } } } } "envFrom":{"type":"array" "items":{"type":"object" "properties":{"configMapRef":{"type":"object" "properties":{"name":{"type":"string" "minLength":1}} } 
"secretRef":{"type":"object" "properties":{"name":{"type":"string" "minLength":1}}} } } } "resources":{"type":"object" "properties":{"limits":{"type":"object" "properties":{"cpu":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":0} ]} "memory":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"number" "minimum":0} {"type":"string" "pattern":NUM_VAR_PATTERN} ]} } } "requests":{"type":"object" "properties":{"cpu":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"number" "minimum":0} {"type":"string" "pattern":NUM_VAR_PATTERN} ]} "memory":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"number" "minimum":0} {"type":"string" "pattern":NUM_VAR_PATTERN} ]} } } } } "livenessProbe":K8S_CHECK_SCHEMA "readinessProbe":K8S_CHECK_SCHEMA "lifecycle":{"type":"object" "required":["preStop" "postStart"] "properties":{"preStop":{"type":"object" "required":["exec"] "properties":{"command":{"type":"string"}}} "postStart":{"type":"object" "required":["exec"] "properties":{"command":{"type":"string"}} } } } } } }<line_sep>K8S_DEPLOYMENT_SCHEMA={"type":"object" "required":["metadata" "spec"] "properties":{"metadata":{"type":"object" "required":["name"] "properties":{"name":{"type":"string" "pattern":K8S_RES_NAME_PATTERN}} } "spec":{"type":"object" "required":["replicas" "strategy" "template"] "properties":{"replicas":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":0}]} "strategy":{"type":"object" "required":["type"] "properties":{"type":{"type":"string" "enum":["RollingUpdate" "Recreate"]} "rollingUpdate":{"type":"object" "required":["maxUnavailable" "maxSurge"]} } } "template":{"type":"object" "required":["metadata" "spec"] "properties":{"metadata":{"type":"object" "properties":{"lables":{"type":"object"} "annotations":{"type":"object"}} } "spec":{"type":"object" "required":["restartPolicy" "terminationGracePeriodSeconds" "nodeSelector" "hostNetwork" "dnsPolicy" "volumes" "containers" 
] "properties":{"restartPolicy":{"type":"string" "enum":["Always" "OnFailure" "Never"]} "terminationGracePeriodSeconds":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":0} ]} "nodeSelector":{"type":"object"} "hostNetwork":{"oneOf":[{"type":"number"} {"type":"string"}]} "dnsPolicy":{"type":"string" "enum":["ClusterFirst" "Default" "None" "ClusterFirstWithHostNet"] } "volumes":{"type":"array" "items":{"type":"object" "required":["name"] "properties":{"name":{"type":"string" "pattern":VOLUMR_NAME_PATTERN} "hostPath":{"type":"object" "required":["path"] "properties":{"path":{"type":"string" "pattern":FILE_DIR_PATTERN}} } "emptyDir":{"type":"object"} "configMap":{"type":"object" "required":["name"] "properties":{"name":{"type":"string" "minLength":1}} } "secret":{"type":"object" "required":["secretName"] "properties":{"secretName":{"type":"string" "minLength":1}} } "persistentVolumeClaim":{"type":"object" "required":["claimName"] "properties":{"claimName":{"type":"string" "minLength":1}} } } } } "containers":CONTAINER_SCHEMA "initContainers":INIT_CONTAINER_SCHEMA } } } } } } } }<line_sep>AFFINITY_SCHEMA={"type":"object" "properties":{"nodeAffinity":{"type":"object" "properties":{"requiredDuringSchedulingIgnoredDuringExecution":{"type":"object" "required":["nodeSelectorTerms"] "properties":{"nodeSelectorTerms":{"type":"array" "items":{"type":"object" "required":["matchExpressions"] "properties":{"matchExpressions":AFFINITY_MATCH_EXPRESSION_SCHEMA} } }} } "preferredDuringSchedulingIgnoredDuringExecution":{"type":"array" "items":{"type":"object" "required":["preference"] "properties":{"weight":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":1 "maximum":100} ]} "preference":{"type":"object" "required":["matchExpressions"] "properties":{"matchExpressions":AFFINITY_MATCH_EXPRESSION_SCHEMA} } } } } } "additionalProperties":<false> } "podAffinity":POD_AFFINITY_SCHEMA "podAntiAffinity":POD_AFFINITY_SCHEMA } 
"additionalProperties":<false> }<line_sep># DS 与 Deployment 的差异项:滚动升级策略 中 选择 RollingUpdate 时,只可以选择 maxUnavailable # "required": ["replicas", "strategy", "template"], K8S_DAEMONSET_DIFF={"properties":{"spec":{"required":["updateStrategy" "template"] "properties":{"updateStrategy":{"properties":{"rollingUpdate":{"required":["maxUnavailable"]}}}} }}}<line_sep>K8S_DAEMONSET_SCHEMA=update_nested_dict(K8S_DEPLOYMENT_SCHEMA K8S_DAEMONSET_DIFF)<line_sep># Job 与 Deployment 的差异项: Pod 运行时设置 # TODO: 确认 job 中 replicas 和 parallelism 怎么配置 K8S_JOB_DIFF={"properties":{"spec":{"type":"object" "required":["template" "completions" "parallelism" "backoffLimit" "activeDeadlineSeconds"] "properties":{"parallelism":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":0}]} "completions":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":0}]} "backoffLimit":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":0}]} "activeDeadlineSeconds":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":0}]} } }}}<line_sep>K8S_JOB_SCHEMA=update_nested_dict(K8S_DEPLOYMENT_SCHEMA K8S_JOB_DIFF)<line_sep># statefulset 与 Deployment 的差异项 K8S_STATEFULSET_DIFF={"properties":{"spec":{"required":["template" "updateStrategy" "podManagementPolicy" "volumeClaimTemplates"] "properties":{"updateStrategy":{"type":"object" "required":["type"] "properties":{"type":{"type":"string" "enum":["OnDelete" "RollingUpdate"]} "rollingUpdate":{"type":"object" "required":["partition"] "properties":{"partition":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":0} ]}} } } } "podManagementPolicy":{"type":"string" "enum":["OrderedReady" "Parallel"]} "serviceName":{"type":"string" "minLength":1} "volumeClaimTemplates":{"type":"array" "items":{"type":"object" "required":["metadata" "spec"] "properties":{"metadata":{"type":"object" "required":["name"] "properties":{# "name": {"type": "string", 
"minLength": 1} } } "spec":{"type":"object" "required":["accessModes" "storageClassName" "resources"] "properties":{# "storageClassName": {"type": "string", "minLength": 1}, "accessModes":{"type":"array" "items":{"type":"string" "enum":["ReadWriteOnce" "ReadOnlyMany" "ReadWriteMany"] } } "resources":{"type":"object" "required":["requests"] "properties":{"requests":{"type":"object" "required":["storage"] "properties":{"storage":{"oneOf":[{"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":0} ]}} }} } } } } } } } }}}<line_sep>K8S_STATEFULSET_SCHEMA=update_nested_dict(K8S_DEPLOYMENT_SCHEMA K8S_STATEFULSET_DIFF)<line_sep>K8S_CONFIGMAP_SCHEMA={"type":"object" "required":["metadata" "data"] "properties":{"metadata":{"type":"object" "required":["name"] "properties":{"name":{"type":"string" "pattern":K8S_RES_NAME_PATTERN}} } "data":{"type":"object" "patternProperties":{KEY_NAME_PATTERN:{"type":"string"}} "additionalProperties":<false> } } }<line_sep>K8S_SECRET_SCHEMA={"type":"object" "required":["metadata" "data"] "properties":{"metadata":{"type":"object" "required":["name"] "properties":{"name":{"type":"string" "pattern":K8S_RES_NAME_PATTERN}} } "data":{"type":"object" "patternProperties":{KEY_NAME_PATTERN:{"type":"string"}} "additionalProperties":<false> } } }<line_sep>K8S_SERVICE_SCHEMA={"type":"object" "required":["metadata" "spec"] "properties":{"metadata":{"type":"object" "required":["name"] "properties":{"name":{"type":"string"}}} "spec":{"type":"object" "required":["type" "clusterIP" "ports"] "properties":{"type":{"type":"string" "enum":["ClusterIP" "NodePort"]} "clusterIP":{"type":"string"} "ports":{"type":"array" "items":{"type":"object" "required":["port" "protocol"] "properties":{"name":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"string" "pattern":PORT_NAME_PATTERN} ]} "port":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":1 "maximum":65535} ]} 
"protocol":{"type":"string" "enum":["TCP" "UDP"]} "targetPort":{"anyof":[{"type":"number" "minimum":1 "maximum":65535} {"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"string" "minLength":1} ]} "nodePort":{"oneOf":[{"type":"string" "pattern":"^$"} {"type":"string" "pattern":NUM_VAR_PATTERN} {"type":"number" "minimum":30000 "maximum":32767} ]} } } } } } } }<line_sep>K8S_INGRESS_SCHEMA={"type":"object" "required":["metadata" "spec"]}<line_sep>K8S_HPA_SCHNEA={"$schema":"http://json-schema.org/draft-04/schema#" "id":"k8s_hpa" "type":"object" "required":["apiVersion" "kind" "metadata" "spec"] "properties":{"apiVersion":{"type":"string" "enum":["autoscaling/v2beta2"]} "kind":{"type":"string" "enum":["HorizontalPodAutoscaler"]} "metadata":{"type":"object" "required":["name"] "properties":{"name":{"type":"string" "pattern":K8S_RES_NAME_PATTERN}} } "spec":{"type":"object" "required":["scaleTargetRef" "minReplicas" "maxReplicas" "metrics"] "properties":{"scaleTargetRef":{"type":"object" "required":["kind" "name"] "properties":{"kind":{"type":"string" "enum":["Deployment"]} "name":{"type":"string" "pattern":K8S_RES_NAME_PATTERN} } } "minReplicas":{"type":"number" "minimum":0} "maxReplicas":{"type":"number" "minimum":0} "metrics":{"type":"array" "items":{"type":"object" "required":["type" "resource"] "properties":{"type":{"type":"string" "enum":["Resource"]} "resource":{"type":"object" "required":["name" "target"] "properties":{"name":{"type":"string" "enum":["cpu" "memory"]} "target":{"type":"object" "required":["type" "averageUtilization"] "properties":{"type":{"type":"string" "enum":["Utilization"]} "averageUtilization":{"type":"number" "minimum":0} } } } } } } } } } } }<line_sep>CONFIG_SCHEMA=[K8S_DEPLOYMENT_SCHEMA K8S_DAEMONSET_SCHEMA K8S_JOB_SCHEMA K8S_STATEFULSET_SCHEMA K8S_SERVICE_SCHEMA K8S_CONFIGMAP_SCHEMA K8S_SECRET_SCHEMA K8S_INGRESS_SCHEMA K8S_HPA_SCHNEA ]<line_sep>CONFIG_SCHEMA_MAP=dict(zip(KRESOURCE_NAMES CONFIG_SCHEMA))<line_sep>
import logging
from logging.config import dictConfig

import dbnd
from dbnd.testing.helpers import run_dbnd_subprocess__with_home
from dbnd_airflow_contrib.dbnd_airflow_default_logger import DEFAULT_LOGGING_CONFIG


class TestDbndAirflowLogging(object):
    """Sanity checks for the default dbnd-airflow logging configuration."""

    def test_dbnd_airflow_logging_config(self):
        # Implemented as a separate test: only validate that the config object
        # exists, without applying it to the current logging system.
        dbnd_config = DEFAULT_LOGGING_CONFIG
        assert dbnd_config

    def test_can_be_loaded(self):
        # We can't just load the config in-process -- it would affect all
        # future tests -- so re-run this file as a subprocess (see the
        # __main__ block below) and check its output instead.
        output = run_dbnd_subprocess__with_home([__file__.replace(".pyc", ".py")])
        assert "test_can_be_loaded OK" in output
        logging.error("Done")


if __name__ == "__main__":
    print(dbnd.__version__)
    # We need the config first, before any airflow code is imported.
    dbnd_config = DEFAULT_LOGGING_CONFIG
    dictConfig(dbnd_config)
    logging.info("test_can_be_loaded OK")
from scrounger.core.module import BaseModule

# helper functions
from scrounger.utils.ios import plist_dict_to_xml, plist
from scrounger.utils.config import Log


class Module(BaseModule):
    """Inspects an app's Info.plist for insecure ATS configurations."""

    meta = {
        "author": "RDC",
        "description": "Checks if there are any Application Transport "
                       "Security misconfigurations",
        "certainty": 90,
    }

    options = [
        {
            "name": "info",
            "description": "path to a local Info.plist file",
            "required": True,
            "default": None,
        },
    ]

    # Top-level Info.plist key that holds the ATS configuration.
    _ats_key = "NSAppTransportSecurity"
    # ATS sub-keys that weaken transport security when present/enabled.
    _insecure_options = [
        "NSAllowsArbitraryLoads",
        "NSExceptionAllowsInsecureHTTPLoads",
        "NSThirdPartyExceptionAllowsInsecureHTTPLoads",
    ]

    def run(self):
        """Parse the Info.plist file and report ATS misconfigurations."""
        finding = {
            "title": "Application Has Insecure ATS Configurations",
            "details": "",
            "severity": "Medium",
            "report": False,
        }

        plist_data = plist(self.info)
        Log.info("Parsing Info.plist file contents")
        ats_section = plist_dict_to_xml(plist_data, self._ats_key)

        Log.info("Analysing Info.plist file")
        # No ATS key at all (or an empty one) is itself reportable.
        if self._ats_key not in plist_data or not plist_data[self._ats_key]:
            finding["report"] = True
            finding["details"] = "No evidence of ATS being implemented found."

        # Any insecure override inside the ATS section is reportable too.
        if any(opt in ats_section for opt in self._insecure_options):
            finding["report"] = True
            finding["details"] = (
                "The following insecure ATS configuration was "
                "found : {}".format(ats_section)
            )

        return {"{}_result".format(self.name()): finding}
# Test suite for the parsetron grammar/parser library.
from parsetron import *  # NOQA
import re
import pytest

__author__ = '<NAME>'


class TestMul(object):
    """Repetition operator (``expr * spec``) semantics."""

    def test_mul(self):
        s = String("t")('t')

        # valid grammar:
        class G(Grammar):
            GOAL = s * 1
        s_1 = RobustParser(G())
        assert s_1.print_parse("t", strict_match=True)
        assert False is s_1.print_parse("t t", strict_match=True)

        class G(Grammar):
            GOAL = s * (1, 2)
        s_1_2 = RobustParser(G())
        assert s_1_2.print_parse("t", strict_match=True)
        assert s_1_2.print_parse("t t", strict_match=True)
        assert False is s_1_2.print_parse("t t t", strict_match=True)

        class G(Grammar):
            GOAL = s * [1, ]
        s_1_none = RobustParser(G())
        assert s_1_none.print_parse("t", strict_match=True)
        assert s_1_none.print_parse("t t", strict_match=True)
        assert s_1_none.print_parse("t t t t t t", strict_match=True)

        class G(Grammar):
            GOAL = s * [1, None]
        s_1_none_a = RobustParser(G())
        assert s_1_none_a.print_parse("t", strict_match=True)
        assert s_1_none_a.print_parse("t t", strict_match=True)
        assert s_1_none_a.print_parse("t t t t t t", strict_match=True)

        class G(Grammar):
            GOAL = s * [3, ]
        s_3_none = RobustParser(G())
        assert False is s_3_none.print_parse("t", strict_match=True)
        assert False is s_3_none.print_parse("t t", strict_match=True)
        assert s_3_none.print_parse("t t t", strict_match=True)
        assert s_3_none.print_parse("t t t t t", strict_match=True)

        class G(Grammar):
            GOAL = s * [3, None]
        s_3_none_a = RobustParser(G())
        assert False is s_3_none_a.print_parse("t", strict_match=True)
        assert False is s_3_none_a.print_parse("t t", strict_match=True)
        assert s_3_none_a.print_parse("t t t", strict_match=True)
        assert s_3_none_a.print_parse("t t t t t", strict_match=True)

        class G(Grammar):
            GOAL = s * [None, 1]
        s_none_1 = RobustParser(G())
        assert s_none_1.print_parse("t", strict_match=True)

        class G(Grammar):
            GOAL = s * [None, 3]
        s_none_3 = RobustParser(G())
        assert s_none_3.print_parse("t", strict_match=True)
        assert s_none_3.print_parse("t t", strict_match=True)
        assert s_none_3.print_parse("t t t", strict_match=True)
        assert False is s_none_3.print_parse("t t t t", strict_match=True)

        class G(Grammar):
            GOAL = s * [0, 1]
        s_0_1 = RobustParser(G())
        assert s_0_1.print_parse("t", strict_match=True)
        assert False is s_0_1.print_parse("a", strict_match=True)

        class G(Grammar):
            GOAL = s * [0, None]
        s_0_1 = RobustParser(G())
        assert s_0_1.print_parse("t", strict_match=True)
        assert False is s_0_1.print_parse("a", strict_match=True)

        class G(Grammar):
            GOAL = s * [0, 1] + "b"
        s_0_1 = RobustParser(G())
        assert s_0_1.print_parse("t b", strict_match=True)
        assert s_0_1.print_parse("b")

        class G(Grammar):
            GOAL = s * [0, 3]
        s_0_3 = RobustParser(G())
        assert s_0_3.print_parse("t", strict_match=True)
        assert s_0_3.print_parse("t t", strict_match=True)
        assert s_0_3.print_parse("t t t", strict_match=True)
        assert False is s_0_3.print_parse("t t t t t", strict_match=True)

        class G(Grammar):
            GOAL = s * [3, 5]
        s_3_5 = RobustParser(G())
        assert False is s_3_5.print_parse("t t", strict_match=True)
        assert s_3_5.print_parse("t t t", strict_match=True)
        assert s_3_5.print_parse("t t t t", strict_match=True)
        assert s_3_5.print_parse("t t t t t", strict_match=True)
        assert False is s_3_5.print_parse("t t t t t t", strict_match=True)

        class G(Grammar):
            GOAL = s * [3, 3]
        s_3_3 = RobustParser(G())
        assert False is s_3_3.print_parse("t t", strict_match=True)
        assert s_3_3.print_parse("t t t", strict_match=True)
        assert False is s_3_3.print_parse("t t t t", strict_match=True)

        # invalid grammar:
        with pytest.raises(ValueError):
            s * [3, 2]
        with pytest.raises(ValueError):
            s * (3, 2)
        with pytest.raises(ValueError):
            s * 3.0
        with pytest.raises(ValueError):
            s * [3.0, 4]
        with pytest.raises(ValueError):
            s * (3, 4.5)
        with pytest.raises(ValueError):
            s * None
        with pytest.raises(ValueError):
            s * 0
        with pytest.raises(ValueError):
            s * -1
        with pytest.raises(ValueError):
            s * [-1, 3]
        with pytest.raises(ValueError):
            s * [-1, None]
        with pytest.raises(ValueError):
            s * [1, -1]
        with pytest.raises(ValueError):
            s * [None, -1]
        with pytest.raises(ValueError):
            s * [1, 2, 3]


class TestString(object):
    def test_empty_init(self):
        with pytest.raises(ValueError):
            String("")

    def test_parse(self):
        s = StringCs("hello")
        assert s.parse("hello")
        with pytest.raises(ParseException):
            s.parse("Hello")
        with pytest.raises(ParseException):
            s.parse("")
        with pytest.raises(ParseException):
            s.parse("helloo")
        with pytest.raises(ParseException):
            s.parse("hell")


class TestRegex(object):
    def test_empty_init(self):
        with pytest.raises(ValueError):
            Regex("")

    def test_equal(self):
        assert Regex("ab") == "ab"
        assert Regex("ab") == "AB"
        assert Regex("ab") != "ac"
        assert Regex("ab") != 1

    def test_parse(self):
        r = Regex(r"(ab|bc)")
        repr(r)
        assert r.parse("ab")
        assert r.parse("bc")
        with pytest.raises(ParseException):
            assert r.parse("cd")
        with pytest.raises(ParseException):
            assert r.parse("abc")
        r1 = Regex("ab", match_whole=True)
        with pytest.raises(ParseException):
            assert r1.parse("abc")
        r2 = Regex(re.compile("ab"))
        r2.parse("ab")
        with pytest.raises(ValueError):
            Regex(12)
        r3 = Regex("ab", match_whole=False)
        r3.parse("abc")


class TestSet(object):
    def test_parse(self):
        with pytest.raises(ValueError):
            Set(123)
        # All four construction forms must behave identically.
        s1 = Set("a b c")
        s2 = Set(["a", "b", "c"])
        s3 = Set({"a", "b", "c"})
        s4 = Set("abc")
        for matcher in [s1, s2, s3, s4]:
            matcher.parse("A")
            matcher.parse("B")
            matcher.parse("C")
            with pytest.raises(ParseException):
                matcher.parse("d")

    def test_parse_casesensitive(self):
        s1 = SetCs("a b c")
        s2 = SetCs(["a", "b", "c"])
        s3 = SetCs({"a", "b", "c"})
        s4 = SetCs("abc")
        for matcher in [s1, s2, s3, s4]:
            matcher.parse("a")
            matcher.parse("b")
            matcher.parse("c")
            with pytest.raises(ParseException):
                matcher.parse("A")


class TestAnd(object):
    def test_plus(self):
        a = String("1") + Optional(String("1"))
        assert len(a.exprs) == 2
        a += Regex("3")
        assert len(a.exprs) == 3
        b = "1" + a
        assert len(b.exprs) == 2
        b += "3"
        assert len(b.exprs) == 3
        assert b[0].str == "1"

    def test_streamline(self):
        a = String("1") + Optional(String("1"))
        b = String("1") + Optional(String("1"))
        c = a + b
        with pytest.raises(GrammarException):
            c.parse("1 1")


class TestOr(object):
    def test_or(self):
        a = String("1") | Optional(String("1"))
        assert len(a.exprs) == 2
        a |= Regex("3")
        assert len(a.exprs) == 3
        a |= String("4") + Optional(String("4"))
        assert len(a.exprs) == 4
        a |= "3"
        assert len(a.exprs) == 5
        with pytest.raises(GrammarException):
            print(a | 3.3)


class TestStr(object):
    def test_str(self):
        s = String("a string")
        assert str(s) == "String(a string)"
        o = Optional("a string")
        assert str(o) == "Optional(String(a string))"
        r = Regex(r"abc")('regex')
        assert str(r) == 'regex'
        r1 = r.set_name('xeger')
        assert str(r1) == 'xeger'


class TestGrammar(object):
    class LightGrammar(Grammar):
        light_general = String("light")
        action = Regex(r"(turn on|turn off|off|blink)")
        times = Regex(r"(once|twice|three times)")
        optional_times = Optional(times)
        one_more_light = OneOrMore(light_general)
        zero_more_action = ZeroOrMore(action)
        GOAL = zero_more_action + optional_times + one_more_light + \
            optional_times + OneOrMore(String("quickly")('quick'))

    def test_constructor(self):
        assert 2 == len(GrammarExpression(["1", "2"]).exprs)
        assert 1 == len(GrammarExpression("2").exprs)
        with pytest.raises(GrammarException):
            GrammarExpression(1)

    def test_grammar_str(self):
        light = TestGrammar.LightGrammar()
        repr(light)
        assert light.name == "LightGrammar"
        print(light)
        parser = RobustParser(light)
        parser_bu = RobustParser(light, BottomUpStrategy)
        # TODO: this semantics is NOT testing "grammar"
        assert True == parser.print_parse("blink light light light quickly")
        assert True == parser_bu.print_parse("blink light light light quickly")
        assert True == parser.print_parse("light quickly")
        assert True == parser_bu.print_parse("light quickly")

    def test_reserved(self):
        with pytest.raises(GrammarException):
            class ReservedGrammar(Grammar):
                _grammar_ = "reserved"
                GOAL = _grammar_
            ReservedGrammar()

    def test_goal(self):
        with pytest.raises(GrammarException):
            class NoGoalGrammar(Grammar):
                random = 'random'
            NoGoalGrammar()

    def test_test(self):
        with pytest.raises(NotImplementedError):
            TestGrammar.LightGrammar.test()


class TestZeroOrMore(object):
    def test_or(self):
        class OrGrammar(Grammar):
            GOAL = "1" | ZeroOrMore("1")
        p = RobustParser(OrGrammar())
        p.parse("1 1")


class TestParser(object):
    class LightGrammar(Grammar):
        def times2int(r):
            # Convert the matched English wording into an int result.
            if r.get() == "once":
                r.set(1)
            elif r.get() == "twice":
                r.set(2)
            elif r.get() == "three times":
                r.set(3)

        light = String("light").ignore()
        color = String("red").replace_result_with((255, 0, 0))
        action = Regex(r"(turn on|turn off|off|blink)")
        times = Regex(r"(once|twice|three times)").set_result_action(times2int)
        GOAL = action + Optional(color) + light + ZeroOrMore(times) + \
            String("quickly")('quick')

    light = LightGrammar()
    parser = RobustParser(light)
    test_str = "blink red light once quickly ignore"

    def test_parse(self):
        parser = TestParser.parser
        test_str = TestParser.test_str
        assert True == parser.print_parse(test_str, print_json=True)
        assert True == parser.print_parse(test_str, best_parse=False)
        assert True == parser.print_parse(test_str, best_parse=False,
                                          print_json=True)
        assert (None, None) == parser.parse("can't parse")
        assert (None, None) == parser.parse_string("can't parse")
        t, r = parser.parse(test_str)
        # test result
        assert r.times == [1]
        assert r.color == (255, 0, 0)
        print(repr(r))  # test __repr__()
        assert 'quickly' in r.values()
        assert 'quick' in r.keys()
        del r['quick']
        assert 'quick' not in r.keys()
        r.quick = 'quickly'
        assert 'quickly' in r.values()
        del r.quick
        assert 'quick' not in r
        r['quick'] = 'quickly'
        assert 'quickly' == r.get('quick')
        assert 'light' not in r
        # test tree
        d = t.get_flat_dict(key='GOAL', only_leaf=True)
        assert 'action' in d[0]
        d = t.get_flat_dict(key='GOAL', only_leaf=False)
        assert 'action' in d[0]
        d = t.get_flat_dict(key='action', only_leaf=False)
        assert 'action' in d[0]
        TreeNode.recursive_str_verbose(t)
        with pytest.raises(ParseException):
            parser.parse("")
        with pytest.raises(ValueError):
            parser._parse_multi_token("")
        _, tokens = parser._parse_multi_token("can't parse")
        assert len(tokens) == 0
        with pytest.raises(TypeError):
            parser._parse_multi_token(1)

    def test_incremental_parse(self):
        parser = TestParser.parser
        test_str = TestParser.test_str
        parser.print_incremental_parse(test_str)
        assert (None, None) == parser.incremental_parse('blink', False,
                                                        is_first=True)
        assert (None, None) == parser.incremental_parse('light', False)
        t, r = parser.incremental_parse('quickly', is_final=True)
        assert t is not None
        assert (None, None) == parser.incremental_parse('light', is_final=True)
        parser.clear_cache()

    def test_num_edges(self):
        class BadRule(ChartRule):
            NUM_EDGES = 2
        with pytest.raises(ValueError):
            ParsingStrategy([BadRule])
        with pytest.raises(NotImplementedError):
            BadRule().apply()

    # def test_full_trees(self):
    #     class CatalanGrammar(Grammar):
    #         a = String("a")
    #         A = NULL | a
    #         A |= A + A
    #         GOAL = A
    #     p = RobustParser(CatalanGrammar(), TopDownStrategy)
    #     chart, tokens = p.parse_to_chart("a a a a")
    #     trees = list(chart.trees(tokens,
    #                              all_trees=True,
    #                              goal=CatalanGrammar.GOAL))
    #     assert len(trees) == 5

    def test_full_trees(self):
        class FullGrammar(Grammar):
            a = String("a")
            b = String("a")
            GOAL = a + b | b + a | a + a | b + b
        p = RobustParser(FullGrammar(), TopDownStrategy)
        chart, tokens = p.parse_to_chart("a a")
        trees = list(chart.trees(tokens, all_trees=True,
                                 goal=FullGrammar.GOAL))
        print(chart)  # test chart __str__()
        assert len(trees) == 4

    def test_lex_span(self):
        parser = TestParser.parser
        test_str = "please turn off the light once twice quickly"
        _ = " 0 1 2 3 4 5 6 7"
        tree, result = parser.parse(test_str)
        # turn off
        assert parser.chart.get_lexical_span(0) == (1, 3)
        # light
        assert parser.chart.get_lexical_span(1) == (4, 5)
        # once
        assert parser.chart.get_lexical_span(2) == (5, 6)
        # twice
        assert parser.chart.get_lexical_span(3) == (6, 7)
        # quickly
        assert parser.chart.get_lexical_span(4) == (7, 8)
        # turn off (the) light
        assert parser.chart.get_lexical_span(0, 2) == (1, 5)
        assert result.action == "turn off"
        assert result.times == [1, 2]
        assert result.lex_span() == (1, 8)
        assert result.lex_span('action') == (1, 3)
        assert result.lex_span('times') == [(5, 6), (6, 7)]
        assert result.lex_span('quick') == (7, 8)


class TestHierarchicalParser(object):
    class LightGrammar(Grammar):
        light = String("light").ignore()
        color = String("red").replace_result_with((255, 0, 0))
        action = Regex(r"(turn on|turn off|off|blink)")
        times = Regex(r"(once|twice|three times)")
        one_parse = action + Optional(color) + light + ZeroOrMore(times)
        GOAL = OneOrMore(one_parse)

    light = LightGrammar()
    parser = RobustParser(light)
    test_str = "blink the red light once turn off red the light twice"
    _ = " 0 1 2 3 4 5 6 7 8 9 10 "

    def test_parse(self):
        parser = TestHierarchicalParser.parser
        test_str = TestHierarchicalParser.test_str
        tree, result = parser.parse(test_str)
        assert len(result.one_parse) == 2
        assert result.lex_span() == (0, 11)
        assert result.one_parse[0].lex_span() == (0, 5)
        assert result.one_parse[1].lex_span() == (5, 11)
        assert result.one_parse[1].lex_span('action') == (5, 7)


def test_topdown_init_rule():
    class CornerGrammar(Grammar):
        GOAL = String("a") + String("b")
    p = RobustParser(CornerGrammar(), TopDownStrategy)
    assert (None, None) == p.parse("b")
    t, _ = p.parse("a b")
    assert t is not None
    assert (None, None) == p.parse("b a")


class TestOptional(object):
    def test_o2(self):
        class OptionalGrammar(Grammar):
            s = String("t")('t')
            o1 = Optional(s)
            o2 = Optional(o1)
            GOAL = s + o2
        parser = RobustParser(OptionalGrammar(), strategy=TopDownStrategy)
        assert True == parser.print_parse("t t")
        assert True == parser.print_parse("t")
        OptionalGrammar.o2.parse("t")

    def test_o3(self):
        class OptionalGrammar(Grammar):
            s = String("t")('t')
            o3 = Optional(Optional(s))
            GOAL = s + o3
        parser = RobustParser(OptionalGrammar(), strategy=BottomUpStrategy)
        assert True == parser.print_parse("t t")
        assert True == parser.print_parse("t")


class TestNullAnd(object):
    def test_o2(self):
        class OptionalGrammar(Grammar):
            s = String("t")('t')
            o1 = Optional(s)
            o2 = ZeroOrMore(s)
            o3 = o1 + o2
            GOAL = s + o3
        parser = RobustParser(OptionalGrammar(), strategy=TopDownStrategy)
        assert True == parser.print_parse("t t")
        # known bug, FIXME
        assert False == parser.print_parse("t")


class TestDocGrammar(object):
    def test_o2(self):
        class LightGrammar(Grammar):
            action = Set(['change', 'flash', 'set', 'blink'])
            light = Set(['top', 'middle', 'bottom'])
            color = Regex(r'(red|yellow|blue|orange|purple|...)')
            times = Set(['once', 'twice', 'three times']) | Regex(r'\d+ times')
            one_parse = action + light + Optional(times) + color
            GOAL = OneOrMore(one_parse)

        parser = RobustParser(LightGrammar(), strategy=TopDownStrategy)
        sents = ["set my top light to red",
                 "set my top light to red and change middle light to yellow",
                 "set my top light to red and change middle light to yellow and "
                 "flash bottom light twice in blue"]
        for sent in sents:
            tree, result = parser.parse_string(sent)
            print('"%s"' % sent)
            print("parse tree:")
            print(tree)
            print("parse result:")
            print(result)
            assert type(result.one_parse) is list
            print()


def test_find_word_boundaries():
    boundaries, starts, ends = find_word_boundaries(
        strip_string("my lights are off"))
    assert boundaries == [(0, 2), (3, 9), (10, 13), (14, 17)]
    assert [0, 3, 10, 14] == sorted(list(starts))
    assert [2, 9, 13, 17] == sorted(list(ends))
    boundaries, starts, ends = find_word_boundaries(strip_string(""))
    assert len(boundaries) == 0
    assert len(starts) == 0
    assert len(ends) == 0
<import_from_stmt>fourier fourier<line_sep>
import itertools as it
import logpy.core as lc
from sympy.ntheory.generate import prime, isprime


# Goal constructor: succeeds when x is (or can be bound to) a prime number.
def check_prime(x):
    """Return a logpy goal that relates x to the prime numbers.

    If x is an unbound logic variable, lazily enumerate the primes
    2, 3, 5, ... as candidate bindings; otherwise simply test the
    concrete value for primality.
    """
    if lc.isvar(x):
        # condeseq consumes the candidate bindings lazily, so the
        # infinite prime stream from it.count(1) is safe here.
        return lc.condeseq([(lc.eq, x, p)] for p in map(prime, it.count(1)))
    return lc.success if isprime(x) else lc.fail


# Declare the logic variable
x = lc.var()

# Check which elements in the list are prime numbers
list_nums = (23, 4, 27, 17, 13, 10, 21, 29, 3, 32, 11, 19)
print('\nList of primes in the list:')
print(set(lc.run(0, x, (lc.membero, x, list_nums), (check_prime, x))))

# Print the first 7 prime numbers
print('\nList of first 7 prime numbers:')
print(lc.run(7, x, check_prime(x)))