idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
42,800
def genkey(key_name=None):
    """Creates a new standard GPG key

    If key_name is None the parameters are requested from the user and a
    confirmation dialog is shown before generation; otherwise the key is
    generated non-interactively under the given name.

    Returns the fingerprint of the selected or newly generated key, or
    None if gnupg is unavailable or the user cancels.
    """
    if gnupg is None:
        return
    # Base parameters for a new DSA/ElGamal key pair that never expires
    gpg_key_param_list = [
        ('key_type', 'DSA'),
        ('key_length', '2048'),
        ('subkey_type', 'ELG-E'),
        ('subkey_length', '2048'),
        ('expire_date', '0'),
    ]
    gpg = gnupg.GPG()
    gpg.encoding = 'utf-8'
    pyspread_key_fingerprint = config["gpg_key_fingerprint"]
    # Only password-less private keys are usable for unattended signing
    gpg_private_keys = [key for key in gpg.list_keys(secret=True)
                        if has_no_password(key["keyid"])]
    gpg_private_fingerprints = [key['fingerprint']
                                for key in gpg.list_keys(secret=True)
                                if has_no_password(key["keyid"])]
    pyspread_key = None
    for private_key, fingerprint in zip(gpg_private_keys,
                                        gpg_private_fingerprints):
        if str(pyspread_key_fingerprint) == fingerprint:
            pyspread_key = private_key
    # NOTE(review): this unconditionally overrides the configured key with
    # the user's choice whenever any private key exists -- confirm intended.
    if gpg_private_keys:
        pyspread_key = choose_key(gpg_private_keys)
    if pyspread_key:
        # Use the chosen existing key
        config["gpg_key_fingerprint"] = repr(pyspread_key['fingerprint'])
    else:
        # No usable key -> generate a new one
        if key_name is None:
            gpg_key_parameters = get_key_params_from_user(gpg_key_param_list)
            if gpg_key_parameters is None:
                return
        else:
            gpg_key_param_list.append(
                ('name_real', '{key_name}'.format(key_name=key_name)))
            gpg_key_parameters = dict(gpg_key_param_list)
        input_data = gpg.gen_key_input(**gpg_key_parameters)
        if key_name is None:
            # Interactive path: confirm before the (slow) key generation
            style = wx.ICON_INFORMATION | wx.DIALOG_NO_PARENT | \
                wx.OK | wx.CANCEL
            pyspread_key_uid = gpg_key_parameters["name_real"]
            short_message = _("New GPG key").format(pyspread_key_uid)
            message = _("After confirming this dialog, a new GPG key ") + \
                _("'{key}' will be generated.").format(key=pyspread_key_uid) + \
                _(" \n \nThis may take some time.\nPlease wait.")
            dlg = wx.MessageDialog(None, message, short_message, style)
            dlg.Centre()
            if dlg.ShowModal() == wx.ID_OK:
                dlg.Destroy()
                gpg_key = gpg.gen_key(input_data)
                _register_key(gpg_key, gpg)
                fingerprint = gpg_key.fingerprint
            else:
                dlg.Destroy()
                return
        else:
            gpg_key = gpg.gen_key(input_data)
            _register_key(gpg_key, gpg)
            fingerprint = gpg_key.fingerprint
    return fingerprint
Creates a new standard GPG key
42,801
def fingerprint2keyid ( fingerprint ) : if gnupg is None : return gpg = gnupg . GPG ( ) private_keys = gpg . list_keys ( True ) keyid = None for private_key in private_keys : if private_key [ 'fingerprint' ] == config [ "gpg_key_fingerprint" ] : keyid = private_key [ 'keyid' ] break return keyid
Returns keyid from fingerprint for private keys
42,802
def sign ( filename ) : if gnupg is None : return gpg = gnupg . GPG ( ) with open ( filename , "rb" ) as signfile : keyid = fingerprint2keyid ( config [ "gpg_key_fingerprint" ] ) if keyid is None : msg = "No private key for GPG fingerprint '{}'." raise ValueError ( msg . format ( config [ "gpg_key_fingerprint" ] ) ) signed_data = gpg . sign_file ( signfile , keyid = keyid , detach = True ) return signed_data
Returns detached signature for file
42,803
def verify ( sigfilename , filefilename = None ) : if gnupg is None : return False gpg = gnupg . GPG ( ) with open ( sigfilename , "rb" ) as sigfile : verified = gpg . verify_file ( sigfile , filefilename ) pyspread_keyid = fingerprint2keyid ( config [ "gpg_key_fingerprint" ] ) if verified . valid and verified . key_id == pyspread_keyid : return True return False
Verifies a signature; returns True if successful, else False.
42,804
def _len_table_cache ( self ) : length = 0 for table in self . _table_cache : length += len ( self . _table_cache [ table ] ) return length
Returns the length of the table cache
42,805
def _update_table_cache ( self ) : self . _table_cache . clear ( ) for sel , tab , val in self : try : self . _table_cache [ tab ] . append ( ( sel , val ) ) except KeyError : self . _table_cache [ tab ] = [ ( sel , val ) ] assert len ( self ) == self . _len_table_cache ( )
Clears and updates the table cache to be in sync with self
42,806
def get_merging_cell ( self , key ) : row , col , tab = key merge_area = self [ key ] [ "merge_area" ] if merge_area : return merge_area [ 0 ] , merge_area [ 1 ] , tab
Returns key of cell that merges the cell key
42,807
def _get_data ( self ) : data = { } data [ "shape" ] = self . shape data [ "grid" ] = { } . update ( self . dict_grid ) data [ "attributes" ] = [ ca for ca in self . cell_attributes ] data [ "row_heights" ] = self . row_heights data [ "col_widths" ] = self . col_widths data [ "macros" ] = self . macros return data
Returns dict of data content .
42,808
def _set_data ( self , ** kwargs ) : if "shape" in kwargs : self . shape = kwargs [ "shape" ] if "grid" in kwargs : self . dict_grid . clear ( ) self . dict_grid . update ( kwargs [ "grid" ] ) if "attributes" in kwargs : self . attributes [ : ] = kwargs [ "attributes" ] if "row_heights" in kwargs : self . row_heights = kwargs [ "row_heights" ] if "col_widths" in kwargs : self . col_widths = kwargs [ "col_widths" ] if "macros" in kwargs : self . macros = kwargs [ "macros" ]
Sets data from given parameters
42,809
def get_row_height ( self , row , tab ) : try : return self . row_heights [ ( row , tab ) ] except KeyError : return config [ "default_row_height" ]
Returns row height
42,810
def get_col_width ( self , col , tab ) : try : return self . col_widths [ ( col , tab ) ] except KeyError : return config [ "default_col_width" ]
Returns column width
42,811
def _set_shape ( self , shape ) : old_shape = self . shape deleted_cells = { } if any ( new_axis < old_axis for new_axis , old_axis in zip ( shape , old_shape ) ) : for key in self . dict_grid . keys ( ) : if any ( key_ele >= new_axis for key_ele , new_axis in zip ( key , shape ) ) : deleted_cells [ key ] = self . pop ( key ) self . dict_grid . shape = shape self . _adjust_rowcol ( 0 , 0 , 0 ) self . _adjust_cell_attributes ( 0 , 0 , 0 ) yield "_set_shape" self . shape = old_shape for key in deleted_cells : self [ key ] = deleted_cells [ key ]
Deletes all cells beyond new shape and sets dict_grid shape
42,812
def get_last_filled_cell ( self , table = None ) : maxrow = 0 maxcol = 0 for row , col , tab in self . dict_grid : if table is None or tab == table : maxrow = max ( row , maxrow ) maxcol = max ( col , maxcol ) return maxrow , maxcol , table
Returns key for the bottommost rightmost cell with content
42,813
def cell_array_generator ( self , key ) : for i , key_ele in enumerate ( key ) : if type ( key_ele ) is SliceType : slc_keys = xrange ( * key_ele . indices ( self . dict_grid . shape [ i ] ) ) key_list = list ( key ) key_list [ i ] = None has_subslice = any ( type ( ele ) is SliceType for ele in key_list ) for slc_key in slc_keys : key_list [ i ] = slc_key if has_subslice : yield self . cell_array_generator ( key_list ) else : yield self [ tuple ( key_list ) ] break
Generator traversing cells specified in key
42,814
def _shift_rowcol ( self , insertion_point , no_to_insert ) : new_row_heights = { } del_row_heights = [ ] for row , tab in self . row_heights : if tab > insertion_point : new_row_heights [ ( row , tab + no_to_insert ) ] = self . row_heights [ ( row , tab ) ] del_row_heights . append ( ( row , tab ) ) for row , tab in new_row_heights : self . set_row_height ( row , tab , new_row_heights [ ( row , tab ) ] ) for row , tab in del_row_heights : if ( row , tab ) not in new_row_heights : self . set_row_height ( row , tab , None ) new_col_widths = { } del_col_widths = [ ] for col , tab in self . col_widths : if tab > insertion_point : new_col_widths [ ( col , tab + no_to_insert ) ] = self . col_widths [ ( col , tab ) ] del_col_widths . append ( ( col , tab ) ) for col , tab in new_col_widths : self . set_col_width ( col , tab , new_col_widths [ ( col , tab ) ] ) for col , tab in del_col_widths : if ( col , tab ) not in new_col_widths : self . set_col_width ( col , tab , None )
Shifts row and column sizes when a table is inserted or deleted
42,815
def _get_adjusted_merge_area ( self , attrs , insertion_point , no_to_insert , axis ) : assert axis in range ( 2 ) if "merge_area" not in attrs or attrs [ "merge_area" ] is None : return top , left , bottom , right = attrs [ "merge_area" ] selection = Selection ( [ ( top , left ) ] , [ ( bottom , right ) ] , [ ] , [ ] , [ ] ) selection . insert ( insertion_point , no_to_insert , axis ) __top , __left = selection . block_tl [ 0 ] __bottom , __right = selection . block_br [ 0 ] rows , cols , tabs = self . shape if __top < 0 and __bottom < 0 or __top >= rows and __bottom >= rows or __left < 0 and __right < 0 or __left >= cols and __right >= cols : return if __top < 0 : __top = 0 if __top >= rows : __top = rows - 1 if __bottom < 0 : __bottom = 0 if __bottom >= rows : __bottom = rows - 1 if __left < 0 : __left = 0 if __left >= cols : __left = cols - 1 if __right < 0 : __right = 0 if __right >= cols : __right = cols - 1 return __top , __left , __bottom , __right
Returns updated merge area
42,816
def set_row_height ( self , row , tab , height ) : try : old_height = self . row_heights . pop ( ( row , tab ) ) except KeyError : old_height = None if height is not None : self . row_heights [ ( row , tab ) ] = float ( height )
Sets row height
42,817
def set_col_width ( self , col , tab , width ) : try : old_width = self . col_widths . pop ( ( col , tab ) ) except KeyError : old_width = None if width is not None : self . col_widths [ ( col , tab ) ] = float ( width )
Sets column width
42,818
def _make_nested_list ( self , gen ) : res = [ ] for ele in gen : if ele is None : res . append ( None ) elif not is_string_like ( ele ) and is_generator_like ( ele ) : res . append ( self . _make_nested_list ( ele ) ) else : res . append ( ele ) return res
Makes nested list from generator for creating numpy . array
42,819
def _get_assignment_target_end ( self , ast_module ) : if len ( ast_module . body ) > 1 : raise ValueError ( "More than one expression or assignment." ) elif len ( ast_module . body ) > 0 and type ( ast_module . body [ 0 ] ) is ast . Assign : if len ( ast_module . body [ 0 ] . targets ) != 1 : raise ValueError ( "More than one assignment target." ) else : return len ( ast_module . body [ 0 ] . targets [ 0 ] . id ) return - 1
Returns position of 1st char after assignment target.
42,820
def _get_updated_environment ( self , env_dict = None ) : if env_dict is None : env_dict = { 'S' : self } env = globals ( ) . copy ( ) env . update ( env_dict ) return env
Returns globals environment with magic variable
42,821
def _eval_cell ( self , key , code ) : def nn ( val ) : try : return numpy . array ( filter ( None , val . flat ) ) except AttributeError : return numpy . array ( filter ( None , val ) ) env_dict = { 'X' : key [ 0 ] , 'Y' : key [ 1 ] , 'Z' : key [ 2 ] , 'bz2' : bz2 , 'base64' : base64 , 'charts' : charts , 'nn' : nn , 'R' : key [ 0 ] , 'C' : key [ 1 ] , 'T' : key [ 2 ] , 'S' : self , 'vlcpanel_factory' : vlcpanel_factory } env = self . _get_updated_environment ( env_dict = env_dict ) if self . safe_mode : return code if code is None : return elif is_generator_like ( code ) : return numpy . array ( self . _make_nested_list ( code ) , dtype = "O" ) try : assignment_target_error = None module = ast . parse ( code ) assignment_target_end = self . _get_assignment_target_end ( module ) except ValueError , err : assignment_target_error = ValueError ( err ) except AttributeError , err : assignment_target_error = AttributeError ( err ) except Exception , err : assignment_target_error = Exception ( err ) if assignment_target_error is None and assignment_target_end != - 1 : glob_var = code [ : assignment_target_end ] expression = code . split ( "=" , 1 ) [ 1 ] expression = expression . strip ( ) self . result_cache . clear ( ) else : glob_var = None expression = code if assignment_target_error is not None : result = assignment_target_error else : try : import signal signal . signal ( signal . SIGALRM , self . handler ) signal . alarm ( config [ "timeout" ] ) except : pass try : result = eval ( expression , env , { } ) except AttributeError , err : result = AttributeError ( err ) except RuntimeError , err : result = RuntimeError ( err ) except Exception , err : result = Exception ( err ) finally : try : signal . alarm ( 0 ) except : pass if glob_var is not None : globals ( ) . update ( { glob_var : result } ) return result
Evaluates one cell and returns its result
42,822
def pop ( self , key ) : try : self . result_cache . pop ( repr ( key ) ) except KeyError : pass return DataArray . pop ( self , key )
Pops dict_grid with undo and redo support
42,823
def reload_modules ( self ) : import src . lib . charts as charts from src . gui . grid_panels import vlcpanel_factory modules = [ charts , bz2 , base64 , re , ast , sys , wx , numpy , datetime ] for module in modules : reload ( module )
Reloads modules that are available in cells
42,824
def clear_globals ( self ) : base_keys = [ 'cStringIO' , 'IntType' , 'KeyValueStore' , 'undoable' , 'is_generator_like' , 'is_string_like' , 'bz2' , 'base64' , '__package__' , 're' , 'config' , '__doc__' , 'SliceType' , 'CellAttributes' , 'product' , 'ast' , '__builtins__' , '__file__' , 'charts' , 'sys' , 'is_slice_like' , '__name__' , 'copy' , 'imap' , 'wx' , 'ifilter' , 'Selection' , 'DictGrid' , 'numpy' , 'CodeArray' , 'DataArray' , 'datetime' , 'vlcpanel_factory' ] for key in globals ( ) . keys ( ) : if key not in base_keys : globals ( ) . pop ( key )
Clears all newly assigned globals
42,825
def execute_macros ( self ) : if self . safe_mode : return '' , "Safe mode activated. Code not executed." self . macros = self . macros . replace ( '\r\n' , '\n' ) globals ( ) . update ( self . _get_updated_environment ( ) ) code_out = cStringIO . StringIO ( ) code_err = cStringIO . StringIO ( ) err_msg = cStringIO . StringIO ( ) sys . stdout = code_out sys . stderr = code_err try : import signal signal . signal ( signal . SIGALRM , self . handler ) signal . alarm ( config [ "timeout" ] ) except : pass try : exec ( self . macros , globals ( ) ) try : signal . alarm ( 0 ) except : pass except Exception : from traceback import print_exception from src . lib . exception_handling import get_user_codeframe exc_info = sys . exc_info ( ) user_tb = get_user_codeframe ( exc_info [ 2 ] ) or exc_info [ 2 ] print_exception ( exc_info [ 0 ] , exc_info [ 1 ] , user_tb , None , err_msg ) sys . stdout = sys . __stdout__ sys . stderr = sys . __stderr__ results = code_out . getvalue ( ) errs = code_err . getvalue ( ) + err_msg . getvalue ( ) code_out . close ( ) code_err . close ( ) self . result_cache . clear ( ) self . frozen_cache . clear ( ) return results , errs
Executes all macros and returns result string
42,826
def _sorted_keys ( self , keys , startkey , reverse = False ) : tuple_key = lambda t : t [ : : - 1 ] if reverse : tuple_cmp = lambda t : t [ : : - 1 ] > startkey [ : : - 1 ] else : tuple_cmp = lambda t : t [ : : - 1 ] < startkey [ : : - 1 ] searchkeys = sorted ( keys , key = tuple_key , reverse = reverse ) searchpos = sum ( 1 for _ in ifilter ( tuple_cmp , searchkeys ) ) searchkeys = searchkeys [ searchpos : ] + searchkeys [ : searchpos ] for key in searchkeys : yield key
Generator that yields sorted keys starting with startkey
42,827
def findnextmatch ( self , startkey , find_string , flags , search_result = True ) : assert "UP" in flags or "DOWN" in flags assert not ( "UP" in flags and "DOWN" in flags ) if search_result : def is_matching ( key , find_string , flags ) : code = self ( key ) if self . string_match ( code , find_string , flags ) is not None : return True else : res_str = unicode ( self [ key ] ) return self . string_match ( res_str , find_string , flags ) is not None else : def is_matching ( code , find_string , flags ) : code = self ( key ) return self . string_match ( code , find_string , flags ) is not None reverse = "UP" in flags for key in self . _sorted_keys ( self . keys ( ) , startkey , reverse = reverse ) : try : if is_matching ( key , find_string , flags ) : return key except Exception : pass
Returns a tuple with the position of the next match of find_string
42,828
def Validate ( self , win ) : val = self . GetWindow ( ) . GetValue ( ) for x in val : if x not in string . digits : return False return True
Returns True if Value in digits False otherwise
42,829
def OnChar ( self , event ) : key = event . GetKeyCode ( ) if key < wx . WXK_SPACE or key == wx . WXK_DELETE or key > 255 or chr ( key ) in string . digits : event . Skip ( )
Eats event if key not in digits
42,830
def Draw ( self , grid , attr , dc , rect , row , col , is_selected ) : render = wx . RendererNative . Get ( ) dc . SetBackgroundMode ( wx . SOLID ) if is_selected : dc . SetBrush ( wx . Brush ( wx . BLUE , wx . SOLID ) ) dc . SetPen ( wx . Pen ( wx . BLUE , 1 , wx . SOLID ) ) else : dc . SetBrush ( wx . Brush ( wx . WHITE , wx . SOLID ) ) dc . SetPen ( wx . Pen ( wx . WHITE , 1 , wx . SOLID ) ) dc . DrawRectangleRect ( rect ) cb_lbl = grid . GetCellValue ( row , col ) string_x = rect . x + 2 string_y = rect . y + 2 dc . DrawText ( cb_lbl , string_x , string_y ) button_x = rect . x + rect . width - self . iconwidth button_y = rect . y button_width = self . iconwidth button_height = rect . height button_size = button_x , button_y , button_width , button_height render . DrawComboBoxDropButton ( grid , dc , button_size , wx . CONTROL_CURRENT )
Draws the text and the combobox icon
42,831
def _setup_param_widgets ( self ) : for parameter in self . csv_params : pname , ptype , plabel , phelp = parameter label = wx . StaticText ( self . parent , - 1 , plabel ) widget = self . type2widget [ ptype ] ( self . parent ) if pname in self . choices : widget . AppendItems ( self . choices [ pname ] ) widget . SetValue = widget . Select widget . SetSelection ( 0 ) if ptype is types . StringType or ptype is types . UnicodeType : event_type = wx . EVT_TEXT elif ptype is types . BooleanType : event_type = wx . EVT_CHECKBOX else : event_type = wx . EVT_CHOICE handler = getattr ( self , self . widget_handlers [ pname ] ) self . parent . Bind ( event_type , handler , widget ) label . SetToolTipString ( phelp ) widget . SetToolTipString ( phelp ) label . __name__ = wx . StaticText . __name__ . lower ( ) widget . __name__ = self . type2widget [ ptype ] . __name__ . lower ( ) self . param_labels . append ( label ) self . param_widgets . append ( widget ) self . __setattr__ ( "_" . join ( [ label . __name__ , pname ] ) , label ) self . __setattr__ ( "_" . join ( [ widget . __name__ , pname ] ) , widget )
Creates the parameter entry widgets and binds them to methods
42,832
def _do_layout ( self ) : sizer_csvoptions = wx . FlexGridSizer ( 5 , 4 , 5 , 5 ) leftpos = wx . LEFT | wx . ADJUST_MINSIZE rightpos = wx . RIGHT | wx . EXPAND current_label_margin = 0 other_label_margin = 15 for label , widget in zip ( self . param_labels , self . param_widgets ) : sizer_csvoptions . Add ( label , 0 , leftpos , current_label_margin ) sizer_csvoptions . Add ( widget , 0 , rightpos , current_label_margin ) current_label_margin , other_label_margin = other_label_margin , current_label_margin sizer_csvoptions . AddGrowableCol ( 1 ) sizer_csvoptions . AddGrowableCol ( 3 ) self . sizer_csvoptions = sizer_csvoptions
Sizer hell: returns a sizer that contains all widgets
42,833
def _update_settings ( self , dialect ) : for parameter in self . csv_params [ 2 : ] : pname , ptype , plabel , phelp = parameter widget = self . _widget_from_p ( pname , ptype ) if ptype is types . TupleType : ptype = types . ObjectType digest = Digest ( acceptable_types = [ ptype ] ) if pname == 'self.has_header' : if self . has_header is not None : widget . SetValue ( digest ( self . has_header ) ) else : value = getattr ( dialect , pname ) widget . SetValue ( digest ( value ) )
Sets the widget settings to those of the chosen dialect
42,834
def _widget_from_p ( self , pname , ptype ) : widget_name = self . type2widget [ ptype ] . __name__ . lower ( ) widget_name = "_" . join ( [ widget_name , pname ] ) return getattr ( self , widget_name )
Returns a widget from its ptype and pname
42,835
def OnDialectChoice ( self , event ) : dialect_name = event . GetString ( ) value = list ( self . choices [ 'dialects' ] ) . index ( dialect_name ) if dialect_name == 'sniffer' : if self . csvfilepath is None : event . Skip ( ) return None dialect , self . has_header = sniff ( self . csvfilepath ) elif dialect_name == 'user' : event . Skip ( ) return None else : dialect = csv . get_dialect ( dialect_name ) self . _update_settings ( dialect ) self . choice_dialects . SetValue ( value )
Updates all param widgets conforming to the selected dialect
42,836
def OnWidget ( self , event ) : self . choice_dialects . SetValue ( len ( self . choices [ 'dialects' ] ) - 1 ) event . Skip ( )
Update the dialect widget to user
42,837
def get_dialect ( self ) : parameters = { } for parameter in self . csv_params [ 2 : ] : pname , ptype , plabel , phelp = parameter widget = self . _widget_from_p ( pname , ptype ) if ptype is types . StringType or ptype is types . UnicodeType : parameters [ pname ] = str ( widget . GetValue ( ) ) elif ptype is types . BooleanType : parameters [ pname ] = widget . GetValue ( ) elif pname == 'quoting' : choice = self . choices [ 'quoting' ] [ widget . GetSelection ( ) ] parameters [ pname ] = getattr ( csv , choice ) else : raise TypeError ( _ ( "{type} unknown." ) . format ( type = ptype ) ) has_header = parameters . pop ( "self.has_header" ) try : csv . register_dialect ( 'user' , ** parameters ) except TypeError , err : msg = _ ( "The dialect is invalid. \n " "\nError message:\n{msg}" ) . format ( msg = err ) dlg = wx . MessageDialog ( self . parent , msg , style = wx . ID_CANCEL ) dlg . ShowModal ( ) dlg . Destroy ( ) raise TypeError ( err ) return csv . get_dialect ( 'user' ) , has_header
Returns a new dialect that implements the current selection
42,838
def OnMouse ( self , event ) : self . SetGridCursor ( event . Row , event . Col ) self . EnableCellEditControl ( True ) event . Skip ( )
Reduces clicks to enter an edit control
42,839
def OnGridEditorCreated ( self , event ) : editor = event . GetControl ( ) editor . Bind ( wx . EVT_KILL_FOCUS , self . OnGridEditorClosed ) event . Skip ( )
Used to capture Editor close events
42,840
def OnGridEditorClosed ( self , event ) : try : dialect , self . has_header = self . parent . csvwidgets . get_dialect ( ) except TypeError : event . Skip ( ) return 0 self . fill_cells ( dialect , self . has_header , choices = False )
Event handler for end of output type choice
42,841
def get_digest_keys ( self ) : digest_keys = [ ] for col in xrange ( self . GetNumberCols ( ) ) : digest_key = self . GetCellValue ( self . has_header , col ) if digest_key == "" : digest_key = self . digest_types . keys ( ) [ 0 ] digest_keys . append ( digest_key ) return digest_keys
Returns a list of the type choices
42,842
def OnButtonApply ( self , event ) : try : dialect , self . has_header = self . csvwidgets . get_dialect ( ) except TypeError : event . Skip ( ) return 0 self . preview_textctrl . fill ( data = self . data , dialect = dialect ) event . Skip ( )
Updates the preview_textctrl
42,843
def _set_properties ( self ) : self . codetext_ctrl . SetToolTipString ( _ ( "Enter python code here." ) ) self . apply_button . SetToolTipString ( _ ( "Apply changes to current macro" ) ) self . splitter . SetBackgroundStyle ( wx . BG_STYLE_COLOUR ) self . result_ctrl . SetMinSize ( ( 10 , 10 ) )
Setup title size and tooltips
42,844
def OnApply ( self , event ) : try : ast . parse ( self . macros ) except : s = StringIO ( ) e = exc_info ( ) usr_tb = get_user_codeframe ( e [ 2 ] ) or None print_exception ( e [ 0 ] , e [ 1 ] , usr_tb , None , s ) post_command_event ( self . parent , self . MacroErrorMsg , err = s . getvalue ( ) ) success = False else : self . result_ctrl . SetValue ( '' ) post_command_event ( self . parent , self . MacroReplaceMsg , macros = self . macros ) post_command_event ( self . parent , self . MacroExecuteMsg ) success = True event . Skip ( ) return success
Event handler for Apply button
42,845
def update_result_ctrl ( self , event ) : if not self : return printLen = 0 self . result_ctrl . SetValue ( '' ) if hasattr ( event , 'msg' ) : self . result_ctrl . AppendText ( event . msg ) printLen = len ( event . msg ) if hasattr ( event , 'err' ) : errLen = len ( event . err ) errStyle = wx . TextAttr ( wx . RED ) self . result_ctrl . AppendText ( event . err ) self . result_ctrl . SetStyle ( printLen , printLen + errLen , errStyle ) if not hasattr ( event , 'err' ) or event . err == '' : if self . _ok_pressed : self . Destroy ( ) self . _ok_pressed = False
Update event result following execution by main window
42,846
def _ondim ( self , dimension , valuestring ) : try : self . dimensions [ dimension ] = int ( valuestring ) except ValueError : self . dimensions [ dimension ] = 1 self . textctrls [ dimension ] . SetValue ( str ( 1 ) ) if self . dimensions [ dimension ] < 1 : self . dimensions [ dimension ] = 1 self . textctrls [ dimension ] . SetValue ( str ( 1 ) )
Converts valuestring to int and assigns result to self . dim
42,847
def OnOk ( self , event ) : key_strings = [ self . row_textctrl . GetValue ( ) , self . col_textctrl . GetValue ( ) , self . tab_textctrl . GetValue ( ) ] key = [ ] for key_string in key_strings : try : key . append ( int ( key_string ) ) except ValueError : key . append ( 0 ) post_command_event ( self . parent , self . GotoCellMsg , key = tuple ( key ) )
Posts a command event that makes the grid show the entered cell
42,848
def _set_properties ( self ) : self . SetTitle ( _ ( "About pyspread" ) ) label = _ ( "pyspread {version}\nCopyright Martin Manns" ) label = label . format ( version = VERSION ) self . about_label . SetLabel ( label )
Setup title and label
42,849
def get_max_dim ( self , obj ) : try : iter ( obj ) except TypeError : return 0 try : for o in obj : iter ( o ) break except TypeError : return 1 return 2
Returns maximum dimensionality over which obj is iterable < = 2
42,850
def alter ( self , operation , timeout = None , metadata = None , credentials = None ) : return self . stub . Alter ( operation , timeout = timeout , metadata = metadata , credentials = credentials )
Runs alter operation .
42,851
def query ( self , req , timeout = None , metadata = None , credentials = None ) : return self . stub . Query ( req , timeout = timeout , metadata = metadata , credentials = credentials )
Runs query operation .
42,852
def mutate ( self , mutation , timeout = None , metadata = None , credentials = None ) : return self . stub . Mutate ( mutation , timeout = timeout , metadata = metadata , credentials = credentials )
Runs mutate operation .
42,853
def commit_or_abort ( self , ctx , timeout = None , metadata = None , credentials = None ) : return self . stub . CommitOrAbort ( ctx , timeout = timeout , metadata = metadata , credentials = credentials )
Runs commit or abort operation .
42,854
def check_version ( self , check , timeout = None , metadata = None , credentials = None ) : return self . stub . CheckVersion ( check , timeout = timeout , metadata = metadata , credentials = credentials )
Returns the version of the Dgraph instance .
42,855
def alter ( self , operation , timeout = None , metadata = None , credentials = None ) : new_metadata = self . add_login_metadata ( metadata ) try : return self . any_client ( ) . alter ( operation , timeout = timeout , metadata = new_metadata , credentials = credentials ) except Exception as error : if util . is_jwt_expired ( error ) : self . retry_login ( ) new_metadata = self . add_login_metadata ( metadata ) return self . any_client ( ) . alter ( operation , timeout = timeout , metadata = new_metadata , credentials = credentials ) else : raise error
Runs a modification via this client .
42,856
def txn ( self , read_only = False , best_effort = False ) : return txn . Txn ( self , read_only = read_only , best_effort = best_effort )
Creates a transaction .
42,857
def query ( self , query , variables = None , timeout = None , metadata = None , credentials = None ) : new_metadata = self . _dg . add_login_metadata ( metadata ) req = self . _common_query ( query , variables = variables ) try : res = self . _dc . query ( req , timeout = timeout , metadata = new_metadata , credentials = credentials ) except Exception as error : if util . is_jwt_expired ( error ) : self . _dg . retry_login ( ) new_metadata = self . _dg . add_login_metadata ( metadata ) res = self . _dc . query ( req , timeout = timeout , metadata = new_metadata , credentials = credentials ) else : raise error self . merge_context ( res . txn ) return res
Adds a query operation to the transaction .
42,858
def mutate ( self , mutation = None , set_obj = None , del_obj = None , set_nquads = None , del_nquads = None , commit_now = None , ignore_index_conflict = None , timeout = None , metadata = None , credentials = None ) : mutation = self . _common_mutate ( mutation = mutation , set_obj = set_obj , del_obj = del_obj , set_nquads = set_nquads , del_nquads = del_nquads , commit_now = commit_now , ignore_index_conflict = ignore_index_conflict ) new_metadata = self . _dg . add_login_metadata ( metadata ) mutate_error = None try : assigned = self . _dc . mutate ( mutation , timeout = timeout , metadata = new_metadata , credentials = credentials ) except Exception as error : if util . is_jwt_expired ( error ) : self . _dg . retry_login ( ) new_metadata = self . _dg . add_login_metadata ( metadata ) try : assigned = self . _dc . mutate ( mutation , timeout = timeout , metadata = new_metadata , credentials = credentials ) except Exception as error : mutate_error = error else : mutate_error = error if mutate_error is not None : try : self . discard ( timeout = timeout , metadata = metadata , credentials = credentials ) except : pass self . _common_except_mutate ( mutate_error ) if mutation . commit_now : self . _finished = True self . merge_context ( assigned . context ) return assigned
Adds a mutate operation to the transaction .
42,859
def commit ( self , timeout = None , metadata = None , credentials = None ) : if not self . _common_commit ( ) : return new_metadata = self . _dg . add_login_metadata ( metadata ) try : self . _dc . commit_or_abort ( self . _ctx , timeout = timeout , metadata = new_metadata , credentials = credentials ) except Exception as error : if util . is_jwt_expired ( error ) : self . _dg . retry_login ( ) new_metadata = self . _dg . add_login_metadata ( metadata ) try : self . _dc . commit_or_abort ( self . _ctx , timeout = timeout , metadata = new_metadata , credentials = credentials ) except Exception as error : return self . _common_except_commit ( error ) self . _common_except_commit ( error )
Commits the transaction .
42,860
def discard ( self , timeout = None , metadata = None , credentials = None ) : if not self . _common_discard ( ) : return new_metadata = self . _dg . add_login_metadata ( metadata ) try : self . _dc . commit_or_abort ( self . _ctx , timeout = timeout , metadata = new_metadata , credentials = credentials ) except Exception as error : if util . is_jwt_expired ( error ) : self . _dg . retry_login ( ) new_metadata = self . _dg . add_login_metadata ( metadata ) self . _dc . commit_or_abort ( self . _ctx , timeout = timeout , metadata = new_metadata , credentials = credentials ) else : raise error
Discards the transaction .
42,861
def merge_context ( self , src = None ) : if src is None : return if self . _ctx . start_ts == 0 : self . _ctx . start_ts = src . start_ts elif self . _ctx . start_ts != src . start_ts : raise Exception ( 'StartTs mismatch' ) self . _ctx . keys . extend ( src . keys ) self . _ctx . preds . extend ( src . preds )
Merges context from this instance with src .
42,862
def generate_token(self, *args, **kwargs):
    """Returns a pseudo-random hex token.

    The token length is drawn uniformly from
    [self.min_length, self.max_length]; the bytes come from os.urandom
    and are hex-encoded via binascii.hexlify.
    """
    size = random.randint(self.min_length, self.max_length)
    # max_length bytes hex-encode to 2*max_length chars, always enough.
    raw = os.urandom(self.max_length)
    return binascii.hexlify(raw).decode()[:size]
Generates a pseudo-random code using os.urandom and binascii.hexlify.
42,863
def get_text(self, node):
    """Emits whatever text is in the node.

    Prefers the first child's content; falls back to the node's own
    content when there are no children (or the node has no children
    attribute). Always returns a string.
    """
    try:
        first_child = node.children[0]
        return first_child.content or ""
    except (AttributeError, IndexError):
        # No usable child: use the node's own content.
        return node.content or ""
Try to emit whatever text is in the node .
42,864
def emit_children(self, node):
    """Renders every child of *node* and concatenates the results."""
    rendered = (self.emit_node(child) for child in node.children)
    return "".join(rendered)
Emit all the children of a node .
42,865
def emit_node(self, node):
    """Dispatches *node* to its kind-specific emitter.

    Looks up a ``<kind>_emit`` method on self; falls back to
    ``default_emit`` when no specific emitter exists.
    """
    handler = getattr(self, f"{node.kind}_emit", self.default_emit)
    return handler(node)
Emit a single node .
42,866
def ajax_preview(request, **kwargs):
    """Renders a markup preview fragment as JSON.

    Currently only supports markdown (via ``parse``). Reads the raw
    markup from ``request.POST["markup"]`` and returns
    ``{"html": <rendered preview>}``.
    """
    markup = request.POST.get("markup")
    html = render_to_string(
        "pinax/blog/_preview.html",
        {"content": parse(markup)},
    )
    return JsonResponse({"html": html})
Currently only supports markdown
42,867
def set_system_lock(cls, redis, name, timeout):
    """Sets the system lock on the semaphore ZSET *name*.

    Writes the SYSTEM_LOCK_ID member with an expiry score and keeps
    the key alive slightly longer than the lock itself.
    """
    expires_at = time.time() + timeout
    pipeline = redis.pipeline()
    pipeline.zadd(name, SYSTEM_LOCK_ID, expires_at)
    # Refresh the key TTL with a small buffer past the lock expiry.
    pipeline.expire(name, timeout + 10)
    pipeline.execute()
Set system lock for the semaphore .
42,868
def acquire(self):
    """Tries to obtain a semaphore lock.

    Returns:
        tuple: ``(acquired, locks)`` where ``acquired`` is True when
        the lock was obtained and ``locks`` is the lock count reported
        by the semaphore script.
    """
    acquired, locks = self._semaphore(
        keys=[self.name],
        args=[self.lock_id, self.max_locks, self.timeout, time.time()],
    )
    # The script reports success as the integer 1; compare directly
    # instead of the redundant `True if x == 1 else False`.
    return acquired == 1, locks
Obtain a semaphore lock .
42,869
def renew(self, new_timeout):
    """Sets a new timeout on an already-acquired lock.

    Raises LockError when the lock is not held or has no timeout;
    otherwise delegates to do_renew.
    """
    if self.local.token is None:
        raise LockError("Cannot extend an unlocked lock")

    if self.timeout is None:
        raise LockError("Cannot extend a lock with no timeout")

    return self.do_renew(new_timeout)
Sets a new timeout for an already acquired lock .
42,870
def task(self, _fn=None, queue=None, hard_timeout=None, unique=None,
         lock=None, lock_key=None, retry=None, retry_on=None,
         retry_method=None, schedule=None, batch=False,
         max_queue_size=None):
    """Function decorator that defines the behavior of the function when
    it is used as a task.

    To use the default behavior, tasks don't need to be decorated.
    Works both as a bare decorator (``@tiger.task``) and as a decorator
    factory (``@tiger.task(queue=...)``). Also attaches a ``delay``
    convenience method to the decorated function and registers
    scheduled functions in ``periodic_task_funcs``.
    """
    def _delay(func):
        def _delay_inner(*args, **kwargs):
            return self.delay(func, args=args, kwargs=kwargs)
        return _delay_inner

    # Periodic tasks are forced to be unique so only one instance of a
    # given period is ever queued.
    if schedule is not None:
        unique = True

    def _wrap(func):
        # Stash any explicitly-passed options on the function object so
        # they can be picked up when a Task is constructed later.
        if hard_timeout is not None:
            func._task_hard_timeout = hard_timeout
        if queue is not None:
            func._task_queue = queue
        if unique is not None:
            func._task_unique = unique
        if lock is not None:
            func._task_lock = lock
        if lock_key is not None:
            func._task_lock_key = lock_key
        if retry is not None:
            func._task_retry = retry
        if retry_on is not None:
            func._task_retry_on = retry_on
        if retry_method is not None:
            func._task_retry_method = retry_method
        if batch is not None:
            func._task_batch = batch
        if schedule is not None:
            func._task_schedule = schedule
        if max_queue_size is not None:
            func._task_max_queue_size = max_queue_size

        func.delay = _delay(func)

        if schedule is not None:
            serialized_func = serialize_func_name(func)
            assert serialized_func not in self.periodic_task_funcs, \
                "attempted duplicate registration of periodic task"
            self.periodic_task_funcs[serialized_func] = func

        return func

    # Bare decorator vs. decorator-factory usage.
    return _wrap if _fn is None else _wrap(_fn)
Function decorator that defines the behavior of the function when it is used as a task . To use the default behavior tasks don t need to be decorated .
42,871
def run_worker(self, queues=None, module=None, exclude_queues=None,
               max_workers_per_queue=None, store_tracebacks=None):
    """Main worker entry point method.

    Imports the given comma-separated modules (so their task functions
    get registered), then constructs and runs a Worker restricted to the
    given comma-separated queue lists. Unhandled exceptions are logged
    and re-raised.
    """
    try:
        module_names = module or ''
        for module_name in module_names.split(','):
            module_name = module_name.strip()
            if module_name:
                importlib.import_module(module_name)
                self.log.debug('imported module', module_name=module_name)

        worker = Worker(self,
                        queues.split(',') if queues else None,
                        exclude_queues.split(',') if exclude_queues else None,
                        max_workers_per_queue=max_workers_per_queue,
                        store_tracebacks=store_tracebacks)
        worker.run()
    except Exception:
        self.log.exception('Unhandled exception')
        raise
Main worker entry point method .
42,872
def delay(self, func, args=None, kwargs=None, queue=None,
          hard_timeout=None, unique=None, lock=None, lock_key=None,
          when=None, retry=None, retry_on=None, retry_method=None,
          max_queue_size=None):
    """Queues *func* as a task and returns the Task object.

    See README.rst for an explanation of the options.
    """
    queued_task = Task(self, func, args=args, kwargs=kwargs, queue=queue,
                       hard_timeout=hard_timeout, unique=unique,
                       lock=lock, lock_key=lock_key, retry=retry,
                       retry_on=retry_on, retry_method=retry_method)
    queued_task.delay(when=when, max_queue_size=max_queue_size)
    return queued_task
Queues a task . See README . rst for an explanation of the options .
42,873
def get_queue_sizes(self, queue):
    """Returns a mapping of task state -> number of tasks in *queue*.

    Covers the QUEUED, SCHEDULED and ACTIVE states, counted in a single
    Redis pipeline round trip.
    """
    states = [QUEUED, SCHEDULED, ACTIVE]
    state_keys = [self._key(state, queue) for state in states]
    pipeline = self.connection.pipeline()
    for state_key in state_keys:
        pipeline.zcard(state_key)
    return dict(zip(states, pipeline.execute()))
Get the queue s number of tasks in each state .
42,874
def get_queue_system_lock(self, queue):
    """Returns the system lock timeout for *queue* (see Semaphore)."""
    lock_key = self._key(LOCK_REDIS_KEY, queue)
    return Semaphore.get_system_lock(self.connection, lock_key)
Get system lock timeout
42,875
def set_queue_system_lock(self, queue, timeout):
    """Sets a system lock on *queue* for *timeout* seconds."""
    lock_key = self._key(LOCK_REDIS_KEY, queue)
    Semaphore.set_system_lock(self.connection, lock_key, timeout)
Set system lock on a queue .
42,876
def _install_signal_handlers ( self ) : def request_stop ( signum , frame ) : self . _stop_requested = True self . log . info ( 'stop requested, waiting for task to finish' ) signal . signal ( signal . SIGINT , request_stop ) signal . signal ( signal . SIGTERM , request_stop )
Sets up signal handlers for safely stopping the worker .
42,877
def _uninstall_signal_handlers ( self ) : signal . signal ( signal . SIGINT , signal . SIG_DFL ) signal . signal ( signal . SIGTERM , signal . SIG_DFL )
Restores default signal handlers .
42,878
def _filter_queues(self, queues):
    """Returns the subset of *queues* that pass the worker's filter.

    Prefixes are checked from most to least specific, so a more
    specific exclude wins over a broader include. With no only_queues
    configured, everything that is not excluded passes.
    """
    def allowed(queue):
        for prefix in reversed_dotted_parts(queue):
            if prefix in self.exclude_queues:
                return False
            if prefix in self.only_queues:
                return True
        # No explicit match: pass only if there is no include list.
        return not self.only_queues

    return [queue for queue in queues if allowed(queue)]
Applies the queue filter to the given list of queues and returns the queues that match . Note that a queue name matches any subqueues starting with the name followed by a date . For example foo will match both foo and foo . bar .
42,879
def _worker_queue_scheduled_tasks(self):
    """Helper method that takes due tasks from the SCHEDULED queue and
    puts them in the QUEUED queue for execution. This should be called
    periodically.
    """
    queues = set(self._filter_queues(self.connection.smembers(
        self._key(SCHEDULED))))
    now = time.time()
    for queue in queues:
        # Move due members from the per-queue SCHEDULED ZSET into the
        # QUEUED ZSET, updating the top-level state sets on success.
        # 'noupdate' keeps the timestamp of tasks already in QUEUED.
        result = self.scripts.zpoppush(
            self._key(SCHEDULED, queue),
            self._key(QUEUED, queue),
            self.config['SCHEDULED_TASK_BATCH_SIZE'],
            now,
            now,
            if_exists=('noupdate',),
            on_success=('update_sets', queue, self._key(SCHEDULED),
                        self._key(QUEUED)),
        )
        self.log.debug('scheduled tasks', queue=queue, qty=len(result))
        if result:
            # Notify listening workers that this queue has new work.
            self.connection.publish(self._key('activity'), queue)
            self._did_work = True
Helper method that takes due tasks from the SCHEDULED queue and puts them in the QUEUED queue for execution . This should be called periodically .
42,880
def _wait_for_new_tasks(self, timeout=0, batch_timeout=0):
    """Check activity channel and wait as necessary.

    Loops on the pub/sub channel until one of: work was done in the
    previous iteration, the batch window for a newly discovered queue
    expires, or the overall timeout elapses.
    """
    new_queue_found = False
    start_time = batch_exit = time.time()
    while True:
        # Compute how long to block on get_message: until the batch
        # window closes if one is open, else until the overall timeout.
        if batch_exit > start_time:
            pubsub_sleep = batch_exit - time.time()
        else:
            pubsub_sleep = start_time + timeout - time.time()
        message = self._pubsub.get_message(timeout=0 if pubsub_sleep < 0
                                           or self._did_work
                                           else pubsub_sleep)

        # Drain all pending messages before deciding whether to exit.
        while message:
            if message['type'] == 'message':
                new_queue_found, batch_exit = self._process_queue_message(
                    message['data'],
                    new_queue_found,
                    batch_exit,
                    start_time,
                    timeout,
                    batch_timeout,
                )
            message = self._pubsub.get_message()

        if self._did_work:
            # Don't wait when the previous iteration did work.
            break
        elif time.time() >= batch_exit and new_queue_found:
            # The batch window for a newly found queue has closed.
            break
        elif time.time() - start_time > timeout:
            # Overall wait timeout elapsed.
            break
Check activity channel and wait as necessary .
42,881
def _execute_forked(self, tasks, log):
    """Executes the tasks in the forked process.

    Multiple tasks can be passed for batch processing; they must all use
    the same function and share one execution entry, which is recorded
    in Redis for every task. Returns whether execution succeeded.
    """
    success = False

    execution = {}

    assert len(tasks)
    task_func = tasks[0].serialized_func
    assert all([task_func == task.serialized_func for task in tasks[1:]])

    execution['time_started'] = time.time()

    exc = None
    exc_info = None
    try:
        func = tasks[0].func

        is_batch_func = getattr(func, '_task_batch', False)
        g['current_task_is_batch'] = is_batch_func

        if is_batch_func:
            # Batch functions take a list of parameter dicts.
            params = [{
                'args': task.args,
                'kwargs': task.kwargs,
            } for task in tasks]

            # Use the longest per-task timeout, falling back to the
            # function-level setting and then the configured default.
            task_timeouts = [task.hard_timeout for task in tasks
                             if task.hard_timeout is not None]
            hard_timeout = ((max(task_timeouts) if task_timeouts else None)
                            or getattr(func, '_task_hard_timeout', None)
                            or self.config['DEFAULT_HARD_TIMEOUT'])

            g['current_tasks'] = tasks
            with UnixSignalDeathPenalty(hard_timeout):
                func(params)
        else:
            # Execute tasks one by one, each under its own timeout.
            for task in tasks:
                hard_timeout = (task.hard_timeout
                                or getattr(func, '_task_hard_timeout', None)
                                or self.config['DEFAULT_HARD_TIMEOUT'])
                g['current_tasks'] = [task]
                with UnixSignalDeathPenalty(hard_timeout):
                    func(*task.args, **task.kwargs)
    except RetryException as exc:
        # The task requested a retry; record how to retry it.
        execution['retry'] = True
        if exc.method:
            execution['retry_method'] = serialize_retry_method(exc.method)
        execution['log_error'] = exc.log_error
        execution['exception_name'] = serialize_func_name(exc.__class__)
        exc_info = exc.exc_info or sys.exc_info()
    except (JobTimeoutException, Exception) as exc:
        execution['exception_name'] = serialize_func_name(exc.__class__)
        exc_info = sys.exc_info()
    else:
        success = True

    if not success:
        execution['time_failed'] = time.time()
        if self.store_tracebacks:
            # Store the traceback only when configured to do so.
            execution['traceback'] = ''.join(
                traceback.format_exception(*exc_info))

    execution['success'] = success
    execution['host'] = socket.gethostname()

    serialized_execution = json.dumps(execution)

    # Record the shared execution entry for every task in the batch.
    for task in tasks:
        self.connection.rpush(
            self._key('task', task.id, 'executions'),
            serialized_execution)

    return success
Executes the tasks in the forked process. Multiple tasks can be passed for batch processing; however, they must all use the same function and will share the execution entry.
42,882
def _get_queue_batch_size(self, queue):
    """Returns the configured batch size for *queue* (default 1).

    dotted_parts yields prefixes from least to most specific, so the
    most specific matching BATCH_QUEUES entry wins.
    """
    batch_queues = self.config['BATCH_QUEUES']
    batch_size = 1
    for prefix in dotted_parts(queue):
        if prefix in batch_queues:
            batch_size = batch_queues[prefix]
    return batch_size
Get queue batch size .
42,883
def _get_queue_lock(self, queue, log):
    """Get queue lock for max worker queues.

    Returns ``(queue_lock, failed_to_acquire)``: an acquired Semaphore
    (or None when the queue is unrestricted) and a flag that is True
    when the semaphore could not be acquired.
    """
    max_workers = self.max_workers_per_queue

    # A single-worker queue prefix overrides the max workers setting.
    for part in dotted_parts(queue):
        if part in self.single_worker_queues:
            log.debug('single worker queue')
            max_workers = 1
            break

    if max_workers:
        queue_lock = Semaphore(self.connection,
                               self._key(LOCK_REDIS_KEY, queue),
                               self.id,
                               max_locks=max_workers,
                               timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT'])
        acquired, locks = queue_lock.acquire()
        if not acquired:
            return None, True
        log.debug('acquired queue lock', locks=locks)
    else:
        queue_lock = None

    return queue_lock, False
Get queue lock for max worker queues .
42,884
def _heartbeat(self, queue, task_ids):
    """Refreshes the ACTIVE-set timestamps for *task_ids* so the tasks
    are not considered expired and requeued.
    """
    now = time.time()
    scores = {task_id: now for task_id in task_ids}
    self.connection.zadd(self._key(ACTIVE, queue), **scores)
Updates the heartbeat for the given task IDs to prevent them from timing out and being requeued .
42,885
def _execute(self, queue, tasks, log, locks, queue_lock, all_task_ids):
    """Executes the given tasks in a forked child process.

    The parent waits for the child, periodically sending heartbeats and
    renewing the task/queue locks while it runs. Returns a boolean
    indicating whether the tasks were executed successfully.
    """
    assert len(tasks)
    task_func = tasks[0].serialized_func
    assert all([task_func == task.serialized_func for task in tasks[1:]])

    # Before executing periodic tasks, queue them for the next period.
    if task_func in self.tiger.periodic_task_funcs:
        tasks[0]._queue_for_next_period()

    with g_fork_lock:
        child_pid = os.fork()

    if child_pid == 0:
        # Child process: run the tasks and exit with a status code.
        log = log.bind(child_pid=os.getpid())

        # Drop the Redis connection inherited from the parent so the
        # child establishes its own.
        self.connection.connection_pool.disconnect()

        random.seed()

        # Ignore Ctrl+C; the parent coordinates graceful shutdown.
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        with WorkerContextManagerStack(
                self.config['CHILD_CONTEXT_MANAGERS']):
            success = self._execute_forked(tasks, log)

        # Wait for any background threads before exiting.
        threading._shutdown()

        os._exit(int(not success))
    else:
        # Parent process.
        log = log.bind(child_pid=child_pid)
        for task in tasks:
            log.info('processing', func=task_func, task_id=task.id,
                     params={'args': task.args, 'kwargs': task.kwargs})

        # Hook SIGCHLD into a non-blocking wakeup pipe so the select()
        # below is interrupted as soon as the child exits.
        signal.signal(signal.SIGCHLD, sigchld_handler)

        pipe_r, pipe_w = os.pipe()
        flags = fcntl.fcntl(pipe_w, fcntl.F_GETFL, 0)
        flags = flags | os.O_NONBLOCK
        fcntl.fcntl(pipe_w, fcntl.F_SETFL, flags)
        old_wakeup_fd = signal.set_wakeup_fd(pipe_w)

        def check_child_exit():
            """Non-blocking check whether the child exited; returns its
            exit code, or None while it is still running."""
            try:
                pid, return_code = os.waitpid(child_pid, os.WNOHANG)
                if pid != 0:
                    return return_code
            except OSError as e:
                # EINTR can happen if the child exits while we check;
                # retrying is safe.
                if e.errno == errno.EINTR:
                    return check_child_exit()
                else:
                    raise

        # Main wait loop: sleep, then heartbeat and renew locks until
        # the child exits.
        while True:
            return_code = check_child_exit()
            if return_code is not None:
                break

            try:
                select.select([pipe_r], [], [],
                              self.config['ACTIVE_TASK_UPDATE_TIMER'])
            except select.error as e:
                if e.args[0] != errno.EINTR:
                    raise

            return_code = check_child_exit()
            if return_code is not None:
                break

            try:
                self._heartbeat(queue, all_task_ids)
                for lock in locks:
                    lock.renew(self.config['ACTIVE_TASK_UPDATE_TIMEOUT'])
                if queue_lock:
                    acquired, current_locks = queue_lock.renew()
                    if not acquired:
                        log.debug('queue lock renew failure')
            except OSError as e:
                # EINTR from the SIGCHLD handler is expected here.
                if e.errno != errno.EINTR:
                    raise

        # Restore signal/pipe state.
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
        signal.set_wakeup_fd(old_wakeup_fd)
        os.close(pipe_r)
        os.close(pipe_w)

        success = (return_code == 0)
        return success
Executes the given tasks . Returns a boolean indicating whether the tasks were executed successfully .
42,886
def _process_queue_message(self, message_queue, new_queue_found, batch_exit,
                           start_time, timeout, batch_timeout):
    """Process a queue message from the activity channel.

    Tracks newly discovered queues in ``self._queue_set`` and computes
    the updated batch-window deadline. Returns the updated
    ``(new_queue_found, batch_exit)`` pair.
    """
    for queue in self._filter_queues([message_queue]):
        if queue not in self._queue_set:
            if not new_queue_found:
                # First new queue: open the batch window.
                new_queue_found = True
                batch_exit = time.time() + batch_timeout
                # Never extend past the overall wait timeout.
                if batch_exit > start_time + timeout:
                    batch_exit = start_time + timeout
            self._queue_set.add(queue)
            self.log.debug('new queue', queue=queue)
    return new_queue_found, batch_exit
Process a queue message from activity channel .
42,887
def _process_queue_tasks(self, queue, queue_lock, task_ids, now, log):
    """Process tasks in queue.

    Loads the payloads for *task_ids*, drops missing/mismatched
    entries, groups the rest by task function and executes each group.
    Returns the number of tasks that were processed.
    """
    processed_count = 0

    # Fetch all serialized task payloads in one round trip.
    serialized_tasks = self.connection.mget([
        self._key('task', task_id) for task_id in task_ids
    ])
    tasks = []
    for task_id, serialized_task in zip(task_ids, serialized_tasks):
        if serialized_task:
            task_data = json.loads(serialized_task)
        else:
            # Missing payload: keep a stub so the ID can be removed.
            task_data = {'id': task_id}

        task = Task(self.tiger, queue=queue, _data=task_data,
                    _state=ACTIVE, _ts=now)

        if not serialized_task:
            log.error('not found', task_id=task_id)
            task._move()
        elif task.id != task_id:
            log.error('task ID mismatch', task_id=task_id)
            task._move()
        else:
            tasks.append(task)

    # IDs of tasks that exist and will actually be executed.
    valid_task_ids = set(task.id for task in tasks)

    # Group by function so batch tasks can share one execution.
    tasks_by_func = OrderedDict()
    for task in tasks:
        func = task.serialized_func
        if func in tasks_by_func:
            tasks_by_func[func].append(task)
        else:
            tasks_by_func[func] = [task]

    # Execute each task-function group and finalize its tasks.
    for tasks in tasks_by_func.values():
        success, processed_tasks = self._execute_task_group(
            queue, tasks, valid_task_ids, queue_lock)
        processed_count = processed_count + len(processed_tasks)
        log.debug('processed', attempted=len(tasks),
                  processed=processed_count)
        for task in processed_tasks:
            self._finish_task_processing(queue, task, success)

    return processed_count
Process tasks in queue .
42,888
def _process_from_queue(self, queue):
    """Internal method to process a task batch from the given queue.

    Returns ``(task_ids, processed_count)``; ``processed_count`` is -1
    when the queue lock could not be acquired.
    """
    now = time.time()

    log = self.log.bind(queue=queue)

    batch_size = self._get_queue_batch_size(queue)

    queue_lock, failed_to_acquire = self._get_queue_lock(queue, log)
    if failed_to_acquire:
        return [], -1

    # Move a batch from QUEUED to ACTIVE. Tasks that are already in
    # ACTIVE are rescheduled to be retried after LOCK_RETRY instead.
    later = time.time() + self.config['LOCK_RETRY']
    task_ids = self.scripts.zpoppush(
        self._key(QUEUED, queue),
        self._key(ACTIVE, queue),
        batch_size,
        None,
        now,
        if_exists=('add', self._key(SCHEDULED, queue), later, 'min'),
        on_success=('update_sets', queue, self._key(QUEUED),
                    self._key(ACTIVE), self._key(SCHEDULED)),
    )
    log.debug('moved tasks', src_queue=QUEUED, dest_queue=ACTIVE,
              qty=len(task_ids))

    processed_count = 0
    if task_ids:
        processed_count = self._process_queue_tasks(queue, queue_lock,
                                                    task_ids, now, log)

    if queue_lock:
        queue_lock.release()
        log.debug('released swq lock')

    return task_ids, processed_count
Internal method to process a task batch from the given queue .
42,889
def _execute_task_group(self, queue, tasks, all_task_ids, queue_lock):
    """Executes the given tasks in the queue.

    Acquires the per-task locks first; tasks whose lock cannot be
    obtained are rescheduled. Updates the heartbeat for task IDs passed
    in all_task_ids. This internal method is only meant to be called
    from within _process_from_queue. Returns ``(success, ready_tasks)``.
    """
    log = self.log.bind(queue=queue)

    locks = []
    # Keep track of the acquired lock IDs so the same lock is not
    # acquired twice within this batch.
    lock_ids = set()

    ready_tasks = []
    for task in tasks:
        if task.lock:
            if task.lock_key:
                # Lock on a subset of the kwargs only.
                kwargs = task.kwargs
                lock_id = gen_unique_id(
                    task.serialized_func,
                    None,
                    {key: kwargs.get(key) for key in task.lock_key},
                )
            else:
                # Lock on the full argument set.
                lock_id = gen_unique_id(
                    task.serialized_func,
                    task.args,
                    task.kwargs,
                )

            if lock_id not in lock_ids:
                lock = Lock(self.connection,
                            self._key('lock', lock_id),
                            timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT'])

                acquired = lock.acquire(blocking=False)
                if acquired:
                    lock_ids.add(lock_id)
                    locks.append(lock)
                else:
                    # Could not lock: reschedule for a later retry.
                    log.info('could not acquire lock', task_id=task.id)
                    when = time.time() + self.config['LOCK_RETRY']
                    task._move(from_state=ACTIVE, to_state=SCHEDULED,
                               when=when, mode='min')
                    # Remove from the heartbeat set so it isn't re-added
                    # to ACTIVE by a heartbeat update.
                    all_task_ids.remove(task.id)
                    continue

        ready_tasks.append(task)

    if not ready_tasks:
        return True, []

    if self.stats_thread:
        self.stats_thread.report_task_start()
    success = self._execute(queue, ready_tasks, log, locks, queue_lock,
                            all_task_ids)
    if self.stats_thread:
        self.stats_thread.report_task_end()

    for lock in locks:
        lock.release()

    return success, ready_tasks
Executes the given tasks in the queue. Updates the heartbeat for task IDs passed in all_task_ids. This internal method is only meant to be called from within _process_from_queue.
42,890
def _finish_task_processing(self, queue, task, success):
    """After a task is executed, this method is called and ensures that
    the task gets properly removed from the ACTIVE queue and, in case
    of an error, retried or marked as failed.
    """
    log = self.log.bind(queue=queue, task_id=task.id)

    def _mark_done():
        # Remove the task from the ACTIVE queue.
        task._move(from_state=ACTIVE)
        log.info('done')

    if success:
        _mark_done()
    else:
        should_retry = False
        should_log_error = True

        # Inspect the last execution entry to decide retry behavior.
        execution = self.connection.lindex(
            self._key('task', task.id, 'executions'), -1)
        if execution:
            execution = json.loads(execution)

        if execution and execution.get('retry'):
            # The task itself requested a retry (RetryException).
            if 'retry_method' in execution:
                retry_func, retry_args = execution['retry_method']
            else:
                retry_func, retry_args = serialize_retry_method(
                    self.config['DEFAULT_RETRY_METHOD'])
            should_log_error = execution['log_error']
            should_retry = True

        if task.retry_method and not should_retry:
            retry_func, retry_args = task.retry_method
            if task.retry_on:
                # Only retry when the raised exception matches retry_on.
                if execution:
                    exception_name = execution.get('exception_name')
                    try:
                        exception_class = import_attribute(exception_name)
                    except TaskImportError:
                        log.error('could not import exception',
                                  exception_name=exception_name)
                    else:
                        if task.should_retry_on(exception_class,
                                                logger=log):
                            should_retry = True
            else:
                should_retry = True

        state = ERROR

        when = time.time()

        log_context = {'func': task.serialized_func}

        if should_retry:
            retry_num = task.n_executions()
            log_context['retry_func'] = retry_func
            log_context['retry_num'] = retry_num

            try:
                func = import_attribute(retry_func)
            except TaskImportError:
                log.error('could not import retry function',
                          func=retry_func)
            else:
                try:
                    retry_delay = func(retry_num, *retry_args)
                    log_context['retry_delay'] = retry_delay
                    when += retry_delay
                except StopRetry:
                    # Retry policy says stop: keep the ERROR state.
                    pass
                else:
                    state = SCHEDULED

        if execution:
            if state == ERROR and should_log_error:
                log_func = log.error
            else:
                log_func = log.warning
            log_context.update({
                'time_failed': execution.get('time_failed'),
                'traceback': execution.get('traceback'),
                'exception_name': execution.get('exception_name'),
            })
            log_func('task error', **log_context)
        else:
            log.error('execution not found', **log_context)

        # Move the task to SCHEDULED (retry) or ERROR; errors that
        # should not be logged are simply marked done.
        if state == ERROR and not should_log_error:
            _mark_done()
        else:
            task._move(from_state=ACTIVE, to_state=state, when=when)
After a task is executed this method is called and ensures that the task gets properly removed from the ACTIVE queue and in case of an error retried or marked as failed .
42,891
def run(self, once=False, force_once=False):
    """Main loop of the worker.

    With ``once=True`` the worker executes queued tasks and exits when
    no queues remain; ``force_once=True`` makes it exit after the first
    iteration regardless.
    """
    self.log.info('ready', id=self.id,
                  queues=sorted(self.only_queues),
                  exclude_queues=sorted(self.exclude_queues),
                  single_worker_queues=sorted(self.single_worker_queues),
                  max_workers=self.max_workers_per_queue)

    if not self.scripts.can_replicate_commands:
        # Lua scripts on pre-3.2 Redis replicate differently.
        self.log.warn('using old Redis version')

    if self.config['STATS_INTERVAL']:
        self.stats_thread = StatsThread(self)
        self.stats_thread.start()

    # Queue any periodic tasks that are not queued yet.
    self._queue_periodic_tasks()

    # Subscribe to the activity channel to learn about new work, and
    # seed the queue set with the queues that already have tasks.
    self._pubsub = self.connection.pubsub()
    self._pubsub.subscribe(self._key('activity'))

    self._queue_set = set(self._filter_queues(
        self.connection.smembers(self._key(QUEUED))))

    try:
        while True:
            # Update the queue set on any new message.
            self._wait_for_new_tasks(
                timeout=self.config['SELECT_TIMEOUT'],
                batch_timeout=self.config['SELECT_BATCH_TIMEOUT'])

            self._install_signal_handlers()
            self._did_work = False
            self._worker_run()
            self._uninstall_signal_handlers()

            if once and (not self._queue_set or force_once):
                break

            if self._stop_requested:
                raise KeyboardInterrupt()
    except KeyboardInterrupt:
        pass
    except Exception as e:
        self.log.exception(event='exception')
        raise
    finally:
        if self.stats_thread:
            self.stats_thread.stop()
            self.stats_thread = None

        # Free up the pub/sub Redis connection.
        self._pubsub.reset()

        self.log.info('done')
Main loop of the worker .
42,892
def can_replicate_commands(self):
    """Whether the connected Redis server supports single-command
    replication (Redis >= 3.2). The answer is computed once and cached
    on the instance.
    """
    if not hasattr(self, '_can_replicate_commands'):
        server_info = self.redis.info('server')
        version_parts = server_info['redis_version'].split('.')
        major, minor = int(version_parts[0]), int(version_parts[1])
        self._can_replicate_commands = (major, minor) >= (3, 2)
    return self._can_replicate_commands
Whether Redis supports single command replication .
42,893
def zpoppush(self, source, destination, count, score, new_score,
             client=None, withscores=False, on_success=None,
             if_exists=None):
    """Pops the first *count* members from the ZSET *source* and adds
    them to the ZSET *destination* with a score of *new_score*.

    If *score* is not None, only members up to a score of *score* are
    used. Returns the members that were moved and, if *withscores* is
    True, their original scores. Dispatches to the matching Lua script
    variant based on the ``on_success`` / ``if_exists`` options; only
    the combinations used by the worker are implemented.
    """
    if score is None:
        # No score cap: take members regardless of score.
        score = '+inf'
    if withscores:
        if on_success:
            raise NotImplementedError()
        return self._zpoppush_withscores(
            keys=[source, destination],
            args=[score, count, new_score],
            client=client)
    else:
        if if_exists and if_exists[0] == 'add':
            # Members already in destination are added to another ZSET
            # instead, keeping the minimum score.
            _, if_exists_key, if_exists_score, if_exists_mode = if_exists
            if if_exists_mode != 'min':
                raise NotImplementedError()
            if not on_success or on_success[0] != 'update_sets':
                raise NotImplementedError()
            set_value, remove_from_set, add_to_set, add_to_set_if_exists \
                = on_success[1:]
            return self._zpoppush_exists_min_update_sets(
                keys=[source, destination, remove_from_set, add_to_set,
                      add_to_set_if_exists, if_exists_key],
                args=[score, count, new_score, set_value,
                      if_exists_score],
            )
        elif if_exists and if_exists[0] == 'noupdate':
            # Members already in destination keep their current score.
            if not on_success or on_success[0] != 'update_sets':
                raise NotImplementedError()
            set_value, remove_from_set, add_to_set = on_success[1:]
            return self._zpoppush_exists_ignore_update_sets(
                keys=[source, destination, remove_from_set, add_to_set],
                args=[score, count, new_score, set_value],
            )

        if on_success:
            if on_success[0] != 'update_sets':
                raise NotImplementedError()
            else:
                set_value, remove_from_set, add_to_set = on_success[1:]
                return self._zpoppush_update_sets(
                    keys=[source, destination, remove_from_set,
                          add_to_set],
                    args=[score, count, new_score, set_value],
                    client=client)
        else:
            return self._zpoppush(
                keys=[source, destination],
                args=[score, count, new_score],
                client=client)
Pops the first "count" members from the ZSET "source" and adds them to the ZSET "destination" with a score of "new_score". If "score" is not None, only members up to a score of "score" are used. Returns the members that were moved and, if "withscores" is True, their original scores.
42,894
def execute_pipeline(self, pipeline, client=None):
    """Executes the given Redis pipeline as a Lua script.

    When an error occurs, the transaction stops executing and an
    exception is raised. This differs from Redis transactions, where
    execution continues after an error. On success, a list of results
    (with response callbacks applied) is returned. The pipeline is
    cleared after execution and can no longer be reused.
    """
    client = client or self.redis

    executing_pipeline = None
    try:
        # Flatten the command stack into the flat argument list that
        # the _execute_pipeline Lua script expects.
        stack = pipeline.command_stack
        script_args = [int(self.can_replicate_commands), len(stack)]
        for args, options in stack:
            script_args += [len(args) - 1] + list(args)

        if self.can_replicate_commands:
            # Redis 3.2 or higher: run the script directly; just make
            # sure any referenced scripts are loaded.
            if pipeline.scripts:
                pipeline.load_scripts()
            raw_results = self._execute_pipeline(args=script_args,
                                                 client=client)
        else:
            # Older Redis: load scripts and run inside one pipeline so
            # scripts exist even after AOF reload / replication.
            executing_pipeline = client.pipeline()
            for s in pipeline.scripts:
                executing_pipeline.script_load(s.script)
            self._execute_pipeline(args=script_args,
                                   client=executing_pipeline)
            # The last pipeline result is the Lua script's result list.
            raw_results = executing_pipeline.execute()[-1]

        # Apply the per-command response callbacks to the raw results.
        results = []
        response_callbacks = pipeline.response_callbacks
        for ((args, options), result) in zip(stack, raw_results):
            command_name = args[0]
            if command_name in response_callbacks:
                result = response_callbacks[command_name](result,
                                                          **options)
            results.append(result)

        return results
    finally:
        if executing_pipeline:
            executing_pipeline.reset()
        pipeline.reset()
Executes the given Redis pipeline as a Lua script. When an error occurs, the transaction stops executing and an exception is raised. This differs from Redis transactions, where execution continues after an error. On success, a list of results is returned. The pipeline is cleared after execution and can no longer be reused.
42,895
def gen_unique_id(serialized_name, args, kwargs):
    """Generates and returns a hex-encoded 256-bit ID for the given
    task name and args. Used to generate IDs for unique tasks or for
    task locks.

    The payload is JSON-serialized with sorted keys so equal inputs
    always produce the same ID.
    """
    payload = {
        'func': serialized_name,
        'args': args,
        'kwargs': kwargs,
    }
    serialized = json.dumps(payload, sort_keys=True).encode('utf8')
    return hashlib.sha256(serialized).hexdigest()
Generates and returns a hex - encoded 256 - bit ID for the given task name and args . Used to generate IDs for unique tasks or for task locks .
42,896
def serialize_func_name(func):
    """Returns the dotted serialized path ("module:qualname") to the
    passed function.

    Raises ValueError for functions defined in __main__, since workers
    cannot re-import those.
    """
    module = func.__module__
    if module == '__main__':
        raise ValueError('Functions from the __main__ module cannot be '
                         'processed by workers.')
    try:
        qualified_name = func.__qualname__
    except AttributeError:
        # Older objects without __qualname__.
        qualified_name = func.__name__
    return '%s:%s' % (module, qualified_name)
Returns the dotted serialized path to the passed function .
42,897
def dotted_parts(s):
    """For a string "a.b.c", yields "a", "a.b", "a.b.c" — every dotted
    prefix from least to most specific. Yields nothing for "".
    """
    if not s:
        return
    pieces = s.split('.')
    for end in range(1, len(pieces) + 1):
        yield '.'.join(pieces[:end])
For a string "a.b.c", yields "a", "a.b", "a.b.c".
42,898
def reversed_dotted_parts(s):
    """For a string "a.b.c", yields "a.b.c", "a.b", "a" — the full name
    first, then each dotted prefix from most to least specific. Yields
    nothing for "". A dot in the final position is not treated as a
    separator for the first truncation.
    """
    if not s:
        return
    yield s
    end = len(s) - 1
    while True:
        cut = s.rfind('.', 0, end)
        if cut == -1:
            return
        yield s[:cut]
        end = cut
For a string "a.b.c", yields "a.b.c", "a.b", "a".
42,899
def tasktiger_processor(logger, method_name, event_dict):
    """TaskTiger structlog processor.

    Tags log events with the current task ID. Skipped for batch tasks,
    where several tasks run in one call.
    """
    current = g['current_tasks']
    if current is not None and not g['current_task_is_batch']:
        event_dict['task_id'] = current[0].id
    return event_dict
TaskTiger structlog processor .