signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def _get_flatchoices ( self ) : """Redefine standard method . Return constants themselves instead of their names for right rendering in admin ' s ' change _ list ' view , if field is present in ' list _ display ' attribute of model ' s admin ."""
return [ ( self . to_python ( choice ) , value ) for choice , value in self . _choices ]
def globalsfilter(input_dict, check_all=False, filters=None,
                  exclude_private=None, exclude_capitalized=None,
                  exclude_uppercase=None, exclude_unsupported=None,
                  excluded_names=None):
    """Keep only objects that can be pickled.

    Filter *input_dict* down to entries that pass all the enabled
    exclusion rules.

    :param input_dict: mapping of names to objects to filter
    :param check_all: forwarded to ``is_supported``
    :param filters: forwarded to ``is_supported``
    :param exclude_private: drop keys starting with ``_``
    :param exclude_capitalized: drop keys whose first letter is uppercase
    :param exclude_uppercase: drop fully-uppercase keys (longer than one
        char and not digit-suffixed like ``X1``)
    :param exclude_unsupported: drop values rejected by ``is_supported``
    :param excluded_names: explicit key blacklist (may be None)
    :return: new dict with the surviving entries
    """
    # BUG FIX: the original evaluated `key in excluded_names` with the
    # default of None, raising TypeError on every call that did not pass
    # an explicit blacklist.  Treat None as "no explicit exclusions".
    if excluded_names is None:
        excluded_names = ()
    output_dict = {}
    for key, value in list(input_dict.items()):
        excluded = (
            (exclude_private and key.startswith('_')) or
            (exclude_capitalized and key[0].isupper()) or
            (exclude_uppercase and key.isupper() and
             len(key) > 1 and not key[1:].isdigit()) or
            (key in excluded_names) or
            (exclude_unsupported and
             not is_supported(value, check_all=check_all, filters=filters))
        )
        if not excluded:
            output_dict[key] = value
    return output_dict
def assure_cache(project_path=None):
    """Assure that a project directory has a cache folder.

    If the cache folder does not exist, it is created.

    :param project_path: path to the project directory; validated by the
        project's ``path`` helper (None is passed through to it)
    """
    # NOTE(review): `path(project_path, ISDIR)` presumably normalizes the
    # path and asserts it is an existing directory -- confirm against the
    # `path` helper's contract.
    project_path = path(project_path, ISDIR)
    cache_path = os.path.join(project_path, CACHE_NAME)
    # Create the cache folder only when missing; existing caches are kept.
    if not os.path.isdir(cache_path):
        os.mkdir(cache_path)
def index_of_coincidence(*texts):
    """Calculate the index of coincidence for one or more ``texts``.

    The results are averaged over multiple texts to return the delta
    index of coincidence.

    Examples:
        >>> index_of_coincidence("aabbc")
        0.2
        >>> index_of_coincidence("aabbc", "abbcc")
        0.2

    Args:
        *texts (variable length argument list): The texts to analyze

    Returns:
        Decimal value of the index of coincidence

    Raises:
        ValueError: If texts is empty
        ValueError: If any text is less than 2 characters long
    """
    if not texts:
        raise ValueError("texts must not be empty")
    per_text_ioc = (
        _calculate_index_of_coincidence(frequency_analyze(text), len(text))
        for text in texts
    )
    return statistics.mean(per_text_ioc)
def transaction_insert_dict_auto_inc(self, transaction_cursor, tblname, d,
                                     unique_id_fields=None, fields=None,
                                     check_existing=False, id_field='ID'):
    '''A transaction wrapper for inserting dicts into fields with an
    autoincrementing ID.

    Insert the record and return the associated ID (long).

    :param transaction_cursor: open cursor participating in the transaction
    :param tblname: table to insert into
    :param d: dict of column name -> value
    :param unique_id_fields: columns that uniquely identify the record
        (used to look the ID up when the insert was skipped)
    :param fields: optional explicit column subset for the insert
    :param check_existing: when True, skip the insert if the record exists
    :param id_field: name of the autoincrement column
    :return: the record's ID
    '''
    # BUG FIX: the original used a mutable default (`unique_id_fields=[]`),
    # shared across calls; use None as the sentinel instead.
    if unique_id_fields is None:
        unique_id_fields = []
    sql, params, record_exists = self.create_insert_dict_string(
        tblname, d, PKfields=unique_id_fields, fields=fields,
        check_existing=check_existing)
    if not record_exists:
        transaction_cursor.execute(sql, params)
    # lastrowid is None/stale when nothing was inserted; fall back to
    # looking the record up by its unique fields.
    record_id = transaction_cursor.lastrowid
    if record_id is None:
        record_id = self.get_unique_record(
            'SELECT * FROM {0} WHERE {1}'.format(
                tblname,
                ' AND '.join([f + '=%s' for f in unique_id_fields])),
            parameters=tuple([d[f] for f in unique_id_fields]))[id_field]
    assert record_id
    return record_id
def optimise_xy(xy, *args):
    """Return negative pore diameter for x and y coordinates optimisation.

    Intended as an objective function for a minimiser: minimising the
    negative diameter maximises the pore diameter over (x, y) at fixed z.
    """
    z_coord, elements, coordinates = args
    probe_com = np.array([xy[0], xy[1], z_coord])
    diameter = pore_diameter(elements, coordinates, com=probe_com)[0]
    return -diameter
def send_message_event(self, room_id, event_type, content, txn_id=None,
                       timestamp=None):
    """Perform PUT /rooms/$room_id/send/$event_type

    Args:
        room_id (str): The room ID to send the message event in.
        event_type (str): The event type to send.
        content (dict): The JSON content to send.
        txn_id (int): Optional. The transaction ID to use.
        timestamp (int): Set origin_server_ts (For application services only)
    """
    if not txn_id:
        txn_id = self._make_txn_id()
    path = "/rooms/%s/send/%s/%s" % (
        quote(room_id),
        quote(event_type),
        quote(str(txn_id)),
    )
    params = {"ts": timestamp} if timestamp else {}
    return self._send("PUT", path, content, query_params=params)
def to_python(self, value):
    """Return a str representation of the hexadecimal.

    Strings and None pass through unchanged; integers are converted to
    their hex-string form.
    """
    # None and existing strings need no conversion (None is not a string,
    # so merging the two guards preserves the original behavior).
    if value is None or isinstance(value, six.string_types):
        return value
    return _unsigned_integer_to_hex_string(value)
def primaryName(self, value: Optional[str]) -> None:
    """Set the primary's name for the current view.

    :param value: the new primary name, or None when no primary is known
    """
    if value is not None:
        # A primary is known again, so re-arm the "no primary" warning.
        self.warned_no_primary = False
    self.primaryNames[self.viewNo] = value
    self.compact_primary_names()
    if value != self._primaryName:
        self._primaryName = value
        self.logger.info("{} setting primaryName for view no {} to: {}".format(
            self, self.viewNo, value))
        if value is None:
            # Since the GC needs to happen after a primary has been
            # decided.
            return
        self._gc_before_new_view()
        if self.__should_reset_watermarks_before_new_view():
            self._reset_watermarks_before_new_view()
def create_menu(self):
    """Create the MenuBar for the GUI.

    Current structure is:
    File: Change Working Directory, Import Interpretations from LSQ file,
        Import interpretations from a redo file, Save interpretations to a
        redo file, Save MagIC tables, Save Plots
    Edit: New Interpretation, Delete Interpretation, Next Interpretation,
        Previous Interpretation, Next Specimen, Previous Specimen,
        Flag Measurement Data, Coordinate Systems
    Analysis: Acceptance Criteria, Sample Orientation, Flag Interpretations
    Tools: Interpretation Editor, VGP Viewer
    Help: Usage and Tips, PmagPy Cookbook, Open Docs, Github Page,
        Open Debugger
    """
    self.menubar = wx.MenuBar()

    # --- File menu ---
    menu_file = wx.Menu()
    m_change_WD = menu_file.Append(-1, "Change Working Directory\tCtrl-W", "")
    self.Bind(wx.EVT_MENU, self.on_menu_change_working_directory, m_change_WD)
    m_import_meas_file = menu_file.Append(-1, "Change measurements file", "")
    self.Bind(wx.EVT_MENU, self.on_menu_import_meas_file, m_import_meas_file)
    m_import_LSQ = menu_file.Append(-1, "&Import Interpretations from LSQ file\tCtrl-L", "")
    self.Bind(wx.EVT_MENU, self.on_menu_read_from_LSQ, m_import_LSQ)
    m_previous_interpretation = menu_file.Append(-1, "&Import interpretations from a redo file\tCtrl-R", "")
    self.Bind(wx.EVT_MENU, self.on_menu_previous_interpretation, m_previous_interpretation)
    m_save_interpretation = menu_file.Append(-1, "&Save interpretations to a redo file\tCtrl-S", "")
    self.Bind(wx.EVT_MENU, self.on_menu_save_interpretation, m_save_interpretation)
    m_make_MagIC_results_tables = menu_file.Append(-1, "&Save MagIC tables\tCtrl-Shift-S", "")
    self.Bind(wx.EVT_MENU, self.on_menu_make_MagIC_results_tables, m_make_MagIC_results_tables)

    # "Save plot" submenu; the extra string argument is passed through to
    # the handler to select which figure to save.
    submenu_save_plots = wx.Menu()
    m_save_zij_plot = submenu_save_plots.Append(-1, "&Save Zijderveld plot", "")
    self.Bind(wx.EVT_MENU, self.on_save_Zij_plot, m_save_zij_plot, "Zij")
    m_save_eq_plot = submenu_save_plots.Append(-1, "&Save specimen equal area plot", "")
    self.Bind(wx.EVT_MENU, self.on_save_Eq_plot, m_save_eq_plot, "specimen-Eq")
    m_save_M_t_plot = submenu_save_plots.Append(-1, "&Save M-t plot", "")
    self.Bind(wx.EVT_MENU, self.on_save_M_t_plot, m_save_M_t_plot, "M_t")
    m_save_high_level = submenu_save_plots.Append(-1, "&Save high level plot", "")
    self.Bind(wx.EVT_MENU, self.on_save_high_level, m_save_high_level, "Eq")
    m_save_all_plots = submenu_save_plots.Append(-1, "&Save all plots", "")
    self.Bind(wx.EVT_MENU, self.on_save_all_figures, m_save_all_plots)
    m_new_sub_plots = menu_file.AppendSubMenu(submenu_save_plots, "&Save plot")

    menu_file.AppendSeparator()
    m_exit = menu_file.Append(-1, "E&xit\tCtrl-Q", "Exit")
    self.Bind(wx.EVT_MENU, self.on_menu_exit, m_exit)

    # --- Edit menu ---
    menu_edit = wx.Menu()
    m_new = menu_edit.Append(-1, "&New interpretation\tCtrl-N", "")
    self.Bind(wx.EVT_MENU, self.on_btn_add_fit, m_new)
    m_delete = menu_edit.Append(-1, "&Delete interpretation\tCtrl-D", "")
    self.Bind(wx.EVT_MENU, self.on_btn_delete_fit, m_delete)
    m_next_interp = menu_edit.Append(-1, "&Next interpretation\tCtrl-Up", "")
    self.Bind(wx.EVT_MENU, self.on_menu_next_interp, m_next_interp)
    m_previous_interp = menu_edit.Append(-1, "&Previous interpretation\tCtrl-Down", "")
    self.Bind(wx.EVT_MENU, self.on_menu_prev_interp, m_previous_interp)
    m_next_specimen = menu_edit.Append(-1, "&Next Specimen\tCtrl-Right", "")
    self.Bind(wx.EVT_MENU, self.on_next_button, m_next_specimen)
    m_previous_specimen = menu_edit.Append(-1, "&Previous Specimen\tCtrl-Left", "")
    self.Bind(wx.EVT_MENU, self.on_prev_button, m_previous_specimen)

    menu_flag_meas = wx.Menu()
    m_good = menu_flag_meas.Append(-1, "&Good Measurement\tCtrl-Alt-G", "")
    self.Bind(wx.EVT_MENU, self.on_menu_flag_meas_good, m_good)
    m_bad = menu_flag_meas.Append(-1, "&Bad Measurement\tCtrl-Alt-B", "")
    self.Bind(wx.EVT_MENU, self.on_menu_flag_meas_bad, m_bad)
    m_flag_meas = menu_edit.AppendSubMenu(menu_flag_meas, "&Flag Measurement Data")

    # Coordinate systems: geographic / tilt-corrected entries only appear
    # when the data set actually provides them.
    menu_coordinates = wx.Menu()
    m_speci = menu_coordinates.Append(-1, "&Specimen Coordinates\tCtrl-P", "")
    self.Bind(wx.EVT_MENU, self.on_menu_change_speci_coord, m_speci)
    if "geographic" in self.coordinate_list:
        m_geo = menu_coordinates.Append(-1, "&Geographic Coordinates\tCtrl-G", "")
        self.Bind(wx.EVT_MENU, self.on_menu_change_geo_coord, m_geo)
    if "tilt-corrected" in self.coordinate_list:
        m_tilt = menu_coordinates.Append(-1, "&Tilt-Corrected Coordinates\tCtrl-T", "")
        self.Bind(wx.EVT_MENU, self.on_menu_change_tilt_coord, m_tilt)
    m_coords = menu_edit.AppendSubMenu(menu_coordinates, "&Coordinate Systems")

    # --- Analysis menu ---
    menu_Analysis = wx.Menu()
    submenu_criteria = wx.Menu()
    m_change_criteria_file = submenu_criteria.Append(-1, "&Change acceptance criteria", "")
    self.Bind(wx.EVT_MENU, self.on_menu_change_criteria, m_change_criteria_file)
    m_import_criteria_file = submenu_criteria.Append(-1, "&Import criteria file", "")
    self.Bind(wx.EVT_MENU, self.on_menu_criteria_file, m_import_criteria_file)
    m_new_sub = menu_Analysis.AppendSubMenu(submenu_criteria, "Acceptance criteria")

    menu_flag_fit = wx.Menu()
    m_good_fit = menu_flag_fit.Append(-1, "&Good Interpretation\tCtrl-Shift-G", "")
    self.Bind(wx.EVT_MENU, self.on_menu_flag_fit_good, m_good_fit)
    m_bad_fit = menu_flag_fit.Append(-1, "&Bad Interpretation\tCtrl-Shift-B", "")
    self.Bind(wx.EVT_MENU, self.on_menu_flag_fit_bad, m_bad_fit)
    m_flag_fit = menu_Analysis.AppendSubMenu(menu_flag_fit, "&Flag Interpretations")

    submenu_sample_check = wx.Menu()
    m_check_orient = submenu_sample_check.Append(-1, "&Check Sample Orientations\tCtrl-O", "")
    self.Bind(wx.EVT_MENU, self.on_menu_check_orient, m_check_orient)
    m_mark_samp_bad = submenu_sample_check.Append(-1, "&Mark Sample Bad\tCtrl-.", "")
    self.Bind(wx.EVT_MENU, self.on_menu_mark_samp_bad, m_mark_samp_bad)
    m_mark_samp_good = submenu_sample_check.Append(-1, "&Mark Sample Good\tCtrl-,", "")
    self.Bind(wx.EVT_MENU, self.on_menu_mark_samp_good, m_mark_samp_good)
    m_submenu = menu_Analysis.AppendSubMenu(submenu_sample_check, "Sample Orientation")

    # BUG FIX: the original built these check items by exec()-ing a
    # generated statement whose format string contained three %s
    # placeholders but was given only two arguments, raising TypeError at
    # runtime.  Build the items directly instead of via exec().
    submenu_toggle_mean_display = wx.Menu()
    for fit_name in self.all_fits_list:
        m_toggle_mean = submenu_toggle_mean_display.AppendCheckItem(-1, '&%s' % fit_name, '')
        self.Bind(wx.EVT_MENU, self.on_menu_toggle_mean, m_toggle_mean)
    menu_Analysis.AppendSubMenu(submenu_toggle_mean_display, "Toggle Mean Display")

    # --- Tools menu ---
    menu_Tools = wx.Menu()
    m_edit_interpretations = menu_Tools.Append(-1, "&Interpretation editor\tCtrl-E", "")
    self.Bind(wx.EVT_MENU, self.on_menu_edit_interpretations, m_edit_interpretations)
    m_view_VGP = menu_Tools.Append(-1, "&View VGPs\tCtrl-Shift-V", "")
    self.Bind(wx.EVT_MENU, self.on_menu_view_vgps, m_view_VGP)

    # --- Help menu ---
    menu_Help = wx.Menu()
    m_help = menu_Help.Append(-1, "&Usage and Tips\tCtrl-H", "")
    self.Bind(wx.EVT_MENU, self.on_menu_help, m_help)
    m_cookbook = menu_Help.Append(-1, "&PmagPy Cookbook\tCtrl-Shift-W", "")
    self.Bind(wx.EVT_MENU, self.on_menu_cookbook, m_cookbook)
    m_docs = menu_Help.Append(-1, "&Open Docs\tCtrl-Shift-H", "")
    self.Bind(wx.EVT_MENU, self.on_menu_docs, m_docs)
    m_git = menu_Help.Append(-1, "&Github Page\tCtrl-Shift-G", "")
    self.Bind(wx.EVT_MENU, self.on_menu_git, m_git)
    m_debug = menu_Help.Append(-1, "&Open Debugger\tCtrl-Shift-D", "")
    self.Bind(wx.EVT_MENU, self.on_menu_debug, m_debug)

    self.menubar.Append(menu_file, "&File")
    self.menubar.Append(menu_edit, "&Edit")
    self.menubar.Append(menu_Analysis, "&Analysis")
    self.menubar.Append(menu_Tools, "&Tools")
    self.menubar.Append(menu_Help, "&Help")
    self.SetMenuBar(self.menubar)
def do_genesis(args, data_dir=None):
    """Given the command args, take a series of input files containing
    GenesisData, combine all the batches into one GenesisData, and output
    the result into a new file.

    :param args: parsed CLI args with ``input_file`` (list of paths) and
        optional ``output`` path
    :param data_dir: data directory; defaults to ``get_data_dir()``
    :raises CliException: when the data dir is missing or a file is unreadable
    """
    if data_dir is None:
        data_dir = get_data_dir()
    if not os.path.exists(data_dir):
        raise CliException("Data directory does not exist: {}".format(data_dir))

    genesis_batches = []
    for input_file in args.input_file:
        print('Processing {}...'.format(input_file))
        input_data = BatchList()
        # BUG FIX: the original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; catch Exception and chain the cause.
        try:
            with open(input_file, 'rb') as in_file:
                input_data.ParseFromString(in_file.read())
        except Exception as err:
            raise CliException('Unable to read {}'.format(input_file)) from err
        genesis_batches += input_data.batches

    _validate_depedencies(genesis_batches)
    _check_required_settings(genesis_batches)

    if args.output:
        genesis_file = args.output
    else:
        genesis_file = os.path.join(data_dir, 'genesis.batch')

    print('Generating {}'.format(genesis_file))
    output_data = GenesisData(batches=genesis_batches)
    with open(genesis_file, 'wb') as out_file:
        out_file.write(output_data.SerializeToString())
def get_avatar_metadata ( self ) : """Gets the metadata for an asset . return : ( osid . Metadata ) - metadata for the asset * compliance : mandatory - - This method must be implemented . *"""
# Implemented from template for osid . resource . ResourceForm . get _ group _ metadata _ template metadata = dict ( self . _mdata [ 'avatar' ] ) metadata . update ( { 'existing_id_values' : self . _my_map [ 'avatarId' ] } ) return Metadata ( ** metadata )
def parallel_loop(func, n_jobs=1, verbose=1):
    """Run loops in parallel, if joblib is available.

    Parameters
    ----------
    func : function
        function to be executed in parallel
    n_jobs : int | None
        Number of jobs. If set to None, do not attempt to use joblib.
    verbose : int
        verbosity level

    Notes
    -----
    Execution of the main script must be guarded with
    ``if __name__ == '__main__':`` when using parallelization.

    Returns
    -------
    (callable, callable)
        A runner (``Parallel`` instance or plain list-materializer) and
        the (possibly ``delayed``-wrapped) function.
    """
    if n_jobs:
        try:
            from joblib import Parallel, delayed
        except ImportError:
            try:
                from sklearn.externals.joblib import Parallel, delayed
            except ImportError:
                # joblib is unavailable in any form; fall back to serial.
                n_jobs = None
    if n_jobs:
        if verbose:
            print('running ', func, ' in parallel')
        return Parallel(n_jobs=n_jobs, verbose=verbose), delayed(func)
    if verbose:
        print('running ', func, ' serially')
    return (lambda sequence: list(sequence)), func
def process_tag(self, tag):
    """Processes tag and detects which function to use.

    Dispatches *tag* to the handler registered for its ``data_type``,
    skipping tags that are functions.
    """
    try:
        if self._is_function(tag):
            return
        self._tag_type_processor[tag.data_type](tag)
    except KeyError as err:
        # No handler registered for this data type.
        raise Exception(
            'Tag type {0} not recognized for tag {1}'.format(
                tag.data_type, tag.name),
            err)
def _prepare_summary(evolve_file, ssm_file, cnv_file, work_dir, somatic_info):
    """Prepare a summary with gene-labelled heterogeneity from PhyloWGS
    predictions.

    Writes one section per tree with the ASCII tree followed by
    node-id / frequency / gene-list rows, and returns the output path.
    Skipped when the output is already newer than *evolve_file*.
    """
    out_file = os.path.join(work_dir, "%s-phylowgs.txt" % somatic_info.tumor_name)
    if not utils.file_uptodate(out_file, evolve_file):
        # Write through a transactional temp file so a failed run does not
        # leave a half-written summary behind.
        with file_transaction(somatic_info.tumor_data, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                ssm_locs = _read_ssm_locs(ssm_file)
                cnv_ssms = _read_cnv_ssms(cnv_file)
                for i, (ids, tree) in enumerate(_evolve_reader(evolve_file)):
                    out_handle.write("* Tree %s\n" % (i + 1))
                    out_handle.write("\n" + "\n".join(tree) + "\n\n")
                    for nid, freq, gids in ids:
                        # Map SSM/CNV group ids onto gene names for readability.
                        genes = _gids_to_genes(gids, ssm_locs, cnv_ssms,
                                               somatic_info.tumor_data)
                        out_handle.write("%s\t%s\t%s\n" % (nid, freq, ",".join(genes)))
                    out_handle.write("\n")
    return out_file
def hash_starts_numeric(records):
    """Yield only records whose SHA-1 sequence hash starts with a digit.

    :param records: iterable of records with a ``seq`` attribute
    :yields: records whose ``sha1(str(record.seq))`` hex digest begins
        with ``0``-``9``
    """
    for record in records:
        # BUG FIX: hashlib.sha1 requires bytes; the original passed a str,
        # which raises TypeError on Python 3.  Encode explicitly.
        seq_hash = hashlib.sha1(str(record.seq).encode("utf-8")).hexdigest()
        if seq_hash[0].isdigit():
            yield record
def gaussApprox(self, xy, **kwargs):
    """Return the mean and variance of a Gaussian approximation to the
    stream DF at a given phase-space point in Galactocentric rectangular
    coordinates (the distribution is over the missing directions).

    INPUT:

       xy - phase-space point [X,Y,Z,vX,vY,vZ]; the distribution of the
            dimensions set to None is returned

       interp= (object-wide interp default) if True, use the interpolated
            stream track

       cindx= index of the closest point on the (interpolated) stream
            track; if not given, determined from the dimensions given

       lb= (False) if True, xy contains [l,b,D,vlos,pmll,pmbb] in
            [deg,deg,kpc,km/s,mas/yr,mas/yr] and the Gaussian
            approximation in these coordinates is returned

    OUTPUT:

       (mean, variance) of the approximate Gaussian DF for the missing
       directions in xy

    HISTORY:

       2013-12-12 - Written - Bovy (IAS)
    """
    interp = kwargs.get('interp', self._useInterp)
    lb = kwargs.get('lb', False)
    # What are we looking for: mask of the dimensions that were supplied.
    coordGiven = numpy.array([not x is None for x in xy], dtype='bool')
    nGiven = numpy.sum(coordGiven)
    # First find the nearest track point
    if not 'cindx' in kwargs and lb:
        cindx = self._find_closest_trackpointLB(*xy, interp=interp,
                                                usev=True)
    elif not 'cindx' in kwargs and not lb:
        cindx = self._find_closest_trackpoint(*xy, xy=True, interp=interp,
                                              usev=True)
    else:
        cindx = kwargs['cindx']
    # Get the covariance matrix and track mean for the chosen
    # representation (interpolated or not, LB or XY).
    if interp and lb:
        tcov = self._interpolatedAllErrCovsLBUnscaled[cindx]
        tmean = self._interpolatedObsTrackLB[cindx]
    elif interp and not lb:
        tcov = self._interpolatedAllErrCovsXY[cindx]
        tmean = self._interpolatedObsTrackXY[cindx]
    elif not interp and lb:
        tcov = self._allErrCovsLBUnscaled[cindx]
        tmean = self._ObsTrackLB[cindx]
    elif not interp and not lb:
        tcov = self._allErrCovsXY[cindx]
        tmean = self._ObsTrackXY[cindx]
    if lb:
        # Apply scale factors (copy first so the stored covariances are
        # not mutated in place).
        tcov = copy.copy(tcov)
        tcov *= numpy.tile(self._ErrCovsLBScale, (6, 1))
        tcov *= numpy.tile(self._ErrCovsLBScale, (6, 1)).T
    # Fancy indexing to recover V22, V11, and V12; V22, V11, V12 as in
    # Appendix B of 0905.2979v1
    V11indx0 = numpy.array([[ii for jj in range(6 - nGiven)]
                            for ii in range(6) if not coordGiven[ii]])
    V11indx1 = numpy.array([[ii for ii in range(6) if not coordGiven[ii]]
                            for jj in range(6 - nGiven)])
    V11 = tcov[V11indx0, V11indx1]
    V22indx0 = numpy.array([[ii for jj in range(nGiven)]
                            for ii in range(6) if coordGiven[ii]])
    V22indx1 = numpy.array([[ii for ii in range(6) if coordGiven[ii]]
                            for jj in range(nGiven)])
    V22 = tcov[V22indx0, V22indx1]
    V12indx0 = numpy.array([[ii for jj in range(nGiven)]
                            for ii in range(6) if not coordGiven[ii]])
    V12indx1 = numpy.array([[ii for ii in range(6) if coordGiven[ii]]
                            for jj in range(6 - nGiven)])
    V12 = tcov[V12indx0, V12indx1]
    # Also get m1 and m2, again following Appendix B of 0905.2979v1
    # (True ^ coordGiven flips the boolean mask: the missing dimensions).
    m1 = tmean[True ^ coordGiven]
    m2 = tmean[coordGiven]
    # conditional mean and variance of the Gaussian over the missing
    # dimensions given the supplied ones
    V22inv = numpy.linalg.inv(V22)
    v2 = numpy.array([xy[ii] for ii in range(6) if coordGiven[ii]])
    condMean = m1 + numpy.dot(V12, numpy.dot(V22inv, v2 - m2))
    condVar = V11 - numpy.dot(V12, numpy.dot(V22inv, V12.T))
    return (condMean, condVar)
def mxmg(m1, m2, nrow1, ncol1, ncol2):
    """Multiply two double precision matrices of arbitrary size.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mxmg_c.html

    :param m1: nrow1 X ncol1 double precision matrix.
    :type m1: NxM-Element Array of floats
    :param m2: ncol1 X ncol2 double precision matrix.
    :type m2: NxM-Element Array of floats
    :param nrow1: Row dimension of m1
    :type nrow1: int
    :param ncol1: Column dimension of m1 and row dimension of m2.
    :type ncol1: int
    :param ncol2: Column dimension of m2
    :type ncol2: int
    :return: nrow1 X ncol2 double precision matrix.
    :rtype: NxM-Element Array of floats
    """
    # Convert the inputs into ctypes double matrices for the C call.
    m1 = stypes.toDoubleMatrix(m1)
    m2 = stypes.toDoubleMatrix(m2)
    # Output buffer sized to the product's dimensions (nrow1 x ncol2).
    mout = stypes.emptyDoubleMatrix(x=ncol2, y=nrow1)
    nrow1 = ctypes.c_int(nrow1)
    ncol1 = ctypes.c_int(ncol1)
    ncol2 = ctypes.c_int(ncol2)
    libspice.mxmg_c(m1, m2, nrow1, ncol1, ncol2, mout)
    return stypes.cMatrixToNumpy(mout)
def cart2sph(x, y, z):
    """Convert cartesian coordinates ``x``, ``y``, ``z`` into longitude
    and latitude.

    x = 0, y = 0, z = 0 is assumed to correspond to the center of the
    globe.

    Parameters
    ----------
    x, y, z : arrays of cartesian coordinates

    Returns
    -------
    lon : longitude in radians
    lat : latitude in radians
    """
    radius = np.sqrt(np.square(x) + np.square(y) + np.square(z))
    lon = np.arctan2(y, x)
    lat = np.arcsin(z / radius)
    return lon, lat
def ping():
    '''Check to see if the host is responding.

    Returns False if the host didn't respond, True otherwise.

    CLI Example:

    .. code-block:: bash

        salt cisco-nso test.ping
    '''
    try:
        # info() performs a round-trip to the device; any connection
        # failure surfaces as SaltSystemExit.
        client = _get_client()
        client.info()
    except SaltSystemExit as err:
        log.warning(err)
        return False
    return True
def _objective_function ( X , W , R , S , gamma ) : """Evaluate the objective function . . . math : : \\ sum _ { i = 1 } ^ { N } 1/2 \\ | X _ i - W _ i R - S _ i \\ | _ F ^ 2 . . math : : + / \\ gamma * \\ | S _ i \\ | _ 1 Parameters X : list of array , element i has shape = [ voxels _ i , timepoints ] Each element in the list contains the fMRI data for alignment of one subject . W : list of array , element i has shape = [ voxels _ i , features ] The orthogonal transforms ( mappings ) : math : ` W _ i ` for each subject . R : array , shape = [ features , timepoints ] The shared response . S : list of array , element i has shape = [ voxels _ i , timepoints ] The individual component : math : ` S _ i ` for each subject . gamma : float , default : 1.0 Regularization parameter for the sparseness of the individual components . Returns func : float The RSRM objective function evaluated on the parameters to this function ."""
subjs = len ( X ) func = .0 for i in range ( subjs ) : func += 0.5 * np . sum ( ( X [ i ] - W [ i ] . dot ( R ) - S [ i ] ) ** 2 ) + gamma * np . sum ( np . abs ( S [ i ] ) ) return func
def str_kwargs ( self ) : """Generator that yields a dict of values corresponding to the calendar date and time for the internal JD values ."""
iys , ims , ids , ihmsfs = d2dtf ( self . scale . upper ( ) . encode ( 'utf8' ) , 6 , self . jd1 , self . jd2 ) # Get the str _ fmt element of the first allowed output subformat _ , _ , str_fmt = self . _select_subfmts ( self . out_subfmt ) [ 0 ] yday = None has_yday = '{yday:' in str_fmt or False ihrs = ihmsfs [ ... , 0 ] imins = ihmsfs [ ... , 1 ] isecs = ihmsfs [ ... , 2 ] ifracs = ihmsfs [ ... , 3 ] for iy , im , iday , ihr , imin , isec , ifracsec in numpy . nditer ( [ iys , ims , ids , ihrs , imins , isecs , ifracs ] ) : if has_yday : yday = datetime ( iy , im , iday ) . timetuple ( ) . tm_yday fracday = ( ( ( ( ( ifracsec / 1000000.0 + isec ) / 60.0 + imin ) / 60.0 ) + ihr ) / 24.0 ) * ( 10 ** 6 ) fracday = '{0:06g}' . format ( fracday ) [ 0 : self . precision ] yield { 'year' : int ( iy ) , 'mon' : int ( im ) , 'day' : int ( iday ) , 'hour' : int ( ihr ) , 'min' : int ( imin ) , 'sec' : int ( isec ) , 'fracsec' : int ( ifracsec ) , 'yday' : yday , 'fracday' : fracday }
def get_record(self, name, record_id):
    """Retrieve a record with a given type name and record id.

    Args:
        name (string): The name which the record is stored under.
        record_id (int): The id of the record requested.

    Returns:
        :class:`cinder_data.model.CinderModel`: The cached model, or
        None when the name or id is not cached.
    """
    bucket = self._cache.get(name)
    if bucket is not None and record_id in bucket:
        return bucket[record_id]
def get_project_config(self, *args, **kwargs):
    """Returns a ProjectConfig for the given project.

    Pending deprecation: emits a DeprecationWarning on every call.
    """
    warnings.warn(
        "BaseGlobalConfig.get_project_config is pending deprecation",
        DeprecationWarning,
    )
    factory = self.project_config_class
    return factory(self, *args, **kwargs)
def subscribe_to_quorum_channel(self):
    """In case the experiment enforces a quorum, listen for notifications
    before creating Participant objects."""
    # Imported lazily to avoid pulling the socket server machinery in
    # when no quorum subscription is needed.
    from dallinger.experiment_server.sockets import chat_backend
    self.log("Bot subscribing to quorum channel.")
    chat_backend.subscribe(self, "quorum")
def addBaseType(self, item):
    '''Add a Type instance to the data model.

    Records the item's constructor path plus its opts/info snapshots in
    the model definition, and registers the item by name.
    '''
    klass = item.__class__
    ctor = '.'.join([klass.__module__, klass.__qualname__])
    entry = (item.name, ctor, dict(item.opts), dict(item.info))
    self._modeldef['ctors'].append(entry)
    self.types[item.name] = item
def superuser_only(view_func):
    """Limit a view to superuser only.

    Wraps *view_func* so that non-superuser requests raise
    PermissionDenied before the view runs.
    """
    def wrapped(request, *args, **kwargs):
        if request.user.is_superuser:
            return view_func(request, *args, **kwargs)
        raise PermissionDenied
    return wrapped
def process_request(self, request, response):
    """Logs the basic endpoint requested."""
    message = 'Requested: {0} {1} {2}'.format(
        request.method,
        request.relative_uri,
        request.content_type,
    )
    self.logger.info(message)
def bbox_vert_aligned_right(box1, box2):
    """Returns true if the right boundary of both boxes is within 2 pts."""
    if box1 and box2:
        return abs(box1.right - box2.right) <= 2
    return False
def compact(self, term_doc_matrix):
    '''Prune lower-ranked terms from a term-document matrix.

    Parameters
    ----------
    term_doc_matrix : TermDocMatrix
        Term document matrix object to compact

    Returns
    -------
    TermDocMatrix
    '''
    ranks = self.scorer.get_rank_df(term_doc_matrix)
    return self._prune_higher_ranked_terms(term_doc_matrix, ranks, self.rank)
def _Soundex(self):
    """The Soundex phonetic indexing algorithm adapted to ME phonology.

    Algorithm (let w be the original word and W the resulting one):
      1) Capitalize the first letter of w and append it to W
      2) Apply the replacement rules:
         p, b, f, v, gh (non-nasal fricatives)                  -> 1
         t, d, s, sh, z, k, g, w (non-nasal alveolars & velars) -> 2
         l (alveolar lateral)                                   -> 3
         m, n (nasals)                                          -> 4
         r (alveolar approximant)                               -> 5
      3) Concatenate multiple occurrences of numbers into one
      4) Remove non-numerical characters

    Notes:
        /h/ was thought to be either a voiceless or velar fricative when
        occurring in the coda, with its most used grapheme being <gh>.
        Those phonemes either disappeared, lengthening the preceding
        vowel clusters, or developed into /f/ as evident by modern
        spelling (e.g. 'enough': /ɪˈnʌf/ and 'though': /ðəʊ/).

    Examples:
        >>> Word("midel").phonetic_indexing(p="SE")
        'M230'
        >>> Word("myddle").phonetic_indexing(p="SE")
        'M230'
        >>> Word("might").phonetic_indexing(p="SE")
        'M120'
        >>> Word("myghtely").phonetic_indexing(p="SE")
        'M123'
    """
    # Work on everything after the first letter; the initial letter is
    # kept verbatim (capitalized) in the final code.
    word = self.word[1:]
    # NOTE(review): replacement order follows dict_SE's iteration order,
    # which matters for multi-character graphemes like "gh" / "sh".
    for w, val in zip(dict_SE.keys(), dict_SE.values()):
        word = word.replace(w, val)
    # Remove multiple adjacent occurences of digit
    word = re.sub(r"(\d)\1+", r"\1", word)
    # Strip remaining letters
    word = re.sub(r"[a-zðþƿ]+", "", word)
    # Add trailing zeroes and return
    return (self.word[0].upper() + word + "0" * 3)[:4]
def return_hdr(self):
    """Return the header for further use.

    Returns
    -------
    subj_id : str
        subject identification code
    start_time : datetime
        start time of the dataset
    s_freq : float
        sampling frequency
    chan_name : list of str
        list of all the channels
    n_samples : int
        number of samples in the dataset
    orig : dict
        additional information taken directly from the header

    Raises
    ------
    ValueError
        when the channels report more than one sampling frequency
    """
    subj_id = self.task.subject
    # Collect the per-channel sampling frequencies; they must all agree.
    sampling_freq = set(self.task.channels.get(
        map_lambda=lambda x: x['sampling_frequency']))
    if len(sampling_freq) > 1:
        raise ValueError('Multiple sampling frequencies not supported')
    s_freq = float(next(iter(sampling_freq)))
    chan_name = self.task.channels.get(map_lambda=lambda x: x['name'])
    # Cached as an array for later use by other methods on this reader.
    self.chan_name = array(chan_name)
    # read these values directly from dataset
    orig = self.baseformat.header
    start_time = orig['start_time']
    n_samples = orig['n_samples']
    return subj_id, start_time, s_freq, chan_name, n_samples, orig
def server_bind(self):
    """Override to enable IPV4 mapping for IPV6 sockets when desired.

    The main use case for this is so that when no host is specified,
    TensorBoard can listen on all interfaces for both IPv4 and IPv6
    connections, rather than having to choose v4 or v6 and hope the
    browser didn't choose the other one.
    """
    # Feature-detect rather than assume: not every platform build of the
    # socket module exposes the IPv6 constants.
    socket_is_v6 = (
        hasattr(socket, 'AF_INET6') and self.socket.family == socket.AF_INET6)
    has_v6only_option = (
        hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY'))
    if self._auto_wildcard and socket_is_v6 and has_v6only_option:
        try:
            # Clearing IPV6_V6ONLY lets the v6 wildcard socket also accept
            # IPv4 connections (dual-stack binding).
            self.socket.setsockopt(socket.IPPROTO_IPV6,
                                   socket.IPV6_V6ONLY,
                                   0)
        except socket.error as e:
            # Log a warning on failure to dual-bind, except for EAFNOSUPPORT
            # since that's expected if IPv4 isn't supported at all (IPv6-only).
            if hasattr(errno, 'EAFNOSUPPORT') and e.errno != errno.EAFNOSUPPORT:
                logger.warn('Failed to dual-bind to IPv4 wildcard: %s',
                            str(e))
    super(WerkzeugServer, self).server_bind()
def log_download(self, log_num, filename):
    '''download a log file

    Starts an asynchronous MAVLink log download: opens the target file,
    requests the data, and resets the bookkeeping used by the receive
    handler to track progress and retries.
    '''
    print("Downloading log %u as %s" % (log_num, filename))
    self.download_lognum = log_num
    self.download_file = open(filename, "wb")
    # Request the entire log: offset 0, count 0xFFFFFFFF (i.e. "all").
    self.master.mav.log_request_data_send(self.target_system,
                                          self.target_component,
                                          log_num, 0, 0xFFFFFFFF)
    self.download_filename = filename
    # Set of received chunk offsets, used to detect gaps for re-request.
    self.download_set = set()
    self.download_start = time.time()
    self.download_last_timestamp = time.time()
    self.download_ofs = 0
    self.retries = 0
def private_dir_path(app_name):
    """Returns the private directory path.

    :param str app_name: the name of the app
    :rtype: str
    :returns: directory path
    """
    # force_posix places the dir at ~/.<app_name> on Mac and Unix.
    app_dir = click.get_app_dir(app_name, force_posix=True)
    return os.path.expanduser(app_dir)
def mediate_transfer(
        state: MediatorTransferState,
        possible_routes: List['RouteState'],
        payer_channel: NettingChannelState,
        channelidentifiers_to_channels: ChannelMap,
        nodeaddresses_to_networkstates: NodeNetworkStateMap,
        pseudo_random_generator: random.Random,
        payer_transfer: LockedTransferSignedState,
        block_number: BlockNumber,
) -> TransitionResult[MediatorTransferState]:
    """Try a new route or fail back to a refund.

    The mediator can safely try a new route knowing that the tokens from
    payer_transfer will cover the expenses of the mediation. If there is no
    route available that may be used at the moment of the call the mediator
    may send a refund back to the payer, allowing the payer to try a
    different route.
    """
    # Candidate routes: drop partners that are currently offline, then drop
    # routes already used by an earlier transfer pair of this mediation.
    reachable_routes = filter_reachable_routes(
        possible_routes,
        nodeaddresses_to_networkstates,
    )
    available_routes = filter_used_routes(
        state.transfers_pair,
        reachable_routes,
    )

    # Sanity check: the payer channel must be with the transfer's sender.
    assert payer_channel.partner_state.address == payer_transfer.balance_proof.sender

    transfer_pair, mediated_events = forward_transfer_pair(
        payer_transfer,
        available_routes,
        channelidentifiers_to_channels,
        pseudo_random_generator,
        block_number,
    )

    if transfer_pair is None:
        # Forwarding was not possible; fall back to refunding the payer on
        # the channel of the *first* transfer pair (the original payer).
        assert not mediated_events

        if state.transfers_pair:
            original_pair = state.transfers_pair[0]
            original_channel = get_payer_channel(
                channelidentifiers_to_channels,
                original_pair,
            )
        else:
            original_channel = payer_channel

        if original_channel:
            transfer_pair, mediated_events = backward_transfer_pair(
                original_channel,
                payer_transfer,
                pseudo_random_generator,
                block_number,
            )
        else:
            transfer_pair = None
            mediated_events = list()

    if transfer_pair is None:
        # Neither forwarding nor refunding was possible: park the transfer
        # and wait (e.g. for a route to become available).
        assert not mediated_events
        mediated_events = list()
        state.waiting_transfer = WaitingTransferState(payer_transfer)

    else:
        # the list must be ordered from high to low expiration, expiration
        # handling depends on it
        state.transfers_pair.append(transfer_pair)

    return TransitionResult(state, mediated_events)
def cli(env, identifier):
    """Delete an image."""
    manager = SoftLayer.ImageManager(env.client)
    # Resolve a possibly-symbolic identifier to a numeric image id first.
    resolved_id = helpers.resolve_id(manager.resolve_ids, identifier, 'image')
    manager.delete_image(resolved_id)
def bvlpdu_contents(self, use_dict=None, as_class=dict):
    """Return the contents of an object as a dict.

    :param use_dict: existing mapping to update in place; when None a new
        ``as_class`` instance is created
    :param as_class: mapping factory used when ``use_dict`` is None
    :returns: the mapping with 'address', 'ttl' and 'remaining' filled in
    """
    # make/extend the dictionary of content
    if use_dict is None:
        use_dict = as_class()

    # save the content; plain item assignment instead of explicit
    # __setitem__ calls (identical semantics, idiomatic form)
    use_dict['address'] = str(self.fdAddress)
    use_dict['ttl'] = self.fdTTL
    use_dict['remaining'] = self.fdRemain

    # return what we built/updated
    return use_dict
def _update_repo(ret, name, target, clean, user, identity, rev, opts, update_head):
    '''
    Update the repo to a given revision. Using clean passes -C to the hg up
    '''
    log.debug(
        'target %s is found, "hg pull && hg up is probably required"', target
    )

    # Revision currently checked out ('.'); empty output means `target`
    # is not a usable mercurial repository.
    current_rev = __salt__['hg.revision'](target, user=user, rev='.')
    if not current_rev:
        return _fail(
            ret,
            'Seems that {0} is not a valid hg repo'.format(target))

    if __opts__['test']:
        # Dry-run mode: report what would happen without touching the repo.
        test_result = (
            'Repository {0} update is probably required (current '
            'revision is {1})').format(target, current_rev)
        return _neutral_test(ret, test_result)

    try:
        pull_out = __salt__['hg.pull'](target, user=user, identity=identity, opts=opts, repository=name)
    except CommandExecutionError as err:
        ret['result'] = False
        # NOTE(review): `err` here is the exception object rather than a
        # string, unlike the formatted comments elsewhere -- confirm intended.
        ret['comment'] = err
        return ret

    if update_head is False:
        # Pull only; report whether an update would have been needed.
        changes = 'no changes found' not in pull_out
        if changes:
            ret['comment'] = 'Update is probably required but update_head=False so we will skip updating.'
        else:
            ret['comment'] = 'No changes found and update_head=False so will skip updating.'
        return ret

    # Update the working copy, to the requested revision if given, else tip.
    if rev:
        try:
            __salt__['hg.update'](target, rev, force=clean, user=user)
        except CommandExecutionError as err:
            ret['result'] = False
            ret['comment'] = err
            return ret
    else:
        try:
            __salt__['hg.update'](target, 'tip', force=clean, user=user)
        except CommandExecutionError as err:
            ret['result'] = False
            ret['comment'] = err
            return ret

    new_rev = __salt__['hg.revision'](cwd=target, user=user, rev='.')

    if current_rev != new_rev:
        revision_text = '{0} => {1}'.format(current_rev, new_rev)
        log.info('Repository %s updated: %s', target, revision_text)
        ret['comment'] = 'Repository {0} updated.'.format(target)
        ret['changes']['revision'] = revision_text
    elif 'error:' in pull_out:
        # Pull produced an error and no revision change; surface hg's output.
        return _fail(
            ret,
            'An error was thrown by hg:\n{0}'.format(pull_out)
        )
    return ret
def split_s3_path(url: str) -> Tuple[str, str]:
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    # urlparse keeps the leading '/' on the path component; drop exactly one.
    key = parsed.path[1:] if parsed.path.startswith("/") else parsed.path
    return parsed.netloc, key
def _filter_subgraph ( self , subgraph , predicate ) : """Given a subgraph of the manifest , and a predicate , filter the subgraph using that predicate . Generates a list of nodes ."""
to_return = [ ] for unique_id , item in subgraph . items ( ) : if predicate ( item ) : to_return . append ( item ) return to_return
def _gen_cache_key_for_slice(url_dict, start_int, total_int, authn_subj_list):
    """Generate the cache key for the slice of a multi-slice result set
    starting at ``start_int``.

    The ``start`` and ``count`` query parameters are stripped from the URL
    before hashing so that the key is stable while the client advances
    through the slices and may adjust the slice size. The active subjects
    are mixed into the key so that a change of authenticated subjects
    during slicing cannot leak results. The normalized JSON is hashed both
    for better hash-map distribution and to stay under key-length limits
    of some cache backends.
    """
    key_url_dict = copy.deepcopy(url_dict)
    for transient_param in ('start', 'count'):
        key_url_dict['query'].pop(transient_param, None)
    key_json = d1_common.util.serialize_to_normalized_compact_json({
        'url_dict': key_url_dict,
        'start': start_int,
        'total': total_int,
        'subject': authn_subj_list,
    })
    logging.debug('key_json={}'.format(key_json))
    return hashlib.sha256(key_json.encode('utf-8')).hexdigest()
def validate_unit_process_ids(self, expected, actual):
    """Validate process id quantities for services on units.

    :param expected: dict mapping sentry -> {process name: expectation},
        where the expectation may be an int (exact count), a list of
        acceptable counts, True (one or more PIDs) or False (zero PIDs).
    :param actual: dict mapping sentry -> {process name: list of PIDs}
    :returns: None on success, otherwise an error message string
    """
    self.log.debug('Checking units for running processes...')
    self.log.debug('Expected PIDs: {}'.format(expected))
    self.log.debug('Actual PIDs: {}'.format(actual))

    if len(actual) != len(expected):
        return ('Unit count mismatch. expected, actual: {}, '
                '{} '.format(len(expected), len(actual)))

    for (e_sentry, e_proc_names) in expected.items():
        e_sentry_name = e_sentry.info['unit_name']
        if e_sentry not in actual:
            return ('Expected sentry ({}) not found in actual dict data.'
                    '{}'.format(e_sentry_name, e_sentry))
        a_proc_names = actual[e_sentry]

        if len(e_proc_names) != len(a_proc_names):
            # Bug fix: this message previously reported the *unit* counts
            # (len(expected)/len(actual)) instead of the per-unit process
            # name counts it is complaining about.
            return ('Process name count mismatch. expected, actual: {}, '
                    '{}'.format(len(e_proc_names), len(a_proc_names)))

        for (e_proc_name, e_pids), (a_proc_name, a_pids) in zip(
                e_proc_names.items(), a_proc_names.items()):
            if e_proc_name != a_proc_name:
                return ('Process name mismatch. expected, actual: {}, '
                        '{}'.format(e_proc_name, a_proc_name))

            a_pids_length = len(a_pids)
            fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
                        '{}, {} ({})'.format(e_sentry_name, e_proc_name,
                                             e_pids, a_pids_length,
                                             a_pids))

            # If expected is a list, ensure at least one PID quantity match
            if isinstance(e_pids, list) and a_pids_length not in e_pids:
                return fail_msg
            # If expected is not bool and not list,
            # ensure PID quantities match
            elif (not isinstance(e_pids, bool) and
                  not isinstance(e_pids, list) and
                  a_pids_length != e_pids):
                return fail_msg
            # If expected is bool True, ensure 1 or more PIDs exist
            elif (isinstance(e_pids, bool) and
                  e_pids is True and a_pids_length < 1):
                return fail_msg
            # If expected is bool False, ensure 0 PIDs exist
            elif (isinstance(e_pids, bool) and
                  e_pids is False and a_pids_length != 0):
                return fail_msg
            else:
                self.log.debug('PID check OK: {} {} {}: '
                               '{}'.format(e_sentry_name, e_proc_name,
                                           e_pids, a_pids))
    return None
def arguments():
    """Pulls in command line arguments."""
    DESCRIPTION = """\
"""
    parser = argparse.ArgumentParser(description=DESCRIPTION,
                                     formatter_class=Raw)

    def add_value_opt(flag, dest, help_text, required=False, default=False,
                      **extra):
        # Plain 'store' option; extra kwargs (e.g. type=) pass through.
        parser.add_argument(flag, dest=dest, action='store',
                            required=required, default=default,
                            help=help_text, **extra)

    def add_flag_opt(flag, dest, help_text):
        # Boolean switch.
        parser.add_argument(flag, dest=dest, action='store_true',
                            required=False, help=help_text)

    add_value_opt("--email", "email",
                  "An email address is required for querying Entrez databases.")
    add_value_opt("--api", "api_key",
                  "A users ENTREZ API Key. Will speed up download.")
    add_value_opt("--query", "query", "Query to submit to Entrez.",
                  required=True)
    add_value_opt("--host", "host",
                  "Location of an already running database.",
                  default='localhost')
    add_value_opt("--port", "port", "Mongo database port.",
                  default=27017, type=int)
    add_value_opt("--db", "db", "Name of the database.",
                  default='sramongo')
    add_flag_opt("--debug", "debug", "Turn on debug output.")
    add_flag_opt("--force", "force", "Forces clearing the cache.")

    args = parser.parse_args()

    # Entrez requires either an email address or an API key.
    if not (args.email or args.api_key):
        logger.error('You must provide either an `--email` or `--api`.')
        sys.exit()

    return args
def get_doc_entries(target: typing.Callable) -> list:
    """Get the documentation lines for *target*, merged so that each list
    element is one documentation entry (a ``:param:``-style tag starts a new
    entry; continuation lines are folded into the previous one).

    :param target:
    :return:
        A list of strings containing the documentation block entries
    """
    raw = get_docstring(target)
    if not raw:
        return []

    stripped_lines = [l.strip() for l in raw.replace('\r', '').split('\n')]

    blocks = []
    for line in stripped_lines:
        if not line:
            # Blank lines never start or extend an entry.
            continue
        if not blocks or line.startswith(':'):
            blocks.append(line)
        else:
            # Continuation of the previous entry.
            blocks[-1] = '{}\n{}'.format(blocks[-1], line)

    return [textwrap.dedent(b).strip() for b in blocks]
def _extract_value(self, value):
    """If the value is true/false/null replace with Python equivalent."""
    normalized = smart_str(value).lower()
    return ModelEndpoint._value_map.get(normalized, value)
def _update_case ( self , bs , ln , gn , base_mva , Yf , Yt , Va , Vm , Pg , Qg , lmbda ) : """Calculates the result attribute values ."""
V = Vm * exp ( 1j * Va ) # Va _ var = self . om . get _ var ( " Va " ) Vm_var = self . _Vm Pmis = self . om . get_nln_constraint ( "Pmis" ) Qmis = self . om . get_nln_constraint ( "Qmis" ) Pg_var = self . _Pg Qg_var = self . _Qg # mu _ l = lmbda [ " mu _ l " ] # mu _ u = lmbda [ " mu _ u " ] lower = lmbda [ "lower" ] upper = lmbda [ "upper" ] ineqnonlin = lmbda [ "ineqnonlin" ] eqnonlin = lmbda [ "eqnonlin" ] # Indexes of constrained lines . nl2 = len ( [ i for i , l in enumerate ( ln ) if 0.0 < l . rate_a < 1e10 ] ) for i , bus in enumerate ( bs ) : bus . v_angle = Va [ i ] * 180.0 / pi bus . v_magnitude = Vm [ i ] bus . p_lmbda = eqnonlin [ Pmis . i1 : Pmis . iN + 1 ] [ i ] / base_mva bus . q_lmbda = eqnonlin [ Qmis . i1 : Qmis . iN + 1 ] [ i ] / base_mva bus . mu_vmax = upper [ Vm_var . i1 : Vm_var . iN + 1 ] [ i ] bus . mu_vmin = lower [ Vm_var . i1 : Vm_var . iN + 1 ] [ i ] for l , branch in enumerate ( ln ) : Sf = V [ branch . from_bus . _i ] * conj ( Yf [ l , : ] * V ) * base_mva St = V [ branch . to_bus . _i ] * conj ( Yt [ l , : ] * V ) * base_mva branch . p_from = Sf . real [ 0 ] branch . q_from = Sf . imag [ 0 ] branch . p_to = St . real [ 0 ] branch . q_to = St . imag [ 0 ] if 0.0 < branch . rate_a < 1e10 : branch . mu_s_from = 2 * ineqnonlin [ : nl2 ] [ l ] * branch . rate_a / base_mva / base_mva branch . mu_s_to = 2 * ineqnonlin [ nl2 : 2 * nl2 ] [ l ] * branch . rate_a / base_mva / base_mva for k , generator in enumerate ( gn ) : generator . p = Pg [ k ] * base_mva generator . q = Qg [ k ] * base_mva generator . v_magnitude = generator . bus . v_magnitude generator . mu_pmax = upper [ Pg_var . i1 : Pg_var . iN + 1 ] [ k ] / base_mva generator . mu_pmin = lower [ Pg_var . i1 : Pg_var . iN + 1 ] [ k ] / base_mva generator . mu_qmax = upper [ Qg_var . i1 : Qg_var . iN + 1 ] [ k ] / base_mva generator . mu_qmin = lower [ Qg_var . i1 : Qg_var . iN + 1 ] [ k ] / base_mva
def encode(self, obj):
    """Serialize *obj* to a pickle binary string.

    Raises
    ------
    ~ipfsapi.exceptions.EncodingError

    Parameters
    ----------
    obj : object
        Serializable Python object

    Returns
    -------
    bytes
    """
    try:
        serialized = pickle.dumps(obj)
    except pickle.PicklingError as error:
        # Wrap the library error in the package's own exception type.
        raise exceptions.EncodingError('pickle', error)
    return serialized
def get_default_config(self):
    """Return the default config for the handler."""
    defaults = super(MultiGraphitePickleHandler, self).get_default_config()
    # Layer this handler's own defaults on top of the base configuration.
    defaults['host'] = ['localhost']
    defaults['port'] = 2003
    defaults['proto'] = 'tcp'
    defaults['timeout'] = 15
    defaults['batch'] = 1
    defaults['max_backlog_multiplier'] = 5
    defaults['trim_backlog_multiplier'] = 4
    return defaults
def get_template(self, template_id):
    """Load a particular lightcurve template.

    Parameters
    ----------
    template_id : str
        id of desired template

    Returns
    -------
    phase : ndarray
        array of phases
    mag : ndarray
        array of normalized magnitudes
    """
    member_name = template_id + '.dat'
    try:
        # Templates live as '<id>.dat' members inside the data archive;
        # a missing member surfaces as KeyError.
        data = np.loadtxt(self.data.extractfile(member_name))
    except KeyError:
        raise ValueError("invalid star id: {0}".format(template_id))
    phase, mag = data[:, 0], data[:, 1]
    return phase, mag
def _verify_configs(configs):
    """Verify a Molecule config was found and returns None.

    :param configs: A list containing absolute paths to Molecule config files.
    :return: None
    """
    if not configs:
        # Nothing matched the glob at all.
        msg = "'{}' glob failed. Exiting.".format(MOLECULE_GLOB)
        util.sysexit_with_message(msg)
        return

    # Scenario names must be unique across all discovered configs.
    name_counts = collections.Counter(c.scenario.name for c in configs)
    for scenario_name, count in name_counts.items():
        if count > 1:
            msg = ("Duplicate scenario name '{}' found. "
                   'Exiting.').format(scenario_name)
            util.sysexit_with_message(msg)
def decode_dict(data):
    """Decode a JSON-parsed dict: on Python 2, re-encode unicode keys and
    values as UTF-8 byte strings; on Python 3 the dict is returned as-is."""
    if six.PY3:
        # Python 3 strings are already text; nothing to convert.
        return data

    converted = {}
    for key, value in iteritems(data):
        if isinstance(key, text_type):
            key = key.encode('utf-8')
        if isinstance(value, text_type):
            value = value.encode('utf-8')
        elif isinstance(value, list):
            value = _decode_list(value)
        # no need to recurse into dict, json library will do that
        converted[key] = value
    return converted
def _copy_stream_position(position):
    """Copy a StreamPosition.

    Args:
        position (Union[dict, ~google.cloud.bigquery_storage_v1beta1.types.StreamPosition]):
            StreamPosition (or dictionary in StreamPosition format) to copy.

    Returns:
        ~google.cloud.bigquery_storage_v1beta1.types.StreamPosition:
            A copy of the input StreamPosition.
    """
    if not isinstance(position, types.StreamPosition):
        # A dict in StreamPosition format: construct directly from its keys.
        return types.StreamPosition(**position)
    duplicate = types.StreamPosition()
    duplicate.CopyFrom(position)
    return duplicate
def extract_relationtypes(urml_xml_tree):
    """Extract the allowed RST relation names and relation types from an
    URML XML file.

    Parameters
    ----------
    urml_xml_tree : lxml.etree._ElementTree
        lxml ElementTree representation of an URML XML file

    Returns
    -------
    relations : dict of (str, str)
        RST relation names as keys (str) and relation types
        (either 'par' or 'hyp') as values (str).
    """
    relations = {}
    for rel in urml_xml_tree.iterfind('//header/reltypes/rel'):
        # Elements without a 'type' attribute are skipped.
        if 'type' in rel.attrib:
            relations[rel.attrib['name']] = rel.attrib['type']
    return relations
def histogram_cover(self, minAcc, maxAcc, groupBy=None, new_reg_fields=None):
    """*Wrapper of* ``COVER``

    Shorthand for :meth:`~.cover` with ``cover_type="histogram"``: every
    region contributing to the COVER is returned, split into contiguous
    parts by accumulation index value (one part per distinct accumulation
    value, stored in the AccIndex region attribute).
    """
    # Delegate to the generic COVER implementation.
    return self.cover(minAcc, maxAcc, groupBy, new_reg_fields,
                      cover_type="histogram")
def on_pubmsg(self, connection, event):
    """Messages received in the channel - send them to the WebSocket."""
    # The nickname (and therefore its color) belongs to the event, not to
    # the individual message lines, so resolve it once instead of on every
    # iteration of the loop.
    nickname = self.get_nickname(event)
    nickname_color = self.nicknames[nickname]
    for message in event.arguments():
        self.namespace.emit("message", nickname, message, nickname_color)
def bucket_type(self, name):
    """Get the bucket-type with the specified name.

    Bucket-types do not always exist (unlike buckets), but this will always
    return a :class:`BucketType <riak.bucket.BucketType>` object.

    :param name: the bucket-type name
    :type name: str
    :rtype: :class:`BucketType <riak.bucket.BucketType>`
    """
    if not isinstance(name, string_types):
        raise TypeError('BucketType name must be a string')
    # Always hand back an object; existence is not checked server-side here.
    new_btype = BucketType(self, name)
    return self._setdefault_handle_none(self._bucket_types, name, new_btype)
def _set_snmp(self, v, load=False):
    """
    Setter method for snmp, mapped from YANG variable /interface/tengigabitethernet/snmp (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_snmp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_snmp() directly.

    YANG Description: The SNMP configurations for an interface.
    """
    # NOTE: auto-generated (pyangbind-style) YANG setter -- the YANGDynClass
    # arguments mirror the YANG model and should not be edited by hand.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=snmp.snmp, is_container='container', presence=False, yang_name="snmp", rest_name="snmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Simple Network Management Protocol (SNMP).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_BASIC_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated diagnostic payload expected by callers.
        raise ValueError({
            'error-string': """snmp must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=snmp.snmp, is_container='container', presence=False, yang_name="snmp", rest_name="snmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The Simple Network Management Protocol (SNMP).', u'cli-incomplete-no': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_BASIC_CONFIG', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
        })

    self.__snmp = t
    if hasattr(self, '_set'):
        self._set()
def sudoku(G):
    """Solving Sudoku

    :param G: integer matrix with 0 at empty cells
    :returns bool: True if grid could be solved
    :modifies: G will contain the solution
    :complexity: huge, but linear for usual published 9x9 grids
    """
    # The puzzle is reduced to an exact-cover problem solved with
    # dancing links: each candidate assignment (cell, value) covers four
    # constraint columns -- its cell, its row/value, column/value and
    # block/value combinations (rc, rv, cv, bv).
    global N, N2, N4
    if len(G) == 16:        # for a 16 x 16 sudoku grid
        N, N2, N4 = 4, 16, 256
    e = 4 * N4              # number of constraint columns
    universe = e + 1
    # One exact-cover row per candidate assignment a.
    S = [[rc(a), rv(a), cv(a), bv(a)] for a in range(N4 * N2)]
    # Extra row collecting the given clues so they are forced into the cover.
    A = [e]
    for r in range(N2):
        for c in range(N2):
            if G[r][c] != 0:
                a = assignation(r, c, G[r][c] - 1)
                A += S[a]
    sol = dancing_links(universe, S + [A])
    if sol:
        # Decode the selected assignment rows back into the grid in place.
        for a in sol:
            if a < len(S):
                G[row(a)][col(a)] = val(a) + 1
        return True
    else:
        return False
def get_column(matrix: list, col_index: int) -> list:
    """Retrieve the specified column from the provided nested list (matrix).

    Args:
        matrix (list): The nested list from which we need to extract the column.
        col_index (int): The index of the column to be extracted.

    Returns:
        list: The desired column from the nested list.

    Examples:
        >>> get_column([[1, 2, 3], [2, 4, 5], [1, 1, 1]], 0)
        [1, 2, 1]
        >>> get_column([[1, 2, 3], [-2, 4, -5], [1, -1, 1]], 2)
        [3, -5, 1]
    """
    return list(map(lambda row: row[col_index], matrix))
def _validate_nested_list_type ( self , name , obj , nested_level , * args ) : """Helper function that checks the input object as a list then recursively until nested _ level is 1. : param name : Name of the object . : param obj : Object to check the type of . : param nested _ level : Integer with the current nested level . : param args : List of classes . : raises TypeError : if the input object is not of any of the allowed types ."""
if nested_level <= 1 : self . _validate_list_type ( name , obj , * args ) else : if obj is None : return if not isinstance ( obj , list ) : raise TypeError ( self . __class__ . __name__ + '.' + name + ' contains value of type ' + type ( obj ) . __name__ + ' where a list is expected' ) for sub_obj in obj : self . _validate_nested_list_type ( name , sub_obj , nested_level - 1 , * args )
def create(gandi, domain, zone_id, name, type, value, ttl):
    """Create new DNS zone record entry for a domain."""
    if not zone_id:
        # Look up the zone from the domain when not given explicitly.
        zone_id = gandi.domain.info(domain)['zone_id']
        if not zone_id:
            gandi.echo('No zone records found, domain %s doesn\'t seems to be '
                       'managed at Gandi.' % domain)
            return

    record = {'type': type, 'name': name, 'value': value}
    if ttl:
        record['ttl'] = ttl

    return gandi.record.create(zone_id, record)
def _get_attributes ( self , path ) : """: param path : filepath within fast5 : return : dictionary of attributes found at ` ` path ` ` : rtype dict"""
path_grp = self . handle [ path ] path_attr = path_grp . attrs return dict ( path_attr )
def set_from_json(self, obj, json, models=None, setter=None):
    '''Sets the value of this property from a JSON value.

    This method first tries to unwrap a serialized data-spec dict back to
    the plain value/field format the old value used.

    Args:
        obj (HasProps):

        json (JSON-dict):

        models (seq[Model], optional):

        setter (ClientSession or ServerSession or None, optional):
            This is used to prevent "boomerang" updates to Bokeh apps.
            (default: None)

            In the context of a Bokeh server application, incoming updates
            to properties will be annotated with the session that is
            doing the updating. This value is propagated through any
            subsequent change notifications that the update triggers.
            The session can compare the event setter to itself, and
            suppress any updates that originate from itself.

    Returns:
        None
    '''
    if isinstance(json, dict):
        # we want to try to keep the "format" of the data spec as string, dict, or number,
        # assuming the serialized dict is compatible with that.
        old = getattr(obj, self.name)
        if old is not None:
            try:
                # If the old value was a plain value, unwrap {'value': ...}.
                self.property._type.validate(old, False)
                if 'value' in json:
                    json = json['value']
            except ValueError:
                # Old value was not a plain value; if it was a field name
                # string, unwrap {'field': ...} instead.
                if isinstance(old, string_types) and 'field' in json:
                    json = json['field']
            # leave it as a dict if 'old' was a dict

    super(DataSpecPropertyDescriptor, self).set_from_json(obj, json, models, setter)
def parseURIReference(self, str):
    """Parse an URI reference string based on RFC 3986 and fills in the
    appropriate fields of the @uri structure

    URI-reference = URI / relative-ref
    """
    # NOTE: parameter `str` shadows the builtin; kept for API compatibility.
    return libxml2mod.xmlParseURIReference(self._o, str)
def to_internal_value_single(self, data, serializer):
    """Return the underlying object, given the serialized form."""
    related_model = serializer.Meta.model
    # Already deserialized: pass model instances straight through.
    if isinstance(data, related_model):
        return data
    try:
        return related_model.objects.get(pk=data)
    except related_model.DoesNotExist:
        raise ValidationError(
            "Invalid value for '%s': %s object with ID=%s not found"
            % (self.field_name, related_model.__name__, data))
def workerScript(jobStore, config, jobName, jobStoreID, redirectOutputToLogFile=True):
    """Worker process script, runs a job.

    :param jobStore: the job store holding the job and its shared files
    :param config: the Toil configuration (log level, work dir, stats/retry options)
    :param str jobName: The "job name" (a user friendly name) of the job to be run
    :param str jobStoreID: The job store ID of the job to be run
    :param bool redirectOutputToLogFile: Redirect standard out and standard error to a log file
    """
    logging.basicConfig()
    setLogLevel(config.logLevel)

    logFileByteReportLimit = config.maxLogFileSize

    # Create the worker killer, if requested.
    if config.badWorker > 0 and random.random() < config.badWorker:
        # We need to kill the process we are currently in, to simulate worker
        # failure. We don't want to just send SIGKILL, because we can't tell
        # that from a legitimate OOM on our CI runner. We're going to send
        # SIGUSR1 so our terminations are distinctive, and then SIGKILL if
        # that didn't stick. We definitely don't want to do this from *within*
        # the process we are trying to kill, so we fork off.
        killTarget = os.getpid()
        sleepTime = config.badWorkerFailInterval * random.random()
        if os.fork() == 0:
            # We are the child. Let the parent run some amount of time.
            time.sleep(sleepTime)
            # Kill it gently
            os.kill(killTarget, signal.SIGUSR1)
            # Wait for that to stick
            time.sleep(0.01)
            try:
                # Kill it harder. Hope the PID hasn't already been reused.
                # If we succeeded the first time, this will OSError.
                os.kill(killTarget, signal.SIGKILL)
            except OSError:
                pass
            # Exit without doing any of Toil's cleanup.
            # BUG FIX: os._exit() requires an explicit status argument;
            # calling it with none raised TypeError and let the child fall
            # through into the rest of the worker code.
            os._exit(0)
        # We don't need to reap the child. Either it kills us, or we finish
        # before it does. Either way, init will have to clean it up for us.

    # Load the environment for the jobGraph.
    with jobStore.readSharedFileStream("environment.pickle") as fileHandle:
        environment = safeUnpickleFromStream(fileHandle)
    for i in environment:
        if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules.
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)

    toilWorkflowDir = Toil.getWorkflowDir(config.workflowID, config.workDir)

    # Setup the temporary directories.
    # Dir to put all this worker's temp files in.
    localWorkerTempDir = tempfile.mkdtemp(dir=toilWorkflowDir)
    os.chmod(localWorkerTempDir, 0o755)

    # Setup the logging. This is mildly tricky because we don't just want to
    # redirect stdout and stderr for this Python process; we want to redirect
    # it for this process and all children. Consequently, we can't just
    # replace sys.stdout and sys.stderr; we need to mess with the underlying
    # OS-level file descriptors. See <http://stackoverflow.com/a/11632982/402891>.
    # When we start, standard input is fd 0, standard output fd 1, standard
    # error fd 2. What file do we want to point FDs 1 and 2 to?
    tempWorkerLogPath = os.path.join(localWorkerTempDir, "worker_log.txt")
    if redirectOutputToLogFile:
        # Announce that we are redirecting logging, and where it will now go.
        # This is important if we are trying to manually trace a faulty worker
        # invocation.
        logger.info("Redirecting logging to %s", tempWorkerLogPath)
        sys.stdout.flush()
        sys.stderr.flush()
        # Save the original stdout and stderr (by opening new file descriptors
        # to the same files).
        origStdOut = os.dup(1)
        origStdErr = os.dup(2)
        # Open the file to send stdout/stderr to.
        logFh = os.open(tempWorkerLogPath, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
        # Replace standard output and standard error with descriptors for the
        # log file.
        os.dup2(logFh, 1)
        os.dup2(logFh, 2)
        # Since we only opened the file once, all the descriptors duped from
        # the original will share offset information, and won't clobber each
        # others' writes. See <http://stackoverflow.com/a/5284108/402891>.
        # This shouldn't matter, since O_APPEND seeks to the end of the file
        # before every write, but maybe there's something odd going on...
        # Close the descriptor we used to open the file.
        os.close(logFh)

    debugging = logging.getLogger().isEnabledFor(logging.DEBUG)

    # Worker log file trapped from here on in.
    workerFailed = False
    statsDict = MagicExpando()
    statsDict.jobs = []
    statsDict.workers.logsToMaster = []
    blockFn = lambda: True
    listOfJobs = [jobName]
    job = None
    try:
        # Put a message at the top of the log, just to make sure it's working.
        logger.info("---TOIL WORKER OUTPUT LOG---")
        sys.stdout.flush()
        logProcessContext(config)

        # Load the jobGraph.
        jobGraph = jobStore.load(jobStoreID)
        listOfJobs[0] = str(jobGraph)
        logger.debug("Parsed job wrapper")

        # Cleanup from any earlier invocation of the jobGraph.
        if jobGraph.command is None:
            logger.debug("Wrapper has no user job to run.")
            # Cleanup jobs already finished.
            f = lambda jobs: [z for z in [[y for y in x if jobStore.exists(y.jobStoreID)]
                                          for x in jobs] if len(z) > 0]
            jobGraph.stack = f(jobGraph.stack)
            jobGraph.services = f(jobGraph.services)
            logger.debug("Cleaned up any references to completed successor jobs")

        # This cleans the old log file which may have been left if the job is
        # being retried after a job failure.
        oldLogFile = jobGraph.logJobStoreFileID
        if oldLogFile is not None:
            jobGraph.logJobStoreFileID = None
            jobStore.update(jobGraph)  # Update first, before deleting any files
            jobStore.deleteFile(oldLogFile)

        # If a checkpoint exists, restart from the checkpoint.
        # The job is a checkpoint, and is being restarted after previously
        # completing.
        if jobGraph.checkpoint is not None:
            logger.debug("Job is a checkpoint")
            # If the checkpoint still has extant jobs in its (flattened) stack
            # and services, its subtree didn't complete properly. We handle
            # the restart of the checkpoint here, removing its previous
            # subtree.
            if len([i for l in jobGraph.stack for i in l]) > 0 or len(jobGraph.services) > 0:
                logger.debug("Checkpoint has failed.")
                # Reduce the retry count
                assert jobGraph.remainingRetryCount >= 0
                jobGraph.remainingRetryCount = max(0, jobGraph.remainingRetryCount - 1)
                jobGraph.restartCheckpoint(jobStore)
            else:
                # Otherwise, the job and successors are done, and we can
                # cleanup stuff we couldn't clean because of the job being a
                # checkpoint.
                logger.debug("The checkpoint jobs seems to have completed okay, removing any checkpoint files to delete.")
                # Delete any remnant files
                list(map(jobStore.deleteFile,
                         list(filter(jobStore.fileExists, jobGraph.checkpointFilesToDelete))))

        # Setup the stats, if requested.
        if config.stats:
            startClock = getTotalCpuTime()

        startTime = time.time()
        while True:
            # Run the jobGraph, if there is one.
            if jobGraph.command is not None:
                assert jobGraph.command.startswith("_toil ")
                logger.debug("Got a command to run: %s" % jobGraph.command)
                # Load the job.
                job = Job._loadJob(jobGraph.command, jobStore)
                # If it is a checkpoint job, save the command.
                if job.checkpoint:
                    jobGraph.checkpoint = jobGraph.command

                # Create a fileStore object for the job.
                fileStore = FileStore.createFileStore(jobStore, jobGraph, localWorkerTempDir, blockFn,
                                                      caching=not config.disableCaching)
                with job._executor(jobGraph=jobGraph,
                                   stats=statsDict if config.stats else None,
                                   fileStore=fileStore):
                    with fileStore.open(job):
                        # Get the next block function and list that will
                        # contain any messages.
                        blockFn = fileStore._blockFn
                        job._runner(jobGraph=jobGraph, jobStore=jobStore, fileStore=fileStore)

                # Accumulate messages from this job & any subsequent chained
                # jobs.
                statsDict.workers.logsToMaster += fileStore.loggingMessages
            else:
                # The command may be none, in which case the jobGraph is
                # either a shell ready to be deleted or has been scheduled
                # after a failure to cleanup.
                logger.debug("No user job to run, so finishing")
                break

            if FileStore._terminateEvent.isSet():
                raise RuntimeError("The termination flag is set")

            # Establish if we can run another jobGraph within the worker.
            successorJobGraph = nextChainableJobGraph(jobGraph, jobStore)
            if successorJobGraph is None or config.disableChaining:
                # Can't chain any more jobs.
                break

            # We have a single successor job that is not a checkpoint job. We
            # transplant the successor jobGraph command and stack into the
            # current jobGraph object so that it can be run as if it were a
            # command that were part of the current jobGraph. We can then
            # delete the successor jobGraph in the jobStore, as it is wholly
            # incorporated into the current jobGraph.

            # Add the successor to the list of jobs run.
            listOfJobs.append(str(successorJobGraph))
            # Clone the jobGraph and its stack.
            jobGraph = copy.deepcopy(jobGraph)
            # Remove the successor jobGraph.
            jobGraph.stack.pop()
            # Transplant the command and stack to the current jobGraph.
            jobGraph.command = successorJobGraph.command
            jobGraph.stack += successorJobGraph.stack
            # Include some attributes for better identification of chained
            # jobs in logging output.
            jobGraph.unitName = successorJobGraph.unitName
            jobGraph.jobName = successorJobGraph.jobName
            assert jobGraph.memory >= successorJobGraph.memory
            assert jobGraph.cores >= successorJobGraph.cores

            # Build a fileStore to update the job.
            fileStore = FileStore.createFileStore(jobStore, jobGraph, localWorkerTempDir, blockFn,
                                                  caching=not config.disableCaching)
            # Update blockFn.
            blockFn = fileStore._blockFn
            # Add successorJobGraph to those to be deleted.
            fileStore.jobsToDelete.add(successorJobGraph.jobStoreID)
            # This will update the job once the previous job is done.
            fileStore._updateJobWhenDone()
            # Clone the jobGraph and its stack again, so that updates to it
            # do not interfere with this update.
            jobGraph = copy.deepcopy(jobGraph)
            logger.debug("Starting the next job")

        # Finish up the stats.
        if config.stats:
            totalCPUTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
            statsDict.workers.time = str(time.time() - startTime)
            statsDict.workers.clock = str(totalCPUTime - startClock)
            statsDict.workers.memory = str(totalMemoryUsage)

        # Log the worker log path here so that if the file is truncated the
        # path can still be found.
        if redirectOutputToLogFile:
            logger.info("Worker log can be found at %s. Set --cleanWorkDir to retain this log",
                        localWorkerTempDir)
        logger.info("Finished running the chain of jobs on this node, we ran for a total of %f seconds",
                    time.time() - startTime)

    # Trapping where worker goes wrong. The bare except is deliberate:
    # anything that escapes the job must mark the worker as failed.
    except:
        # Case that something goes wrong in worker.
        traceback.print_exc()
        logger.error("Exiting the worker because of a failed job on host %s", socket.gethostname())
        FileStore._terminateEvent.set()

    # Wait for the asynchronous chain of writes/updates to finish.
    blockFn()

    # All the asynchronous worker/update threads must be finished now, so
    # it's safe to test if they completed okay.
    if FileStore._terminateEvent.isSet():
        jobGraph = jobStore.load(jobStoreID)
        jobGraph.setupJobAfterFailure(config)
        workerFailed = True
        if job and jobGraph.remainingRetryCount == 0:
            job._succeeded = False

    # Cleanup: close the worker logging. Flush at the Python level.
    sys.stdout.flush()
    sys.stderr.flush()
    if redirectOutputToLogFile:
        # Flush at the OS level.
        os.fsync(1)
        os.fsync(2)
        # Close redirected stdout and replace with the original standard
        # output; same for stderr.
        os.dup2(origStdOut, 1)
        os.dup2(origStdErr, 2)
        # sys.stdout and sys.stderr don't need to be modified at all. We
        # don't need to call redirectLoggerStreamHandlers since they still
        # log to sys.stderr.
        # Close our extra handles to the original streams, so we don't leak
        # file descriptors.
        os.close(origStdOut)
        os.close(origStdErr)
    # Now our file handles are in exactly the state they were in before.

    # Copy back the log file to the global dir, if needed.
    if workerFailed and redirectOutputToLogFile:
        jobGraph.logJobStoreFileID = jobStore.getEmptyFileStoreID(jobGraph.jobStoreID)
        jobGraph.chainedJobs = listOfJobs
        with jobStore.updateFileStream(jobGraph.logJobStoreFileID) as w:
            with open(tempWorkerLogPath, "r") as f:
                if os.path.getsize(tempWorkerLogPath) > logFileByteReportLimit != 0:
                    if logFileByteReportLimit > 0:
                        f.seek(-logFileByteReportLimit, 2)  # seek to last tooBig bytes of file
                    elif logFileByteReportLimit < 0:
                        f.seek(logFileByteReportLimit, 0)  # seek to first tooBig bytes of file
                # TODO load file using a buffer
                w.write(f.read().encode('utf-8'))
        jobStore.update(jobGraph)
    elif debugging and redirectOutputToLogFile:
        # Report the log messages through the stats channel instead.
        with open(tempWorkerLogPath, 'r') as logFile:
            if os.path.getsize(tempWorkerLogPath) > logFileByteReportLimit != 0:
                if logFileByteReportLimit > 0:
                    logFile.seek(-logFileByteReportLimit, 2)  # seek to last tooBig bytes of file
                elif logFileByteReportLimit < 0:
                    logFile.seek(logFileByteReportLimit, 0)  # seek to first tooBig bytes of file
            logMessages = logFile.read().splitlines()
        statsDict.logs.names = listOfJobs
        statsDict.logs.messages = logMessages

    if (debugging or config.stats or statsDict.workers.logsToMaster) and not workerFailed:
        # We have stats/logging to report back.
        jobStore.writeStatsAndLogging(json.dumps(statsDict, ensure_ascii=True))

    # Remove the temp dir.
    cleanUp = config.cleanWorkDir
    if cleanUp == 'always' or (cleanUp == 'onSuccess' and not workerFailed) or (cleanUp == 'onError' and workerFailed):
        shutil.rmtree(localWorkerTempDir)

    # This must happen after the log file is done with, else there is no
    # place to put the log.
    if (not workerFailed) and jobGraph.command is None and len(jobGraph.stack) == 0 and len(jobGraph.services) == 0:
        # We can now safely get rid of the jobGraph.
        jobStore.delete(jobGraph.jobStoreID)
def raw_update(self, attributes=None):
    """Run a raw update against the base query.

    :type attributes: dict
    :rtype: int
    """
    attrs = {} if attributes is None else attributes
    if self._query is None:
        # No underlying query: nothing to update, implicitly report nothing.
        return None
    return self._query.update(attrs)
def _handle_remark_msg(self, msg):
    """Try to parse the message provided with the remark tag or element.

    :param str msg: The message
    :raises overpy.exception.OverpassRuntimeError: If message starts with 'runtime error:'
    :raises overpy.exception.OverpassRuntimeRemark: If message starts with 'runtime remark:'
    :raises overpy.exception.OverpassUnknownError: If we are unable to identify the error
    """
    text = msg.strip()
    # Map known message prefixes to their exception classes.
    known = (
        ("runtime error:", exception.OverpassRuntimeError),
        ("runtime remark:", exception.OverpassRuntimeRemark),
    )
    for prefix, exc_cls in known:
        if text.startswith(prefix):
            raise exc_cls(msg=text)
    raise exception.OverpassUnknownError(msg=text)
def write_handle(self, handle: int, value: bytes):
    """Write *value* to the given GATT *handle* on the connected device.

    :raises BluetoothBackendException: if no device is connected.
    """
    if self.is_connected():
        # True => write with response.
        self._device.char_write_handle(handle, value, True)
        return True
    raise BluetoothBackendException('Not connected to device!')
def parse(self, s):
    """Parse a reaction string into a Reaction object.

    The string may optionally start with a global compartment prefix
    ``[comp]:`` (only honored when ``self._parse_global`` is set), followed
    by compounds separated by ``+`` with an arrow between the left and right
    sides. A small state machine walks the token stream; ``saved_token``
    holds a token that could be either a compound name or a stoichiometric
    count until the next token disambiguates it.

    :param s: reaction equation string
    :raises ParseError: on any malformed input (unknown token, misplaced
        operator, multiple arrows, missing arrow, unparsable count)
    :returns: a ``Reaction`` built from direction, left side and right side
    """
    global_comp = None
    if self._parse_global:
        # Split by colon for global compartment information
        m = re.match(r'^\s*\[(\w+)\]\s*:\s*(.*)', s)
        if m is not None:
            global_comp = m.group(1)
            s = m.group(2)
    # Parser state: whether the next token must be an operator (+ or arrow),
    # which side of the arrow we are filling, and a possibly-ambiguous
    # pending token (count or compound name).
    expect_operator = False
    direction = None
    left = []
    right = []
    current_side = left
    saved_token = None

    def tokenize():
        # Scan `s` with the class-level scanner regex; each alternative group
        # corresponds to an entry in self._tokens (token type, or None for
        # an invalid character sequence).
        for match in re.finditer(self._scanner, s):
            for i, group in enumerate(match.groups()):
                if group is not None:
                    token = self._tokens[i][1]
                    span = match.start(), match.end()
                    if token is None:
                        raise ParseError(
                            'Invalid token in expression string:'
                            ' {!r}'.format(group), span=span)
                    yield self._tokens[i][1], group, span
                    break

    for token, value, span in tokenize():
        # Handle partially parsed compound: a pending bare token followed by
        # an operator (or end) must have been a compound with implicit
        # count 1.
        if saved_token is not None and (
                token in (_ReactionToken.Plus, _ReactionToken.Arrow,
                          _ReactionToken.End)):
            compound = parse_compound(saved_token, global_comp)
            current_side.append((compound, 1))
            expect_operator = True
            saved_token = None

        if token == _ReactionToken.Plus:
            # Compound separator. Expect compound name or compound count
            # next.
            if not expect_operator:
                raise ParseError(
                    'Unexpected token: {!r}'.format(value), span=span)
            expect_operator = False
        elif token == _ReactionToken.Arrow:
            # Reaction arrow. Expect compound name or compound count next.
            if direction is not None:
                raise ParseError(
                    'More than one equation arrow: {!r}'.format(value),
                    span=span)
            if not expect_operator and len(left) > 0:
                raise ParseError(
                    'Unexpected token: {!r}'.format(value), span=span)
            expect_operator = False
            direction = self._arrows[value]
            current_side = right
        elif token == _ReactionToken.Group:
            # Compound count. Expect compound name next.
            if expect_operator:
                raise ParseError(
                    'Expected plus or arrow: {!r}'.format(value), span=span)
            if saved_token is not None:
                raise ParseError(
                    'Expected compound name: {!r}'.format(value), span=span)
            saved_token = value
        elif token == _ReactionToken.Quoted:
            # Compound name. Expect operator next; any pending token must be
            # a count.
            if expect_operator:
                raise ParseError(
                    'Expected plus or arrow: {!r}'.format(value), span=span)
            if saved_token is not None:
                try:
                    count = parse_compound_count(saved_token)
                except ValueError as e:
                    raise_from(ParseError(
                        'Unable to parse compound count: {!r}'.format(
                            saved_token), span=span), e)
            else:
                count = 1
            compound = parse_compound(value, global_comp)
            current_side.append((compound, count))
            expect_operator = True
            saved_token = None
        elif token == _ReactionToken.Other:
            # Could be count or compound name. Store and expect other,
            # quoted, operator or end.
            if expect_operator:
                raise ParseError(
                    'Expected plus or arrow: {!r}'.format(value), span=span)
            if saved_token is not None:
                # Two bare tokens in a row: the first must be a count, the
                # second the compound name.
                try:
                    count = parse_compound_count(saved_token)
                except ValueError as e:
                    raise_from(ParseError(
                        'Unable to parse compound count: {!r}'.format(
                            saved_token), span=span), e)
                compound = parse_compound(value, global_comp)
                current_side.append((compound, count))
                expect_operator = True
                saved_token = None
            else:
                saved_token = value

    if direction is None:
        raise ParseError('Missing equation arrow')

    return Reaction(direction, left, right)
def play(self):
    """Change state to playing."""
    # Only a paused player can transition to playing.
    if self.state != STATE_PAUSED:
        return
    self._player.set_state(Gst.State.PLAYING)
    self.state = STATE_PLAYING
def _read_header(self):
    """Reads the message header from the input stream.

    :returns: tuple containing deserialized header and header_auth objects
    :rtype: tuple of aws_encryption_sdk.structures.MessageHeader
        and aws_encryption_sdk.internal.structures.MessageHeaderAuthentication
    :raises CustomMaximumValueExceeded: if frame length is greater than the custom max value
    """
    header, raw_header = deserialize_header(self.source_stream)
    self.__unframed_bytes_read += len(raw_header)

    # Enforce the caller-configured frame-size ceiling before doing any
    # decryption work.
    max_len = self.config.max_body_length
    if (max_len is not None
            and header.content_type == ContentType.FRAMED_DATA
            and header.frame_length > max_len):
        raise CustomMaximumValueExceeded(
            "Frame Size in header found larger than custom value: {found:d} > {custom:d}".format(
                found=header.frame_length, custom=max_len
            )
        )

    materials_request = DecryptionMaterialsRequest(
        encrypted_data_keys=header.encrypted_data_keys,
        algorithm=header.algorithm,
        encryption_context=header.encryption_context,
    )
    materials = self.config.materials_manager.decrypt_materials(request=materials_request)

    # Only set up a verifier when the materials carry a verification key,
    # and feed it the raw header bytes so the signature covers the header.
    verification_key = materials.verification_key
    if verification_key is None:
        self.verifier = None
    else:
        self.verifier = Verifier.from_key_bytes(
            algorithm=header.algorithm, key_bytes=verification_key
        )
        self.verifier.update(raw_header)

    header_auth = deserialize_header_auth(
        stream=self.source_stream, algorithm=header.algorithm, verifier=self.verifier
    )
    self._derived_data_key = derive_data_encryption_key(
        source_key=materials.data_key.data_key,
        algorithm=header.algorithm,
        message_id=header.message_id,
    )
    validate_header(
        header=header,
        header_auth=header_auth,
        raw_header=raw_header,
        data_key=self._derived_data_key,
    )
    return header, header_auth
def new(self, name):
    """Build a stub migration with name + auto-id in config['migration_home'].

    There is no guarantee this id will be unique for all remote migration
    configurations. Conflicts will require manual management.
    """
    # XXX: dc: assert that the name is somewhat sane and follows python
    # naming conventions
    next_id = 0
    # BUG FIX: str.join raises TypeError on non-string items; the numeric
    # id must be converted to a string before joining.
    cls_name = '_'.join((name, str(next_id)))
    with open(pjoin(self.migration_home, name), "w+") as new_migration:
        # Equivalent to the old Python-2-only `print >> file, ...` statement
        # (writes the formatted template plus a trailing newline), but valid
        # on both Python 2 and 3.
        new_migration.write(
            migration_tmpl.format(cls_name=cls_name, migration_name=name) + '\n')
def plot_grid(images, slices=None, axes=2,
              # general figure arguments
              figsize=1., rpad=0, cpad=0,
              # title arguments
              title=None, tfontsize=20, title_dx=0, title_dy=0,
              # row arguments
              rlabels=None, rfontsize=14, rfontcolor='white', rfacecolor='black',
              # column arguments
              clabels=None, cfontsize=14, cfontcolor='white', cfacecolor='black',
              # save arguments
              filename=None, dpi=400, transparent=True,
              # other args
              **kwargs):
    """Plot a collection of images in an arbitrarily-defined grid.

    Matplotlib named colors: https://matplotlib.org/examples/color/named_colors.html

    Arguments
    ---------
    images : list of ANTsImage types
        image(s) to plot. If one image, it is used for all grid locations;
        if multiple, they should be arranged in a (possibly nested) list
        matching the grid shape.
    slices : integer or list of integers
        slice indices to plot. A single integer is used for every image;
        multiple integers should match the grid shape.
        NOTE(review): if `slices` is left as the default ``None``,
        ``one_slice`` below is never assigned and the later
        ``if (not one_slice)`` raises NameError — confirm that callers are
        required to pass `slices`.
    axes : integer or list of integers
        axis (or per-image axes, matching the grid shape) along which to
        take each slice.
    figsize, rpad, cpad : float
        overall scale factor and row/column padding for the grid.
    title, tfontsize, title_dx, title_dy :
        figure suptitle text, size, and position offsets.
    rlabels / clabels and their font/face arguments :
        optional row and column labels with styling.
    filename, dpi, transparent :
        if `filename` is given the figure is saved there and closed,
        otherwise it is shown interactively.

    Example
    -------
    >>> import ants
    >>> import numpy as np
    >>> mni1 = ants.image_read(ants.get_data('mni'))
    >>> mni2 = mni1.smooth_image(1.)
    >>> mni3 = mni1.smooth_image(2.)
    >>> mni4 = mni1.smooth_image(3.)
    >>> images = np.asarray([[mni1, mni2],
    ...                      [mni3, mni4]])
    >>> slices = np.asarray([[100, 100],
    ...                      [100, 100]])
    >>> ants.plot_grid(images=images, slices=slices, title='2x2 Grid')
    >>> ants.plot_grid(images, slices, rpad=0.02, cpad=0.02, title='Row and Col Padding')
    >>> ants.plot_grid(images, slices, title='Row and Col Labels',
    ...                rlabels=['Row 1', 'Row 2'], clabels=['Col 1', 'Col 2'])
    """
    # --- small 2-D orientation helpers used on each extracted slice ---
    def mirror_matrix(x):
        return x[::-1, :]

    def rotate270_matrix(x):
        return mirror_matrix(x.T)

    def rotate180_matrix(x):
        # NOTE(review): identical to mirror_matrix (flips rows only); a true
        # 180-degree rotation would be x[::-1, ::-1] — confirm intent.
        return x[::-1, :]

    def rotate90_matrix(x):
        return mirror_matrix(x).T

    def flip_matrix(x):
        return mirror_matrix(rotate180_matrix(x))

    def reorient_slice(x, axis):
        # Rotate every slice; additionally mirror slices taken along axis 1.
        if (axis != 1):
            x = rotate90_matrix(x)
        if (axis == 1):
            x = rotate90_matrix(x)
            x = mirror_matrix(x)
        return x

    def slice_image(img, axis, idx):
        # Index the 3-D image along the requested (possibly negative) axis.
        if axis == 0:
            return img[idx, :, :]
        elif axis == 1:
            return img[:, idx, :]
        elif axis == 2:
            return img[:, :, idx]
        elif axis == -1:
            return img[:, :, idx]
        elif axis == -2:
            return img[:, idx, :]
        elif axis == -3:
            return img[idx, :, :]
        else:
            raise ValueError('axis %i not valid' % axis)

    # Normalize `images` to a nested list (rows of columns).
    if isinstance(images, np.ndarray):
        images = images.tolist()
    if not isinstance(images, list):
        raise ValueError('images argument must be of type list')
    if not isinstance(images[0], list):
        images = [images]

    # Normalize `slices`: a single int is broadcast to every image.
    if isinstance(slices, int):
        one_slice = True
    if isinstance(slices, np.ndarray):
        slices = slices.tolist()
    if isinstance(slices, list):
        one_slice = False
        if not isinstance(slices[0], list):
            slices = [slices]
        nslicerow = len(slices)
        nslicecol = len(slices[0])

    nrow = len(images)
    ncol = len(images[0])

    if rlabels is None:
        rlabels = [None] * nrow
    if clabels is None:
        clabels = [None] * ncol

    if (not one_slice):
        # Per-image slices must match the image grid shape exactly.
        if (nrow != nslicerow) or (ncol != nslicecol):
            raise ValueError('`images` arg shape (%i,%i) must equal `slices` arg shape (%i,%i)!'
                             % (nrow, ncol, nslicerow, nslicecol))

    fig = plt.figure(figsize=((ncol + 1) * 2.5 * figsize, (nrow + 1) * 2.5 * figsize))

    if title is not None:
        # Nudge the title up slightly when column labels occupy the top edge.
        basex = 0.5
        basey = 0.9 if clabels[0] is None else 0.95
        fig.suptitle(title, fontsize=tfontsize, x=basex + title_dx, y=basey + title_dy)

    # Padding on both sides collapses into a single grid-wide spacing value.
    if (cpad > 0) and (rpad > 0):
        bothgridpad = max(cpad, rpad)
        cpad = 0
        rpad = 0
    else:
        bothgridpad = 0.0

    gs = gridspec.GridSpec(nrow, ncol,
                           wspace=bothgridpad, hspace=0.0,
                           top=1. - 0.5 / (nrow + 1), bottom=0.5 / (nrow + 1) + cpad,
                           left=0.5 / (ncol + 1) + rpad, right=1 - 0.5 / (ncol + 1))

    for rowidx in range(nrow):
        for colidx in range(ncol):
            ax = plt.subplot(gs[rowidx, colidx])

            # Row label (only on the first column of each row).
            if colidx == 0:
                if rlabels[rowidx] is not None:
                    bottom, height = .25, .5
                    top = bottom + height
                    # add label text
                    ax.text(-0.07, 0.5 * (bottom + top), rlabels[rowidx],
                            horizontalalignment='right',
                            verticalalignment='center',
                            rotation='vertical',
                            transform=ax.transAxes,
                            color=rfontcolor, fontsize=rfontsize)
                    # add label background
                    extra = 0.3 if rowidx == 0 else 0.0
                    rect = patches.Rectangle((-0.3, 0), 0.3, 1.0 + extra,
                                             facecolor=rfacecolor,
                                             alpha=1., transform=ax.transAxes,
                                             clip_on=False)
                    ax.add_patch(rect)

            # Column label (only on the first row of each column).
            if rowidx == 0:
                if clabels[colidx] is not None:
                    bottom, height = .25, .5
                    left, width = .25, .5
                    right = left + width
                    top = bottom + height
                    ax.text(0.5 * (left + right), 0.09 + top + bottom, clabels[colidx],
                            horizontalalignment='center',
                            verticalalignment='center',
                            rotation='horizontal',
                            transform=ax.transAxes,
                            color=cfontcolor, fontsize=cfontsize)
                    # add label background
                    rect = patches.Rectangle((0, 1.), 1.0, 0.3,
                                             facecolor=cfacecolor,
                                             alpha=1., transform=ax.transAxes,
                                             clip_on=False)
                    ax.add_patch(rect)

            # Extract, reorient and draw this cell's slice.
            tmpimg = images[rowidx][colidx]
            if isinstance(axes, int):
                tmpaxis = axes
            else:
                tmpaxis = axes[rowidx][colidx]
            sliceidx = slices[rowidx][colidx] if not one_slice else slices
            tmpslice = slice_image(tmpimg, tmpaxis, sliceidx)
            tmpslice = reorient_slice(tmpslice, tmpaxis)
            ax.imshow(tmpslice, cmap='Greys_r', aspect='auto')
            ax.axis('off')

    if filename is not None:
        filename = os.path.expanduser(filename)
        plt.savefig(filename, dpi=dpi, transparent=transparent, bbox_inches='tight')
        plt.close(fig)
    else:
        plt.show()
def _endless_page(self, number, label=None):
    """Factory returning an *EndlessPage* for *number*, partially applying
    this paginator's own state (works like a partial constructor)."""
    kwargs = {
        'label': label,
        'default_number': self._default_number,
        'override_path': self._override_path,
    }
    return EndlessPage(self._request, number, self._page.number,
                       len(self), self._querystring_key, **kwargs)
def rate_url(obj, score=1):
    """Generate a link that "rates" the given object with the provided score.

    Usable as a form target or for POSTing via Ajax.
    """
    content_type_id = ContentType.objects.get_for_model(obj).pk
    return reverse('ratings_rate_object', args=(content_type_id, obj.pk, score))
def get_validators_description(view):
    """Return validators description in the format::

        ### Validators:
        * validator1 name
          * validator1 docstring
        * validator2 name
          * validator2 docstring

    Returns '' when the view has no action or no validator descriptions.
    """
    action = getattr(view, 'action', None)
    if action is None:
        return ''
    # Join non-empty entries with newlines; an empty accumulator never
    # receives a leading newline.
    description = ''
    for validator in getattr(view, action + '_validators', []):
        entry = get_entity_description(validator)
        if description:
            description += '\n' + entry
        else:
            description += entry
    if not description:
        return ''
    return '### Validators:\n' + description
def render_to_string(self, template_file, context):
    """Render *template_file* to a string, injecting the wrapped object into
    the context both as 'object' and under its lowercased class name."""
    ctx = context if context else {}
    obj = self.object
    if obj:
        ctx['object'] = obj
        ctx[obj.__class__.__name__.lower()] = obj
    # Delegates to the module-level (Django) render_to_string.
    return render_to_string(template_file, ctx, self.request)
def set_current_language(self, language_code, initialize=False):
    """Switch the currently active language of the object."""
    code = language_code or get_language()
    self._current_language = normalize_language_code(code)
    if initialize:
        # Ensure the translation is present for __get__ queries.
        self._get_translated_model(use_fallback=False, auto_create=True)
def uncertainty_timescales(self):
    """Estimate of the element-wise asymptotic standard deviation in the
    model relaxation timescales."""
    # Lazily build the Fisher information matrix on first use.
    if self.information_ is None:
        self._build_information()
    sigmas = _ratematrix.sigma_timescales(
        self.information_, theta=self.theta_, n=self.n_states_)
    if self.n_timescales is not None:
        sigmas = sigmas[:self.n_timescales]
    return sigmas
def submit_and_connect(self, spec):
    """Submit a new skein application and wait to connect to it.

    If an error occurs before the connection is established, the freshly
    submitted application is killed so it does not linger.

    Parameters
    ----------
    spec : ApplicationSpec, str, or dict
        A description of the application to run.  Can be an
        ``ApplicationSpec`` object, a path to a yaml/json file, or a
        dictionary description of an application specification.

    Returns
    -------
    app_client : ApplicationClient
    """
    app_spec = ApplicationSpec._from_any(spec)
    app_id = self.submit(app_spec)
    try:
        client = self.connect(app_id, security=app_spec.master.security)
    except BaseException:
        # Clean up the half-started application before propagating.
        self.kill_application(app_id)
        raise
    return client
def write_cache_decorator(self, node_or_pagetag, name, args, buffered,
                          identifiers, inline=False, toplevel=False):
    """Write a post-function decorator to replace a rendering callable
    with a cached version of itself.

    Emits generated source via ``self.printer``: first an alias
    ``__M_<name> = <name>`` preserving the original callable, then a new
    ``def <name>(...)`` whose body fetches from (or populates) the cache.

    :param node_or_pagetag: parsed node carrying ``cache_*`` attributes.
    :param name: name of the rendering callable being wrapped.
    :param args: argument declaration strings for the generated def.
    :param buffered: if True, route the cached result through the buffer
        filters and ``return`` it; otherwise write it with ``__M_writer``.
    :param identifiers: identifier set passed to write_variable_declares.
    :param inline: unused here -- presumably kept for caller symmetry;
        TODO confirm against other ``write_*`` methods.
    :param toplevel: forwarded to ``write_variable_declares``.
    """
    # Keep the uncached callable reachable under a mangled alias.
    self.printer.writeline("__M_%s = %s" % (name, name))
    # The cache key defaults to the repr of the callable's name.
    cachekey = node_or_pagetag.parsed_attributes.get('cache_key', repr(name))
    cache_args = {}
    # Page-level cache_* attributes apply first, so node-level ones
    # (updated second) override them.
    if self.compiler.pagetag is not None:
        cache_args.update(
            (pa[6:], self.compiler.pagetag.parsed_attributes[pa])
            for pa in self.compiler.pagetag.parsed_attributes
            if pa.startswith('cache_') and pa != 'cache_key'
        )
    cache_args.update(
        (pa[6:], node_or_pagetag.parsed_attributes[pa])
        for pa in node_or_pagetag.parsed_attributes
        if pa.startswith('cache_') and pa != 'cache_key'
    )
    if 'timeout' in cache_args:
        # Attribute values are source snippets; evaluate to get an int.
        cache_args['timeout'] = int(eval(cache_args['timeout']))
    self.printer.writeline("def %s(%s):" % (name, ','.join(args)))
    # form "arg1, arg2, arg3=arg3, arg4=arg4", etc. -- defaults are
    # re-passed by keyword so the wrapped callable sees the same values.
    pass_args = [
        "%s=%s" % ((a.split('=')[0],) * 2) if '=' in a else a
        for a in args
    ]
    self.write_variable_declares(
        identifiers, toplevel=toplevel,
        limit=node_or_pagetag.undeclared_identifiers())
    if buffered:
        s = "context.get('local')." \
            "cache._ctx_get_or_create(" \
            "%s, lambda:__M_%s(%s), context, %s__M_defname=%r)" % (
                cachekey, name, ','.join(pass_args),
                ''.join(["%s=%s, " % (k, v)
                         for k, v in cache_args.items()]),
                name)
        # apply buffer_filters
        s = self.create_filter_callable(
            self.compiler.buffer_filters, s, False)
        self.printer.writelines("return " + s, None)
    else:
        # Unbuffered: write the cached value straight to the output and
        # return an empty string like any other rendering def.
        self.printer.writelines(
            "__M_writer(context.get('local')."
            "cache._ctx_get_or_create("
            "%s, lambda:__M_%s(%s), context, %s__M_defname=%r))" % (
                cachekey, name, ','.join(pass_args),
                ''.join(["%s=%s, " % (k, v)
                         for k, v in cache_args.items()]),
                name,
            ),
            "return ''", None)
def from_string(cls, public_key):
    """Construct a Verifier instance from a public key or certificate.

    Args:
        public_key (Union[str, bytes]): The public key in PEM format or
            the x509 public key certificate.

    Returns:
        Verifier: The constructed verifier.

    Raises:
        ValueError: If the public key can't be parsed.
    """
    key_bytes = _helpers.to_bytes(public_key)
    if _CERTIFICATE_MARKER in key_bytes:
        # An x509 certificate: extract the embedded public key.
        certificate = cryptography.x509.load_pem_x509_certificate(
            key_bytes, _BACKEND)
        loaded_key = certificate.public_key()
    else:
        loaded_key = serialization.load_pem_public_key(key_bytes, _BACKEND)
    return cls(loaded_key)
def load_building_sample_data(bd):
    """Populate *bd* with sample data for the Building object.

    Sets a uniform 6-storey layout: constant interstorey height, constant
    storey mass, and fixed floor plan dimensions.

    :param bd: building-like object; mutated in place.
    :return: None
    """
    n_storeys = 6
    storey_height = 3.4
    storey_mass = 40.0e3  # kg
    bd.interstorey_heights = np.full(n_storeys, storey_height)
    bd.floor_length = 18.0
    bd.floor_width = 16.0
    bd.storey_masses = np.full(n_storeys, storey_mass)
def parse_event_xml(self, event_data) -> dict:
    """Parse a metadata XML payload into an event dict.

    Returns an empty dict when no operation message is found; otherwise
    fills in topic, source and data fields as far as the payload
    provides them, logging the result at debug level.
    """
    xml = event_data.decode()

    message_match = MESSAGE.search(xml)
    if not message_match:
        return {}

    event = {EVENT_OPERATION: message_match.group(EVENT_OPERATION)}

    topic_match = TOPIC.search(xml)
    if topic_match:
        event[EVENT_TOPIC] = topic_match.group(EVENT_TOPIC)

    source_match = SOURCE.search(xml)
    if source_match:
        event[EVENT_SOURCE] = source_match.group(EVENT_SOURCE)
        event[EVENT_SOURCE_IDX] = source_match.group(EVENT_SOURCE_IDX)

    data_match = DATA.search(xml)
    if data_match:
        event[EVENT_TYPE] = data_match.group(EVENT_TYPE)
        event[EVENT_VALUE] = data_match.group(EVENT_VALUE)

    _LOGGER.debug(event)
    return event
def findAll(self, tag_name, params=None, fn=None, case_sensitive=False):
    """Search for elements by their parameters using depth-first search.

    Args:
        tag_name (str): Name of the tag you are looking for.  Set to ""
            if you wish to use only the `fn` parameter.
        params (dict, default None): Parameters which have to be present
            in a tag for it to be considered matching.
        fn (function, default None): Use this function to match tags.
            It receives one parameter, an HTMLElement instance.
        case_sensitive (bool, default False): Use case sensitive search.

    Returns:
        list: List of :class:`HTMLElement` instances matching the
        criteria, in depth-first order (self first if it matches).
    """
    matches = []
    if self.isAlmostEqual(tag_name, params, fn, case_sensitive):
        matches.append(self)
    for child in self.childs:
        matches.extend(child.findAll(tag_name, params, fn, case_sensitive))
    return matches
def randbelow(num: int) -> int:
    """Return a random int in the range [0, num).

    Raises ValueError if num <= 0, and TypeError if it's not an integer.

    >>> randbelow(16)  # doctest: +SKIP
    13
    """
    if not isinstance(num, int):
        raise TypeError('number must be an integer')
    if num <= 0:
        raise ValueError('number must be greater than zero')
    if num == 1:
        return 0
    # Rejection sampling, as in CPython's random.py:
    # https://github.com/python/cpython/blob/3.6/Lib/random.py#L223
    # Draw nbits-wide values (0 <= value < 2**nbits) until one falls
    # below num.  Don't use (num - 1) for the width, because num can
    # be a power of two.
    nbits = num.bit_length()
    while True:
        candidate = random_randint(nbits)
        if candidate < num:
            return candidate
def complete_use(self, text, *_):
    """Autocomplete for use.

    Returns each region name that starts with *text*, with a trailing
    space appended so the shell moves on to the next token.
    """
    matches = []
    for region in REGIONS:
        if region.startswith(text):
            matches.append(region + " ")
    return matches
def clean_process_tree(*signal_handler_args):
    """Stop all Processes in the current Process tree, recursively.

    Children are first asked to terminate politely; any that survive a
    short grace period are killed.  When invoked as a signal handler
    (first positional argument is the signal number), the current
    process then exits with that number via ``os._exit``.
    """
    parent = psutil.Process()
    children = parent.children(recursive=True)
    if children:
        print(f"[ZProc] Cleaning up {parent.name()!r} ({os.getpid()})...")
    for child in children:
        with suppress(psutil.NoSuchProcess):
            child.terminate()
    # 0.5 seems to work as a grace period before escalating to kill().
    _, survivors = psutil.wait_procs(children, timeout=0.5)
    for child in survivors:
        with suppress(psutil.NoSuchProcess):
            child.kill()
    if signal_handler_args:
        os._exit(signal_handler_args[0])
def stop(self):
    """Stop a paused pipeline.

    This triggers a ``StopIteration`` in the inputs of the pipeline and
    retrieves the buffered results, stopping all ``Pipers`` and
    ``NuMaps``.  Python will not terminate cleanly if a pipeline is
    running or paused.

    Raises:
        PlumberError: if the pipeline is not in the started-but-paused
            state (i.e. it is running, pausing, or was never started).
    """
    stoppable = (self._started.isSet()
                 and not self._running.isSet()
                 and not self._pausing.isSet())
    if not stoppable:
        raise PlumberError
    # Stop the dagger, then disconnect all pipers.
    super(Plumber, self).stop()
    self.disconnect()
    self.stats['run_time'] = time() - self.stats['start_time']
    self._started.clear()
def _sign_threshold_signature_fulfillment(cls, input_, message, key_pairs):
    """Signs a ThresholdSha256.

    For every distinct owner of the input, looks up its subfulfillments
    inside the threshold fulfillment and signs each with the owner's
    private key from *key_pairs*.  Works on a deep copy, so the caller's
    input is untouched.

    Args:
        input_ (:class:`~bigchaindb.common.transaction.Input`) The Input
            to be signed.
        message (str): The message to be signed.
        key_pairs (dict): The keys to sign the Transaction with, keyed by
            base58 public key.

    Returns:
        The signed copy of *input_*.

    Raises:
        KeypairMismatchException: if an owner's public key is not found
            in the fulfillment, or has no matching private key.
    """
    input_ = deepcopy(input_)
    # The signed digest covers the message plus, if this input spends an
    # output, the txid/output index of what it fulfills.
    message = sha3_256(message.encode())
    if input_.fulfills:
        message.update('{}{}'.format(
            input_.fulfills.txid, input_.fulfills.output).encode())

    for owner_before in set(input_.owners_before):
        # TODO: CC should throw a KeypairMismatchException, instead of
        #       our manual mapping here
        # TODO FOR CC: Naming wise this is not so smart,
        #              `get_subcondition` in fact doesn't return a
        #              condition but a fulfillment
        # TODO FOR CC: `get_subcondition` is singular. One would not
        #              expect to get a list back.
        ccffill = input_.fulfillment
        subffills = ccffill.get_subcondition_from_vk(
            base58.b58decode(owner_before))
        if not subffills:
            raise KeypairMismatchException('Public key {} cannot be found '
                                           'in the fulfillment'
                                           .format(owner_before))
        try:
            private_key = key_pairs[owner_before]
        except KeyError:
            raise KeypairMismatchException('Public key {} is not a pair '
                                           'to any of the private keys'
                                           .format(owner_before))

        # cryptoconditions makes no assumptions of the encoding of the
        # message to sign or verify. It only accepts bytestrings.
        for subffill in subffills:
            subffill.sign(
                message.digest(), base58.b58decode(private_key.encode()))
    return input_
def appliance_time_and_locale_configuration(self):
    """Gets the ApplianceTimeAndLocaleConfiguration API client.

    The client is created lazily on first access and cached on the
    instance afterwards.

    Returns:
        ApplianceTimeAndLocaleConfiguration:
    """
    if self.__appliance_time_and_locale_configuration:
        return self.__appliance_time_and_locale_configuration
    self.__appliance_time_and_locale_configuration = (
        ApplianceTimeAndLocaleConfiguration(self.__connection))
    return self.__appliance_time_and_locale_configuration
def _prepend_name ( self , prefix , dict_ ) : '''changes the keys of the dictionary prepending them with " name . "'''
return dict ( [ '.' . join ( [ prefix , name ] ) , msg ] for name , msg in dict_ . iteritems ( ) )
def on_connect(self, client, userdata, flags, result_code):
    """Callback when the MQTT client is connected.

    :param client: the client being connected.
    :param userdata: unused.
    :param flags: unused.
    :param result_code: result code.
    """
    message = "Connected with result code {}".format(result_code)
    self.log_info(message)
    self.state_handler.set_state(State.welcome)
def parse(cls, msg):
    """Parse a request message string into a request object.

    The first line must be ``<METHOD> <URI> <VERSION>``; the remaining
    lines are joined back with CRLF and handed to ``cls.parse_headers``.
    """
    request_line, *header_lines = msg.splitlines()
    method, uri, version = request_line.split()
    headers = cls.parse_headers('\r\n'.join(header_lines))
    return cls(version=version, uri=uri, method=method, headers=headers)
import math


def min_steps_to_equate_numbers(num1, num2):
    """Determine the minimum number of operations required to make two
    numbers equal.

    The count is the larger number divided by the pair's greatest common
    divisor, minus one.

    Examples:
    >>> min_steps_to_equate_numbers(2, 4)
    1
    >>> min_steps_to_equate_numbers(4, 10)
    4
    >>> min_steps_to_equate_numbers(1, 4)
    3
    """
    smaller, larger = sorted((num1, num2))
    return larger // math.gcd(smaller, larger) - 1
def _assume2point():
    """Convert global assumptions to a point.

    Complemented literals map to 0 (keyed by their complement), plain
    variables map to 1; anything else in ``_ASSUMPTIONS`` is ignored.
    """
    point = {}
    for assumption in _ASSUMPTIONS:
        if isinstance(assumption, Complement):
            point[~assumption] = 0
        elif isinstance(assumption, Variable):
            point[assumption] = 1
    return point
def add_request_handler(self, request_handler):
    # type: (AbstractRequestHandler) -> None
    """Register input to the request handlers list.

    The handler is wrapped in a ``GenericRequestHandlerChain`` before
    being appended.

    :param request_handler: Request Handler instance to be registered.
    :type request_handler: AbstractRequestHandler
    :return: None
    :raises RuntimeConfigException: if the handler is None or not an
        ``AbstractRequestHandler`` instance.
    """
    if request_handler is None:
        raise RuntimeConfigException(
            "Valid Request Handler instance to be provided")
    if not isinstance(request_handler, AbstractRequestHandler):
        raise RuntimeConfigException(
            "Input should be a RequestHandler instance")
    chain = GenericRequestHandlerChain(request_handler=request_handler)
    self.request_handler_chains.append(chain)
def spawn(self, command):
    """Spawn a new process and add it to the pool.

    Leading ``KEY=VALUE`` tokens in *command* are stripped from the
    command line and exported to the child's environment instead.
    The child is tagged via ``MRQ_IS_SUBPROCESS=1`` and inherits the
    current working directory.
    """
    env = dict(os.environ)
    env["MRQ_IS_SUBPROCESS"] = "1"
    env.update(self.extra_env or {})

    # Extract env variables from shell-style commands: consume leading
    # assignment tokens, stop at the first token without '='.
    parts = shlex.split(command)
    while parts and "=" in parts[0]:
        key, _, value = parts[0].partition("=")
        env[key] = value
        parts.pop(0)

    proc = subprocess.Popen(parts, shell=False, close_fds=True,
                            env=env, cwd=os.getcwd())

    self.processes.append({
        "subprocess": proc,
        "pid": proc.pid,
        "command": command,
        "psutil": psutil.Process(pid=proc.pid)
    })