signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def create_translations_model(shared_model, related_name, meta, **fields):
    """Dynamically create the translations model for ``shared_model``.

    :param related_name: The related name for the reverse FK from the
        translations model.
    :param meta: An (optional) dictionary of attributes for the translations
        model's inner Meta class.
    :param fields: A dictionary of fields to put on the translations model.

    Two fields are enforced on the translations model:

    * ``language_code``: a 15 char, db indexed field.
    * ``master``: a ForeignKey back to the shared model.

    Those two fields are unique together.
    """
    meta = meta or {}
    if shared_model._meta.abstract:
        # `master = ForeignKey(shared_model)` would fail for an abstract model.
        raise TypeError(
            "Can't create TranslatedFieldsModel for abstract class {0}".format(
                shared_model.__name__))

    # Build the inner Meta attributes, inheriting table settings from the
    # shared model.
    meta['app_label'] = shared_model._meta.app_label
    meta['db_tablespace'] = shared_model._meta.db_tablespace
    meta['managed'] = shared_model._meta.managed
    meta['unique_together'] = list(meta.get('unique_together', [])) + [('language_code', 'master')]
    meta.setdefault('db_table', '{0}_translation'.format(shared_model._meta.db_table))
    meta.setdefault('verbose_name', _lazy_verbose_name(shared_model))
    # Avoid creating permissions for the translated model: they are unused,
    # and auto-generated permission names can exceed 50 chars.
    meta.setdefault('default_permissions', ())

    # str() so type() receives a native str on both Python 2 and 3.
    name = str('{0}Translation'.format(shared_model.__name__))
    attrs = {}
    attrs.update(fields)
    attrs['Meta'] = type(str('Meta'), (object,), meta)
    attrs['__module__'] = shared_model.__module__
    attrs['objects'] = models.Manager()
    attrs['master'] = TranslationsForeignKey(
        shared_model, related_name=related_name, editable=False, null=True,
        on_delete=models.CASCADE)

    translations_model = TranslatedFieldsModelBase(name, (TranslatedFieldsModel,), attrs)

    # Register the model as a global in the shared model's module so that
    # translation instances (and objects which refer to them) can be pickled
    # and unpickled; the session and caching frameworks depend on this.
    mod = sys.modules[shared_model.__module__]
    setattr(mod, name, translations_model)
    return translations_model
def weighted_choice(choices):
    """Return a value from *choices* picked by weighted random selection.

    ``choices`` should be a list of (value, weight) tuples,
    e.g. ``weighted_choice([('val1', 5), ('val2', 0.3), ('val3', 1)])``.
    """
    values, weights = zip(*choices)
    cumulative = []
    running = 0
    for weight in weights:
        running += weight
        cumulative.append(running)
    # Pick a point uniformly along the total weight and find which
    # cumulative bucket it lands in.
    pick = random.uniform(0, running)
    return values[bisect.bisect(cumulative, pick)]
def AFF4AddChild(self, subject, child, extra_attributes=None):
    """Adds a child to the specified parent."""
    precondition.AssertType(child, Text)
    # Index entry marking `child` under the parent's directory index.
    attributes = {
        DataStore.AFF4_INDEX_DIR_TEMPLATE % child:
            [DataStore.EMPTY_DATA_PLACEHOLDER],
    }
    if extra_attributes:
        attributes.update(extra_attributes)
    self.MultiSet(subject, attributes)
def gassist(self, dg, dt, dt2, na=None, nodiag=False, memlimit=-1):
    """Calculate the probability of gene i regulating gene j with the
    genotype-assisted method, using the recommended combination of multiple
    tests.

    Probabilities are converted from likelihood ratios separately for each A;
    this gives better predictions when the number of secondary targets (dt2)
    is large (check program warnings).

    :param dg: numpy.ndarray(nt, ns, dtype=gtype) genotype data; dg[i, j] is
        genotype i's value for sample j, each in 0, 1, ..., na. Genotype i
        must be the best (and significant) eQTL of gene i (in dt).
    :param dt: numpy.ndarray(nt, ns, dtype=ftype) expression data for A;
        dt[i, j] is gene i's expression level for sample j.
    :param dt2: numpy.ndarray(nt2, ns, dtype=ftype) expression data for B;
        same format as dt; may be identical with, different from, or a
        superset of dt. When a superset (or identical), dt2[:nt, :] must
        equal dt and nodiag must be set.
    :param na: number of alleles; when unspecified it is determined as the
        maximum of dg.
    :param nodiag: skip diagonal regulations (A -> B for A == B); set True
        when A is a subset of B and aligned correspondingly.
    :param memlimit: approximate memory usage limit in bytes; 0 means
        unlimited. Calculation is chunked for large datasets, and fails
        with an error message if the limit is below the minimum required.
    :return: dict with keys ``ret`` (0 iff execution succeeded) and ``p``
        (numpy.ndarray((nt, nt2), dtype=ftype) probabilities).

    Example: see findr.examples.geuvadis2, findr.examples.geuvadis3.
    """
    return _gassist_any(self, dg, dt, dt2, "pij_gassist",
                        na=na, nodiag=nodiag, memlimit=memlimit)
def actionAngleTorus_jacobian_c(pot, jr, jphi, jz,
                                angler, anglephi, anglez,
                                tol=0.003, dJ=0.001):
    """NAME:
       actionAngleTorus_jacobian_c
    PURPOSE:
       compute d(x,v)/d(J,theta) on a single torus, also compute dO/dJ
       and the frequencies
    INPUT:
       pot - Potential object or list thereof
       jr - radial action (scalar)
       jphi - azimuthal action (scalar)
       jz - vertical action (scalar)
       angler - radial angle (array [N])
       anglephi - azimuthal angle (array [N])
       anglez - vertical angle (array [N])
       tol= (0.003) goal for |dJ|/|J| along the torus
       dJ= (0.001) action difference when computing derivatives
    OUTPUT:
       (d[R,vR,vT,z,vz,phi]/d[J,theta], Omegar, Omegaphi, Omegaz,
        Autofit error message)
       Note: dO/dJ is *not* symmetrized here
    HISTORY:
       2016-07-19 - Written - Bovy (UofT)
    """
    # Parse the potential into the flattened form the C library expects.
    from galpy.orbit.integrateFullOrbit import _parse_pot
    npot, pot_type, pot_args = _parse_pot(pot, potfortorus=True)

    # Allocate output buffers (filled in-place by the C call).
    n = len(angler)
    R = numpy.empty(n)
    vR = numpy.empty(n)
    vT = numpy.empty(n)
    z = numpy.empty(n)
    vz = numpy.empty(n)
    phi = numpy.empty(n)
    dxvOdJaT = numpy.empty(36 * n)  # flattened N x 6 x 6 Jacobian (transposed)
    dOdJT = numpy.empty(9)          # flattened 3 x 3 dO/dJ (transposed)
    Omegar = numpy.empty(1)
    Omegaphi = numpy.empty(1)
    Omegaz = numpy.empty(1)
    flag = ctypes.c_int(0)

    # Declare the C function signature.
    ndarrayFlags = ('C_CONTIGUOUS', 'WRITEABLE')
    actionAngleTorus_JacFunc = _lib.actionAngleTorus_jacobianFreqs
    actionAngleTorus_JacFunc.argtypes = [
        ctypes.c_double, ctypes.c_double, ctypes.c_double,
        ctypes.c_int,
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ctypes.c_int,
        ndpointer(dtype=numpy.int32, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ctypes.c_double, ctypes.c_double,
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ndpointer(dtype=numpy.float64, flags=ndarrayFlags),
        ctypes.POINTER(ctypes.c_int)]

    # Remember the original memory order so it can be restored afterwards.
    f_cont = [angler.flags['F_CONTIGUOUS'],
              anglephi.flags['F_CONTIGUOUS'],
              anglez.flags['F_CONTIGUOUS']]
    angler = numpy.require(angler, dtype=numpy.float64, requirements=['C', 'W'])
    anglephi = numpy.require(anglephi, dtype=numpy.float64, requirements=['C', 'W'])
    anglez = numpy.require(anglez, dtype=numpy.float64, requirements=['C', 'W'])
    R = numpy.require(R, dtype=numpy.float64, requirements=['C', 'W'])
    vR = numpy.require(vR, dtype=numpy.float64, requirements=['C', 'W'])
    vT = numpy.require(vT, dtype=numpy.float64, requirements=['C', 'W'])
    z = numpy.require(z, dtype=numpy.float64, requirements=['C', 'W'])
    vz = numpy.require(vz, dtype=numpy.float64, requirements=['C', 'W'])
    phi = numpy.require(phi, dtype=numpy.float64, requirements=['C', 'W'])
    dxvOdJaT = numpy.require(dxvOdJaT, dtype=numpy.float64, requirements=['C', 'W'])
    dOdJT = numpy.require(dOdJT, dtype=numpy.float64, requirements=['C', 'W'])
    Omegar = numpy.require(Omegar, dtype=numpy.float64, requirements=['C', 'W'])
    Omegaphi = numpy.require(Omegaphi, dtype=numpy.float64, requirements=['C', 'W'])
    Omegaz = numpy.require(Omegaz, dtype=numpy.float64, requirements=['C', 'W'])

    # Run the C code.
    actionAngleTorus_JacFunc(ctypes.c_double(jr),
                             ctypes.c_double(jphi),
                             ctypes.c_double(jz),
                             ctypes.c_int(len(angler)),
                             angler, anglephi, anglez,
                             ctypes.c_int(npot), pot_type, pot_args,
                             ctypes.c_double(tol), ctypes.c_double(dJ),
                             R, vR, vT, z, vz, phi,
                             dxvOdJaT, dOdJT,
                             Omegar, Omegaphi, Omegaz,
                             ctypes.byref(flag))

    # Restore Fortran order on the inputs where it was present originally.
    if f_cont[0]:
        angler = numpy.asfortranarray(angler)
    if f_cont[1]:
        anglephi = numpy.asfortranarray(anglephi)
    if f_cont[2]:
        anglez = numpy.asfortranarray(anglez)

    dxvOdJaT = numpy.reshape(dxvOdJaT, (len(angler), 6, 6), order='C')
    dxvOdJa = numpy.swapaxes(dxvOdJaT, 1, 2)
    return (R, vR, vT, z, vz, phi, dxvOdJa,
            dOdJT.reshape((3, 3)).T,
            Omegar[0], Omegaphi[0], Omegaz[0], flag.value)
def _initDiskStats(self):
    """Parse and initialize block device I/O stats from /proc/diskstats.

    Populates ``self._diskStats`` (per-device counters, plus derived
    ``rbytes``/``wbytes``) and ``self._mapMajorMinor2dev`` ((major, minor)
    -> device name). Lines that are neither 13-column (2.6+ kernels) nor
    6-column (partition short form) are skipped.

    :raises IOError: if the stats file cannot be read.
    """
    self._diskStats = {}
    self._mapMajorMinor2dev = {}
    try:
        # `with` guarantees the handle is closed even if read() fails;
        # catch only I/O errors instead of the previous bare `except:`.
        with open(diskStatsFile, 'r') as fp:
            data = fp.read()
    except (IOError, OSError):
        raise IOError('Failed reading disk stats from file: %s'
                      % diskStatsFile)
    for line in data.splitlines():
        cols = line.split()
        dev = cols.pop(2)
        if len(cols) == 13:
            self._diskStats[dev] = dict(zip(
                ('major', 'minor', 'rios', 'rmerges', 'rsect', 'rticks',
                 'wios', 'wmerges', 'wsect', 'wticks', 'ios_active',
                 'totticks', 'rqticks'),
                [int(x) for x in cols]))
        elif len(cols) == 6:
            self._diskStats[dev] = dict(zip(
                ('major', 'minor', 'rios', 'rsect', 'wios', 'wsect'),
                [int(x) for x in cols]))
        else:
            continue
        # Derived byte counters from the sector counts.
        self._diskStats[dev]['rbytes'] = self._diskStats[dev]['rsect'] * sectorSize
        self._diskStats[dev]['wbytes'] = self._diskStats[dev]['wsect'] * sectorSize
        self._mapMajorMinor2dev[(int(cols[0]), int(cols[1]))] = dev
def remove_imports():
    """Remove Imports.

    Prompts the user for one or more ';'-separated import lines and
    removes them via ``sort_kate_imports``.
    """
    text, accepted = QtGui.QInputDialog.getText(
        None,
        'Remove Import',
        'Enter an import line to remove (example: os.path or from os import path):')
    if accepted:
        sort_kate_imports(remove_imports=text.split(";"))
def _return(self, load):
    '''Handle the return data sent from the minions.

    Takes the return, verifies it and fires it on the master event bus.
    Typically, this event is consumed by the Salt CLI waiting on the other
    end of the event bus but could be heard by any listener on the bus.

    :param dict load: The minion payload
    '''
    # Reject unsigned payloads outright when signing is mandatory.
    if self.opts['require_minion_sign_messages'] and 'sig' not in load:
        log.critical('_return: Master is requiring minions to sign their '
                     'messages, but there is no signature in this payload from '
                     '%s.', load['id'])
        return False

    if 'sig' in load:
        log.trace('Verifying signed event publish from minion')
        signature = load.pop('sig')
        pubkey_path = os.path.join(self.opts['pki_dir'],
                                   'minions/{0}'.format(load['id']))
        # The signature covers the payload *without* the 'sig' key.
        serialized = salt.serializers.msgpack.serialize(load)
        if not salt.crypt.verify_signature(pubkey_path, serialized, signature):
            log.info('Failed to verify event signature from minion %s.',
                     load['id'])
            if self.opts['drop_messages_signature_fail']:
                log.critical('Drop_messages_signature_fail is enabled, dropping '
                             'message from %s', load['id'])
                return False
            else:
                log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
        load['sig'] = signature

    try:
        salt.utils.job.store_job(self.opts, load,
                                 event=self.event,
                                 mminion=self.mminion)
    except salt.exceptions.SaltCacheError:
        log.error('Could not store job information for load: %s', load)
def store_file(self, folder, name):
    """Stores the uploaded file in the given path.

    Reads exactly Content-Length bytes from the request body and writes
    them to ``folder/name``; returns the resulting path.
    """
    path = os.path.join(folder, name)
    # Content-Length tells us how many bytes of body to consume.
    length = int(self.headers['content-length'])
    with open(path, 'wb') as sample:
        sample.write(self.rfile.read(length))
    return path
def serialize(self, data):
    """Determine & invoke the proper serializer method.

    If data is a list then the serialize_datas method will be run,
    otherwise serialize_data.
    """
    super(Serializer, self).serialize(data)

    body = {
        'jsonapi': {'version': goldman.config.JSONAPI_VERSION},
        'links': {'self': self.req.path},
        'meta': {
            'included_count': 0,
            'primary_count': 0,
            'total_primary': self.req.pages.total,
        },
    }

    included = data['included']
    if included:
        body['included'] = self._serialize_datas(included)
        body['meta']['included_count'] = len(included)

    _data = data['data']
    if isinstance(_data, list):
        body['data'] = self._serialize_datas(_data)
        body['links'] = self._serialize_pages()
        body['meta']['primary_count'] = len(_data)
    elif _data:
        body['data'] = self._serialize_data(_data)
        body['meta']['primary_count'] = 1
    else:
        body['data'] = None

    self.resp.body = json.dumps(body)
def newProp(self, name, value):
    """Create a new property carried by a node."""
    raw = libxml2mod.xmlNewProp(self._o, name, value)
    if raw is None:
        raise treeError('xmlNewProp() failed')
    return xmlAttr(_obj=raw)
def predict_log_proba(self, X):
    """Apply transforms, and predict_log_proba of the final estimator.

    Parameters
    ----------
    X : iterable
        Data to predict on. Must fulfill input requirements of first step
        of the pipeline.

    Returns
    -------
    y_score : array-like, shape = [n_samples, n_classes]
    """
    transformed, _, _ = self._transform(X)
    return self._final_estimator.predict_log_proba(transformed)
def get_channel_access(channel=14, read_mode='non_volatile', **kwargs):
    ''':param channel: number [1:7]
    :param read_mode:
        - non_volatile = get non-volatile Channel Access
        - volatile = get present volatile (active) setting of Channel Access
    :param kwargs:
        - api_host=127.0.0.1
        - api_user=admin
        - api_pass=example
        - api_port=623
        - api_kg=None

    Return Data
        A Python dict with the following keys/values:

        .. code-block:: python

            alerting:
            per_msg_auth:
            user_level_auth:
            access_mode:{ (ONE OF)
                0: 'disabled',
                1: 'pre_boot',
                2: 'always',
                3: 'shared'
            privilege_level: { (ONE OF)
                1: 'callback',
                2: 'user',
                3: 'operator',
                4: 'administrator',
                5: 'proprietary',

    CLI Examples:

    .. code-block:: bash

        salt-call ipmi.get_channel_access channel=1
    '''
    # NOTE(review): `read_mode` is accepted but never forwarded to the IPMI
    # call — confirm whether get_channel_access should receive it.
    with _IpmiCommand(**kwargs) as cmd:
        return cmd.get_channel_access(channel)
def obtainInfo(self):
    """Method for obtaining information about the movie.

    Populates ``self.url`` (and, when streaming, ``self.filesize``) from
    the formats reported by youtube-dl, honouring the ``audio``/``video``/
    ``format`` entries of ``self.preferences``.

    :returns: True on success.
    :raises ConnectionError: if youtube-dl fails to fetch the metadata.
    :raises ValueError: if streaming is requested but neither the audio nor
        the video preference is set (previously this path crashed with a
        NameError on the unbound ``fm``).
    """
    try:
        info = self.ytdl.extract_info(self.yid, download=False)
    except youtube_dl.utils.DownloadError:
        raise ConnectionError

    if not self.preferences['stream']:
        self.url = (info['requested_formats'][0]['url'],
                    info['requested_formats'][1]['url'])
        return True

    # Unknown filesize becomes infinity so it sorts last (least preferred).
    for f in info['formats']:
        if 'filesize' not in f or not f['filesize']:
            f['filesize'] = float('inf')

    # Quality is negated so an ascending sort yields best quality first,
    # then lowest filesize.
    aud = {(-int(f['abr']), f['filesize'], f['url'])
           for f in info['formats'] if f.get('abr') and not f.get('height')}
    vid = {(-int(f['height']), f['filesize'], f['url'])
           for f in info['formats'] if not f.get('abr') and f.get('height')}
    full = {(-int(f['height']), f['filesize'], f['url'])
            for f in info['formats'] if f.get('abr') and f.get('height')}

    try:
        _f = int(self.preferences.get('format'))
        # A valid requested format: prefer the closest value
        # (+ because the first tuple element is negated).
        _k = lambda x: abs(x[0] + _f)
    except (ValueError, TypeError):
        _k = lambda d: d

    if self.preferences['audio'] and self.preferences['video']:
        fm = sorted(full, key=_k)
    elif self.preferences['audio']:
        fm = sorted(aud, key=_k)
    elif self.preferences['video']:
        fm = sorted(vid, key=_k)
    else:
        # BUG FIX: previously fell through with `fm` unbound -> NameError.
        raise ValueError("either the 'audio' or 'video' preference must be set")

    filesize = 0
    i = -1
    try:
        # Some videos are problematic; look for a format with a non-zero
        # filesize, probing unknown sizes with a HEAD request.
        while filesize == 0:
            i += 1
            self.url = fm[i][2]
            if fm[i][1] == float('inf'):
                filesize = int(self.r_session.head(self.url).headers['content-length'])
            else:
                filesize = int(fm[i][1])
    except IndexError:
        # Finding a filesize failed for every format; fall back to a
        # non-stream download, which hopefully works.
        self.url = (info['requested_formats'][0]['url'],
                    info['requested_formats'][1]['url'])
        self.preferences['stream'] = False
        return True
    self.filesize = filesize
    return True
def star_stats_table(self):
    """Take the parsed stats from the STAR report and add them to the
    basic stats table at the top of the report."""
    headers = OrderedDict()
    headers['uniquely_mapped_percent'] = {
        'title': '% Aligned',
        'description': '% Uniquely mapped reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'YlGn',
    }
    headers['uniquely_mapped'] = {
        'title': '{} Aligned'.format(config.read_count_prefix),
        'description': 'Uniquely mapped reads ({})'.format(config.read_count_desc),
        'min': 0,
        'scale': 'PuRd',
        'modify': lambda x: x * config.read_count_multiplier,
        'shared_key': 'read_count',
    }
    self.general_stats_addcols(self.star_data, headers)
def set_unobserved_before(self, tlen, qlen, nt, p):
    """Set the unobservable sequence data before this base.

    :param tlen: target homopolymer length
    :param qlen: query homopolymer length
    :param nt: nucleotide
    :param p: probability of attributing this base to the unobserved error
    :type tlen: int
    :type qlen: int
    :type nt: char
    :type p: float
    """
    # Delegate straight to the underlying unobservable-data container.
    self._unobservable.set_before(tlen, qlen, nt, p)
def to_pydatetime(self):
    """Converts datetime2 object into Python's datetime.datetime object.

    @return: naive datetime.datetime
    """
    date_part = self._date.to_pydate()
    time_part = self._time.to_pytime()
    return datetime.datetime.combine(date_part, time_part)
def cublasChpr2(handle, uplo, n, alpha, x, inx, y, incy, AP):
    """Rank-2 operation on Hermitian-packed matrix.

    Wraps ``cublasChpr2_v2``: AP := alpha*x*y**H + conj(alpha)*y*x**H + AP.

    :param handle: cuBLAS context handle.
    :param uplo: which triangle of AP is stored ('u'/'l', mapped through
        _CUBLAS_FILL_MODE).
    :param n: order of the matrix.
    :param alpha: complex scalar.
    :param x, y: device pointers to the input vectors.
    :param inx, incy: strides of x and y.
    :param AP: device pointer to the packed Hermitian matrix.
    """
    status = _libcublas.cublasChpr2_v2(
        handle,
        _CUBLAS_FILL_MODE[uplo],
        n,
        ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag)),
        int(x),
        inx,  # BUG FIX: body referenced undefined `incx`; parameter is `inx`
        int(y),
        incy,
        int(AP))
    cublasCheckStatus(status)
def rand_zipfian(true_classes, num_sampled, range_max):
    """Draw random samples from an approximately log-uniform or Zipfian
    distribution.

    Samples *num_sampled* candidates with replacement from [0, range_max)
    under the base distribution
    P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1).
    Useful when the true classes approximately follow such a distribution,
    e.g. words sorted by decreasing frequency. Do not use if your classes
    are not ordered by decreasing frequency. Also returns the expected
    number of occurrences of each true class and each sampled candidate.

    Parameters
    ----------
    true_classes : Symbol
        The target classes in 1-D.
    num_sampled : int
        The number of classes to randomly sample.
    range_max : int
        The number of possible classes.

    Returns
    -------
    samples : Symbol
        The sampled candidate classes in 1-D `int64` dtype.
    expected_count_true : Symbol
        The expected count for true classes in 1-D `float64` dtype.
    expected_count_sample : Symbol
        The expected count for sampled candidates in 1-D `float64` dtype.
    """
    assert isinstance(true_classes, Symbol), \
        "unexpected type %s" % type(true_classes)
    log_range = math.log(range_max + 1)
    rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64')
    # Map the log-uniform draws back into the class range [0, range_max).
    sampled_classes = (rand.exp() - 1).astype('int64') % range_max

    true_classes = true_classes.astype('float64')
    expected_prob_true = ((true_classes + 2.0) / (true_classes + 1.0)).log() / log_range
    expected_count_true = expected_prob_true * num_sampled

    # Cast sampled classes to fp64 to avoid integer division.
    sampled_cls_fp64 = sampled_classes.astype('float64')
    expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range
    expected_count_sampled = expected_prob_sampled * num_sampled
    return sampled_classes, expected_count_true, expected_count_sampled
def i18n(msg, event=None, lang='en', domain='backend'):
    """Gettext function wrapper to return a message in a specified language
    by domain.

    To use internationalization (i18n) on your messages, import it as '_'
    and use as usual. Do not forget to supply the client's language setting.
    """
    # Prefer the client's language from the event when one is supplied.
    language = event.client.language if event is not None else lang
    return Domain(domain).get(language, msg)
def display(text, mode='exec', file=None):
    """Show `text`, rendered as AST and as Bytecode.

    Parameters
    ----------
    text : str
        Text of Python code to render.
    mode : {'exec', 'eval'}, optional
        Mode for `ast.parse` and `compile`. Default is 'exec'.
    file : None or file-like object, optional
        File to use to print output. If the default of `None` is passed,
        we use sys.stdout.
    """
    out = sys.stdout if file is None else file
    # Render the AST and the bytecode into separate buffers, then combine.
    ast_buf = StringIO()
    a(text, mode=mode, file=ast_buf)
    code_buf = StringIO()
    d(text, mode=mode, file=code_buf)
    print(_DISPLAY_TEMPLATE.format(text=text,
                                   ast=ast_buf.getvalue(),
                                   code=code_buf.getvalue()),
          file=out)
def get_attributes(self, attributes='All', callback=None):
    """Retrieves attributes about this queue object and returns them in an
    Attribute instance (subclass of a Dictionary).

    :type attributes: string
    :param attributes: String containing one of:
        ApproximateNumberOfMessages,
        ApproximateNumberOfMessagesNotVisible, VisibilityTimeout,
        CreatedTimestamp, LastModifiedTimestamp, Policy

    :rtype: Attribute object
    :return: An Attribute object which is a mapping type holding the
        requested name/value pairs
    """
    # Delegate to the connection, passing ourselves as the queue.
    conn = self.connection
    return conn.get_queue_attributes(self, attributes, callback=callback)
def fetch(code):
    """Fetch keywords by Code.

    Returns a dict mapping each keyword found in *code* to its occurrence
    count, after stripping string literals and filtering out numbers and
    single-character tokens.
    """
    counts = {}
    code = KeywordFetcher._remove_strings(code)
    for keyword in KeywordFetcher.prog.findall(code):
        if len(keyword) <= 1:
            continue  # ignore single-character tokens
        if keyword.isdigit():
            continue  # ignore plain numbers
        # Strip a leading '-'/'*' (pointers or negative numbers) and a
        # trailing '-'/'*'.
        if keyword[0] in ('-', '*'):
            keyword = keyword[1:]
        if keyword[-1] in ('-', '*'):
            keyword = keyword[:-1]
        if len(keyword) <= 1:
            continue
        counts[keyword] = counts.get(keyword, 0) + 1
    return counts
def _set_ipv6_interface(self, v, load=False):
    """Setter method for ipv6_interface, mapped from YANG variable
    /rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/ipv6_track/ipv6_interface (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_ipv6_interface is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_ipv6_interface() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG list type; TypeError/ValueError
        # means the value is not coercible to the list schema.
        t = YANGDynClass(
            v,
            base=YANGListType(
                "ipv6_interface_type ipv6_interface_name",
                ipv6_interface.ipv6_interface,
                yang_name="ipv6-interface",
                rest_name="interface",
                parent=self,
                is_container='list',
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys='ipv6-interface-type ipv6-interface-name',
                extensions={u'tailf-common': {u'callpoint': u'AnycastGatewayLocalIpv6TrackInterfaceConfig', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-incomplete-command': None, u'cli-no-match-completion': None, u'cli-full-no': None}}),
            is_container='list',
            yang_name="ipv6-interface",
            rest_name="interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'callpoint': u'AnycastGatewayLocalIpv6TrackInterfaceConfig', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-incomplete-command': None, u'cli-no-match-completion': None, u'cli-full-no': None}},
            namespace='urn:brocade.com:mgmt:brocade-anycast-gateway',
            defining_module='brocade-anycast-gateway',
            yang_type='list',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ipv6_interface must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("ipv6_interface_type ipv6_interface_name",ipv6_interface.ipv6_interface, yang_name="ipv6-interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ipv6-interface-type ipv6-interface-name', extensions={u'tailf-common': {u'callpoint': u'AnycastGatewayLocalIpv6TrackInterfaceConfig', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-incomplete-command': None, u'cli-no-match-completion': None, u'cli-full-no': None}}), is_container='list', yang_name="ipv6-interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'AnycastGatewayLocalIpv6TrackInterfaceConfig', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-incomplete-command': None, u'cli-no-match-completion': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='list', is_config=True)""",
        })
    self.__ipv6_interface = t
    if hasattr(self, '_set'):
        self._set()
def set_handler(self, handler):
    """Register an asynchronous callback handler object.

    :param handler: the callback handler; must be an instance of a subclass
        of one of the handler base classes:
        StockQuoteHandlerBase (quote), OrderBookHandlerBase (order book),
        CurKlineHandlerBase (real-time kline), TickerHandlerBase (ticker),
        RTDataHandlerBase (minute data), BrokerHandlerBase (broker queue).
    :return: RET_OK on success, RET_ERROR on failure.
    """
    with self._lock:
        if self._handler_ctx is not None:
            return self._handler_ctx.set_handler(handler)
        return RET_ERROR
def embedding_to_padding(emb):
    """Input embeddings -> is_padding.

    Returns a float tensor that is 1.0 where the embedding vector along the
    last axis is all-zero (i.e. padding) and 0.0 elsewhere; the reduced
    axis is kept with size 1.
    """
    magnitude = tf.reduce_sum(tf.abs(emb), axis=-1, keep_dims=True)
    is_zero = tf.equal(magnitude, 0.0)
    return tf.to_float(is_zero)
def update(self, **kwargs):
    """Updates the matching objects for specified fields.

    Note:
        Post/pre save hooks and signals will NOT be triggered.
        Unlike RDBMS systems, this method makes individual save calls to
        the backend DB store, so it exists as a convenience utility rather
        than a performance enhancement.

    Keyword Args:
        \\*\\*kwargs: Fields with their corresponding values to be updated.

    Returns:
        Int. Number of updated objects.

    Example:
        .. code-block:: python

            Entry.objects.filter(pub_date__lte=2014).update(comments_on=False)
    """
    # NOTE(review): a 'simple_update' kwarg was previously read into an
    # unused local (and still passed through to _load_data); the dead read
    # has been removed without changing behaviour.
    no_of_updates = 0
    for model in self:
        no_of_updates += 1
        model._load_data(kwargs)
        model.save(internal=True)
    return no_of_updates
def close(self):
    """Close any existing BFD structure before opening a new one.

    Releases the underlying BFD pointer (if any) and clears it, so the
    object can be reused. Translates a low-level TypeError from the C
    binding into a BfdException.

    :raises BfdException: if the underlying close call fails.
    """
    if self._ptr:
        try:
            _bfd.close(self._ptr)
        # BUG FIX: was Python-2-only `except TypeError, err` syntax, which
        # is a SyntaxError on Python 3; `as` works on both.
        except TypeError as err:
            raise BfdException("Unable to close bfd (%s)" % err)
        finally:
            # Always drop the stale pointer, even if close() raised.
            self._ptr = None
def add_dataset(self, name=None, label=None, x_column_label=None,
                y_column_label=None, index=None, control=False):
    """Add a dataset to a specific plot.

    Handles adding new files as well as indexing files that were already
    added to other plots. See DataImportContainer attributes for the
    in-code defaults.

    Args:
        name (str, optional): Name (path) for the file. Required when
            reading from a file and file_name is not in "general".
        label (str, optional): Column label in the dataset corresponding
            to the desired SNR value.
        x_column_label/y_column_label (str, optional): Column labels from
            the input file identifying x/y values; override "general".
        index (int, optional): Index of a plot with preloaded data.
            Required when not loading a file.
        control (bool, optional): If True, this dataset becomes the
            control (the baseline for Ratio plots). Default False.

    Raises:
        ValueError: If neither file information nor an index is supplied.
    """
    if name is None and label is None and index is None:
        # Fix: the two concatenated literals previously rendered as
        # "...withoutsupplying..." (missing separating space).
        raise ValueError("Attempting to add a dataset without "
                         "supplying index or file information.")
    if index is None:
        trans_dict = DataImportContainer()
        if name is not None:
            trans_dict.file_name = name
        if label is not None:
            trans_dict.label = label
        if x_column_label is not None:
            trans_dict.x_column_label = x_column_label
        if y_column_label is not None:
            trans_dict.y_column_label = y_column_label
        if control:
            self.control = trans_dict
        else:
            # Need to append the file to the file list (created lazily).
            if 'file' not in self.__dict__:
                self.file = []
            self.file.append(trans_dict)
    else:
        if control:
            self.control = DataImportContainer()
            self.control.index = index
        else:
            # Need to append the index to the index list (created lazily).
            if 'indices' not in self.__dict__:
                self.indices = []
            self.indices.append(index)
    return
def delete_virtual_mfa_device(iam_client, mfa_serial):
    """Delete a virtual MFA device given its serial number.

    Best-effort: failures are reported via printException/printError and
    swallowed rather than propagated.

    :param iam_client: IAM API client exposing delete_virtual_mfa_device
    :param mfa_serial: serial number (ARN) of the virtual MFA device
    :return: None
    """
    try:
        printInfo('Deleting MFA device %s...' % mfa_serial)
        iam_client.delete_virtual_mfa_device(SerialNumber=mfa_serial)
    except Exception as e:
        # Removed a dead trailing 'pass' statement; the handler already
        # ends after reporting the error.
        printException(e)
        printError('Failed to delete MFA device %s' % mfa_serial)
def _log_multivariate_normal_density_spherical(X, means, covars):
    """Compute Gaussian log-density at X for a spherical model.

    The per-component scalar variances are expanded to full diagonal
    covariances, then the diagonal implementation does the actual work.
    """
    expanded = covars.copy()
    if expanded.ndim == 1:
        # One scalar variance per component: add a feature axis.
        expanded = expanded[:, np.newaxis]
    if expanded.shape[1] == 1:
        # Broadcast the single variance across all features.
        expanded = np.tile(expanded, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, expanded)
def remove_directory(self, directory_name, *args, **kwargs):
    """:meth:`.WNetworkClientProto.remove_directory` method implementation

    Removes the named directory under the current session path via the
    DAV client; refuses to remove a non-directory entry.
    """
    dav = self.dav_client()
    target = self.join_path(self.session_path(), directory_name)
    # Only remove entries the DAV client explicitly reports as directories.
    if dav.is_dir(target) is False:
        raise ValueError('Unable to remove non-directory entry')
    dav.clean(target)
def add_comment_to_issue(self, issue, comment, visibility=None):
    """Add a comment to the specified issue as the current user.

    Arguments:
        | issue (string)      | A JIRA issue ID or key |
        | comment (string)    | Body of text to add as a comment |
        | visibility (string) | (Optional) |

    Example:
        | add comment to issue | ${issue} | Starting work on this issue |
    """
    # NOTE(review): 'visibility' is accepted but not forwarded to the JIRA
    # client here — confirm whether that is intentional.
    self.jira.add_comment(issue=issue, body=comment)
def prof_pressure(altitude, z_coef=(1.94170e-9, -5.14580e-7, 4.57018e-5,
                                    -1.55620e-3, -4.61994e-2, 2.99955)):
    """Return pressure for given altitude.

    Evaluates a polynomial in altitude whose value is log10(pressure).

    Parameters
    ----------
    altitude : array-like
        Altitude values [km].
    z_coef : array-like
        Polynomial coefficients (defaults fit the US Standard Atmosphere,
        0-100 km; accuracy ~1% over that range, ~0.5% below 30 km).

    Returns
    -------
    pressure : array-like
        Pressure values [hPa], same shape as the altitude input.

    Examples
    --------
    >>> prof_pressure([0, 10, 20])
    array([ 998.96437334,  264.658697  ,   55.28114631])
    """
    alt = np.asarray(altitude)
    # The polynomial gives log10 of the pressure; exponentiate to hPa.
    log10_pressure = np.polyval(z_coef, alt.ravel())
    return np.reshape(np.power(10, log10_pressure), alt.shape)
def QA_fetch_stock_day_full_adv(date):
    '''Return full-market daily data for a single date.

    :param date: the trading date to fetch
    :return: a QA_DataStruct_Stock_day instance, or None when the
        underlying fetch returns no data
    '''
    # 🛠 todo validate the date parameter
    raw = QA_fetch_stock_full(date, 'pd')
    if raw is None:
        print("QA Error QA_fetch_stock_day_full_adv parameter date=%s call QA_fetch_stock_full return None" % (date))
        return None
    # Index by (date, code) as expected by the data structure wrapper.
    return QA_DataStruct_Stock_day(raw.set_index(['date', 'code']))
def mark(self, n=1):
    """Mark the occurrence of a given number of events.

    Updates the running count and each of the 1/5/15-minute rate trackers.
    """
    self.tick_if_necessary()
    self.count += n
    for rate in (self.m1_rate, self.m5_rate, self.m15_rate):
        rate.update(n)
def cost(a, b, c, e, f, p_min, p):
    """Fuel cost from "standard" parameters with valve-point loading effect.

    Quadratic cost a + b*p + c*p^2 plus the rectified sinusoidal
    valve-point term |e * sin(f * (p_min - p))|.
    """
    quadratic = a + b * p + c * p * p
    valve_point = abs(e * math.sin(f * (p_min - p)))
    return quadratic + valve_point
def deriv(*args):
    """Perform numerical differentiation using 3-point Lagrangian interpolation.

    Port of IDL's DERIV (see Hildebrand, Introduction to Numerical
    Analysis, McGraw-Hill, 1956, p. 82).

    :param args: either ``(y,)`` — derivative assuming unit spacing — or
        ``(x, y)`` — dy/dx with (possibly unequal) spacing ``x``.
    :return: numpy array with the derivative, same length as the input.
    :raises Exception: if the input has fewer than 3 points or x and y
        differ in size.
    """
    x = args[0]
    n = x.size
    if n < 3:
        raise Exception('Parameters must have at least 3 points')
    if len(args) == 2:
        y = args[1]
        if n != y.size:
            # Bug fix: this used to be a Python-2-style string raise,
            # which is itself a TypeError on Python 3.
            raise Exception('Vectors must have same size')
        if isinstance(x, np.ma.masked_array):
            x = x.data  # convert masked arrays to classic arrays
        # Bug fix: np.float was removed from numpy and the astype()
        # result was previously discarded; rebind to a floating copy.
        if not np.issubdtype(x.dtype, np.floating):
            x = x.astype(float)
        # df/dx = y0*(2x-x1-x2)/(x01*x02) + y1*(2x-x0-x2)/(x10*x12)
        #       + y2*(2x-x0-x1)/(x20*x21), where x01 = x0-x1, etc.
        x12 = x - np.roll(x, -1)              # x1 - x2
        x01 = np.roll(x, 1) - x               # x0 - x1
        x02 = np.roll(x, 1) - np.roll(x, -1)  # x0 - x2
        # Middle points (the wrap-around ends are overwritten below).
        d = (np.roll(y, 1) * (x12 / (x01 * x02))
             + y * (1. / x12 - 1. / x01)
             - np.roll(y, -1) * (x01 / (x02 * x12)))
        # First point
        d[0] = (y[0] * (x01[1] + x02[1]) / (x01[1] * x02[1])
                - y[1] * x02[1] / (x01[1] * x12[1])
                + y[2] * x01[1] / (x02[1] * x12[1]))
        # Last point
        n2 = n - 2
        d[n - 1] = (-y[n - 3] * x12[n2] / (x01[n2] * x02[n2])
                    + y[n - 2] * x02[n2] / (x01[n2] * x12[n2])
                    - y[n - 1] * (x02[n2] + x12[n2]) / (x02[n2] * x12[n2]))
    else:
        # Equally spaced points: central differences in the interior,
        # one-sided 3-point formulas at the ends.
        d = (np.roll(x, -1) - np.roll(x, 1)) / 2.
        d[0] = (-3.0 * x[0] + 4.0 * x[1] - x[2]) / 2.
        d[n - 1] = (3. * x[n - 1] - 4. * x[n - 2] + x[n - 3]) / 2.
    return d
def _create_source(self, src):
    """Create a pyLikelihood Source object from a
    `~fermipy.roi_model.Model` object.

    The spatial model is selected from src['SpatialType'] and the
    spectrum from src['SpectrumType']; the assembled pyLike source is
    named after the model and returned.
    """
    # --- Spatial model -------------------------------------------------
    if src['SpatialType'] == 'SkyDirFunction':
        # Point source at the model's sky direction.
        pylike_src = pyLike.PointSource(self.like.logLike.observation())
        pylike_src.setDir(src.skydir.ra.deg, src.skydir.dec.deg, False, False)
    elif src['SpatialType'] == 'SpatialMap':
        # Diffuse source defined by a FITS template map.
        filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))
        sm = pyLike.SpatialMap(filepath)
        pylike_src = pyLike.DiffuseSource(sm, self.like.logLike.observation(), False)
    elif src['SpatialType'] == 'RadialProfile':
        # Diffuse source from a 1D radial profile file, centered on (ra, dec).
        filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))
        sm = pyLike.RadialProfile(filepath)
        sm.setCenter(src['ra'], src['dec'])
        pylike_src = pyLike.DiffuseSource(sm, self.like.logLike.observation(), False)
    elif src['SpatialType'] == 'RadialGaussian':
        # Gaussian extended source; width taken from the 'Sigma' parameter.
        sm = pyLike.RadialGaussian(src.skydir.ra.deg, src.skydir.dec.deg,
                                   src.spatial_pars['Sigma']['value'])
        pylike_src = pyLike.DiffuseSource(sm, self.like.logLike.observation(), False)
    elif src['SpatialType'] == 'RadialDisk':
        # Uniform disk extended source; size from the 'Radius' parameter.
        sm = pyLike.RadialDisk(src.skydir.ra.deg, src.skydir.dec.deg,
                               src.spatial_pars['Radius']['value'])
        pylike_src = pyLike.DiffuseSource(sm, self.like.logLike.observation(), False)
    elif src['SpatialType'] == 'MapCubeFunction':
        # 3D (energy-dependent) diffuse template.
        filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))
        mcf = pyLike.MapCubeFunction2(filepath)
        pylike_src = pyLike.DiffuseSource(mcf, self.like.logLike.observation(), False)
    else:
        # NOTE(review): the message uses logging-style args with Exception,
        # so the %s is not interpolated — confirm intended.
        raise Exception('Unrecognized spatial type: %s', src['SpatialType'])
    # --- Spectral model ------------------------------------------------
    if src['SpectrumType'] == 'FileFunction':
        # Tabulated spectrum read from file.
        fn = gtutils.create_spectrum_from_dict(src['SpectrumType'], src.spectral_pars)
        file_function = pyLike.FileFunction_cast(fn)
        filename = str(os.path.expandvars(src['Spectrum_Filename']))
        file_function.readFunction(filename)
    elif src['SpectrumType'] == 'DMFitFunction':
        # Dark-matter fit spectrum; parameters applied on a fresh instance,
        # then the tabulated function is loaded from file.
        fn = pyLike.DMFitFunction()
        fn = gtutils.create_spectrum_from_dict(src['SpectrumType'], src.spectral_pars, fn)
        filename = str(os.path.expandvars(src['Spectrum_Filename']))
        fn.readFunction(filename)
    else:
        # Any other analytic spectrum type.
        fn = gtutils.create_spectrum_from_dict(src['SpectrumType'], src.spectral_pars)
    pylike_src.setSpectrum(fn)
    pylike_src.setName(str(src.name))
    return pylike_src
def get_default_config(self):
    """Return the default collector settings.

    Extends the parent collector's defaults with this collector's path.
    """
    config = super(KVMCollector, self).get_default_config()
    config['path'] = 'kvm'
    return config
def accept(self, visitor: "BaseVisitor[ResultT]") -> ResultT:
    """Traverses the game in PGN order using the given *visitor*.

    Returns the *visitor* result. Visitors can return ``SKIP`` from
    ``begin_game()`` / ``end_headers()`` to prune the traversal.
    """
    if visitor.begin_game() is not SKIP:
        # Emit all header tag pairs first, PGN-style.
        for tagname, tagvalue in self.headers.items():
            visitor.visit_header(tagname, tagvalue)
        if visitor.end_headers() is not SKIP:
            board = self.board()
            visitor.visit_board(board)
            if self.comment:
                visitor.visit_comment(self.comment)
            # Follow the mainline (first variation) recursively.
            if self.variations:
                self.variations[0].accept(visitor, _parent_board=board)
        # The game termination marker, defaulting to '*' (unknown).
        visitor.visit_result(self.headers.get("Result", "*"))
    visitor.end_game()
    return visitor.result()
def _parse_segments ( self ) : """Read the segment output file and parse into an SQLite database ."""
reader = csv . reader ( open ( self . _segment_file , 'rU' ) , delimiter = '\t' ) for row in reader : if reader . line_num == 1 : # skip header continue sql = '''INSERT INTO segments (id, multiplicon, genome, list, first, last, ord) VALUES (?,?,?,?,?,?,?)''' self . _dbconn . execute ( sql , row ) self . _dbconn . commit ( )
def with_headers(self, headers=None, **params):
    """Add headers to the request.

    :param headers: a dict, or a list/tuple of (key, value) pairs
    :param params: additional headers as keyword arguments
    :return: self, allowing call chaining
    """
    if isinstance(headers, (tuple, list)):
        headers = dict(headers)
    if params:
        if isinstance(headers, dict):
            headers.update(params)
        elif headers is None:
            headers = params
    # Bug fix: calling with no arguments used to crash with TypeError
    # because dict.update(None) is invalid; only update when there is
    # something to add.
    if headers:
        self._headers.update(headers)
    return self
def mgmt_nw_id(cls):
    """Returns the id of the management network.

    The lookup result is cached on the class (``_mgmt_nw_uuid`` and
    ``_mgmt_subnet_uuid``); subsequent calls return the cached id.
    Returns None (implicitly) when the network cannot be determined.
    """
    if cls._mgmt_nw_uuid is None:
        tenant_id = cls.l3_tenant_id()
        if not tenant_id:
            # Without an L3 admin tenant we cannot look anything up.
            return
        # Look up the management network by tenant and configured name.
        net = bc.get_plugin().get_networks(
            bc.context.get_admin_context(),
            {'tenant_id': [tenant_id],
             'name': [cfg.CONF.general.management_network]},
            ['id', 'subnets'])
        if len(net) == 1:
            num_subnets = len(net[0]['subnets'])
            if num_subnets == 0:
                LOG.error('The management network has no subnet. '
                          'Please assign one.')
                return
            elif num_subnets > 1:
                LOG.info('The management network has %d subnets. The '
                         'first one will be used.', num_subnets)
            # Cache both the network id and its first subnet id.
            cls._mgmt_nw_uuid = net[0].get('id')
            cls._mgmt_subnet_uuid = net[0]['subnets'][0]
        elif len(net) > 1:
            # Management network must have a unique name.
            LOG.error('The management network for does not have '
                      'unique name. Please ensure that it is.')
        else:
            # Management network has not been created.
            LOG.error('There is no virtual management network. Please '
                      'create one.')
    return cls._mgmt_nw_uuid
def set_servo(self, channel, pwm):
    '''Set a servo output to the given PWM value via MAV_CMD_DO_SET_SERVO.'''
    self.mav.command_long_send(
        self.target_system,
        self.target_component,
        mavlink.MAV_CMD_DO_SET_SERVO,
        0,        # confirmation
        channel,  # param1: servo output channel
        pwm,      # param2: PWM value
        0, 0, 0, 0, 0)
def transform_record(self, pid, record, links_factory=None):
    """Transform a record into an intermediate representation.

    :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
        instance.
    :param record: The :class:`invenio_records.api.Record` instance.
    :param links_factory: The link factory. (Default: ``None``)
    :returns: The intermediate representation for the record.
    """
    preprocessed = self.preprocess_record(
        pid, record, links_factory=links_factory)
    return self.dump(preprocessed)
def do_counter_reset(self, element, decl, pseudo):
    """Clear the counters named in a CSS counter-reset declaration.

    Walks the declaration's token stream: each identifier names a counter
    and may be followed by a number giving its reset value (default 0).
    """
    step = self.state[self.state['current_step']]
    counter_name = ''  # identifier awaiting its (optional) numeric value
    for term in decl.value:
        if type(term) is ast.WhitespaceToken:
            continue
        elif type(term) is ast.IdentToken:
            # A new identifier: flush any pending counter with default 0.
            if counter_name:
                step['counters'][counter_name] = 0
            counter_name = term.value
        elif type(term) is ast.LiteralToken:
            # Literal separator: flush the pending counter with default 0.
            if counter_name:
                step['counters'][counter_name] = 0
            counter_name = ''
        elif type(term) is ast.NumberToken:
            # Explicit reset value for the pending counter.
            if counter_name:
                step['counters'][counter_name] = int(term.value)
            counter_name = ''
        else:
            log(WARN, u"Unrecognized counter-reset term {}".format(type(term)).encode('utf-8'))
    # Flush a trailing identifier that had no explicit value.
    if counter_name:
        step['counters'][counter_name] = 0
def getxattr ( self , req , ino , name , size ) : """Set an extended attribute Valid replies : reply _ buf reply _ data reply _ xattr reply _ err"""
self . reply_err ( req , errno . ENOSYS )
def update_items(self, ocean_backend, enrich_backend):
    """Retrieve the commits not present in the original repository and
    delete the corresponding documents from the raw and enriched indexes.

    Commits found in the raw index but no longer reachable in the git
    repository are removed in batches of MAX_BULK_UPDATE_SIZE; branch
    information in the enriched index is then refreshed.
    """
    fltr = {'name': 'origin', 'value': [self.perceval_backend.origin]}
    logger.debug("[update-items] Checking commits for %s.", self.perceval_backend.origin)
    git_repo = GitRepository(self.perceval_backend.uri, self.perceval_backend.gitpath)
    try:
        # All commit hashes currently reachable in the repository.
        current_hashes = set([commit for commit in git_repo.rev_list()])
    except Exception as e:
        logger.error("Skip updating branch info for repo %s, git rev-list command failed: %s", git_repo.uri, e)
        return
    # All commit hashes stored in the raw index for this origin.
    raw_hashes = set([item['data']['commit']
                      for item in ocean_backend.fetch(ignore_incremental=True, _filter=fltr)])
    # Hashes present in the index but gone from the repository.
    hashes_to_delete = list(raw_hashes.difference(current_hashes))
    to_process = []
    for _hash in hashes_to_delete:
        to_process.append(_hash)
        # Flush deletions in batches of MAX_BULK_UPDATE_SIZE.
        if len(to_process) != MAX_BULK_UPDATE_SIZE:
            continue
        # delete documents from the raw index
        self.remove_commits(to_process, ocean_backend.elastic.index_url,
                            'data.commit', self.perceval_backend.origin)
        # delete documents from the enriched index
        self.remove_commits(to_process, enrich_backend.elastic.index_url,
                            'hash', self.perceval_backend.origin)
        to_process = []
    # Flush the final, possibly partial, batch.
    if to_process:
        # delete documents from the raw index
        self.remove_commits(to_process, ocean_backend.elastic.index_url,
                            'data.commit', self.perceval_backend.origin)
        # delete documents from the enriched index
        self.remove_commits(to_process, enrich_backend.elastic.index_url,
                            'hash', self.perceval_backend.origin)
    logger.debug("[update-items] %s commits deleted from %s with origin %s.",
                 len(hashes_to_delete),
                 ocean_backend.elastic.anonymize_url(ocean_backend.elastic.index_url),
                 self.perceval_backend.origin)
    logger.debug("[update-items] %s commits deleted from %s with origin %s.",
                 len(hashes_to_delete),
                 enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url),
                 self.perceval_backend.origin)
    # update branch info
    self.delete_commit_branches(enrich_backend)
    self.add_commit_branches(git_repo, enrich_backend)
def get_env():
    """Return the capitalized environment name.

    It can be used to retrieve the class-based config.
    Default: "Development".

    :returns: str
    """
    raw = os.environ.get(_env_key)
    if raw is None:
        return "Development"
    return raw.lower().capitalize()
def enterbox(message='Enter something.', title='', argDefaultText=''):
    """Show a box in which a user can enter some text.

    You may optionally specify some default text, which will appear in
    the enterbox when it is displayed.

    :return: the text that the user entered, or None if the operation
        was cancelled.
    """
    return psidialogs.ask_string(
        message=message,
        title=title,
        default=argDefaultText,
    )
def require_login(self, view_func):
    """Decorate view functions that require a logged-in user.

    If the user is not already logged in, they will be sent to the
    Provider to log in, after which they will be returned.

    .. versionadded:: 1.0
       This was :func:`check` before.
    """
    @wraps(view_func)
    def decorated(*args, **kwargs):
        if g.oidc_id_token is not None:
            # Already authenticated: run the wrapped view.
            return view_func(*args, **kwargs)
        # Not logged in: bounce to the provider, returning here afterwards.
        return self.redirect_to_auth_server(request.url)
    return decorated
def _write_arg_read_code(builder, arg, args, name):
    """Writes the read code for the given argument, setting the arg.name
    variable to its read value.

    :param builder: The source code builder
    :param arg: The argument to write
    :param args: All the other arguments in TLObject same on_send.
                 This is required to determine the flags value
    :param name: The name of the argument. Defaults to "self.argname"
                 This argument is an option because it's required when
                 writing Vectors<>
    """
    if arg.generic_definition:
        return  # Do nothing, this only specifies a later type
    # The argument may be a flag, only write that flag was given!
    was_flag = False
    if arg.is_flag:
        # Treat 'true' flags as a special case, since they're true if
        # they're set, and nothing else needs to actually be read.
        if 'true' == arg.type:
            builder.writeln('{} = bool(flags & {})', name, 1 << arg.flag_index)
            return
        was_flag = True
        builder.writeln('if flags & {}:', 1 << arg.flag_index)
        # Temporary disable .is_flag not to enter this if
        # again when calling the method recursively
        arg.is_flag = False
    if arg.is_vector:
        if arg.use_vector_id:
            # We have to read the vector's constructor ID
            builder.writeln("reader.read_int()")
        builder.writeln('{} = []', name)
        builder.writeln('for _ in range(reader.read_int()):')
        # Temporary disable .is_vector, not to enter this if again
        # (the recursive call writes the code that reads one element)
        arg.is_vector = False
        _write_arg_read_code(builder, arg, args, name='_x')
        builder.writeln('{}.append(_x)', name)
        arg.is_vector = True
    elif arg.flag_indicator:
        # Read the flags, which will indicate what items we should read next
        builder.writeln('flags = reader.read_int()')
        builder.writeln()
    elif 'int' == arg.type:
        builder.writeln('{} = reader.read_int()', name)
    elif 'long' == arg.type:
        builder.writeln('{} = reader.read_long()', name)
    elif 'int128' == arg.type:
        builder.writeln('{} = reader.read_large_int(bits=128)', name)
    elif 'int256' == arg.type:
        builder.writeln('{} = reader.read_large_int(bits=256)', name)
    elif 'double' == arg.type:
        builder.writeln('{} = reader.read_double()', name)
    elif 'string' == arg.type:
        builder.writeln('{} = reader.tgread_string()', name)
    elif 'Bool' == arg.type:
        builder.writeln('{} = reader.tgread_bool()', name)
    elif 'true' == arg.type:
        # Arbitrary not-None value, don't actually read "true" flags
        builder.writeln('{} = True', name)
    elif 'bytes' == arg.type:
        builder.writeln('{} = reader.tgread_bytes()', name)
    elif 'date' == arg.type:
        # Custom format
        builder.writeln('{} = reader.tgread_date()', name)
    else:
        # Else it may be a custom type
        if not arg.skip_constructor_id:
            builder.writeln('{} = reader.tgread_object()', name)
        else:
            # Import the correct type inline to avoid cyclic imports.
            # There may be better solutions so that we can just access
            # all the types before the files have been parsed, but I
            # don't know of any.
            sep_index = arg.type.find('.')
            if sep_index == -1:
                ns, t = '.', arg.type
            else:
                ns, t = '.' + arg.type[:sep_index], arg.type[sep_index + 1:]
            class_name = snake_to_camel_case(t)
            # There would be no need to import the type if we're in the
            # file with the same namespace, but since it does no harm
            # and we don't have information about such thing in the
            # method we just ignore that case.
            builder.writeln('from {} import {}', ns, class_name)
            builder.writeln('{} = {}.from_reader(reader)', name, class_name)
    # End vector and flag blocks if required (if we opened them before)
    if arg.is_vector:
        builder.end_block()
    if was_flag:
        builder.current_indent -= 1
        builder.writeln('else:')
        builder.writeln('{} = None', name)
        builder.current_indent -= 1
        # Restore .is_flag
        arg.is_flag = True
def _get_kdjj ( df , n_days ) : """Get the J of KDJ J = 3K - 2D : param df : data : param n _ days : calculation range : return : None"""
k_column = 'kdjk_{}' . format ( n_days ) d_column = 'kdjd_{}' . format ( n_days ) j_column = 'kdjj_{}' . format ( n_days ) df [ j_column ] = 3 * df [ k_column ] - 2 * df [ d_column ]
async def build_req_creds_json(self, creds: dict, filt: dict = None, filt_dflt_incl: bool = False) -> str:
    """Build and return indy-sdk requested-credentials json from an
    indy-sdk creds structure through the specified filter.

    :param creds: indy-sdk creds structure
    :param filt: filter mapping cred def ids to (optionally) 'attr-match'
        (dict mapping attributes to values; omit/empty/None to match all)
        and/or 'minima' (predicate integer lower-bounds of interest;
        omit/empty/None to match all). When both are present for a cred
        def they combine conjunctively, as do multiple minima. Omit the
        parameter (or pass {}/None) for no filtering at all.
    :param filt_dflt_incl: whether to include (True) or exclude (False)
        creds whose cred def the filter does not mention; irrelevant when
        there is no filter.
    :return: indy-sdk requested_credentials json for use in proof creation
    """
    LOGGER.debug('HolderProver.build_req_creds_json >>> creds: %s, filt: %s', creds, filt)
    req_creds = {
        'self_attested_attributes': {},
        'requested_attributes': {},
        'requested_predicates': {}
    }

    def _add_cred(cred, uuid, key):
        # Record the credential under the given referent uuid, carrying
        # forward its non-revocation interval timestamp when present.
        nonlocal req_creds
        req_creds[key][uuid] = {
            'cred_id': cred['cred_info']['referent'],
            'revealed': True
        }
        if cred.get('interval', None):
            req_creds[key][uuid]['timestamp'] = cred['interval']['to']
        if key == 'requested_attributes':
            req_creds[key][uuid]['revealed'] = True

    if filt:
        # Drop filter criteria whose cred def does not exist.
        # Bug fix: iterate over a snapshot — popping from the dict while
        # iterating it raises RuntimeError on Python 3.
        for cd_id in list(filt):
            try:
                json.loads(await self.get_cred_def(cd_id))
            except AbsentCredDef:
                LOGGER.warning(
                    'HolderProver.build_req_creds_json: ignoring filter criterion, no cred def on %s', cd_id)
                filt.pop(cd_id)
    for attr_uuid in creds.get('attrs', {}):
        for cred in creds['attrs'][attr_uuid]:
            if attr_uuid in req_creds['requested_attributes']:
                continue  # first acceptable candidate wins
            cred_info = cred['cred_info']
            cred_cd_id = cred_info['cred_def_id']
            if filt:
                if cred_cd_id not in filt:
                    if filt_dflt_incl:
                        _add_cred(cred, attr_uuid, 'requested_attributes')
                    continue
                if cred_cd_id in filt and 'attr-match' in (filt[cred_cd_id] or {}):  # maybe filt[cred_cd_id]: None
                    # All specified attribute values must match (as strings).
                    if not {k: str(filt[cred_cd_id].get('attr-match', {})[k])
                            for k in filt[cred_cd_id].get('attr-match', {})}.items() <= cred_info['attrs'].items():
                        continue
                _add_cred(cred, attr_uuid, 'requested_attributes')
            else:
                _add_cred(cred, attr_uuid, 'requested_attributes')
    for pred_uuid in creds.get('predicates', {}):
        for cred in creds['predicates'][pred_uuid]:
            if pred_uuid in req_creds['requested_predicates']:
                continue  # first acceptable candidate wins
            cred_info = cred['cred_info']
            cred_cd_id = cred_info['cred_def_id']
            if filt:
                if cred_cd_id not in filt:
                    if filt_dflt_incl:
                        _add_cred(cred, pred_uuid, 'requested_predicates')
                    continue
                if cred_cd_id in filt and 'minima' in (filt[cred_cd_id] or {}):  # maybe filt[cred_cd_id]: None
                    minima = filt[cred_cd_id].get('minima', {})
                    try:
                        # Every minimum must be met by the cred's attribute.
                        if any((attr not in cred_info['attrs'])
                               or (int(cred_info['attrs'][attr]) < int(minima[attr]))
                               for attr in minima):
                            continue
                    except ValueError:
                        continue  # int conversion failed - reject candidate
                _add_cred(cred, pred_uuid, 'requested_predicates')
            else:
                _add_cred(cred, pred_uuid, 'requested_predicates')
    rv_json = json.dumps(req_creds)
    LOGGER.debug('HolderProver.build_req_creds_json <<< %s', rv_json)
    return rv_json
def quit(self, daemononly=False):
    '''Send a quit event to terminate the main loop (idempotent).'''
    if self.quitting:
        return
    self.quitting = True
    event = SystemControlEvent(SystemControlEvent.QUIT, daemononly=daemononly)
    self.queue.append(event, True)
def _StartMonitoringProcess(self, process):
    """Starts monitoring a process.

    Args:
        process (MultiProcessBaseProcess): process.

    Raises:
        IOError: if the RPC client cannot connect to the server.
        KeyError: if the process is not registered with the engine or
            if the process is already being monitored.
        OSError: if the RPC client cannot connect to the server.
        ValueError: if the process is missing.
    """
    if process is None:
        raise ValueError('Missing process.')
    pid = process.pid
    if pid in self._process_information_per_pid:
        raise KeyError('Already monitoring process (PID: {0:d}).'.format(pid))
    if pid in self._rpc_clients_per_pid:
        raise KeyError('RPC client (PID: {0:d}) already exists'.format(pid))
    rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()
    # Make sure that a worker process has started its RPC server.
    # The RPC port will be 0 if no server is available.
    rpc_port = process.rpc_port.value
    time_waited_for_process = 0.0
    while not rpc_port:
        # Poll every 100ms until the port appears or the timeout elapses.
        time.sleep(0.1)
        rpc_port = process.rpc_port.value
        time_waited_for_process += 0.1
        if time_waited_for_process >= self._RPC_SERVER_TIMEOUT:
            raise IOError(
                'RPC client unable to determine server (PID: {0:d}) port.'.format(pid))
    hostname = 'localhost'
    if not rpc_client.Open(hostname, rpc_port):
        raise IOError((
            'RPC client unable to connect to server (PID: {0:d}) '
            'http://{1:s}:{2:d}').format(pid, hostname, rpc_port))
    # Register the client and start tracking process information.
    self._rpc_clients_per_pid[pid] = rpc_client
    self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)
def put(self, measurementId, deviceId):
    """Initialise the measurement session from the given device.

    :param measurementId: the measurement to start
    :param deviceId: the device to record from
    :return: (None, 200) when the measurement starts, (None, 404) otherwise
    """
    logger.info('Starting measurement ' + measurementId + ' for ' + deviceId)
    started = self._measurementController.startMeasurement(measurementId, deviceId)
    if not started:
        logger.warning('Failed to start measurement ' + measurementId + ' for ' + deviceId)
        return None, 404
    logger.info('Started measurement ' + measurementId + ' for ' + deviceId)
    return None, 200
def parse_cluster_pubsub_numpat(res, **options):
    """Result callback, handles different return types switchable by the
    `aggregate` flag.

    :param res: mapping of node -> pattern count
    :param options: ``aggregate`` (default True); when False the raw
        per-node mapping is returned unchanged
    :return: the per-node mapping, or the total pattern count
    """
    if not options.get('aggregate', True):
        return res
    # Only the per-node counts matter; sum them directly instead of
    # looping over .items() and discarding the keys.
    return sum(res.values())
def iskip(value, iterable):
    """Yield the items of 'iterable' that do not match the given 'value'.

    When 'value' is None, items are skipped by identity (item is None);
    otherwise they are skipped by equality (item == value).
    """
    for item in iterable:
        matched = (item is None) if value is None else (item == value)
        if not matched:
            yield item
def set(aadb, cur):
    """Sets the values in the config file.

    :param aadb: path of the asset-allocation database, if updating it
    :param cur: default currency symbol, if updating it
    """
    cfg = Config()
    edited = False
    if aadb:
        cfg.set(ConfigKeys.asset_allocation_database_path, aadb)
        print(f"The database has been set to {aadb}.")
        edited = True
    if cur:
        cfg.set(ConfigKeys.default_currency, cur)
        edited = True
    if edited:
        print(f"Changes saved.")
        return
    # Nothing was changed: point the user at the help text.
    print(f"No changes were made.")
    print(f"Use --help parameter for more information.")
def conglomerate(self, messages, **config):
    """Given N messages, return another list that has some of them grouped
    together into a common 'item'.

    Each configured conglomerator is applied in turn; grouped entries carry
    a plural ``msg_ids`` field, ungrouped ones a singular ``msg_id``.
    """
    # Feed the output of each conglomerator into the next one.
    for grouper in self.conglomerator_objects:
        messages = grouper.conglomerate(messages, **config)
    return messages
def remove_child(self, child):
    """Remove a child from this node.

    Detaches ``child`` and its entire subtree: the child is dropped from
    this node's children list, and every node of the subtree is removed
    from the shared ``tax_id`` index.
    """
    assert child in self.children
    self.children.remove(child)
    # Drop the child itself from the index keyed by tax_id.
    self.index.pop(child.tax_id)
    # Sever back-references only if they still point at us / our index --
    # presumably the child may already reference something else; verify
    # against the node class invariants.
    if child.parent is self:
        child.parent = None
    if child.index is self.index:
        child.index = None
    # Remove child subtree from index. NOTE(review): assumes iterating a
    # node yields the node itself plus all descendants -- confirm against
    # the class's __iter__.
    for n in child:
        if n is child:
            continue
        self.index.pop(n.tax_id)
        if n.index is self.index:
            n.index = None
def convert_coordinates(coords, origin, wgs84, wrapped):
    """Convert coordinates from one crs to another.

    Recurses through nested lists/tuples; a leaf is a flat sequence of
    floats. Returns ``None`` for anything that is not a usable sequence.
    """
    if isinstance(coords, (list, tuple)):
        try:
            head = coords[0]
            if isinstance(head, (list, tuple)):
                # Nested geometry: convert each member recursively.
                return [convert_coordinates(list(c), origin, wgs84, wrapped)
                        for c in coords]
            if isinstance(head, float):
                point = list(transform(origin, wgs84, *coords))
                # Optionally shift longitudes across the antimeridian.
                if wrapped and point[0] < -170:
                    point[0] += 360
                return point
        except IndexError:
            # Empty sequence: fall through to None.
            pass
    return None
def write_array(self, outfile, pixels):
    """Write an array that holds all the image values as a PNG file on
    the output file.

    See also :meth:`write` method.
    """
    if self.interlace:
        # isinstance (not type equality) also accepts array subclasses.
        if not isinstance(pixels, array):
            # Coerce to array type: 'B' for <=8-bit samples, 'H' for 16-bit.
            fmt = 'BH'[self.bitdepth > 8]
            pixels = array(fmt, pixels)
        self.write_passes(outfile, self.array_scanlines_interlace(pixels))
    else:
        self.write_passes(outfile, self.array_scanlines(pixels))
def to_internal_value(self, data):
    """Convert to internal value.

    Resolves ``data`` (a slug value) to the latest object the requesting
    user has 'view' permission on; raises serializer validation failures
    otherwise.
    """
    # NOTE(review): getattr() has no default here, so a context without a
    # 'request' key (context.get returns None) raises AttributeError --
    # confirm the serializer is always bound to a request.
    user = getattr(self.context.get('request'), 'user')
    queryset = self.get_queryset()
    permission = get_full_perm('view', queryset.model)
    try:
        # Restrict by slug, keep only objects the user may view, and pick
        # the most recent one.
        return get_objects_for_user(
            user,
            permission,
            queryset.filter(**{self.slug_field: data}),
        ).latest()
    except ObjectDoesNotExist:
        self.fail(
            'does_not_exist',
            slug_name=self.slug_field,
            value=smart_text(data),
            model_name=queryset.model._meta.model_name,  # pylint: disable=protected-access
        )
    except (TypeError, ValueError):
        # Malformed slug value (e.g. wrong type for the field lookup).
        self.fail('invalid')
def normalizedGraylevelVariance(img):
    """'GLVN' algorithm (Santos97)."""
    mean, stdev = cv2.meanStdDev(img)
    # Variance normalised by the mean grey level of the image.
    normalised = (stdev[0] ** 2) / mean[0]
    return normalised[0]
def _norm(self, x):
    """Compute the safe norm."""
    # The small epsilon keeps the sqrt gradient finite for zero vectors.
    squared = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
    return tf.sqrt(squared + 1e-7)
def unpack_binary(self, offset, length=False):
    """Returns raw binary data from the relative offset with the given length.

    Arguments:
    - `offset`: The relative offset from the start of the block.
    - `length`: The length of the binary blob. If zero/falsy, an empty
      bytes object is returned.
    Throws:
    - `OverrunBufferException`
    """
    if not length:
        # Nothing to read: a bytes literal replaces the roundabout
        # bytes("".encode("ascii")) construction.
        return b""
    o = self._offset + offset
    try:
        return bytes(struct.unpack_from("<{}s".format(length), self._buf, o)[0])
    except struct.error:
        # Requested span falls outside the backing buffer.
        raise OverrunBufferException(o, len(self._buf))
def restart(self):
    """Restart module."""
    # Honour customised lifecycle hooks: if either stop() or start() was
    # overridden, run the full cycle; otherwise restarting each daemon
    # individually is sufficient.
    customized = (self.__class__.stop != Module.stop
                  or self.__class__.start != Module.start)
    if customized:
        self.stop()
        self.start()
    else:
        for daemon in self.daemons():
            daemon.restart()
def _set_server_known ( cls , host , port ) : """Store the host / port combination for this server"""
with PostgreSql . _known_servers_lock : PostgreSql . _known_servers . add ( ( host , port ) )
def _run_gvcfgenotyper(data, region, vrn_files, out_file):
    """Run gvcfgenotyper on a single gVCF region in input file."""
    # Idempotent: skip the run entirely if the output already exists.
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            # gvcfgenotyper accepts its gVCF inputs as a file-of-filenames (-l).
            input_file = "%s-inputs.txt" % utils.splitext_plus(tx_out_file)[0]
            with open(input_file, "w") as out_handle:
                out_handle.write("%s\n" % "\n".join(vrn_files))
            # -r limits to the region; -O z requests bgzip-compressed output.
            cmd = ["gvcfgenotyper", "-f", dd.get_ref_file(data), "-l", input_file,
                   "-r", region, "-O", "z", "-o", tx_out_file]
            do.run(cmd, "gvcfgenotyper: %s %s" % (dd.get_sample_name(data), region))
    return out_file
def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
                                    **kwargs):
    """Deletes resources for a hosting device in a plugin specific way."""
    # Nothing to clean up without a management port.
    if mgmt_port is None:
        return
    try:
        self._cleanup_hosting_port(context, mgmt_port['id'])
    except n_exc.NeutronException as e:
        # Best effort: log the failure and keep going instead of aborting
        # the hosting-device teardown.
        LOG.error("Unable to delete port:%(port)s after %(tries)d"
                  " attempts due to exception %(exception)s. "
                  "Skipping it",
                  {'port': mgmt_port['id'],
                   'tries': DELETION_ATTEMPTS,
                   'exception': str(e)})
def unload_modules():
    """Unload all modules of the jukebox package and all plugin modules.

    ``reload`` alone is not enough when the module is referenced from
    other modules; the entries must be removed from ``sys.modules`` so a
    subsequent import re-executes the source.

    :returns: None
    :rtype: None
    :raises: None
    """
    # Collect first, then delete: removing entries while iterating
    # sys.modules would raise RuntimeError.
    mods = set()
    for modname in sys.modules:
        if modname.startswith('jukebox'):
            mods.add(modname)
    pm = PluginManager.get()
    for plugin in pm.get_all_plugins():
        mods.add(plugin.__module__)
    for modname in mods:
        # del statement (not del(...) call syntax) is the idiomatic form.
        del sys.modules[modname]
def _getXrefString(self, xref, compressed=1):
    """_getXrefString(self, xref, compressed=1) -> PyObject *"""
    # Delegate to the C extension only while the document is still open.
    if not self.isClosed:
        return _fitz.Document__getXrefString(self, xref, compressed)
    raise ValueError("operation illegal for closed doc")
def quaternion_conjugate(quaternion):
    """Return conjugate of quaternion.

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_conjugate(q0)
    >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
    True
    """
    conj = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    # Negate the vector part in place; the scalar part stays untouched.
    conj[1:] *= -1.0
    return conj
def run(self):
    """Thread main loop.

    Long-polls the notification API until ``self._stopping`` is set,
    dispatching each received payload; failures are retried with an
    exponential backoff.
    """
    retries = 0
    try:
        while not self._stopping:
            try:
                data = self.notifications_api.long_poll_notifications()
            except mds.rest.ApiException as e:
                # An HTTP 410 can be raised when stopping so don't log anything
                if not self._stopping:
                    # Exponential backoff with a little random jitter
                    # subtracted to spread out reconnects.
                    backoff = 2 ** retries - random.randint(int(retries / 2), retries)
                    LOG.error('Notification long poll failed with exception (retry in %d seconds):\n%s', backoff, e)
                    retries += 1
                    # Backoff for an increasing amount of time until we have
                    # tried 10 times, then reset the backoff.
                    if retries >= 10:
                        retries = 0
                    time.sleep(backoff)
            else:
                # Successful poll: dispatch the payload to the handlers.
                handle_channel_message(db=self.db, queues=self.queues, b64decode=self._b64decode, notification_object=data)
                if self.subscription_manager:
                    self.subscription_manager.notify(data.to_dict())
    finally:
        # Signal watchers that this loop has fully exited.
        self._stopped.set()
def _make_expanded_field_serializer ( self , name , nested_expand , nested_fields , nested_omit ) : """Returns an instance of the dynamically created nested serializer ."""
field_options = self . expandable_fields [ name ] serializer_class = field_options [ 0 ] serializer_settings = copy . deepcopy ( field_options [ 1 ] ) if name in nested_expand : serializer_settings [ "expand" ] = nested_expand [ name ] if name in nested_fields : serializer_settings [ "fields" ] = nested_fields [ name ] if name in nested_omit : serializer_settings [ "omit" ] = nested_omit [ name ] if serializer_settings . get ( "source" ) == name : del serializer_settings [ "source" ] if type ( serializer_class ) == str : serializer_class = self . _import_serializer_class ( serializer_class ) return serializer_class ( ** serializer_settings )
def next(self):
    """Return the next match; raises Exception if no next match available."""
    # has_next() locates the next match as a side effect, so the call both
    # validates the state and performs the search.
    if not self.has_next():
        raise StopIteration("No next match")
    match = self._last_match
    # Release the reference immediately so the match isn't retained.
    self._last_match = None
    self._state = PhoneNumberMatcher._NOT_READY
    return match
def create_table(self, name, *columns, **kwargs):
    """Create a new table with the same metadata and info."""
    # table_args() returns positional table arguments followed by a final
    # keyword dict; split them apart for the Table constructor.
    *positional, keyword = table_args(**kwargs)
    return Table(name, self.metadata, *columns, *positional, **keyword)
def validate_config(config):
    """Verify sanity for a :class:`.Config` instance.

    This will raise an exception in case conditions are not met, otherwise
    will complete silently.

    :param config: (:class:`.Config`) Configuration container.
    """
    # Required credentials must both be present.
    for param in ('space_id', 'access_token'):
        if getattr(config, param) is None:
            raise Exception(
                'Configuration for \"{0}\" must not be empty.'.format(param))
    # Custom entry classes must extend Entry, and Entry itself is reserved.
    for clazz in config.custom_entries:
        if clazz is Entry:
            raise Exception('Cannot register "Entry" as a custom entry class.')
        if not issubclass(clazz, Entry):
            raise Exception(
                'Provided class \"{0}\" must be a subclass of Entry.'.format(
                    clazz.__name__))
def iter_links_by_attrib(self, element):
    '''Iterate an element by looking at its attributes for links.

    Yields either ``(attrib_name, value)`` pairs for plain link
    attributes or individual links extracted by the JS/srcset helpers.
    '''
    for attrib_name in element.attrib.keys():
        attrib_value = element.attrib.get(attrib_name)
        if attrib_name in self.LINK_ATTRIBUTES:
            # href/src-style attributes may embed javascript: URLs; route
            # those through the JS scraper when it is enabled.
            if self.javascript_scraper and attrib_value.lstrip().startswith('javascript:'):
                for link in self.iter_links_by_js_attrib(attrib_name, percent_decode(attrib_value)):
                    yield link
            else:
                yield attrib_name, attrib_value
        elif self.javascript_scraper and attrib_name[:5] in self.DYNAMIC_ATTRIBUTES:
            # Event-handler attributes, matched by their 5-char prefix.
            for link in self.iter_links_by_js_attrib(attrib_name, attrib_value):
                yield link
        elif attrib_name.startswith('data-'):
            # Heuristic: only surface data-* values that look like URLs.
            if is_likely_link(attrib_value) and not is_unlikely_link(attrib_value):
                yield attrib_name, attrib_value
        elif attrib_name == 'srcset':
            # srcset carries multiple candidate URLs in one attribute.
            items = self.iter_links_by_srcset_attrib(attrib_name, attrib_value)
            for item in items:
                yield item
def getStat(cls, obj, name):
    """Gets the stat for the given object with the given name, or None if
    no such stat exists."""
    # Walk the object's MRO (stopping before object) so stats declared on
    # base classes are also found; returns None implicitly on no match.
    for klass in type(obj).__mro__:
        if klass == object:
            break
        for candidate in klass.__dict__.values():
            if isinstance(candidate, Stat) and candidate.getName() == name:
                return candidate
def copy_resource(self, container, resource, local_filename):
    """Identical to :meth:`dockermap.client.base.DockerClientWrapper.copy_resource`
    with additional logging.
    """
    # Log the transfer first, then delegate the actual copy to the base class.
    self.push_log("Receiving tarball for resource '{0}:{1}' and storing as {2}".format(container, resource, local_filename))
    super(DockerFabricClient, self).copy_resource(container, resource, local_filename)
def detach_screens(self, screen_ids):
    """Unplugs monitors from the virtual graphics card.

    in screen_ids of type int
    """
    if not isinstance(screen_ids, list):
        raise TypeError("screen_ids can only be an instance of type list")
    # Spot-check only the first few entries for type sanity.
    for candidate in screen_ids[:10]:
        if not isinstance(candidate, baseinteger):
            raise TypeError("array can only contain objects of type baseinteger")
    self._call("detachScreens", in_p=[screen_ids])
def get_recipients(self, **options):
    """Figures out the recipients."""
    if options['recipients_from_setting']:
        # An explicit recipient list configured in settings wins.
        return settings.TIMELINE_DIGEST_EMAIL_RECIPIENTS
    queryset = get_user_model()._default_manager.all()
    if options['staff']:
        queryset = queryset.filter(is_staff=True)
    elif not options['all']:
        # Default audience: staff superusers only.
        queryset = queryset.filter(is_staff=True, is_superuser=True)
    return queryset.values_list(settings.TIMELINE_USER_EMAIL_FIELD, flat=True)
def write_PIA0_B_control(self, cpu_cycles, op_address, address, value):
    """write to 0xff03 -> PIA 0 B side Control reg.

    TODO: Handle IRQ
    bit 7 | IRQ 1 (VSYNC) flag
    bit 6 | IRQ 2 flag (not used)
    bit 5 | Control line 2 (CB2) is an output = 1
    bit 4 | Control line 2 (CB2) set by bit 3 = 1
    bit 3 | select line MSB of analog multiplexor (MUX): 0 = control line 2 LO / 1 = control line 2 HI
    bit 2 | set data direction: 0 = $FF02 is DDR / 1 = $FF02 is normal data lines
    bit 1 | control line 1 (CB1): IRQ polarity 0 = IRQ on HI to LO / 1 = IRQ on LO to HI
    bit 0 | VSYNC IRQ: 0 = disable IRQ / 1 = enable IRQ
    """
    log.critical(
        "%04x| write $%02x (%s) to $%04x -> PIA 0 B side Control reg.\t|%s",
        op_address, value, byte2bit_string(value),
        address, self.cfg.mem_info.get_shortest(op_address)
    )
    # Bit 0 toggles the VSYNC IRQ on the CPU.
    if is_bit_set(value, bit=0):
        log.critical(
            "%04x| write $%02x (%s) to $%04x -> VSYNC IRQ: enable\t|%s",
            op_address, value, byte2bit_string(value),
            address, self.cfg.mem_info.get_shortest(op_address)
        )
        self.cpu.irq_enabled = True
        # Mirror the enable into the IRQ 1 flag (bit 7) of the register.
        value = set_bit(value, bit=7)
    else:
        log.critical(
            "%04x| write $%02x (%s) to $%04x -> VSYNC IRQ: disable\t|%s",
            op_address, value, byte2bit_string(value),
            address, self.cfg.mem_info.get_shortest(op_address)
        )
        self.cpu.irq_enabled = False
    # Bit 2 selects whether $FF02 acts as DDR (0) or peripheral data (1).
    if not is_bit_set(value, bit=2):
        self.pia_0_B_control.select_pdr()
    else:
        self.pia_0_B_control.deselect_pdr()
    # Latch the (possibly flag-updated) value into the control register.
    self.pia_0_B_control.set(value)
def load(self, path=None):
    """Loads the specified DLL, if any, otherwise re-loads the current DLL.

    If ``path`` is specified, loads the DLL at the given ``path``,
    otherwise re-loads the DLL currently specified by this library.

    Note:
      This creates a temporary DLL file to use for the instance.  This is
      necessary to work around a limitation of the J-Link DLL in which
      multiple J-Links cannot be accessed from the same process.

    Args:
      self (Library): the ``Library`` instance
      path (path): path to the DLL to load

    Returns:
      ``True`` if library was loaded successfully.

    Raises:
      OSError: if there is no J-Link SDK DLL present at the path.

    See Also:
      `J-Link Multi-session <http://forum.segger.com/index.php?page=Thread&threadID=669>`_.
    """
    # Drop any previously loaded copy before loading a new one.
    self.unload()
    self._path = path or self._path

    # Windows requires a proper suffix in order to load the library file,
    # so it must be set here.
    if self._windows or self._cygwin:
        suffix = '.dll'
    elif sys.platform.startswith('darwin'):
        suffix = '.dylib'
    else:
        suffix = '.so'

    # Copy the J-Link DLL to a temporary file.  This will be cleaned up the
    # next time we load a DLL using this library or if this library is
    # cleaned up.
    tf = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
    with open(tf.name, 'wb') as outputfile:
        with open(self._path, 'rb') as inputfile:
            outputfile.write(inputfile.read())

    # This is needed to work around a WindowsError where the file is not
    # being properly cleaned up after exiting the with statement.
    tf.close()

    self._temp = tf
    self._lib = ctypes.cdll.LoadLibrary(tf.name)

    if self._windows:
        # The J-Link library uses a mix of __cdecl and __stdcall function
        # calls.  While this is fine on a nix platform or in cygwin, this
        # causes issues with Windows, where it expects the __stdcall
        # methods to follow the standard calling convention.  As a result,
        # we have to convert them to windows function calls.
        self._winlib = ctypes.windll.LoadLibrary(tf.name)
        for stdcall in self._standard_calls_:
            if hasattr(self._winlib, stdcall):
                # Backwards compatibility.  Some methods do not exist on
                # older versions of the J-Link firmware, so ignore them in
                # these cases.
                setattr(self._lib, stdcall, getattr(self._winlib, stdcall))
    return True
def users_with_birthday(self, month, day):
    """Return a list of user objects who have a birthday on a given date."""
    matches = User.objects.filter(properties___birthday__month=month,
                                  properties___birthday__day=day)
    # TODO: permissions system
    return [user for user in matches]
def uniquify(l):
    """Uniquify a list (skip duplicate items), preserving order.

    Hashable items are tracked in a set for O(n) behaviour instead of the
    previous O(n^2) list scan; unhashable items (e.g. lists) fall back to
    the linear scan so they keep working.

    :param l: an iterable of items.
    :return: a new list with first occurrences kept, duplicates dropped.
    """
    seen = set()
    result = []
    for item in l:
        try:
            if item in seen:
                continue
            seen.add(item)
        except TypeError:
            # Unhashable item: fall back to O(n) membership against the result.
            if item in result:
                continue
        result.append(item)
    return result
def create_inputs_to_reference(job_data, input_files, input_directories):
    """Creates a dictionary with the summarized information in job_data,
    input_files and input_directories.

    :param job_data: The job data specifying input parameters other than
        files and directories.
    :param input_files: A dictionary describing the input files.
    :param input_directories: A dictionary describing the input directories.
    :return: A summarized dictionary containing information about all given
        inputs.
    """
    summary = {}
    # Deep copies keep the merged view independent of the source dicts.
    for source in (job_data, input_files, input_directories):
        summary.update(deepcopy(source))
    return summary
def define(self, klass, name="default"):
    """Define a class with a given set of attributes.

    :param klass: The class
    :type klass: class
    :param name: The short name
    :type name: str
    """
    def decorator(factory):
        # Transparent pass-through wrapper; the original callable is what
        # gets registered under (klass, name).
        @wraps(factory)
        def inner(*args, **kwargs):
            return factory(*args, **kwargs)
        self.register(klass, factory, name=name)
        return inner
    return decorator
def tv_distance(a, b):
    '''Get the Total Variation (TV) distance between two densities a and b.'''
    diff = np.abs(a - b)
    # 1-D input yields a single distance; 2-D input yields one per row.
    if a.ndim == 1:
        return np.sum(diff)
    return np.sum(diff, axis=1)
def input_on_stderr(prompt='', default=None, convert=None):
    """Output a string to stderr and wait for input.

    Args:
        prompt (str): the message to display.
        default: the default value to return if the user leaves the field empty
        convert (callable): a callable to be used to convert the value the
            user inserted. If None, the type of ``default`` will be used.
    """
    # Prompt goes to stderr so stdout remains clean for piping.
    print(prompt, end='', file=sys.stderr)
    answer = builtins.input()
    return _convert(answer, default, convert)
def _get_child_mock(self, **kw):
    """Create the child mocks for attributes and return value. By default
    child mocks will be the same type as the parent. Subclasses of Mock may
    want to override this to customize the way child mocks are made.

    For non-callable mocks the callable variant will be used (rather than
    any custom subclass).
    """
    _type = type(self)
    if issubclass(_type, CallableMixin):
        # Callable parent: reuse the first base class (drops any custom
        # subclass specifics, matching the non-callable behaviour).
        klass = _type.__mro__[1]
    elif issubclass(_type, NonCallableMagicMock):
        klass = MagicMock
    elif issubclass(_type, NonCallableMock):
        klass = Mock
    return klass(**kw)
def lonlat_to_screen(self, lon, lat):
    """Projects geodesic coordinates to screen.

    :param lon: longitude (scalar, list, or ndarray)
    :param lat: latitude (scalar, list, or ndarray)
    :return: x, y screen coordinates
    """
    # isinstance (not type equality) also accepts list subclasses.
    if isinstance(lon, list):
        lon = np.array(lon)
    if isinstance(lat, list):
        lat = np.array(lat)
    lat_rad = np.radians(lat)
    # Standard web-mercator tile math at the current zoom level.
    n = 2.0 ** self.zoom
    xtile = (lon + 180.0) / 360.0 * n
    ytile = (1.0 - np.log(np.tan(lat_rad) + (1 / np.cos(lat_rad))) / math.pi) / 2.0 * n
    x = (xtile * TILE_SIZE).astype(int)
    # Flip vertically: screen y grows downward.
    y = (SCREEN_H - ytile * TILE_SIZE).astype(int)
    return x, y
def conjugate_gradient_nonlinear(f, x, line_search=1.0, maxiter=1000, nreset=0,
                                 tol=1e-16, beta_method='FR', callback=None):
    r"""Conjugate gradient for nonlinear problems.

    Minimizes a differentiable functional by finding a zero of its
    gradient, using the nonlinear conjugate gradient method.

    Parameters
    ----------
    f : `Functional`
        Functional with ``f.gradient``.
    x : ``f.domain`` element
        Starting point; updated in place with the result.
    line_search : float or `LineSearch`, optional
        Step-length strategy; a float is used as a fixed step length.
    maxiter : int, optional
        Maximum number of iterations to perform.
    nreset : int, optional
        Number of times the solver should be reset. Default: no reset.
    tol : float, optional
        Tolerance used to terminate the iteration.
    beta_method : {'FR', 'PR', 'HS', 'DY'}, optional
        Method to calculate ``beta`` in the iterates: Fletcher-Reeves,
        Polak-Ribiere, Hestenes-Stiefel or Dai-Yuan.
    callback : callable, optional
        Executed per iteration, e.g. to plot each iterate.
    """
    if x not in f.domain:
        raise TypeError('`x` {!r} is not in the domain of `f` {!r}'
                        ''.format(x, f.domain))
    # A plain float becomes a constant-step line search.
    if not callable(line_search):
        line_search = ConstantLineSearch(line_search)
    if beta_method not in ['FR', 'PR', 'HS', 'DY']:
        raise ValueError('unknown ``beta_method``')
    # Outer loop: one pass per reset (plus the initial pass).
    for _ in range(nreset + 1):
        # First iteration is done without beta (steepest descent step).
        dx = -f.gradient(x)
        dir_derivative = -dx.inner(dx)
        if abs(dir_derivative) < tol:
            return
        a = line_search(x, dx, dir_derivative)
        x.lincomb(1, x, a, dx)  # x = x + a * dx
        s = dx  # for 'HS' and 'DY' beta methods
        for _ in range(maxiter // (nreset + 1)):
            # Compute dx as -grad f, keeping the previous value for beta.
            dx, dx_old = -f.gradient(x), dx
            # Calculate "beta" per the chosen update formula.
            if beta_method == 'FR':
                beta = dx.inner(dx) / dx_old.inner(dx_old)
            elif beta_method == 'PR':
                beta = dx.inner(dx - dx_old) / dx_old.inner(dx_old)
            elif beta_method == 'HS':
                beta = -dx.inner(dx - dx_old) / s.inner(dx - dx_old)
            elif beta_method == 'DY':
                beta = -dx.inner(dx) / s.inner(dx - dx_old)
            else:
                raise RuntimeError('unknown ``beta_method``')
            # Reset beta if negative (restarts with steepest descent).
            beta = max(0, beta)
            # Update search direction.
            s.lincomb(1, dx, beta, s)  # s = dx + beta * s
            # Find optimal step along s.
            dir_derivative = -dx.inner(s)
            if abs(dir_derivative) <= tol:
                return
            a = line_search(x, s, dir_derivative)
            # Update position.
            x.lincomb(1, x, a, s)  # x = x + a * s
            if callback is not None:
                callback(x)
def __downloadPage(factory, *args, **kwargs):
    """Start a HTTP download, returning a HTTPDownloader object."""
    # The Twisted API is weird:
    # 1) web.client.downloadPage() doesn't give us the HTTP headers
    # 2) there is no method that simply accepts a URL and gives you back
    #    a HTTPDownloader object
    # TODO: convert getPage() usage to something similar, too
    downloader = factory(*args, **kwargs)
    if downloader.scheme != 'https':
        reactor.connectTCP(downloader.host, downloader.port, downloader)
        return downloader
    # Lazy import keeps pyOpenSSL optional for plain-HTTP users.
    from twisted.internet import ssl
    reactor.connectSSL(downloader.host, downloader.port, downloader,
                       ssl.ClientContextFactory())
    return downloader
def anim(self, duration, offset=0, timestep=1, label=None, unit=None,
         time_fn=param.Dynamic.time_fn):
    """Animate this pattern over time, returning a HoloMap of frames.

    duration: The temporal duration to animate in the units defined on
    the global time function.

    offset: The temporal offset from which the animation is generated
    given the supplied pattern

    timestep: The time interval between successive frames. The duration
    must be an exact multiple of the timestep.

    label: A label string to override the label of the global time
    function (if not None).

    unit: The unit string to override the unit value of the global time
    function (if not None).

    time_fn: The global time function object that is shared across the
    time-varying objects that are being sampled.

    Note that the offset, timestep and time_fn only affect patterns
    parameterized by time-dependent number generators. Otherwise, the
    frames are generated by successive call to the pattern which may or
    may not be varying (e.g to view the patterns contained within a
    Selector).
    """
    frames = (duration // timestep) + 1
    if duration % timestep != 0:
        raise ValueError("The duration value must be an exact multiple of the timestep.")
    # Fall back to the time function's own label/unit when not overridden.
    if label is None:
        label = time_fn.label if hasattr(time_fn, 'label') else 'Time'
    unit = time_fn.unit if (not unit and hasattr(time_fn, 'unit')) else unit
    vmap = HoloMap(kdims=[Dimension(label, unit=unit if unit else '')])
    # Save the pattern's state so sampling over time leaves it unchanged.
    self.state_push()
    with time_fn as t:
        t(offset)
        for i in range(frames):
            # Capture one frame at the current time, then advance.
            vmap[t()] = self[:]
            t += timestep
    self.state_pop()
    return vmap
def deflate(f, *args, **kwargs):
    """Deflate Flask Response Decorator."""
    data = f(*args, **kwargs)
    # A Response carries its payload in .data; anything else is raw content.
    is_response = isinstance(data, Response)
    content = data.data if is_response else data
    compressor = zlib.compressobj()
    deflated_data = compressor.compress(content) + compressor.flush()
    if is_response:
        data.data = deflated_data
        data.headers['Content-Encoding'] = 'deflate'
        data.headers['Content-Length'] = str(len(data.data))
        return data
    return deflated_data