signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def autostrip(cls):
    """Class decorator that strips text fields before validation.

    Example:
        @autostrip
        class PersonForm(forms.Form):
            name = forms.CharField(min_length=2, max_length=10)
            email = forms.EmailField()

    Author: nail.xx
    """
    warnings.warn(
        "django-annoying autostrip is deprecated and will be removed in a "
        "future version. Django now has native support for stripping form "
        "fields. "
        "https://docs.djangoproject.com/en/stable/ref/forms/fields/#django.forms.CharField.strip",
        DeprecationWarning,
        stacklevel=2,
    )
    # Wrap clean() of every CharField so the value gets stripped first.
    char_fields = [
        (name, field)
        for name, field in cls.base_fields.items()
        if isinstance(field, forms.CharField)
    ]
    for _name, field in char_fields:
        # Bind the original clean() as a default argument so each field
        # keeps its own wrapper (avoids the late-binding closure pitfall).
        field.clean = (
            lambda value, _original_clean=field.clean:
                _original_clean(value and value.strip())
        )
    return cls
def charts_slug_get(self, slug, **kwargs):
    """Fetch a single Chart by its slug.

    A Chart is chosen by Pollster editors (e.g. "Obama job approval -
    Democrats") and is always based on a single Question.  Charts are
    derived data and change with editorial priorities, so users should
    strongly consider basing analysis on Questions instead.

    Synchronous by default.  For an asynchronous request, pass a
    `callback` function to be invoked with the response:

    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.charts_slug_get(slug, callback=callback_function)

    :param callback function: callback for asynchronous request (optional)
    :param str slug: unique identifier for a Chart (required)
    :return: Chart, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous: the worker thread is handed back to the caller.
        return self.charts_slug_get_with_http_info(slug, **kwargs)
    # Synchronous: unwrap and return just the deserialized data.
    data = self.charts_slug_get_with_http_info(slug, **kwargs)
    return data
def secgroup_delete(self, name):
    '''Delete a security group by name.

    Returns a ``{name: message}`` dict on success, or an error string
    when no security group with that name exists.
    '''
    conn = self.compute_conn
    for group in conn.security_groups.list():
        if group.name != name:
            continue
        conn.security_groups.delete(group.id)
        return {name: 'Deleted security group: {0}'.format(name)}
    return 'Security group not found: {0}'.format(name)
def terminate(self):
    """Properly terminate this player instance.

    Prefer calling this explicitly instead of relying on Python's
    garbage collector to invoke it from the object's destructor.
    """
    # Detach the handle from the instance before destroying it.
    handle = self.handle
    self.handle = None
    if threading.current_thread() is self._event_thread:
        # Special case: the event thread cannot destroy the handle it is
        # itself servicing — it would deadlock — so delegate the
        # destruction to a short-lived helper thread.
        grim_reaper = threading.Thread(
            target=lambda: _mpv_terminate_destroy(handle))
        grim_reaper.start()
    else:
        _mpv_terminate_destroy(handle)
        if self._event_thread:
            self._event_thread.join()
def applyUserPars_steps(configObj, input_dict, step='3a'):
    """Enable the user-specified output WCS step when the user supplied
    any of that step's parameters on the command line, regardless of how
    final_wcs was originally set.
    """
    step_kws = {'7a': 'final_wcs', '3a': 'driz_sep_wcs'}
    switch_name = step_kws[step]
    section = getSectionName(configObj, step)
    # All of this step's parameters except the on/off switch itself.
    step_pars = configObj[section].copy()
    del step_pars[switch_name]
    # Any of these parameters present in input_dict came from the user.
    user_pars = {kw: input_dict[kw] for kw in step_pars if kw in input_dict}
    if user_pars:
        configObj[section][switch_name] = True
def decodeGsm7(encodedText):
    """GSM-7 text decoding algorithm.

    Decodes the specified GSM-7-encoded string into a plaintext string.

    :param encodedText: the text string to decode
    :type encodedText: bytearray or str

    :return: a string containing the decoded text
    :rtype: str
    """
    result = []
    # BUGFIX/idiom: use isinstance instead of `type(x) == str` so that
    # str subclasses are converted correctly as well.
    if isinstance(encodedText, str):
        encodedText = rawStrToByteArray(encodedText)  # bytearray(encodedText)
    iterEncoded = iter(encodedText)
    for b in iterEncoded:
        if b == 0x1B:  # ESC - switch to extended table
            c = chr(next(iterEncoded))
            # Reverse lookup in the extended table; unknown extended
            # codes are silently skipped (unchanged behaviour).
            for char, value in dictItemsIter(GSM7_EXTENDED):
                if c == value:
                    result.append(char)
                    break
        else:
            result.append(GSM7_BASIC[b])
    return ''.join(result)
def find_references_origin(irs):
    """Point the lvalue of each Index/Member operation at its left variable."""
    reference_ops = (Index, Member)
    for operation in irs:
        if isinstance(operation, reference_ops):
            operation.lvalue.points_to = operation.variable_left
def rename(self, new_relation):
    """Rename this cached relation to ``new_relation``.

    Note that this changes the output of key(); all refs must be updated!

    :param _CachedRelation new_relation: the new name to apply
    """
    # Relations keep this data in their `path` dict, but they also cache
    # a table_name that .render() usually uses, so both must be updated
    # together.  table_name is always the identifier (via .create()).
    new_path = {
        'database': new_relation.inner.database,
        'schema': new_relation.inner.schema,
        'identifier': new_relation.inner.identifier,
    }
    self.inner = self.inner.incorporate(
        path=new_path,
        table_name=new_relation.inner.identifier,
    )
def disallow(ctx, foreign_account, permission, threshold, account):
    """Remove a key/account from an account's permission."""
    tx = ctx.bitshares.disallow(
        foreign_account,
        account=account,
        permission=permission,
        threshold=threshold,
    )
    print_tx(tx)
def createAndCleanTPED(tped, tfam, snps, prefix, chosenSNPs, completion, concordance, snpsToComplete, tfamFileName, completionT, concordanceT):
    """Complete a TPED for duplicated SNPs.

    :param tped: representation of the ``tped`` of duplicated markers.
    :param tfam: representation of the ``tfam``.
    :param snps: position of duplicated markers in the ``tped``.
    :param prefix: prefix of the output files.
    :param chosenSNPs: markers chosen for completion (including problems).
    :param completion: completion of each duplicated marker.
    :param concordance: pairwise concordance of the duplicated markers.
    :param snpsToComplete: markers that will be completed (excluding problems).
    :param tfamFileName: name of the original ``tfam`` file.
    :param completionT: completion threshold.
    :param concordanceT: concordance threshold.

    :type tped: numpy.array
    :type tfam: list
    :type snps: dict
    :type prefix: str
    :type chosenSNPs: dict
    :type completion: numpy.array
    :type concordance: dict
    :type snpsToComplete: set
    :type tfamFileName: str
    :type completionT: float
    :type concordanceT: float

    :returns: a tuple with the new ``tped`` after completion
              (:py:class:`numpy.array`) and the set of marker indexes to
              remove from the final data set (:py:class:`set`).

    Creates three files:

    * ``prefix.zeroed_out``: markers/samples whose genotype was zeroed out.
    * ``prefix.not_good_enough``: markers not good enough (completion or
      concordance) to help complete the chosen markers.
    * ``prefix.removed_duplicates``: markers that were used to complete the
      chosen one, hence removed from the final data set.

    For every sample of every duplicated marker, checks that the genotypes
    all agree.  If the chosen marker is not called but its duplicates are
    (and agree), the chosen genotype is completed; on a true discordance,
    the chosen genotype is zeroed out.
    """
    zeroedOutFile = None
    try:
        zeroedOutFile = open(prefix + ".zeroed_out", "w")
    except IOError:
        # BUGFIX: was "%(prefix).zeroed_out" (missing 's' conversion),
        # which raised a ValueError while formatting instead of producing
        # the intended error message.
        msg = "%(prefix)s.zeroed_out: can't write file" % locals()
        raise ProgramError(msg)
    print >>zeroedOutFile, "\t".join(["famID", "indID", "snpID"])

    notGoodEnoughFile = None
    try:
        notGoodEnoughFile = open(prefix + ".not_good_enough", "w")
    except IOError:
        msg = "%(prefix)s.not_good_enough: can't write file" % locals()
        raise ProgramError(msg)
    print >>notGoodEnoughFile, "\t".join(["name", "reason"])

    removedFile = None
    try:
        removedFile = open(prefix + ".removed_duplicates", "w")
    except IOError:
        msg = "%(prefix)s.removed_duplicates: can't write file" % locals()
        raise ProgramError(msg)

    notGoodEnoughSnps = set()

    # Split the tped in 'snpInfo' and 'genotypes'
    snpInfo = tped[:, :4]
    genotypes = tped[:, 4:]

    # The set of indexes we want to get rid of at the end
    getRidOfIndex = set()

    for snpID, indexes in snps.iteritems():
        if snpID not in snpsToComplete:
            # We don't want to complete this SNP
            continue

        # Duplicates below the completion threshold are not good enough
        completionToRemove = set(np.where(completion[indexes] < completionT)[0])
        for k in completionToRemove:
            notGoodEnoughSnps.add((snpInfo[indexes][k, 1], "completion"))

        # ... neither are duplicates below the concordance threshold
        concordanceToRemove = set(np.where(concordance[snpID] < concordanceT)[0])
        for k in concordanceToRemove:
            notGoodEnoughSnps.add((snpInfo[indexes][k, 1], "concordance"))

        # Translate relative positions into tped row indexes to drop
        indexesToRemove = set()
        for index in completionToRemove | concordanceToRemove:
            indexesToRemove.add(indexes[index])
        indexesToKeep = []
        for index in indexes:
            if index not in indexesToRemove:
                indexesToKeep.append(index)

        # Getting the chosen SNP
        chosenOne = chosenSNPs[snpID]
        if chosenOne not in set(indexesToKeep):
            # The chosen SNP is not good enough, go to the next SNP
            logger.warning(" - {} chosen but not good enough".format(
                snpInfo[chosenOne, 1],
            ))
            continue

        # Now cycling through the genotypes
        nbSamples = genotypes.shape[1]
        for sampleIndex in xrange(nbSamples):
            # Unique called genotypes among the kept duplicates:
            # len == 0 means all missing, len == 1 means all agree,
            # len > 1 means discordance (unless only allele order differs)
            curGenotypes = genotypes[indexesToKeep, sampleIndex]
            cleanedCurGenotypes = curGenotypes[np.where(curGenotypes != "0 0")]
            uniqueCleanedCurGenotypes = np.unique(cleanedCurGenotypes)

            toComplete = False
            if len(uniqueCleanedCurGenotypes) > 1:
                # Check whether the genotypes differ only by allele order
                possibleAlleles = [
                    set() for k in xrange(len(uniqueCleanedCurGenotypes))
                ]
                for k, geno in enumerate(uniqueCleanedCurGenotypes):
                    possibleAlleles[k] |= set(geno.split(" "))
                allEqual = True
                for k in xrange(len(possibleAlleles)):
                    for l in xrange(k + 1, len(possibleAlleles)):
                        if possibleAlleles[k] != possibleAlleles[l]:
                            allEqual = False
                if not allEqual:
                    # True discordance: zero out the chosen genotype
                    tped[chosenOne, sampleIndex + 4] = "0 0"
                    print >>zeroedOutFile, "\t".join([
                        tfam[sampleIndex, 0],
                        tfam[sampleIndex, 1],
                        snpInfo[chosenOne, 1],
                    ])
            elif (len(uniqueCleanedCurGenotypes) == 1
                    and genotypes[chosenOne, sampleIndex] == "0 0"):
                # BUGFIX: previously any missing chosen genotype triggered
                # completion, even when every duplicate was missing too
                # (len == 0), which crashed on the empty-array index below;
                # a third, unreachable elif carried the intended condition.
                toComplete = True

            if toComplete:
                # We complete the current individual
                tped[chosenOne, sampleIndex + 4] = uniqueCleanedCurGenotypes[0]

        # We keep only the chosen one; the other duplicates are removed
        for index in indexes:
            if index != chosenOne:
                getRidOfIndex.add(index)
                print >>removedFile, snpInfo[index, 1]

    # Writing the not good enough file
    for item in notGoodEnoughSnps:
        print >>notGoodEnoughFile, "\t".join(item)

    # Closing the output files (BUGFIX: removedFile was never closed)
    zeroedOutFile.close()
    notGoodEnoughFile.close()
    removedFile.close()

    # Printing the chosen file
    try:
        shutil.copy(tfamFileName, prefix + ".chosen_snps.tfam")
    except IOError:
        msg = "%(tfamFileName)s: can't copy file to " \
              "%(prefix)s.chosen_snps.tfam" % locals()
        raise ProgramError(msg)
    chosenFile = None
    try:
        chosenFile = open(prefix + ".chosen_snps.tped", "w")
    except IOError:
        msg = "%(prefix)s.chosen_snps.tped: can't write file" % locals()
        raise ProgramError(msg)
    for chosenOne in chosenSNPs.itervalues():
        snpID = (tped[chosenOne, 0], tped[chosenOne, 3])
        if snpID in snpsToComplete:
            print >>chosenFile, "\t".join(tped[chosenOne])
    chosenFile.close()

    return tped, getRidOfIndex
def dense_output(t_current, t_old, h_current, rcont):
    """Dense output: evaluate the interpolating polynomial at t_current.

    The eight coefficients in ``rcont`` define a polynomial in the
    normalized step position s = (t_current - t_old) / h_current,
    evaluated Horner-style with alternating factors s and (1 - s).
    """
    s = (t_current - t_old) / h_current
    s1 = 1.0 - s
    # Evaluate from the innermost coefficient outwards; the multiplier
    # alternates s, s1, s, ... starting with s at the innermost level.
    acc = rcont[7]
    for i, coeff in enumerate(reversed(rcont[:7])):
        acc = coeff + (s if i % 2 == 0 else s1) * acc
    return acc
def create_time_labels(self):
    """Create the time labels, but don't plot them yet.

    Notes
    -----
    The height of the time labels must be known up front so the main
    scene can be adjusted.  Not very robust: uses seconds as integers.
    """
    time_axis = self.data.axis['time'][0]
    min_time = int(floor(min(time_axis)))
    max_time = int(ceil(max(time_axis)))
    n_time_labels = self.parent.value('n_time_labels')
    # All labels sit on the same row, below the channel labels.
    label_y = len(self.idx_label) * self.parent.value('y_distance')

    self.idx_time = []
    self.time_pos = []
    for one_time in linspace(min_time, max_time, n_time_labels):
        stamp = self.data.start_time + timedelta(seconds=one_time)
        item = QGraphicsSimpleTextItem(stamp.strftime('%H:%M:%S'))
        item.setFlag(QGraphicsItem.ItemIgnoresTransformations)
        self.idx_time.append(item)
        self.time_pos.append(QPointF(one_time, label_y))
def response_continue(self):
    """Signal that data reception was partial and that the exporter
    should continue to send data for this entity.

    Also used on an import-side cache miss, in which case the response
    directs the exporter to re-send the full entity data (otherwise it
    sends back the entity ID and relies on the importer's cache).
    Currently used after requesting/receiving a status block, and in its
    cache-refresh form on an import cache miss.

    :return: a response that can be returned from a Flask service method
    """
    # No cached entity -> ask for a full re-send instead of a delta.
    if self.entity is None:
        state = 'continue-nocache'
    else:
        state = 'continue'
    ImportRequest.logger.debug("Sending: %s" % state)
    return jsonify({'state': state})
def SayString(self, text, delay=0):
    """Enter some text.

    :param text: the text you want to enter.
    :param delay: seconds to wait before queueing the command.
    """
    self._delay(delay)
    self.add(Command("SayString", 'SayString "%s"' % text))
def write_unitary_matrix_to_hdf5(temperature, mesh, unitary_matrix=None, sigma=None, sigma_cutoff=None, solver=None, filename=None, verbose=False):
    """Write eigenvectors of collision matrices at temperatures.

    Depending on the choice of solver, eigenvectors are stored either
    column-wise or row-wise.
    """
    suffix = _get_filename_suffix(mesh,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    hdf5_filename = "unitary" + suffix + ".hdf5"
    with h5py.File(hdf5_filename, 'w') as w:
        w.create_dataset('temperature', data=temperature)
        if unitary_matrix is not None:
            w.create_dataset('unitary_matrix', data=unitary_matrix)
        if solver is not None:
            w.create_dataset('solver', data=solver)

        if verbose:
            many = len(temperature) > 1
            pieces = ["Unitary matrices " if many else "Unitary matrix "]
            if sigma is not None:
                pieces.append("at sigma %s " % _del_zeros(sigma))
            if sigma_cutoff is not None:
                pieces.append("(%4.2f SD) " % sigma_cutoff)
            pieces.append("were written into " if many
                          else "was written into ")
            if sigma is not None:
                pieces.append("\n")
            pieces.append("\"%s\"." % hdf5_filename)
            print("".join(pieces))
def buffer_leave(self, filename):
    """Called when the user is switching away from a buffer."""
    self.log.debug('buffer_leave: %s', filename)
    # TODO: This is questionable; a location list should be used for
    # single-file errors instead of wiping them all.
    self.editor.clean_errors()
def write_manifest(self):
    """Write the file list in 'self.filelist' to the manifest file
    named by 'self.manifest'.
    """
    self.filelist._repair()
    # Now _repair checks encodability, but not unicode
    normalized = [self._manifest_normalize(name)
                  for name in self.filelist.files]
    self.execute(write_file,
                 (self.manifest, normalized),
                 "writing manifest file '%s'" % self.manifest)
def list_nodes(full=False, call=None):
    '''
    List of nodes, keeping only a brief listing

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )
    nodes = list_nodes_full('function')
    if full:
        return nodes
    # Trim each node down to the brief set of fields.
    brief_fields = ('id', 'image', 'size', 'public_ips', 'private_ips', 'state')
    return {
        name: {field: info[field] for field in brief_fields}
        for name, info in nodes.items()
    }
def run(self, grid=None, num_of_paths=2000, seed=0, num_of_workers=CPU_COUNT, profiling=False):
    """implements simulation

    :param list(date) grid: list of Monte Carlo grid dates
    :param int num_of_paths: number of Monte Carlo paths
    :param hashable seed: seed used for rnds initialisation
        (additional adjustment in place)
    :param int or None num_of_workers: number of parallel workers
        (default: cpu_count()), if None no parallel processing is used
    :param bool profiling: signal whether to use profiling,
        True means used, else not
    :return object: final consumer state

    It returns a list of lists.
    The list contains per path a list produced by consumer
    at observation dates
    """
    self.grid = sorted(set(grid))
    self.num_of_paths = num_of_paths
    self.num_of_workers = num_of_workers
    self.seed = seed

    # pre processing: both producer and consumer see the same grid/seed
    self.producer.initialize(self.grid, self.num_of_paths, self.seed)
    self.consumer.initialize(self.grid, self.num_of_paths, self.seed)

    if num_of_workers:
        # processing: split the path range into one contiguous
        # [start_path, stop_path) slice per worker process
        workers = list()
        queue = Queue()
        path_per_worker = int(num_of_paths // num_of_workers)
        start_path, stop_path = 0, path_per_worker
        for i in range(num_of_workers):
            if i == num_of_workers - 1:
                stop_path = num_of_paths  # ensure exact num of path as required
            name = 'worker-%d' % i
            if profiling:
                # display profile with `snakeviz worker-0.prof`
                # if not installed `pip install snakeviz`
                workers.append(
                    Process(target=self._run_parallel_process_with_profiling,
                            name=name,
                            args=(start_path, stop_path, queue, name + '.prof')))
            else:
                workers.append(
                    Process(target=self._run_parallel_process,
                            name=name,
                            args=(start_path, stop_path, queue)))
            # advance the slice window for the next worker
            start_path, stop_path = stop_path, stop_path + path_per_worker
        for worker in workers:
            worker.start()

        # post processing: collect one result batch per worker
        for _ in range(num_of_workers):
            self.consumer.get(queue.get())
        for worker in workers:
            worker.join()
    else:
        # no parallelism requested: run everything in this process
        self._run_process(0, num_of_paths)

    self.consumer.finalize()
    return self.consumer.result
def get_mods(package):
    """List all loadable python modules in a package directory.

    Looks inside the package's directory for all files that match
    PYMOD_RE (Python modules with a numeric prefix), dropping duplicates
    and extensions.

    :param package: package object
    :returns: sorted list of tuples (filename without extension,
        major_version, minor_version)
    """
    pkgdir = package.__path__[0]
    found = set()
    for entry in os.listdir(pkgdir):
        match = PYMOD_RE.match(entry)
        if match:
            groups = match.groups()
            found.add((groups[0], int(groups[1]), int(groups[2])))
    # Order by (major, minor) version.
    return sorted(found, key=lambda mod: (mod[1], mod[2]))
def reentrancies(self):
    """Return a mapping of variables to their re-entrancy count.

    A re-entrancy is when more than one edge selects a node as its
    target.  These graphs are rooted, so the top node always has one
    implicit entrancy.  Only nodes with re-entrancies are reported, and
    the count excludes the first (entrant) edge.  Counts are for the
    interpreted graph, not the linearized form, so inverted edges are
    always re-entrant.
    """
    counts = defaultdict(int)
    counts[self.top] += 1  # implicit entrancy to top
    for edge in self.edges():
        counts[edge.target] += 1
    # Report only re-entrant nodes, minus the first entrant edge.
    return {var: n - 1 for var, n in counts.items() if n >= 2}
def libvlc_media_player_has_vout(p_mi):
    '''How many video outputs does this media player have?
    @param p_mi: the media player.
    @return: the number of video outputs.
    '''
    # Reuse the cached ctypes binding, building it on first use.
    f = _Cfunctions.get('libvlc_media_player_has_vout', None)
    if f is None:
        f = _Cfunction('libvlc_media_player_has_vout', ((1,),), None,
                       ctypes.c_uint, MediaPlayer)
    return f(p_mi)
def wait_for_successful_query(url, wait_for=300, **kwargs):
    '''
    Query a resource until a successful response, and decode the return data

    CLI Example:

    .. code-block:: bash

        salt '*' http.wait_for_successful_query http://somelink.com/ wait_for=160
    '''
    deadline = time.time() + wait_for
    while True:
        caught_exception = None
        result = None
        try:
            result = query(url=url, **kwargs)
            if not result.get('Error') and not result.get('error'):
                return result
        except Exception as exc:
            caught_exception = exc

        if time.time() > deadline:
            if not result and caught_exception:
                # workaround pylint bug https://www.logilab.org/ticket/3207
                raise caught_exception  # pylint: disable=E0702
            # Timed out: hand back whatever (unsuccessful) result we have.
            return result
def _default_error_handler(msg, _):
    """Default error handler callback for libopenjp2."""
    text = msg.decode('utf-8').rstrip()
    opj2.set_error_message("OpenJPEG library error: {0}".format(text))
def preprocess(self):
    """Preprocessing.

    Runs the base preprocessing (each active custom field gets a 'name'
    key and its value is assigned under that key), then strips some html
    tags from the notes, when present.
    """
    super(MambuLoan, self).preprocess()
    try:
        raw_notes = self['notes']
    except KeyError:
        return
    self['notes'] = strip_tags(raw_notes)
def lee_yeast_ChIP(data_set='lee_yeast_ChIP'):
    """Yeast ChIP data from Lee et al."""
    if not data_available(data_set):
        download_data(data_set)
    from pandas import read_csv
    filename = os.path.join(data_path, data_set, 'binding_by_gene.tsv')
    S = read_csv(filename, header=1, index_col=0, sep='\t')
    # Real factor columns vs. pandas' auto-named 'Unnamed: N' columns.
    transcription_factors = [col for col in S.columns
                             if not col.startswith('Unnamed')]
    annotations = S[['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']]
    S = S[transcription_factors]
    return data_details_return(
        {'annotations': annotations,
         'Y': S,
         'transcription_factors': transcription_factors},
        data_set)
def validate_hex(value):
    """Validate that *value* parses as hexadecimal; return it unchanged."""
    try:
        binascii.unhexlify(value)
    except Exception:
        # Anything unhexlify rejects (odd length, non-hex characters,
        # wrong type) is reported as a validation error.
        raise vol.Invalid('{} is not of hex format'.format(value))
    return value
def legend(self):
    '''Splattable list of :class:`~bokeh.models.annotations.Legend` objects.'''
    panels = self.above + self.below + self.left + self.right + self.center
    found = [panel for panel in panels if isinstance(panel, Legend)]
    return _legend_attr_splat(found)
def enabled_service_owners():
    '''
    Return which packages own each of the services that are currently enabled.

    CLI Example:

        salt myminion introspect.enabled_service_owners
    '''
    error = {}
    if 'pkg.owner' not in __salt__:
        error['Unsupported Package Manager'] = (
            'The module for the package manager on this system does not '
            'support looking up which package(s) owns which file(s)'
        )
    if 'service.show' not in __salt__:
        error['Unsupported Service Manager'] = (
            'The module for the service manager on this system does not '
            'support showing descriptive service data'
        )
    if error:
        return {'Error': error}

    owners = {}
    for service in __salt__['service.get_enabled']():
        data = __salt__['service.show'](service)
        if 'ExecStart' not in data:
            # No start command -> cannot map this service to a package.
            continue
        start_cmd = data['ExecStart']['path']
        pkg = __salt__['pkg.owner'](start_cmd)
        owners[service] = next(six.itervalues(pkg))
    return owners
def callback_oauth2(self, request):
    """Handle the OAuth 2 callback step.

    :param request: contains the current session
    :return: the access token
    """
    callback_url = self.callback_url(request)
    oauth = OAuth2Session(client_id=self.consumer_key,
                          redirect_uri=callback_url,
                          scope=self.scope)
    # NOTE(review): verify=False disables TLS certificate verification
    # on the token request — confirm this is intentional.
    token = oauth.fetch_token(
        self.REQ_TOKEN,
        code=request.GET.get('code', ''),
        authorization_response=callback_url,
        client_secret=self.consumer_secret,
        scope=self.scope,
        verify=False,
    )
    return token.get('access_token')
def _makeExtraWidgets(self):
    """Build the extra text widget and return it in a list."""
    self.textWidget = urwid.Text(self.text)
    return [self.textWidget]
def getVersion():
    """Return the underlying libusb's version information as a 6-namedtuple
    (or plain 6-tuple if namedtuples are not available):
    - major
    - minor
    - micro
    - nano
    - rc
    - describe
    Returns (0, 0, 0, 0, '', '') if libusb doesn't have the required
    entry point.
    """
    raw = libusb1.libusb_get_version().contents
    return Version(
        raw.major,
        raw.minor,
        raw.micro,
        raw.nano,
        raw.rc,
        raw.describe,
    )
def initializable(self):
    """True if the Slot is initializable."""
    flag = lib.EnvSlotInitableP(self._env, self._cls, self._name)
    return bool(flag)
def atualizar_software_sat(self):
    """Override of :meth:`~satcfe.base.FuncoesSAT.atualizar_software_sat`.

    :return: a standard SAT response.
    :rtype: satcfe.resposta.padrao.RespostaSAT
    """
    resp = self._http_post('atualizarsoftwaresat')
    payload = resp.json()
    return RespostaSAT.atualizar_software_sat(payload.get('retorno'))
def select(i):
    """Interactively select a key from a dict.

    Input:  {
              dict             - dict whose values are dicts with 'name'
                                 (string to display) and 'sort' (int order)
              (title)          - title to print first
              (error_if_empty) - if 'yes', pressing Enter is an error
              (skip_sort)      - if 'yes', do not sort the entries
            }

    Output: {
              return - return code = 0 on success
              string - selected dictionary key
            }
    """
    title = i.get('title', '')
    if title != '':
        out(title)
        out('')

    entries = i['dict']
    if i.get('skip_sort', '') != 'yes':
        ordered_keys = sorted(entries, key=lambda k: entries[k].get('sort', 0))
    else:
        # NOTE(review): without sorting this stays the dict itself, so the
        # default selection `ordered_keys[0]` below indexes by key 0, not
        # by position — confirm this is the intended behaviour.
        ordered_keys = entries

    # Print the menu and remember which displayed number maps to which key.
    index_to_key = {}
    for position, key in enumerate(ordered_keys):
        index_to_key[str(position)] = key
        out(str(position) + ') ' + entries[key].get('name', ''))
    out('')

    rx = inp({'text': 'Make your selection (or press Enter for 0): '})
    if rx['return'] > 0:
        return rx

    choice = rx['string'].strip()
    if choice == '':
        if i.get('error_if_empty', '') == 'yes':
            return {'return': 1, 'error': 'selection is empty'}
        selected = ordered_keys[0]
    else:
        if choice not in index_to_key:
            return {'return': 1, 'error': 'selection is not recognized'}
        selected = index_to_key[choice]

    return {'return': 0, 'string': selected}
def get_disks(vm_):
    '''
    Return the disks of a named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_disks <vm name>
    '''
    with _get_xapi_session() as xapi:
        vm_uuid = _get_label_uuid(xapi, 'VM', vm_)
        if vm_uuid is False:
            return False
        disks = {}
        for vbd in xapi.VM.get_VBDs(vm_uuid):
            dev = xapi.VBD.get_device(vbd)
            if not dev:
                # VBD without a device (e.g. empty CD drive): skip it.
                continue
            props = xapi.VBD.get_runtime_properties(vbd)
            disks[dev] = {
                'backend': props['backend'],
                'type': props['device-type'],
                'protocol': props['protocol'],
            }
        return disks
def sheets(self, index=None):
    """Return all sheets when *index* is None, else the sheet at *index*.

    The worksheet list is built lazily on first access and cached on the
    instance.
    """
    if self._sheets is None:
        self._sheets = [
            self.get_worksheet(sheet, position)
            for position, sheet in enumerate(self.iterate_sheets())
        ]
    return self._sheets if index is None else self._sheets[index]
def get_root(self):
    """:returns: the root node for the current node object."""
    # The first step of the materialized path identifies the root.
    root_path = self.path[0:self.steplen]
    return get_result_class(self.__class__).objects.get(path=root_path)
def process_beads_table(beads_table,
                        instruments_table,
                        base_dir=".",
                        verbose=False,
                        plot=False,
                        plot_dir=None,
                        full_output=False,
                        get_transform_fxn_kwargs=None):
    """Process calibration bead samples, as specified by an input table.

    For each row of `beads_table`, this function:
      - loads the FCS file named in the "File Path" field,
      - transforms scatter and fluorescence channels to RFI,
      - removes the first 250 and last 100 events,
      - removes saturated forward/side scatter events,
      - applies density gating on the forward/side scatter channels,
      - generates a standard-curve (MEF) transformation function for every
        fluorescence channel with specified MEF values,
      - optionally produces density/histogram and MEF diagnostic plots.

    Channel names are taken from `instruments_table`.

    Parameters
    ----------
    beads_table : DataFrame
        Table specifying beads samples to be processed.
    instruments_table : DataFrame
        Table specifying instruments, indexed by instrument ID.
    base_dir : str, optional
        Directory from where all the other paths are specified.
    verbose : bool, optional
        Whether to print information messages during execution.
    plot : bool, optional
        Whether to generate and save plots for each sample.
    plot_dir : str, optional
        Directory relative to `base_dir` into which plots are saved.  If
        ``plot == True`` and ``plot_dir is None``, plot without saving.
    full_output : bool, optional
        Whether to also return intermediate MEF-generation results.
    get_transform_fxn_kwargs : dict, optional
        Extra keyword arguments passed to ``FlowCal.mef.get_transform_fxn()``.
        (Default changed from a shared mutable ``{}`` to ``None``; behavior
        for callers is unchanged.)

    Returns
    -------
    beads_samples : list
        Processed, gated, and transformed samples (or the ExcelUIException
        raised while processing that row), in ``beads_table.index`` order.
    mef_transform_fxns : OrderedDict
        MEF transformation functions, indexed by ``beads_table.index``.
    mef_outputs : list, only if ``full_output == True``
        Full output of ``FlowCal.mef.get_transform_fxn()`` per row.
    """
    # BUGFIX: avoid the shared mutable-default-argument pitfall.
    if get_transform_fxn_kwargs is None:
        get_transform_fxn_kwargs = {}

    # Initialize output variables
    beads_samples = []
    mef_transform_fxns = collections.OrderedDict()
    mef_outputs = []

    # Return empty structures if beads table is empty
    if beads_table.empty:
        if full_output:
            return beads_samples, mef_transform_fxns, mef_outputs
        else:
            return beads_samples, mef_transform_fxns

    if verbose:
        msg = "Processing Beads table ({} entries)".format(len(beads_table))
        print("")
        print(msg)
        print("=" * len(msg))

    # Check that plotting directory exists, create otherwise
    if plot and plot_dir is not None \
            and not os.path.exists(os.path.join(base_dir, plot_dir)):
        os.makedirs(os.path.join(base_dir, plot_dir))

    # Extract header and channel names for which MEF values are specified.
    headers = list(beads_table.columns)
    mef_headers_all = [h for h in headers if re_mef_values.match(h)]
    mef_channels_all = [re_mef_values.match(h).group(1)
                        for h in mef_headers_all]

    # Iterate through table.  Any ExcelUIException raised while processing a
    # row is caught and stored in `beads_samples` instead of aborting.
    for beads_id, beads_row in beads_table.iterrows():
        try:
            # ###
            # Instrument Data
            # ###
            instruments_row = instruments_table.loc[beads_row['Instrument ID']]
            # Scatter channels: Forward Scatter, Side Scatter
            sc_channels = [instruments_row['Forward Scatter Channel'],
                           instruments_row['Side Scatter Channel'],
                           ]
            # Fluorescence channels is a comma-separated list
            fl_channels = instruments_row['Fluorescence Channels'].split(',')
            fl_channels = [s.strip() for s in fl_channels]

            # ###
            # Beads Data
            # ###
            if verbose:
                print("\nBeads ID {}...".format(beads_id))
                print("Loading file \"{}\"...".format(beads_row['File Path']))

            # Attempt to open file
            filename = os.path.join(base_dir, beads_row['File Path'])
            try:
                beads_sample = FlowCal.io.FCSData(filename)
            except IOError:
                raise ExcelUIException("file \"{}\" not found".format(
                    beads_row['File Path']))
            # Check that the number of events is greater than 400
            if beads_sample.shape[0] < 400:
                raise ExcelUIException("number of events is lower than 400")

            # Transform
            if verbose:
                print("Performing data transformation...")
            # Transform FSC/SSC and fluorescence channels to linear scale
            beads_sample = FlowCal.transform.to_rfi(beads_sample,
                                                    sc_channels + fl_channels)

            # Parse clustering channels data
            cluster_channels = beads_row['Clustering Channels'].split(',')
            cluster_channels = [cc.strip() for cc in cluster_channels]

            # Gate
            if verbose:
                print("Performing gating...")
            # Remove first and last events.  Transients in fluidics can make
            # the first few and last events slightly different from the rest.
            beads_sample_gated = FlowCal.gate.start_end(beads_sample,
                                                        num_start=250,
                                                        num_end=100)
            # Remove saturating events in forward/side scatter, if the FCS
            # data type is integer.  The value of a saturating event is taken
            # automatically from `beads_sample_gated.range`.
            if beads_sample_gated.data_type == 'I':
                beads_sample_gated = FlowCal.gate.high_low(
                    beads_sample_gated,
                    channels=sc_channels)
            # Density gating
            try:
                beads_sample_gated, __, gate_contour = FlowCal.gate.density2d(
                    data=beads_sample_gated,
                    channels=sc_channels,
                    gate_fraction=beads_row['Gate Fraction'],
                    xscale='logicle',
                    yscale='logicle',
                    sigma=5.,
                    full_output=True)
            except ValueError as ve:
                # BUGFIX: `ve.message` does not exist in Python 3; use str(),
                # consistent with the ExcelUIException handler below.
                raise ExcelUIException(str(ve))

            # Plot forward/side scatter density plot and fluorescence
            # histograms
            if plot:
                if verbose:
                    print("Plotting density plot and histogram...")
                # Density plot parameters
                density_params = {}
                density_params['mode'] = 'scatter'
                density_params["title"] = "{} ({:.1f}% retained)".format(
                    beads_id,
                    beads_sample_gated.shape[0] * 100. / beads_sample.shape[0])
                density_params['xscale'] = 'logicle'
                density_params['yscale'] = 'logicle'
                # Beads have a tight distribution, so axis limits are set
                # from 0.75 decades below the 5th percentile to 0.75 decades
                # above the 95th percentile.
                density_params['xlim'] = (
                    np.percentile(beads_sample_gated[:, sc_channels[0]],
                                  5) / (10 ** 0.75),
                    np.percentile(beads_sample_gated[:, sc_channels[0]],
                                  95) * (10 ** 0.75),
                )
                density_params['ylim'] = (
                    np.percentile(beads_sample_gated[:, sc_channels[1]],
                                  5) / (10 ** 0.75),
                    np.percentile(beads_sample_gated[:, sc_channels[1]],
                                  95) * (10 ** 0.75),
                )
                # Less smoothing should be applied for visualization of the
                # tight bead distribution.
                density_params['sigma'] = 5.
                # Histogram plot parameters
                hist_params = {'xscale': 'logicle'}
                # Plot
                if plot_dir is not None:
                    figname = os.path.join(
                        base_dir,
                        plot_dir,
                        "density_hist_{}.png".format(beads_id))
                else:
                    figname = None
                plt.figure(figsize=(6, 4))
                FlowCal.plot.density_and_hist(
                    beads_sample,
                    beads_sample_gated,
                    density_channels=sc_channels,
                    hist_channels=cluster_channels,
                    gate_contour=gate_contour,
                    density_params=density_params,
                    hist_params=hist_params,
                    savefig=figname)

            # ###
            # Process MEF values
            # ###
            # For each fluorescence channel, check whether a list of known
            # MEF values of the bead subpopulations is provided in
            # `beads_row`: a column named "<channel> MEF Values" exists and
            # is not empty.
            mef_values = []
            mef_channels = []
            for fl_channel in fl_channels:
                if fl_channel in mef_channels_all:
                    # Get header from channel name
                    mef_header = mef_headers_all[
                        mef_channels_all.index(fl_channel)]
                    # Extract text. If empty, ignore.
                    mef_str = beads_row[mef_header]
                    if pd.isnull(mef_str):
                        continue
                    # Save channel name
                    mef_channels.append(fl_channel)
                    # Parse list of values; non-numeric entries become NaN so
                    # that FlowCal ignores the matching subpopulation.
                    mef = mef_str.split(',')
                    mef = [int(e) if e.strip().isdigit() else np.nan
                           for e in mef]
                    mef_values.append(mef)

            # Ensure matching number of `mef_values` for all channels (this
            # implies that the calibration beads have the same number of
            # subpopulations for all channels).
            if mef_values:
                if not np.all([len(mef_values_channel) == len(mef_values[0])
                               for mef_values_channel in mef_values]):
                    raise ExcelUIException("Must specify the same number of"
                                           " MEF Values for each channel."
                                           " Use 'None' to instruct FlowCal"
                                           " to ignore a detected"
                                           " subpopulation.")
                mef_values = np.array(mef_values)

            # Obtain standard curve transformation
            if mef_channels:
                if verbose:
                    if len(mef_channels) == 1:
                        print("Calculating standard curve for channel {}..."
                              .format(mef_channels[0]))
                    else:
                        print("Calculating standard curve for channels {}..."
                              .format(", ".join(mef_channels)))
                mef_output = FlowCal.mef.get_transform_fxn(
                    beads_sample_gated,
                    mef_values,
                    mef_channels=mef_channels,
                    clustering_channels=cluster_channels,
                    verbose=False,
                    plot=plot,
                    plot_filename=beads_id,
                    plot_dir=os.path.join(base_dir, plot_dir)
                    if plot_dir is not None else None,
                    full_output=full_output,
                    **get_transform_fxn_kwargs)
                if full_output:
                    mef_transform_fxn = mef_output.transform_fxn
                else:
                    mef_transform_fxn = mef_output
            else:
                mef_transform_fxn = None
                mef_output = None

        except ExcelUIException as e:
            # Store the exception in place of the sample so that one bad row
            # does not abort the whole table.
            if verbose:
                print("ERROR: {}".format(str(e)))
            beads_samples.append(e)
            mef_transform_fxns[beads_id] = None
            if full_output:
                mef_outputs.append(None)
        else:
            # If no errors were found, store results
            beads_samples.append(beads_sample_gated)
            mef_transform_fxns[beads_id] = mef_transform_fxn
            if full_output:
                mef_outputs.append(mef_output)

    if full_output:
        return beads_samples, mef_transform_fxns, mef_outputs
    else:
        return beads_samples, mef_transform_fxns
def auth(self):
    """Authenticate against the CricketAPI AUTH endpoint.

    Posts the app credentials and, on success, caches the returned access
    token and its expiry via the store handler.  If a token is already
    cached, this is a no-op.

    :raises Exception: if the server response contains no auth payload.
    """
    if self.store_handler.has_value('access_token'):
        return  # a token is already cached; nothing to do

    params = {
        "access_key": self.access_key,
        "secret_key": self.secret_key,
        "app_id": self.app_id,
        "device_id": self.device_id,
    }
    auth_url = self.api_path + "auth/"
    response = self.get_response(auth_url, params, "post")

    if 'auth' not in response:
        logger.error("Error getting access_token, please verify your access_key, secret_key and app_id")
        raise Exception("Auth Failed, please check your access details")

    self.store_handler.set_value("access_token",
                                 response['auth']['access_token'])
    self.store_handler.set_value("expires",
                                 response['auth']['expires'])
    logger.info('Getting new access token')
def timeinfo(self):
    """Time series data of the time step.

    Returns None if no time series data is available for this time step.
    """
    series = self.sdat.tseries
    if self.istep not in series.index:
        return None
    return series.loc[self.istep]
def hs_mux(sel, ls_hsi, hso):
    """[Many-to-one] Multiplexes a list of input handshake interfaces.

    sel    - (i) selects an input handshake interface to be connected to
                 the output
    ls_hsi - (i) list of input handshake tuples (ready, valid)
    hso    - (o) output handshake tuple (ready, valid)

    Returns a MyHDL combinational generator that wires the selected
    input's valid to the output and the output's ready back to that
    input; all other inputs see ready = 0.
    """
    N = len(ls_hsi)
    # Split the (ready, valid) tuples into two indexable signal lists.
    ls_hsi_rdy, ls_hsi_vld = zip(*ls_hsi)
    ls_hsi_rdy, ls_hsi_vld = list(ls_hsi_rdy), list(ls_hsi_vld)
    hso_rdy, hso_vld = hso

    @always_comb
    def _hsmux():
        # Defaults first: output invalid, every input not ready ...
        hso_vld.next = 0
        for i in range(N):
            ls_hsi_rdy[i].next = 0
            if i == sel:
                # ... then override for the selected input, which is
                # wired straight through in both directions.
                hso_vld.next = ls_hsi_vld[i]
                ls_hsi_rdy[i].next = hso_rdy
    return _hsmux
def map_or_apply(function, param):
    """Map the function on ``param``, or apply it, depending whether
    ``param`` is a list or an item.

    :param function: The function to apply.
    :param param: The parameter to feed the function with (list or item).
    :returns: The computed value or ``None``.
    """
    try:
        if isinstance(param, list):
            # One result per item; any empty result aborts with None.
            return [next(iter(function(item))) for item in param]
        return next(iter(function(param)))
    except StopIteration:
        # function() yielded nothing for some input.
        return None
def _write_method(schema):
    """Add a write method for named schema to a class.

    Returns a method that serializes ``self._data`` through the generic
    ``_write`` helper, with ``schema`` baked in as the default format.
    The generated method's docstring is filled in from the schema's
    documentation template.
    """
    def method(self,
               filename=None,
               schema=schema,
               id_col='uid',
               sequence_col='sequence',
               extra_data=None,
               alphabet=None,
               **kwargs):
        # Use generic write class to write data.
        return _write(self._data,
                      filename=filename,
                      schema=schema,
                      id_col=id_col,
                      sequence_col=sequence_col,
                      extra_data=extra_data,
                      alphabet=alphabet,
                      **kwargs)
    # Update docs
    method.__doc__ = _write_doc_template(schema)
    return method
def scatter(*args, **kwargs):
    """Create a scatter chart: an :py:class:`.AxisChart` containing a
    single :py:class:`.ScatterSeries`.

    :param \\*args: data for the scatter series, either (x, y) values or
        two big tuples/lists of x and y values respectively.
    :param str name: name to be associated with the series.
    :param str color: hex colour of the data points (defaults to the
        first palette colour).
    :param Number size: size of each data point - generally the diameter.
    :param Number linewidth: width in pixels of the data points' edge.
    :param str title: the chart's title.
    :param width: the width in pixels of the chart.
    :param height: the height in pixels of the chart.
    :param str x_label: label for the x-axis.
    :param str y_label: label for the y-axis.
    :raises ValueError: if the size and length of the data doesn't match
        either format.
    :rtype: :py:class:`.AxisChart`
    """
    # Peel off the kwargs that belong to the series; the rest configure
    # the chart itself.
    series_kwargs = {key: kwargs.pop(key)
                     for key in ("name", "color", "size", "linewidth")
                     if key in kwargs}
    series_kwargs.setdefault("color", colors[0])
    series = ScatterSeries(*args, **series_kwargs)
    return AxisChart(series, **kwargs)
def display(component, **kwargs):
    """Display the given component based on the environment it's run from.

    See :class:`DisplayEnvironment <cqparts.display.environment.DisplayEnvironment>`
    documentation for more details.

    :param component: component to display
    :type component: :class:`Component <cqparts.Component>`

    Additional parameters may be used by the chosen display environment.
    """
    environment = get_display_environment()
    if environment is None:
        raise LookupError('valid display environment could not be found')
    environment.display(component, **kwargs)
def _get_reference_namespace ( self , name ) : """Return namespace where reference name is defined It returns the globals ( ) if reference has not yet been defined"""
glbs = self . _mglobals ( ) if self . _pdb_frame is None : return glbs else : lcls = self . _pdb_locals if name in lcls : return lcls else : return glbs
def _get_bandgap_eigenval(eigenval_fname, outcar_fname):
    """Get the bandgap from the EIGENVAL file.

    The electron count is read from OUTCAR, band energies are read from
    EIGENVAL and regrouped band-major, and the minimum gap over spin
    channels is returned.
    """
    # Read the number of electrons from the first matching OUTCAR record.
    with open(outcar_fname, "r") as f:
        parser = OutcarParser()
        nelec = next(iter(filter(lambda x: "number of electrons" in x,
                                 parser.parse(f.readlines()))))["number of electrons"]
    with open(eigenval_fname, "r") as f:
        eigenval_info = list(EigenvalParser().parse(f.readlines()))
    # spin_polarized = (2 == len(next(filter(lambda x: "kpoint" in x, eigenval_info))["occupancies"][0]))
    # if spin_polarized:
    # Transpose: kpoint-major records -> band-major, then spin-major.
    all_energies = [zip(*x["energies"]) for x in eigenval_info if "energies" in x]
    spin_energies = zip(*all_energies)
    # nelec/2 filled bands per channel — assumes non-spin-polarized filling;
    # NOTE(review): confirm behavior for spin-polarized / odd-electron runs.
    gaps = [VaspParser._get_bandgap_from_bands(x, nelec / 2.0) for x in spin_energies]
    return min(gaps)
def _render_select ( selections ) : """Render the selection part of a query . Parameters selections : dict Selections for a table Returns str A string for the " select " part of a query See Also render _ query : Further clarification of ` selections ` dict formatting"""
if not selections : return 'SELECT *' rendered_selections = [ ] for name , options in selections . items ( ) : if not isinstance ( options , list ) : options = [ options ] original_name = name for options_dict in options : name = original_name alias = options_dict . get ( 'alias' ) alias = "as %s" % alias if alias else "" formatter = options_dict . get ( 'format' ) if formatter : name = _format_select ( formatter , name ) rendered_selections . append ( "%s %s" % ( name , alias ) ) return "SELECT " + ", " . join ( rendered_selections )
def get_type(var):
    """Gets types accounting for numpy and pandas containers.

    For numpy arrays and pandas Index objects, the element dtype's scalar
    type is returned instead of the container type.
    """
    if HAVE_NUMPY and isinstance(var, np.ndarray):
        # On win32 return the dtype object itself, working around a
        # system-specific numpy issue:
        # https://github.com/numpy/numpy/issues/3667
        type_ = var.dtype if _WIN32 else var.dtype.type
    elif HAVE_PANDAS and isinstance(var, pd.Index):
        type_ = var.dtype if _WIN32 else var.dtype.type
    else:
        type_ = type(var)
    return type_
def iter_filths():
    """Iterate over all instances of filth.

    Regex-based filth classes are instantiated with a dummy regex match;
    all other classes are constructed with no arguments.
    """
    for filth_cls in iter_filth_clss():
        if not issubclass(filth_cls, RegexFilth):
            yield filth_cls()
            continue
        # RegexFilth subclasses require a match object to construct.
        match = next(re.finditer(r"\s+", "fake pattern string"))
        yield filth_cls(match)
def _task_table(self, task_id):
    """Fetch and parse the task table information for a single task ID.

    Args:
        task_id: A task ID to get information about.

    Returns:
        A dictionary with information about the task ID in question, or
        an empty dict if the GCS has no entry for it.
    """
    assert isinstance(task_id, ray.TaskID)
    # Look up the raw encoded entry for this task in the GCS.
    message = self._execute_command(task_id, "RAY.TABLE_LOOKUP",
                                    ray.gcs_utils.TablePrefix.RAYLET_TASK,
                                    "", task_id.binary())
    if message is None:
        return {}
    # Decode the GCS table entry; exactly one entry is expected per task.
    gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
        message, 0)
    assert gcs_entries.EntriesLength() == 1
    task_table_message = ray.gcs_utils.Task.GetRootAsTask(
        gcs_entries.Entries(0), 0)

    execution_spec = task_table_message.TaskExecutionSpec()
    task_spec = task_table_message.TaskSpecification()
    # Deserialize the task specification into a Task object.
    task = ray._raylet.Task.from_string(task_spec)
    function_descriptor_list = task.function_descriptor_list()
    function_descriptor = FunctionDescriptor.from_bytes_list(
        function_descriptor_list)

    # Human-readable view of the task spec (IDs rendered as hex strings).
    task_spec_info = {
        "DriverID": task.driver_id().hex(),
        "TaskID": task.task_id().hex(),
        "ParentTaskID": task.parent_task_id().hex(),
        "ParentCounter": task.parent_counter(),
        "ActorID": (task.actor_id().hex()),
        "ActorCreationID": task.actor_creation_id().hex(),
        "ActorCreationDummyObjectID": (
            task.actor_creation_dummy_object_id().hex()),
        "ActorCounter": task.actor_counter(),
        "Args": task.arguments(),
        "ReturnObjectIDs": task.returns(),
        "RequiredResources": task.required_resources(),
        "FunctionID": function_descriptor.function_id.hex(),
        "FunctionHash": binary_to_hex(function_descriptor.function_hash),
        "ModuleName": function_descriptor.module_name,
        "ClassName": function_descriptor.class_name,
        "FunctionName": function_descriptor.function_name,
    }

    return {
        "ExecutionSpec": {
            "Dependencies": [
                execution_spec.Dependencies(i)
                for i in range(execution_spec.DependenciesLength())
            ],
            "LastTimestamp": execution_spec.LastTimestamp(),
            "NumForwards": execution_spec.NumForwards()
        },
        "TaskSpec": task_spec_info
    }
def fit_points_in_bounding_box_params(df_points, bounding_box,
                                      padding_fraction=0):
    '''Return offset and scale factor to scale ``x``, ``y`` columns of
    :data:`df_points` to fill :data:`bounding_box` while maintaining
    aspect ratio.

    Arguments
    ---------
    df_points : pandas.DataFrame
        A frame with at least the columns ``x`` and ``y``, containing one
        row per point.
    bounding_box : pandas.Series
        A `pandas.Series` containing numeric `width` and `height` values.
    padding_fraction : float
        Fraction of padding to add around points.

    Returns
    -------
    (offset, scale) : (pandas.Series, float)
        Offset translation and scale required to fit all points in
        :data:`df_points` to fill :data:`bounding_box` while maintaining
        aspect ratio.  :data:`offset` contains ``x`` and ``y`` values.
    '''
    # Bounding box of the points themselves (assumes origin at 0, 0).
    points_bbox = pd.Series([df_points.x.max(), df_points.y.max()],
                            index=['width', 'height'])

    # Shrink the usable area by the requested padding on each side.
    fill_scale = 1 - 2 * padding_fraction
    assert fill_scale > 0

    padded_scale = scale_to_fit_a_in_b(points_bbox, bounding_box) * fill_scale

    # Center the scaled points inside the target box.
    offset = .5 * (bounding_box - points_bbox * padded_scale)
    offset.index = ['x', 'y']
    return offset, padded_scale
def update_existing(self, *args, **kwargs):
    """Update already existing properties of this CIM instance.

    Existing properties will be updated, and new properties will be
    ignored without further notice.

    Parameters:

      *args (list):
        Properties for updating the properties of the instance,
        specified as positional arguments.  Each positional argument
        must be a mapping (anything with an ``items`` method) or an
        iterable of (key, value) tuples.

      **kwargs (dict):
        Properties for updating the properties of the instance,
        specified as keyword arguments, where each name/value pair is a
        property key and its new value.
    """
    def _set_existing(key, value):
        # Update the property only if it already exists; silently ignore
        # unknown keys (documented contract of this method).
        try:
            prop = self.properties[key]
        except KeyError:
            return
        prop.value = value

    for mapping in args:
        if hasattr(mapping, 'items'):
            for key, value in mapping.items():
                _set_existing(key, value)
        else:
            for (key, value) in mapping:
                _set_existing(key, value)
    for key, value in kwargs.items():
        _set_existing(key, value)
def resolve_configuration(self, configuration):
    """Resolve requirements from given JSON encoded data.

    The JSON should follow the testcase meta-data requirements field
    format.  Resolves requirements for each individual DUT and creates a
    DUT requirements list that contains the configuration for each DUT,
    e.g. ``"duts": [{"*": {"count": 2, "type": "process"}}]`` expands to
    one numbered entry per DUT.

    :param configuration: optional externally supplied requirements; if
        falsy, ``self.json_config`` is used instead.
    """
    # Fall back to the stored configuration when none is supplied.
    effective = configuration or self.json_config
    self._resolve_requirements(effective["requirements"])
    self._resolve_dut_count()
def randomize_molecule_low(molecule, manipulations):
    """Return a randomized copy of the molecule, without the nonbond check.

    The manipulations are applied in random order to a copy of the
    molecule's coordinates; neither input is modified.
    """
    # Shuffle a shallow copy so the caller's list order is untouched.
    ops = copy.copy(manipulations)
    shuffle(ops)
    new_coordinates = molecule.coordinates.copy()
    for op in ops:
        op.apply(new_coordinates)
    return molecule.copy_with(coordinates=new_coordinates)
def find(decl_matcher, decls, recursive=True):
    """Returns a list of declarations that match `decl_matcher` defined
    criteria or None

    :param decl_matcher: Python callable object, that takes one argument -
        reference to a declaration
    :param decls: the search scope, :class:declaration_t object or
        :class:declaration_t objects list
    :param recursive: boolean, if True, the method will run `decl_matcher`
        on the internal declarations too
    """
    # Normalize the scope to a flat list of candidate declarations.
    scope = list(decls) if isinstance(decls, list) else [decls]
    if recursive:
        scope = make_flatten(scope)
    return list(filter(decl_matcher, scope))
def setText(self, text):
    """Sets the text for this widget to the inputed text, converting it
    based on the current input format if necessary.

    :param text: <str> or None -- ``None`` is treated as the empty string.
    """
    if text is None:
        text = ''
    # Apply the widget's input format, then re-encode for the configured
    # character encoding before delegating to the base line-edit setter.
    super(XLineEdit, self).setText(projex.text.encoded(self.formatText(text),
                                                       self.encoding()))
def format(table, field, fmt, **kwargs):
    """Convenience function to format all values in the given `field`
    using the `fmt` format string.

    The ``where`` keyword argument can be given with a callable or
    expression which is evaluated on each row and which should return
    True if the conversion should be applied on that row, else False.
    """
    # Named function instead of a lambda (PEP 8); behavior is identical.
    def conv(v):
        return fmt.format(v)
    return convert(table, field, conv, **kwargs)
def int_to_alpha(n, upper=True):
    """Generates alphanumeric labels of form A-Z, AA-ZZ etc.

    Index 0 maps to 'A', 25 to 'Z', 26 to 'AA' and so on; ``upper``
    selects upper- or lower-case letters.
    """
    base = 65 if upper else 97
    if n == 0:
        return chr(base)
    # Emit base-26 "digits" least-significant first, consuming the offset
    # of each extra letter position as we go, then reverse.
    digits = []
    place = 0
    while n >= 0:
        digits.append(chr(base + (n // 26 ** place) % 26))
        place += 1
        n -= 26 ** place
    return ''.join(reversed(digits))
def ei(cn, ns=None, lo=None, di=None, iq=None, ico=None, pl=None):
    # pylint: disable=redefined-outer-name,too-many-arguments
    """Wrapper for :meth:`~pywbem.WBEMConnection.EnumerateInstances`.

    Enumerate the instances of a class (including instances of its
    subclasses) in a namespace, using the global connection ``CONN``.

    :param cn: name of the class to be enumerated (string or
        :class:`~pywbem.CIMClassName`; a CIMClassName's `host` is ignored).
    :param ns: CIM namespace; if `None`, defaults per pywbem rules.
    :param lo: LocalOnly flag (deprecated in DSP0200; callers should pass
        `False`).
    :param di: DeepInheritance flag; `None` uses the server default of
        `True`.
    :param iq: IncludeQualifiers flag (deprecated in DSP0200).
    :param ico: IncludeClassOrigin flag (deprecated in DSP0200).
    :param pl: PropertyList -- property names to include; `None` means all.
    :return: list of :class:`~pywbem.CIMInstance` with their `path`
        attribute set.
    """
    options = dict(LocalOnly=lo,
                   DeepInheritance=di,
                   IncludeQualifiers=iq,
                   IncludeClassOrigin=ico,
                   PropertyList=pl)
    return CONN.EnumerateInstances(cn, ns, **options)
def get_json_log_data(data):
    """Returns a new `data` dictionary with hidden params for log purpose.

    The original ``data`` is only deep-copied (lazily) when at least one
    hidden parameter is actually present; otherwise it is returned as-is.
    """
    log_data = data
    for param in LOG_HIDDEN_JSON_PARAMS:
        if param not in data['params']:
            continue
        if log_data is data:
            # Copy on first redaction so the caller's dict stays intact.
            log_data = copy.deepcopy(data)
        log_data['params'][param] = "**********"
    return log_data
def find_interfaces(device, **kwargs):
    """Collect USB interface descriptors matching ``kwargs`` across all
    configurations of ``device``.

    :param device: iterable of USB configurations.
    :return: list of matching interface descriptors (best-effort: any
        configuration that fails descriptor lookup is skipped).
    """
    interfaces = []
    try:
        for cfg in device:
            try:
                interfaces.extend(usb_find_desc(cfg, find_all=True, **kwargs))
            except Exception:
                # Best-effort: skip configurations that fail lookup.
                # (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                pass
    except Exception:
        # Device itself not iterable / enumerable: return what we have.
        pass
    return interfaces
def send_single_file(self, sender, receiver, media_id):
    """Send a single-chat file message.

    :param sender: sender of the message.
    :param receiver: receiving member ID.
    :param media_id: file id obtained from the media-upload API; the
        file must be larger than 4 bytes.
    :return: the JSON response payload.
    """
    chat_type = 'single'
    return self.send_file(sender, chat_type, receiver, media_id)
def from_array(array):
    """Deserialize a new ChatActionMessage from a given dictionary.

    :param array: dict payload with an 'action' key and an optional
        'chat_id' key (str or int).
    :return: new ChatActionMessage instance, or None if `array` is empty.
    :rtype: ChatActionMessage
    """
    if array is None or not array:
        return None
    # end if
    assert_type_or_raise(array, dict, parameter_name="array")

    data = {}
    data['action'] = u(array.get('action'))
    # BUGFIX: the original had `isinstance(x, None)` (TypeError: arg 2 must
    # be a type) and called `None(...)`, so any non-None chat_id crashed.
    chat_id = array.get('chat_id')
    if chat_id is None:
        data['receiver'] = None
    elif isinstance(chat_id, str):
        data['receiver'] = u(chat_id)
    elif isinstance(chat_id, int):
        data['receiver'] = int(chat_id)
    else:
        raise TypeError('Unknown type, must be one of None, str, int.')
    # end if
    return ChatActionMessage(**data)
def _photometricErrors(self, n_per_bin=100, plot=False):
    """Realistic photometric errors estimated from catalog objects and mask.

    The median magnitude error is computed as a function of depth below
    the local magnitude limit, in consecutive groups of `n_per_bin`
    objects, and stored as interpolating functions ``self.photo_err_1`` /
    ``self.photo_err_2``.  Extend below the magnitude threshold with a
    flat extrapolation.

    :param n_per_bin: number of catalog objects per median bin.
    :param plot: unused here; kept for interface compatibility.
    """
    # Bin the catalog onto the ROI pixels (needed for pixel_roi_index).
    self.catalog.spatialBin(self.roi)

    if len(self.catalog.mag_1) < n_per_bin:
        logger.warning("Catalog contains fewer objects than requested to calculate errors.")
        n_per_bin = int(len(self.catalog.mag_1) / 3)

    # Band 1: depth of each object below the local magnitude limit.
    mag_1_thresh = self.mask.mask_1.mask_roi_sparse[self.catalog.pixel_roi_index] - self.catalog.mag_1
    sorting_indices = np.argsort(mag_1_thresh)
    mag_1_thresh_sort = mag_1_thresh[sorting_indices]
    mag_err_1_sort = self.catalog.mag_err_1[sorting_indices]

    # ADW: Can't this be done with np.median(axis=?)
    # Median threshold / error over consecutive groups of n_per_bin objects.
    mag_1_thresh_medians = []
    mag_err_1_medians = []
    for i in range(0, int(len(mag_1_thresh) / float(n_per_bin))):
        mag_1_thresh_medians.append(np.median(mag_1_thresh_sort[n_per_bin * i: n_per_bin * (i + 1)]))
        mag_err_1_medians.append(np.median(mag_err_1_sort[n_per_bin * i: n_per_bin * (i + 1)]))

    # Anchor the curve at threshold -99 so objects brighter than the first
    # bin get the first bin's error (flat extrapolation at the bright end).
    if mag_1_thresh_medians[0] > 0.:
        mag_1_thresh_medians = np.insert(mag_1_thresh_medians, 0, -99.)
        mag_err_1_medians = np.insert(mag_err_1_medians, 0, mag_err_1_medians[0])

    # Beyond the faint end, fill with the last (faintest) error value.
    self.photo_err_1 = scipy.interpolate.interp1d(mag_1_thresh_medians, mag_err_1_medians,
                                                  bounds_error=False, fill_value=mag_err_1_medians[-1])

    # Band 2: identical procedure.
    mag_2_thresh = self.mask.mask_2.mask_roi_sparse[self.catalog.pixel_roi_index] - self.catalog.mag_2
    sorting_indices = np.argsort(mag_2_thresh)
    mag_2_thresh_sort = mag_2_thresh[sorting_indices]
    mag_err_2_sort = self.catalog.mag_err_2[sorting_indices]

    mag_2_thresh_medians = []
    mag_err_2_medians = []
    for i in range(0, int(len(mag_2_thresh) / float(n_per_bin))):
        mag_2_thresh_medians.append(np.median(mag_2_thresh_sort[n_per_bin * i: n_per_bin * (i + 1)]))
        mag_err_2_medians.append(np.median(mag_err_2_sort[n_per_bin * i: n_per_bin * (i + 1)]))

    if mag_2_thresh_medians[0] > 0.:
        mag_2_thresh_medians = np.insert(mag_2_thresh_medians, 0, -99.)
        mag_err_2_medians = np.insert(mag_err_2_medians, 0, mag_err_2_medians[0])

    self.photo_err_2 = scipy.interpolate.interp1d(mag_2_thresh_medians, mag_err_2_medians,
                                                  bounds_error=False, fill_value=mag_err_2_medians[-1])
def get_active_token(self):
    """Return a valid access token, refreshing it when expired.

    The access token expires every 24 hours.  If the stored expiry
    timestamp has passed, the cached token is discarded and a new one is
    obtained via ``self.auth()``.

    Returns:
        str: an active access token read from the store handler.
    """
    expire_time = self.store_handler.has_value("expires")
    access_token = self.store_handler.has_value("access_token")
    if expire_time and access_token:
        expire_time = self.store_handler.get_value("expires")
        if not datetime.now() < datetime.fromtimestamp(float(expire_time)):
            # Expired: drop the stale entries and re-authenticate.
            self.store_handler.delete_value("access_token")
            self.store_handler.delete_value("expires")
            logger.info('Access token expired, going to get new token')
            self.auth()
        else:
            # Fixed typo in log message ("noy" -> "not").
            logger.info('Access token not expired yet')
    else:
        # No cached token/expiry at all; authenticate from scratch.
        self.auth()
    return self.store_handler.get_value("access_token")
def _validate_schema(schema, body):
    """Validate data against a schema.

    :param schema: pair of (jsonschema schema dict, compiled rapidjson
        validator callable)
    :param body: python data structure to validate
    :raises SchemaValidationError: if the body does not match the schema
    """
    # Note
    #
    # Schema validation is currently the major CPU bottleneck of
    # BigchainDB. the `jsonschema` library validates python data structures
    # directly and produces nice error messages, but validation takes 4+ms
    # per transaction which is pretty slow. The rapidjson library validates
    # much faster at 1.5ms, however it produces _very_ poor error messages.
    # For this reason we use both, rapidjson as an optimistic pathway and
    # jsonschema as a fallback in case there is a failure, so we can produce
    # a helpful error message.
    try:
        schema[1](rapidjson.dumps(body))
    except ValueError as exc:
        # rapidjson rejected the body; re-validate with jsonschema to get
        # a readable error message.
        try:
            jsonschema.validate(body, schema[0])
        except jsonschema.ValidationError as exc2:
            raise SchemaValidationError(str(exc2)) from exc2
        # The two validators disagree: log it so the discrepancy can be
        # investigated.  (Fixed typo "wheras" -> "whereas".)
        logger.warning('code problem: jsonschema did not raise an '
                       'exception, whereas rapidjson raised %s', exc)
        raise SchemaValidationError(str(exc)) from exc
def check(self, diff):
    r"""Check that the new file introduced has a valid name.

    The module must either be an ``__init__.py`` file or match
    ``feature_[a-zA-Z0-9_]+\.\w+``.
    """
    module_name = pathlib.Path(diff.b_path).parts[-1]
    matches_feature_pattern = re_test(FEATURE_MODULE_NAME_REGEX, module_name)
    is_package_init = module_name == '__init__.py'
    assert matches_feature_pattern or is_package_init
def unflatten2(flat_list, cumlen_list):
    """Rebuild the nested list that was flattened by invertible_flatten2.

    Args:
        flat_list (list): the flattened list
        cumlen_list (list): cumulative lengths which undo the flattening

    Returns:
        unflat_list2: original nested list

    SeeAlso:
        invertible_flatten1
        invertible_flatten2
        unflatten2

    Example:
        >>> # ENABLE_DOCTEST
        >>> flat_list = [5, 2, 3, 12, 3, 3, 9, 13, 3, 5]
        >>> cumlen_list = [1, 6, 7, 9, 10]
        >>> unflatten2(flat_list, cumlen_list)
        [[5], [2, 3, 12, 3, 3], [9], [13, 3], [5]]
    """
    # Pair each stop offset with the previous one (starting from 0) and
    # slice the flat list between consecutive offsets.
    starts = itertools.chain([0], cumlen_list)
    return [flat_list[start:stop]
            for start, stop in zip(starts, cumlen_list)]
def gcd(*numbers):
    """Returns the greatest common divisor for a sequence of numbers.

    Args:
        \\*numbers: Sequence of numbers.

    Returns:
        (int) Greatest common divisor of numbers.
    """
    # Seed with the first number, then fold pygcd over every element
    # (folding the first element onto itself is a harmless no-op).
    result = numbers[0]
    for value in numbers:
        result = pygcd(result, value)
    return result
def send_data(data):
    """Send data to herkulex.

    Packetize & write the packet to serial port.

    Args:
        data (list): the data to be sent

    Raises:
        HerkulexError: Error occurred while writing to the serial port
    """
    datalength = len(data)
    csm1 = checksum1(data, datalength)
    csm2 = checksum2(csm1)
    # Frame layout: two 0xFF header bytes, then the two checksums are
    # spliced in at positions 5 and 6 of the header-prefixed packet.
    data.insert(0, 0xFF)
    data.insert(1, 0xFF)
    data.insert(5, csm1)
    data.insert(6, csm2)
    # Build a "\xNN\xNN..." escape string; 'string-escape' decoding turns
    # it into the raw bytes to transmit (Python 2 idiom -- TODO confirm
    # this module still targets Python 2).
    stringtosend = "".join("\\x%02X" % byte for byte in data)
    try:
        SERPORT.write(stringtosend.decode('string-escape'))
    except Exception:
        # Narrowed from a bare ``except:`` so that KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        raise HerkulexError("could not communicate with motors")
def authenticate(self, request):
    """Attempt to authenticate the request.

    :param request: django.http.Request instance
    :return bool: True if success else raises HTTP_401
    """
    authenticators = self._meta.authenticators

    # OPTIONS pre-flight requests bypass authentication when allowed.
    if request.method == 'OPTIONS' and ADREST_ALLOW_OPTIONS:
        self.auth = AnonimousAuthenticator(self)
        return True

    error_message = "Authorization required."
    for authenticator in authenticators:
        auth = authenticator(self)
        try:
            if not auth.authenticate(request):
                raise AssertionError(error_message)
            self.auth = auth
            auth.configure(request)
            return True
        # FIX: ``except X, e`` is Python-2-only syntax (SyntaxError on
        # Python 3); ``except X as e`` works on Python 2.6+ and 3.
        except AssertionError as e:
            error_message = str(e)

    # Every authenticator refused the request.
    raise HttpError(error_message, status=status.HTTP_401_UNAUTHORIZED)
def parse(self, what):
    """
    :param what: can be 'rlz-1/ref-asset1', 'rlz-2/sid-1', ...
    """
    # Split "<key>/<spec>"; a missing slash means "all assets".
    if '/' in what:
        key, spec = what.split('/')
    else:
        key, spec = what, ''
    if spec and not spec.startswith(('ref-', 'sid-')):
        raise ValueError('Wrong specification in %s' % what)
    elif spec == '':  # export losses for all assets
        aids = list(range(len(self.assetcol.array)))
        arefs = [self.asset_refs[ordinal] for ordinal in aids]
    elif spec.startswith('sid-'):  # passed the site ID
        site_id = int(spec[4:])
        aids, arefs = [], []
        for ordinal, record in enumerate(self.assetcol.array):
            if record['site_id'] == site_id:
                aids.append(ordinal)
                arefs.append(self.asset_refs[ordinal])
    elif spec.startswith('ref-'):  # passed the asset name
        arefs = [spec[4:]]
        aids = [self.str2asset[arefs[0]]['ordinal']]
    else:
        raise ValueError('Wrong specification in %s' % what)
    return aids, arefs, spec, key
def prepare_headers(self, headers, metadata, queue_derive=True):
    """Convert a dictionary of metadata into S3 compatible HTTP headers,
    and append headers to ``headers``.

    :type metadata: dict
    :param metadata: Metadata to be converted into S3 HTTP Headers and
                     appended to ``headers``.

    :type headers: dict
    :param headers: (optional) S3 compatible HTTP headers.

    :type queue_derive: bool
    :param queue_derive: whether to set the ``x-archive-queue-derive``
                         header to '1' (True, the default) or '0'.
    """
    # Tag the upload with this library's version unless the caller
    # already supplied a scanner value.
    if not metadata.get('scanner'):
        scanner = 'Internet Archive Python library {0}'.format(__version__)
        metadata['scanner'] = scanner
    prepared_metadata = prepare_metadata(metadata)

    headers['x-archive-auto-make-bucket'] = '1'
    if queue_derive is False:
        headers['x-archive-queue-derive'] = '0'
    else:
        headers['x-archive-queue-derive'] = '1'

    for meta_key, meta_value in prepared_metadata.items():
        # Encode arrays into JSON strings because Archive.org does not
        # yet support complex metadata structures in
        # <identifier>_meta.xml.
        if isinstance(meta_value, dict):
            meta_value = json.dumps(meta_value)
        # Convert the metadata value into a list if it is not already
        # iterable.
        if (isinstance(meta_value, six.string_types)
                or not hasattr(meta_value, '__iter__')):
            meta_value = [meta_value]
        # Convert metadata items into HTTP headers and add to
        # ``headers`` dict.  Each list item gets its own numbered
        # x-archive-metaNN-<key> header.
        for i, value in enumerate(meta_value):
            if not value:
                continue
            header_key = 'x-archive-meta{0:02d}-{1}'.format(i, meta_key)
            # Values that need quoting are percent-encoded and wrapped
            # in uri(...) per the IA-S3 convention.
            if (isinstance(value, six.string_types) and needs_quote(value)):
                if six.PY2 and isinstance(value, six.text_type):
                    value = value.encode('utf-8')
                value = 'uri({0})'.format(urllib.parse.quote(value))
            # because rfc822 http headers disallow _ in names, IA-S3 will
            # translate two hyphens in a row (--) into an underscore (_).
            header_key = header_key.replace('_', '--')
            headers[header_key] = value
    super(S3PreparedRequest, self).prepare_headers(headers)
def generate_pydenticon(identifier, size):
    '''Use pydenticon to generate an identicon image.

    All parameters are extracted from configuration.
    '''
    block_count = get_internal_config('size')
    fg = get_internal_config('foreground')
    bg = get_internal_config('background')
    generator = pydenticon.Generator(block_count, block_count,
                                     digest=hashlib.sha1,
                                     foreground=fg,
                                     background=bg)
    # Pydenticon adds padding on top of the requested size, so compute
    # the padding (a percentage of the size) and shrink the image size
    # to compensate before generating.
    pad = int(round(get_internal_config('padding') * size / 100.))
    inner_size = size - 2 * pad
    return generator.generate(identifier, inner_size, inner_size,
                              padding=(pad,) * 4,
                              output_format='png')
def forward_iter(self, X, training=False, device='cpu'):
    """Yield outputs of module forward calls on each batch of data.

    The storage device of the yielded tensors is determined by the
    ``device`` parameter.

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      By default, you should be able to pass: numpy arrays, torch
      tensors, pandas DataFrame or Series, scipy sparse CSR matrices,
      a dictionary/list/tuple of the former, or a Dataset.  If this
      doesn't work with your data, pass a ``Dataset`` that can.

    training : bool (default=False)
      Whether to set the module to train mode or not.

    device : string (default='cpu')
      The device to store each inference result on.  Defaults to CPU
      memory since there is generally more memory available there; for
      performance this might be changed to e.g. 'cuda:0'.

    Yields
    ------
    yp : torch tensor
      Result from a forward call on an individual batch.
    """
    iterator = self.get_iterator(self.get_dataset(X), training=training)
    for batch in iterator:
        Xi = unpack_data(batch)[0]
        prediction = self.evaluation_step(Xi, training=training)
        # Forward may return a tuple of tensors; move each to ``device``.
        if isinstance(prediction, tuple):
            yield tuple(part.to(device) for part in prediction)
        else:
            yield prediction.to(device)
def _get_distance_scaling ( self , C , mag , rhypo ) : """Returns the distance scalig term"""
return ( C [ "a3" ] * np . log ( rhypo ) ) + ( C [ "a4" ] + C [ "a5" ] * mag ) * rhypo
def convolutional_layer_series(initial_size, layer_sequence):
    """Execute a series of convolutional layer transformations to the size number.

    Each element of ``layer_sequence`` is a (filter_size, padding, stride)
    triple fed to ``convolution_size_equation`` in order.
    """
    current_size = initial_size
    for kernel, pad, step in layer_sequence:
        current_size = convolution_size_equation(current_size, kernel,
                                                 pad, step)
    return current_size
def readCol(self, col, startRow=0, endRow=-1):
    '''Read a single column, delegating to the underlying operation object.

    :param col: index of the column to read
    :param startRow: first row to include (default 0)
    :param endRow: last row to include; -1 presumably means "to the end"
                   -- TODO confirm against the operation implementation
    '''
    return self.__operation.readCol(col, startRow, endRow)
def _merge_before_set(self, key, existing, value, is_secret):
    """Merge the new value being set with the existing value before set.

    Merging happens when either the global MERGE_ENABLED_FOR_DYNACONF
    flag is on, or the incoming value carries a local
    ``dynaconf_merge`` / ``dynaconf_merge_unique`` marker.  When
    ``is_secret`` is true, logged values are masked with ``***``.
    Returns ``value`` (possibly converted to a list and with markers
    removed); ``existing`` is mutated in place by ``object_merge``.
    """
    def _log_before_merging(_value):
        self.logger.debug(
            "Merging existing %s: %s with new: %s", key, existing, _value)

    def _log_after_merge(_value):
        self.logger.debug("%s merged to %s", key, _value)

    global_merge = getattr(self, "MERGE_ENABLED_FOR_DYNACONF", False)

    if isinstance(value, dict):
        # Pop the marker keys so they do not end up in the stored value;
        # the popped marker (if any) also enables merging locally.
        local_merge = value.pop(
            "dynaconf_merge", value.pop("dynaconf_merge_unique", None))
        if global_merge or local_merge:
            # Mask every key's value when logging secrets.
            safe_value = {k: "***" for k in value} if is_secret else value
            _log_before_merging(safe_value)
            object_merge(existing, value)
            # After merging, only mask the keys that were new (present
            # in safe_value); merged-in keys from ``existing`` are shown.
            safe_value = (
                {k: ("***" if k in safe_value else v)
                 for k, v in value.items()}
                if is_secret else value)
            _log_after_merge(safe_value)

    if isinstance(value, (list, tuple)):
        # For sequences the marker is an element rather than a key.
        local_merge = (
            "dynaconf_merge" in value or "dynaconf_merge_unique" in value)
        if global_merge or local_merge:
            value = list(value)
            unique = False
            if local_merge:
                try:
                    value.remove("dynaconf_merge")
                except ValueError:  # EAFP
                    # Marker must be the "unique" variant instead.
                    value.remove("dynaconf_merge_unique")
                    unique = True
            # Remember the pre-merge elements so secret logging can mask
            # exactly those after the merge.
            original = set(value)
            _log_before_merging(
                ["***" for item in value] if is_secret else value)
            object_merge(existing, value, unique=unique)
            safe_value = (
                ["***" if item in original else item for item in value]
                if is_secret else value)
            _log_after_merge(safe_value)

    return value
def keys_to_typing(value):
    """Processes the values that will be typed in the element.

    :param value: iterable of Keys constants, ints, and/or strings
    :return: flat list where special Keys are kept whole and everything
        else is expanded into individual characters
    """
    typing = []
    for val in value:
        if isinstance(val, Keys):
            # Special keys (e.g. ENTER, TAB) are appended as a unit.
            typing.append(val)
        elif isinstance(val, int):
            # Integers are typed digit by digit.
            typing.extend(str(val))
        else:
            # String-like values are typed character by character.
            # (Replaces the ``for i in range(len(val))`` indexing loop
            # with direct iteration -- same behavior, idiomatic form.)
            typing.extend(val)
    return typing
def values(self):
    """Return all values as numpy-array (mean, var, min, max, num)."""
    stats = [self.mean, self.var, self.min, self.max, self.num]
    return np.array(stats)
def get_core_source_partial():
    """Return the cached PartialZlib wrapper around the core source.

    _get_core_source() is expensive, even with @lru_cache in minify.py;
    threads can enter it simultaneously causing severe slowdowns, so the
    result is built once under a lock (double-checked locking).
    """
    global _core_source_partial
    if _core_source_partial is None:
        # ``with`` replaces the manual acquire/try/finally/release and
        # guarantees the lock is released even if PartialZlib raises.
        with _core_source_lock:
            # Re-check inside the lock: another thread may have built it
            # while we were waiting.
            if _core_source_partial is None:
                _core_source_partial = PartialZlib(
                    _get_core_source().encode('utf-8'))
    return _core_source_partial
def copy_file(stream, target, maxread=-1, buffer_size=2 ** 16):
    '''Read from :stream and write to :target until :maxread or EOF.

    :param stream: source file-like object (must support ``read``)
    :param target: destination file-like object (must support ``write``)
    :param maxread: maximum number of bytes to copy; -1 means until EOF
    :param buffer_size: chunk size per read (default 64 KiB)
    :return: total number of bytes copied
    '''
    # BUG FIX: the default buffer size was ``2 * 16`` (32 bytes), almost
    # certainly a typo for ``2 ** 16`` (64 KiB); a 32-byte buffer makes
    # copying pathologically slow.
    size, read = 0, stream.read
    while True:
        to_read = buffer_size if maxread < 0 else min(buffer_size,
                                                      maxread - size)
        part = read(to_read)
        if not part:
            return size
        target.write(part)
        size += len(part)
def process_runway_configs(runway_dir=''):
    """Read the _application.json_ files.

    Args:
        runway_dir (str): Name of runway directory with app.json files.

    Returns:
        collections.defaultdict: Configurations stored for each
        environment found.
    """
    LOG.info('Processing application.json files from local directory "%s".',
             runway_dir)
    lookup = FileLookup(runway_dir=runway_dir)
    return process_configs(lookup,
                           'application-master-{env}.json',
                           'pipeline.json')
def compile(self, db):
    """Building the sql expression.

    :param db: the database instance
    :return: the SQL fragment, with an ``AS <alias>`` suffix when an
        alias is set
    """
    if self.alias:
        return self.expression + ' AS ' + db.quote_column(self.alias)
    return self.expression
def _bytes_to_values ( self , bs , width = None ) : """Convert a packed row of bytes into a row of values . Result will be a freshly allocated object , not shared with the argument ."""
if self . bitdepth == 8 : return bytearray ( bs ) if self . bitdepth == 16 : return array ( 'H' , struct . unpack ( '!%dH' % ( len ( bs ) // 2 ) , bs ) ) assert self . bitdepth < 8 if width is None : width = self . width # Samples per byte spb = 8 // self . bitdepth out = bytearray ( ) mask = 2 ** self . bitdepth - 1 shifts = [ self . bitdepth * i for i in reversed ( list ( range ( spb ) ) ) ] for o in bs : out . extend ( [ mask & ( o >> i ) for i in shifts ] ) return out [ : width ]
def start_head_processes(self):
    """Start head processes on the node.

    Starts, in this order: Redis, the monitor, the raylet monitor, and
    (Python 3 only, when configured) the web UI dashboard.
    """
    logger.info(
        "Process STDOUT and STDERR is being redirected to {}.".format(
            self._logs_dir))
    # A head node starts its own Redis; it must not already be attached
    # to an existing Redis instance.
    assert self._redis_address is None
    # If this is the head node, start the relevant head node processes.
    self.start_redis()
    self.start_monitor()
    self.start_raylet_monitor()
    # The dashboard is Python3.x only.
    if PY3 and self._ray_params.include_webui:
        self.start_dashboard()
def require(obj, caller_args=None):
    """Primary method for test assertions in Specter.

    :param obj: The evaluated target object
    :param caller_args: Is only used when expecting a raised Exception
    """
    # FIX: the default was a mutable ``[]`` shared across all calls; use
    # None as sentinel and create a fresh list per call instead.
    if caller_args is None:
        caller_args = []
    line, module = get_module_and_line('__spec__')
    src_params = ExpectParams(line, module)
    require_obj = RequireAssert(obj, src_params=src_params,
                                caller_args=caller_args)
    _add_expect_to_wrapper(require_obj)
    return require_obj
def get_version(module_name_or_file=None):
    """Return the current version as defined by the given module/file.

    :param module_name_or_file: a module name, path to a python file, or
        package directory; when None, the first package is auto-detected
        from ``base_module`` / ``find_packages``.
    :return: the version string captured by ``VERSION_PATTERN``
    :raises ValueError: if no version declaration is found in the file
    """
    if module_name_or_file is None:
        parts = base_module.split('.')
        module_name_or_file = parts[0] if len(parts) > 1 else \
            find_packages(exclude=['test', 'test.*'])[0]
    if os.path.isdir(module_name_or_file):
        module_name_or_file = os.path.join(module_name_or_file,
                                           '__init__.py')
    with open(module_name_or_file, 'r') as f:
        match = VERSION_PATTERN.search(f.read())
    if match is None:
        # FIX: previously this crashed with an opaque AttributeError on
        # ``None.group(1)``; raise a descriptive error instead.
        raise ValueError(
            'No version declaration found in %s' % module_name_or_file)
    return match.group(1)
def simulate(self, l, noisefunc=None, random_state=None):
    """Simulate vector autoregressive (VAR) model.

    This function generates data from the VAR model.

    Parameters
    ----------
    l : int or [int, int]
        Number of samples to generate. Can be a tuple or list, where
        l[0] is the number of samples and l[1] is the number of trials.
    noisefunc : func, optional
        This function is used to create the generating noise process. If
        set to None, Gaussian white noise with zero mean and unit
        variance is used.

    Returns
    -------
    data : array
        Generated data.
        NOTE(review): the returned axes appear to be
        (trials, channels, samples) given the final transpose([2, 1, 0])
        of a (samples, channels, trials) array, while the original
        docstring said (n_trials, n_samples, n_channels) -- confirm.
    """
    # self.coef is (m, m*p): m channels, p lags stored side by side.
    m, n = np.shape(self.coef)
    p = n // m

    # l may be a scalar (1 trial) or an (n_samples, n_trials) pair.
    try:
        l, t = l
    except TypeError:
        t = 1

    if noisefunc is None:
        rng = check_random_state(random_state)
        noisefunc = lambda: rng.normal(size=(1, m))

    # Generate 10*p extra "burn-in" samples that are discarded below so
    # the process forgets its zero initial conditions.
    n = l + 10 * p

    y = np.zeros((n, m, t))
    res = np.zeros((n, m, t))

    for s in range(t):
        # First p samples are pure noise (no AR history available yet).
        for i in range(p):
            e = noisefunc()
            res[i, :, s] = e
            y[i, :, s] = e
        for i in range(p, n):
            e = noisefunc()
            res[i, :, s] = e
            y[i, :, s] = e
            # Add the AR contribution of each lag k; coef[:, (k-1)::p]
            # selects the coefficient block for lag k.
            for k in range(1, p + 1):
                y[i, :, s] += self.coef[:, (k - 1)::p].dot(y[i - k, :, s])

    # Drop the burn-in portion before storing residuals/covariance.
    self.residuals = res[10 * p:, :, :].T
    self.rescov = sp.cov(cat_trials(self.residuals).T, rowvar=False)

    return y[10 * p:, :, :].transpose([2, 1, 0])
def package(self, vm_name=None, base=None, output=None, vagrantfile=None):
    '''Packages a running vagrant environment into a box.

    vm_name=None: name of VM.
    base=None: name of a VM in virtualbox to package as a base box
    output=None: name of the file to output
    vagrantfile=None: Vagrantfile to package with this box
    '''
    cmd = ['package', vm_name]
    # BUG FIX: ``base`` was documented but silently ignored; forward it
    # as the ``--base`` option like the other parameters.
    if base is not None:
        cmd += ['--base', base]
    if output is not None:
        cmd += ['--output', output]
    if vagrantfile is not None:
        cmd += ['--vagrantfile', vagrantfile]
    self._call_vagrant_command(cmd)
def output_shape(self):
    """Returns the output shape."""
    if self._output_shape is None:
        self._ensure_is_connected()
    if callable(self._output_shape):
        # Lazily evaluate the shape the first time it is needed and
        # cache the result as a tuple.
        self._output_shape = tuple(self._output_shape())
    return self._output_shape
def get_k8s_metadata():
    """Get kubernetes container metadata, as on GCP GKE."""
    metadata = {}
    cluster = gcp_metadata_config.GcpMetadataConfig.get_attribute(
        gcp_metadata_config.CLUSTER_NAME_KEY)
    if cluster is not None:
        metadata[CLUSTER_NAME_KEY] = cluster
    # The remaining attributes come from environment variables injected
    # into the container.
    for attr_key, env_name in _K8S_ENV_ATTRIBUTES.items():
        env_value = os.environ.get(env_name)
        if env_value is not None:
            metadata[attr_key] = env_value
    return metadata
def _searchservices(device, name=None, uuid=None, uuidbad=None):
    """Searches the given IOBluetoothDevice using the specified parameters.

    Returns an empty list if the device has no services.

    uuid should be IOBluetoothSDPUUID object.
    """
    if not isinstance(device, _IOBluetooth.IOBluetoothDevice):
        raise ValueError("device must be IOBluetoothDevice, was %s" %
                         type(device))

    wanted = (uuid,) if uuid else ()
    unwanted = (uuidbad,) if uuidbad else ()

    matches = []
    allservices = device.getServices()
    if allservices is not None:
        for service in allservices:
            # Skip services missing a required UUID or carrying a
            # blacklisted one, then filter on the service name if given.
            if wanted and not service.hasServiceFromArray_(wanted):
                continue
            if unwanted and service.hasServiceFromArray_(unwanted):
                continue
            if name is None or service.getServiceName() == name:
                matches.append(service)
    return matches
def view_history_source(name, gitref=None):
    """Serve a page name from git repo (an old version of a page),
    then return the reST source code.

    This function does not use any template; it returns only plain text.

    .. note:: this is a bottle view

    * this is a GET only method: you can not change a committed page

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
        :gitref: (str) -- hexsha of the git commit to look into

    Returns:
        bottle response object or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    response.set_header('Content-Type', 'text/html; charset=utf-8')

    if gitref is None:
        files = glob.glob("{0}.rst".format(name))
        if len(files) > 0:
            # FIX: use a context manager -- the file handle was
            # previously opened and never closed (resource leak).
            with open(files[0], 'r') as file_handle:
                content = file_handle.read()
        else:
            return abort(404)
    else:
        content = read_committed_file(gitref, name + '.rst')

    if content:
        return template('source_view',
                        type="history",
                        name=name,
                        extended_name='__source__',
                        is_repo=check_repo(),
                        history=commit_history("{0}.rst".format(name)),
                        gitref=gitref,
                        content=content.decode('utf-8'))
    else:
        return abort(404)
def _post ( self , * args , ** kwargs ) : """Wrapper around Requests for POST requests Returns : Response : A Requests Response object"""
if 'timeout' not in kwargs : kwargs [ 'timeout' ] = self . timeout req = self . session . post ( * args , ** kwargs ) return req
def remove_ipv4addr(self, ipv4addr):
    """Remove an IPv4 address from the host.

    :param str ipv4addr: The IP address to remove
    """
    # Entries may be plain dicts or HostIPv4 objects; remove the first
    # one whose address matches and stop.
    for entry in self.ipv4addrs:
        dict_match = (isinstance(entry, dict)
                      and entry['ipv4addr'] == ipv4addr)
        if dict_match or (isinstance(entry, HostIPv4)
                          and entry.ipv4addr == ipv4addr):
            self.ipv4addrs.remove(entry)
            break
def validate(self, grid):
    """Using the MagIC data model, generate validation errors on a
    MagicGrid.

    Parameters
    ----------
    grid : dialogs.magic_grid3.MagicGrid
        The MagicGrid to be validated

    Returns
    -------
    warnings : dict
        Empty dict if no warnings, otherwise a dict with format
        {name of problem: [problem_columns]}
    """
    grid_name = str(grid.GetName())
    dmodel = self.contribution.dmodel
    # Columns the data model requires for this grid/table.
    reqd_headers = dmodel.get_reqd_headers(grid_name)
    df = self.contribution.tables[grid_name].df
    df = df.replace('', np.nan)  # python does not view empty strings as null
    if df.empty:
        # Nothing to validate in an empty table.
        return {}
    col_names = set(df.columns)
    # Required columns that are absent entirely ...
    missing_headers = set(reqd_headers) - col_names
    # ... versus required columns that exist in the table.
    present_headers = set(reqd_headers) - set(missing_headers)
    # Columns that contain at least one non-null value.
    non_null_headers = df.dropna(how='all', axis='columns').columns
    # Required columns that exist but hold no data at all.
    null_reqd_headers = present_headers - set(non_null_headers)
    if any(missing_headers) or any(null_reqd_headers):
        warnings = {'missing required column(s)': sorted(missing_headers),
                    'no data in required column(s)': sorted(null_reqd_headers)}
    else:
        warnings = {}
    return warnings