signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def doestr2tabstr(astr, kword):
    """Convert a '..'-delimited DOE-style text into a tab-separated table.

    The input is split on '..' into records; records whose first line is an
    assignment ending in ``kword`` are kept, their values are collected
    column-wise, and a keyword column is derived from the first matching
    record.  Returns the tab-joined table as a single string.

    NOTE(review): if no record matches ``kword``, ``lblock[0]`` below raises
    IndexError — presumably callers guarantee at least one match; verify.
    """
    alist = astr.split('..')
    del astr  # free the (potentially large) input early; rebound as output later
    # strip junk, put the '..' record separator back on its own line
    for num in range(0, len(alist)):
        alist[num] = alist[num].strip()
        alist[num] = alist[num] + os.linesep + '..' + os.linesep
    alist.pop()  # drop the trailing empty record created by the final '..'
    # keep only records whose first line's assignment target matches kword
    lblock = []
    for num in range(0, len(alist)):
        linels = alist[num].split(os.linesep)
        firstline = linels[0]
        assignls = firstline.split('=')
        keyword = assignls[-1].strip()
        if keyword == kword:
            lblock = lblock + [alist[num]]
    # get all values: one column (list) per matching record
    lval = []
    for num in range(0, len(lblock)):
        block = lblock[num]
        linel = block.split(os.linesep)
        lvalin = []
        for k in range(0, len(linel)):
            line = linel[k]
            assignl = line.split('=')
            if k == 0:
                # first line: the left-hand side of the assignment is the value
                lvalin = lvalin + [assignl[0]]
            else:
                # record separator '..' is abbreviated to '.'
                if assignl[-1] == '..':
                    assignl[-1] = '.'
                lvalin = lvalin + [assignl[-1]]
        lvalin.pop()  # drop the empty entry after the trailing separator
        lval = lval + [lvalin]
    # get keywords (row labels) from the first matching record only
    kwordl = []
    block = lblock[0]
    linel = block.split(os.linesep)
    for k in range(0, len(linel)):
        line = linel[k]
        assignl = line.split('=')
        if k == 0:
            # first row label is ' = <keyword>' (reuses the kword parameter name)
            kword = ' = ' + assignl[1].strip()
        else:
            if assignl[0] == '..':
                assignl[0] = '.'
            else:
                # re-append the '=' that split() removed from the label
                assignl[0] = assignl[0] + '='
            kword = assignl[0].strip()
        kwordl = kwordl + [kword]
    kwordl.pop()  # drop the label from the trailing separator line
    # assemble: one output line per row label, one tab-separated value per record
    astr = ''
    for num in range(0, len(kwordl)):
        linest = ''
        linest = linest + kwordl[num]
        for k in range(0, len(lval)):
            linest = linest + '\t' + lval[k][num]
        astr = astr + linest + os.linesep
    return astr
def makeHttpRequest(method, url, payload, headers, retries=MAX_RETRIES, session=None):
    """Make an HTTP request, retrying with quadratic backoff until success.

    :param method: HTTP verb, e.g. ``'GET'``.
    :param url: target URL.
    :param payload: request body; if file-like it is rewound before each attempt.
    :param headers: dict of HTTP headers.
    :param retries: number of retries after the first attempt.
    :param session: optional session object passed through to the transport.
    :return: the final response object (2xx-4xx).
    :raises requests.exceptions.RequestException: last attempt failed at the
        connection level.
    :raises exceptions.TaskclusterRestFailure: last attempt returned a 5xx.
    """
    retry = -1
    response = None
    while retry < retries:
        retry += 1
        # Sleep before every attempt except the first (quadratic backoff).
        if retry > 0:
            snooze = float(retry * retry) / 10.0
            log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
            time.sleep(snooze)
        # Rewind the payload so each attempt sends it from the start.
        if hasattr(payload, 'seek'):
            payload.seek(0)
        log.debug('Making attempt %d', retry)
        try:
            response = makeSingleHttpRequest(method, url, payload, headers, session)
        except requests.exceptions.RequestException as rerr:
            if retry < retries:
                log.warning('Retrying because of: %s', rerr)
                continue
            # Out of retries: propagate the connection-level failure.
            raise rerr
        # Retry 5xx responses; once retries are exhausted, raise instead of
        # silently returning the failed response.  (The original placed the
        # raise on an unreachable branch because the enclosing condition
        # already required retry < retries.)
        status = response.status_code
        if 500 <= status < 600:
            if retry < retries:
                log.warning('Retrying because of: %d status', status)
                continue
            raise exceptions.TaskclusterRestFailure("Unknown Server Error", superExc=None)
        return response
    # This code-path should be unreachable
    assert False, "Error from last retry should have been raised!"
def _get_tr ( cls , df ) : """True Range of the trading tr = max [ ( high - low ) , abs ( high - close _ prev ) , abs ( low - close _ prev ) ] : param df : data : return : None"""
prev_close = df [ 'close_-1_s' ] high = df [ 'high' ] low = df [ 'low' ] c1 = high - low c2 = np . abs ( high - prev_close ) c3 = np . abs ( low - prev_close ) df [ 'tr' ] = np . max ( ( c1 , c2 , c3 ) , axis = 0 )
def send(self, to, language=None, **data):
    """Render this email's context and send it to ``to``.

    :param to: recipient address(es) passed straight to ``mail.send``.
    :param language: optional language code forwarded to the mailer.
    :param data: keyword data stored on ``self.data`` and used to build
        the template context.
    """
    self.data = data
    self.get_context_data()
    if app_settings['SEND_EMAILS']:
        try:
            if language:
                mail.send(to, template=self.template, context=self.context_data, language=language)
            else:
                mail.send(to, template=self.template, context=self.context_data)
        except EmailTemplate.DoesNotExist:
            # Build the message once and log it (the original formatted the
            # same string twice and never used the first copy).
            msg = 'Trying to use a non existent email template {0}'.format(self.template)
            LOGGER.error(msg)
def set(self, key, func, *args, **kwargs):
    """Return ``key``'s value, computing and caching it on first access.

    :param key: The key to look up / set.
    :param func: Called as ``func(*args, **kwargs)`` only when ``key`` is
        absent; its result is stored under ``key`` and returned.
    """
    if key in self:
        return self[key]
    value = func(*args, **kwargs)
    self[key] = value
    return value
def byte_to_bitstring(byte):
    """Convert one byte (0..255) to a list of its 8 bits, MSB first."""
    assert 0 <= byte <= 0xff
    # Extract bits from bit 7 down to bit 0.
    return [(byte >> shift) & 1 for shift in range(7, -1, -1)]
def install_string(self):
    """Build the ``pyensembl install`` command line listing every missing
    file, for inclusion in an error message shown to the user."""
    parts = [
        "--reference-name", self.reference_name,
        "--annotation-name", self.annotation_name,
    ]
    if self.annotation_version:
        parts += ["--annotation-version", str(self.annotation_version)]
    if self.requires_gtf:
        parts += ["--gtf", '"%s"' % self._gtf_path_or_url]
    if self.requires_protein_fasta:
        for path in self._protein_fasta_paths_or_urls:
            parts.append('--protein-fasta "%s"' % path)
    if self.requires_transcript_fasta:
        for path in self._transcript_fasta_paths_or_urls:
            parts.append('--transcript-fasta "%s"' % path)
    return "pyensembl install " + " ".join(parts)
def set_bind(self):
    """Sets key bindings -- we need this more than once.

    Re-applies the base-class bindings, then remaps <Next>/<Prior>
    (PageDown / PageUp) so they jump straight to the widget's minimum
    and maximum values.
    """
    RangedInt.set_bind(self)
    # Drop the inherited PageDown / PageUp bindings before remapping them.
    self.unbind('<Next>')
    self.unbind('<Prior>')
    self.bind('<Next>', lambda e: self.set(self._min()))
    self.bind('<Prior>', lambda e: self.set(self._max()))
def unlock_kinetis_read_until_ack(jlink, address):
    """Polls the device until the read request is acknowledged.

    Sends a read request for the register at ``address`` over SWD and
    polls indefinitely until the request is ACK'd, retrying on WAIT, and
    failing on any other status.

    Args:
      jlink (JLink): the connected J-Link
      address (int): the address of the register to poll

    Returns:
      ``SWDResponse`` object on success.

    Raises:
      KinetisException: when the read exits with a non-ack, non-wait status.

    Note:
      Required to avoid reading corrupt or otherwise invalid data from
      registers when communicating over SWD.
    """
    request = swd.ReadRequest(address, ap=True)
    response = None
    while True:
        response = request.send(jlink)
        if response.ack():
            break
        elif response.wait():
            continue
        # Fix: interpolate the status into the message — the original passed
        # it as a second constructor argument, producing a tuple message.
        raise KinetisException('Read exited with status: %s' % response.status)
    return response
def verify_x509_cert_chain(cert_chain, ca_pem_file=None, ca_path=None):
    """Look at certs in the cert chain and add them to the store one by one.

    Return the cert at the end of the chain. That is the cert to be used by
    the caller for verifying.

    From https://www.w3.org/TR/xmldsig-core2/#sec-X509Data:
    "All certificates appearing in an X509Data element must relate to the
    validation key by either containing it or being part of a certification
    chain that terminates in a certificate containing the validation key.
    No ordering is implied by the above constraints."
    """
    from OpenSSL import SSL
    context = SSL.Context(SSL.TLSv1_METHOD)
    # Fall back to certifi's CA bundle when the caller supplies neither.
    if ca_pem_file is None and ca_path is None:
        import certifi
        ca_pem_file = certifi.where()
    context.load_verify_locations(ensure_bytes(ca_pem_file, none_ok=True), capath=ca_path)
    store = context.get_cert_store()
    # The chain is unordered (see docstring): repeatedly scan the remaining
    # certs for one that the store will accept, until all are placed.
    certs = list(reversed(cert_chain))
    end_of_chain, last_error = None, None
    while len(certs) > 0:
        for cert in certs:
            try:
                end_of_chain = _add_cert_to_store(store, cert)
                certs.remove(cert)
                break
            except RedundantCert:
                # Cert already in the store; drop it.  NOTE(review): it is
                # only recorded as end_of_chain when none was set yet.
                certs.remove(cert)
                if end_of_chain is None:
                    end_of_chain = cert
                break
            except Exception as e:
                # Try the next cert; remember the error for the no-progress case.
                last_error = e
        else:
            # A full pass added nothing: the chain cannot be completed.
            raise last_error
    return end_of_chain
def update_with(self, **query):
    """Mass-assignment-protected update: apply only the attributes that
    ``_filter_attrs`` whitelists, then persist via ``save``."""
    safe_attrs = self._filter_attrs(query)
    for name in safe_attrs:
        setattr(self, name, safe_attrs[name])
    return self.save()
def from_json(cls, data):
    """Create a Wind Condition from a dictionary.

    Args:
        data = {
        "wind_speed": float,
        "wind_direction": float,
        "rain": bool,
        "snow_on_ground": bool}
    """
    # Check the single required key.
    assert 'wind_speed' in data, 'Required key "wind_speed" is missing!'
    # Read optional keys with defaults instead of writing the defaults back
    # into the caller's dict (the original mutated ``data``).
    return cls(
        data['wind_speed'],
        data.get('wind_direction', 0),
        data.get('rain', False),
        data.get('snow_on_ground', False),
    )
def _get_server(self):
    """Get server to use for request.

    Also process inactive server list, re-add them after given interval.
    """
    with self._lock:
        # Snapshot the count: entries popped and pushed back must not be
        # revisited in the same pass.
        inactive_server_count = len(self._inactive_servers)
        for i in range(inactive_server_count):
            try:
                ts, server, message = heapq.heappop(self._inactive_servers)
            except IndexError:
                pass
            else:
                if (ts + self.retry_interval) > time():
                    # Not yet, put it back
                    heapq.heappush(self._inactive_servers, (ts, server, message))
                else:
                    self._active_servers.append(server)
                    logger.warn("Restored server %s into active pool", server)
        # if none is old enough, use oldest
        if not self._active_servers:
            ts, server, message = heapq.heappop(self._inactive_servers)
            self._active_servers.append(server)
            logger.info("Restored server %s into active pool", server)
        # Round-robin: take the head, then rotate for the next caller.
        server = self._active_servers[0]
        self._roundrobin()
        return server
def othertype(self, othertype):
    """Set the ``OTHERTYPE`` attribute value.

    Setting it also forces ``TYPE`` to ``'OTHER'``; a ``None`` value
    leaves the element untouched.
    """
    if othertype is None:
        return
    self._el.set('TYPE', 'OTHER')
    self._el.set('OTHERTYPE', othertype)
def _pdf ( self , phi ) : """Evaluate the _ unnormalized _ flow PDF ."""
pdf = np . inner ( self . _vn , np . cos ( np . outer ( phi , self . _n ) ) ) pdf *= 2. pdf += 1. return pdf
def get_method_sig(method):
    """Given a function, return a string that pretty much looks how the
    function signature would be written in Python.

    The first positional argument (typically ``self``) is dropped.

    :param method: a python method
    :return: e.g. ``"my_method(first_arg, second_arg=42, third_arg='something')"``
    """
    # getargspec() was removed in Python 3.11; prefer getfullargspec.  Both
    # return the positional args and their *trailing* defaults separately:
    #   args=['first_arg', 'second_arg', 'third_arg'], defaults=(42, 'something')
    try:
        argspec = inspect.getfullargspec(method)
        varkw = argspec.varkw
    except AttributeError:  # very old interpreters only
        argspec = inspect.getargspec(method)
        varkw = argspec.keywords
    defaults = argspec.defaults or ()
    # Defaults align with the last len(defaults) positional arguments.
    first_default_index = len(argspec.args) - len(defaults)
    args = []
    for index, arg in enumerate(argspec.args):
        if index >= first_default_index:
            val = defaults[index - first_default_index]
            # Fix: ``basestring`` is Python 2 only — quote any str default.
            if isinstance(val, str):
                val = '"' + val + '"'
            args.append("%s=%s" % (arg, val))
        else:
            args.append(arg)
    if argspec.varargs:
        args.append('*' + argspec.varargs)
    if varkw:
        args.append('**' + varkw)
    return "%s(%s)" % (method.__name__, ", ".join(args[1:]))
def save_data(self, trigger_id, **data):
    """let's save the data

    :param trigger_id: trigger ID from which to save data
    :param data: the data to check to be used and save
    :type trigger_id: int
    :type data: dict
    :return: the status of the save statement
    :rtype: boolean
    """
    if self.token:
        title = self.set_title(data)
        body = self.set_content(data)
        # get the details of this trigger
        trigger = Github.objects.get(trigger_id=trigger_id)
        # check if it remains more than 1 access
        # then we can create an issue
        limit = self.gh.ratelimit_remaining
        if limit > 1:
            # repo goes to "owner"
            # project goes to "repository"
            r = self.gh.create_issue(trigger.repo, trigger.project, title, body)
        else:
            # rate limit reach
            logger.warning("Rate limit reached")
            update_result(trigger_id, msg="Rate limit reached", status=True)
            # put again in cache the data that could not be
            # published in Github yet
            cache.set('th_github_' + str(trigger_id), data, version=2)
            # NOTE(review): reported as success so the trigger is not retried
            # immediately; the cached payload is presumably replayed later.
            return True
        sentence = str('github {} created').format(r)
        logger.debug(sentence)
        status = True
    else:
        sentence = "no token or link provided for trigger ID {} ".format(trigger_id)
        logger.critical(sentence)
        update_result(trigger_id, msg=sentence, status=False)
        status = False
    return status
def get_environ(self):
    """Return a new environ dict targeting the given wsgi.version.

    Builds a unicode environ from the WSGI 1.0 environ, decoding the
    Request-URI with ``wsgi.url_encoding`` and falling back to ISO-8859-1
    when that decode fails.
    """
    req = self.req
    env_10 = super(Gateway_u0, self).get_environ()
    # Decode every key of the 1.0 environ into the unicode environ.
    env = dict(map(self._decode_key, env_10.items()))
    # Request-URI
    enc = env.setdefault(six.u('wsgi.url_encoding'), six.u('utf-8'))
    try:
        env['PATH_INFO'] = req.path.decode(enc)
        env['QUERY_STRING'] = req.qs.decode(enc)
    except UnicodeDecodeError:
        # Fall back to latin 1 so apps can transcode if needed.
        env['wsgi.url_encoding'] = 'ISO-8859-1'
        env['PATH_INFO'] = env_10['PATH_INFO']
        env['QUERY_STRING'] = env_10['QUERY_STRING']
    # Decode the values as well (keys were handled above).
    env.update(map(self._decode_value, env.items()))
    return env
def reset(self):
    """Restore the experiment's initial conditions: zero the step counter
    and reset every task with its paired agent's module and history."""
    self.stepid = 0
    for task, agent in zip(self.tasks, self.agents):
        for component in (task, agent.module, agent.history):
            component.reset()
def inverted_level_order(self) -> Iterator["BSP"]:
    """Iterate over this BSP's hierarchy in inverse level order.

    Deepest level first, root last; nodes within a level keep their
    breadth-first order.

    .. versionadded:: 8.3
    """
    levels = []  # type: List[List['BSP']]
    frontier = [self]  # type: List['BSP']
    while frontier:
        levels.append(frontier)
        frontier = [child for node in frontier for child in node.children]
    while levels:
        yield from levels.pop()
def hover_pixmap(self, value):
    """Setter for **self.__hover_pixmap** attribute.

    :param value: Attribute value; ``None`` is silently ignored.
    :type value: QPixmap
    """
    if value is None:
        return
    assert type(value) is QPixmap, "'{0}' attribute: '{1}' type is not 'QPixmap'!".format("hover_pixmap", value)
    self.__hover_pixmap = value
def disable ( name , runas = None ) : '''Disable a launchd service . Raises an error if the service fails to be disabled : param str name : Service label , file name , or full path : param str runas : User to run launchctl commands : return : ` ` True ` ` if successful or if the service is already disabled : rtype : bool CLI Example : . . code - block : : bash salt ' * ' service . disable org . cups . cupsd'''
# Get the service target . enable requires a full < service - target > service_target = _get_domain_target ( name , service_target = True ) [ 0 ] # disable the service : will raise an error if it fails return launchctl ( 'disable' , service_target , runas = runas )
def filter_by_gene_expression(self, gene_expression_dict, min_expression_value=0.0):
    """Keep only the effects whose associated gene's expression exceeds
    the given threshold.

    Parameters
    ----------
    gene_expression_dict : dict
        Maps Ensembl gene IDs to expression estimates (FPKM or TPM).

    min_expression_value : float
        Effects whose gene expression is above this value are retained.
    """
    gene_id_of = lambda effect: effect.gene_id
    return self.filter_above_threshold(
        key_fn=gene_id_of,
        value_dict=gene_expression_dict,
        threshold=min_expression_value)
def readObject(self):
    """Reads an object from the stream."""
    ref = self.readInteger(False)
    # Low bit clear => this is a reference to an already-decoded object.
    if ref & REFERENCE_BIT == 0:
        obj = self.context.getObject(ref >> 1)
        if obj is None:
            raise pyamf.ReferenceError('Unknown reference %d' % (ref >> 1,))
        if self.use_proxies is True:
            obj = self.readProxy(obj)
        return obj
    # Otherwise the remaining bits select the class definition.
    ref >>= 1
    class_def = self._getClassDefinition(ref)
    alias = class_def.alias
    obj = alias.createInstance(codec=self)
    obj_attrs = dict()
    # Register the instance before decoding so self-references resolve.
    self.context.addObject(obj)
    if class_def.encoding in (ObjectEncoding.EXTERNAL, ObjectEncoding.PROXY):
        # Externalizable objects decode themselves and return immediately.
        obj.__readamf__(DataInput(self))
        if self.use_proxies is True:
            obj = self.readProxy(obj)
        return obj
    elif class_def.encoding == ObjectEncoding.DYNAMIC:
        # Sealed attributes first, then the dynamic name/value pairs.
        self._readStatic(class_def, obj_attrs)
        self._readDynamic(class_def, obj_attrs)
    elif class_def.encoding == ObjectEncoding.STATIC:
        self._readStatic(class_def, obj_attrs)
    else:
        raise pyamf.DecodeError("Unknown object encoding")
    alias.applyAttributes(obj, obj_attrs, codec=self)
    if self.use_proxies is True:
        obj = self.readProxy(obj)
    return obj
def show(self):
    """Show the hidden spinner."""
    spinner_running = self._spin_thread is not None and self._spin_thread.is_alive()
    if spinner_running and self._hide_spin.is_set():
        # Drop the hidden flag so the spin loop resumes drawing.
        self._hide_spin.clear()
        # Return to column 0 so the spinner is not appended to stale text.
        sys.stdout.write("\r")
        self._clear_line()
def optimize(self, timeSeries, forecastingMethods=None, startingPercentage=0.0, endPercentage=100.0):
    """Runs the optimization of the given TimeSeries.

    :param TimeSeries timeSeries: TimeSeries instance that requires an
        optimized forecast.
    :param list forecastingMethods: forecasting methods to evaluate.
    :param float startingPercentage: start of the error interval in
        [0.0, 100.0]; errors before this fraction are ignored.
    :param float endPercentage: end of the error interval in [0.0, 100.0];
        errors after this fraction are ignored.
    :return: ``[BaseForecastingMethod, BaseErrorMeasure, parameters dict]``
        for the method with the smallest error.
    :raise: :py:exc:`ValueError` if forecastingMethods is empty.
    """
    if not forecastingMethods:
        raise ValueError("forecastingMethods cannot be empty.")
    self._startingPercentage = startingPercentage
    self._endPercentage = endPercentage
    # One candidate per method: [method, error measure, parameters].
    candidates = [
        [method] + self.optimize_forecasting_method(timeSeries, method)
        for method in forecastingMethods
    ]
    # Pick the candidate whose error measure reports the smallest error.
    best = min(candidates,
               key=lambda entry: entry[1].get_error(self._startingPercentage, self._endPercentage))
    # Push the winning parameter set back into the forecasting method.
    for parameter_name, parameter_value in best[2].items():
        best[0].set_parameter(parameter_name, parameter_value)
    return best
def _nemo_accpars ( self , vo , ro ) : """NAME : _ nemo _ accpars PURPOSE : return the accpars potential parameters for use of this potential with NEMO INPUT : vo - velocity unit in km / s ro - length unit in kpc OUTPUT : accpars string HISTORY : 2018-09-14 - Written - Bovy ( UofT )"""
GM = self . _amp * vo ** 2. * ro / 2. return "0,1,%s,%s,0" % ( GM , self . a * ro )
def afni_wf(name='AFNISkullStripWorkflow', unifize=False, n4_nthreads=1):
    """Skull-stripping workflow.

    Originally derived from the `codebase of the QAP
    <https://github.com/preprocessed-connectomes-project/quality-assessment-protocol/blob/master/qap/anatomical_preproc.py#L105>`_.
    Now, this workflow includes :abbr:`INU (intensity non-uniformity)`
    correction using the N4 algorithm and (optionally) intensity
    harmonization using AFNI's ``3dUnifize``.
    """
    workflow = pe.Workflow(name=name)
    # Workflow I/O: one input image; skull-stripped image, mask and
    # bias-correction products on the way out.
    inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']), name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['bias_corrected', 'out_file', 'out_mask', 'bias_image']), name='outputnode')
    # N4 INU correction; runs with the requested number of threads.
    inu_n4 = pe.Node(
        ants.N4BiasFieldCorrection(dimension=3, save_bias=True,
                                   num_threads=n4_nthreads, copy_header=True),
        n_procs=n4_nthreads, name='inu_n4')
    sstrip = pe.Node(afni.SkullStrip(outputtype='NIFTI_GZ'), name='skullstrip')
    # Mask the original volume with the skull-strip result (a*step(b)).
    sstrip_orig_vol = pe.Node(afni.Calc(expr='a*step(b)', outputtype='NIFTI_GZ'),
                              name='sstrip_orig_vol')
    binarize = pe.Node(fsl.Threshold(args='-bin', thresh=1.e-3), name='binarize')
    if unifize:
        # Add two unifize steps, pre- and post-skullstripping.
        inu_uni_0 = pe.Node(afni.Unifize(outputtype='NIFTI_GZ'),
                            name='unifize_pre_skullstrip')
        inu_uni_1 = pe.Node(afni.Unifize(gm=True, outputtype='NIFTI_GZ'),
                            name='unifize_post_skullstrip')
        workflow.connect([
            (inu_n4, inu_uni_0, [('output_image', 'in_file')]),
            (inu_uni_0, sstrip, [('out_file', 'in_file')]),
            (inu_uni_0, sstrip_orig_vol, [('out_file', 'in_file_a')]),
            (sstrip_orig_vol, inu_uni_1, [('out_file', 'in_file')]),
            (inu_uni_1, outputnode, [('out_file', 'out_file')]),
            (inu_uni_0, outputnode, [('out_file', 'bias_corrected')]),
        ])
    else:
        workflow.connect([
            (inputnode, sstrip_orig_vol, [('in_file', 'in_file_a')]),
            (inu_n4, sstrip, [('output_image', 'in_file')]),
            (sstrip_orig_vol, outputnode, [('out_file', 'out_file')]),
            (inu_n4, outputnode, [('output_image', 'bias_corrected')]),
        ])
    # Remaining connections
    workflow.connect([
        (sstrip, sstrip_orig_vol, [('out_file', 'in_file_b')]),
        (inputnode, inu_n4, [('in_file', 'input_image')]),
        (sstrip_orig_vol, binarize, [('out_file', 'in_file')]),
        (binarize, outputnode, [('out_file', 'out_mask')]),
        (inu_n4, outputnode, [('bias_image', 'bias_image')]),
    ])
    return workflow
def mount(nbd, root=None):
    '''Pass in the nbd connection device location, mount all partitions and
    return a dict of mount points

    CLI Example:

    .. code-block:: bash

        salt '*' qemu_nbd.mount /dev/nbd0
    '''
    # Re-read the partition table so the kernel exposes the nbd partitions.
    __salt__['cmd.run'](
        'partprobe {0}'.format(nbd),
        python_shell=False,
    )
    ret = {}
    if root is None:
        root = os.path.join(tempfile.gettempdir(), 'nbd', os.path.basename(nbd))
    for part in glob.glob('{0}p*'.format(nbd)):
        m_pt = os.path.join(root, os.path.basename(part))
        # Brief pause before mounting — presumably waits for the partition
        # device node to settle; TODO confirm.
        time.sleep(1)
        mnt = __salt__['mount.mount'](m_pt, part, True)
        if mnt is not True:
            # Skip partitions that failed to mount; they are left out of ret.
            continue
        ret[m_pt] = part
    return ret
def limitsChanged(self, param, limits):
    """Called when the parameter's limits have changed.

    Forwards the change to the base class, then pushes the new bounds
    into the spin-box widget for numeric parameter types.
    """
    ParameterItem.limitsChanged(self, param, limits)
    t = self.param.opts['type']
    if t == 'int' or t == 'float':
        self.widget.setOpts(bounds=limits)
    else:
        # Non-numeric widgets have no bounds to update.
        return
def ssh_invite(ctx, code_length, user, **kwargs):
    """Add a public-key to a ~/.ssh/authorized_keys file.

    Copies every CLI option onto the shared context object, then delegates
    to :func:`cmd_ssh.invite` via the async runner ``go``.
    """
    # Mirror all remaining options onto the context object for the command.
    for name, value in kwargs.items():
        setattr(ctx.obj, name, value)
    # Imported lazily to keep CLI start-up cheap.
    from . import cmd_ssh
    ctx.obj.code_length = code_length
    ctx.obj.ssh_user = user
    return go(cmd_ssh.invite, ctx.obj)
def get_messages(self):
    """returns all messages in this thread as dict mapping all contained
    messages to their direct responses.

    :rtype: dict mapping :class:`~alot.db.message.Message` to a list of
            :class:`~alot.db.message.Message`.
    """
    if not self._messages:  # if not already cached
        query = self._dbman.query('thread:' + self._id)
        thread = next(query.search_threads())

        def accumulate(acc, msg):
            # Wrap the backend message, record it in the mapping, then
            # recurse into its replies, collecting them as its children.
            M = Message(self._dbman, msg, thread=self)
            acc[M] = []
            r = msg.get_replies()
            if r is not None:
                for m in r:
                    acc[M].append(accumulate(acc, m))
            return M

        self._messages = {}
        for m in thread.get_toplevel_messages():
            self._toplevel_messages.append(accumulate(self._messages, m))
    return self._messages
def __watch_file_system(self):
    """Watches the file system for paths that have been changed or
    invalidated on disk, emitting the matching change/invalidation signals
    and unregistering paths that disappeared.
    """
    # Iterate over a snapshot: the loop deletes entries, and mutating a
    # dict while iterating its items() raises RuntimeError on Python 3.
    for path, data in list(self.__paths.items()):
        stored_modified_time, is_file = data
        try:
            if not foundations.common.path_exists(path):
                LOGGER.warning(
                    "!> {0} | '{1}' path has been invalidated and will be unregistered!".format(
                        self.__class__.__name__, path))
                del (self.__paths[path])
                if is_file:
                    self.file_invalidated.emit(path)
                else:
                    self.directory_invalidated.emit(path)
                continue
        except KeyError:
            # Entry removed (e.g. concurrently unregistered) since snapshot.
            LOGGER.debug(
                "> {0} | '{1}' path has been unregistered while iterating!".format(
                    self.__class__.__name__, path))
            continue
        try:
            modified_time = self.get_path_modified_time(path)
        except OSError:
            LOGGER.debug(
                "> {0} | '{1}' path has been invalidated while iterating!".format(
                    self.__class__.__name__, path))
            continue
        if stored_modified_time != modified_time:
            # Refresh the stored timestamp, then notify listeners.
            self.__paths[path] = (modified_time, os.path.isfile(path))
            LOGGER.debug(
                "> {0} | '{1}' path has been changed!".format(
                    self.__class__.__name__, path))
            if is_file:
                self.file_changed.emit(path)
            else:
                self.directory_changed.emit(path)
def delete_all(self):
    """Delete every record in this object's table / collection.

    Iterates the bare data rows returned by ``find`` and removes each one
    by its primary key.  (The original bound the per-row result to an
    unused variable.)
    """
    for record in self.find(using_name=False, data_only=True):
        self.delete_one(record["id"])
def put_results(self):
    """Put results to scheduler, used by poller or reactionner when they
    are in active mode (passive=False)

    This function is not intended for external use. Let the poller and
    reactionner manage all this stuff by themselves ;)

    :param from: poller/reactionner identification
    :type from: str
    :param results: list of actions results
    :type results: list
    :return: True
    :rtype: bool
    """
    # Payload arrives as JSON on the cherrypy request body.
    res = cherrypy.request.json
    who_sent = res['from']
    results = res['results']
    # no_load=True: results are unserialized without resolving object links.
    results = unserialize(results, no_load=True)
    if results:
        logger.debug("Got some results: %d results from %s", len(results), who_sent)
    else:
        logger.debug("-> no results")
    for result in results:
        logger.debug("-> result: %s", result)
        # Append to the scheduler result queue
        self.app.sched.waiting_results.put(result)
    return True
def log_cef(name, severity=logging.INFO, env=None, username='none', signature=None, **kwargs):
    """Wraps cef logging function so we don't need to pass in the config
    dictionary every time. See bug 707060. ``env`` can be either a request
    object or just the request.META dictionary."""
    cef_logger = commonware.log.getLogger('cef')
    # Static CEF product/vendor configuration pulled from Django settings.
    c = {'product': settings.CEF_PRODUCT,
         'vendor': settings.CEF_VENDOR,
         'version': settings.CEF_VERSION,
         'device_version': settings.CEF_DEVICE_VERSION}
    # The CEF library looks for some things in the env object like
    # REQUEST_METHOD and any REMOTE_ADDR stuff. Django not only doesn't send
    # half the stuff you'd expect, but it specifically doesn't implement
    # readline on its FakePayload object so these things fail. I have no idea
    # if that's outdated code in Django or not, but andym made this
    # <strike>awesome</strike> less crappy so the tests will actually pass.
    # In theory, the last part of this if() will never be hit except in the
    # test runner. Good luck with that.
    if isinstance(env, HttpRequest):
        r = env.META.copy()
    elif isinstance(env, dict):
        r = env
    else:
        r = {}
    # Drop kwargs into CEF config array, then log.
    c['environ'] = r
    c.update({
        'username': username,
        'signature': signature,
        'data': kwargs,
    })
    cef_logger.log(severity, name, c)
def compound_crossspec(a_data, tbin, Df=None, pointProcess=False):
    """Calculate cross spectra of compound signals.

    For each dataset in ``a_data`` (array, 1st axis unit, 2nd axis time) the
    compound signal is obtained by summing over units; the cross spectra
    between these compound signals are then computed by ``crossspec``.

    Parameters
    ----------
    a_data : list of numpy.ndarrays
        One array per dataset; 1st axis unit, 2nd axis time.
    tbin : float
        Binsize in ms.
    Df : float/None
        Window width of sliding rectangular filter (smoothing);
        None -> no smoothing.
    pointProcess : bool
        If True, the crossspectrum is normalized to the signal length ``T``.

    Returns
    -------
    freq : numpy.ndarray of frequencies.
    CRO : 3-dim numpy.ndarray; 1st axis first compound signal, 2nd axis
        second compound signal, 3rd axis frequency.
    """
    compound_signals = [np.sum(dataset, axis=0) for dataset in a_data]
    return crossspec(np.array(compound_signals), tbin, Df,
                     units=False, pointProcess=pointProcess)
def convert(model, feature_names, target):
    """Convert a boosted tree model to protobuf format.

    Parameters
    ----------
    model : RandomForestRegressor
        A trained scikit-learn tree model.
    feature_names : [str]
        Name of the input columns.
    target : str
        Name of the output column.

    Returns
    -------
    model_spec : An object of type Model_pb.
        Protobuf representation of the model.
    """
    if not (_HAS_SKLEARN):
        raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
    _sklearn_util.check_expected_type(model, _ensemble.RandomForestRegressor)

    def is_rf_model(m):
        # Fitted iff estimators_ exists, is non-empty and every estimator
        # carries a tree.  (The original called len(m.estimators_) *before*
        # checking the attribute existed, raising AttributeError on
        # unfitted models instead of returning False.)
        if not hasattr(m, 'estimators_') or m.estimators_ is None:
            return False
        if len(m.estimators_) == 0:
            return False
        for t in m.estimators_:
            if not hasattr(t, 'tree_') or t.tree_ is None:
                return False
        return True

    _sklearn_util.check_fitted(model, is_rf_model)
    return _MLModel(_convert_tree_ensemble(model, feature_names, target))
def postcode(self):
    """Generate a postcode: pick a random format, replace every '?' with a
    random letter, then pass the result to numerify to insert numbers."""
    pattern = self.random_element(self.postal_code_formats)
    lettered = re.sub(r'\?', lambda match: self.postal_code_letter(), pattern)
    return self.numerify(lettered)
def build(self, lv2_uri):
    """Returns a new :class:`.Lv2Effect` by the valid lv2_uri

    :param string lv2_uri:
    :return Lv2Effect: Effect created
    """
    if lv2_uri not in self._plugins:
        raise Lv2EffectBuilderError(
            "Lv2EffectBuilder not contains metadata information about the plugin '{}'. \n"
            "Try re-scan the installed plugins using the reload method::\n"
            "   >>> lv2_effect_builder.reload(lv2_effect_builder.lv2_plugins_data())".format(lv2_uri))
    return Lv2Effect(self._plugins[lv2_uri])
def kelvin_to_rgb(kelvin):
    """Convert a color temperature given in kelvin to an approximate RGB value.

    :param kelvin: Color temp in K
    :return: Tuple of (r, g, b), equivalent color for the temperature
    """
    temp = kelvin / 100.0
    # Red saturates at or below 6600K, otherwise follows a power law.
    red = 255 if temp <= 66 else 329.698727446 * ((temp - 60) ** -0.1332047592)
    # Green: log curve at or below 6600K, power law above.
    if temp <= 66:
        green = 99.4708025861 * math.log(temp) - 161.1195681661
    else:
        green = 288.1221695283 * ((temp - 60) ** -0.0755148492)
    # Blue: absent at or below 1900K, saturated above 6600K, log in between.
    if temp > 66:
        blue = 255
    elif temp <= 19:
        blue = 0
    else:
        blue = 138.5177312231 * math.log(temp - 10) - 305.0447927307
    return tuple(correct_output(channel) for channel in (red, green, blue))
def create_from_row(cls, table_row):
    """Build a `JobDetails` from an `astropy.table.row.Row`.

    The four ``*_refs`` columns hold (start, stop) pairs that are expanded
    into explicit ``*_ids`` ranges before constructing the object.
    """
    kwargs = {column: table_row[column] for column in table_row.colnames}
    for refs_key, ids_key in (('infile_refs', 'infile_ids'),
                              ('outfile_refs', 'outfile_ids'),
                              ('rmfile_refs', 'rmfile_ids'),
                              ('intfile_refs', 'intfile_ids')):
        refs = kwargs.pop(refs_key)
        kwargs[ids_key] = np.arange(refs[0], refs[1])
    return cls(**kwargs)
def present(name,
            passwd,
            database="admin",
            user=None,
            password=None,
            host="localhost",
            port=27017,
            authdb=None,
            roles=None):
    '''
    Ensure that the user is present with the specified properties.

    name
        The name of the user to manage

    passwd
        The password of the user to manage

    user
        MongoDB user with sufficient privilege to create the user

    password
        Password for the admin user specified with the ``user`` parameter

    host
        The hostname/IP address of the MongoDB server

    port
        The port on which MongoDB is listening

    database
        The database in which to create the user

        .. note::
            If the database doesn't exist, it will be created.

    authdb
        The database in which to authenticate

    roles
        The roles assigned to user specified with the ``name`` parameter

    Example:

    .. code-block:: yaml

        mongouser-myapp:
          mongodb_user.present:
            - name: myapp
            - passwd: password-of-myapp
            - database: admin
            # Connect as admin:sekrit
            - user: admin
            - password: sekrit
            - roles:
                - readWrite
                - userAdmin
                - dbOwner
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'User {0} is already present'.format(name)}

    # setup default empty roles if not provided to preserve previous API interface
    if roles is None:
        roles = []

    # Check for valid port
    try:
        port = int(port)
    except TypeError:
        ret['result'] = False
        ret['comment'] = 'Port ({0}) is not an integer.'.format(port)
        return ret

    # check if user exists
    users = __salt__['mongodb.user_find'](name, user, password, host, port, database, authdb)
    if users:
        # check for errors returned in users e.g.
        # users = (False, 'Failed to connect to MongoDB database localhost:27017')
        # users = (False, 'not authorized on admin to execute command {usersInfo: "root"}')
        if not users[0]:
            ret['result'] = False
            ret['comment'] = "Mongo Err: {0}".format(users[1])
            return ret

        # check each user occurrence
        for usr in users:
            # prepare empty list for current roles
            current_roles = []
            # iterate over user roles and append each to current_roles list;
            # only roles on the target database are considered
            for role in usr['roles']:
                # check correct database to be sure to fill current_roles only for desired db
                if role['db'] == database:
                    current_roles.append(role['role'])
            # fill changes if the roles and current roles differ;
            # mongodb.user_create updates roles on an existing user
            if not set(current_roles) == set(roles):
                ret['changes'].update({name: {'database': database,
                                              'roles': {'old': current_roles,
                                                        'new': roles}}})
                __salt__['mongodb.user_create'](name, passwd, user, password,
                                                host, port, database=database,
                                                authdb=authdb, roles=roles)
        return ret

    # if the check does not return a boolean, return an error
    # this may be the case if there is a database connection error
    if not isinstance(users, list):
        ret['comment'] = users
        ret['result'] = False
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = ('User {0} is not present and needs to be created').format(name)
        return ret

    # The user is not present, make it!
    if __salt__['mongodb.user_create'](name, passwd, user, password, host, port,
                                       database=database, authdb=authdb, roles=roles):
        ret['comment'] = 'User {0} has been created'.format(name)
        ret['changes'][name] = 'Present'
    else:
        ret['comment'] = 'Failed to create database {0}'.format(name)
        ret['result'] = False
    return ret
def __dog_started(self):
    """Prepare the watchdog for a scheduled-task start.

    Fetches the task from the current record and stores it. Refuses to
    start when a task is already running, or when the record's task is not
    a :class:`WScheduleTask`.

    :return: None
    """
    if self.__task is not None:
        raise RuntimeError('Unable to start task. In order to start a new task - at first stop it')
    self.__task = self.record().task()
    # NOTE: the task is stored before validation, matching the original flow
    if not isinstance(self.__task, WScheduleTask):
        task_class = self.__task.__class__.__qualname__
        raise RuntimeError('Unable to start unknown type of task: %s' % task_class)
def insert(self, i, tag1, tag2, cmd="prevtag", x=None, y=None):
    """Insert a new rule that updates words with tag1 to tag2, given
    constraints x and y, e.g., Context.append("TO < NN", "VB").
    """
    # Shorthand notation: "A < B" means prevtag constraint, "A > B" nexttag.
    if not x and not y:
        if " < " in tag1:
            tag1, x = tag1.split(" < ")
            cmd = "prevtag"
        elif " > " in tag1:
            x, tag1 = tag1.split(" > ")
            cmd = "nexttag"
    lazylist.insert(self, i, [tag1, tag2, cmd, x or "", y or ""])
def post(self):
    """Create a new bucket and return it in the response."""
    with db.session.begin_nested():
        bucket = Bucket.create(
            storage_class=current_app.config['FILES_REST_DEFAULT_STORAGE_CLASS'],
        )
    db.session.commit()
    return self.make_response(data=bucket, context={'class': Bucket})
def stop_all_periodic_tasks(self, remove_tasks=True):
    """Stop sending any messages that were started using bus.send_periodic.

    :param bool remove_tasks: Stop tracking the stopped tasks.
    """
    # Iterate over a snapshot: task.stop(remove_task=True) removes the task
    # from self._periodic_tasks, and mutating the list while iterating it
    # would silently skip every other task.
    for task in self._periodic_tasks[:]:
        task.stop(remove_task=remove_tasks)
def cric__lasso():
    """L1-regularized logistic regression model.

    NOTE(review): despite the "Lasso" name this is a LogisticRegression
    with an L1 penalty (sparse coefficients), not a least-squares Lasso.
    """
    clf = sklearn.linear_model.LogisticRegression(penalty="l1", C=0.002)
    # we want to explain the raw probability of the positive class,
    # not hard 0/1 labels
    clf.predict = lambda X: clf.predict_proba(X)[:, 1]
    return clf
def get_points(self):
    """Return a ketama-compatible list of (position, nodename) tuples."""
    ring = self.runtime._ring
    return [(position, ring[position]) for position in self.runtime._keys]
def drop(self, index=None, columns=None):
    """Remove row data for target index and columns.

    Args:
        index: Target index to drop.
        columns: Target columns to drop.

    Returns:
        A new QueryCompiler.
    """
    if self._is_transposed:
        # Operate on the transposed frame with the axes swapped, then
        # transpose back to the caller's orientation.
        return self.transpose().drop(index=columns, columns=index).transpose()
    if index is None:
        new_data = self.data
        new_index = self.index
    else:

        def delitem(df, internal_indices=[]):
            # Drop the rows at the partition-local positions.
            return df.drop(index=df.index[internal_indices])

        # Translate labels to positional indices for the partition map.
        numeric_indices = list(self.index.get_indexer_for(index))
        new_data = self.data.apply_func_to_select_indices(
            1, delitem, numeric_indices, keep_remaining=True)
        # We can't use self.index.drop with duplicate keys because in Pandas
        # it throws an error.
        new_index = self.index[~self.index.isin(index)]
    if columns is None:
        new_columns = self.columns
        new_dtypes = self.dtypes
    else:

        def delitem(df, internal_indices=[]):
            # Drop the columns at the partition-local positions.
            return df.drop(columns=df.columns[internal_indices])

        numeric_indices = list(self.columns.get_indexer_for(columns))
        # Chain onto new_data so a combined index+columns drop applies both.
        new_data = new_data.apply_func_to_select_indices(
            0, delitem, numeric_indices, keep_remaining=True)
        new_columns = self.columns[~self.columns.isin(columns)]
        new_dtypes = self.dtypes.drop(columns)
    return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
def NTU_from_P_G(P1, R1, Ntp, optimal=True):
    r'''Return the number of transfer units (NTU1) of a TEMA G type heat
    exchanger with a specified (for side 1) thermal effectiveness `P1`,
    heat capacity ratio `R1`, number of tube passes `Ntp` (1 or 2), and
    for the two-pass case whether or not the inlets are arranged
    optimally.

    Parameters
    ----------
    P1 : float
        Thermal effectiveness with respect to stream 1, [-]
    R1 : float
        Heat capacity ratio with respect to stream 1, [-]
    Ntp : int
        Number of tube passes, 1 or 2, [-]
    optimal : bool, optional
        Whether the two-pass arrangement is the efficient countercurrent
        configuration (True) or the inefficient parallel one, [-]

    Returns
    -------
    NTU1 : float
        Thermal number of transfer units with respect to stream 1, [-]

    Notes
    -----
    Raises if `Ntp` is not 1 or 2, or if the requested `P1` is higher than
    achievable for the given `R1` and configuration.

    Examples
    --------
    >>> NTU_from_P_G(P1=.573, R1=1/3., Ntp=1)
    0.9999513707769526
    '''
    if Ntp not in (1, 2):
        raise Exception('Supported numbers of tube passes are 1 or 2.')
    NTU_min = 1E-11
    if Ntp == 2 and not optimal:
        # The unoptimal two-pass curve is not monotonic over the full range;
        # bound the search with a pre-computed maximum NTU for this R1.
        NTU_max = _NTU_max_for_P_solver(NTU_from_G_2_unoptimal, R1)
    else:
        # Monotonic cases (1 pass, optimal 2 pass): a fixed large upper
        # bound is sufficient for the bounded solver.
        NTU_max = 1E4
    return _NTU_from_P_solver(P1, R1, NTU_min, NTU_max,
                              temperature_effectiveness_TEMA_G,
                              Ntp=Ntp, optimal=optimal)
def create_new_example(foo='', a='', b=''):
    """Factory method for example entities.

    :rtype: Example
    """
    attributes = {'foo': foo, 'a': a, 'b': b}
    return Example.__create__(**attributes)
def to_dict(self, remove_nones=False):
    """Create a dictionary representation of the report.

    :param remove_nones: Whether ``None`` values should be filtered out of
        the dictionary.  Defaults to ``False``.
    :return: A dictionary representation of the report.
    """
    if remove_nones:
        report_dict = super().to_dict(remove_nones=True)
    else:
        report_dict = {
            'title': self.title,
            'reportBody': self.body,
            'timeBegan': self.time_began,
            'externalUrl': self.external_url,
            'distributionType': self._get_distribution_type(),
            'externalTrackingId': self.external_id,
            'enclaveIds': self.enclave_ids,
            'created': self.created,
            'updated': self.updated,
        }
    # The id field might not be set yet.  Only emit an explicit None when
    # the caller did NOT ask for None values to be removed; the previous
    # code unconditionally added 'id': None, contradicting remove_nones.
    if self.id is not None:
        report_dict['id'] = self.id
    elif not remove_nones:
        report_dict['id'] = None
    return report_dict
def make_prefetchitem_applicationfilename(application_filename, condition='is',
                                          negate=False, preserve_case=False):
    """Create a node for PrefetchItem/ApplicationFileName.

    :return: A IndicatorItem represented as an Element node
    """
    return ioc_api.make_indicatoritem_node(
        condition,
        'PrefetchItem',
        'PrefetchItem/ApplicationFileName',
        'string',
        application_filename,
        negate=negate,
        preserve_case=preserve_case,
    )
def count(self, q):
    """Shorthand for counting the results of a specific query.

    ## Arguments
    * `q` (str): The query to count. This will be executed as:
      `"SELECT COUNT(*) %s" % q`.

    ## Returns
    * `count` (int): The resulting count.
    """
    query = "SELECT COUNT(*) %s" % q
    # second output line holds the numeric result
    result = self.quick(query)
    return int(result.split("\n")[1])
def set_title(self, title=None):
    """Set the editor title.

    :param title: Editor title.
    :type title: unicode
    :return: Method success.
    :rtype: bool
    """
    if not title:
        # TODO: https://bugreports.qt-project.org/browse/QTBUG-27084
        # titleTemplate = self.is_modified() and "{0} *" or "{0}"
        # title = titleTemplate.format(self.get_file_short_name())
        title = self.get_file_short_name()
    LOGGER.debug("> Setting editor title to '{0}'.".format(title))
    self.__title = title
    self.setWindowTitle(title)
    self.title_changed.emit()
    return True
def from_file(path):
    """Return a ``Config`` instance by reading a configuration file."""
    with open(path) as stream:
        raw = yaml.safe_load(stream)
    # validate before constructing
    Config.lint(raw)
    return Config(config_dict=raw)
def spec_from_json_dict(json_dict  # type: Dict[str, Any]
                        ):
    # type: (...) -> FieldSpec
    """Turn a dictionary into the appropriate FieldSpec object.

    :param dict json_dict: A dictionary with properties.
    :returns: An initialised instance of the appropriate FieldSpec subclass.
    """
    # fields marked as ignored bypass the type dispatch entirely
    if 'ignored' in json_dict:
        return Ignore(json_dict['identifier'])
    spec_type = cast(FieldSpec, FIELD_TYPE_MAP[json_dict['format']['type']])
    return spec_type.from_json_dict(json_dict)
def _create_ip_report(self):
    '''Write the obfuscated-IP mapping database out as a CSV report.

    Produces "<session>-ip.csv" in the report directory with one
    "Obfuscated IP,Original IP" row per entry in self.ip_db, converting
    the stored integer representations back to dotted-quad strings.
    Stores the report path on self.ip_report.  Raises a generic Exception
    on any failure.
    '''
    try:
        ip_report_name = os.path.join(self.report_dir, "%s-ip.csv" % self.session)
        self.logger.con_out('Creating IP Report - %s', ip_report_name)
        ip_report = open(ip_report_name, 'wt')
        ip_report.write('Obfuscated IP,Original IP\n')
        # keys are the obfuscated addresses, values the originals,
        # both stored as integers
        for k, v in self.ip_db.items():
            ip_report.write('%s,%s\n' % (self._int2ip(k), self._int2ip(v)))
        ip_report.close()
        self.logger.info('Completed IP Report')
        self.ip_report = ip_report_name
    except Exception as e:  # pragma: no cover
        self.logger.exception(e)
        raise Exception('CreateReport Error: Error Creating IP Report')
def _parse_example_spec(self):
    """Returns a `tf.Example` parsing spec as dict."""
    height, width = image_util.get_expected_image_size(self.module_spec)
    # fixed-size float feature: one RGB image at the module's expected size
    return {self.key: tf_v1.FixedLenFeature([height, width, 3], tf.float32)}
def tiltFactor ( self , midpointdepth = None , printAvAngle = False ) : '''get tilt factor from inverse distance law https : / / en . wikipedia . org / wiki / Inverse - square _ law'''
# TODO : can also be only def . with FOV , rot , tilt beta2 = self . viewAngle ( midpointdepth = midpointdepth ) try : angles , vals = getattr ( emissivity_vs_angle , self . opts [ 'material' ] ) ( ) except AttributeError : raise AttributeError ( "material[%s] is not in list of know materials: %s" % ( self . opts [ 'material' ] , [ o [ 0 ] for o in getmembers ( emissivity_vs_angle ) if isfunction ( o [ 1 ] ) ] ) ) if printAvAngle : avg_angle = beta2 [ self . foreground ( ) ] . mean ( ) print ( 'angle: %s DEG' % np . degrees ( avg_angle ) ) # use averaged angle instead of beta2 to not overemphasize correction normEmissivity = np . clip ( InterpolatedUnivariateSpline ( np . radians ( angles ) , vals ) ( beta2 ) , 0 , 1 ) return normEmissivity
def maxlevel(lst):
    """Return maximum nesting depth.

    A non-list is depth 0; a list is one deeper than its deepest element
    (an empty list is depth 1).
    """
    if not isinstance(lst, list):
        return 0
    return 1 + max((maxlevel(item) for item in lst), default=0)
def get_file_object(filename, mode="r"):
    """Context manager for a file object.

    If filename is present, this is the same as
    ``with open(filename, mode): ...``.  If filename is None, the yielded
    object is sys.stdin or sys.stdout depending on the mode.

    :filename: the name of the file, or None for STDIN/STDOUT
    :mode: the mode to open the file with
    """
    if filename is not None:
        with open(filename, mode) as fobj:
            yield fobj
    elif mode.startswith("r"):
        yield sys.stdin
    else:
        yield sys.stdout
def get_initials(pinyin, strict):
    """获取单个拼音中的声母.

    :param pinyin: 单个拼音
    :type pinyin: unicode
    :param strict: 是否严格遵照《汉语拼音方案》来处理声母和韵母
    :return: 声母 (empty string when no initial matches)
    :rtype: unicode
    """
    candidates = _INITIALS if strict else _INITIALS_NOT_STRICT
    # the candidate table's ordering decides prefix precedence
    for initial in candidates:
        if pinyin.startswith(initial):
            return initial
    return ''
def get_form_language(self, request, obj=None):
    """Return the current language for the currently displayed object fields."""
    if obj is None:
        return self._language(request)
    return obj.get_current_language()
def check_refresh(self, data, ret):
    '''
    Check to see if the modules for this state instance need to be updated,
    only update if the state is a file or a package and if it changed
    something. If the file function is managed check to see if the file is a
    possible module type, e.g. a python, pyx, or .so. Always refresh if the
    function is recurse, since that can lay down anything.
    '''
    _reload_modules = False
    if data.get('reload_grains', False):
        log.debug('Refreshing grains...')
        self.opts['grains'] = salt.loader.grains(self.opts)
        # grains changed, so loaded modules must be rebuilt too
        _reload_modules = True
    if data.get('reload_pillar', False):
        log.debug('Refreshing pillar...')
        self.opts['pillar'] = self._gather_pillar()
        _reload_modules = True
    if not ret['changes']:
        # nothing changed: only refresh when explicitly forced
        if data.get('force_reload_modules', False):
            self.module_refresh()
        return
    if data.get('reload_modules', False) or _reload_modules:
        # User explicitly requests a reload
        self.module_refresh()
        return
    if data['state'] == 'file':
        if data['fun'] == 'managed':
            # a managed file that looks like a loadable module type
            if data['name'].endswith(('.py', '.pyx', '.pyo', '.pyc', '.so')):
                self.module_refresh()
        elif data['fun'] == 'recurse':
            # recurse can lay down anything; always refresh
            self.module_refresh()
        elif data['fun'] == 'symlink':
            # a symlink into a bin directory may expose new executables
            if 'bin' in data['name']:
                self.module_refresh()
    elif data['state'] in ('pkg', 'ports'):
        # package changes can install new modules/binaries
        self.module_refresh()
def _write_cron_lines(user, lines):
    '''
    Takes a list of lines to be committed to a user's crontab and writes it.

    Writes the lines to a temp file, loads that file with the crontab
    command (as the target user when required by the platform), removes the
    temp file, and returns the cmd.run_all result dict.
    '''
    lines = [salt.utils.stringutils.to_str(_l) for _l in lines]
    path = salt.utils.files.mkstemp()
    if _check_instance_uid_match(user) or __grains__.get('os_family') in ('Solaris', 'AIX'):
        # In some cases crontab command should be executed as user rather than root
        with salt.utils.files.fpopen(path, 'w+',
                                     uid=__salt__['file.user_to_uid'](user),
                                     mode=0o600) as fp_:
            fp_.writelines(lines)
        ret = __salt__['cmd.run_all'](_get_cron_cmdstr(path),
                                      runas=user,
                                      python_shell=False)
    else:
        # run as root and let crontab switch to the target user
        with salt.utils.files.fpopen(path, 'w+', mode=0o600) as fp_:
            fp_.writelines(lines)
        ret = __salt__['cmd.run_all'](_get_cron_cmdstr(path, user),
                                      python_shell=False)
    # always clean up the temp file
    os.remove(path)
    return ret
def unicode_string(string):
    """Make sure string is unicode text.

    Tries UTF-8 decoding first; bytes that are not valid UTF-8 are wrapped
    in a base64 envelope that can be reversed by `decode_unicode_string`.
    """
    if isinstance(string, six.text_type):
        return string
    try:
        return string.decode("utf8")
    except UnicodeDecodeError:
        # base64.b64encode returns bytes on Python 3; decode to ASCII text
        # before concatenating with the str markers (the previous code
        # raised TypeError here under Python 3: str + bytes).
        encoded = base64.b64encode(string).decode("ascii")
        return '[BASE64-DATA]' + encoded + '[/BASE64-DATA]'
def delete_webhook(self, scaling_group, policy, webhook):
    """Deletes the specified webhook from the specified policy."""
    uri = "/%s/%s/policies/%s/webhooks/%s" % (
        self.uri_base,
        utils.get_id(scaling_group),
        utils.get_id(policy),
        utils.get_id(webhook),
    )
    # response body is not meaningful for a delete
    self.api.method_delete(uri)
    return None
def ParseOptions(cls, options, configuration_object):
    """Parses and validates options.

    Args:
        options (argparse.Namespace): parser options.
        configuration_object (CLITool): object to be configured by the
            argument helper.

    Raises:
        BadConfigObject: when the configuration object is of the wrong type.
        BadConfigOption: if the required artifact definitions are not
            defined.
    """
    if not isinstance(configuration_object, tools.CLITool):
        raise errors.BadConfigObject(
            'Configuration object is not an instance of CLITool')

    artifacts_path = getattr(options, 'artifact_definitions_path', None)
    data_location = getattr(configuration_object, '_data_location', None)
    # When no usable path was supplied, probe a series of fallback
    # locations: the data location, the virtualenv share dir, sys.prefix
    # variants and finally the system-wide /usr locations.
    if ((not artifacts_path or not os.path.exists(artifacts_path))
            and data_location):
        artifacts_path = os.path.dirname(data_location)
        artifacts_path = os.path.join(artifacts_path, 'artifacts')
        if not os.path.exists(artifacts_path) and 'VIRTUAL_ENV' in os.environ:
            artifacts_path = os.path.join(
                os.environ['VIRTUAL_ENV'], 'share', 'artifacts')
        if not os.path.exists(artifacts_path):
            artifacts_path = os.path.join(sys.prefix, 'share', 'artifacts')
        if not os.path.exists(artifacts_path):
            artifacts_path = os.path.join(
                sys.prefix, 'local', 'share', 'artifacts')
        if sys.prefix != '/usr':
            if not os.path.exists(artifacts_path):
                artifacts_path = os.path.join('/usr', 'share', 'artifacts')
            if not os.path.exists(artifacts_path):
                artifacts_path = os.path.join(
                    '/usr', 'local', 'share', 'artifacts')
        if not os.path.exists(artifacts_path):
            artifacts_path = None

    if not artifacts_path or not os.path.exists(artifacts_path):
        raise errors.BadConfigOption(
            'Unable to determine path to artifact definitions.')

    custom_artifacts_path = getattr(
        options, 'custom_artifact_definitions_path', None)
    if custom_artifacts_path and not os.path.isfile(custom_artifacts_path):
        raise errors.BadConfigOption(
            'No such artifacts filter file: {0:s}.'.format(
                custom_artifacts_path))
    if custom_artifacts_path:
        logger.info(
            'Custom artifact filter file: {0:s}'.format(custom_artifacts_path))

    registry = artifacts_registry.ArtifactDefinitionsRegistry()
    reader = artifacts_reader.YamlArtifactsReader()
    logger.info(
        'Determined artifact definitions path: {0:s}'.format(artifacts_path))

    try:
        registry.ReadFromDirectory(reader, artifacts_path)
    except (KeyError, artifacts_errors.FormatError) as exception:
        raise errors.BadConfigOption((
            'Unable to read artifact definitions from: {0:s} with error: '
            '{1!s}').format(artifacts_path, exception))

    # Definitions required by the preprocessing plugins must all resolve.
    for name in preprocessors_manager.PreprocessPluginsManager.GetNames():
        if not registry.GetDefinitionByName(name):
            raise errors.BadConfigOption(
                'Missing required artifact definition: {0:s}'.format(name))

    if custom_artifacts_path:
        try:
            registry.ReadFromFile(reader, custom_artifacts_path)
        except (KeyError, artifacts_errors.FormatError) as exception:
            raise errors.BadConfigOption((
                'Unable to read artifact definitions from: {0:s} with error: '
                '{1!s}').format(custom_artifacts_path, exception))

    setattr(configuration_object, '_artifact_definitions_path', artifacts_path)
    setattr(configuration_object, '_custom_artifacts_path',
            custom_artifacts_path)
def _process_image_file(fobj, session, filename):
    """Process image files from the dataset.

    We need to read the image files and convert them to JPEG, since some
    files actually contain GIF, PNG or BMP data (despite having a .jpg
    extension) and some encoding options that will make TF crash in general.
    """
    decoded = _decode_image(fobj, session, filename=filename)
    return _encode_jpeg(decoded)
def qvalues1(PV, m=None, pi=1.0):
    """Estimate q-values from a list of p-values.

    This algorithm is taken from Storey, significance testing for
    genomic...

    :param PV: p-values (any shape; the result has the same shape)
    :param m: total number of tests if PV is not the entire array
        (defaults to len(PV))
    :param pi: fraction of expected true nulls (1.0 is a conservative
        estimate; pass None to estimate it from the data)
    """
    S = PV.shape
    PV = PV.flatten()
    # m defaults to the number of supplied p-values; force float arithmetic
    if m is None:
        m = len(PV) * 1.0
    else:
        m *= 1.0
    lPV = len(PV)
    # 1. sort pvalues
    PV = PV.squeeze()
    IPV = PV.argsort()
    PV = PV[IPV]
    # 2. estimate lambda
    if pi is None:
        lrange = sp.linspace(0.05, 0.95, max(lPV / 100.0, 10))
        pil = sp.double((PV[:, sp.newaxis] > lrange).sum(axis=0)) / lPV
        pilr = pil / (1.0 - lrange)
        # ok, I think for SNPs this is pretty useless, pi is close to 1!
        pi = 1.0
        # if there is something useful in there use the something close to 1
        if pilr[-1] < 1.0:
            pi = pilr[-1]
    # 3. initialise q values
    QV_ = pi * m / lPV * PV
    QV_[-1] = min(QV_[-1], 1.0)
    # 4. update estimate (enforce monotonicity from the largest p-value down)
    for i in range(lPV - 2, -1, -1):
        QV_[i] = min(pi * m * PV[i] / (i + 1.0), QV_[i + 1])
    # 5. invert sorting
    QV = sp.zeros_like(PV)
    QV[IPV] = QV_
    QV = QV.reshape(S)
    return QV
def is_connected(self, attempts=3):
    """Try to reconnect if necessary.

    :param attempts: The amount of tries to reconnect if necessary.
    :type attempts: ``int``
    :return: True when connected (or already connected), False otherwise.
    """
    if self.gce is not None:
        return True
    remaining = attempts
    while remaining > 0:
        self.logger.info("Attempting to connect ...")
        try:
            self.connect()
        except ComputeEngineManagerException:
            remaining -= 1
            continue
        self.logger.info("Connection established.")
        return True
    self.logger.error("Unable to connect to Google Compute Engine.")
    return False
def text(self):
    """Get the raw text for the response (decoded lazily and cached)."""
    try:
        return self._text
    except AttributeError:
        pass
    # pick the charset API matching the running interpreter's http headers
    if IS_PYTHON_3:
        encoding = self._response.headers.get_content_charset("utf-8")
    else:
        encoding = self._response.headers.getparam("charset")
    self._text = self._response.read().decode(encoding or "utf-8")
    return self._text
def fetch_mga_scores(mga_vec, codon_pos, default_mga=None):
    """Get MGAEntropy scores from pre-computed scores in array.

    Parameters
    ----------
    mga_vec : np.array
        numpy vector containing MGA Entropy conservation scores for residues
    codon_pos : list of int
        position of codon in protein sequence
    default_mga : float or None, default=None
        value to use if MGA entropy score not available for a given
        mutation. If None, such positions are dropped (and None is
        returned when no positions remain).

    Returns
    -------
    mga_ent_scores : np.array or None
        score results for MGA entropy conservation
    """
    len_mga = len(mga_vec)
    if default_mga is None:
        # original behaviour: silently drop out-of-range positions
        good_codon_pos = [p for p in codon_pos if p < len_mga]
        if not good_codon_pos:
            return None
        return mga_vec[good_codon_pos]
    # Substitute the default for unavailable positions, preserving order.
    # The docstring promised this, but the parameter was previously ignored.
    return np.array([mga_vec[p] if p < len_mga else default_mga
                     for p in codon_pos])
def _round_to_next_multiple ( n , m ) : """Round up the the next multiple . : param n : The number to round up . : param m : The multiple . : return : The rounded number"""
return n if n % m == 0 else n + m - n % m
def username_name(self, **kwargs):
    """Auto Generated Code.

    Builds the XML config tree for a brocade-aaa username entry and hands
    it to the callback (defaults to self._callback).
    """
    config = ET.Element("config")
    username = ET.SubElement(config, "username",
                             xmlns="urn:brocade.com:mgmt:brocade-aaa")
    ET.SubElement(username, "name").text = kwargs.pop('name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def save_config(**kwargs):
    """Save configuration keys to vispy config file.

    Parameters
    ----------
    **kwargs : keyword arguments
        Key/value pairs to save to the config file.  With no arguments,
        the full current config is persisted.
    """
    if kwargs == {}:
        kwargs = config._config
    current_config = _load_config()
    current_config.update(**kwargs)
    # write to disk
    fname = _get_config_fname()
    if fname is None:
        raise RuntimeError('config filename could not be determined')
    # use makedirs so missing intermediate directories don't crash;
    # os.mkdir only creates the leaf directory
    if not op.isdir(op.dirname(fname)):
        os.makedirs(op.dirname(fname))
    with open(fname, 'w') as fid:
        json.dump(current_config, fid, sort_keys=True, indent=0)
def is_valid(self):
    """Return True if tile is available in tile pyramid.

    Raises TypeError for non-integer/negative coordinates and ValueError
    for coordinates outside the zoom level's matrix (it never returns
    False).
    """
    coords_ok = (
        isinstance(self.zoom, int) and self.zoom >= 0 and
        isinstance(self.row, int) and self.row >= 0 and
        isinstance(self.col, int) and self.col >= 0
    )
    if not coords_ok:
        raise TypeError("zoom, col and row must be integers >= 0")
    cols = self.tile_pyramid.matrix_width(self.zoom)
    rows = self.tile_pyramid.matrix_height(self.zoom)
    if self.col >= cols:
        raise ValueError("col (%s) exceeds matrix width (%s)" % (self.col, cols))
    if self.row >= rows:
        raise ValueError("row (%s) exceeds matrix height (%s)" % (self.row, rows))
    return True
def get_env(key: str, default: Any = None,
            clean: Callable[[str], Any] = lambda v: v):
    '''Retrieve a configuration value from the environment variables.

    The given *key* is uppercased and prefixed by ``"BACKEND_"`` and then
    ``"SORNA_"`` if the former does not exist.

    :param key: The key name.
    :param default: The default value returned when there is no
        corresponding environment variable; a KeyError is raised when the
        default itself is None.
    :param clean: A single-argument function applied to the result of
        lookup (in both successes and the default value for failures).
        The default is returning the value as-is.
    :returns: The value processed by the *clean* function.
    '''
    env_key = key.upper()
    value = os.environ.get('BACKEND_' + env_key)
    if value is None:
        value = os.environ.get('SORNA_' + env_key)
    if value is None:
        if default is None:
            # note: the uppercased key is reported, as before
            raise KeyError(env_key)
        value = default
    return clean(value)
def set_from_json(self, obj, json, models=None, setter=None):
    '''Sets the value of this property from a JSON value.

    The JSON value is first deserialized with ``self.property.from_json``
    and then delegated to the base descriptor.

    Args:
        obj (HasProps):
        json (JSON-dict):
        models (seq[Model], optional):
        setter (ClientSession or ServerSession or None, optional):
            This is used to prevent "boomerang" updates to Bokeh apps.
            (default: None)

            In the context of a Bokeh server application, incoming updates
            to properties will be annotated with the session that is doing
            the updating. This value is propagated through any subsequent
            change notifications that the update triggers. The session can
            compare the event setter to itself, and suppress any updates
            that originate from itself.

    Returns:
        None
    '''
    return super(BasicPropertyDescriptor, self).set_from_json(
        obj, self.property.from_json(json, models), models, setter)
def _get_translated_queryset(self, meta=None):
    """Return the queryset that points to the translated model.

    If there is a prefetch, it can be read from this queryset.
    """
    if meta is None:
        meta = self._parler_meta.root
    # Go through the descriptor so the prefetch/select_related cache is used.
    related_manager = getattr(self, meta.rel_name)
    return related_manager.get_queryset()
def _proxy(self):
    """Generate an instance context for the instance; the context is
    capable of performing various actions.  All instance actions are
    proxied to the context.  The context is created lazily and cached.

    :returns: CredentialListContext for this CredentialListInstance
    :rtype: twilio.rest.api.v2010.account.sip.credential_list.CredentialListContext
    """
    if self._context is None:
        self._context = CredentialListContext(
            self._version,
            account_sid=self._solution['account_sid'],
            sid=self._solution['sid'],
        )
    return self._context
def _get_userdir(user=None):
    """Return the home directory for *user* (or the current user), or None.

    On Windows the directory is derived from the HOME / USERPROFILE /
    HOMEDRIVE+HOMEPATH environment variables; a named user's directory is
    assumed to be a sibling of the current user's. On POSIX the pwd
    database is consulted (HOME takes precedence for the current user).

    :raises TypeError: if *user* is given but is not an fsnative string
    """
    if user is not None and not isinstance(user, fsnative):
        raise TypeError
    if is_win:
        if "HOME" in environ:
            path = environ["HOME"]
        elif "USERPROFILE" in environ:
            path = environ["USERPROFILE"]
        elif "HOMEPATH" in environ and "HOMEDRIVE" in environ:
            path = os.path.join(environ["HOMEDRIVE"], environ["HOMEPATH"])
        else:
            return
        if user is None:
            return path
        else:
            # assume sibling directory of the current user's home
            return os.path.join(os.path.dirname(path), user)
    else:
        import pwd
        if user is None:
            if "HOME" in environ:
                return environ["HOME"]
            else:
                try:
                    return path2fsn(pwd.getpwuid(os.getuid()).pw_dir)
                except KeyError:
                    return
        else:
            try:
                return path2fsn(pwd.getpwnam(user).pw_dir)
            except KeyError:
                return
def is_message_handler(type_, from_, cb):
    """Return true if `cb` has been decorated with :func:`message_handler`
    for the given `type_` and `from_`.
    """
    try:
        specs = aioxmpp.service.get_magic_attr(cb)
    except AttributeError:
        # Undecorated callables carry no magic attribute at all.
        return False
    wanted = aioxmpp.service.HandlerSpec(
        (_apply_message_handler, (type_, from_)),
        require_deps=(SimpleMessageDispatcher,),
    )
    return wanted in specs
def _querystring ( self ) : """Additional keyword arguments"""
kw = { "studyoid" : self . studyoid } if self . location_oid is not None : kw [ "locationoid" ] = self . location_oid return kw
def reset(self, params, repetition):
    """Called at the beginning of each experiment and each repetition"""
    # Echo the experiment parameters for the run log.
    pprint.pprint(params)
    self.initialize(params, repetition)

    # Load CIFAR dataset with standard CIFAR-10 augmentation
    # (random crop + horizontal flip) and per-channel normalization.
    dataDir = params.get('dataDir', 'data')
    self.transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        # Channel means / stds below are the commonly used CIFAR-10
        # statistics.
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    self.trainset = datasets.CIFAR10(root=dataDir, train=True,
                                     download=True,
                                     transform=self.transform_train)

    self.createModel(params, repetition)
    print("Torch reports", torch.cuda.device_count(), "GPUs available")
    # Wrap in DataParallel only when more than one GPU is visible.
    if torch.cuda.device_count() > 1:
        self.model = torch.nn.DataParallel(self.model)
    self.model.to(self.device)

    # Optimizer and LR schedule are rebuilt from scratch each repetition.
    self.optimizer = self.createOptimizer(self.model)
    self.lr_scheduler = self.createLearningRateScheduler(self.optimizer)
    # One test loader per noise level — presumably for noise-robustness
    # evaluation; confirm against createTestLoaders.
    self.test_loaders = self.createTestLoaders(self.noise_values)
def return_features_base(dbpath, set_object, names):
    """Generic function which returns a list of extracted features from the
    database.

    Parameters
    ----------
    dbpath : string, path to SQLite database file
    set_object : object (either TestSet or TrainSet) which is stored in the
        database
    names : list of strings, a list of feature names which are to be
        retrieved from the database, if equal to 'all', all features will be
        returned

    Returns
    -------
    return_list : list of lists, each 'inside list' corresponds to a single
        data point, each element of the 'inside list' is a feature (can be
        of any type)
    """
    engine = create_engine('sqlite:////' + dbpath)
    session_cl = sessionmaker(bind=engine)
    session = session_cl()
    try:
        return_list = []
        for row in session.query(set_object).order_by(set_object.id):
            if names == 'all':
                features = [row.features[feature] for feature in row.features]
            else:
                # Keep only the requested feature names.
                features = [row.features[feature] for feature in row.features
                            if feature in names]
            return_list.append(features)
        return return_list
    finally:
        # The original leaked the session/connection; always release it.
        session.close()
def remove_key(self, store_key):
    """Remove key from the index.

    :param store_key: The key for the document in the store
    :type store_key: str
    """
    # Forget the key if it was recorded as undefined.
    self._undefined_keys.pop(store_key, None)
    # Detach the key from every value bucket it was indexed under,
    # then drop its reverse mapping entirely.
    values = self._reverse_index.pop(store_key, None)
    if values is not None:
        for value in values:
            self._index[value].remove(store_key)
def do_tag(self, args: argparse.Namespace):
    """create an html tag"""
    # The Namespace always includes the Statement object created when
    # parsing the command line.
    statement = args.__statement__
    self.poutput("The command line you ran was: {}".format(statement.command_and_args))
    self.poutput("It generated this tag:")
    body = ' '.join(args.content)
    self.poutput('<{0}>{1}</{0}>'.format(args.tag, body))
def _findUniqueMappingValues ( mapping ) : """Find mapping entries that are unique for one key ( value length of 1 ) . . . Note : This function can be used to find unique proteins by providing a peptide to protein mapping . : param mapping : dict , for each key contains a set of entries : returns : a set of unique mapping values"""
uniqueMappingValues = set ( ) for entries in viewvalues ( mapping ) : if len ( entries ) == 1 : uniqueMappingValues . update ( entries ) return uniqueMappingValues
def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv='base'):
    '''
    .. versionadded:: 2017.7.0

    Add a repo key using ``apt-key add``.

    :param str path: The path of the key file to import.
    :param str text: The key data to import, in string form.
    :param str keyserver: The server to download the repo key specified by
        the keyid.
    :param str keyid: The key id of the repo key to add.
    :param str saltenv: The environment the key file resides in.

    :return: A boolean representing whether the repo key was added.
    :rtype: bool

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.add_repo_key 'salt://apt/sources/test.key'

        salt '*' pkg.add_repo_key text="'$KEY1'"

        salt '*' pkg.add_repo_key keyserver='keyserver.example' keyid='0000AAAA'
    '''
    cmd = ['apt-key']
    kwargs = {}
    current_repo_keys = get_repo_keys()
    # Exactly one of path / text / keyserver(+keyid) / keyid may drive the
    # import; the branches below are checked in that priority order.
    if path:
        cached_source_path = __salt__['cp.cache_file'](path, saltenv)
        if not cached_source_path:
            log.error('Unable to get cached copy of file: %s', path)
            return False
        cmd.extend(['add', cached_source_path])
    elif text:
        log.debug('Received value: %s', text)
        # Key material is fed to apt-key on stdin.
        cmd.extend(['add', '-'])
        kwargs.update({'stdin': text})
    elif keyserver:
        if not keyid:
            error_msg = 'No keyid or keyid too short for keyserver: {0}'.format(keyserver)
            raise SaltInvocationError(error_msg)
        cmd.extend(['adv', '--batch', '--keyserver', keyserver, '--recv', keyid])
    elif keyid:
        # A keyid alone is not enough; a keyserver is required to fetch it.
        error_msg = 'No keyserver specified for keyid: {0}'.format(keyid)
        raise SaltInvocationError(error_msg)
    else:
        raise TypeError('{0}() takes at least 1 argument (0 given)'.format(add_repo_key.__name__))

    # If the keyid is provided or determined, check it against the existing
    # repo key ids to determine whether it needs to be imported.
    if keyid:
        for current_keyid in current_repo_keys:
            # Suffix comparison: existing ids may be full fingerprints that
            # end with the short keyid.
            if current_keyid[-(len(keyid)):] == keyid:
                log.debug("The keyid '%s' already present: %s", keyid, current_keyid)
                return True

    cmd_ret = _call_apt(cmd, **kwargs)

    if cmd_ret['retcode'] == 0:
        return True
    log.error('Unable to add repo key: %s', cmd_ret['stderr'])
    return False
def for_me(conditions, myself):
    """Am I among the intended audiences"""
    # No audience restriction at all means the assertion applies to anyone.
    if not conditions.audience_restriction:
        return True

    for restriction in conditions.audience_restriction:
        if not restriction.audience:
            continue
        # Match my entity id against each listed audience value.
        hits = [audience for audience in restriction.audience
                if audience.text.strip() == myself]
        if hits:
            return True
    return False
def _check_series_convert_timestamps_internal(s, timezone):
    """Convert a tz-naive timestamp in the specified timezone or local
    timezone to UTC normalized for Spark internal storage

    :param s: a pandas.Series
    :param timezone: the timezone to convert. if None then use local timezone
    :return pandas.Series where if it is a timestamp, has been UTC normalized
        without a time zone
    """
    from pyspark.sql.utils import require_minimum_pandas_version
    require_minimum_pandas_version()

    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype

    # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
    if is_datetime64tz_dtype(s.dtype):
        # Already tz-aware: just normalize to UTC.
        return s.dt.tz_convert('UTC')

    if is_datetime64_dtype(s.dtype):
        # When tz_localize a tz-naive timestamp, the result is ambiguous if
        # the tz-naive timestamp falls in the hour when clocks are set back
        # at the end of daylight saving time (e.g. America/New_York
        # 2015-11-01 01:30 can be either -0400 dst or -0500 standard time).
        # We explicitly choose standard time (ambiguous=False), which
        # matches the default behavior of pytz's ``tz.localize``:
        #   >>> ts.dt.tz_localize(tz, ambiguous=True)
        #   0   2015-11-01 01:30:00-04:00
        #   >>> ts.dt.tz_localize(tz, ambiguous=False)
        #   0   2015-11-01 01:30:00-05:00
        #   >>> str(tz.localize(t))
        #   '2015-11-01 01:30:00-05:00'
        tz = timezone or _get_local_timezone()
        return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC')

    # Not a timestamp column: leave untouched.
    return s
def select(self, key, where=None, start=None, stop=None, columns=None,
           iterator=False, chunksize=None, auto_close=False, **kwargs):
    """Retrieve pandas object stored in file, optionally based on where
    criteria

    Parameters
    ----------
    key : object
    where : list of Term (or convertible) objects, optional
    start : integer (defaults to None), row number to start selection
    stop : integer (defaults to None), row number to stop selection
    columns : a list of columns that if not None, will limit the return
        columns
    iterator : boolean, return an iterator, default False
    chunksize : nrows to include in iteration, return an iterator
    auto_close : boolean, should automatically close the store when
        finished, default is False

    Returns
    -------
    The selected object
    """
    group = self.get_node(key)
    if group is None:
        raise KeyError('No object named {key} in the file'.format(key=key))

    # create the storer and axes
    where = _ensure_term(where, scope_level=1)
    s = self._create_storer(group)
    s.infer_axes()

    # function to call on iteration; closes over the storer and the
    # requested column subset so the iterator only supplies row bounds.
    def func(_start, _stop, _where):
        return s.read(start=_start, stop=_stop, where=_where,
                      columns=columns)

    # create the iterator; TableIterator handles both the direct-read and
    # chunked cases and returns the final object via get_result().
    it = TableIterator(self, s, func, where=where, nrows=s.nrows,
                       start=start, stop=stop, iterator=iterator,
                       chunksize=chunksize, auto_close=auto_close)

    return it.get_result()
def from_code_array(self):
    """Replaces everything in pys_file from code_array"""
    for section_key, write_section in self._section2writer.items():
        self.pys_file.write(section_key)
        write_section()
        # Stop early if the (abortable) file object reports an abort.
        try:
            aborted = self.pys_file.aborted
        except AttributeError:
            # pys_file is not opened via fileio.BZAopen
            aborted = False
        if aborted:
            break

    if config["font_save_enabled"]:
        # Clean up fonts used info
        self.fonts_used = []
def tuples2ids(tuples, ids):
    """Update `ids` according to `tuples`, e.g. (3, 0, X), (4, 0, X)..."""
    for command in tuples:
        code = command[0]
        if code == 6 and command[2]:
            # Replace the whole id list with the provided one.
            ids = command[2]
        elif code == 5:
            # Clear the list in place.
            ids[:] = []
        elif code == 4 and command[1] and command[1] not in ids:
            # Link: add the id if not already present.
            ids.append(command[1])
        elif code == 3 and command[1] and command[1] in ids:
            # Unlink: drop the id if present.
            ids.remove(command[1])
    return ids
def area(p):
    """Area of a polygone

    :param p: list of the points taken in any orientation,
              p[0] can differ from p[-1]
    :returns: area (signed: negative for clockwise orientation)
    :complexity: linear
    """
    # Shoelace formula: sum the cross products of consecutive vertices.
    # For i == 0, p[i - 1] wraps around to the last vertex.
    total = 0
    for i, (x, y) in enumerate(p):
        prev_x, prev_y = p[i - 1]
        total += prev_x * y - x * prev_y
    return total / 2.
def are_equal(self, sp1, sp2):
    """True if there is some overlap in composition between the species

    Args:
        sp1: First species. A dict of {specie/element: amt} as per the
            definition in Site and PeriodicSite.
        sp2: Second species. A dict of {specie/element: amt} as per the
            definition in Site and PeriodicSite.

    Returns:
        True always
    """
    elems1 = set(sp1.elements)
    elems2 = set(sp2.elements)
    # Equal when either element set is contained in the other.
    return elems1 <= elems2 or elems2 <= elems1
def check_metadata_link(self, ds):
    '''Checks if metadata link is formed in a rational manner

    :param netCDF4.Dataset ds: An open netCDF dataset
    '''
    # Nothing to check (and nothing to report) when the attribute is absent.
    if not hasattr(ds, u'metadata_link'):
        return

    meta_link = getattr(ds, 'metadata_link')
    msgs = []
    if 'http' not in meta_link:
        msgs.append('Metadata URL should include http:// or https://')
    return Result(BaseCheck.LOW, len(msgs) == 0, 'metadata_link_valid', msgs)