signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def getUncleByBlock(self, block_identifier, uncle_index):
    """`eth_getUncleByBlockHashAndIndex` / `eth_getUncleByBlockNumberAndIndex`

    Resolve the appropriate RPC method from the kind of block identifier,
    fetch the uncle at *uncle_index*, and raise ``BlockNotFound`` when the
    node returns a null result.
    """
    rpc_method = select_method_for_block_identifier(
        block_identifier,
        if_predefined='eth_getUncleByBlockNumberAndIndex',
        if_hash='eth_getUncleByBlockHashAndIndex',
        if_number='eth_getUncleByBlockNumberAndIndex',
    )
    uncle = self.web3.manager.request_blocking(
        rpc_method,
        [block_identifier, uncle_index],
    )
    if uncle is None:
        raise BlockNotFound(
            f"Uncle at index: {uncle_index} of block with id: {block_identifier} not found."
        )
    return uncle
def job(self, job_id):
    """Fetch a single MapReduce job resource, identified by its job id.

    :param str job_id: The job id
    :returns: API response object with JSON data
    :rtype: :py:class:`yarn_api_client.base.Response`
    """
    return self.request('/ws/v1/history/mapreduce/jobs/{jobid}'.format(jobid=job_id))
def remove(self, iterable, data=None, index=0):
    """Remove an element from the trie.

    Args:
        iterable (hashable): key used to find what is to be removed
        data (object): data associated with the key; when None, all data
            stored under the key is discarded
        index (int): index into ``iterable`` for the current recursion step

    Returns:
        bool: True if it was removed, False if it was not removed
    """
    if index == len(iterable):
        # Reached the node addressed by the full key.
        if not self.is_terminal:
            return False
        # BUGFIX: compare against None instead of truthiness so falsy
        # payloads (0, "", empty tuple, ...) are removed individually
        # rather than silently wiping every entry stored under the key.
        if data is not None:
            self.data.remove(data)
            if len(self.data) == 0:
                self.is_terminal = False
        else:
            self.data.clear()
            self.is_terminal = False
        return True
    elif iterable[index] in self.children:
        # Recurse down the child edge labelled with the next key element.
        return self.children[iterable[index]].remove(iterable, index=index + 1, data=data)
    else:
        return False
async def post ( self , public_key , coinid ) : """Writes content to blockchain Accepts : Query string args : - " public _ key " - str - " coin id " - str Request body arguments : - message ( signed dict as json ) : - " cus " ( content ) - str - " description " - str - " read _ access " ( price for read access ) - int - " write _ access " ( price for write access ) - int - signature Returns : - dictionary with following fields : - " owneraddr " - str - " description " - str - " read _ price " - int - " write _ price " - int Verified : True"""
logging . debug ( "[+] -- Post content debugging. " ) # if settings . SIGNATURE _ VERIFICATION : # super ( ) . verify ( ) # Define genesis variables if coinid in settings . bridges . keys ( ) : # Define bridge url owneraddr = self . account . validator [ coinid ] ( public_key ) # Define owner address logging . debug ( "\n\n Owner address" ) logging . debug ( coinid ) logging . debug ( owneraddr ) self . account . blockchain . setendpoint ( settings . bridges [ coinid ] ) else : self . set_status ( 400 ) self . write ( { "error" : 400 , "reason" : "Invalid coinid" } ) raise tornado . web . Finish # Check if account exists account = await self . account . getaccountdata ( public_key = public_key ) logging . debug ( "\n Users account " ) logging . debug ( account ) if "error" in account . keys ( ) : self . set_status ( account [ "error" ] ) self . write ( account ) raise tornado . web . Finish # Get message from request try : data = json . loads ( self . request . body ) except : self . set_status ( 400 ) self . write ( { "error" : 400 , "reason" : "Unexpected data format. JSON required" } ) raise tornado . web . Finish if isinstance ( data [ "message" ] , str ) : message = json . loads ( data [ "message" ] ) elif isinstance ( data [ "message" ] , dict ) : message = data [ "message" ] cus = message . get ( "cus" ) description = message . get ( "description" ) read_access = message . get ( "read_access" ) write_access = message . get ( "write_access" ) if sys . getsizeof ( cus ) > 1000000 : self . set_status ( 403 ) self . write ( { "error" : 400 , "reason" : "Exceeded the content size limit." } ) raise tornado . web . Finish # Set fee fee = await billing . upload_content_fee ( cus = cus , owneraddr = owneraddr , description = description ) if "error" in fee . keys ( ) : self . set_status ( fee [ "error" ] ) self . write ( fee ) raise tornado . web . 
Finish # Send request to bridge data = { "cus" : cus , "owneraddr" : owneraddr , "description" : description , "read_price" : read_access , "write_price" : write_access } response = await self . account . blockchain . makecid ( ** data ) logging . debug ( "\n Bridge makecid" ) logging . debug ( response ) if "error" in response . keys ( ) : self . set_status ( 400 ) self . write ( response ) raise tornado . web . Finish # Write cid to database db_content = await self . account . setuserscontent ( public_key = public_key , hash = response [ "cus_hash" ] , coinid = coinid , txid = response [ "result" ] [ "txid" ] , access = "content" ) logging . debug ( "\n Database content" ) logging . debug ( db_content ) response = { i : data [ i ] for i in data if i != "cus" } self . write ( response )
def updateFGDBfromSDE(fgdb, sde, logger=None):
    """Update a file geodatabase from an SDE geodatabase connection.

    Loops through the file geodatabase feature classes and looks for matches
    in the SDE database. If there is a match, it does a schema check and then
    updates the data.

    fgdb: file geodatabase
    sde: sde geodatabase connection
    logger: agrc.logging.Logger (optional)
    returns: (String[], String[]) - the list of errors and the list of
        updated dataset names (accumulated in the module-level ``changes``)
    """
    global changes

    def log(msg):
        # Route messages to the optional logger, else stdout (Python 2 print).
        if logger:
            logger.logMsg(msg)
        else:
            print msg

    def updateData(isTable):
        # Copies rows for the current dataset ``f`` (closure variable) from
        # ``sdeFC`` after a schema compatibility test.
        try:
            # validate that there was not a schema change
            arcpy.env.workspace = fgdb
            layer = sdeFC + '_Layer'
            # '1 = 2' selects zero rows: the layer exists only for schema testing.
            if not isTable:
                arcpy.MakeFeatureLayer_management(sdeFC, layer, '1 = 2')
            else:
                arcpy.MakeTableView_management(sdeFC, layer, '1 = 2')
            try:
                # Append in 'TEST' mode raises 000466 on field-schema mismatch.
                arcpy.Append_management(layer, f, 'TEST')
                log('schema test passed')
                passed = True
            except arcpy.ExecuteError as e:
                if '000466' in e.message:
                    log(e.message)
                    msg = 'schema change detected'
                    msg += '\n\n{0}'.format(getFieldDifferences(sdeFC, f))
                    errors.append('{}: {}'.format(f, msg))
                    log(msg)
                    passed = False
                    # NOTE(review): early return skips Delete_management(layer)
                    # cleanup below -- confirm the leaked layer is intended.
                    return passed
                else:
                    raise e
            arcpy.Delete_management(layer)
            log('checking for changes...')
            if checkForChanges(f, sdeFC, isTable) and passed:
                log('updating data...')
                arcpy.TruncateTable_management(f)
                # edit session required for data that participates in relationships
                editSession = arcpy.da.Editor(fgdb)
                editSession.startEditing(False, False)
                editSession.startOperation()
                fields = [fld.name for fld in arcpy.ListFields(f)]
                fields = filter_fields(fields)
                if not isTable:
                    fields.append('SHAPE@')
                    outputSR = arcpy.Describe(f).spatialReference
                else:
                    outputSR = None
                # Copy all rows, projecting geometries into the target's
                # spatial reference; ORDER BY keeps OBJECTID order stable.
                with arcpy.da.InsertCursor(f, fields) as icursor, \
                        arcpy.da.SearchCursor(sdeFC, fields,
                                              sql_clause=(None, 'ORDER BY OBJECTID'),
                                              spatial_reference=outputSR) as cursor:
                    for row in cursor:
                        icursor.insertRow(row)
                editSession.stopOperation()
                editSession.stopEditing(True)
                changes.append(f.upper())
            else:
                log('no changes found')
        except:
            # Best-effort per-dataset update: record the failure and continue.
            errors.append('Error updating: {}'.format(f))
            if logger:
                logger.logError()

    log('** Updating {} from {}'.format(fgdb, sde))
    errors = []
    # loop through local feature classes
    arcpy.env.workspace = fgdb
    fcs = arcpy.ListFeatureClasses() + arcpy.ListTables()
    totalFcs = len(fcs)
    i = 0
    for f in fcs:
        i = i + 1
        log('{} of {} | {}'.format(i, totalFcs, f))
        found = False
        # search for match in stand-alone feature classes
        arcpy.env.workspace = sde
        matches = arcpy.ListFeatureClasses('*.{}'.format(f)) + arcpy.ListTables('*.{}'.format(f))
        if matches is not None and len(matches) > 0:
            match = matches[0]
            sdeFC = join(sde, match)
            found = True
        else:
            # search in feature datasets
            datasets = arcpy.ListDatasets()
            if len(datasets) > 0:
                # loop through datasets
                for ds in datasets:
                    matches = arcpy.ListFeatureClasses('*.{}'.format(f), None, ds)
                    if matches is not None and len(matches) > 0:
                        match = matches[0]
                        sdeFC = join(sde, match)
                        found = True
                        break
        if not found:
            msg = 'no match found in sde'
            errors.append("{}: {}".format(f, msg))
            log(msg)
            continue
        updateData(arcpy.Describe(join(fgdb, f)).datasetType == 'Table')
    return (errors, changes)
def default_ccache_dir() -> str:
    """:return: ccache directory for the current platform"""
    # Honour an explicit CCACHE_DIR so the cache can be shared across containers.
    if 'CCACHE_DIR' in os.environ:
        shared_dir = os.path.realpath(os.environ['CCACHE_DIR'])
        try:
            os.makedirs(shared_dir, exist_ok=True)
            return shared_dir
        except PermissionError:
            logging.info('Unable to make dirs at %s, falling back to local temp dir', shared_dir)
    # In osx tmpdir is not mountable by default
    import platform
    if platform.system() == 'Darwin':
        darwin_dir = "/tmp/_mxnet_ccache"
        os.makedirs(darwin_dir, exist_ok=True)
        return darwin_dir
    return os.path.join(os.path.expanduser("~"), ".ccache")
def functions(self, value):
    """Setter for **self.__functions** attribute.

    :param value: Attribute value.
    :type value: tuple
    """
    if value is not None:
        # Validation deliberately uses assert, matching the module's style.
        assert type(value) is tuple, "'{0}' attribute: '{1}' type is not 'tuple'!".format("functions", value)
        for item in value:
            assert type(item) is LibraryHook, "'{0}' attribute: '{1}' type is not 'LibraryHook'!".format("functions", item)
    self.__functions = value
def generate_common_reg_log_config(json_value):
    """Generate common logtail config from loaded json value

    :param json_value: config description loaded from JSON
    :return: a ``CommonRegLogConfigDetail`` built from the JSON fields
    """
    # Work on a copy so popping consumed keys does not mutate the caller's dict.
    detail = copy.deepcopy(json_value['inputDetail'])
    output_detail = json_value['outputDetail']

    log_sample = json_value.get('logSample', '')
    config_name = json_value['configName']
    logstore_name = output_detail['logstoreName']
    endpoint = output_detail.get('endpoint', '')

    # Mandatory input fields raise KeyError when missing; the regex-related
    # ones fall back to an empty string and logType to None.
    log_path = detail['logPath']
    file_pattern = detail['filePattern']
    time_format = detail['timeFormat']
    log_begin_regex = detail.get('logBeginRegex', '')
    log_parse_regex = detail.get('regex', '')
    reg_keys = detail['key']
    topic_format = detail['topicFormat']
    filter_keys = detail['filterKey']
    filter_keys_reg = detail['filterRegex']
    log_type = detail.get('logType')

    # Everything not consumed above is forwarded verbatim as extra kwargs.
    for consumed in ('logPath', 'filePattern', 'timeFormat', 'logBeginRegex', 'regex',
                     'key', 'topicFormat', 'filterKey', 'filterRegex', 'logType'):
        detail.pop(consumed, None)

    return CommonRegLogConfigDetail(config_name, logstore_name, endpoint, log_path,
                                    file_pattern, time_format, log_begin_regex,
                                    log_parse_regex, reg_keys, topic_format,
                                    filter_keys, filter_keys_reg, log_sample,
                                    log_type, **detail)
async def _read_next ( self ) : """Read next row"""
row = await self . _result . _read_rowdata_packet_unbuffered ( ) row = self . _conv_row ( row ) return row
def add_point_feature(self, resnum, feat_type=None, feat_id=None, qualifiers=None):
    """Add a feature to the features list describing a single residue.

    Args:
        resnum (int): Protein sequence residue number
        feat_type (str, optional): Optional description of the feature type (ie. 'catalytic residue')
        feat_id (str, optional): Optional ID of the feature type (ie. 'TM1')
        qualifiers (dict, optional): Qualifiers forwarded to the created SeqFeature

    Raises:
        ValueError: if a feature file is already associated with this sequence
    """
    if self.feature_file:
        raise ValueError('Feature file associated with sequence, please remove file association to append '
                         'additional features.')
    feature_type = feat_type or 'Manually added protein sequence single residue feature'
    # SeqFeature locations are 0-based, half-open: residue N covers [N-1, N).
    location = FeatureLocation(ExactPosition(resnum - 1), ExactPosition(resnum))
    self.features.append(SeqFeature(location=location, type=feature_type,
                                    id=feat_id, qualifiers=qualifiers))
def run(self):
    """Run the model: initialize it, then loop forever processing incoming
    messages, stepping the model, and publishing output variables, until
    the model state becomes "quit"."""
    model = self.model
    configfile = self.configfile
    interval = self.interval
    sockets = self.sockets
    model.initialize(configfile)
    if model.state == 'pause':
        logger.info("model initialized and started in pause mode, waiting for requests")
    else:
        logger.info("model started and initialized, running")
    # Optionally register with a tracker, unregistering at interpreter exit.
    if self.tracker:
        self.register()
        atexit.register(self.unregister)
    self.process_incoming()
    # Keep on counting indefinitely
    counter = itertools.count()
    logger.info("Entering timeloop...")
    for i in counter:
        while model.state == "pause":
            # keep waiting for messages when paused
            # process_incoming should set model.state to play
            self.process_incoming()
        else:
            # otherwise process messages once and continue
            # (while/else: the else always runs here since the loop has no
            # break, so messages are processed at least once per iteration)
            self.process_incoming()
        if model.state == "quit":
            break
        # lookup dt or use -1 (default)
        dt = model.get_time_step() or -1
        model.update(dt)
        # check counter, if not a multiple of interval, skip this step
        if i % interval:
            continue
        # Publish every configured output variable for this step.
        for key in self.output_vars:
            value = model.get_var(key)
            metadata = {'name': key, 'iteration': i}
            # 4ms for 1M doubles
            logger.debug("sending {}".format(metadata))
            if 'pub' in sockets:
                send_array(sockets['pub'], value, metadata=metadata)
    logger.info("Finalizing...")
    model.finalize()
async def create_session(self, **kwargs):
    """Creates an :class:`aiohttp.ClientSession`

    Override this or call it with ``kwargs`` to use other
    :mod:`aiohttp` functionality not covered by
    :class:`~.InfluxDBClient.__init__`

    :param kwargs: extra options merged into ``self.opts`` and forwarded
        to :class:`aiohttp.ClientSession`.
    """
    self.opts.update(kwargs)
    self._session = aiohttp.ClientSession(**self.opts, loop=self._loop)
    # Redis is optional: connect only when options were supplied and the
    # aioredis package imported successfully; otherwise just warn.
    if self.redis_opts:
        if aioredis:
            self._redis = await aioredis.create_redis(**self.redis_opts,
                                                      loop=self._loop)
        else:
            warnings.warn(no_redis_warning)
def validate_files(directory, files_to_merge):
    """Assert that every file in ``files_to_merge`` exists inside ``directory``.

    ``files_to_merge`` is a list of file names (no directories); ``directory``
    is the directory (a path object from path.py) in which the files should
    appear. Each existing file is additionally cleaned via ``clean_pofile``.

    Raises an Exception if any of the files are not in dir.
    """
    for name in files_to_merge:
        candidate = directory.joinpath(name)
        if not candidate.exists():
            raise Exception("I18N: Cannot generate because file not found: {0}".format(candidate))
        # clean sources
        clean_pofile(candidate)
def iter_options(self):
    """Iterates configuration sections groups options.

    Yields ``(section_name, key, value)`` triples for every option in
    every section.
    """
    for section in self.sections:
        section_name = str(section)
        yield from ((section_name, key, value)
                    for key, value in section._get_options())
def peak_memory_usage():
    """Return peak memory usage in MB"""
    if sys.platform.startswith('win'):
        # Windows: psutil exposes the peak working set in bytes.
        return psutil.Process().memory_info().peak_wset / 1024 / 1024
    # Unix: ru_maxrss is in KB on Linux but in bytes on macOS.
    rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    divisor = 1024 * 1024 if sys.platform == 'darwin' else 1024
    return rss / divisor
def alt_parser(patterns):
    """Build a parser able to handle multiple different patterns, finding
    stuff in text while removing matches that overlap."""
    from reparse.util import remove_lower_overlapping

    def firsts(pairs):
        return [pair[0] for pair in pairs]

    def seconds(pairs):
        return [pair[1] for pair in pairs]

    def parse(line):
        # Collect (order, results) per matching pattern; sorting by order
        # lets higher-priority results win when overlaps are removed.
        hits = []
        for pattern in patterns:
            results = pattern.scan(line)
            if results and any(results):
                hits.append((pattern.order, results))
        return firsts(reduce(remove_lower_overlapping, seconds(sorted(hits)), []))

    return parse
def as_requirement(self):
    """Return a ``Requirement`` that matches this distribution exactly"""
    # Standard (PEP 440) versions round-trip with '=='; anything else needs
    # the arbitrary-equality '===' operator.
    if isinstance(self.parsed_version, packaging.version.Version):
        operator = '=='
    else:
        operator = '==='
    return Requirement.parse('%s%s%s' % (self.project_name, operator, self.parsed_version))
def error(self, s, pos):
    """Show text and a caret under that. For example:
    x = 2y + z

    :param s: the input text being scanned
    :param pos: index into ``s`` where the lexical error occurred

    Prints the tokens collected so far (``self.rv``) and exits.
    """
    print("Lexical error:")
    print("%s" % s[:pos + 10])  # + 10 for trailing context
    # NOTE(review): caret lands at column pos-1, one left of the reported
    # position -- looks like an off-by-one; confirm the intended column.
    print("%s^" % (" " * (pos - 1)))
    for t in self.rv:
        print(t)
    raise SystemExit
def explore(self):
    """INTERACTIVE exploration of source capabilities.

    Will use sitemap URI taken either from explicit self.sitemap_name or
    derived from the mappings supplied.
    """
    # Where do we start? Build options in starts which has entries
    # that are a pair comprised of the uri and a list of acceptable
    # capabilities
    starts = []
    if (self.sitemap_name is not None):
        print("Starting from explicit --sitemap %s" % (self.sitemap_name))
        starts.append(XResource(self.sitemap_name))
    elif (len(self.mapper) > 0):
        uri = self.mapper.default_src_uri()
        (scheme, netloc, path, params, query, fragment) = urlparse(uri)
        if (not scheme and not netloc):
            if (os.path.isdir(path)):
                # have a dir, look for 'likely' file names
                print("Looking for capability documents in local directory %s" % (path))
                for name in ['resourcesync', 'capabilities.xml',
                             'resourcelist.xml', 'changelist.xml']:
                    file = os.path.join(path, name)
                    if (os.path.isfile(file)):
                        starts.append(XResource(file))
                if (len(starts) == 0):
                    raise ClientFatalError(
                        "No likely capability files found in local directory %s" % (path))
            else:
                # local file, might be anything (or not exist)
                print("Starting from local file %s" % (path))
                starts.append(XResource(path))
        else:
            # remote, can't tell whether we have a sitemap or a server name or something
            # else, build list of options depending on whether there is a path and whether
            # there is an extension/name
            well_known = urlunparse([scheme, netloc, '/.well-known/resourcesync', '', '', ''])
            if (not path):
                # root, just look for .well-known
                starts.append(XResource(well_known, ['capabilitylist', 'capabilitylistindex']))
            else:
                starts.append(XResource(uri))
                starts.append(XResource(well_known, ['capabilitylist', 'capabilitylistindex']))
            print("Looking for discovery information based on mappings")
    else:
        raise ClientFatalError(
            "No source information (server base uri or capability uri) specified, use -h for help")
    # Have list of one or more possible starting point, try them in turn
    try:
        for start in starts:
            # For each starting point we create a fresh history
            history = [start]
            input = None
            # Depth-first walk: explore_uri may return a follow-up resource,
            # which is pushed with the current one so the user can backtrack.
            while (len(history) > 0):
                print()
                xr = history.pop()
                new_xr = self.explore_uri(xr, len(history) > 0)
                if (new_xr):
                    # Add current and new to history
                    history.append(xr)
                    history.append(new_xr)
    except ExplorerQuit:
        pass  # expected way to exit
    print("\nresync-explorer done, bye...\n")
def outputs(self):
    """List of layers containing outputs from the IF.

    :returns: A list of vector layers.
    :rtype: list
    """
    computed = self._outputs()
    expected_count = len(self._output_layer_expected)
    if len(computed) != expected_count:
        # This will never happen in production: Travis will fail before.
        # If it happens, it's an error from InaSAFE core developers.
        raise Exception(
            'The computed count of output layers is wrong. It should be '
            '{expected} but the count is {count}.'.format(
                expected=expected_count, count=len(computed)))
    return computed
def fmap(self, f: Callable[[T], B]) -> 'List[B]':
    """doufo.List.fmap: map ``f`` over this ``List``.

    Args:
        f (Callable[[T], B]): any callable function

    Returns:
        List[B]: a ``List`` of the objects produced by ``f``.
    """
    mapped = [f(item) for item in self.unbox()]
    return List(mapped)
def connect(self, **kwargs):
    """Connect to Google Compute Engine.

    :param kwargs: extra keyword arguments forwarded to the libcloud GCE
        driver constructor.
    :raises ComputeEngineManagerException: when the driver cannot be created.
    """
    try:
        self.gce = get_driver(Provider.GCE)(
            self.user_id, self.key, project=self.project, **kwargs)
    # BUGFIX: the original bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt and discarded the underlying cause; catch Exception
    # and chain it so the real failure stays visible in the traceback.
    except Exception as exc:
        raise ComputeEngineManagerException(
            "Unable to connect to Google Compute Engine.") from exc
def is_isolated(self, p_id):
    """Returns True iff the given node has no incoming or outgoing edges."""
    has_incoming = len(self.incoming_neighbors(p_id)) > 0
    has_outgoing = len(self.outgoing_neighbors(p_id)) > 0
    return not (has_incoming or has_outgoing)
def p_poke2(p):
    """statement : POKE numbertype expr COMMA expr
    | POKE LP numbertype expr COMMA expr RP
    """
    # The optional LP shifts every token one slot right; the plain form is
    # detected by slot 2 already holding the numbertype Symbol (or None on
    # a prior parse error), so i points at the numbertype slot.
    i = 2 if isinstance(p[2], Symbol) or p[2] is None else 3
    # Propagate earlier parse errors: either operand expr may be None.
    if p[i + 1] is None or p[i + 3] is None:
        p[0] = None
        return
    # Address is cast to unsigned integer; value is cast to the declared type.
    p[0] = make_sentence('POKE',
                         make_typecast(TYPE.uinteger, p[i + 1], p.lineno(i + 2)),
                         make_typecast(p[i], p[i + 3], p.lineno(i + 3)))
def _ref ( pname , conf = None , configurable = None , cname = None , path = None , history = 0 ) : """Resolve a parameter value . : param Configuration conf : configuration to use . : param str pname : parameter name . : param Configurable configurable : configurable . : param str cname : category name . : param str path : conf path . : param int history : parameter history research . : return : parameter . : raises : ParserError if conf and configurable are None ."""
result = None if configurable is not None : kwargs = { } if conf is not None : kwargs [ 'conf' ] = conf if path is not None : kwargs [ 'paths' ] = path if conf is None : conf = configurable . getconf ( ** kwargs ) if conf is None : raise ParserError ( 'Wrong ref parameters. Conf and configurable are both None.' ) result = conf . param ( pname = pname , cname = cname , history = history ) return result
def access(path, mode):
    '''
    .. versionadded:: 2014.1.0

    Test whether the Salt process has the specified access to the file. One of
    the following modes must be specified:

    .. code-block:: text

        f: Test the existence of the path
        r: Test the readability of the path
        w: Test the writability of the path
        x: Test whether the path can be executed

    CLI Example:

    .. code-block:: bash

        salt '*' file.access /path/to/file f
        salt '*' file.access /path/to/file x
    '''
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError('Path to link must be absolute.')

    modes = {'f': os.F_OK, 'r': os.R_OK, 'w': os.W_OK, 'x': os.X_OK}

    if mode in modes:
        return os.access(path, modes[mode])
    # FIX: dict.values() works on both py2 and py3; the six.itervalues()
    # shim added an external dependency for no benefit here.
    elif mode in modes.values():
        # A raw os.*_OK constant was passed directly.
        return os.access(path, mode)
    else:
        raise SaltInvocationError('Invalid mode specified.')
def add_view(self, request, form_url='', extra_context=None):
    """The ``add`` admin view for the :class:`Page <pages.models.Page>`."""
    # The caller-supplied extra_context is deliberately replaced: the page
    # add form always needs the language context.
    context = {
        'language': get_language_from_request(request),
        'page_languages': settings.PAGE_LANGUAGES,
    }
    return super(PageAdmin, self).add_view(request, form_url, context)
def _parse_weights ( weight_args , default_weight = 0.6 ) : """Parse list of weight assignments ."""
weights_dict = { } r_group_weight = default_weight for weight_arg in weight_args : for weight_assignment in weight_arg . split ( ',' ) : if '=' not in weight_assignment : raise ValueError ( 'Invalid weight assignment: {}' . format ( weight_assignment ) ) key , value = weight_assignment . split ( '=' , 1 ) value = float ( value ) if key == 'R' : r_group_weight = value elif key == '*' : default_weight = value elif hasattr ( Atom , key ) : weights_dict [ Atom ( key ) ] = value else : raise ValueError ( 'Invalid element: {}' . format ( key ) ) return weights_dict , r_group_weight , default_weight
def process_file(self, filename):
    """Processing one file.

    In dry-run mode the script is echoed line by line with trailing
    newlines stripped; otherwise it is executed via ``process_script``
    and its output lines are yielded.
    """
    if self.config.dry_run:
        if not self.config.internal:
            self.logger.info("Dry run mode for script %s", filename)
        with open(filename) as handle:
            for raw in handle:
                yield raw[:-1] if raw[-1] == '\n' else raw
    else:
        if not self.config.internal:
            self.logger.info("Running script %s", filename)
        yield from self.process_script(filename)
def evalSamples ( self , x ) : '''Evalautes the samples of quantity of interest and its gradient ( if supplied ) at the given values of the design variables : param iterable x : values of the design variables , this is passed as the first argument to the function fqoi : return : ( values of the quantity of interest , values of the gradient ) : rtype : Tuple'''
# Make sure dimensions are correct # u _ sample _ dimensions = self . _ processDimensions ( ) self . _N_dv = len ( _makeIter ( x ) ) if self . verbose : print ( 'Evaluating surrogate' ) if self . surrogate is None : def fqoi ( u ) : return self . fqoi ( x , u ) def fgrad ( u ) : return self . jac ( x , u ) jac = self . jac else : fqoi , fgrad , surr_jac = self . _makeSurrogates ( x ) jac = surr_jac u_samples = self . _getParameterSamples ( ) if self . verbose : print ( 'Evaluating quantity of interest at samples' ) q_samples , grad_samples = self . _evalSamples ( u_samples , fqoi , fgrad , jac ) return q_samples , grad_samples
def terminal_attribute_iterator(self, mapped_class=None, key=None):
    """Returns an iterator over all terminal mapped attributes for the
    given mapped class and attribute key.

    See :method:`get_attribute_map` for details.
    """
    # Lazily filter the full attribute stream down to TERMINAL kinds.
    yield from (attr
                for attr in self._attribute_iterator(mapped_class, key)
                if attr.kind == RESOURCE_ATTRIBUTE_KINDS.TERMINAL)
def parse(self, fo):
    """Convert ChIPMunk output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing ChIPMunk output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    # Section being parsed looks like:
    #   KDIC|6.12...
    #   A|517.9 42.9 ...
    #   C|5.9 4.9 ...
    #   G|340.9 943.9 ...
    #   T|134.9 7.9 ...
    line = fo.readline()
    if not line:
        return []
    # Skip ahead to the A| row that starts the count matrix.
    while not line.startswith("A|"):
        line = fo.readline()
    rows = []
    for _ in range(4):
        rows.append([float(x) for x in line.strip().split("|")[1].split(" ")])
        line = fo.readline()
    # Transpose from 4 rows (A, C, G, T) to one row per motif position.
    matrix = [[rows[base][pos] for base in range(4)] for pos in range(len(rows[0]))]
    motif = Motif(matrix)
    motif.id = "ChIPMunk_w%s" % len(motif)
    return [motif]
def import_image ( DryRun = None , Description = None , DiskContainers = None , LicenseType = None , Hypervisor = None , Architecture = None , Platform = None , ClientData = None , ClientToken = None , RoleName = None ) : """Import single or multi - volume disk images or EBS snapshots into an Amazon Machine Image ( AMI ) . For more information , see Importing a VM as an Image Using VM Import / Export in the VM Import / Export User Guide . See also : AWS API Documentation : example : response = client . import _ image ( DryRun = True | False , Description = ' string ' , DiskContainers = [ ' Description ' : ' string ' , ' Format ' : ' string ' , ' Url ' : ' string ' , ' UserBucket ' : { ' S3Bucket ' : ' string ' , ' S3Key ' : ' string ' ' DeviceName ' : ' string ' , ' SnapshotId ' : ' string ' LicenseType = ' string ' , Hypervisor = ' string ' , Architecture = ' string ' , Platform = ' string ' , ClientData = { ' UploadStart ' : datetime ( 2015 , 1 , 1 ) , ' UploadEnd ' : datetime ( 2015 , 1 , 1 ) , ' UploadSize ' : 123.0, ' Comment ' : ' string ' ClientToken = ' string ' , RoleName = ' string ' : type DryRun : boolean : param DryRun : Checks whether you have the required permissions for the action , without actually making the request , and provides an error response . If you have the required permissions , the error response is DryRunOperation . Otherwise , it is UnauthorizedOperation . : type Description : string : param Description : A description string for the import image task . : type DiskContainers : list : param DiskContainers : Information about the disk containers . ( dict ) - - Describes the disk container object for an import image task . Description ( string ) - - The description of the disk image . Format ( string ) - - The format of the disk image being imported . Valid values : RAW | VHD | VMDK | OVA Url ( string ) - - The URL to the Amazon S3 - based disk image being imported . The URL can either be a https URL ( https : / / . . 
) or an Amazon S3 URL ( s3 : / / . . ) UserBucket ( dict ) - - The S3 bucket for the disk image . S3Bucket ( string ) - - The name of the S3 bucket where the disk image is located . S3Key ( string ) - - The file name of the disk image . DeviceName ( string ) - - The block device mapping for the disk . SnapshotId ( string ) - - The ID of the EBS snapshot to be used for importing the snapshot . : type LicenseType : string : param LicenseType : The license type to be used for the Amazon Machine Image ( AMI ) after importing . Note : You may only use BYOL if you have existing licenses with rights to use these licenses in a third party cloud like AWS . For more information , see Prerequisites in the VM Import / Export User Guide . Valid values : AWS | BYOL : type Hypervisor : string : param Hypervisor : The target hypervisor platform . Valid values : xen : type Architecture : string : param Architecture : The architecture of the virtual machine . Valid values : i386 | x86_64 : type Platform : string : param Platform : The operating system of the virtual machine . Valid values : Windows | Linux : type ClientData : dict : param ClientData : The client - specific data . UploadStart ( datetime ) - - The time that the disk upload starts . UploadEnd ( datetime ) - - The time that the disk upload ends . UploadSize ( float ) - - The size of the uploaded disk image , in GiB . Comment ( string ) - - A user - defined comment about the disk upload . : type ClientToken : string : param ClientToken : The token to enable idempotency for VM import requests . : type RoleName : string : param RoleName : The name of the role to use when not using the default role , ' vmimport ' . 
: rtype : dict : return : { ' ImportTaskId ' : ' string ' , ' Architecture ' : ' string ' , ' LicenseType ' : ' string ' , ' Platform ' : ' string ' , ' Hypervisor ' : ' string ' , ' Description ' : ' string ' , ' SnapshotDetails ' : [ ' DiskImageSize ' : 123.0, ' Description ' : ' string ' , ' Format ' : ' string ' , ' Url ' : ' string ' , ' UserBucket ' : { ' S3Bucket ' : ' string ' , ' S3Key ' : ' string ' ' DeviceName ' : ' string ' , ' SnapshotId ' : ' string ' , ' Progress ' : ' string ' , ' StatusMessage ' : ' string ' , ' Status ' : ' string ' ' ImageId ' : ' string ' , ' Progress ' : ' string ' , ' StatusMessage ' : ' string ' , ' Status ' : ' string '"""
pass
def fetch_path(self, name):
    """Fetch contents from the path retrieved via ``lookup_path``.

    No caching will be done; the file is read (as UTF-8) on every call.
    """
    target = self.lookup_path(name)
    with codecs.open(target, encoding='utf-8') as stream:
        return stream.read()
def grab_xml(host, token=None):
    """Grab XML data from Gateway, returned as a dict."""
    # The gateway typically uses a self-signed certificate, so TLS warnings
    # are silenced and verification is disabled on the request below.
    urllib3.disable_warnings()
    if token:
        scheme = "https"
    else:
        # Token-less mode talks plain HTTP using the well-known default token.
        scheme = "http"
        token = "1234567890"
    url = (scheme + '://' + host
           + '/gwr/gop.php?cmd=GWRBatch&data=<gwrcmds><gwrcmd><gcmd>RoomGetCarousel</gcmd><gdata><gip><version>1</version><token>'
           + token
           + '</token><fields>name,status</fields></gip></gdata></gwrcmd></gwrcmds>&fmt=xml')
    reply = requests.get(url, verify=False)
    # force_list guarantees 'room'/'device' are lists even with one element.
    parsed = xmltodict.parse(reply.content, force_list={'room', 'device'})
    return parsed['gwrcmds']['gwrcmd']['gdata']['gip']['room']
def _serialize_icons ( icons ) : """Serialize [ IndividualConstraints ] into the SimpleMRS encoding ."""
toks = [ 'ICONS:' , '<' ] for ic in icons : toks . extend ( ic ) # toks + = [ str ( icon . left ) , # icon . relation , # str ( icon . right ) ] toks += [ '>' ] return ' ' . join ( toks )
def insert_concurrent_execution_histories(self, parent, concurrent_execution_histories):
    """Add the child execution histories of a concurrency state to the tree.

    :param Gtk.TreeItem parent: the tree item the branches are appended to
    :param list[ExecutionHistory] concurrent_execution_histories: all child
        execution histories
    :return:
    """
    for branch_history in concurrent_execution_histories:
        if not len(branch_history):
            continue
        # Insert a dummy "Concurrency Branch" node per branch so that even a
        # branch consisting of a single simple execution state gets its own
        # grouping parent in the overview.
        branch_root = self.insert_history_item(
            parent, branch_history[0], "Concurrency Branch", dummy=True)
        self.insert_execution_history(branch_root, branch_history)
def _calculate_matches_closures ( groups ) : """Find the transitive closure of each unique identity . This function uses a BFS algorithm to build set of matches . For instance , given a list of matched unique identities like A = { A , B } ; B = { B , A , C } , C = { C , } and D = { D , } the output will be A = { A , B , C } and D = { D , } . : param groups : groups of unique identities"""
matches = [ ] ns = sorted ( groups . groups . keys ( ) ) while ns : n = ns . pop ( 0 ) visited = [ n ] vs = [ v for v in groups . get_group ( n ) [ 'uuid_y' ] ] while vs : v = vs . pop ( 0 ) if v in visited : continue nvs = [ nv for nv in groups . get_group ( v ) [ 'uuid_y' ] ] vs += nvs visited . append ( v ) try : ns . remove ( v ) except : pass matches . append ( visited ) return matches
def setCoords(self, x1, y1, x2, y2):
    """Install a coordinate system for this window, running from (x1, y1)
    at the lower-left corner to (x2, y2) at the upper-right corner."""
    width, height = self.size
    self.trans = Transform(width, height, x1, y1, x2, y2)
def phred_13_to_18_sed ( self , new_path = None , in_place = True ) : """Illumina - 1.3 format conversion to Illumina - 1.8 format via sed ( faster ) ."""
# String # sed_command = r"""4~4y/@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghi/!"#$%&'\''()*+,-.\/0123456789:;<=>?@ABCDEFGHIJ/""" # Faster with bash utilities # if in_place is True : sh . sed ( '-i' , sed_command , self . path ) return self # New file # if new_path is None : new_fastq = self . __class__ ( new_temp_path ( ) ) else : new_fastq = self . __class__ ( new_path ) sh . sed ( sed_command + " " + new_fastq , self . path ) return new_fastq
def p_dynamic_class_name_reference(p):
    '''dynamic_class_name_reference : base_variable OBJECT_OPERATOR object_property dynamic_class_name_variable_properties
                                    | base_variable'''
    # PLY grammar action (the docstring above IS the grammar rule): builds a
    # chain of ObjectProperty nodes for `$obj->prop1->prop2...` when used as a
    # dynamic class name; a bare base_variable passes through unchanged.
    if len(p) == 5:
        # First property after the object operator.
        name, dims = p[3]
        p[0] = ast.ObjectProperty(p[1], name, lineno=p.lineno(2))
        # Apply any array/offset accessors attached to that property.
        for class_, dim, lineno in dims:
            p[0] = class_(p[0], dim, lineno=lineno)
        # Remaining chained properties, each with their own accessors.
        for name, dims in p[4]:
            p[0] = ast.ObjectProperty(p[0], name, lineno=p.lineno(2))
            for class_, dim, lineno in dims:
                p[0] = class_(p[0], dim, lineno=lineno)
    else:
        p[0] = p[1]
def verify_credentials(self, delegate=None):
    "Verify a user's credentials."
    # Feed the XML response through the txml.Users parser, notifying delegate.
    return self.__downloadPage('/account/verify_credentials.xml',
                               txml.Users(delegate))
async def copy(self, key_source, storage_dest, key_dest):
    """Copy the data at ``key_source`` into ``key_dest`` of ``storage_dest``.

    Optimized for http -> filesystem copies: streams the HTTP response body
    straight into the destination file without buffering it in memory.
    Returns True when data was copied, None when the source returned an
    error status. ``return_status`` is not supported.
    """
    from aioworkers.storage.filesystem import FileSystemStorage
    if not isinstance(storage_dest, FileSystemStorage):
        # NOTE(review): returns the base-class coroutine without awaiting it,
        # so the value a caller gets from awaiting this method is itself a
        # coroutine -- presumably the framework awaits it again; confirm
        # whether this should be `return await super().copy(...)`.
        return super().copy(key_source, storage_dest, key_dest)
    url = self.raw_key(key_source)
    logger = self.context.logger
    async with self._semaphore:
        async with self.session.get(url) as response:
            if response.status == 404:
                # Missing source: nothing to copy, signal with None.
                return
            elif response.status >= 400:
                if logger.getEffectiveLevel() == logging.DEBUG:
                    logger.debug(
                        'HttpStorage request to %s '
                        'returned code %s:\n%s' % (
                            url, response.status,
                            (await response.read()).decode()))
                return
            # Stream response chunks directly into the destination file.
            async with storage_dest.raw_key(key_dest).open('wb') as f:
                async for chunk in response.content.iter_any():
                    await f.write(chunk)
    return True
def validate ( self , value ) : """validate"""
# obj can be None or a DataFrame if value is None : return True else : try : with value . open ( ) as hdulist : self . validate_hdulist ( hdulist ) except Exception : _type , exc , tb = sys . exc_info ( ) six . reraise ( ValidationError , exc , tb )
def installProductOn(self, userstore):
    """Create an Installation in ``userstore`` for this product's powerup
    types, then install those powerups on the user's store."""
    def _install():
        installation = Installation(store=userstore)
        installation.types = self.types
        installation.install()
    # Run the whole installation atomically in the store's transaction.
    userstore.transact(_install)
def annotate(results, settings):
    '''Concatenate the annotations of all checkers.'''
    lines = []
    for result, setting in zip(results, settings):
        annotation = generate_annotation(result, setting)
        # Skip empty annotations so no blank lines appear in the output.
        if annotation:
            lines.append(annotation)
    return '\n'.join(lines)
def detachAcceptMsOriginating():
    """DETACH ACCEPT Section 9.4.6.2"""
    header = TpPd(pd=0x3)
    msg_type = MessageType(mesType=0x6)  # 00000110
    spare = ForceToStandbyAndSpareHalfOctets()
    # Layer the components into a single packet.
    return header / msg_type / spare
def awaitAnyTermination(self, timeout=None):
    """Wait until any query on the associated SQLContext has terminated since
    the creation of the context, or since :func:`resetTerminated()` was
    called. If any query terminated with an exception, that exception is
    thrown. With a ``timeout`` (seconds), returns whether a query terminated
    within that time. Use :func:`resetTerminated()` to clear past
    terminations and wait for new ones.

    throws :class:`StreamingQueryException`, if `this` query has terminated
    with an exception
    """
    if timeout is None:
        return self._jsqm.awaitAnyTermination()
    if not isinstance(timeout, (int, float)) or timeout < 0:
        raise ValueError("timeout must be a positive integer or float. Got %s" % timeout)
    # The JVM side expects milliseconds.
    return self._jsqm.awaitAnyTermination(int(timeout * 1000))
def split_label_fuzzy(self, label):
    """Split a label entered as user input (fuzzy syntax).

    More flexible than L{split_label_strict}: the exclamation mark (B{C{!}})
    may be omitted. The ambiguity is resolved by searching the modules in
    the snapshot to guess whether a label refers to a module or a function.
    Labels containing hardcoded addresses are rebuilt when possible.

    @warning: This method only parses the label, it doesn't make sure the
        label actually points to a valid memory location.

    @type  label: str
    @param label: Label to split.

    @rtype:  tuple(str or None, str or int or None, int or None)
    @return: Tuple containing the C{module} name, the C{function} name or
        ordinal, and the C{offset} value. Missing module/function parts are
        C{None}; a missing offset is C{0} (normalized to C{None} on return).

    @raise ValueError: The label is malformed.
    """
    module = function = None
    offset = 0

    # Special case: None
    if not label:
        label = compat.b("0x0")
    else:
        # Remove all blanks.
        label = label.replace(compat.b(' '), compat.b(''))
        label = label.replace(compat.b('\t'), compat.b(''))
        label = label.replace(compat.b('\r'), compat.b(''))
        label = label.replace(compat.b('\n'), compat.b(''))

        # Special case: empty label.
        if not label:
            label = compat.b("0x0")

    # If an exclamation sign is present, we know we can parse it strictly.
    if compat.b('!') in label:
        return self.split_label_strict(label)

##    # Try to parse it strictly, on error do it the fuzzy way.
##    try:
##        return self.split_label(label)
##    except ValueError:
##        pass

    # * + offset
    if compat.b('+') in label:
        try:
            prefix, offset = label.split(compat.b('+'))
        except ValueError:
            raise ValueError("Malformed label: %s" % label)
        try:
            offset = HexInput.integer(offset)
        except ValueError:
            raise ValueError("Malformed label: %s" % label)
        label = prefix

    # This parses both filenames and base addresses.
    modobj = self.get_module_by_name(label)
    if modobj:

        # module
        # module + offset
        module = modobj.get_name()

    else:

        # TODO
        # If 0xAAAAA + 0xBBBBB is given,
        # A is interpreted as a module base address,
        # and B as an offset.
        # If that fails, it'd be good to add A + B and try to
        # use the nearest loaded module.

        # offset
        # base address + offset (when no module has that base address)
        try:
            address = HexInput.integer(label)

            if offset:
                # If 0xAAAAA + 0xBBBBB is given,
                # A is interpreted as a module base address,
                # and B as an offset.
                # If that fails, we get here, meaning no module was found
                # at A. Then add up A + B and work with that as a hardcoded
                # address.
                offset = address + offset
            else:
                # If the label is a hardcoded address, we get here.
                offset = address

            # If only a hardcoded address is given,
            # rebuild the label using get_label_at_address.
            # Then parse it again, but this time strictly,
            # both because there is no need for fuzzy syntax and
            # to prevent an infinite recursion if there's a bug here.
            try:
                new_label = self.get_label_at_address(offset)
                module, function, offset = self.split_label_strict(new_label)
            except ValueError:
                pass

        # function
        # function + offset
        except ValueError:
            function = label

    # Convert function ordinal strings into integers.
    if function and function.startswith(compat.b('#')):
        try:
            function = HexInput.integer(function[1:])
        except ValueError:
            pass

    # Convert null offsets to None.
    if not offset:
        offset = None

    return (module, function, offset)
def write_command(self, command: Command):
    '''Write a command to the stream.

    Args:
        command: The command.

    Coroutine.
    '''
    _logger.debug('Write command.')
    data = command.to_bytes()
    # Old-style (generator-based) coroutine: wait for the connection write to
    # complete, then notify listeners of the bytes that were written.
    yield from self._connection.write(data)
    self._data_event_dispatcher.notify_write(data)
def setNotches(self, notches):
    """Manually assign the notch labels for this ruler, switching it to the
    custom ruler type.

    :param notches: [<str>, ..] || None
    """
    self._notches = notches
    self._rulerType = XChartRuler.Type.Custom
def has_successor(self, graph, orig, dest, branch, turn, tick, *, forward=None):
    """Return whether an edge connects ``orig`` to ``dest`` at the given time.

    Doesn't require the edge's index, which makes it slower than retrieving
    a particular edge.
    """
    use_forward = self.db._forward if forward is None else forward
    destinations = self._get_destcache(
        graph, orig, branch, turn, tick, forward=use_forward)
    return dest in destinations
def handle(self, *args, **options):
    """Print a signed login-URL slug for a chosen user.

    With no arguments: pick the first superuser, else the first staff
    member, else the first user in the system. With one argument: look it
    up as the USERNAME_FIELD value. Any other argument count is an error.
    """
    user_model = get_user_model()
    if len(args) == 0:
        # find the first superuser, or staff member or user
        filters = [{"is_superuser": True}, {"is_staff": True}, {}]
        user = None
        for f in filters:
            try:
                user = user_model._default_manager.filter(**f).order_by("pk").first()
                if user:
                    break
            except FieldError as e:
                # Custom user models may not define is_superuser/is_staff;
                # fall through to the next, less specific filter.
                pass
        if user is None:
            raise CommandError("No users found!")
    elif len(args) == 1:
        # find the user with the USERNAME_FIELD equal to the command line
        # argument
        try:
            user = user_model._default_manager.get_by_natural_key(args[0])
        except user_model.DoesNotExist as e:
            raise CommandError("The user does not exist")
    else:
        raise CommandError("You passed me too many arguments")
    # Sign the primary key with a timestamp so the login link can expire.
    signer = TimestampSigner()
    signature = signer.sign(str(user.pk))
    self.stdout.write(reverse(login, args=(signature,)))
def _addDPFiles ( self , * files ) : """callback to add DPs corresponding to files ."""
# quiet flag is always true self . new_entry_dialog . addDataProducts ( self . purrer . makeDataProducts ( [ ( file , True ) for file in files ] , unbanish = True , unignore = True ) )
def move(self, node, destination, position=None, save=False):
    """Re-parent the given :class:`CTENode` ``node`` under ``destination``
    (or make it a root node when ``destination`` is ``None``).

    :param position: optional callable invoked as
        ``position(node, destination)`` before the parent pointer changes,
        for custom sibling-ordering semantics.
    :param save: when ``True``, call ``node.save()`` after the move.
    :return: this node.
    """
    # Custom ordering hook runs before the parent pointer is updated.
    if position is not None:
        position(node, destination)
    node.parent = destination
    if save:
        node.save()
    return node
def list_group_maintainers(self, name):
    """Get the maintainers of a group.

    Args:
        name (string): Name of group to query.

    Returns:
        (list[string]): List of maintainer names.
    """
    svc = self.service
    return svc.list_group_maintainers(name, self.url_prefix, self.auth,
                                      self.session, self.session_send_opts)
def _ReadString ( self , file_object , file_offset , data_type_map , description ) : """Reads a string . Args : file _ object ( FileIO ) : file - like object . file _ offset ( int ) : offset of the data relative from the start of the file - like object . data _ type _ map ( dtfabric . DataTypeMap ) : data type map of the string . description ( str ) : description of the string . Returns : object : structure values object . Raises : FileFormatError : if the string cannot be read . ValueError : if file - like object or date type map are invalid ."""
# pylint : disable = protected - access element_data_size = ( data_type_map . _element_data_type_definition . GetByteSize ( ) ) elements_terminator = ( data_type_map . _data_type_definition . elements_terminator ) byte_stream = [ ] element_data = file_object . read ( element_data_size ) byte_stream . append ( element_data ) while element_data and element_data != elements_terminator : element_data = file_object . read ( element_data_size ) byte_stream . append ( element_data ) byte_stream = b'' . join ( byte_stream ) return self . _ReadStructureFromByteStream ( byte_stream , file_offset , data_type_map , description )
def commit(self):
    """Commit the current transaction, making all statements executed since
    the transaction was begun permanent."""
    pending = getattr(self.local, 'tx', None)
    if pending:
        transaction = pending.pop()
        transaction.commit()
        # Refresh cached table metadata after the commit.
        self._flush_tables()
def _redundant_stack_variable_removal(self, function, data_graph):
    """Replace references to copied stack arguments with the argument itself.

    If an argument passed on the stack (e.g. dword ptr [ebp+4h]) is saved to
    a local stack variable at the beginning of the function, that local is
    never modified anywhere in the function, and no stack-variable pointer is
    saved in any general-purpose register, then all references to the local
    can be redirected to the argument.

    :param function:
    :param networkx.MultiDiGraph data_graph:
    :return:
    """
    # check if there is any stack pointer being stored into any register
    # other than esp; basically check all consumers of stack pointers
    stack_ptrs = []
    sp_offset = self.project.arch.registers['esp'][0]
    bp_offset = self.project.arch.registers['ebp'][0]
    for n in data_graph.nodes():
        if isinstance(n.variable, SimRegisterVariable) and n.variable.reg in (sp_offset, bp_offset):
            stack_ptrs.append(n)
    # for each stack pointer variable, make sure none of its consumers is a
    # general purpose register
    for stack_ptr in stack_ptrs:
        out_edges = data_graph.out_edges(stack_ptr, data=True)
        for _, dst, data in out_edges:
            if 'type' in data and data['type'] == 'kill':
                # we don't care about killing edges
                continue
            if isinstance(dst.variable, SimRegisterVariable) and dst.variable.reg < 40 \
                    and dst.variable.reg not in (sp_offset, bp_offset):
                # oops: a stack pointer escaped into a GP register; the
                # optimization is unsafe for this function, bail out.
                l.debug('Function %s does not satisfy requirements of redundant stack variable removal.',
                        repr(function))
                return
    # Collect stack arguments: bp-based variables at non-negative offsets.
    argument_variables = []
    for n in data_graph.nodes():
        if isinstance(n.variable, SimStackVariable) and n.variable.base == 'bp' and n.variable.offset >= 0:
            argument_variables.append(n)
    if not argument_variables:
        return
    argument_to_local = {}
    argument_register_as_retval = set()
    # for each argument, find its correspondence on the local stack frame
    for argument_variable in argument_variables:
        # is it copied to the stack?
        successors0 = list(data_graph.successors(argument_variable))
        if not successors0:
            continue
        if len(successors0) != 1:
            continue
        if isinstance(successors0[0].variable, SimRegisterVariable):
            # argument -> register -> stack
            out_edges = data_graph.out_edges(successors0[0], data=True)
            successors1 = [s for _, s, data in out_edges if 'type' not in data or data['type'] != 'kill']
            if len(successors1) == 1:
                successor1 = successors1[0]
                if isinstance(successor1.variable, SimStackVariable):
                    if (successor1.variable.base == 'sp' and successor1.variable.offset > 0) or \
                            (successor1.variable.base == 'bp' and successor1.variable.offset < 0):
                        # yes it's copied onto the stack!
                        argument_to_local[argument_variable] = successor1
            # if the register is eax, and it's not killed later, it might be
            # the return value of this function; in that case, we cannot
            # eliminate the instruction that moves the stack argument to it
            if successors0[0].variable.reg == self.project.arch.registers['eax'][0]:
                killers = [s for _, s, data in out_edges if 'type' in data and data['type'] == 'kill']
                if not killers:
                    # it might be the return value
                    argument_register_as_retval.add(argument_variable)
        else:
            # TODO:
            # NOTE(review): debugger breakpoint left in production code --
            # this should be removed or replaced with a log + skip.
            import ipdb; ipdb.set_trace()
    # import pprint
    # pprint.pprint(argument_to_local, width=160)
    # find local correspondences that are not modified throughout this function
    redundant_stack_variables = []
    for argument, local_var in argument_to_local.items():
        # local_var cannot be killed anywhere
        out_edges = data_graph.out_edges(local_var, data=True)
        consuming_locs = []
        for _, consumer, data in out_edges:
            consuming_locs.append(consumer.location)
            if 'type' in data and data['type'] == 'kill':
                break
        else:
            # no killing edges. the value is not changed!
            rsv = RedundantStackVariable(argument, local_var, consuming_locs)
            if argument in argument_register_as_retval:
                rsv.argument_register_as_retval = True
            redundant_stack_variables.append(rsv)
    self.redundant_stack_variables.extend(redundant_stack_variables)
def start(parallel, items, config, dirs=None, name=None, multiplier=1, max_multicore=None):
    """Start a parallel cluster or machines to be used for running remote
    functions.

    Yields a function used to process items in parallel. Allows sharing of a
    single cluster across multiple functions with identical resource
    requirements. Uses local execution for non-distributed clusters or
    completed jobs. A checkpoint directory keeps track of finished tasks,
    avoiding spinning up clusters for previously processed sections.

    NOTE(review): this is a generator used as a context manager -- presumably
    decorated with @contextlib.contextmanager at the definition site; confirm.

    multiplier - Number of expected jobs per initial input item. Used to
    avoid underscheduling cores when an item is split during processing.
    max_multicore -- The maximum number of cores to use for each process.
    """
    if name:
        checkpoint_dir = utils.safe_makedir(os.path.join(dirs["work"], "checkpoints_parallel"))
        checkpoint_file = os.path.join(checkpoint_dir, "%s.done" % name)
    else:
        checkpoint_file = None
    sysinfo = system.get_info(dirs, parallel, config.get("resources", {}))
    items = [x for x in items if x is not None] if items else []
    max_multicore = int(max_multicore or sysinfo.get("cores", 1))
    parallel = resources.calculate(parallel, items, sysinfo, config,
                                   multiplier=multiplier, max_multicore=max_multicore)
    try:
        view = None
        if parallel["type"] == "ipython":
            if checkpoint_file and os.path.exists(checkpoint_file):
                # Work already finished: run locally on a single core.
                logger.info("Running locally instead of distributed -- checkpoint passed: %s" % name)
                parallel["cores_per_job"] = 1
                parallel["num_jobs"] = 1
                parallel["checkpointed"] = True
                yield multi.runner(parallel, config)
            else:
                from bcbio.distributed import ipython
                with ipython.create(parallel, dirs, config) as view:
                    yield ipython.runner(view, parallel, dirs, config)
        else:
            yield multi.runner(parallel, config)
    except:
        # Deliberate bare except: tear down the cluster on *any* failure
        # (including KeyboardInterrupt), then re-raise.
        if view is not None:
            from bcbio.distributed import ipython
            ipython.stop(view)
        raise
    else:
        # Success: drop per-run scheduling keys and record the checkpoint.
        for x in ["cores_per_job", "num_jobs", "mem"]:
            parallel.pop(x, None)
        if checkpoint_file:
            with open(checkpoint_file, "w") as out_handle:
                out_handle.write("done\n")
def _scrape_document ( self ) : '''Extract links from the DOM .'''
mock_response = self . _new_mock_response ( self . _response , self . _get_temp_path ( 'phantom' , '.html' ) ) self . _item_session . request = self . _request self . _item_session . response = mock_response self . _processing_rule . scrape_document ( item_session ) if mock_response . body : mock_response . body . close ( )
def get_dim_index(js_dict, dim):
    """Get index from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.

    Returns:
        dim_index (pandas.DataFrame): DataFrame with index-based dimension
        data (columns ``id`` and ``index``, sorted by ``index``).
    """
    try:
        dim_index = js_dict['dimension'][dim]['category']['index']
    except KeyError:
        # No explicit index: fall back to the dimension's single label at 0.
        dim_label = get_dim_label(js_dict, dim)
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0], columns=['id', 'index'])
    else:
        if isinstance(dim_index, list):
            dim_index = pd.DataFrame(list(zip(dim_index, range(len(dim_index)))),
                                     index=dim_index, columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(), dim_index.values())),
                                     index=list(dim_index.keys()),
                                     columns=['id', 'index'])
        # BUG FIX: DataFrame.sort_index(by=...) was deprecated and removed
        # from pandas; sorting by a column is sort_values.
        dim_index = dim_index.sort_values(by='index')
    return dim_index
def mode(values, dropna=True):
    """Returns the mode(s) of an array.

    Parameters
    ----------
    values : array-like
        Array over which to check for duplicate values.
    dropna : boolean, default True
        Don't consider counts of NaN/NaT.

        .. versionadded:: 0.24.0

    Returns
    -------
    mode : Series
    """
    from pandas import Series

    values = _ensure_arraylike(values)
    original = values

    # categorical is a fast-path
    if is_categorical_dtype(values):
        if isinstance(values, Series):
            return Series(values.values.mode(dropna=dropna), name=values.name)
        return values.mode(dropna=dropna)

    # Datetime-likes can't be masked inside the hashtable routine, so drop
    # nulls up front.
    if dropna and is_datetimelike(values):
        mask = values.isnull()
        values = values[~mask]

    values, dtype, ndtype = _ensure_data(values)

    # Dispatch to the dtype-specialized hashtable mode implementation.
    f = getattr(htable, "mode_{dtype}".format(dtype=ndtype))
    result = f(values, dropna=dropna)
    try:
        result = np.sort(result)
    except TypeError as e:
        # Mixed/unorderable values: return modes unsorted rather than failing.
        warn("Unable to sort modes: {error}".format(error=e))

    result = _reconstruct_data(result, original.dtype, original)
    return Series(result)
def rebuild_auth(self, prepared_request, response):
    """Never rebuild auth for archive.org URLs.

    Overrides the requests.Session redirect hook: when a redirect stays on
    an archive.org host the Authorization header is kept as-is; for any
    other host the default auth-stripping behaviour applies.
    """
    u = urlparse(prepared_request.url)
    if u.netloc.endswith('archive.org'):
        # BUG FIX: the super() call was previously made *inside* this
        # branch, i.e. auth was rebuilt exactly (and only) for archive.org
        # URLs -- the opposite of the documented contract. Keep existing
        # auth headers for archive.org and defer to the default behaviour
        # for everything else.
        return
    super(ArchiveSession, self).rebuild_auth(prepared_request, response)
def view_attr(attr_name):
    """Creates a setter that will set the specified view attribute to the
    current value.

    @param attr_name: the name of an attribute belonging to the view.
    @type attr_name: str
    """
    def view_attr(value, context, **_params):
        # Assign the incoming value onto the view object held in the context.
        setattr(context["view"], attr_name, value)
        # NOTE(review): `_attr` is not defined in this block -- presumably a
        # module-level helper supplying the setter's return value; confirm.
        return _attr()
    return view_attr
def closest(self, obj, group, defaults=True):
    """Return the most appropriate Options object for ``obj``, including
    inheritance.

    Designed to be called from the root of the tree. In addition, custom
    options are supported by checking the object's type/group/label path.
    """
    type_name = obj.__class__.__name__
    sanitized = (type_name,
                 group_sanitizer(obj.group),
                 label_sanitizer(obj.label))
    # Build the dotted lookup target, skipping empty components.
    target = '.'.join(part for part in sanitized if part)
    return self.find(sanitized).options(group, target=target, defaults=defaults)
def serial_wire_viewer(jlink_serial, device):
    """Implements a Serial Wire Viewer (SWV).

    A Serial Wire Viewer (SWV) allows us to implement real-time logging of
    output from a connected device over Serial Wire Output (SWO).

    Args:
        jlink_serial (str): the J-Link serial number
        device (str): the target CPU

    Returns:
        Always returns ``0``.

    Raises:
        JLinkException: on error
    """
    # NOTE(review): StringIO.StringIO is Python 2 only -- under Python 3
    # this would need io.StringIO; confirm the target interpreter.
    buf = StringIO.StringIO()
    jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)
    jlink.open(serial_no=jlink_serial)
    # Use Serial Wire Debug as the target interface. Need this in order to use
    # Serial Wire Output.
    jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)
    jlink.connect(device, verbose=True)
    jlink.coresight_configure()
    jlink.set_reset_strategy(pylink.enums.JLinkResetStrategyCortexM3.RESETPIN)
    # Have to halt the CPU before getting its speed.
    jlink.reset()
    jlink.halt()
    cpu_speed = jlink.cpu_speed()
    swo_speed = jlink.swo_supported_speeds(cpu_speed, 10)[0]
    # Start logging serial wire output.
    jlink.swo_start(swo_speed)
    jlink.swo_flush()
    # Output the information about the program.
    sys.stdout.write('Serial Wire Viewer\n')
    sys.stdout.write('Press Ctrl-C to Exit\n')
    sys.stdout.write('Reading data from port 0:\n\n')
    # Reset the core without halting so that it runs.
    jlink.reset(ms=10, halt=False)
    # Use the `try` loop to catch a keyboard interrupt in order to stop logging
    # serial wire output.
    try:
        while True:
            # Check for any bytes in the stream.
            num_bytes = jlink.swo_num_bytes()
            if num_bytes == 0:
                # If no bytes exist, sleep for a bit before trying again.
                time.sleep(1)
                continue
            data = jlink.swo_read_stimulus(0, num_bytes)
            sys.stdout.write(''.join(map(chr, data)))
            sys.stdout.flush()
    except KeyboardInterrupt:
        pass
    sys.stdout.write('\n')
    # Stop logging serial wire output.
    jlink.swo_stop()
    return 0
def remove(cls, pid):
    """Remove a pool, closing all of its connections.

    :param str pid: The pool ID
    """
    with cls._lock:
        cls._ensure_pool_exists(pid)
        pool = cls._pools[pid]
        pool.close()
        del cls._pools[pid]
def read_bytes(self):
    """Refresh the per-file progress bars, rescheduling itself every 10 ms.

    NOTE(review): relies on module-level globals ``exit_flag``, ``i_max``,
    ``total_chunks`` and ``file_name`` -- presumably maintained by the
    downloader threads; confirm.
    """
    global exit_flag
    # Copy the shared download counters into the widget state for each file.
    for self.i in range(0, self.length):
        self.bytes[self.i] = i_max[self.i]
        self.maxbytes[self.i] = total_chunks[self.i]
        self.progress[self.i]["maximum"] = total_chunks[self.i]
        self.progress[self.i]["value"] = self.bytes[self.i]
        self.str[self.i].set(file_name[self.i] + " " + str(self.bytes[self.i]) +
                             "KB / " + str(int(self.maxbytes[self.i] + 1)) + " KB")
    if exit_flag == self.length:
        # All downloads finished: reset the flag and tear down the frame.
        exit_flag = 0
        self.frame.destroy()
    else:
        # Poll again in 10 ms.
        self.frame.after(10, self.read_bytes)
def _symlink_in_files(in_files, data):
    """Symlink (shared filesystem) or copy (CWL) inputs into align_prep."""
    work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "align_prep"))
    prepped = []
    for in_file in in_files:
        # Prefix with the sample name so inputs from different samples
        # cannot collide in the shared directory.
        target = os.path.join(work_dir, "%s_%s" % (dd.get_sample_name(data),
                                                   os.path.basename(in_file)))
        prepped.append(_symlink_or_copy_grabix(in_file, target, data))
    return prepped
def gcv(data, channels=None):
    """Calculate the geometric CV of the events in an FCSData object.

    Parameters
    ----------
    data : FCSData or numpy array
        NxD flow cytometry data where N is the number of events and D is
        the number of parameters (aka channels).
    channels : int or str or list of int or list of str, optional
        Channels on which to calculate the statistic. If None, use all
        channels.

    Returns
    -------
    float or numpy array
        The geometric coefficient of variation of the events in the
        specified channels of `data`.
    """
    # Select the channels of interest (all channels when unspecified).
    data_stats = data if channels is None else data[:, channels]
    # Geometric CV: sqrt(exp(sigma_ln^2) - 1), where sigma_ln is the
    # standard deviation of the log-transformed data.
    log_std = np.std(np.log(data_stats), axis=0)
    return np.sqrt(np.exp(log_std ** 2) - 1)
def get_logging_stream_handler(logger=None, formatter=LOGGING_DEFAULT_FORMATTER):
    """Add a logging stream handler (writing to a Streamer) to the given
    logger, or to the default LOGGER when none is given.

    :param logger: Logger to add the handler to.
    :type logger: Logger
    :param formatter: Handler formatter.
    :type formatter: Formatter
    :return: Added handler.
    :rtype: Handler
    """
    target = logger if logger is not None else LOGGER
    handler = logging.StreamHandler(Streamer())
    handler.setFormatter(formatter)
    target.addHandler(handler)
    return handler
def compute_busiest_date(feed: "Feed", dates: List[str]) -> str:
    """Given a list of dates, return the first date that has the maximum
    number of active trips.

    Notes
    -----
    Assume the following feed attributes are not ``None``:

    - Those used in :func:`compute_trip_activity`
    """
    f = feed.compute_trip_activity(dates)
    date_columns = [c for c in f.columns if c != "trip_id"]
    # BUG FIX: the previous `max()` over (count, date) tuples broke ties by
    # the lexicographically *greatest* date; keying on the count alone makes
    # max() return the *first* date with the maximal trip count, matching
    # the documented contract.
    return max(date_columns, key=lambda c: f[c].sum())
def from_file(campaign_file, **kwargs):
    """Load a campaign description from a YAML file.

    :return: memory representation of the YAML file
    :rtype: dictionary
    """
    # A directory argument means "use the default campaign file inside it".
    if osp.isdir(osp.realpath(campaign_file)):
        campaign_file = osp.join(campaign_file, YAML_CAMPAIGN_FILE)
    campaign = Configuration.from_file(campaign_file)
    return default_campaign(campaign, **kwargs)
def built ( self ) : """Called just after this node is successfully built ."""
# Clear the implicit dependency caches of any Nodes # waiting for this Node to be built . for parent in self . waiting_parents : parent . implicit = None self . clear ( ) if self . pseudo : if self . exists ( ) : raise SCons . Errors . UserError ( "Pseudo target " + str ( self ) + " must not exist" ) else : if not self . exists ( ) and do_store_info : SCons . Warnings . warn ( SCons . Warnings . TargetNotBuiltWarning , "Cannot find target " + str ( self ) + " after building" ) self . ninfo . update ( self )
def _at(cls, verb):
    """A verb with a select text match"""
    # Named (listed) columns are always included
    columns = cls.select(verb)
    kept = set(cls.select(verb))
    # Grouping columns are dropped unless they were explicitly named.
    kept -= set(_get_groups(verb)) - set(verb.names)

    def _include(name):
        if name not in verb.data:
            raise KeyError("Unknown column name, {!r}".format(name))
        return name in kept

    return [name for name in columns if _include(name)]
def get_readable_tasks(self, course):
    """Returns the list of all available tasks in a course

    :param course: course object whose id names the course folder
    :return: list of task folder names, trailing '/' removed
    """
    course_fs = self._filesystem.from_subfolder(course.get_id())
    # Idiom fix: task[:-1] replaces the manual task[0:len(task)-1] slice;
    # both strip the trailing '/' that folder listings carry.
    return [
        task[:-1]
        for task in course_fs.list(folders=True, files=False, recursive=False)
        if self._task_file_exists(course_fs.from_subfolder(task))
    ]
def cmdify(self):
    """Encode into a cmd-executable string.

    Re-implements CreateProcess's quoting logic to turn the command plus
    its argument list into a single string for the shell to interpret:
    double quotes are backslash-escaped, backslashes preceding a quote are
    doubled, and the result is wrapped in double quotes. An argument is
    deliberately left unquoted when it contains no foul characters
    (whitespace, carets, and -- for the command itself -- parentheses),
    for compatibility with built-ins such as ``echo`` and DOS-style
    switches. See pypa/pipenv#3307 and pypa/pipenv#3168. Intended for
    pre-processing before ``subprocess.Popen(..., shell=True)``.
    """
    quoted_command = _quote_if_contains(self.command, r'[\s^()]')
    quoted_args = [_quote_if_contains(argument, r'[\s^]')
                   for argument in self.args]
    return " ".join([quoted_command] + quoted_args)
def extract_value(self, agg, idx, name=''):
    """Extract member number *idx* from aggregate."""
    # A scalar index is normalised to a one-element list.
    indices = idx if isinstance(idx, (tuple, list)) else [idx]
    instr = instructions.ExtractValue(self.block, agg, indices, name=name)
    self._insert(instr)
    return instr
def dehydrate(self):
    """Return a dict representing this limit."""
    # Only very specific attributes are serialized; going through
    # getattr lets properties participate as well.
    result = dict(limit_class=self._limit_full_name)
    result.update((attr, getattr(self, attr)) for attr in self.attrs)
    return result
def get_referents(object, level=1):
    """Get all referents of an object up to a certain level.

    The referents will not be returned in a specific order and
    will not contain duplicate objects. Duplicate objects will be removed.

    Keyword arguments:
    level -- level of indirection to which referents considered.

    This function is recursive.
    """
    res = gc.get_referents(object)
    level -= 1
    if level > 0:
        # Iterate over a snapshot: the previous code extended `res` while
        # iterating it, so newly appended referents were themselves expanded
        # again at this call, exceeding the requested level and potentially
        # growing without bound on cyclic object graphs.
        for o in tuple(res):
            res.extend(get_referents(o, level))
    res = _remove_duplicates(res)
    return res
def unicode_key(key):
    """Convert property value to quoted name of same."""
    valid = isinstance(key, (text_type, binary_type))
    if not valid:
        # Log.error raises; imported lazily to avoid a module-level cycle.
        from mo_logs import Log
        Log.error("{{key|quote}} is not a valid key", key=key)
    return quote(text_type(key))
def vsep(v1, v2):
    """Find the separation angle in radians between two double precision,
    3-dimensional vectors. This angle is defined as zero if either vector
    is zero.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vsep_c.html

    :param v1: First vector
    :type v1: 3-Element Array of floats
    :param v2: Second vector
    :type v2: 3-Element Array of floats
    :return: separation angle in radians
    :rtype: float
    """
    # Marshal both vectors to C doubles before calling into CSPICE.
    c_v1 = stypes.toDoubleVector(v1)
    c_v2 = stypes.toDoubleVector(v2)
    return libspice.vsep_c(c_v1, c_v2)
def get_caller_stack_info(start_back: int = 1) -> List[str]:
    r"""Retrieves a textual representation of the call stack.

    Args:
        start_back: number of calls back in the frame stack (starting
            from the frame stack as seen by
            :func:`get_caller_stack_info`) to begin with

    Returns:
        list of descriptions, outermost caller first; each entry shows
        the function call with its argument values, where the function
        was defined, and the source line that invoked the next frame,
        e.g.::

            f2()
            ... defined at /home/rudolf/tmp/stack.py:10
            ... line 12 calls next in stack; code is:
                x.classfunc()
    """
    # "0 back" is this function itself, so "1 back" is its caller.
    # https://docs.python.org/3/library/inspect.html
    callers = []  # type: List[str]
    frameinfolist = inspect.stack()  # type: List[FrameInfo]  # noqa
    frameinfolist = frameinfolist[start_back:]
    for frameinfo in frameinfolist:
        frame = frameinfo.frame
        # Bug fix: the format string previously hard-coded "(unknown)" and
        # ignored the supplied filename keyword entirely.
        function_defined_at = "... defined at {filename}:{line}".format(
            filename=frame.f_code.co_filename,
            line=frame.f_code.co_firstlineno,
        )
        argvalues = inspect.getargvalues(frame)
        formatted_argvalues = inspect.formatargvalues(*argvalues)
        function_call = "{funcname}{argvals}".format(
            funcname=frame.f_code.co_name,
            argvals=formatted_argvalues,
        )
        code_context = frameinfo.code_context
        code = "".join(code_context) if code_context else ""
        onwards = "... line {line} calls next in stack; code is:\n{c}".format(
            line=frame.f_lineno,
            c=code,
        )
        description = "\n".join([function_call, function_defined_at, onwards])
        callers.append(description)
    return list(reversed(callers))
def run_command(self, command, stdin=None, env=None):
    """Launch a shell command line.

    :param command: Command line to launch
    :type command: str
    :param stdin: Standard input of command
    :type stdin: file
    :param env: Environment variable used in command
    :type env: dict
    :return: Standard output and standard error of command
    :rtype: tuple of file
    """
    cmd = shlex.split(command)
    # Output is spooled in memory and only hits disk past TMP_FILE_MAX_SIZE.
    stdout = SpooledTemporaryFile(max_size=settings.TMP_FILE_MAX_SIZE,
                                  dir=settings.TMP_DIR)
    stderr = SpooledTemporaryFile(max_size=settings.TMP_FILE_MAX_SIZE,
                                  dir=settings.TMP_DIR)
    # Build the child environment: optionally inherit the parent's, then
    # overlay the connector's own env and the per-call env.
    full_env = os.environ.copy() if self.use_parent_env else {}
    full_env.update(self.env)
    full_env.update(env or {})
    try:
        if isinstance(stdin, (ContentFile, SFTPStorageFile)):
            # Storage file objects cannot be handed to Popen directly;
            # feed their content through a pipe instead.
            process = Popen(cmd, stdin=PIPE, stdout=stdout, stderr=stderr,
                            env=full_env)
            process.communicate(input=stdin.read())
        else:
            process = Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr,
                            env=full_env)
            process.wait()
        # After wait()/communicate(), poll() is the exit status; any
        # non-zero (truthy) status is surfaced with the captured stderr.
        if process.poll():
            stderr.seek(0)
            raise exceptions.CommandConnectorError(
                "Error running: {}\n{}".format(
                    command, stderr.read().decode('utf-8')))
        # Rewind both streams so callers can read them from the start.
        stdout.seek(0)
        stderr.seek(0)
        return stdout, stderr
    except OSError as err:
        # e.g. executable not found
        raise exceptions.CommandConnectorError(
            "Error running: {}\n{}".format(command, str(err)))
def new_instance(cls, classname, length):
    """Creates a new array with the given classname and length; initial
    values are null.

    :param classname: the classname in Java notation (eg "weka.core.DenseInstance")
    :type classname: str
    :param length: the length of the array
    :type length: int
    :return: the Java array
    :rtype: JB_Object
    """
    # Resolve the component class first, then call java.lang.reflect.Array.newInstance.
    component_class = get_jclass(classname=classname)
    return javabridge.static_call(
        "Ljava/lang/reflect/Array;",
        "newInstance",
        "(Ljava/lang/Class;I)Ljava/lang/Object;",
        component_class,
        length)
def serialize(self, submit=None):
    """Serialize each form field to a Payload container.

    :param Submit submit: Optional `Submit` to click, if form includes
        multiple submits
    :return: Payload instance
    """
    fields = prepare_fields(self.fields, self.submit_fields, submit)
    return Payload.from_fields(fields)
def stop_processing(self, warning=True):
    """Registers the end of a processing operation.

    :param warning: Emit warning message.
    :type warning: int
    :return: Method success.
    :rtype: bool
    """
    if not self.__is_processing:
        if warning:
            LOGGER.warning(
                "!> {0} | Engine is not processing, 'stop_processing' request has been ignored!".format(
                    self.__class__.__name__))
        return False

    LOGGER.debug("> Stopping processing operation!")
    self.__is_processing = False
    # Reset and hide the progress widgets.
    status = self.Application_Progress_Status_processing
    status.Processing_label.setText(QString())
    status.Processing_progressBar.setRange(0, 100)
    status.Processing_progressBar.setValue(0)
    status.hide()
    return True
def rename_file(source_path, target_path,
                allow_undo=True, no_confirm=False, rename_on_collision=True,
                silent=False, extra_flags=0, hWnd=None):
    """Perform a shell-based file rename.

    Renaming in this way allows the possibility of undo, auto-renaming,
    and showing the "flying file" animation during the copy.

    The default options allow for undo, don't automatically clobber on a
    name clash, automatically rename on collision and display the
    animation.

    :param source_path: path of the file to rename
    :param target_path: destination path/name
    :param allow_undo: allow the operation to be undone from the shell
    :param no_confirm: suppress confirmation dialogs
    :param rename_on_collision: auto-rename instead of clobbering on a clash
    :param silent: hide the progress dialog
    :param extra_flags: additional flags OR'd into the operation
        (presumably shellcon FOF_* flags -- confirm against _file_operation)
    :param hWnd: optional parent window handle for any dialogs
    """
    # Delegate to the shared shell file-operation wrapper with FO_RENAME.
    return _file_operation(shellcon.FO_RENAME, source_path, target_path,
                           allow_undo, no_confirm, rename_on_collision,
                           silent, extra_flags, hWnd)
def write_to(output, txt):
    """Write some text to some output"""
    # Write the text as-is when it is already bytes, or when the sink is a
    # text stream (StringIO on Python 3, or a TextIOWrapper); otherwise
    # encode it to UTF-8 first.
    write_raw = (
        isinstance(txt, six.binary_type)
        or (six.PY3 and isinstance(output, StringIO))
        or isinstance(output, TextIOWrapper)
    )
    if write_raw:
        output.write(txt)
    else:
        output.write(txt.encode("utf-8", "replace"))
def _start_consumer ( self , consumer ) : """Start a consumer as a new Thread . : param Consumer consumer : : return :"""
thread = threading . Thread ( target = consumer . start , args = ( self . _connection , ) ) thread . daemon = True thread . start ( )
def _proxy(self):
    """Generate an instance context for the instance, the context is
    capable of performing various actions. All instance actions are
    proxied to the context.

    :returns: ServiceContext for this ServiceInstance
    :rtype: twilio.rest.preview.acc_security.service.ServiceContext
    """
    # Lazily build and cache the context on first access.
    context = self._context
    if context is None:
        context = ServiceContext(self._version, sid=self._solution['sid'])
        self._context = context
    return context
def build_rules(rule_yaml, match_plugins, action_plugins):
    """Convert parsed rule YAML in to a list of ruleset objects

    :param rule_yaml: Dictionary parsed from YAML rule file
    :param match_plugins: Dictionary of match plugins
        (key=config_name, value=plugin object)
    :param action_plugins: Dictionary of action plugins
        (key=config_name, value=plugin object)
    :return: list of rules
    """
    return [
        RuleSet(section,
                match_plugins=match_plugins,
                action_plugins=action_plugins)
        for section in rule_yaml
    ]
def single(C, namespace=None):
    """An element maker with a single namespace that uses that namespace
    as the default"""
    if namespace is None:
        return C()._
    return C(default=namespace, _=namespace)._
def compatibility_mode():
    """
    Use this function to turn on the compatibility mode. The compatibility
    mode is used to improve compatibility with Pyinotify 0.7.1 (or older)
    programs. The compatibility mode provides additional variables 'is_dir',
    'event_name', 'EventsCodes.IN_*' and 'EventsCodes.ALL_EVENTS' as
    Pyinotify 0.7.1 provided. Do not call this function from new programs!!
    Especially if there are developped for Pyinotify >= 0.8.x.
    """
    # Re-export ALL_EVENTS and every module-level IN_* flag as attributes
    # of EventsCodes, as Pyinotify 0.7.1 did.
    setattr(EventsCodes, 'ALL_EVENTS', ALL_EVENTS)
    for evname in globals():
        if evname.startswith('IN_'):
            setattr(EventsCodes, evname, globals()[evname])
    # Module-level switch read elsewhere to enable the legacy behaviour
    # described in the docstring.
    global COMPATIBILITY_MODE
    COMPATIBILITY_MODE = True
def url_unquote_plus(s, charset='utf-8', errors='replace'):
    """URL decode a single string with the given `charset` and decode "+" to
    whitespace.

    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
    :exc:`HTTPUnicodeError` is raised.

    :param s: The string to unquote.
    :param charset: the charset of the query string. If set to `None`
        no unicode decoding will take place.
    :param errors: The error handling for the `charset` decoding.
    """
    # Pick text or bytes replacement depending on the input type.
    if isinstance(s, text_type):
        plus, space = u'+', u' '
    else:
        plus, space = b'+', b' '
    return url_unquote(s.replace(plus, space), charset, errors)
def _penalize_client(self, client):
    """Place client in the penalty box.

    :param client: Client object
    """
    if client in self.active_clients:
        # hasn't been removed yet
        log.warning("%r marked down.", client)
        self.active_clients.remove(client)
        self.penalty_box.add(client)
    else:
        # Bug fix: the original call omitted the lazy-format argument, so
        # the literal "%r" was logged instead of the client's repr.
        log.info("%r not in active client list.", client)
def latcyl(radius, lon, lat):
    """Convert from latitudinal coordinates to cylindrical coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/latcyl_c.html

    :param radius: Distance of a point from the origin.
    :param lon: Angle of the point from the XZ plane in radians.
    :param lat: Angle of the point from the XY plane in radians.
    :return: (r, lonc, z)
    :rtype: tuple
    """
    # Inputs marshalled to C doubles; outputs received via byref.
    c_radius = ctypes.c_double(radius)
    c_lon = ctypes.c_double(lon)
    c_lat = ctypes.c_double(lat)
    out_r = ctypes.c_double()
    out_lonc = ctypes.c_double()
    out_z = ctypes.c_double()
    libspice.latcyl_c(c_radius, c_lon, c_lat,
                      ctypes.byref(out_r),
                      ctypes.byref(out_lonc),
                      ctypes.byref(out_z))
    return out_r.value, out_lonc.value, out_z.value
def get_proficiency_admin_session_for_objective_bank(self, objective_bank_id=None):
    """Gets the OsidSession associated with the proficiency administration
    service for the given objective bank.

    arg:    objectiveBankId (osid.id.Id): the Id of the ObjectiveBank
    return: (osid.learning.ProficiencyAdminSession) - a
            ProficiencyAdminSession
    raise:  NotFound - no objective bank found by the given Id
    raise:  NullArgument - objectiveBankId is null
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_proficiency_admin() or
            supports_visible_federation() is false
    compliance: optional - This method must be implemented if
                supports_proficiency_admin() and
                supports_visible_federation() are true
    """
    # Guard clauses first: argument presence, then capability support.
    if not objective_bank_id:
        raise NullArgument
    if not self.supports_proficiency_admin():
        raise Unimplemented()
    # The sessions module may be absent in partial installations.
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    try:
        return sessions.ProficiencyAdminSession(objective_bank_id,
                                                runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
def _on_session_destroyed(session_context):
    '''Calls any on_session_destroyed callbacks defined on the Document'''
    # Take ownership of the callback set and reset it, so each callback
    # runs at most once even if this handler fires again.
    callbacks = session_context._document.session_destroyed_callbacks
    session_context._document.session_destroyed_callbacks = set()
    for callback in callbacks:
        try:
            callback(session_context)
        except Exception as e:
            # A failing user callback must not stop the remaining ones.
            log.warning('DocumentLifeCycleHandler on_session_destroyed '
                        'callback %s failed with following error: %s'
                        % (callback, e))
    if callbacks:
        # If any session callbacks were defined garbage collect after deleting all references
        del callback
        del callbacks

        import gc
        gc.collect()