idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
43,200
def index_data(self, data, index_name, doc_type):
    """Index data in the Stub Indexer (placeholder: just logs the call)."""
    # Stub implementation: a real ELS (ElasticSearch) indexer replaces this elsewhere.
    print 'ELS Stub Indexer getting called...'
    print '%s %s %s %s' % (self, data, index_name, doc_type)
Index data in Stub Indexer .
43,201
def execute(self, input_data):
    """Do CLI formatting and coloring based on the type_tag.

    Returns:
        dict of the form {'help': <colorized help string>}.
    """
    input_data = input_data['help_base']
    type_tag = input_data['type_tag']
    if type_tag == 'help':
        # Plain help text: colorize the whole blob.
        output = '%s%s%s' % (color.LightBlue, input_data['help'], color.Normal)
    elif type_tag == 'worker':
        # Worker info: name, input dependencies, and the worker docstring.
        output = '%s%s' % (color.Yellow, input_data['name'])
        output += '\n %sInput: %s%s%s' % (color.LightBlue, color.Green, input_data['dependencies'], color.Normal)
        output += '\n %s%s' % (color.Green, input_data['docstring'])
    elif type_tag == 'command':
        # Command info: name, signature, and the command docstring.
        output = '%s%s%s %s' % (color.Yellow, input_data['command'], color.LightBlue, input_data['sig'])
        output += '\n %s%s%s' % (color.Green, input_data['docstring'], color.Normal)
    else:
        # Unknown shape: complain loudly and echo the raw object in red.
        print 'Alert: help_formatter worker received malformed object: %s' % str(input_data)
        output = '\n%s%s%s' % (color.Red, str(input_data), color.Normal)
    return {'help': output}
Do CLI formatting and coloring based on the type_tag
43,202
def run():
    """Run the workbench server: load config.ini, bind the zerorpc server, serve."""
    # config.ini must live next to this file.
    config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini')
    workbench_conf = ConfigParser.ConfigParser()
    config_ini = workbench_conf.read(config_path)
    if not config_ini:
        print 'Could not locate config.ini file, tried %s : exiting...' % config_path
        exit(1)
    # Datastore connection settings and resource caps.
    datastore_uri = workbench_conf.get('workbench', 'datastore_uri')
    database = workbench_conf.get('workbench', 'database')
    worker_cap = workbench_conf.getint('workbench', 'worker_cap')
    samples_cap = workbench_conf.getint('workbench', 'samples_cap')
    try:
        store_args = {'uri': datastore_uri, 'database': database, 'worker_cap': worker_cap, 'samples_cap': samples_cap}
        workbench = zerorpc.Server(WorkBench(store_args=store_args), name='workbench', heartbeat=60)
        workbench.bind('tcp://0.0.0.0:4242')
        print '\nWorkbench is ready and feeling super duper!'
        # Stop the server cleanly on termination signals.
        # NOTE(review): SIGKILL cannot be caught on POSIX, so that handler
        # registration looks ineffective -- confirm against gevent docs.
        gevent_signal(signal.SIGTERM, workbench.stop)
        gevent_signal(signal.SIGINT, workbench.stop)
        gevent_signal(signal.SIGKILL, workbench.stop)
        workbench.run()
        print '\nWorkbench Server Shutting Down... and dreaming of sheep...'
    except zmq.error.ZMQError:
        # Address already in use: assume another server instance is running.
        print '\nInfo: Could not start Workbench server (no worries, probably already running...)\n'
Run the workbench server
43,203
def combine_samples(self, md5_list, filename, type_tag):
    """Combine stored samples into a single new sample.

    The primary use case is re-assembling a large file that was uploaded in
    chunks: each chunk's raw bytes are concatenated in list order, the chunk
    samples are removed, and the combined bytes are stored as a new sample.

    Args:
        md5_list: ordered list of md5 keys of the chunk samples.
        filename: filename to store the combined sample under.
        type_tag: type tag for the combined sample.

    Returns:
        Whatever store_sample returns (the md5 of the combined sample).
    """
    # Collect the chunks and join once: repeated += on a string buffer is
    # quadratic in the number of chunks.
    chunks = []
    for md5 in md5_list:
        chunks.append(self.get_sample(md5)['sample']['raw_bytes'])
        # The chunk sample is no longer needed once its bytes are captured.
        self.remove_sample(md5)
    total_bytes = "".join(chunks)
    return self.store_sample(total_bytes, filename, type_tag)
Combine samples together. The most significant use case is when a large sample was uploaded in chunks and the chunks must now be recombined.
43,204
def guess_type_tag(self, input_bytes, filename):
    """Try to guess the type_tag for this sample from its MIME (magic) type."""
    mime_to_type = {'application/jar': 'jar',
                    'application/java-archive': 'jar',
                    'application/octet-stream': 'data',
                    'application/pdf': 'pdf',
                    'application/vnd.ms-cab-compressed': 'cab',
                    'application/vnd.ms-fontobject': 'ms_font',
                    'application/vnd.tcpdump.pcap': 'pcap',
                    'application/x-dosexec': 'exe',
                    'application/x-empty': 'empty',
                    'application/x-shockwave-flash': 'swf',
                    'application/xml': 'xml',
                    'application/zip': 'zip',
                    'image/gif': 'gif',
                    'text/html': 'html',
                    'image/jpeg': 'jpg',
                    'image/png': 'png',
                    'image/x-icon': 'icon',
                    'text/plain': 'txt'}
    # The first 1k bytes are enough for libmagic identification.
    with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as mag:
        mime_type = mag.id_buffer(input_bytes[:1024])
        if mime_type in mime_to_type:
            type_tag = mime_to_type[mime_type]
            if type_tag == 'data':
                # Generic binary: fall back to the file extension.
                print 'Info: File -- Trying to Determine Type from filename...'
                ext = os.path.splitext(filename)[1][1:]
                if ext in ['mem', 'vmem']:
                    type_tag = 'mem'
                else:
                    # NOTE(review): exit(1) kills the whole process from a
                    # library method -- confirm this is intentional.
                    print 'Alert: Failed to Determine Type for %s' % filename
                    exit(1)
            return type_tag
        else:
            print 'Alert: Sample Type could not be Determined'
            return 'unknown'
Try to guess the type_tag for this sample
43,205
def add_tags(self, md5, tags):
    """Add tags to this sample, merging with any existing tags.

    Args:
        md5: md5 of the sample to tag.
        tags: a single tag string or a list of tag strings; falsy -> no-op.
    """
    if not tags:
        return
    # Fetch existing tags once (the original queried the store twice).
    existing = self.get_tags(md5)
    tag_set = set(existing) if existing else set()
    # A bare string is treated as a single tag.
    if isinstance(tags, str):
        tags = [tags]
    tag_set.update(tags)
    self.data_store.store_work_results({'tags': list(tag_set)}, 'tags', md5)
Add tags to this sample
43,206
def set_tags(self, md5, tags):
    """Set (overwrite) the tags for this sample."""
    # Accept a bare string as a convenience for a single tag.
    tag_list = [tags] if isinstance(tags, str) else tags
    # De-duplicate before persisting.
    unique_tags = list(set(tag_list))
    self.data_store.store_work_results({'tags': unique_tags}, 'tags', md5)
Set the tags for this sample
43,207
def get_tags(self, md5):
    """Return the list of tags for this sample, or None when it has none."""
    tag_data = self.data_store.get_work_results('tags', md5)
    if not tag_data:
        return None
    return tag_data['tags']
Get tags for this sample
43,208
def generate_sample_set(self, tags=None):
    """Generate a sample_set matching the tags (all samples when tags is None)."""
    # A single tag string is treated as a one-element list.
    if isinstance(tags, str):
        tags = [tags]
    matching_md5s = self.data_store.tag_match(tags)
    return self.store_sample_set(matching_md5s)
Generate a sample_set that matches the tags, or all samples if tags are not specified.
43,209
def help(self, topic=None):
    """Return the formatted/colored help for topic (defaults to 'workbench')."""
    if not topic:
        topic = 'workbench'
    try:
        # The help_formatter worker produces the colorized help text.
        return self.work_request('help_formatter', topic)['help_formatter']['help']
    except WorkBench.DataNotFound as e:
        # Topic not found: echo the offending name plus the error detail.
        # NOTE(review): e.message() is invoked as a method here -- confirm
        # DataNotFound actually defines message() (on many exception types
        # 'message' is a plain attribute).
        sample_md5 = e.args[0]
        return '%s%s\n\t%s%s%s' % (color.Yellow, sample_md5, color.Green, e.message(), color.Normal)
Returns the formatted colored help
43,210
def _help_workbench(self):
    """Top-level help text for Workbench."""
    green, blue = color.Green, color.LightBlue
    # Assemble once and join: identical output to incremental concatenation.
    lines = [
        '%sWelcome to Workbench Help:%s' % (color.Yellow, color.Normal),
        '\n\t%s- workbench.help(\'basic\') %s for getting started help' % (green, blue),
        '\n\t%s- workbench.help(\'workers\') %s for help on available workers' % (green, blue),
        '\n\t%s- workbench.help(\'commands\') %s for help on workbench commands' % (green, blue),
        '\n\t%s- workbench.help(topic) %s where topic can be a help, command or worker' % (green, blue),
        '\n\n%sSee http://github.com/SuperCowPowers/workbench for more information\n%s' % (color.Yellow, color.Normal),
    ]
    return ''.join(lines)
Help on Workbench
43,211
def _help_basic(self):
    """Getting-started help text for Workbench."""
    parts = (
        '%sWorkbench: Getting started...' % (color.Yellow),
        '\n%sStore a sample into Workbench:' % (color.Green),
        '\n\t%s$ workbench.store_sample(raw_bytes, filename, type_tag)' % (color.LightBlue),
        '\n\n%sNotice store_sample returns an md5 of the sample...' % (color.Yellow),
        '\n%sRun workers on the sample (view, meta, whatever...):' % (color.Green),
        '\n\t%s$ workbench.work_request(\'view\', md5)%s' % (color.LightBlue, color.Normal),
    )
    return ''.join(parts)
Help for Workbench Basics
43,212
def _help_commands ( self ) : help = 'Workbench Commands:' for command in self . list_all_commands ( ) : full_help = self . work_request ( 'help_formatter' , command ) [ 'help_formatter' ] [ 'help' ] compact_help = full_help . split ( '\n' ) [ : 2 ] help += '\n\n%s' % '\n' . join ( compact_help ) return help
Help on all the available commands
43,213
def _help_workers ( self ) : help = 'Workbench Workers:' for worker in self . list_all_workers ( ) : full_help = self . work_request ( 'help_formatter' , worker ) [ 'help_formatter' ] [ 'help' ] compact_help = full_help . split ( '\n' ) [ : 4 ] help += '\n\n%s' % '\n' . join ( compact_help ) return help
Help on all the available workers
43,214
def get_info(self, component):
    """Return the stored info for a component, cleaned for serialization."""
    raw_results = self._get_work_results('info', component)
    return self.data_store.clean_for_serialization(raw_results)
Get the information about this component
43,215
def store_info(self, info_dict, component, type_tag):
    """Store information about a component (worker, command, class, ...).

    Component names share one namespace, so beware of collisions.
    """
    if not isinstance(info_dict, dict):
        print 'Critical: info_dict must be a python dictionary, got %s' % type(info_dict)
        return
    # Drop callable values: they cannot be serialized into the store.
    info_storage = {key: value for key, value in info_dict.iteritems() if not hasattr(value, '__call__')}
    info_storage['type_tag'] = type_tag
    self._store_work_results(info_storage, 'info', component)
Store information about a component. The component could be a worker, a command, a class, or whatever you want; the only thing to be aware of is name collisions.
43,216
def _store_information(self):
    """Store information about Workbench itself and all of its public commands."""
    print '<<< Generating Information Storage >>>'
    # Every public method is registered as a 'command' with its signature/docstring.
    for name, meth in inspect.getmembers(self, predicate=inspect.isroutine):
        if not name.startswith('_'):
            info = {'command': name, 'sig': str(funcsigs.signature(meth)), 'docstring': meth.__doc__}
            self.store_info(info, name, type_tag='command')
    # Canned help topics.
    self.store_info({'help': '<<< Workbench Server Version %s >>>' % self.version}, 'version', type_tag='help')
    self.store_info({'help': self._help_workbench()}, 'workbench', type_tag='help')
    self.store_info({'help': self._help_basic()}, 'basic', type_tag='help')
    self.store_info({'help': self._help_commands()}, 'commands', type_tag='help')
    self.store_info({'help': self._help_workers()}, 'workers', type_tag='help')
Store information about Workbench and its commands
43,217
def tags(self):
    """Display tag counts and correlations for all samples in the database."""
    tags = self.workbench.get_all_tags()
    if not tags:
        return
    tag_df = pd.DataFrame(tags)
    # One-hot encode the tag lists so they can be counted and correlated.
    tag_df = self.vectorize(tag_df, 'tags')
    print '\n%sSamples in Database%s' % (color.LightPurple, color.Normal)
    self.top_corr(tag_df)
Display tag information for all samples in database
43,218
def vectorize(self, df, column_name):
    """One-hot encode a dataframe column whose values are lists of strings."""
    # Join each list with '-' then expand into indicator (dummy) columns.
    joined = df[column_name].str.join(sep='-')
    return joined.str.get_dummies(sep='-')
Vectorize a column in the dataframe
43,219
def flatten(self, df, column_name):
    """Explode a list-valued column into one (md5, value) row per element."""
    rows = []
    for md5, value_list in zip(df['md5'], df[column_name]):
        for value in value_list:
            rows.append([md5, value])
    return pd.DataFrame(rows, columns=['md5', column_name])
Flatten a column in the dataframe that contains lists
43,220
def top_corr(self, df):
    """Print per-tag counts plus the top correlated tags (correlation > 0.2)."""
    tag_freq = df.sum()
    # NOTE(review): Series.sort() was deprecated/removed in modern pandas
    # (use sort_values); this code targets an older pandas API.
    tag_freq.sort(ascending=False)
    corr = df.corr().fillna(1)
    corr_dict = corr.to_dict()
    for tag, count in tag_freq.iteritems():
        # Trailing comma: stay on the same line (Python 2 print).
        print ' %s%s: %s%s%s (' % (color.Green, tag, color.LightBlue, count, color.Normal),
        # Highest correlated tags first; show at most five.
        tag_corrs = sorted(corr_dict[tag].iteritems(), key=operator.itemgetter(1), reverse=True)
        for corr_tag, value in tag_corrs[:5]:
            if corr_tag != tag and (value > .2):
                print '%s%s:%s%.1f' % (color.Green, corr_tag, color.LightBlue, value),
        print '%s)' % color.Normal
Give aggregation counts and correlations
43,221
def run(self):
    """Run the interactive Workbench CLI (an embedded IPython shell)."""
    # Show versions, a tag overview, and CLI help up front.
    self.versions()
    self.tags()
    print '\n%s' % self.workbench.help('cli')
    # Configure the embedded IPython shell.
    cfg = Config()
    cfg.InteractiveShellEmbed.autocall = 2
    cfg.InteractiveShellEmbed.colors = 'Linux'
    cfg.InteractiveShellEmbed.color_info = True
    cfg.InteractiveShellEmbed.autoindent = True
    cfg.InteractiveShellEmbed.deep_reload = True
    cfg.PromptManager.in_template = (r'{color.LightPurple}{short_md5}{color.Yellow}{prompt_deco}{color.LightBlue} Workbench{color.Green}[\#]> ')
    self.ipshell = IPython.terminal.embed.InteractiveShellEmbed(config=cfg, banner1='', exit_msg='\nWorkbench has SuperCowPowers...')
    # Let bare Workbench commands be typed without quotes.
    auto_quoter = auto_quote_xform.AutoQuoteTransformer(self.ipshell, self.ipshell.prefilter_manager)
    auto_quoter.register_command_set(self.command_set)
    # Wide-but-compact pandas display defaults for the terminal.
    pd.set_option('display.width', 140)
    pd.set_option('max_colwidth', 15)
    self.ipshell(local_ns=self.command_dict)
Running the workbench CLI
43,222
def _connect(self, server_info):
    """Connect to the workbench server, probing first with a short-lived client."""
    # Probe with a throwaway client so a bad host:port fails fast.
    _tmp_connect = zerorpc.Client(timeout=300, heartbeat=2)
    _tmp_connect.connect('tcp://' + server_info['server'] + ':' + server_info['port'])
    try:
        _tmp_connect._zerorpc_name()
        _tmp_connect.close()
        del _tmp_connect
    except zerorpc.exceptions.LostRemote:
        print '%sError: Could not connect to Workbench Server at %s:%s%s' % (color.Red, server_info['server'], server_info['port'], color.Normal)
        sys.exit(1)
    # Probe succeeded: replace any existing connection with the real client.
    if self.workbench:
        self.workbench.close()
    self.workbench = zerorpc.Client(timeout=300, heartbeat=60)
    self.workbench.connect('tcp://' + server_info['server'] + ':' + server_info['port'])
    print '\n%s<<< Connected: %s:%s >>>%s' % (color.Green, server_info['server'], server_info['port'], color.Normal)
Connect to the workbench server
43,223
def _work_request ( self , worker , md5 = None ) : if not md5 and not self . session . md5 : return 'Must call worker with an md5 argument...' elif not md5 : md5 = self . session . md5 if self . workbench . is_sample_set ( md5 ) : return self . workbench . set_work_request ( worker , md5 ) try : return self . workbench . work_request ( worker , md5 ) except zerorpc . exceptions . RemoteError as e : return repr_to_str_decorator . r_to_s ( self . _data_not_found ) ( e )
Wrapper for a work_request to workbench
43,224
def _register_info(self):
    """Register local CLI methods and help topics with the Workbench info system."""
    # Every public method (except run) becomes a registered 'command'.
    for name, meth in inspect.getmembers(self, predicate=inspect.isroutine):
        if name.startswith('_') or name == 'run':
            continue
        info = {'command': name, 'sig': str(funcsigs.signature(meth)), 'docstring': meth.__doc__}
        self.workbench.store_info(info, name, 'command')
    # Canned CLI help topics, stored in a fixed order.
    help_topics = [
        ('cli', self.help.help_cli()),
        ('cli_basic', self.help.help_cli_basic()),
        ('search', self.help.help_cli_search()),
        ('dataframe', self.help.help_dataframe()),
        ('dataframe_memory', self.help.help_dataframe_memory()),
        ('dataframe_pe', self.help.help_dataframe_pe()),
    ]
    for topic_name, topic_help in help_topics:
        self.workbench.store_info({'help': topic_help}, topic_name, 'help')
Register local methods in the Workbench Information system
43,225
def transform(self, line, _continue_prompt):
    """Shortcut Workbench commands by auto-quoting bare tokens on the input line."""
    orig_line = line  # kept for reference; not otherwise used
    # All names known to the IPython namespaces: these must NOT be quoted.
    ns_token_set = set([token for nspace in self.shell.all_ns_refs for token in nspace])
    # NOTE(review): the unescaped parens in this pattern form a regex group
    # with an empty alternative, not literal '(' / ')' separators -- and
    # re.split interleaves group matches into its result. Confirm this is
    # intended; a character class like r'[ ;,()\'"]' may have been meant.
    token_list = re.split(' |;|,|(|)|\'|"', line)
    token_list = [item for item in token_list if item != None and item != '']
    num_tokens = len(token_list)
    first_token = token_list[0]
    token_set = set(token_list)
    # load_sample: quote the filename argument when it is not a known name.
    if first_token == 'load_sample':
        if token_list[1] not in ns_token_set:
            line = line.replace(token_list[1], '"' + token_list[1] + '",')
        return line
    # pivot: quote every token that is not a known name.
    if first_token == 'pivot':
        for token in token_list:
            if token not in ns_token_set:
                line = line.replace(token, '"' + token + '",')
        return line
    # The user already used quotes/parens/separators: leave the line alone.
    skip_symbols = [';', ',', '\'', '"', '(', ')']
    if any([sym in line for sym in skip_symbols]):
        return line
    # General command: quote arguments that are not known names
    # (help quotes everything after the command itself).
    if num_tokens > 1 and first_token in self.command_set:
        if first_token == 'help':
            token_set.remove('help')
            for token in token_set:
                line = line.replace(token, '"' + token + '"')
        else:
            for token in token_set:
                if token not in ns_token_set:
                    line = line.replace(token, '"' + token + '"')
    return line
Shortcut Workbench commands by using auto - quotes
43,226
def decodes(self, s: str) -> BioCCollection:
    """Deserialize the XML string *s* to a BioC collection object."""
    # Parse from an in-memory byte stream so lxml can expose docinfo.
    buffer = io.BytesIO(bytes(s, encoding='UTF-8'))
    tree = etree.parse(buffer)
    collection = self.__parse_collection(tree.getroot())
    # Carry the XML declaration details over onto the collection.
    docinfo = tree.docinfo
    collection.encoding = docinfo.encoding
    collection.standalone = docinfo.standalone
    collection.version = docinfo.xml_version
    return collection
Deserialize s to a BioC collection object .
43,227
def decode(self, fp: TextIO) -> BioCCollection:
    """Deserialize the XML file object *fp* to a BioC collection object."""
    tree = etree.parse(fp)
    root = tree.getroot()
    collection = self.__parse_collection(root)
    # Propagate the XML declaration metadata.
    docinfo = tree.docinfo
    collection.encoding = docinfo.encoding
    collection.standalone = docinfo.standalone
    collection.version = docinfo.xml_version
    return collection
Deserialize fp to a BioC collection object .
43,228
def process_row(cls, data, column_map):
    """Process one row of Rekall output into a {column_name: value} dict.

    - Falsy values become the placeholder '-'.
    - List values are collapsed to their second element.
    - Rekall UnixTimeStamp dicts become UTC datetimes (epoch 0 -> '-').

    Keys are renamed through column_map.
    """
    row = {}
    # .items() instead of the Python-2-only .iteritems() (works on both).
    for key, value in data.items():
        if not value:
            value = '-'
        elif isinstance(value, list):
            # NOTE(review): takes the second element -- presumably the list
            # is [type_info, display_value]; confirm against Rekall output.
            value = value[1]
        elif isinstance(value, dict):
            if 'type_name' in value:
                if 'UnixTimeStamp' in value['type_name']:
                    value = datetime.datetime.utcfromtimestamp(value['epoch'])
                    # The epoch origin means 'no timestamp'.
                    if value == datetime.datetime(1970, 1, 1, 0, 0):
                        value = '-'
        row[column_map[key]] = value
    return row
Process the row data from Rekall
43,229
def format(self, formatstring, *args):
    """Capture presentation info from the plugin: emit one section message."""
    if not self.incoming_section:
        return
    # Only the first format() call after a section starts is recorded.
    self.SendMessage(['s', {'name': args}])
    self.incoming_section = False
Presentation Information from the Plugin
43,230
def SendMessage(self, statement):
    """Capture a [type, data] message statement into the output list."""
    msg_type = statement[0]
    msg_data = statement[1]
    self.output.append({'type': msg_type, 'data': msg_data})
Here we're actually capturing messages and putting them into our output
43,231
def open(self, directory=None, filename=None, mode="rb"):
    """Open *filename* inside *directory* for reading or writing.

    NOTE(review): directory/filename default to None but os.path.join
    requires strings -- callers are expected to always pass both.
    """
    full_path = os.path.join(directory, filename)
    return open(full_path, mode)
Opens a file for writing or reading .
43,232
def _file_chunks ( self , data , chunk_size ) : for i in xrange ( 0 , len ( data ) , chunk_size ) : yield self . compressor ( data [ i : i + chunk_size ] )
Yield compressed chunks from a data array
43,233
def stream_to_workbench(self, raw_bytes, filename, type_tag, tags):
    """Chunk a large file, stream the chunks to Workbench, then recombine them.

    Returns:
        The md5 of the recombined (full) sample.
    """
    chunk_md5s = []
    bytes_sent = 0
    total_bytes = len(raw_bytes)
    for chunk in self._file_chunks(raw_bytes, self.chunk_size):
        chunk_md5s.append(self.workbench.store_sample(chunk, filename, self.compress_ident))
        # Progress is tracked in whole chunks, so it may overshoot total_bytes.
        bytes_sent += self.chunk_size
        self.progress(bytes_sent, total_bytes)
    # Server side: stitch the chunk samples back into one sample and tag it.
    full_md5 = self.workbench.combine_samples(chunk_md5s, filename, type_tag)
    self.workbench.add_tags(full_md5, tags)
    return full_md5
Split up a large file into chunks and send to Workbench
43,234
def dumps(obj, **kwargs) -> str:
    """Serialize a BioC *obj* to a JSON formatted str.

    Extra keyword args are forwarded to json.dumps (e.g. indent).
    """
    # BioCJSONEncoder knows how to encode the BioC object hierarchy.
    return json.dumps(obj, cls=BioCJSONEncoder, **kwargs)
Serialize a BioC obj to a JSON formatted str .
43,235
def write(self, obj: 'BioCDocument | BioCPassage | BioCSentence'):
    """Encode and write a single object at the writer's configured level.

    Raises:
        ValueError: obj's type does not match self.level.
    """
    # NOTE(review): the original annotation was `A or B or C`, which
    # evaluates to just the first class at runtime; the string union above
    # expresses the actual contract.
    if self.level == DOCUMENT and not isinstance(obj, BioCDocument):
        raise ValueError
    if self.level == PASSAGE and not isinstance(obj, BioCPassage):
        raise ValueError
    if self.level == SENTENCE and not isinstance(obj, BioCSentence):
        raise ValueError
    self.writer.write(BioCJSONEncoder().default(obj))
Encode and write a single object .
43,236
def execute(self, input_data):
    """Execute the ViewMemoryDeep worker: merge tables from all dependency workers."""
    output = input_data['view_memory']
    output['tables'] = {}
    # Collect every table from each dependency's results into one dict.
    for data in [input_data[key] for key in ViewMemoryDeep.dependencies]:
        for name, table in data['tables'].iteritems():
            output['tables'].update({name: table})
    return output
Execute the ViewMemoryDeep worker
43,237
def execute(self, input_data):
    """Execute the ViewMemory worker: summarize meta/connscan/pslist results."""
    output = {'meta': input_data['mem_meta']['tables']['info']}
    # Unique remote addresses seen by connscan.
    output['connscan'] = list(set([item['Remote Address'] for item in input_data['mem_connscan']['tables']['connscan']]))
    # Map process id -> md5 of its dumped executable.
    # NOTE(review): keys come from file_to_pid(filename) but lookups below
    # use item['PID'] -- confirm file_to_pid returns the same (int) type.
    pslist_md5s = {self.file_to_pid(item['filename']): item['md5'] for item in input_data['mem_procdump']['tables']['dumped_files']}
    output['pslist'] = ['PPID: %d PID: %d Name: %s - %s' % (item['PPID'], item['PID'], item['Name'], pslist_md5s[item['PID']]) for item in input_data['mem_pslist']['tables']['pslist']]
    return output
Execute the ViewMemory worker
43,238
def multi_session(self):
    """Convert the multi_session param to an int ('all' maps to -1, default 0)."""
    value = self._dict.get("multi_session", 0)
    # The string 'all' (any case) means every available session.
    if str(value).lower() == 'all':
        value = -1
    return int(value)
Convert the multi_session param to a number
43,239
def _raw_aspera_metadata ( self , bucket ) : response = self . _client . get_bucket_aspera ( Bucket = bucket ) aspera_access_key = response [ 'AccessKey' ] [ 'Id' ] aspera_secret_key = response [ 'AccessKey' ] [ 'Secret' ] ats_endpoint = response [ 'ATSEndpoint' ] return aspera_access_key , aspera_secret_key , ats_endpoint
get the Aspera connection details on Aspera enabled buckets
43,240
def _fetch_transfer_spec(self, node_action, token, bucket_name, paths):
    """Make an HTTP call to the Aspera ATS endpoint to fetch a transfer spec.

    Args:
        node_action: node setup action, e.g. 'upload_setup'/'download_setup'.
        token: delegated refresh token used as storage credentials.
        bucket_name: target bucket.
        paths: list of source/destination path dicts.

    Returns:
        The raw requests.Response from the ATS endpoint.
    """
    aspera_access_key, aspera_secret_key, ats_endpoint = self._get_aspera_metadata(bucket_name)
    _headers = {'accept': "application/json", 'Content-Type': "application/json"}
    # The delegated token is passed both as a header credential and inside
    # the transfer request's node tags.
    credentials = {'type': 'token', 'token': {'delegated_refresh_token': token}}
    _url = ats_endpoint
    _headers['X-Aspera-Storage-Credentials'] = json.dumps(credentials)
    _data = {'transfer_requests': [{'transfer_request': {'paths': paths, 'tags': {'aspera': {'node': {'storage_credentials': credentials}}}}}]}
    _session = requests.Session()
    _response = _session.post(url=_url + "/files/" + node_action, auth=(aspera_access_key, aspera_secret_key), headers=_headers, json=_data, verify=self._config.verify_ssl)
    return _response
Make an HTTP call to Aspera to fetch back the transfer spec
43,241
def _create_transfer_spec(self, call_args):
    """Fetch a populated transfer spec from Aspera and attach it to call_args.

    Builds the source/destination path pairs from call_args.file_pair_list,
    fetches the spec for the transfer direction, applies any transfer_config
    overrides (including multi-session 'all' fan-out), and stores the result
    as a JSON string on call_args.transfer_spec.

    Returns:
        True on success (errors surface as exceptions).
    """
    _paths = []
    for _file_pair in call_args.file_pair_list:
        _path = OrderedDict()
        if call_args.direction == enumAsperaDirection.SEND:
            _action = "upload_setup"
            _path['source'] = _file_pair.fileobj
            _path['destination'] = _file_pair.key
        else:
            _action = "download_setup"
            _path['source'] = _file_pair.key
            _path['destination'] = _file_pair.fileobj
        _paths.append(_path)
    delegated_token = self._delegated_token_manager.get_token()
    _response = self._fetch_transfer_spec(_action, delegated_token, call_args.bucket, _paths)
    # Only the first returned transfer spec is used.
    tspec_dict = json.loads(_response.content)['transfer_specs'][0]['transfer_spec']
    tspec_dict["destination_root"] = "/"
    if (call_args.transfer_config):
        # User-supplied overrides are merged on top of the fetched spec.
        tspec_dict.update(call_args.transfer_config.dict)
        if call_args.transfer_config.is_multi_session_all:
            # multi_session=0 plus the '-all' host alias enables
            # all-session parallel transfers.
            tspec_dict['multi_session'] = 0
            _remote_host = tspec_dict['remote_host'].split('.')
            _remote_host[0] += "-all"
            tspec_dict['remote_host'] = ".".join(_remote_host)
            logger.info("New remote_host(%s)" % tspec_dict['remote_host'])
    call_args.transfer_spec = json.dumps(tspec_dict)
    return True
pass the transfer details to aspera and receive back a populated transfer spec complete with access token
43,242
def upload_directory(self, directory, bucket, key, transfer_config=None, subscribers=None):
    """Upload a directory to a bucket using Aspera; returns a transfer future."""
    # The directory must be readable before anything is queued.
    check_io_access(directory, os.R_OK)
    pair = FilePair(key, directory)
    return self._queue_task(bucket, [pair], transfer_config, subscribers, enumAsperaDirection.SEND)
upload a directory using Aspera
43,243
def download_directory(self, bucket, key, directory, transfer_config=None, subscribers=None):
    """Download a directory from a bucket using Aspera; returns a transfer future."""
    # The target directory must be writable before anything is queued.
    check_io_access(directory, os.W_OK)
    pair = FilePair(key, directory)
    return self._queue_task(bucket, [pair], transfer_config, subscribers, enumAsperaDirection.RECEIVE)
download a directory using Aspera
43,244
def upload(self, fileobj, bucket, key, transfer_config=None, subscribers=None):
    """Upload a file to a bucket using Aspera; returns a transfer future."""
    # The source file must exist and be readable.
    check_io_access(fileobj, os.R_OK, True)
    pair = FilePair(key, fileobj)
    return self._queue_task(bucket, [pair], transfer_config, subscribers, enumAsperaDirection.SEND)
upload a file using Aspera
43,245
def download(self, bucket, key, fileobj, transfer_config=None, subscribers=None):
    """Download a file from a bucket using Aspera; returns a transfer future."""
    # The destination's parent directory must be writable.
    check_io_access(os.path.dirname(fileobj), os.W_OK)
    pair = FilePair(key, fileobj)
    return self._queue_task(bucket, [pair], transfer_config, subscribers, enumAsperaDirection.RECEIVE)
download a file using Aspera
43,246
def set_log_details(aspera_log_path=None, sdk_log_level=logging.NOTSET):
    """Configure Aspera logging.

    aspera_log_path: log directory used by the external Ascp process.
    sdk_log_level: log level for the internal Aspera SDK logger.
    """
    if aspera_log_path:
        check_io_access(aspera_log_path, os.W_OK)
        AsperaTransferCoordinator.set_log_location(aspera_log_path)
    if sdk_log_level == logging.NOTSET:
        return
    if not logger:
        return
    # Attach a stream handler only once, then apply the requested level.
    if not len(logger.handlers):
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
        logger.addHandler(handler)
    logger.setLevel(sdk_log_level)
Set the Aspera log path (used by the Ascp process) and the internal Aspera SDK log level (for debug purposes)
43,247
def _validate_args(self, args):
    """Validate user-supplied transfer arguments.

    Raises:
        AssertionError: bad bucket/subscriber/config types.
        ValueError: too many sessions requested or an invalid file pair.
    """
    # NOTE(review): assert statements are stripped under `python -O`;
    # consider explicit exceptions if validation must always run.
    assert (args.bucket)
    if args.subscribers:
        for _subscriber in args.subscribers:
            assert (isinstance(_subscriber, AsperaBaseSubscriber))
    if (args.transfer_config):
        assert (isinstance(args.transfer_config, AsperaConfig))
        # Session count is capped by the configured max concurrent ascp limit.
        if args.transfer_config.multi_session > self._config.ascp_max_concurrent:
            raise ValueError("Max sessions is %d" % self._config.ascp_max_concurrent)
    for _pair in args.file_pair_list:
        if not _pair.key or not _pair.fileobj:
            raise ValueError("Invalid file pair")
validate the user arguments
43,248
def _shutdown(self, cancel, cancel_msg, exc_type=CancelledError):
    """Internal shutdown helper: optionally cancel, then wait and clean up."""
    if cancel:
        self._coordinator_controller.cancel(cancel_msg, exc_type)
    try:
        self._coordinator_controller.wait()
    except KeyboardInterrupt:
        # A second interrupt during the wait cancels outstanding work too.
        self._coordinator_controller.cancel('KeyboardInterrupt()')
        raise
    finally:
        # Resources are released no matter how the wait ended.
        self._coordinator_controller.cleanup()
Internal shutdown used by shutdown method above
43,249
def cleanup(self):
    """Stop the background processing thread and clean up resources."""
    self._processing_stop = True
    # Nudge the thread so it notices the stop flag promptly...
    self._wakeup_processing_thread()
    # ...then give it up to 3 seconds to acknowledge shutdown.
    self._processing_stopped_event.wait(3)
Stop the background thread and clean up resources
43,250
def tracked_coordinator_count(self, count_ascps=False):
    """Count tracked coordinators, or (count_ascps=True) their total ascp sessions."""
    with self._lock:
        coordinators = self._tracked_transfer_coordinators
        if count_ascps:
            # Each coordinator may hold several ascp sessions.
            return sum(c.session_count for c in coordinators)
        return len(coordinators)
Count the number of coordinators currently being processed, or the number of ascps currently in use
43,251
def _queue_task(self, args):
    """Queue a transfer if capacity allows, then wake the background thread.

    Returns:
        An AsperaTransferFuture tied to the new coordinator.

    Raises:
        AsperaTransferQueueError: cancel/wait in progress or the queue is full.
    """
    if self._cancel_called:
        raise AsperaTransferQueueError("Cancel already called")
    elif self._wait_called:
        raise AsperaTransferQueueError("Cant queue items during wait")
    elif self.waiting_coordinator_count() >= self._config.max_submission_queue_size:
        raise AsperaTransferQueueError("Max queued items reached")
    else:
        _coordinator = AsperaTransferCoordinator(args)
        _components = {'meta': TransferMeta(args, transfer_id=args.transfer_id), 'coordinator': _coordinator}
        _transfer_future = AsperaTransferFuture(**_components)
        # Subscribers observe the future; completion feeds back into our queues.
        _coordinator.add_subscribers(args.subscribers, future=_transfer_future)
        _coordinator.add_done_callback(self.remove_aspera_coordinator, transfer_coordinator=_coordinator)
        self.append_waiting_queue(_coordinator)
        # Lazily start the single daemon thread that drains the waiting queue.
        if not self._processing_thread:
            self._processing_thread = threading.Thread(target=self._process_waiting_queue)
            self._processing_thread.daemon = True
            self._processing_thread.start()
        self._wakeup_processing_thread()
        return _transfer_future
add transfer to waiting queue if possible then notify the background thread to process it
43,252
def remove_aspera_coordinator(self, transfer_coordinator):
    """Remove a coordinator from the waiting queue, or move it from the
    processing queue to the processed queue, then wake the background thread
    (it may now be able to start waiting transfers)."""
    if self._in_waiting_queue(transfer_coordinator):
        logger.info("Remove from waiting queue count=%d" % self.waiting_coordinator_count())
        with self._lockw:
            self._waiting_transfer_coordinators.remove(transfer_coordinator)
    else:
        logger.info("Remove from processing queue count=%d" % self.tracked_coordinator_count())
        try:
            self.remove_transfer_coordinator(transfer_coordinator)
            self.append_processed_queue(transfer_coordinator)
        except Exception:
            # NOTE(review): broad swallow -- a coordinator that was never
            # tracked is silently ignored; confirm this is intentional.
            pass
    self._wakeup_processing_thread()
Remove an entry from the waiting queue, or move an item from the processing queue to the processed queue; notify the background thread as it may now be able to process waiting requests
43,253
def append_waiting_queue(self, transfer_coordinator):
    """Append a transfer coordinator to the waiting queue (thread-safe)."""
    logger.debug("Add to waiting queue count=%d" % self.waiting_coordinator_count())
    with self._lockw:
        self._waiting_transfer_coordinators.append(transfer_coordinator)
append item to waiting queue
43,254
def free_processed_queue(self):
    """Release Aspera SDK resources held by all processed coordinators."""
    with self._lock:
        if not self._processed_coordinators:
            return
        for coordinator in self._processed_coordinators:
            coordinator.free_resources()
        # Everything has been freed: drop the references.
        self._processed_coordinators = []
call the Aspera sdk to freeup resources
43,255
def is_stop(self):
    """Return True when either stop flag is set; also drains the processed queue."""
    # Piggy-back resource cleanup on this frequently-polled check.
    if self._processed_coordinators:
        self.free_processed_queue()
    return self._cancel_called or self._processing_stop
has either of the stop processing flags been set
43,256
def _process_waiting_queue(self):
    """Background thread: drain the waiting queue into active transfers.

    For each waiting coordinator: fetch its transfer spec, then start the
    transfer, never exceeding the configured max concurrent ascp count.
    """
    logger.info("Queue processing thread started")
    while not self.is_stop():
        # Woken explicitly on queue changes, or every 3s as a fallback.
        self._processing_event.wait(3)
        self._processing_event.clear()
        if self.is_stop():
            break
        while self.waiting_coordinator_count() > 0:
            if self.is_stop():
                break
            # Capacity check: each coordinator may need several ascp sessions.
            _used_slots = self.tracked_coordinator_count(True)
            _free_slots = self._config.ascp_max_concurrent - _used_slots
            if _free_slots <= 0:
                break
            with self._lockw:
                _req_slots = self._waiting_transfer_coordinators[0].session_count
                if _req_slots > _free_slots:
                    break
                _coordinator = self._waiting_transfer_coordinators.popleft()
            self.add_transfer_coordinator(_coordinator)
            if not _coordinator.set_transfer_spec():
                # Spec fetch failed: recycle through the done path.
                self.remove_aspera_coordinator(_coordinator)
            else:
                logger.info("ASCP process queue - Max(%d) InUse(%d) Free(%d) New(%d)" % (self._config.ascp_max_concurrent, _used_slots, _free_slots, _req_slots))
                _coordinator.start_transfer()
    logger.info("Queue processing thread stopped")
    self._processing_stopped_event.set()
Thread that processes the waiting queue: fetches the transfer spec then calls start transfer, ensuring that the max ascp count is not exceeded
43,257
def clear_waiting_coordinators(self, cancel=False):
    """Empty the waiting queue, optionally notifying each entry it was cancelled."""
    with self._lockw:
        if cancel:
            for coordinator in self._waiting_transfer_coordinators:
                coordinator.notify_cancelled("Clear Waiting Queue", False)
        self._waiting_transfer_coordinators.clear()
Remove all entries from the waiting queue, optionally cancelling each one first
43,258
def cancel(self, *args, **kwargs):
    """Cancel all queued items, then attempt to cancel all in-progress items."""
    # Flag first so no new items can be queued while we tear down.
    self._cancel_called = True
    self.clear_waiting_coordinators(cancel=True)
    super(AsperaTransferCoordinatorController, self).cancel(*args, **kwargs)
Cancel all queue items - then attempt to cancel all in progress items
43,259
def wait(self):
    """Block until all in-progress and queued transfers are processed."""
    self._wait_called = True
    # Poll until both the processing and waiting queues are drained.
    while self.tracked_coordinator_count() > 0 or self.waiting_coordinator_count() > 0:
        time.sleep(1)
    super(AsperaTransferCoordinatorController, self).wait()
    self._wait_called = False
Wait until all in progress and queued items are processed
43,260
def execute(self, input_data):
    """Execute the ViewZip worker: summarize a zip sample's contents."""
    meta = input_data['meta']
    if (meta['type_tag'] != 'zip'):
        # Wrong sample type: report an error instead of raising.
        return {'error': self.__class__.__name__ + ': called on ' + meta['type_tag']}
    payload_md5s = input_data['unzip']['payload_md5s']
    view = {'payload_md5s': payload_md5s, 'yara_sigs': input_data['yara_sigs']['matches'].keys()}
    view.update(meta)
    # Pull the meta worker output for each payload inside the zip.
    view['payload_meta'] = [self.workbench.work_request('meta', md5) for md5 in payload_md5s]
    return view
Execute the ViewZip worker
43,261
def set_exception(self, exception):
    """Set the exception on the future (only valid once the transfer is done).

    Raises:
        TransferNotDoneError: the transfer has not completed yet.
    """
    if not self.is_done():
        raise TransferNotDoneError('set_exception can only be called once the transfer is complete.')
    # Override any exception the coordinator may already hold.
    self._coordinator.set_exception(exception, override=True)
Sets the exception on the future .
43,262
def transferReporter(self, xferId, message):
    """Callback invoked by the Aspera SDK with per-session transfer messages.

    Dispatches on message type: INIT registers a new session, DONE /
    ERROR / FILEERROR finish a session, STATS updates progress.
    Returning True when already stopped tells the SDK to stop reporting.
    """
    if self.is_stopped():
        return True
    _asp_message = AsperaMessage(message)
    # Ignore any message type we do not handle.
    if not _asp_message.is_msg_type([enumAsperaMsgType.INIT,
                                     enumAsperaMsgType.DONE,
                                     enumAsperaMsgType.ERROR,
                                     enumAsperaMsgType.FILEERROR,
                                     enumAsperaMsgType.STATS]):
        return
    _session_id = _asp_message.get_session_id()
    _msg = self.debug_id(xferId, _session_id) + " : " + _asp_message._msg_type
    logger.info(_msg)
    with self._session_lock:
        if _asp_message.is_msg_type([enumAsperaMsgType.INIT]):
            # First message for a session: start tracking it.
            assert(_session_id not in self._sessions)
            _session = AsperaSession(_session_id)
            self._sessions[_session_id] = _session
            self.notify_init()
        else:
            _session = self._sessions[_session_id]
        if _asp_message.is_msg_type([enumAsperaMsgType.DONE]):
            # The final byte count may still advance progress before completion.
            if _session.set_bytes_transferred(_asp_message.get_bytes_transferred()):
                self.notify_progress()
            _session.set_success()
            self.notify_done()
        elif _asp_message.is_msg_type([enumAsperaMsgType.ERROR, enumAsperaMsgType.FILEERROR]):
            _session.set_error(_asp_message.get_error_descr())
            self.notify_done(error=True)
        elif _asp_message.is_msg_type([enumAsperaMsgType.STATS]):
            if _session.set_bytes_transferred(_asp_message.get_bytes_transferred()):
                self.notify_progress()
the callback method used by the Aspera SDK during a transfer to report progress, errors, or successful completion
43,263
def start_transfer(self):
    """Hand the transfer spec to the Aspera SDK and begin the transfer.

    Any exception raised by the SDK call is routed to notify_exception().
    """
    try:
        if not self.is_done():
            faspmanager2.startTransfer(self.get_transfer_id(), None, self.get_transfer_spec(), self)
    except Exception as ex:
        self.notify_exception(ex)
pass the transfer spec to the Aspera sdk and start the transfer
43,264
def is_running(self, is_stopped):
    """Return True if the SDK reports this transfer as running.

    :param is_stopped: when truthy, short-circuit to False if this
        coordinator is already stopped/stopping.
    """
    if is_stopped and self.is_stopped():
        return False
    return faspmanager2.isRunning(self.get_transfer_id())
check whether a transfer is currently running
43,265
def is_stopped(self, is_stopping=True):
    """Report whether this transfer is stopped.

    :param is_stopping: when True (default), a transfer that is in the
        process of stopping also counts as stopped.
    """
    stopped = self._is_stopped
    if is_stopping:
        stopped = stopped or self._is_stopping
    return stopped
check whether a transfer is stopped or is being stopped
43,266
def free_resources(self):
    """Stop the transfer (if not already stopped/stopping) to release SDK resources."""
    if not self.is_stopped():
        logger.info("Freeing resources: %s" % self.get_transfer_id())
        self.stop(True)
call stop to free up resources
43,267
def extract_message_value(self, name):
    """Search the current message for ``name:`` and return its trimmed value.

    :param name: field name to look up (colon is appended automatically)
    :returns: the stripped value string, or None when the field is absent
    """
    assert(self._message)
    key = name + ":"
    pos = self._message.find(key)
    if pos < 0:
        return None
    # Skip past "name:" plus the single separator character that follows.
    begin = pos + len(key) + 1
    end = self._message.find("\n", begin)
    return self._message[begin:end].strip()
search message to find and extract a named value
43,268
def set_bytes_transferred(self, bytes_transferred):
    """Record the byte count for this session.

    :returns: True when the caller should emit a progress notification --
        either the count actually changed, or PROGRESS_MSGS_SEND_ALL forces it.
    """
    _changed = False
    if bytes_transferred:
        _changed = (self._bytes_transferred != int(bytes_transferred))
        if _changed:
            self._bytes_transferred = int(bytes_transferred)
            logger.debug("(%s) BytesTransferred: %d" % (self.session_id, self._bytes_transferred))
    if AsperaSession.PROGRESS_MSGS_SEND_ALL:
        return True
    return _changed
set the number of bytes transferred - if it has changed return True
43,269
def set_exception(self, exception):
    """Log the exception and mark the controller status as FAILED."""
    logger.error("%s : %s" % (exception.__class__.__name__, str(exception)))
    self._set_status(enumAsperaControllerStatus.FAILED, exception)
set the exception message and set the status to failed
43,270
def wait(self):
    """Block (no practical timeout) until the done event is set.

    :returns: tuple of (status, exception)
    """
    self._done_event.wait(MAXINT)
    return self._status, self._exception
wait for the done event to be set - no timeout
43,271
def result(self, raise_exception=True):
    """Wait until the transfer is done and return its status.

    :param raise_exception: when True (default), re-raise the first recorded
        exception instead of just returning the status.
    :returns: the final status (FAILED, or the last session's status)
    """
    _status = None
    _exception = None
    self._done_event.wait(MAXINT)
    if self.is_failed():
        _exception = self._exception
        _status = enumAsperaControllerStatus.FAILED
    else:
        # Collect per-session results; keep only the first exception seen.
        # NOTE(review): _status ends up as whichever session iterated last.
        for _session in self._sessions.values():
            _status_tmp, _exception_tmp = _session.wait()
            if _exception_tmp and not _exception:
                _exception = _exception_tmp
            _status = _status_tmp
    if _exception and raise_exception:
        raise _exception
    return _status
Waits until TransferFuture is done and returns the result
43,272
def notify_init(self):
    """Handle a session INIT: bump the session count and, for the first
    session only, run the queued callbacks."""
    _session_count = len(self._sessions)
    self._update_session_count(1, _session_count)
    if _session_count == 1:
        self._run_queued_callbacks()
run the queued callbacks for the first session only
43,273
def notify_done(self, error=False, run_done_callbacks=True):
    """Handle session completion.

    On error: mark every session done and zero the session count.
    Otherwise: decrement the count and return early unless ALL sessions are
    done.  Once complete, optionally run the done callbacks, then set the
    done event.
    """
    if error:
        for _session in self._sessions.values():
            _session.set_done()
        self._session_count = 0
    else:
        self._update_session_count(-1)
        for _session in self._sessions.values():
            if not _session.is_done():
                return
    if run_done_callbacks:
        self._run_done_callbacks()
    self._done_event.set()
if error clear all sessions otherwise check to see if all other sessions are complete then run the done callbacks
43,274
def notify_progress(self):
    """Sum bytes across all sessions and fire the progress callbacks.

    Callbacks fire on every message when PROGRESS_MSGS_SEND_ALL is set,
    otherwise only when the aggregate total actually changed.
    """
    _total = 0
    for _session in self._sessions.values():
        _total += _session.bytes_transferred
    if AsperaSession.PROGRESS_MSGS_SEND_ALL:
        self._run_progress_callbacks(_total)
    else:
        if self._total_bytes_transferred != _total:
            self._total_bytes_transferred = _total
            self._run_progress_callbacks(_total)
only call the progress callback if total has changed or PROGRESS_MSGS_SEND_ALL is set
43,275
def notify_exception(self, exception, run_done_callbacks=True):
    """Record an exception, try to cancel a running transfer, then signal done.

    The cancel is retried up to 5 times, one second apart, while the
    transfer is still running.
    """
    logger.error("%s : %s" % (exception.__class__.__name__, str(exception)))
    self._exception = exception
    if self.is_running(True):
        for _cnt in range(0, 5):
            if not self._cancel():
                time.sleep(1)
            else:
                break
    self.notify_done(error=True, run_done_callbacks=run_done_callbacks)
set the exception message stop transfer if running and set the done event
43,276
def is_success(self):
    """Return True only when every session reports success (vacuously True
    when there are no sessions)."""
    return all(_session.is_success() for _session in self._sessions.values())
check all sessions to see if they have completed successfully
43,277
def set_transfer_spec(self):
    """Invoke the user-supplied transfer-spec function.

    :returns: True on success; False after recording an AsperaTransferSpecError.
    """
    _ret = False
    try:
        self._args.transfer_spec_func(self._args)
        _ret = True
    except Exception as ex:
        # Done callbacks are deliberately not run here (False flag); the
        # caller decides how to dispose of this coordinator.
        self.notify_exception(AsperaTransferSpecError(ex), False)
    return _ret
run the function to set the transfer spec on error set associated exception
43,278
def add_done_callback(self, function, **kwargs):
    """Register *function* (with the given kwargs pre-bound) to run when the
    transfer completes."""
    with self._callbacks_lock:
        _function = functools.partial(function, **kwargs)
        self._done_callbacks.append(_function)
Add a done callback to be invoked when transfer is complete
43,279
def add_subscribers(self, subscribers, **kwargs):
    """Register 'done', 'queued' and 'progress' subscriber callbacks (no-op
    when *subscribers* is empty/None)."""
    if subscribers:
        with self._callbacks_lock:
            self._add_subscribers_for_type('done', subscribers, self._done_callbacks, **kwargs)
            self._add_subscribers_for_type('queued', subscribers, self._queued_callbacks, **kwargs)
            self._add_subscribers_for_type('progress', subscribers, self._progress_callbacks, **kwargs)
Add a callbacks to be invoked during transfer
43,280
def _run_progress_callbacks(self, bytes_transferred):
    """Pass the byte count to every progress callback.

    Callback errors are logged and swallowed so one bad subscriber cannot
    break the transfer; a zero/falsy count skips all callbacks.
    """
    if bytes_transferred:
        for callback in self._progress_callbacks:
            try:
                callback(bytes_transferred=bytes_transferred)
            except Exception as ex:
                logger.error("Exception: %s" % str(ex))
pass the number of bytes process to progress callbacks
43,281
def _run_done_callbacks(self):
    """Run each done callback once, then clear the list so repeated
    notify_done calls cannot re-run them.  Callback errors are logged,
    not propagated."""
    with self._callbacks_lock:
        for callback in self._done_callbacks:
            try:
                callback()
            except Exception as ex:
                logger.error("Exception: %s" % str(ex))
                logger.error("Exception raised in %s." % callback, exc_info=True)
        self._done_callbacks = []
Run the callbacks and remove the callbacks from the internal List so they do not get run again if done is notified more than once .
43,282
def total_span(self) -> BioCLocation:
    """Return one location spanning all of this annotation's (possibly
    discontinuous) locations.

    :raises ValueError: if the annotation has no locations
    """
    if not self.locations:
        raise ValueError('BioCAnnotation must have at least one location')
    first = min(loc.offset for loc in self.locations)
    last = max(loc.end for loc in self.locations)
    return BioCLocation(first, last - first)
The total span of this annotation . Discontinued locations will be merged .
43,283
def get_node(self, role: str, default=None) -> BioCNode:
    """Return the first node whose role equals *role*, or *default* when
    no node matches."""
    for node in self.nodes:
        if node.role == role:
            return node
    return default
Get the first node with role
43,284
def get_sentence(self, offset: int) -> BioCSentence or None:
    """Return the sentence whose offset equals *offset*, or None when absent."""
    return next((s for s in self.sentences if s.offset == offset), None)
Gets sentence with specified offset
43,285
def of(cls, *passages: BioCPassage):
    """Build a document from one or more passages.

    :param passages: the passages to add, in order
    :returns: a new document containing every passage
    :raises ValueError: if no passages are given or any passage is None
    """
    if len(passages) <= 0:
        raise ValueError("There has to be at least one passage.")
    # Use cls() instead of the hard-coded BioCDocument() so subclasses get
    # instances of themselves from this alternate constructor.
    c = cls()
    for passage in passages:
        if passage is None:
            raise ValueError('Passage is None')
        c.add_passage(passage)
    return c
Returns a collection passages
43,286
def of(cls, *documents: BioCDocument):
    """Build a collection from one or more documents.

    :param documents: the documents to add, in order
    :returns: a new collection containing every document
    :raises ValueError: if no documents are given or any document is None
    """
    if len(documents) <= 0:
        raise ValueError("There has to be at least one document.")
    # Use cls() instead of the hard-coded BioCCollection() so subclasses get
    # instances of themselves from this alternate constructor.
    c = cls()
    for document in documents:
        if document is None:
            raise ValueError('Document is None')
        c.add_document(document)
    return c
Returns a collection documents
43,287
def add_it(workbench, file_list, labels):
    """Store each file as a workbench sample and add a graph node for it.

    :param workbench: connected workbench (RPC) client
    :param file_list: paths of files to upload
    :param labels: node labels applied to every added sample
    :returns: list of md5s of the stored samples
    """
    md5s = []
    for filename in file_list:
        # NOTE(review): compares the FULL path against '.DS_Store'; a
        # basename comparison was probably intended -- confirm with callers.
        if filename != '.DS_Store':
            with open(filename, 'rb') as pe_file:
                base_name = os.path.basename(filename)
                md5 = workbench.store_sample(pe_file.read(), base_name, 'exe')
                # Node id is the md5; short md5 prefix is the display name.
                workbench.add_node(md5, md5[:6], labels)
                md5s.append(md5)
    return md5s
Add the given file_list to workbench as samples also add them as nodes .
43,288
def jaccard_sims(feature_list, threshold=0.5):
    """Compute pairwise Jaccard similarities across all observations.

    :param feature_list: list of dicts with 'md5' and 'features' keys
    :param threshold: report only pairs with similarity strictly above this
        value (default 0.5, the previously hard-coded cutoff)
    :returns: list of {'source', 'target', 'sim'} dicts; every ordered pair
        is considered, so (a, b) and (b, a) are both reported
    """
    sim_info_list = []
    # The original code reused the name 'feature_info' for both loops,
    # shadowing the outer loop variable; distinct names avoid that trap.
    for source_info in feature_list:
        md5_source = source_info['md5']
        features_source = source_info['features']
        for target_info in feature_list:
            md5_target = target_info['md5']
            features_target = target_info['features']
            if md5_source == md5_target:
                continue
            sim = jaccard_sim(features_source, features_target)
            if sim > threshold:
                sim_info_list.append({'source': md5_source, 'target': md5_target, 'sim': sim})
    return sim_info_list
Compute Jaccard similarities between all the observations in the feature list .
43,289
def jaccard_sim(features1, features2):
    """Compute the Jaccard similarity of two feature collections.

    Jaccard similarity is |intersection| / |union|.  (The previous version
    divided by max(|A|, |B|) instead of the union size, which is not the
    Jaccard index it was documented as.)

    :returns: a float in [0, 1]; 0 when both collections are empty
    """
    set1 = set(features1)
    set2 = set(features2)
    union = set1 | set2
    if not union:
        # Both inputs empty: define similarity as 0 rather than raising.
        return 0
    return len(set1 & set2) / float(len(union))
Compute similarity between two sets using Jaccard similarity .
43,290
def run():
    """Demo client: build a similarity graph in workbench from PE-file features.

    Loads a handful of known-bad and known-good PE samples, extracts several
    feature sets, links samples whose Jaccard similarity exceeds the cutoff,
    and finally adds ssdeep-based similarity edges.  (Python 2 script: uses
    print statements.)
    """
    args = client_helper.grab_server_args()
    workbench = zerorpc.Client(timeout=300, heartbeat=60)
    workbench.connect('tcp://' + args['server'] + ':' + args['port'])
    # First five samples from each of the bad/good PE corpora.
    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/pe/bad')
    bad_files = [os.path.join(data_path, child) for child in os.listdir(data_path)][:5]
    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/pe/good')
    good_files = [os.path.join(data_path, child) for child in os.listdir(data_path)][:5]
    workbench.clear_graph_db()
    all_md5s = add_it(workbench, bad_files, ['exe', 'bad']) + add_it(workbench, good_files, ['exe', 'good'])
    sample_set = workbench.store_sample_set(all_md5s)
    # Pull three different sparse feature sets for every sample in the set.
    import_gen = workbench.set_work_request('pe_features', sample_set, ['md5', 'sparse_features.imported_symbols'])
    imports = [{'md5': r['md5'], 'features': r['imported_symbols']} for r in import_gen]
    warning_gen = workbench.set_work_request('pe_features', sample_set, ['md5', 'sparse_features.pe_warning_strings'])
    warnings = [{'md5': r['md5'], 'features': r['pe_warning_strings']} for r in warning_gen]
    string_gen = workbench.set_work_request('strings', sample_set, ['md5', 'string_list'])
    strings = [{'md5': r['md5'], 'features': r['string_list']} for r in string_gen]
    # One relationship type per feature set for similar pairs.
    sims = jaccard_sims(imports)
    for sim_info in sims:
        workbench.add_rel(sim_info['source'], sim_info['target'], 'imports')
    sims = jaccard_sims(warnings)
    for sim_info in sims:
        workbench.add_rel(sim_info['source'], sim_info['target'], 'warnings')
    sims = jaccard_sims(strings)
    for sim_info in sims:
        workbench.add_rel(sim_info['source'], sim_info['target'], 'strings')
    # ssdeep fuzzy-hash similarity edges come from the pe_deep_sim worker.
    results = workbench.set_work_request('pe_deep_sim', sample_set)
    for result in list(results):
        for sim_info in result['sim_list']:
            workbench.add_rel(result['md5'], sim_info['md5'], 'ssdeep')
    print 'All done: go to http://localhost:7474/browser and execute this query: "%s"' % ('match (n)-[r]-() return n,r')
This client generates a similarity graph from features in PE Files .
43,291
def pad_char(text: str, width: int, char: str = '\n') -> str:
    """Pad *text* with *char* until it is exactly *width* characters long.

    :param text: string to pad
    :param width: required final length; must be >= len(text)
    :param char: pad character (default newline)
    :returns: the padded string (unchanged if already exactly *width* long)
    :raises ValueError: if *text* is already longer than *width*
    """
    dis = width - len(text)
    if dis < 0:
        # The original raised a bare ValueError; add a helpful message.
        raise ValueError('text of length %d does not fit in width %d' % (len(text), width))
    if dis > 0:
        text += char * dis
    return text
Pads a text until length width .
43,292
def pretty_print(source, dest):
    """Pretty-print the XML in *source* to the file at *dest*.

    Preserves the source document's encoding and standalone declaration.
    :param source: path of the XML input (path-like objects are coerced
        via str())
    :param dest: output file path, written in binary mode
    """
    parser = etree.XMLParser(remove_blank_text=True)
    # Coerce path-like objects; etree.parse expects a string/file here.
    if not isinstance(source, str):
        source = str(source)
    tree = etree.parse(source, parser)
    docinfo = tree.docinfo
    with open(dest, 'wb') as fp:
        fp.write(etree.tostring(tree, pretty_print=True, encoding=docinfo.encoding, standalone=docinfo.standalone))
Pretty print the XML file
43,293
def shorten_text(text: str):
    """Return repr(text), abbreviated to 'head ... tail' when the text is
    longer than 40 characters."""
    if len(text) > 40:
        text = text[:17] + ' ... ' + text[-17:]
    return repr(text)
Return a short repr of text if it is longer than 40
43,294
def execute(self, input_data):
    """Compute generic meta data (hashes, libmagic types, sizes) for any sample.

    :returns: the populated self.meta dictionary
    """
    raw_bytes = input_data['sample']['raw_bytes']
    self.meta['md5'] = hashlib.md5(raw_bytes).hexdigest()
    self.meta['tags'] = input_data['tags']['tags']
    self.meta['type_tag'] = input_data['sample']['type_tag']
    # libmagic identification only needs the head of the buffer.
    with magic.Magic() as mag:
        self.meta['file_type'] = mag.id_buffer(raw_bytes[:1024])
    with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as mag:
        self.meta['mime_type'] = mag.id_buffer(raw_bytes[:1024])
    with magic.Magic(flags=magic.MAGIC_MIME_ENCODING) as mag:
        try:
            self.meta['encoding'] = mag.id_buffer(raw_bytes[:1024])
        except magic.MagicError:
            # Some buffers have no detectable text encoding.
            self.meta['encoding'] = 'unknown'
    self.meta['file_size'] = len(raw_bytes)
    self.meta['filename'] = input_data['sample']['filename']
    self.meta['import_time'] = input_data['sample']['import_time']
    self.meta['customer'] = input_data['sample']['customer']
    self.meta['length'] = input_data['sample']['length']
    return self.meta
This worker computes meta data for any file type .
43,295
def execute(self, input_data):
    """Run the Strings worker: extract strings from the sample's raw bytes
    using the worker's precompiled pattern."""
    sample_bytes = input_data['sample']['raw_bytes']
    return {'string_list': self.find_strings.findall(sample_bytes)}
Execute the Strings worker
43,296
def execute(self, input_data):
    """Run the PEIndicators worker: parse the PE and collect indicator hits.

    :returns: {'indicator_list': [...]} on success, or an error dict when
        the sample cannot be parsed by pefile
    """
    raw_bytes = input_data['sample']['raw_bytes']
    try:
        self.pefile_handle = pefile.PE(data=raw_bytes, fast_load=False)
    except (AttributeError, pefile.PEFormatError) as error:
        # 'except ... as' replaces the Python-2-only comma syntax; it is
        # valid on Python 2.6+ and Python 3 alike.
        return {'error': str(error), 'indicator_list': [{'Error': 'PE module failed!'}]}
    indicators = []
    # Every pefile parser warning becomes an indicator.
    indicators += [{'description': warn, 'severity': 2, 'category': 'PE_WARN'}
                   for warn in self.pefile_handle.get_warnings()]
    # Run each registered check; a truthy return is an indicator hit.
    check_methods = self._get_check_methods()
    for check_method in check_methods:
        hit_data = check_method()
        if hit_data:
            indicators.append(hit_data)
    return {'indicator_list': indicators}
Execute the PEIndicators worker
43,297
def check_checksum_mismatch(self):
    """Indicator: PE header CheckSum differs from the freshly computed checksum.

    :returns: indicator dict on mismatch, else None
    """
    if self.pefile_handle.OPTIONAL_HEADER:
        if self.pefile_handle.OPTIONAL_HEADER.CheckSum != self.pefile_handle.generate_checksum():
            return {'description': 'Reported Checksum does not match actual checksum', 'severity': 2, 'category': 'MALFORMED'}
    return None
Checking for a checksum that doesn t match the generated checksum
43,298
def check_nonstandard_section_name(self):
    """Indicator: PE contains section names outside the usual well-known set.

    :returns: indicator dict listing the odd names, else None
    """
    std_sections = ['.text', '.bss', '.rdata', '.data', '.rsrc', '.edata', '.idata',
                    '.pdata', '.debug', '.reloc', '.stab', '.stabstr', '.tls',
                    '.crt', '.gnu_deb', '.eh_fram', '.exptbl', '.rodata']
    # '/0'..'/199' names are also accepted as standard (presumably COFF
    # string-table offset names -- verify against the PE spec).
    for i in range(200):
        std_sections.append('/' + str(i))
    non_std_sections = []
    for section in self.pefile_handle.sections:
        name = convert_to_ascii_null_term(section.Name).lower()
        if (name not in std_sections):
            non_std_sections.append(name)
    if non_std_sections:
        return {'description': 'Section(s) with a non-standard name, tamper indication',
                'severity': 3, 'category': 'MALFORMED', 'attributes': non_std_sections}
    return None
Checking for a non-standard section name
43,299
def check_image_size_incorrect(self):
    """Indicator: reported SizeOfImage does not match the computed image size.

    The expected size is the highest section's VirtualAddress plus its
    VirtualSize rounded up to the section alignment.
    :returns: indicator dict on mismatch, else None
    """
    last_virtual_address = 0
    last_virtual_size = 0
    section_alignment = self.pefile_handle.OPTIONAL_HEADER.SectionAlignment
    total_image_size = self.pefile_handle.OPTIONAL_HEADER.SizeOfImage
    # Find the section with the highest virtual address.
    for section in self.pefile_handle.sections:
        if section.VirtualAddress > last_virtual_address:
            last_virtual_address = section.VirtualAddress
            last_virtual_size = section.Misc_VirtualSize
    # Round the last section's size up to the alignment boundary.  The
    # original unconditionally added (alignment - size % alignment), which
    # over-padded by a full alignment unit when the size was already
    # aligned, producing false positives.
    remainder = last_virtual_size % section_alignment
    if remainder:
        last_virtual_size += section_alignment - remainder
    if (last_virtual_address + last_virtual_size) != total_image_size:
        return {'description': 'Image size does not match reported size', 'severity': 3, 'category': 'MALFORMED'}
    return None
Checking if the reported image size matches the actual image size