idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
43,300
def check_section_unaligned(self):
    """Check whether any PE section's raw-data pointer violates the file
    alignment declared in the optional header.

    Returns an indicator dict (severity 3, category MALFORMED) listing the
    offending section names, or None when every section is aligned.
    """
    alignment = self.pefile_handle.OPTIONAL_HEADER.FileAlignment
    bad_names = [sec.Name for sec in self.pefile_handle.sections
                 if sec.PointerToRawData % alignment]
    if not bad_names:
        return None
    return {'description': 'Unaligned section, tamper indication',
            'severity': 3,
            'category': 'MALFORMED',
            'attributes': bad_names}
Checking if any of the sections are unaligned
43,301
def check_section_oversized(self):
    """Check whether any section's raw data extends beyond SizeOfImage.

    Returns an indicator dict for the first oversized section found,
    or None when every section fits inside the declared image size.
    """
    image_size = self.pefile_handle.OPTIONAL_HEADER.SizeOfImage
    for sec in self.pefile_handle.sections:
        section_end = sec.PointerToRawData + sec.SizeOfRawData
        if section_end > image_size:
            return {'description': 'Oversized section, storing addition data within the PE',
                    'severity': 3,
                    'category': 'MALFORMED',
                    'attributes': sec.Name}
    return None
Checking if any of the sections go past the total size of the image
43,302
def encode_location(location: BioCLocation):
    """Encode a single BioCLocation as a <location> XML element with
    string-valued 'offset' and 'length' attributes."""
    attrs = {'offset': str(location.offset),
             'length': str(location.length)}
    return etree.Element('location', attrs)
Encode a single location .
43,303
def encode_collection(collection):
    """Encode a single BioCCollection as an XML <collection> element.

    Writes source/date/key sub-elements, the collection infons, and one
    encoded <document> child per document.
    """
    tree = etree.Element('collection')
    etree.SubElement(tree, 'source').text = collection.source
    etree.SubElement(tree, 'date').text = collection.date
    etree.SubElement(tree, 'key').text = collection.key
    # infons (key/value metadata) are encoded by the module-level helper
    encode_infons(tree, collection.infons)
    for doc in collection.documents:
        tree.append(encode_document(doc))
    return tree
Encode a single collection .
43,304
def default(self, obj):
    """Encoder hook: return the XML tree for a BioC object.

    Raises:
        TypeError: if *obj* is neither a BioCDocument nor a BioCCollection.
    """
    if isinstance(obj, BioCDocument):
        return encode_document(obj)
    if isinstance(obj, BioCCollection):
        return encode_collection(obj)
    raise TypeError(f'Object of type {obj.__class__.__name__} is not BioC XML serializable')
Implement this method in a subclass such that it returns a tree for o .
43,305
def write_document(self, document: BioCDocument):
    """Encode a single document and push the resulting tree to the writer.

    The __writer attribute is name-mangled (defined on the enclosing class);
    presumably a generator-based sink -- confirm against the class definition.
    """
    tree = self.encoder.encode(document)
    self.__writer.send(tree)
Encode and write a single document .
43,306
def run():
    """Generate customer reports on all the samples in workbench.

    Python 2 client: connects via zerorpc, builds a sample set and prints
    the 'view_customer' result for every sample.
    """
    args = client_helper.grab_server_args()
    workbench = zerorpc.Client(timeout=300, heartbeat=60)
    workbench.connect('tcp://' + args['server'] + ':' + args['port'])
    all_set = workbench.generate_sample_set()
    results = workbench.set_work_request('view_customer', all_set)
    for customer in results:
        print customer['customer']
This client generates customer reports on all the samples in workbench .
43,307
def validate(collection, onerror: Callable[[str, List], None] = None):
    """Validate a BioC data structure, reporting problems via *onerror*."""
    validator = BioCValidator(onerror)
    validator.validate(collection)
Validate BioC data structure .
43,308
def validate_doc(self, document: BioCDocument):
    """Validate a single document.

    Collects every annotation/relation in the document (at document,
    passage and sentence level) so relation node references can be resolved
    regardless of nesting, then validates annotations against the text they
    span and relations against the collected pool.
    """
    annotations = []
    annotations.extend(document.annotations)
    annotations.extend(document.relations)
    for passage in document.passages:
        annotations.extend(passage.annotations)
        annotations.extend(passage.relations)
        for sentence in passage.sentences:
            annotations.extend(sentence.annotations)
            annotations.extend(sentence.relations)
    self.current_docid = document.id
    self.traceback.append(document)
    text = self.__get_doc_text(document)
    self.__validate_ann(document.annotations, text, 0)
    self.__validate_rel(annotations, document.relations, f'document {document.id}')
    for passage in document.passages:
        self.traceback.append(passage)
        text = self.__get_passage_text(passage)
        self.__validate_ann(passage.annotations, text, passage.offset)
        # BUGFIX: the two f-string literals below were unterminated in the
        # original source (f'document {document.id} ) -- a SyntaxError.
        self.__validate_rel(annotations, passage.relations, f'document {document.id}')
        for sentence in passage.sentences:
            self.traceback.append(sentence)
            self.__validate_ann(sentence.annotations, sentence.text, sentence.offset)
            self.__validate_rel(annotations, sentence.relations, f'document {document.id}')
            self.traceback.pop()
        self.traceback.pop()
    self.traceback.pop()
Validate a single document .
43,309
def validate(self, collection: BioCCollection):
    """Validate a single collection by validating each of its documents."""
    for doc in collection.documents:
        self.validate_doc(doc)
Validate a single collection .
43,310
def run():
    """Push PE files into workbench and index worker output into ELS.

    Python 2 client: stores up to 20 'bad' PE samples, indexes their
    'strings' and 'pe_features' worker output, then demonstrates a facet
    query and a fuzzy query against the indexer.
    """
    args = client_helper.grab_server_args()
    workbench = zerorpc.Client(timeout=300, heartbeat=60)
    workbench.connect('tcp://' + args['server'] + ':' + args['port'])
    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/pe/bad')
    # only the first 20 files are processed
    file_list = [os.path.join(data_path, child) for child in os.listdir(data_path)][:20]
    for filename in file_list:
        if '.DS_Store' in filename:
            continue
        with open(filename, 'rb') as f:
            base_name = os.path.basename(filename)
            md5 = workbench.store_sample(f.read(), base_name, 'exe')
            workbench.index_worker_output('strings', md5, 'strings', None)
            print '\n<<< Strings for PE: %s Indexed>>>' % (base_name)
            workbench.index_worker_output('pe_features', md5, 'pe_features', None)
            print '<<< Features for PE: %s Indexed>>>' % (base_name)
    facet_query = '{"facets" : {"tag" : {"terms" : {"field" : "string_list"}}}}'
    results = workbench.search_index('strings', facet_query)
    try:
        print '\nQuery: %s' % facet_query
        print 'Number of hits: %d' % results['hits']['total']
        print 'Max Score: %f' % results['hits']['max_score']
        pprint.pprint(results['facets'])
    except TypeError:
        # a Stub Indexer returns a shape the prints above cannot handle
        print 'Probably using a Stub Indexer, if you want an ELS Indexer see the readme'
    # adjacent string literals are concatenated into a single query string
    fuzzy_query = '{"fields":["md5","sparse_features.imported_symbols"],' '"query": {"fuzzy" : {"sparse_features.imported_symbols" : "loadlibrary"}}}'
    results = workbench.search_index('pe_features', fuzzy_query)
    try:
        print '\nQuery: %s' % fuzzy_query
        print 'Number of hits: %d' % results['hits']['total']
        print 'Max Score: %f' % results['hits']['max_score']
        pprint.pprint([(hit['fields']['md5'], hit['fields']['sparse_features.imported_symbols']) for hit in results['hits']['hits']])
    except TypeError:
        print 'Probably using a Stub Indexer, if you want an ELS Indexer see the readme'
This client pushes PE Files - > ELS Indexer .
43,311
def convert_to_utf8(string):
    """Convert *string* to a UTF-8 encoded byte string (best effort).

    Python 2 only (relies on the `unicode` builtin).  Non-text inputs that
    cannot be decoded are stringified via str().
    """
    if (isinstance(string, unicode)):
        return string.encode('utf-8')
    try:
        u = unicode(string, 'utf-8')
    except TypeError:
        # not a byte string (e.g. an int) -- fall back to its str() form
        return str(string)
    utf8 = u.encode('utf-8')
    return utf8
Convert string to UTF8
43,312
def execute(self, input_data):
    """Process the input bytes with pefile.

    Returns a dict with 'dense_features'/'sparse_features' (plus 'tags'),
    or an 'error' entry with empty feature lists when pefile cannot parse
    the sample.
    """
    raw_bytes = input_data['sample']['raw_bytes']
    pefile_handle, error_str = self.open_using_pefile('unknown', raw_bytes)
    if not pefile_handle:
        return {'error': error_str, 'dense_features': [], 'sparse_features': []}
    dense_features, sparse_features = self.extract_features_using_pefile(pefile_handle)
    return {'dense_features': dense_features, 'sparse_features': sparse_features,
            'tags': input_data['tags']['tags']}
Process the input bytes with pefile
43,313
def open_using_pefile(input_name, input_bytes):
    """Open the PE file using the Python pefile module.

    Returns (pe_handle, None) on success or (None, error_str) on failure.
    Python 2 except-comma syntax.
    """
    try:
        pef = pefile.PE(data=input_bytes, fast_load=False)
    except (AttributeError, pefile.PEFormatError), error:
        print 'warning: pe_fail (with exception from pefile module) on file: %s' % input_name
        error_str = '(Exception):, %s' % (str(error))
        return None, error_str
    # sanity check: must have a PE type, an optional header and at least
    # 7 data-directory entries to be usable downstream
    if pef.PE_TYPE is None or pef.OPTIONAL_HEADER is None or len(pef.OPTIONAL_HEADER.DATA_DIRECTORY) < 7:
        print 'warning: pe_fail on file: %s' % input_name
        error_str = 'warning: pe_fail on file: %s' % input_name
        return None, error_str
    return pef, None
Open the PE File using the Python pefile module .
43,314
def read_log(self, logfile):
    """Yield each row of a Bro log as a type-cast dict (memory-efficient
    generator).

    Reads from the start of *logfile*; stops at the '#close' footer line.
    """
    logfile.seek(0)
    field_names, _ = self._parse_bro_header(logfile)
    while 1:
        _line = next(logfile).strip()
        if not _line.startswith('#close'):
            yield self._cast_dict(dict(zip(field_names, _line.split(self.delimiter))))
        else:
            # '#close' marks the end of the log; brief pause then stop
            time.sleep(.1)
            break
The read_log method returns a memory efficient generator for rows in a Bro log .
43,315
def _parse_bro_header ( self , logfile ) : _line = next ( logfile ) while ( not _line . startswith ( '#fields' ) ) : _line = next ( logfile ) _field_names = _line . strip ( ) . split ( self . delimiter ) [ 1 : ] _line = next ( logfile ) _field_types = _line . strip ( ) . split ( self . delimiter ) [ 1 : ] return _field_names , _field_types
This method tries to parse the Bro log header section .
43,316
def _cast_dict(self, data_dict):
    """Cast every value in *data_dict* (in place) to its proper type
    instead of leaving everything as a string from the log file.

    Python 2 dict iteration (iteritems); drops the bulky 'resp_body_data'
    field when present.
    """
    for key, value in data_dict.iteritems():
        data_dict[key] = self._cast_value(value)
    if 'resp_body_data' in data_dict:
        del data_dict['resp_body_data']
    return data_dict
Internal method that makes sure any dictionary elements are properly cast into the correct types instead of just treating everything like a string from the csv file .
43,317
def _cast_value ( self , value ) : if ( self . convert_datetimes ) : try : date_time = datetime . datetime . fromtimestamp ( float ( value ) ) if datetime . datetime ( 1970 , 1 , 1 ) > date_time : raise ValueError else : return date_time except ValueError : pass tests = ( int , float , str ) for test in tests : try : return test ( value ) except ValueError : continue return value
Internal method that makes sure every value in dictionary is properly cast into the correct types instead of just treating everything like a string from the csv file .
43,318
def run():
    """Show workbench extracting files from zip samples.

    Python 2 client: stores each zip, prints its 'view' output, runs the
    'unzip' worker and prints meta for every extracted child md5.
    """
    args = client_helper.grab_server_args()
    workbench = zerorpc.Client(timeout=300, heartbeat=60)
    workbench.connect('tcp://' + args['server'] + ':' + args['port'])
    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/zip')
    file_list = [os.path.join(data_path, child) for child in os.listdir(data_path)]
    for filename in file_list:
        with open(filename, 'rb') as f:
            base_name = os.path.basename(filename)
            md5 = workbench.store_sample(f.read(), base_name, 'zip')
            results = workbench.work_request('view', md5)
            print 'Filename: %s ' % (base_name)
            pprint.pprint(results)
            results = workbench.work_request('unzip', md5)
            print '\n*** Filename: %s ***' % (base_name)
            for child_md5 in results['unzip']['payload_md5s']:
                pprint.pprint(workbench.work_request('meta', child_md5))
This client shows workbench extracting files from a zip file .
43,319
def execute(self, input_data):
    """Execute the VTQuery worker.

    Looks the sample's md5 up on VirusTotal and returns the report (minus
    excluded fields) plus the top-5 detection names, or an error/not_found
    marker.
    """
    md5 = input_data['meta']['md5']
    response = requests.get('http://www.virustotal.com/vtapi/v2/file/report',
                            params={'apikey': self.apikey, 'resource': md5, 'allinfo': 1})
    try:
        vt_output = response.json()
    except ValueError:
        # empty or non-JSON body -- typically a rate-limited response
        return {'vt_error': 'VirusTotal Query Error, no valid response... past per min quota?'}
    # keep everything except the excluded (bulky/uninteresting) fields
    output = {field: vt_output[field] for field in vt_output.keys() if field not in self.exclude}
    not_found = False if output else True
    output['file_type'] = input_data['meta']['file_type']
    if not_found:
        output['not_found'] = True
        return output
    # tally per-engine detection names; keep only the five most common
    scan_results = collections.Counter()
    for scan in vt_output['scans'].values():
        if 'result' in scan:
            if scan['result']:
                scan_results[scan['result']] += 1
    output['scan_results'] = scan_results.most_common(5)
    return output
Execute the VTQuery worker
43,320
def get_peid_db():
    """Load the PEiD signature database (peid_userdb.txt) shipped next to
    this module.

    Returns:
        peutils.SignatureDatabase built from the file contents.

    Raises:
        RuntimeError: if peid_userdb.txt is not found next to this module.
    """
    my_dir = os.path.dirname(os.path.realpath(__file__))
    db_path = os.path.join(my_dir, 'peid_userdb.txt')
    if not os.path.exists(db_path):
        raise RuntimeError('peid could not find peid_userdb.txt under: %s' % db_path)
    # BUGFIX: the original leaked the file handle via open(...).read();
    # use a context manager so the file is always closed.
    with open(db_path, 'rb') as db_file:
        signatures = peutils.SignatureDatabase(data=db_file.read())
    return signatures
Grab the peid_userdb . txt file from local disk
43,321
def execute(self, input_data):
    """Execute the PEIDWorker: return PEiD signature matches for the sample.

    Python 2 except-comma syntax; parse failures yield an 'error' entry
    with an empty match list.
    """
    raw_bytes = input_data['sample']['raw_bytes']
    try:
        pefile_handle = pefile.PE(data=raw_bytes, fast_load=False)
    except (AttributeError, pefile.PEFormatError), error:
        return {'error': str(error), 'match_list': []}
    peid_match = self.peid_features(pefile_handle)
    return {'match_list': peid_match}
Execute the PEIDWorker
43,322
def peid_features(self, pefile_handle):
    """Run the PEiD signature match on *pefile_handle*.

    Always returns a list: the matches if any, otherwise an empty list.
    """
    matches = self.peid_sigs.match(pefile_handle)
    if matches:
        return matches
    return []
Get features from PEid signature database
43,323
def run():
    """Pull PCAP files through workbench to build a report.

    Stores each pcap, runs 'view_pcap' plus 'meta', and collects one result
    dict per file (with a short 'filename').  Uses the module-level
    WORKBENCH client.
    """
    global WORKBENCH
    args = client_helper.grab_server_args()
    WORKBENCH = zerorpc.Client(timeout=300, heartbeat=60)
    WORKBENCH.connect('tcp://' + args['server'] + ':' + args['port'])
    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/pcap')
    file_list = [os.path.join(data_path, child) for child in os.listdir(data_path)]
    results = []
    for filename in file_list:
        if '.DS_Store' in filename:
            continue
        with open(filename, 'rb') as f:
            md5 = WORKBENCH.store_sample(f.read(), filename, 'pcap')
            result = WORKBENCH.work_request('view_pcap', md5)
            result.update(WORKBENCH.work_request('meta', result['view_pcap']['md5']))
            # keep only the basename of the stored path
            result['filename'] = result['meta']['filename'].split('/')[-1]
            results.append(result)
    return results
This client pulls PCAP files for building report .
43,324
def show_files(md5):
    """Render the md5_view template with the 'view' worker output for *md5*.

    Redirects to '/' when the global WORKBENCH client is not connected.
    """
    if not WORKBENCH:
        return flask.redirect('/')
    md5_view = WORKBENCH.work_request('view', md5)
    return flask.render_template('templates/md5_view.html', md5_view=md5_view['view'], md5=md5)
Renders template with view of the md5 .
43,325
def show_md5_view(md5):
    """Render the md5_view template with the streamed sample for *md5*.

    Redirects to '/' when the global WORKBENCH client is not connected.
    """
    if not WORKBENCH:
        return flask.redirect('/')
    md5_view = WORKBENCH.stream_sample(md5)
    return flask.render_template('templates/md5_view.html', md5_view=list(md5_view), md5=md5)
Renders template with stream_sample of the md5 .
43,326
def execute(self, input_data):
    """Put the output of pe_features into a pair of stored DataFrames.

    Flattens the dense and sparse feature dicts (tagging each row with its
    md5/tags), packs each DataFrame with msgpack and stores both back into
    workbench, returning their storage md5s.  Python 2 worker.
    """
    if 'sample' in input_data:
        # meant for sample_sets; tolerate a single sample with a warning
        print 'Warning: PEFeaturesDF is supposed to be called on a sample_set'
        self.samples.append(input_data['sample']['md5'])
    else:
        self.samples = input_data['sample_set']['md5_list']
    sample_set = self.workbench.store_sample_set(self.samples)
    dense_features = self.workbench.set_work_request('pe_features', sample_set, ['md5', 'tags', 'dense_features'])
    flat_features = []
    for feat in dense_features:
        feat['dense_features'].update({'md5': feat['md5'], 'tags': feat['tags']})
        flat_features.append(feat['dense_features'])
    dense_df = pd.DataFrame(flat_features)
    df_packed = dense_df.to_msgpack()
    dense_df_md5 = self.workbench.store_sample(df_packed, 'pe_features_dense_df', 'dataframe')
    sparse_features = self.workbench.set_work_request('pe_features', sample_set, ['md5', 'tags', 'sparse_features'])
    flat_features = []
    for feat in sparse_features:
        feat['sparse_features'].update({'md5': feat['md5'], 'tags': feat['tags']})
        flat_features.append(feat['sparse_features'])
    sparse_df = pd.DataFrame(flat_features)
    df_packed = sparse_df.to_msgpack()
    sparse_df_md5 = self.workbench.store_sample(df_packed, 'pe_features_sparse_df', 'dataframe')
    return {'dense_features': dense_df_md5, 'sparse_features': sparse_df_md5}
This worker puts the output of pe_features into a dictionary of dataframes
43,327
def add_node(self, node_id, name, labels):
    """Get-or-create an indexed 'Node' with *node_id*/*name* and attach
    *labels* to it."""
    node = self.graph_db.get_or_create_indexed_node(
        'Node', 'node_id', node_id, {'node_id': node_id, 'name': name})
    try:
        node.add_labels(*labels)
    except NotImplementedError:
        # server/driver without label support -- skip labels silently
        pass
Add the node with name and labels .
43,328
def add_rel ( self , source_node_id , target_node_id , rel ) : n1_ref = self . graph_db . get_indexed_node ( 'Node' , 'node_id' , source_node_id ) n2_ref = self . graph_db . get_indexed_node ( 'Node' , 'node_id' , target_node_id ) if not n1_ref or not n2_ref : print 'Cannot add relationship between unfound nodes: %s % ( source_node_id , target_node_id ) return path = neo4j . Path ( n1_ref , rel , n2_ref ) path . get_or_create ( self . graph_db )
Add a relationship between nodes .
43,329
def execute(self, input_data):
    """Execute the ViewPE worker.

    Summarizes indicators, PEiD matches, yara signatures, classification
    and the first 15 lines of disassembly for an 'exe' sample; returns an
    'error' dict for any other type_tag.
    """
    if (input_data['meta']['type_tag'] != 'exe'):
        return {'error': self.__class__.__name__ + ': called on ' + input_data['meta']['type_tag']}
    view = {}
    # unique indicator categories
    view['indicators'] = list(set([item['category'] for item in input_data['pe_indicators']['indicator_list']]))
    view['peid_matches'] = input_data['pe_peid']['match_list']
    view['yara_sigs'] = input_data['yara_sigs']['matches'].keys()
    view['classification'] = input_data['pe_classifier']['classification']
    # safe_get returns 'plugin_failed' when the disass plugin produced nothing
    view['disass'] = self.safe_get(input_data, ['pe_disass', 'decode'])[:15]
    view.update(input_data['meta'])
    return view
Execute the ViewPE worker
43,330
def safe_get(data, key_list):
    """Walk *key_list* through nested dicts in *data*.

    Each missing key yields an empty dict, so a broken chain ends falsy and
    the sentinel string 'plugin_failed' is returned instead.
    """
    current = data
    for key in key_list:
        current = current.get(key, {})
    if current:
        return current
    return 'plugin_failed'
Safely access dictionary keys when plugin may have failed
43,331
def execute(self):
    """Begin capturing PCAPs and sending them to workbench.

    Creates a temp working directory, watches it for finished capture
    files (self.file_created callback) and runs tcpdump under the
    subprocess manager.
    """
    self.temp_dir = tempfile.mkdtemp()
    os.chdir(self.temp_dir)
    DirWatcher(self.temp_dir, self.file_created)
    self.subprocess_manager(self.tcpdump_cmd)
Begin capturing PCAPs and sending them to workbench
43,332
def file_created(self, filepath):
    """File-created callback.

    The newest capture file is held 'on deck' (it may still be written);
    when the next file appears, the previous one is stored and deleted.
    """
    pending = self.on_deck
    if pending:
        self.store_file(pending)
        os.remove(pending)
    self.on_deck = filepath
File created callback
43,333
def store_file(self, filename):
    """Store one capture file into workbench as 'streaming_pcap<index>'.

    Python 2 worker: opens a fresh zerorpc client per file and closes it
    after the upload.
    """
    self.workbench = zerorpc.Client(timeout=300, heartbeat=60)
    self.workbench.connect("tcp://127.0.0.1:4242")
    storage_name = "streaming_pcap" + str(self.pcap_index)
    print filename, storage_name
    with open(filename, 'rb') as f:
        self.workbench.store_sample(f.read(), storage_name, 'pcap')
    self.pcap_index += 1
    self.workbench.close()
Store a file into workbench
43,334
def parse_eprocess(self, eprocess_data):
    """Extract Name/PID/PPID from a rekall _EPROCESS Cybox record.

    Returns a flat dict with keys 'Name', 'PID' and 'PPID'.
    """
    cybox = eprocess_data['_EPROCESS']['Cybox']
    return {'Name': cybox['Name'],
            'PID': cybox['PID'],
            'PPID': cybox['Parent_PID']}
Parse the EProcess object we get from some rekall output
43,335
def execute(self, input_data):
    """Execute the Unzip worker.

    Stores every member of the zip archive as a new 'unknown' sample and
    returns the list of their md5s.  Python 2 (StringIO on raw bytes).
    """
    raw_bytes = input_data['sample']['raw_bytes']
    zipfile_output = zipfile.ZipFile(StringIO(raw_bytes))
    payload_md5s = []
    for name in zipfile_output.namelist():
        filename = os.path.basename(name)  # NOTE(review): computed but unused
        payload_md5s.append(self.workbench.store_sample(zipfile_output.read(name), name, 'unknown'))
    return {'payload_md5s': payload_md5s}
Execute the Unzip worker
43,336
def help_cli(self):
    """Return the top-level help text for the Workbench CLI.

    BUGFIX: the final string literal was unterminated and missing its %s
    placeholders.  Also renamed the local from 'help' (shadowed a builtin).
    """
    text = '%sWelcome to Workbench CLI Help:%s' % (color.Yellow, color.Normal)
    text += '\n\t%s> help cli_basic %s for getting started help' % (color.Green, color.LightBlue)
    text += '\n\t%s> help workers %s for help on available workers' % (color.Green, color.LightBlue)
    text += '\n\t%s> help search %s for help on searching samples' % (color.Green, color.LightBlue)
    text += '\n\t%s> help dataframe %s for help on making dataframes' % (color.Green, color.LightBlue)
    text += '\n\t%s> help commands %s for help on workbench commands' % (color.Green, color.LightBlue)
    text += '\n\t%s> help topic %s where topic can be a help, command or worker' % (color.Green, color.LightBlue)
    text += '\n\n%sNote: cli commands are transformed into python calls' % (color.Yellow)
    text += '\n\t%s> help cli_basic %s' % (color.Green, color.Normal)
    return text
Help on Workbench CLI
43,337
def help_cli_basic(self):
    """Return the getting-started help text for the Workbench CLI."""
    parts = [
        '%sWorkbench: Getting started...' % (color.Yellow),
        '\n%sLoad in a sample:' % (color.Green),
        '\n\t%s> load_sample /path/to/file' % (color.LightBlue),
        '\n\n%sNotice the prompt now shows the md5 of the sample...' % (color.Yellow),
        '\n%sRun workers on the sample:' % (color.Green),
        '\n\t%s> view' % (color.LightBlue),
        '\n%sType the \'help workers\' or the first part of the worker <tab>...' % (color.Green),
        '\n\t%s> help workers (lists all possible workers)' % (color.LightBlue),
        '\n\t%s> pe_<tab> (will give you pe_classifier, pe_deep_sim, pe_features, pe_indicators, pe_peid)%s' % (color.LightBlue, color.Normal),
    ]
    return ''.join(parts)
Help for Workbench CLI Basics
43,338
def help_cli_search(self):
    """Return the search help text for the Workbench CLI."""
    parts = [
        '%sSearch: %s returns sample_sets, a sample_set is a set/list of md5s.' % (color.Yellow, color.Green),
        '\n\n\t%sSearch for all samples in the database that are known bad pe files,' % (color.Green),
        '\n\t%sthis command returns the sample_set containing the matching items' % (color.Green),
        '\n\t%s> my_bad_exes = search([\'bad\', \'exe\'])' % (color.LightBlue),
        '\n\n\t%sRun workers on this sample_set:' % (color.Green),
        '\n\t%s> pe_outputs = pe_features(my_bad_exes) %s' % (color.LightBlue, color.Normal),
        '\n\n\t%sLoop on the generator (or make a DataFrame see >help dataframe)' % (color.Green),
        '\n\t%s> for output in pe_outputs: %s' % (color.LightBlue, color.Normal),
        '\n\t\t%s print output %s' % (color.LightBlue, color.Normal),
    ]
    return ''.join(parts)
Help for Workbench CLI Search
43,339
def execute(self, input_data):
    """ViewPcapDeep execute method.

    Replaces the 'extracted_files' md5 list in the pcap view with per-file
    deep meta (md5, sha256, entropy, ssdeep, file_size, file_type).
    """
    view = input_data['view_pcap']
    extracted_files = input_data['view_pcap']['extracted_files']
    del view['extracted_files']
    view['extracted_files'] = [self.workbench.work_request('meta_deep', md5,
                               ['md5', 'sha256', 'entropy', 'ssdeep', 'file_size', 'file_type'])
                               for md5 in extracted_files]
    return view
ViewPcapDeep execute method
43,340
def parse_collection(obj: dict) -> BioCCollection:
    """Deserialize a dict *obj* into a BioCCollection."""
    coll = BioCCollection()
    coll.source = obj['source']
    coll.date = obj['date']
    coll.key = obj['key']
    coll.infons = obj['infons']
    for doc_obj in obj['documents']:
        coll.add_document(parse_doc(doc_obj))
    return coll
Deserialize a dict obj to a BioCCollection object
43,341
def parse_annotation(obj: dict) -> BioCAnnotation:
    """Deserialize a dict *obj* into a BioCAnnotation."""
    annotation = BioCAnnotation()
    annotation.id = obj['id']
    annotation.infons = obj['infons']
    annotation.text = obj['text']
    for loc_obj in obj['locations']:
        annotation.add_location(BioCLocation(loc_obj['offset'], loc_obj['length']))
    return annotation
Deserialize a dict obj to a BioCAnnotation object
43,342
def parse_relation(obj: dict) -> BioCRelation:
    """Deserialize a dict *obj* into a BioCRelation."""
    relation = BioCRelation()
    relation.id = obj['id']
    relation.infons = obj['infons']
    for node_obj in obj['nodes']:
        relation.add_node(BioCNode(node_obj['refid'], node_obj['role']))
    return relation
Deserialize a dict obj to a BioCRelation object
43,343
def parse_sentence(obj: dict) -> BioCSentence:
    """Deserialize a dict *obj* into a BioCSentence."""
    sent = BioCSentence()
    sent.offset = obj['offset']
    sent.infons = obj['infons']
    sent.text = obj['text']
    for ann_obj in obj['annotations']:
        sent.add_annotation(parse_annotation(ann_obj))
    for rel_obj in obj['relations']:
        sent.add_relation(parse_relation(rel_obj))
    return sent
Deserialize a dict obj to a BioCSentence object
43,344
def parse_passage(obj: dict) -> BioCPassage:
    """Deserialize a dict *obj* into a BioCPassage ('text' is optional)."""
    passage = BioCPassage()
    passage.offset = obj['offset']
    passage.infons = obj['infons']
    if 'text' in obj:
        passage.text = obj['text']
    for sent_obj in obj['sentences']:
        passage.add_sentence(parse_sentence(sent_obj))
    for ann_obj in obj['annotations']:
        passage.add_annotation(parse_annotation(ann_obj))
    for rel_obj in obj['relations']:
        passage.add_relation(parse_relation(rel_obj))
    return passage
Deserialize a dict obj to a BioCPassage object
43,345
def parse_doc(obj: dict) -> BioCDocument:
    """Deserialize a dict *obj* into a BioCDocument."""
    document = BioCDocument()
    document.id = obj['id']
    document.infons = obj['infons']
    for passage_obj in obj['passages']:
        document.add_passage(parse_passage(passage_obj))
    for ann_obj in obj['annotations']:
        document.add_annotation(parse_annotation(ann_obj))
    for rel_obj in obj['relations']:
        document.add_relation(parse_relation(rel_obj))
    return document
Deserialize a dict obj to a BioCDocument object
43,346
def execute(self, input_data):
    """Build Neo4j graphs from the PCAP Bro output logs.

    Streams the weird/http/files logs out of workbench and feeds each to
    its graph builder, yielding to gevent between builds.
    """
    bro_logs = input_data['pcap_bro']
    if 'weird_log' in bro_logs:
        stream = self.workbench.stream_sample(bro_logs['weird_log'])
        self.weird_log_graph(stream)
    gsleep()  # yield to the gevent hub between graph builds
    stream = self.workbench.stream_sample(bro_logs['http_log'])
    self.http_log_graph(stream)
    gsleep()
    stream = self.workbench.stream_sample(bro_logs['files_log'])
    self.files_log_graph(stream)
    return {'output': 'go to http://localhost:7474/browser and execute this query "match (s:origin), (t:file), p=allShortestPaths((s)--(t)) return p"'}
This worker builds graphs from PCAP Bro output logs .
43,347
def register_callbacks(self, on_create, on_modify, on_delete):
    """Remember the file creation/modification/deletion callbacks."""
    self.on_create, self.on_modify, self.on_delete = (
        on_create, on_modify, on_delete)
Register callbacks for file creation , modification and deletion .
43,348
def _start_monitoring(self):
    """Poll the directory once per second and fire the registered
    create/modify/delete callbacks with lists of affected filenames.

    Runs forever; intended to be spawned as a gevent greenlet.
    """
    before = self._file_timestamp_info(self.path)
    while True:
        gevent.sleep(1)
        after = self._file_timestamp_info(self.path)
        added = [fname for fname in after.keys() if fname not in before.keys()]
        removed = [fname for fname in before.keys() if fname not in after.keys()]
        modified = []
        for fname in before.keys():
            if fname not in removed:
                # NOTE(review): re-reads mtime from disk here instead of
                # using the 'after' snapshot -- confirm this is intended
                if os.path.getmtime(fname) != before.get(fname):
                    modified.append(fname)
        if added:
            self.on_create(added)
        if removed:
            self.on_delete(removed)
        if modified:
            self.on_modify(modified)
        before = after
Internal method that monitors the directory for changes
43,349
def _file_timestamp_info ( self , path ) : files = [ os . path . join ( path , fname ) for fname in os . listdir ( path ) if '.py' in fname ] return dict ( [ ( fname , os . path . getmtime ( fname ) ) for fname in files ] )
Grab all the timestamps for the files in the directory
43,350
def execute(self, input_data):
    """Yara worker execute method.

    Matches the loaded rules against the raw sample bytes and flattens the
    results into {tag: [unique matched strings]}.  Python 2 (iteritems).
    """
    raw_bytes = input_data['sample']['raw_bytes']
    matches = self.rules.match_data(raw_bytes)
    flat_data = collections.defaultdict(list)
    for filename, match_list in matches.iteritems():
        for match in match_list:
            # tag is '<rulefile>_<description-or-rulename>'
            if 'description' in match['meta']:
                new_tag = filename + '_' + match['meta']['description']
            else:
                new_tag = filename + '_' + match['rule']
            # NOTE(review): the inner loop rebinds 'match', shadowing the
            # outer loop variable -- confirm this is intended
            for match in match['strings']:
                flat_data[new_tag].append(match['data'])
            flat_data[new_tag] = list(set(flat_data[new_tag]))
    return {'matches': flat_data}
yara worker execute method
43,351
def chunks(data, chunk_size):
    """Yield successive chunk_size-sized slices of *data* (the last chunk
    may be shorter).

    BUGFIX: uses range() -- the original used xrange, which does not exist
    on Python 3; range is equivalent here since it is only iterated.
    """
    for start in range(0, len(data), chunk_size):
        yield data[start:start + chunk_size]
Yield chunk_size chunks from data .
43,352
def setup_pcap_inputs(self, input_data):
    """Write the PCAP sample(s) to disk for Bro to process.

    Accepts either a single 'sample' or a 'sample_set' (each md5 fetched
    from workbench); returns the list of written filenames.
    """
    file_list = []
    if 'sample' in input_data:
        raw_bytes = input_data['sample']['raw_bytes']
        filename = os.path.basename(input_data['sample']['filename'])
        file_list.append({'filename': filename, 'bytes': raw_bytes})
    else:
        for md5 in input_data['sample_set']['md5_list']:
            sample = self.workbench.get_sample(md5)['sample']
            raw_bytes = sample['raw_bytes']
            filename = os.path.basename(sample['filename'])
            file_list.append({'filename': filename, 'bytes': raw_bytes})
    # write each pcap into the current working directory
    for file_info in file_list:
        with open(file_info['filename'], 'wb') as pcap_file:
            pcap_file.write(file_info['bytes'])
    return [file_info['filename'] for file_info in file_list]
Write the PCAPs to disk for Bro to process and return the pcap filenames
43,353
def subprocess_manager(self, exec_args):
    """Bro subprocess manager.

    Runs *exec_args* via gevent subprocess; raises RuntimeError when the
    executable is missing, writes to stderr, or exits non-zero.
    Python 2 worker (print statement).
    """
    try:
        sp = gevent.subprocess.Popen(exec_args, stdout=gevent.subprocess.PIPE, stderr=gevent.subprocess.PIPE)
    except OSError:
        raise RuntimeError('Could not run bro executable (either not installed or not in path): %s' % (exec_args))
    out, err = sp.communicate()
    if out:
        print 'standard output of subprocess: %s' % out
    if err:
        raise RuntimeError('%s\npcap_bro had output on stderr: %s' % (exec_args, err))
    if sp.returncode:
        raise RuntimeError('%s\npcap_bro had returncode: %d' % (exec_args, sp.returncode))
Bro subprocess manager
43,354
def getDirectory(rh):
    """Get the virtual machine's directory statements.

    Invokes SMCLI Image_Query_DM for rh.userid, strips the *DVHOPT trailer
    on success, and returns rh.results['overallRC'].
    """
    rh.printSysLog("Enter getVM.getDirectory")
    parms = ["-T", rh.userid]
    results = invokeSMCLI(rh, "Image_Query_DM", parms)
    if results['overallRC'] == 0:
        # drop the *DVHOPT option trailer from the directory output
        results['response'] = re.sub('\*DVHOPT.*', '', results['response'])
        rh.printLn("N", results['response'])
    else:
        rh.printLn("ES", results['response'])
        rh.updateResults(results)
    rh.printSysLog("Exit getVM.getDirectory, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
Get the virtual machine 's directory statements .
43,355
def getStatus(rh):
    """Get the basic status of a virtual machine.

    Reports power state and, for a logged-on machine, memory/processor/CPU
    time from getPerfInfo.  rh.parms may restrict output to 'power',
    'memory' or 'cpu'.  Returns rh.results['overallRC'].
    """
    rh.printSysLog("Enter getVM.getStatus, userid: " + rh.userid)
    results = isLoggedOn(rh, rh.userid)
    if results['rc'] != 0:
        # could not determine logged-on state
        rh.updateResults(results)
        rh.printSysLog("Exit getVM.getStatus, rc: " + str(rh.results['overallRC']))
        return rh.results['overallRC']
    if results['rs'] == 1:
        # rs == 1 -> machine is logged off; report zeroed counters
        powerStr = "Power state: off"
        memStr = "Total Memory: 0M"
        usedMemStr = "Used Memory: 0M"
        procStr = "Processors: 0"
        timeStr = "CPU Used Time: 0 sec"
    else:
        powerStr = "Power state: on"
    if 'power' in rh.parms:
        # caller only wants the power state
        rh.printLn("N", powerStr)
        rh.updateResults(results)
        rh.printSysLog("Exit getVM.getStatus, rc: " + str(rh.results['overallRC']))
        return rh.results['overallRC']
    if results['rs'] != 1:
        # machine is on: query live performance counters
        results = getPerfInfo(rh, rh.userid)
        if results['overallRC'] != 0:
            rh.updateResults(results)
            rh.printSysLog("Exit getVM.getStatus, rc: " + str(rh.results['overallRC']))
            return rh.results['overallRC']
        else:
            # getPerfInfo response is four newline-separated lines
            memStr = results['response'].split("\n")[0]
            usedMemStr = results['response'].split("\n")[1]
            procStr = results['response'].split("\n")[2]
            timeStr = results['response'].split("\n")[3]
    if 'memory' in rh.parms:
        outStr = memStr + "\n" + usedMemStr
    elif 'cpu' in rh.parms:
        outStr = procStr + "\n" + timeStr
    else:
        outStr = powerStr + "\n" + memStr + "\n" + usedMemStr
        outStr += "\n" + procStr + "\n" + timeStr
    rh.printLn("N", outStr)
    rh.printSysLog("Exit getVM.getStatus, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
Get the basic status of a virtual machine .
43,356
def fcpinfo(rh):
    """Get fcp info (System_WWPN_Query) and filter it by rh.parms['status'].

    Returns rh.results['overallRC'].

    BUGFIX: the entry log message named the wrong function
    ("changeVM.dedicate") and, on failure, the error was printed and
    updateResults called twice; both are corrected here.
    """
    rh.printSysLog("Enter getVM.fcpinfo")
    parms = ["-T", rh.userid]
    hideList = []
    results = invokeSMCLI(rh, "System_WWPN_Query", parms, hideInLog=hideList)
    if results['overallRC'] == 0:
        ret = extract_fcp_data(results['response'], rh.parms['status'])
        rh.printLn("N", ret)
    else:
        rh.printLn("ES", results['response'])
        rh.updateResults(results)
    return rh.results['overallRC']
Get fcp info and filter by the status .
43,357
def _get_variable_names(arr):
    """Return the variable names of *arr*.

    If the array carries a VARIABLELABEL dimension, return its coordinate
    values as a list; otherwise return the array's own name.
    """
    if VARIABLELABEL not in arr.dims:
        return arr.name
    return arr.coords[VARIABLELABEL].tolist()
Return the variable names of an array
43,358
def setup_coords(arr_names=None, sort=[], dims={}, **kwargs):
    """Set up the ``arr_names`` mapping for the plot.

    Parameters:
        arr_names: mapping, string template (with ``{0}`` for a counter),
            or None (defaults to ``'arr{0}'``).
        sort: list of dimension names that shall come first.
        dims: mapping from dimension name to value(s).
        ``**kwargs``: additional dimension/value pairs.

    Returns:
        OrderedDict: array name -> dict of dimension values, one entry
        per element of the cartesian product of all dimension values.
    """
    # If arr_names is already a mapping, use it unchanged.
    try:
        return OrderedDict(arr_names)
    except (ValueError, TypeError):
        pass
    if arr_names is None:
        arr_names = repeat('arr{0}')
    elif isstring(arr_names):
        arr_names = repeat(arr_names)
    dims = OrderedDict(dims)
    for key, val in six.iteritems(kwargs):
        dims.setdefault(key, val)
    sorted_dims = OrderedDict()
    if sort:
        # Explicit sort order first, remaining dims afterwards.
        for key in sort:
            sorted_dims[key] = dims.pop(key)
        for key, val in six.iteritems(dims):
            sorted_dims[key] = val
    else:
        # By default the variable name comes first, rest alphabetically.
        if 'name' in dims:
            sorted_dims['name'] = None
        for key, val in sorted(dims.items()):
            sorted_dims[key] = val
        for key, val in six.iteritems(kwargs):
            sorted_dims.setdefault(key, val)
    # Turn every value into an iterator over a list of values.
    for key, val in six.iteritems(sorted_dims):
        sorted_dims[key] = iter(safe_list(val))
    return OrderedDict([
        (arr_name.format(i), dict(zip(sorted_dims.keys(), dim_tuple)))
        for i, (arr_name, dim_tuple) in enumerate(zip(
            arr_names, product(*map(list, sorted_dims.values()))))])
Sets up the arr_names dictionary for the plot
43,359
def to_slice(arr):
    """Try to represent the integer array ``arr`` as a :class:`slice`.

    Returns the equivalent slice for slices, single-element arrays and
    evenly spaced integer arrays; otherwise ``None`` (implicitly).
    """
    if isinstance(arr, slice):
        return arr
    if len(arr) == 1:
        first = arr[0]
        return slice(first, first + 1)
    # The array is representable as a slice iff the spacing is uniform.
    steps = np.unique(arr[1:] - arr[:-1])
    if len(steps) != 1:
        return None
    inc = steps[0]
    return slice(arr[0], arr[-1] + inc, inc)
Test whether arr is an integer array that can be replaced by a slice
43,360
def get_index_from_coord(coord, base_index):
    """Return ``coord`` as an integer, integer array or slice.

    The result indexes into ``base_index`` (a pandas index).  A scalar
    coordinate yields a single integer, a coordinate equal to the full
    index yields ``slice(None)``, otherwise an integer array (converted
    to a slice when evenly spaced).
    """
    try:
        values = coord.values
    except AttributeError:
        values = coord
    if values.ndim == 0:
        # 0-d array -> single integer location
        return base_index.get_loc(values[()])
    if len(values) == len(base_index) and (values == base_index).all():
        return slice(None)
    locs = np.array([base_index.get_loc(v) for v in values])
    return to_slice(locs) or locs
Function to return the coordinate as an integer, integer array, or slice.
43,361
def get_tdata(t_format, files):
    """Get the time information from file names.

    Parameters:
        t_format (str): a :func:`datetime.datetime.strptime` format that
            occurs in the file names (``t_patterns`` maps the format
            codes to regular expressions).
        files (list of str): the file names to parse.

    Returns:
        tuple: (pandas DatetimeIndex named 'time', files sorted by time).
    """
    def median(arr):
        # midpoint between min and max (also works for datetime64)
        return arr.min() + (arr.max() - arr.min()) / 2
    import re
    from pandas import Index
    # Translate the strptime format into a regular expression.
    t_pattern = t_format
    for fmt, patt in t_patterns.items():
        t_pattern = t_pattern.replace(fmt, patt)
    t_pattern = re.compile(t_pattern)
    time = list(range(len(files)))
    for i, f in enumerate(files):
        # A file may contain several time stamps -> use their midpoint.
        time[i] = median(np.array(list(map(
            lambda s: np.datetime64(dt.datetime.strptime(s, t_format)),
            t_pattern.findall(f)))))
    # Sort files chronologically.
    ind = np.argsort(time)
    files = np.array(files)[ind]
    time = np.array(time)[ind]
    return to_datetime(Index(time, name='time')), files
Get the time information from file names
43,362
def to_netcdf(ds, *args, **kwargs):
    """Store the given dataset as a netCDF file.

    Variables whose units are ``'day as %Y%m%d.%f'`` and whose dtype is
    datetime64 are re-encoded through ``AbsoluteTimeEncoder`` before
    delegating to xarray's ``to_netcdf`` so the absolute-time encoding
    survives the round trip.
    """
    to_update = {}
    for v, obj in six.iteritems(ds.variables):
        units = obj.attrs.get('units', obj.encoding.get('units', None))
        if units == 'day as %Y%m%d.%f' and np.issubdtype(
                obj.dtype, np.datetime64):
            to_update[v] = xr.Variable(
                obj.dims, AbsoluteTimeEncoder(obj), attrs=obj.attrs.copy(),
                encoding=obj.encoding)
            to_update[v].attrs['units'] = units
    if to_update:
        # Work on a shallow copy so the caller's dataset is untouched.
        ds = ds.copy()
        ds.update(to_update)
    return xarray_api.to_netcdf(ds, *args, **kwargs)
Store the given dataset as a netCDF file
43,363
def _get_fname_nio ( store ) : try : f = store . ds . file except AttributeError : return None try : return f . path except AttributeError : return None
Try to get the file name from the NioDataStore store
43,364
def get_filename_ds(ds, dump=True, paths=None, **kwargs):
    """Return the filename corresponding to the given dataset.

    Parameters:
        ds: the (psyplot-accessed) xarray dataset.
        dump (bool): if True and no filename can be determined from the
            data store, dump the dataset to a netCDF file.
        paths: True (use temporary files), a string, an iterable of
            paths, or None.
        ``**kwargs``: forwarded to :func:`to_netcdf` when dumping.

    Returns:
        tuple: (filename, store module name(s), store class name(s)).
    """
    from tempfile import NamedTemporaryFile
    # Shortcut: the filename has already been determined earlier.
    if ds.psy._filename is not None:
        return tuple([ds.psy._filename] + list(ds.psy.data_store))

    def dump_nc():
        # Dump the dataset to `fname` and remember the data store used.
        if xr_version < (0, 11):
            kwargs.setdefault('writer', xarray_api.ArrayWriter())
            store = to_netcdf(ds, fname, **kwargs)
        else:
            kwargs.setdefault('multifile', True)
            store = to_netcdf(ds, fname, **kwargs)[1]
        store_mod = store.__module__
        store_cls = store.__class__.__name__
        ds._file_obj = store
        return store_mod, store_cls

    def tmp_it():
        # Endless source of temporary netCDF file names.
        while True:
            yield NamedTemporaryFile(suffix='.nc').name

    fname = None
    if paths is True or (dump and paths is None):
        paths = tmp_it()
    elif paths is not None:
        if isstring(paths):
            paths = iter([paths])
        else:
            paths = iter(paths)
    store_mod, store_cls = ds.psy.data_store
    if store_mod is not None:
        # Try to extract the file name(s) from the existing data store.
        store = ds._file_obj
        if hasattr(store, 'file_objs'):
            # Multi-file dataset -> collect info per underlying store.
            fname = []
            store_mod = []
            store_cls = []
            for obj in store.file_objs:
                _fname = None
                for func in get_fname_funcs:
                    if _fname is None:
                        _fname = func(obj)
                if _fname is not None:
                    fname.append(_fname)
                    store_mod.append(obj.__module__)
                    store_cls.append(obj.__class__.__name__)
            fname = tuple(fname)
            store_mod = tuple(store_mod)
            store_cls = tuple(store_cls)
        else:
            # Single store -> first extractor that succeeds wins.
            for func in get_fname_funcs:
                fname = func(store)
                if fname is not None:
                    break
    if fname is None and paths is not None:
        fname = next(paths, None)
        if dump and fname is not None:
            store_mod, store_cls = dump_nc()
    ds.psy.filename = fname
    ds.psy.data_store = (store_mod, store_cls)
    return fname, store_mod, store_cls
Return the filename of the corresponding to a dataset
43,365
def _open_ds_from_store(fname, store_mod=None, store_cls=None, **kwargs):
    """Open a dataset and return it.

    ``fname`` may be a dataset (returned as is), a single path, or an
    iterable of paths (opened via ``open_mfdataset``).  ``store_mod``
    and ``store_cls`` optionally name the xarray data-store class to
    reopen the file(s) with.
    """
    if isinstance(fname, xr.Dataset):
        return fname
    if not isstring(fname):
        try:
            # Test whether fname is an indexable iterable of files.
            fname[0]
        except TypeError:
            pass
        else:
            if store_mod is not None and store_cls is not None:
                # Broadcast a single store spec over all files.
                if isstring(store_mod):
                    store_mod = repeat(store_mod)
                if isstring(store_cls):
                    store_cls = repeat(store_cls)
                fname = [_open_store(sm, sc, f)
                         for sm, sc, f in zip(store_mod, store_cls, fname)]
                # The stores are already open -> no engine/lock needed.
                kwargs['engine'] = None
                kwargs['lock'] = False
                return open_mfdataset(fname, **kwargs)
    if store_mod is not None and store_cls is not None:
        fname = _open_store(store_mod, store_cls, fname)
    return open_dataset(fname, **kwargs)
Open a dataset and return it
43,366
def disconnect(self, func=None):
    """Disconnect a function call from the signal.

    Parameters:
        func: the connected callable to remove; if ``None``, all
            connections are dropped.
    """
    if func is not None:
        self._connections.remove(func)
    else:
        self._connections = []
Disconnect a function call to the signal . If None all connections are disconnected
43,367
def get_decoder(cls, ds, var):
    """Get the initialized decoder that can decode ``var`` in ``ds``.

    The first registered decoder class whose ``can_decode`` accepts the
    variable wins; ``CFDecoder`` is the fallback.
    """
    matching = (dcls for dcls in cls._registry if dcls.can_decode(ds, var))
    decoder_cls = next(matching, CFDecoder)
    return decoder_cls(ds)
Class method to get the right decoder class that can decode the given dataset and variable
43,368
def decode_coords(ds, gridfile=None):
    """Set the coordinates and bounds in a dataset.

    Names referenced through the CF ``coordinates`` and ``bounds``
    attributes are promoted to coordinates, optionally pulling the
    variables from a separate ``gridfile``.
    """
    def add_attrs(obj):
        # Collect names referenced via 'coordinates' and 'bounds' attrs.
        if 'coordinates' in obj.attrs:
            extra_coords.update(obj.attrs['coordinates'].split())
            # Move the attribute into the encoding (xarray convention).
            obj.encoding['coordinates'] = obj.attrs.pop('coordinates')
        if 'bounds' in obj.attrs:
            extra_coords.add(obj.attrs['bounds'])
    if gridfile is not None and not isinstance(gridfile, xr.Dataset):
        gridfile = open_dataset(gridfile)
    extra_coords = set(ds.coords)
    for k, v in six.iteritems(ds.variables):
        add_attrs(v)
    add_attrs(ds)
    if gridfile is not None:
        ds.update({k: v for k, v in six.iteritems(gridfile.variables)
                   if k in extra_coords})
    if xr_version < (0, 11):
        ds.set_coords(extra_coords.intersection(ds.variables),
                      inplace=True)
    else:
        # NOTE(review): touches xarray's private _coord_names because
        # inplace set_coords was removed in 0.11 -- confirm still valid.
        ds._coord_names.update(extra_coords.intersection(ds.variables))
    return ds
Sets the coordinates and bounds in a dataset
43,369
def is_triangular(self, var):
    """Deprecated test whether ``var`` is on a triangular grid.

    Use :meth:`is_unstructured` instead.
    """
    warn("The 'is_triangular' method is depreceated and will be removed "
         "soon! Use the 'is_unstructured' method!",
         DeprecationWarning, stacklevel=1)
    if str(var.attrs.get('grid_type')) == 'unstructured':
        return True
    return self._check_triangular_bounds(var)[0]
Test if a variable is on a triangular grid
43,370
def _get_coord_cell_node_coord(self, coord, coords=None, nans=None,
                               var=None):
    """Get the boundaries of an unstructured coordinate.

    Parameters:
        coord: the coordinate whose ``bounds`` attribute is resolved.
        coords: optional mapping used to subset the bounds variable.
        nans: None, 'skip' (drop cells where ``var`` is NaN) or 'only'
            (keep only cells where ``var`` is NaN).
        var: the data variable, required when ``nans`` is given.

    Returns:
        The bounds variable (possibly subset) or ``None``.
    """
    bounds = coord.attrs.get('bounds')
    if bounds is not None:
        bounds = self.ds.coords.get(bounds)
    if bounds is not None:
        if coords is not None:
            # Restrict the bounds to the given coordinate selection.
            bounds = bounds.sel(**{
                key: coords[key]
                for key in set(coords).intersection(bounds.dims)})
        if nans is not None and var is None:
            raise ValueError("Need the variable to deal with NaN!")
        elif nans is None:
            pass
        elif nans == 'skip':
            bounds = bounds[~np.isnan(var.values)]
        elif nans == 'only':
            bounds = bounds[np.isnan(var.values)]
        else:
            raise ValueError(
                "`nans` must be either None, 'skip', or 'only'! "
                "Not {0}!".format(str(nans)))
    return bounds
Get the boundaries of an unstructured coordinate.
43,371
def is_unstructured(self, var):
    """Test if a variable is on an unstructured grid.

    True when the ``grid_type`` attribute says 'unstructured' or the
    x-coordinate's cell bounds have more than two vertices per cell.
    Returns ``None`` (falsy) otherwise.
    """
    if str(var.attrs.get('grid_type')) == 'unstructured':
        return True
    xcoord = self.get_x(var)
    if xcoord is not None:
        bounds = self._get_coord_cell_node_coord(xcoord)
        # More than 2 vertices per cell -> not a simple interval grid.
        if bounds is not None and bounds.shape[-1] > 2:
            return True
Test if a variable is on an unstructured grid.
43,372
def is_circumpolar(self, var):
    """Test if a variable is on a circumpolar grid.

    A grid is considered circumpolar when its x-coordinate is
    two-dimensional.
    """
    xcoord = self.get_x(var)
    if xcoord is None:
        return False
    return xcoord.ndim == 2
Test if a variable is on a circumpolar grid
43,373
def get_variable_by_axis(self, var, axis, coords=None):
    """Return the coordinate of ``var`` matching the specified axis.

    Parameters:
        var: the data variable.
        axis (str): one of 'x', 'y', 'z', 't' (case insensitive).
        coords: optional mapping of coordinates (defaults to
            ``self.ds.coords``).

    Returns:
        The matching coordinate, or None if nothing (or something
        ambiguous) is found.
    """
    axis = axis.lower()
    if axis not in list('xyzt'):
        raise ValueError(
            "Axis must be one of X, Y, Z, T, not {0}".format(axis))
    coords = coords or self.ds.coords
    # Names listed in the CF 'coordinates' attribute (or encoding).
    coord_names = var.attrs.get(
        'coordinates', var.encoding.get('coordinates', '')).split()
    if not coord_names:
        return
    ret = []
    for coord in map(lambda dim: coords[dim],
                     filter(lambda dim: dim in coords,
                            chain(coord_names, var.dims))):
        # Match by CF 'axis' attribute or by the decoder's name sets.
        if (coord.name not in (c.name for c in ret) and
                (coord.attrs.get('axis', '').lower() == axis or
                 coord.name in getattr(self, axis))):
            ret.append(coord)
    if ret:
        # Multiple matches are ambiguous -> None.
        return None if len(ret) > 1 else ret[0]
    # Fall back to positional interpretation of the coordinates attr.
    tnames = self.t.intersection(coord_names)
    if axis == 'x':
        for cname in filter(lambda cname: re.search('lon', cname),
                            coord_names):
            return coords[cname]
        return coords.get(coord_names[-1])
    elif axis == 'y' and len(coord_names) >= 2:
        for cname in filter(lambda cname: re.search('lat', cname),
                            coord_names):
            return coords[cname]
        return coords.get(coord_names[-2])
    elif (axis == 'z' and len(coord_names) >= 3 and
          coord_names[-3] not in tnames):
        return coords.get(coord_names[-3])
    elif axis == 't' and tnames:
        tname = next(iter(tnames))
        if len(tnames) > 1:
            warn("Found multiple matches for time coordinate in the "
                 "coordinates: %s. I use %s" % (
                     ', '.join(tnames), tname),
                 PsyPlotRuntimeWarning)
        return coords.get(tname)
Return the coordinate matching the specified axis
43,374
def get_x(self, var, coords=None):
    """Get the x-coordinate of a variable.

    First tries the axis-based lookup, then falls back to the
    coordinate named after the x-dimension.
    """
    coords = coords or self.ds.coords
    found = self.get_variable_by_axis(var, 'x', coords)
    if found is not None:
        return found
    return coords.get(self.get_xname(var))
Get the x - coordinate of a variable
43,375
def get_xname(self, var, coords=None):
    """Get the name of the x-dimension of ``var``.

    Falls back to the last dimension when nothing matches.
    """
    if coords is not None:
        coord = self.get_variable_by_axis(var, 'x', coords)
        if coord is not None and coord.name in var.dims:
            return coord.name
    dimlist = list(self.x.intersection(var.dims))
    if dimlist:
        if len(dimlist) > 1:
            warn("Found multiple matches for x coordinate in the variable:"
                 "%s. I use %s" % (', '.join(dimlist), dimlist[0]),
                 PsyPlotRuntimeWarning)
        return dimlist[0]
    # Fallback: by convention x is the last dimension.
    return var.dims[-1]
Get the name of the x - dimension
43,376
def get_y(self, var, coords=None):
    """Get the y-coordinate of a variable.

    First tries the axis-based lookup, then falls back to the
    coordinate named after the y-dimension.
    """
    coords = coords or self.ds.coords
    coord = self.get_variable_by_axis(var, 'y', coords)
    if coord is not None:
        return coord
    return coords.get(self.get_yname(var))
Get the y - coordinate of a variable
43,377
def get_yname(self, var, coords=None):
    """Get the name of the y-dimension of ``var``."""
    if coords is not None:
        coord = self.get_variable_by_axis(var, 'y', coords)
        if coord is not None and coord.name in var.dims:
            return coord.name
    dimlist = list(self.y.intersection(var.dims))
    if dimlist:
        if len(dimlist) > 1:
            warn("Found multiple matches for y coordinate in the variable:"
                 "%s. I use %s" % (', '.join(dimlist), dimlist[0]),
                 PsyPlotRuntimeWarning)
        return dimlist[0]
    if self.is_unstructured(var):
        # Unstructured grids have a single horizontal dimension.
        return var.dims[-1]
    # Fallback: second-to-last dimension (or last for 1-D data).
    return var.dims[-2 if var.ndim > 1 else -1]
Get the name of the y - dimension
43,378
def get_zname(self, var, coords=None):
    """Get the name of the z-dimension of ``var``, or None."""
    if coords is not None:
        coord = self.get_variable_by_axis(var, 'z', coords)
        if coord is not None and coord.name in var.dims:
            return coord.name
    dimlist = list(self.z.intersection(var.dims))
    if dimlist:
        if len(dimlist) > 1:
            warn("Found multiple matches for z coordinate in the variable:"
                 "%s. I use %s" % (', '.join(dimlist), dimlist[0]),
                 PsyPlotRuntimeWarning)
        return dimlist[0]
    # Positional fallback: z sits before the horizontal dimension(s).
    is_unstructured = self.is_unstructured(var)
    icheck = -2 if is_unstructured else -3
    # An extra 'variable' dimension shifts the required rank by one.
    min_dim = abs(icheck) if 'variable' not in var.dims else abs(icheck - 1)
    if var.ndim >= min_dim and var.dims[icheck] != self.get_tname(
            var, coords):
        return var.dims[icheck]
    return None
Get the name of the z - dimension
43,379
def get_t(self, var, coords=None):
    """Get the time coordinate of a variable, or None."""
    coords = coords or self.ds.coords
    coord = self.get_variable_by_axis(var, 't', coords)
    if coord is not None:
        return coord
    dimlist = list(self.t.intersection(var.dims).intersection(coords))
    if dimlist:
        if len(dimlist) > 1:
            warn("Found multiple matches for time coordinate in the "
                 "variable: %s. I use %s" % (
                     ', '.join(dimlist), dimlist[0]),
                 PsyPlotRuntimeWarning)
        return coords[dimlist[0]]
    tname = self.get_tname(var)
    if tname is not None:
        return coords.get(tname)
    return None
Get the time coordinate of a variable
43,380
def get_tname(self, var, coords=None):
    """Get the name of the t-dimension of ``var``, or None."""
    if coords is not None:
        coord = self.get_variable_by_axis(var, 't', coords)
        if coord is not None and coord.name in var.dims:
            return coord.name
    dimlist = list(self.t.intersection(var.dims))
    if dimlist:
        if len(dimlist) > 1:
            warn("Found multiple matches for t coordinate in the variable:"
                 "%s. I use %s" % (', '.join(dimlist), dimlist[0]),
                 PsyPlotRuntimeWarning)
        return dimlist[0]
    return None
Get the name of the t - dimension
43,381
def get_coord_idims(self, coords):
    """Get the integer slicers for the given coordinates.

    Returns a mapping from coordinate name to an integer, integer
    array or slice into the base dataset's index.
    """
    indexes = self.ds.indexes
    return {label: get_index_from_coord(coord, indexes[label])
            for label, coord in six.iteritems(coords)
            if label in indexes}
Get the slicers for the given coordinates from the base dataset
43,382
def get_plotbounds(self, coord, kind=None, ignore_shape=False):
    """Get the bounds of a coordinate for plotting.

    Uses the CF ``bounds`` variable when present, otherwise infers the
    interval breaks from the coordinate data itself.
    """
    if 'bounds' in coord.attrs:
        bounds = self.ds.coords[coord.attrs['bounds']]
        if ignore_shape:
            return bounds.values.ravel()
        if not bounds.shape[:-1] == coord.shape:
            # NOTE(review): this assigns the selected *dataset* to
            # `bounds`, not the bounds variable -- looks suspicious;
            # confirm the intended behavior.
            bounds = self.ds.isel(**self.get_idims(coord))
        try:
            return self._get_plotbounds_from_cf(coord, bounds)
        except ValueError as e:
            warn((e.message if six.PY2 else str(e)) +
                 " Bounds are calculated automatically!")
    # Fallback: compute the interval breaks from the coordinate values.
    return self._infer_interval_breaks(coord, kind=kind)
Get the bounds of a coordinate
43,383
def _get_plotbounds_from_cf ( coord , bounds ) : if bounds . shape [ : - 1 ] != coord . shape or bounds . shape [ - 1 ] != 2 : raise ValueError ( "Cannot interprete bounds with shape {0} for {1} " "coordinate with shape {2}." . format ( bounds . shape , coord . name , coord . shape ) ) ret = np . zeros ( tuple ( map ( lambda i : i + 1 , coord . shape ) ) ) ret [ tuple ( map ( slice , coord . shape ) ) ] = bounds [ ... , 0 ] last_slices = tuple ( slice ( - 1 , None ) for _ in coord . shape ) ret [ last_slices ] = bounds [ tuple ( chain ( last_slices , [ 1 ] ) ) ] return ret
Get plot bounds from the bounds stored as defined by CFConventions
43,384
def get_triangles(self, var, coords=None, convert_radian=True, copy=False,
                  src_crs=None, target_crs=None, nans=None, stacklevel=1):
    """Deprecated: get the matplotlib triangulation for ``var``.

    Use :meth:`get_cell_node_coord` instead.
    """
    warn("The 'get_triangles' method is depreceated and will be removed "
         "soon! Use the 'get_cell_node_coord' method!",
         DeprecationWarning, stacklevel=stacklevel)
    from matplotlib.tri import Triangulation

    def get_vertices(axis):
        # Flatten the triangular cell bounds of the axis into vertices.
        bounds = self._check_triangular_bounds(var, coords=coords,
                                               axis=axis, nans=nans)[1]
        if coords is not None:
            bounds = coords.get(bounds.name, bounds)
        vertices = bounds.values.ravel()
        if convert_radian:
            coord = getattr(self, 'get_' + axis)(var)
            if coord.attrs.get('units') == 'radian':
                vertices = vertices * 180. / np.pi
        return vertices if not copy else vertices.copy()

    if coords is None:
        coords = self.ds.coords
    xvert = get_vertices('x')
    yvert = get_vertices('y')
    if src_crs is not None and src_crs != target_crs:
        if target_crs is None:
            raise ValueError(
                "Found %s for the source crs but got None for the "
                "target_crs!" % (src_crs, ))
        # Reproject the vertices into the target coordinate system.
        arr = target_crs.transform_points(src_crs, xvert, yvert)
        xvert = arr[:, 0]
        yvert = arr[:, 1]
    # Vertices are stored three-per-cell in order.
    triangles = np.reshape(range(len(xvert)), (len(xvert) // 3, 3))
    return Triangulation(xvert, yvert, triangles)
Get the triangles for the variable
43,385
def _infer_interval_breaks(coord, kind=None):
    """Interpolate the cell boundaries from the data in ``coord``.

    1-D coordinates are delegated directly; 2-D coordinates are
    interpolated with :class:`scipy.interpolate.interp2d` using the
    ``kind`` (default from ``rcParams['decoder.interp_kind']``).
    """
    if coord.ndim == 1:
        # NOTE(review): presumably calls a module-level
        # _infer_interval_breaks helper (e.g. xarray's) -- confirm this
        # does not resolve to this method itself (infinite recursion).
        return _infer_interval_breaks(coord)
    elif coord.ndim == 2:
        from scipy.interpolate import interp2d
        kind = kind or rcParams['decoder.interp_kind']
        y, x = map(np.arange, coord.shape)
        new_x, new_y = map(_infer_interval_breaks, [x, y])
        coord = np.asarray(coord)
        return interp2d(x, y, coord, kind=kind, copy=False)(new_x, new_y)
Interpolate the bounds from the data in coord
43,386
def correct_dims(self, var, dims={}, remove=True):
    """Expand the dimensions in ``dims`` to match the ones of ``var``.

    Abstract axis keys ('x', 'y', 'z', 't') are mapped to the real
    dimension names of the variable; with ``remove`` keys not found in
    the variable's dimensions are dropped.
    """
    method_mapping = {'x': self.get_xname,
                      'z': self.get_zname, 't': self.get_tname}
    dims = dict(dims)
    if self.is_unstructured(var):
        # Unstructured: x and y share the same horizontal dimension.
        method_mapping['y'] = self.get_xname
    else:
        method_mapping['y'] = self.get_yname
    for key in six.iterkeys(dims.copy()):
        if key in method_mapping and key not in var.dims:
            dim_name = method_mapping[key](var, self.ds.coords)
            if dim_name in dims:
                # The real dimension is already given -> drop the alias.
                dims.pop(key)
            else:
                new_name = method_mapping[key](var)
                if new_name is not None:
                    dims[new_name] = dims.pop(key)
    if remove:
        for key in set(dims).difference(var.dims):
            dims.pop(key)
            self.logger.debug(
                "Could not find a dimensions matching %s in variable %s!",
                key, var)
    return dims
Expands the dimensions to match the dims in the variable
43,387
def standardize_dims(self, var, dims={}):
    """Replace the coordinate names in ``dims`` by 'x', 'y', 'z' and 't'.

    Fix: the original copied ``dims`` twice (``dims = dict(dims)`` both
    before and after building the name map); one defensive copy is
    enough to leave the caller's mapping untouched.

    Parameters:
        var: the variable whose dimension names are translated.
        dims (dict): mapping that may use the real dimension names.

    Returns:
        dict: a new mapping with the standard axis names substituted.
    """
    dims = dict(dims)  # copy so the caller's mapping is not modified
    name_map = {self.get_xname(var, self.ds.coords): 'x',
                self.get_yname(var, self.ds.coords): 'y',
                self.get_zname(var, self.ds.coords): 'z',
                self.get_tname(var, self.ds.coords): 't'}
    for dim in set(dims).intersection(name_map):
        dims[name_map[dim]] = dims.pop(dim)
    return dims
Replace the coordinate names through x y z and t
43,388
def get_mesh(self, var, coords=None):
    """Get the mesh variable referenced by ``var``'s 'mesh' attribute.

    Returns ``None`` when the variable has no 'mesh' attribute; the
    lookup falls back from ``coords`` to the dataset coordinates.
    """
    mesh_name = var.attrs.get('mesh')
    if mesh_name is None:
        return None
    if coords is None:
        coords = self.ds.coords
    return coords.get(mesh_name, self.ds.coords.get(mesh_name))
Get the mesh variable for the given var
43,389
def get_triangles(self, var, coords=None, convert_radian=True, copy=False,
                  src_crs=None, target_crs=None, nans=None, stacklevel=1):
    """Deprecated: get the UGRID triangulation of the given variable.

    Use :meth:`get_cell_node_coord` instead.
    """
    warn("The 'get_triangles' method is depreceated and will be removed "
         "soon! Use the 'get_cell_node_coord' method!",
         DeprecationWarning, stacklevel=stacklevel)
    from matplotlib.tri import Triangulation
    if coords is None:
        coords = self.ds.coords

    def get_coord(coord):
        return coords.get(coord, self.ds.coords.get(coord))

    mesh = self.get_mesh(var, coords)
    nodes = self.get_nodes(mesh, coords)
    if any(n is None for n in nodes):
        raise ValueError("Could not find the nodes variables!")
    xvert, yvert = nodes
    xvert = xvert.values
    yvert = yvert.values
    loc = var.attrs.get('location', 'face')
    if loc == 'face':
        triangles = get_coord(
            mesh.attrs.get('face_node_connectivity', '')).values
        # NOTE(review): get_coord returns None for a missing name, so
        # `.values` above would raise AttributeError before this check
        # is reached -- the None test looks dead; confirm.
        if triangles is None:
            raise ValueError(
                "Could not find the connectivity information!")
    elif loc == 'node':
        triangles = None
    else:
        raise ValueError(
            "Could not interprete location attribute (%s) of mesh "
            "variable %s!" % (loc, mesh.name))
    if convert_radian:
        # NOTE(review): rebinding the loop variable has no effect on
        # the xvert/yvert arrays extracted above -- the radian
        # conversion appears to be a no-op; confirm intended.
        for coord in nodes:
            if coord.attrs.get('units') == 'radian':
                coord = coord * 180. / np.pi
    if src_crs is not None and src_crs != target_crs:
        if target_crs is None:
            raise ValueError(
                "Found %s for the source crs but got None for the "
                "target_crs!" % (src_crs, ))
        # Flatten the vertices per triangle and reproject them.
        xvert = xvert[triangles].ravel()
        yvert = yvert[triangles].ravel()
        arr = target_crs.transform_points(src_crs, xvert, yvert)
        xvert = arr[:, 0]
        yvert = arr[:, 1]
        if loc == 'face':
            # Rebuild the connectivity for the flattened vertices.
            triangles = np.reshape(range(len(xvert)),
                                   (len(xvert) // 3, 3))
    return Triangulation(xvert, yvert, triangles)
Get the triangulation of the given variable.
43,390
def decode_coords(ds, gridfile=None):
    """Reimplemented to set the UGRID mesh variables as coordinates.

    Every variable with a 'mesh' attribute contributes its mesh
    variable, the node coordinates and the face-node connectivity to
    the coordinate set.
    """
    extra_coords = set(ds.coords)
    for var in six.itervalues(ds.variables):
        if 'mesh' in var.attrs:
            mesh = var.attrs['mesh']
            if mesh not in extra_coords:
                extra_coords.add(mesh)
                try:
                    mesh_var = ds.variables[mesh]
                except KeyError:
                    warn('Could not find mesh variable %s' % mesh)
                    continue
                if 'node_coordinates' in mesh_var.attrs:
                    extra_coords.update(
                        mesh_var.attrs['node_coordinates'].split())
                if 'face_node_connectivity' in mesh_var.attrs:
                    extra_coords.add(
                        mesh_var.attrs['face_node_connectivity'])
    if gridfile is not None and not isinstance(gridfile, xr.Dataset):
        gridfile = open_dataset(gridfile)
        # NOTE(review): a gridfile passed in as an already-open Dataset
        # is never merged here (unlike the CFDecoder version) -- confirm
        # this asymmetry is intended.
        ds.update({k: v for k, v in six.iteritems(gridfile.variables)
                   if k in extra_coords})
    if xr_version < (0, 11):
        ds.set_coords(extra_coords.intersection(ds.variables),
                      inplace=True)
    else:
        # xarray >= 0.11 removed inplace set_coords.
        ds._coord_names.update(extra_coords.intersection(ds.variables))
    return ds
Reimplemented to set the mesh variables as coordinates
43,391
def get_nodes(self, coord, coords):
    """Get the variables containing the definition of the nodes.

    Reads the first two names from the mesh variable's
    'node_coordinates' attribute and resolves them against ``coords``
    with the dataset coordinates as fallback.
    """
    names = coord.attrs.get('node_coordinates', '').split()[:2]
    return [coords.get(name, self.ds.coords.get(name)) for name in names]
Get the variables containing the definition of the nodes
43,392
def get_x(self, var, coords=None):
    """Get the centers of the triangles in the x-dimension."""
    if coords is None:
        coords = self.ds.coords
    # First try the inherited CF-based lookup.
    ret = super(UGridDecoder, self).get_x(var, coords)
    # If nothing was found, or only the dimension coordinate, compute
    # the cell centers from the cell node boundaries.
    if ret is None or ret.name in var.dims:
        # NOTE(review): when the super() lookup DOES find a proper
        # auxiliary coordinate, this function falls through and returns
        # None implicitly instead of `ret` -- confirm intended.
        bounds = self.get_cell_node_coord(var, axis='x', coords=coords)
        if bounds is not None:
            centers = bounds.mean(axis=-1)
            x = self.get_nodes(self.get_mesh(var, coords), coords)[0]
            try:
                cls = xr.IndexVariable
            except AttributeError:  # old xarray version
                cls = xr.Coordinate
            return cls(x.name, centers, attrs=x.attrs.copy())
Get the centers of the triangles in the x - dimension
43,393
def plot(self):
    """An object to visualize this data object.

    The plotter is created lazily on first access (the psyplot project
    import is deferred to avoid a circular import at module load time).
    """
    if self._plot is None:
        import psyplot.project as psy
        self._plot = psy.DataArrayPlotter(self)
    return self._plot
An object to visualize this data object
43,394
def _register_update ( self , replot = False , fmt = { } , force = False , todefault = False ) : self . replot = self . replot or replot if self . plotter is not None : self . plotter . _register_update ( replot = self . replot , fmt = fmt , force = force , todefault = todefault )
Register new formatoptions for updating
43,395
def dims_intersect(self):
    """Dimensions that are used in all arrays of this list.

    Nested lists contribute their own ``dims_intersect``.
    """
    all_dims = (set(getattr(arr, 'dims_intersect', arr.dims))
                for arr in self)
    return set.intersection(*all_dims)
Dimensions of the arrays in this list that are used in all arrays
43,396
def names(self):
    """Set of the variable names in this list.

    Nested :class:`InteractiveList` instances contribute their own
    name sets.
    """
    collected = set()
    for arr in self:
        if not isinstance(arr, InteractiveList):
            collected.add(arr.name)
        else:
            collected.update(arr.names)
    return collected
Set of the variable in this list
43,397
def all_names(self):
    """The variable names for each of the arrays in this list.

    Nested lists are represented by their own (nested) ``all_names``.
    """
    return [arr.all_names if isinstance(arr, ArrayList)
            else _get_variable_names(arr)
            for arr in self]
The variable names for each of the arrays in this list
43,398
def all_dims(self):
    """The dimensions for each of the arrays in this list.

    Nested lists are represented by their own (nested) ``all_dims``.
    """
    return [arr.all_dims if isinstance(arr, ArrayList) else _get_dims(arr)
            for arr in self]
The dimensions for each of the arrays in this list
43,399
def is_unstructured(self):
    """A boolean for each array whether it is unstructured or not.

    Nested lists are represented by their own (nested) flags.
    """
    return [arr.is_unstructured if isinstance(arr, ArrayList)
            else arr.psy.decoder.is_unstructured(arr)
            for arr in self]
A boolean for each array whether it is unstructured or not