idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
25,700
def _load(self):
    """Read and return the raw bytes of the referenced file.

    :return: file contents as bytes.
    :raises NotFoundError: if the referenced file does not exist.
    """
    if self.is_exists():
        # Use a context manager so the handle is always closed
        # (the original left the file handle open until GC).
        with open(self._ref, "rb") as file_handle:
            return file_handle.read()
    raise NotFoundError("File %s not found" % self._ref)
Function load .
51
3
25,701
def get_file(self):
    """Load data into a file and return the file path.

    :return: name of the written file, or None when there is no content.
    """
    data = self._load()
    if not data:
        return None
    out_name = "temporary_file.bin"
    with open(out_name, "wb") as out_file:
        out_file.write(data)
    return out_name
Load data into a file and return file path .
56
10
25,702
def as_dict(self):
    """Generate a dictionary of the contents of this DutInformation object.

    Only truthy attributes are included in the result.
    """
    attr_to_key = [
        ("platform", "model"),
        ("resource_id", "sn"),
        ("vendor", "vendor"),
        ("provider", "provider"),
    ]
    info = {}
    for attr, key in attr_to_key:
        value = getattr(self, attr)
        if value:
            info[key] = value
    return info
Generate a dictionary of the contents of this DutInformation object .
92
14
25,703
def get_resource_ids(self):
    """Get resource ids as a list.

    NOTE(review): returns the string "unknown" when no dut information
    exists - inconsistent return type, kept for caller compatibility.
    """
    if not self.dutinformations:
        return "unknown"
    return [info.resource_id for info in self.dutinformations]
Get resource ids as a list .
53
8
25,704
def push_resource_cache(resourceid, info):
    """Cache resource specific information.

    :param resourceid: id of the resource this info belongs to.
    :param info: dictionary merged into the existing cache entry.
    :raises ResourceInitError: if resourceid is falsy.
    """
    if not resourceid:
        raise ResourceInitError("Resource id missing")
    cache = DutInformationList._cache
    if not cache.get(resourceid):
        cache[resourceid] = dict()
    cache[resourceid] = merge(cache[resourceid], info)
Cache resource specific information
88
4
25,705
def get_resource_cache(resourceid):
    """Get a cached dictionary related to an individual resourceid.

    An empty entry is created on first access.

    :raises ResourceInitError: if resourceid is falsy.
    """
    if not resourceid:
        raise ResourceInitError("Resource id missing")
    cache = DutInformationList._cache
    if not cache.get(resourceid):
        cache[resourceid] = dict()
    return cache[resourceid]
Get a cached dictionary related to an individual resourceid .
70
11
25,706
def create_result_object(result):
    """Create cloud result object from Result.

    :param result: Result instance to convert.
    :return: dictionary with empty values pruned out.
    """
    _result = {
        'tcid': result.get_tc_name(),
        'campaign': result.campaign,
        'cre': {'user': result.tester},
        'job': {'id': result.job_id},
        'exec': {
            'verdict': result.get_verdict(),
            'duration': result.duration,
            'note': result.get_fail_reason(),
            'dut': {
                'count': result.dut_count,
                'type': result.dut_type
            },
            'sut': {
                'branch': result.build_branch,
                'commitId': result.buildcommit,
                'buildDate': result.build_date,
                'buildSha1': result.build_sha1,
                'buildUrl': result.build_url,
                'gitUrl': result.build_git_url,
                'cut': result.component,  # Component Under Test
                'fut': result.feature  # Feature Under Test
            },
            'env': {
                'framework': {
                    'name': result.fw_name,
                    'ver': result.fw_version
                }
            },
            "logs": []
        }
    }
    if result.dut_resource_id:
        _result["exec"]["dut"]["sn"] = result.dut_resource_id
    if result.dut_vendor and result.dut_vendor[0]:
        _result["exec"]["dut"]["vendor"] = result.dut_vendor[0]
    if result.dut_models and result.dut_models[0]:
        _result["exec"]["dut"]["model"] = result.dut_models[0]
    # pylint: disable=len-as-condition
    if len(result.dut_models) == 1 and len(result.dut_resource_id) == 1:
        _result["exec"]["dut"]["sn"] = result.dut_resource_id[0]
    return remove_empty_from_dict(_result)
Create cloud result object from Result .
499
7
25,707
def append_logs_to_result_object(result_obj, result):
    """Append log files to cloud result object from Result.

    Only framework ("bench") logs are attached; unreadable files are skipped.
    """
    logs = result.has_logs()
    result_obj["exec"]["logs"] = []
    if not (logs and result.logfiles):
        return
    for log in logs:
        basename = log.split(os.sep)[-1]
        typ = None
        if "bench" in basename:
            typ = "framework"
        # elif "Dut" in basename:
        #     typ = "dut"
        if typ is None:
            continue
        try:
            with open(log, "r") as log_file:
                data = log_file.read()
        except OSError:
            continue
        result_obj["exec"]["logs"].append({"data": data, "name": basename, "from": typ})
Append log files to cloud result object from Result .
209
11
25,708
def get_available_devices(self):
    """Gets available devices using mbedls and self.available_edbg_ports."""
    devices = self.mbeds.list_mbeds() if self.mbeds else []
    # Check non mbedOS supported devices.
    # Just for backward compatible reason - is obsolete..
    for port in self.available_edbg_ports():
        devices.append({
            "platform_name": "SAM4E",
            "serial_port": port,
            "mount_point": None,
            "target_id": None,
            "baud_rate": 460800
        })
    for device in devices:
        device['state'] = "unknown"
    return devices
Gets available devices using mbedls and self . available_edbg_ports .
152
18
25,709
def available_edbg_ports(self):
    """Finds available EDBG COM ports.

    :return: list of unique port names matching the EDBG description or VID:PID.
    """
    edbg_ports = []
    for iport in sorted(list(list_ports.comports())):
        port, desc, hwid = iport[0], iport[1], iport[2]
        is_edbg = (str(desc).startswith("EDBG Virtual COM Port")
                   or "VID:PID=03EB:2111" in str(hwid).upper())
        if not is_edbg:
            continue
        # print("%-10s: %s (%s)\n" % (port, desc, hwid))
        if port in edbg_ports:
            print("There is multiple %s ports with same number!" % port)
        else:
            edbg_ports.append(port)
    # print("Detected %i DUT's" % len(edbg_ports))
    return edbg_ports
Finds available EDBG COM ports .
197
8
25,710
def store_traces(self, value):
    """Setter for _store_traces.

    _store_traces controls in-memory storing of received lines.
    Also logs the change for the user.
    """
    if value:
        self.logger.debug("Resuming storing received lines for dut %d", self.index)
    else:
        self.logger.debug("Stopping storing received lines for dut %d", self.index)
    self._store_traces = bool(value)
Setter for _store_traces . _store_traces controls in memory storing of received lines . Also logs the change for the user .
81
30
25,711
def init_wait_register(self):
    """Initialize EventMatcher to wait for cli_ready_trigger to arrive from this Dut.

    If no trigger is configured, init_done is set immediately.
    """
    app = self.config.get("application")
    if app:
        trigger = app.get("cli_ready_trigger")
        if trigger:
            self.init_done.clear()
            self.init_event_matcher = EventMatcher(EventTypes.DUT_LINE_RECEIVED,
                                                   trigger, self, self.init_done)
            self.init_wait_timeout = app.get("cli_ready_trigger_timeout", 30)
            return
    self.init_done.set()
    return
Initialize EventMatcher to wait for certain cli_ready_trigger to arrive from this Dut .
140
22
25,712
def wait_init(self):
    """Block until init_done flag is set or until init_wait_timeout happens.

    As a fallback, peek already-received data for the cli_ready_trigger string.
    """
    if self.init_done.wait(timeout=self.init_wait_timeout):
        return True
    if hasattr(self, "peek"):
        app = self.config.get("application")
        if app:
            trigger = app.get("cli_ready_trigger")
            if trigger in self.peek():  # pylint: disable=no-member
                return True
    return False
Block until init_done flag is set or until init_wait_timeout happens .
117
17
25,713
def init_cli_human(self):
    """Send post_cli_cmds to dut.

    List-form commands carry [cmd, asynchronous, wait]; test step failures
    are swallowed so remaining commands still run.
    """
    if self.post_cli_cmds is None:
        self.post_cli_cmds = self.set_default_init_cli_human_cmds()
    for command in self.post_cli_cmds:
        try:
            if isinstance(command, list) and len(command) >= 2:
                asynchronous = command[1]
                wait = command[2] if len(command) > 2 else True
                self.execute_command(command[0], wait=wait, asynchronous=asynchronous)
            else:
                self.execute_command(command)
        except (TestStepFail, TestStepError, TestStepTimeout):
            continue
Send post_cli_cmds to dut
171
10
25,714
def set_time_function(self, function):
    """Set time function to be used.

    :param function: any callable returning the current time.
    :raises ValueError: if function is not callable.
    """
    # Accept any callable: the original isinstance(..., types.FunctionType)
    # check rejected builtins such as time.time and bound methods, which are
    # perfectly valid time sources.
    if callable(function):
        self.get_time = function
    else:
        raise ValueError("Invalid value for DUT time function")
Set time function to be used .
47
7
25,715
def open_dut(self, port=None):
    """Open connection to dut.

    :param port: optional com port override.
    :raises DutConnectionError: if the connection cannot be opened.
    """
    if port is not None:
        self.comport = port
    try:
        self.open_connection()
    except (DutConnectionError, ValueError) as err:
        self.close_dut(use_prepare=False)
        raise DutConnectionError(str(err))
    except KeyboardInterrupt:
        # User abort: tear everything down before propagating.
        self.close_dut(use_prepare=False)
        self.close_connection()
        raise
Open connection to dut .
99
6
25,716
def _wait_for_exec_ready(self):
    """Wait for response to the previously sent command.

    :return: the received response object.
    :raises TestStepTimeout: if query_timeout is exceeded.
    :raises TestStepError: if the DUT died while waiting.
    """
    while not self.response_received.wait(1) and self.query_timeout != 0:
        if self.query_timeout != 0 and self.query_timeout < self.get_time():
            cmd = self.prev.cmd if self.prev else "???"
            self.logger.error("CMD timeout: " + cmd)
            self.query_timeout = 0
            raise TestStepTimeout(self.name + " CMD timeout: " + cmd)
        self.logger.debug("Waiting for response... "
                          "timeout=%d", self.query_timeout - self.get_time())
        self._dut_is_alive()
    if self.response_coming_in == -1:
        if self.query_async_response is not None:
            # fullfill the async response with a dummy response and clean the state
            self.query_async_response.set_response(CliResponse())
            self.query_async_response = None
        # raise and log the error
        self.logger.error("No response received, DUT died")
        raise TestStepError("No response received, DUT " + self.name + " died")
    # if an async response is pending, fullfill it with the result
    if self.query_async_response is not None:
        self.query_async_response.set_response(self.response_coming_in)
        self.query_async_response = None
    self.query_timeout = 0
    return self.response_coming_in
Wait for response .
347
4
25,717
def execute_command(self, req, **kwargs):
    """Execute command and return CliResponse.

    :param req: command string (backward compatible) or CliRequest instance.
    :param kwargs: wait (bool), timeout (ms) and asynchronous (bool) options.
    :return: CliResponse, or CliAsyncResponse for asynchronous requests.
    """
    if isinstance(req, string_types):
        # backward compatible
        wait = kwargs.get('wait', True)
        timeout = kwargs.get('timeout', 50)  # [ms]; same default as bench.py
        asynchronous = kwargs.get('asynchronous', False)
        req = CliRequest(req, timestamp=self.get_time(), wait=wait,
                         timeout=timeout, asynchronous=asynchronous)

    # wait for previous command ready
    if req.wait:
        response = self._wait_for_exec_ready()
        if response is not None and self.query_async_expected is not None:
            if response.retcode != self.query_async_expected:
                self.logger.error("Asynch call returned unexpected result, "
                                  "expected %d was %d",
                                  self.query_async_expected, response.retcode)
                raise TestStepFail("Asynch call returned unexpected result")
            self.query_async_expected = None

    # Tell Query to worker thread
    self.response_received.clear()
    self.query_timeout = self.get_time() + req.timeout if req.wait else 0
    self.query = req
    msg = "Async CMD {}, timeout={}, time={}" if req.asynchronous \
        else "CMD {}, timeout={}, time={}"
    msg = msg.format(req.cmd, int(self.query_timeout), int(self.get_time()))
    self.logger.debug(msg, extra={'type': '<->'})
    Dut.process_dut(self)
    if req.asynchronous is True:
        self.query_async_expected = req.expected_retcode
        async_response = CliAsyncResponse(self)
        self.query_async_response = async_response
        return async_response
    if req.wait is False:
        self.query_async_expected = req.expected_retcode
        # if an async response was waiting, just discard the result
        # since the new command has already been sent...
        # This is not ideal but when a command has its flags "Wait == False"
        # the result of the previous command is already discarded in previous
        # stages
        if self.query_async_response is not None:
            self.query_async_response.set_response(CliResponse())
            self.query_async_response = None
        return CliResponse()
    return self._wait_for_exec_ready()
Execute command and return CliResponse
580
8
25,718
def close_dut(self, use_prepare=True):
    """Close connection to dut.

    :param use_prepare: when True, run prepare_connection_close first.
    """
    if self.stopped:
        return
    self.logger.debug("Close '%s' connection" % self.dut_name, extra={'type': '<->'})
    if use_prepare:
        try:
            self.prepare_connection_close()
        except TestStepFail:
            # We can ignore this for dead Duts, just continue with cleanup
            pass
    self.stopped = True
    Dut._dutlist.remove(self)
    # Remove myself from signalled dut list, if I'm still there
    if Dut._signalled_duts and Dut._signalled_duts.count(self):
        try:
            Dut._signalled_duts.remove(self)
        except ValueError:
            pass
    try:
        if not Dut._dutlist:
            # Last dut closed: stop and reap the worker thread.
            Dut._run = False
            Dut._sem.release()
            Dut._th.join()
            del Dut._th
            Dut._th = None
    except AttributeError:
        pass
Close connection to dut .
229
6
25,719
def process_dut(dut):
    """Signal worker thread that specified Dut needs processing.

    :param dut: Dut instance to queue for the worker thread.
    """
    if not dut.finished():
        Dut._signalled_duts.appendleft(dut)
        Dut._sem.release()
Signal worker thread that specified Dut needs processing
44
10
25,720
def run():  # pylint: disable=too-many-branches
    """Main thread runner for all Duts.

    Services, in priority order: pending responses, new requests, and
    unsolicited lines read from the dut.
    """
    Dut._logger.debug("Start DUT communication", extra={'type': '<->'})
    while Dut._run:
        Dut._sem.acquire()
        try:
            dut = Dut._signalled_duts.pop()
            # Check for pending requests
            if dut.waiting_for_response is not None:
                item = dut.waiting_for_response
                # pylint: disable=protected-access
                dut.response_coming_in = dut._read_response()
                if dut.response_coming_in is None:
                    # Continue to next node
                    continue
                if isinstance(dut.response_coming_in, CliResponse):
                    dut.response_coming_in.set_response_time(
                        item.get_timedelta(dut.get_time()))
                dut.waiting_for_response = None
                dut.logger.debug("Got response", extra={'type': '<->'})
                dut.response_received.set()
                continue
            # Check for new Request
            if dut.query is not None:
                item = dut.query
                dut.query = None
                dut.logger.info(item.cmd, extra={'type': '-->'})
                try:
                    dut.writeline(item.cmd)
                except RuntimeError:
                    dut.response_coming_in = -1
                    dut.response_received.set()
                    continue
                dut.prev = item  # Save previous command for logging purposes
                if item.wait:
                    # Only caller will care if this was asynchronous.
                    dut.waiting_for_response = item
                else:
                    dut.query_timeout = 0
                    dut.response_received.set()
                continue
            try:
                line = dut.readline()
            except RuntimeError:
                dut.response_coming_in = -1
                dut.response_received.set()
                continue
            if line:
                if dut.store_traces:
                    dut.traces.append(line)
                EventObject(EventTypes.DUT_LINE_RECEIVED, dut, line)
                retcode = dut.check_retcode(line)
                if retcode is not None:
                    dut.logger.warning("unrequested retcode", extra={'type': '!<-'})
                dut.logger.debug(line, extra={'type': '<<<'})
        except IndexError:
            # No duts in the signalled queue; go back to waiting.
            pass
    Dut._logger.debug("End DUT communication", extra={'type': '<->'})
Main thread runner for all Duts .
581
8
25,721
def _read_response(self):
    """Internal response reader.

    :return: CliResponse when a retcode line was seen, -1 on pipe failure,
             None otherwise.
    """
    try:
        line = self.readline()
    except RuntimeError:
        Dut._logger.warning("Failed to read PIPE", extra={'type': '!<-'})
        return -1
    if not line:
        return None
    if self.store_traces:
        self.traces.append(line)
    self.response_traces.append(line)
    EventObject(EventTypes.DUT_LINE_RECEIVED, self, line)
    # Lines matching the framework's "[tag][module]: msg" format are demoted
    # to debug level; everything else is info.
    if re.search(r"^\[([\w\W]{4})\]\[([\W\w]{4,}?)\]\: (.*)", line):
        self.logger.debug(line, extra={'type': '<<<'})
    else:
        self.logger.info(line, extra={'type': '<--'})
    retcode = self.check_retcode(line)
    if retcode is not None:
        resp = CliResponse()
        resp.retcode = retcode
        resp.traces = self.response_traces
        resp.lines = self.response_traces
        self.response_traces = []
        return resp
    return None
Internal response reader .
266
4
25,722
def check_retcode(self, line):
    """Look for retcode on line and return return code if found.

    :param line: received line to inspect.
    :return: integer retcode, -1 on device boot-up, or None when absent.
    """
    retcode = None
    match = re.search(r"retcode\: ([-\d]{1,})", line)
    if match:
        retcode = num(str(match.group(1)))
    if re.search("cmd tasklet init", line):
        self.logger.debug("Device Boot up", extra={'type': ' '})
        return -1
    return retcode
Look for retcode on line line and return return code if found .
105
14
25,723
def start_dut_thread(self):  # pylint: disable=no-self-use
    """Start Dut thread (no-op if it is already running)."""
    if Dut._th is not None:
        return
    Dut._run = True
    Dut._sem = Semaphore(0)
    Dut._signalled_duts = deque()
    Dut._logger = LogManager.get_bench_logger('Dut')
    Dut._th = Thread(target=Dut.run, name='DutThread')
    Dut._th.daemon = True
    Dut._th.start()
Start Dut thread .
128
5
25,724
def _event_received(self, ref, data):
    """Handle received event.

    On a match: set the flag, fire the callback, optionally forget the matcher.
    """
    match = self._resolve_match_data(ref, data)
    if not match:
        return
    if self.flag_to_set:
        self.flag_to_set.set()
    if self.callback:
        self.callback(EventMatch(ref, data, match))
    if self.__forget:
        self.forget()
Handle received event .
83
4
25,725
def write_file(self, content, filepath=None, filename=None, indent=None, keys_to_write=None):
    """Write a Python dictionary as JSON to a file.

    :param content: dictionary to write.
    :param filepath: target directory, defaults to self.filepath.
    :param filename: target file name, defaults to self.filename.
    :param indent: JSON indent, defaults to 2.
    :param keys_to_write: optional subset of keys to write.
    :return: full path of the written file.
    :raises OSError: if the target directory cannot be created.
    :raises EnvironmentError, ValueError: on write failures.
    """
    path = filepath if filepath else self.filepath
    name = filename if filename else self.filename
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError as error:
            self.logger.error("Error while creating directory: {}".format(error))
            raise
    name = self._ends_with(name, ".json")
    path = self._ends_with(path, os.path.sep)
    if keys_to_write:
        data_to_write = {key: content[key] for key in keys_to_write}
    else:
        data_to_write = content
    try:
        self._write_json(path, name, 'w', data_to_write, indent if indent else 2)
        return os.path.join(path, name)
    except EnvironmentError as error:
        self.logger.error("Error while opening or writing to file: {}".format(error))
        raise
    except ValueError:
        raise
Write a Python dictionary as JSON to a file .
258
10
25,726
def read_file(self, filepath=None, filename=None):
    """Tries to read JSON content from filename and convert it to a dict.

    :return: dictionary read from the file.
    :raises EnvironmentError: on file access errors.
    :raises ValueError: if the file is not valid JSON.
    """
    name = self._ends_with(filename if filename else self.filename, ".json")
    path = self._ends_with(filepath if filepath else self.filepath, os.path.sep)
    try:
        return self._read_json(path, name)
    except EnvironmentError as error:
        self.logger.error("Error while opening or reading the file: {}".format(error))
        raise
    except ValueError as error:
        self.logger.error("File contents cannot be decoded to JSON: {}".format(error))
        raise
Tries to read JSON content from filename and convert it to a dict .
145
15
25,727
def read_value(self, key, filepath=None, filename=None):
    """Tries to read the value of given key from JSON file filename.

    :raises KeyError: if key is not present in the file contents.
    :raises EnvironmentError: on file access errors.
    """
    path = self._ends_with(filepath if filepath else self.filepath, os.path.sep)
    name = self._ends_with(filename if filename else self.filename, ".json")
    try:
        output = self._read_json(path, name)
        if key not in output:
            raise KeyError("Key '{}' not found in file {}".format(key, filename))
        return output[key]
    except EnvironmentError as error:
        self.logger.error("Error while opening or reading the file: {}".format(error))
        raise
Tries to read the value of given key from JSON file filename .
154
14
25,728
def write_values(self, data, filepath=None, filename=None, indent=None, keys_to_write=None):
    """Tries to write extra content to a JSON file.

    Creates the file if it does not exist; otherwise merges data into the
    existing JSON through a temp-file swap.

    :return: full path of the written file.
    """
    name = filename if filename else self.filename
    path = filepath if filepath else self.filepath
    name = self._ends_with(name, ".json")
    path = self._ends_with(path, os.path.sep)
    if not os.path.isfile(path + name):
        try:
            return self.write_file(data, path, name, indent, keys_to_write)
        except EnvironmentError as error:
            self.logger.error("Error while opening or writing to file: {}".format(error))
            raise
        except ValueError:
            raise
    if keys_to_write:
        data_to_write = {key: data[key] for key in keys_to_write}
    else:
        data_to_write = data
    try:
        with open(path + name, 'r') as fil:
            output = json.load(fil)
        self.logger.info("Read contents of {}".format(filename))
        for key in data_to_write:
            try:
                output[key] = data_to_write[key]
            except TypeError as error:
                self.logger.error(
                    "File contents could not be serialized into a dict. {}".format(error))
                raise
        # Write to a temp file first, then atomically swap it in.
        self._write_json(path, name + ".temp", "w", output, indent)
        FileUtils.remove_file(name, path)
        FileUtils.rename_file(name + '.temp', name, path)
        return os.path.join(path, name)
    except EnvironmentError as error:
        self.logger.error(
            "Error while writing to, opening or reading the file: {}".format(error))
        raise
    except ValueError as error:
        self.logger.error(
            "File could not be decoded to JSON. It might be empty? {}".format(error))
        try:
            self._write_json(path, name, "w", data_to_write, indent)
            return os.path.join(path, name)
        except EnvironmentError:
            raise
Tries to write extra content to a JSON file .
474
11
25,729
def _write_json(self, filepath, filename, writemode, content, indent):
    """Helper for writing content to a file as JSON."""
    target = os.path.join(filepath, filename)
    with open(target, writemode) as fil:
        json.dump(content, fil, indent=indent)
    self.logger.info("Wrote content to file {}".format(filename))
Helper for writing content to a file .
76
8
25,730
def _read_json(self, path, name):
    """Load a json into a dictionary from a file."""
    with open(os.path.join(path, name), 'r') as fil:
        output = json.load(fil)
    self.logger.info("Read contents of {}".format(name))
    return output
Load a json into a dictionary from a file .
62
10
25,731
def _ends_with(self, string_to_edit, end):  # pylint: disable=no-self-use
    """Append end to string_to_edit unless it already ends with it."""
    return string_to_edit if string_to_edit.endswith(end) else string_to_edit + end
Check if string ends with characters in end if not merge end to string .
58
15
25,732
def parse(self, *args, **kwargs):  # pylint: disable=unused-argument
    # pylint: disable=W0703
    """Parse response.

    args[0] is the command name, args[1] the response to parse.
    :return: parser output, or {} when no parser exists or the parser fails.
    """
    cmd, resp = args[0], args[1]
    if cmd in self.parsers:
        try:
            return self.parsers[cmd](resp)
        except Exception as err:
            print(err)
    return {}
Parse response .
81
4
25,733
def append(self, result):
    """Append a new Result (or all items of a ResultList) to the list.

    :raises TypeError: for any other type.
    """
    if isinstance(result, Result):
        self.data.append(result)
    elif isinstance(result, ResultList):
        self.data += result.data
    else:
        raise TypeError('unknown result type')
Append a new Result to the list .
55
9
25,734
def save(self, heads, console=True):
    """Create reports in different formats.

    :param heads: headers for the html report.
    :param console: also print a console summary when True.
    """
    self._save_junit()  # Junit
    self._save_html_report(heads)  # HTML
    if console:
        self._print_console_summary()  # Console print
Create reports in different formats .
53
6
25,735
def _save_junit(self):
    """Save Junit report to both the result and log-root locations."""
    report = ReportJunit(self)
    report.generate(report.get_latest_filename("result.junit.xml", ""))
    report.generate(report.get_latest_filename("junit.xml", "../"))
Save Junit report .
80
5
25,736
def _save_html_report(self, heads=None, refresh=None):
    """Save html report, both the current file and latest.html."""
    report = ReportHtml(self)
    heads = heads if heads else {}
    report.generate(report.get_current_filename("html"),
                    title='Test Results', heads=heads, refresh=refresh)
    # Update latest.html in the log root directory
    report.generate(report.get_latest_filename("html"),
                    title='Test Results', heads=heads, refresh=refresh)
Save html report .
131
4
25,737
def success_count(self):
    """Amount of passed test cases in this list."""
    return sum(1 for result in self.data if result.success)
Amount of passed test cases in this list .
31
9
25,738
def failure_count(self):
    """Amount of failed test cases in this list."""
    return sum(1 for result in self.data if result.failure)
Amount of failed test cases in this list .
31
9
25,739
def inconclusive_count(self):
    """Amount of inconclusive or unknown-verdict test cases in this list."""
    inconclusive = sum(1 for result in self.data if result.inconclusive)
    unknown = sum(1 for result in self.data if result.get_verdict() == "unknown")
    return inconclusive + unknown
Amount of inconclusive test cases in this list .
81
10
25,740
def retry_count(self):
    """Amount of retried test cases in this list."""
    return sum(1 for result in self.data if result.retries_left > 0)
Amount of retried test cases in this list .
42
10
25,741
def skip_count(self):
    """Amount of skipped test cases in this list."""
    return sum(1 for result in self.data if result.skip)
Amount of skipped test cases in this list .
31
9
25,742
def clean_fails(self):
    """Check if there are any fails that were not subsequently retried."""
    return any(item.failure and not item.retries_left > 0 for item in self.data)
Check if there are any fails that were not subsequently retried .
35
13
25,743
def clean_inconcs(self):
    """Check if there are any inconclusives or unknowns that were not retried."""
    for item in self.data:
        is_inconc = item.inconclusive or item.get_verdict() == "unknown"
        if is_inconc and not item.retries_left > 0:
            return True
    return False
Check if there are any inconclusives or unknowns that were not subsequently retried .
52
19
25,744
def total_duration(self):
    """Sum of the durations of the tests in this list."""
    return sum(result.duration for result in self.data)
Sum of the durations of the tests in this list .
28
12
25,745
def pass_rate(self, include_skips=False, include_inconclusive=False, include_retries=True):
    """Calculate pass rate for tests in this list.

    :param include_skips: count skipped cases in the total.
    :param include_inconclusive: count inconclusive/unknown cases in the total.
    :param include_retries: count retried cases in the total.
    :return: pass rate as a string, e.g. "50.00 %".
    """
    total = self.count()
    success = self.success_count()
    retries = self.retry_count()
    try:
        if include_inconclusive and include_skips and include_retries:
            val = 100.0 * success / total
        elif include_inconclusive and include_skips and not include_retries:
            val = 100.0 * success / (total - retries)
        elif include_skips and include_retries and not include_inconclusive:
            val = 100.0 * success / (total - self.inconclusive_count())
        elif include_skips and not include_retries and not include_inconclusive:
            val = 100.0 * success / (total - self.inconclusive_count() - retries)
        elif include_inconclusive and include_retries and not include_skips:
            val = 100.0 * success / (total - self.skip_count())
        elif include_inconclusive and not include_retries and not include_skips:
            val = 100.0 * success / (total - self.skip_count() - retries)
        elif not include_inconclusive and not include_skips and include_retries:
            val = 100.0 * success / (self.failure_count() + success)
        else:
            # BUGFIX: the original used self.clean_fails() (a bool, so at most
            # 1) as a failure COUNT here, undercounting whenever more than one
            # non-retried failure exists. Count them properly instead.
            clean_failures = sum(
                1 for item in self.data if item.failure and not item.retries_left > 0)
            val = 100.0 * success / (clean_failures + success)
    except ZeroDivisionError:
        val = 0
    return format(val, '.2f') + " %"
Calculate pass rate for tests in this list .
386
11
25,746
def get_summary(self):
    """Get a summary of this ResultList's contents as a dictionary."""
    return {
        "count": self.count(),
        "pass": self.success_count(),
        "fail": self.failure_count(),
        "skip": self.skip_count(),
        "inconclusive": self.inconclusive_count(),
        "retries": self.retry_count(),
        "duration": self.total_duration(),
    }
Get a summary of this ResultLists contents as dictionary .
97
12
25,747
def next(self):
    """Implementation of next method from Iterator.

    :raises StopIteration: when the list is exhausted (the cursor is reset).
    """
    try:
        item = self.data[self.index]
    except IndexError:
        self.index = 0
        raise StopIteration
    self.index += 1
    return item
Implementation of next method from Iterator .
38
9
25,748
def deprecated(message=""):
    """Decorator which marks functions as deprecated.

    A DeprecationWarning is emitted the first time the function is called
    from a given call site (later calls from the same site stay silent).

    :param message: extra text appended to the warning.
    """
    def decorator_wrapper(func):
        """Generate decorator wrapper function for func."""
        @functools.wraps(func)
        def function_wrapper(*args, **kwargs):
            """Wrapper which recognizes the deprecated call site from the stack."""
            call_site = '|'.join(traceback.format_stack(inspect.currentframe()))
            if call_site not in function_wrapper.last_call_source:
                warnings.warn("Function {} is now deprecated! {}".format(func.__name__, message),
                              category=DeprecationWarning, stacklevel=2)
                function_wrapper.last_call_source.add(call_site)
            return func(*args, **kwargs)
        # Remember call sites already warned about.
        function_wrapper.last_call_source = set()
        return function_wrapper
    return decorator_wrapper
This is a decorator which can be used to mark functions as deprecated . It will result in a warning being emitted when the function is used first time and filter is set for show DeprecationWarning .
235
41
25,749
def remove_file(filename, path=None):
    """Remove file filename from path.

    :return: True on success.
    :raises OSError: if chdir or remove fails.
    """
    cwd = os.getcwd()
    if path:
        os.chdir(path)  # may raise OSError; nothing to restore yet
    try:
        os.remove(filename)
        return True
    finally:
        # Always restore the original working directory.
        os.chdir(cwd)
Remove file filename from path .
74
6
25,750
def verify_message(self, expected_response, break_in_fail=True):
    """Verifies that expected_response is found in self.lines.

    :param expected_response: message(s) to look for.
    :param break_in_fail: raise on failure instead of returning False.
    :return: True when found, otherwise False (when break_in_fail is False).
    :raises LookupError, TypeError: when break_in_fail is True.
    """
    try:
        found = verify_message(self.lines, expected_response)
    except (TypeError, LookupError) as inst:
        if break_in_fail:
            raise inst
        found = False
    if found is False and break_in_fail:
        raise LookupError("Unexpected message found")
    return found
Verifies that expected_response is found in self . lines .
88
13
25,751
def verify_trace(self, expected_traces, break_in_fail=True):
    """Verifies that expected_traces is found in self.traces.

    :param expected_traces: trace message(s) to look for.
    :param break_in_fail: raise on failure instead of returning False.
    :return: True when found, otherwise False (when break_in_fail is False).
    :raises LookupError, TypeError: when break_in_fail is True.
    """
    try:
        found = verify_message(self.traces, expected_traces)
    except (TypeError, LookupError) as inst:
        if break_in_fail:
            raise inst
        found = False
    if found is False and break_in_fail:
        raise LookupError("Unexpected message found")
    return found
Verifies that expected_traces is found in self . traces
90
11
25,752
def verify_response_duration(self, expected=None, zero=0, threshold_percent=0,
                             break_in_fail=True):
    """Verify that response duration is in bounds.

    :param expected: expected duration; None is treated as 0.
    :param zero: zero-point subtracted from the measured timedelta.
    :param threshold_percent: allowed error percentage.
    :param break_in_fail: raise TestStepFail when out of bounds.
    :return: tuple (measured, expected, error_percent).
    """
    if expected is None:
        # BUGFIX: the original crashed with a TypeError ("None > 0") when the
        # default expected=None was used; treat it as 0 (no expectation).
        expected = 0
    was = self.timedelta - zero
    error = abs(was / expected) * 100.0 - 100.0 if expected > 0 else 0
    self.logger.debug("should: %.3f, was: %.3f, error: %.3f %%" % (expected, was, error))
    if abs(error) > threshold_percent:
        msg = "Thread::wait error(%.2f %%) was out of bounds (%.2f %%)" % (
            error, threshold_percent)
        self.logger.debug(msg)
        if break_in_fail:
            raise TestStepFail(msg)
    return was, expected, error
Verify that response duration is in bounds .
173
9
25,753
def _hardware_count(self):
    """Amount of hardware resources (hardware + serial + mbed).

    Missing counters default to 0; the original raised a TypeError
    (None + int) when a counter key was absent.
    """
    return (self._counts.get("hardware", 0)
            + self._counts.get("serial", 0)
            + self._counts.get("mbed", 0))
Amount of hardware resources .
51
5
25,754
def _resolve_requirements(self, requirements):
    """Internal method for resolving requirements into resource configurations.

    :param requirements: dictionary with a "duts" key.
    :return: [] if no "*" count is defined, otherwise None (results are
             stored in self._dut_requirements).
    """
    try:
        dut_count = requirements["duts"]["*"]["count"]
    except KeyError:
        return []
    default_values = {
        "type": "hardware",
        "allowed_platforms": [],
        "nick": None,
    }
    default_values.update(requirements["duts"]["*"])
    del default_values["count"]
    dut_keys = list(default_values.keys())
    dut_keys.extend(["application", "location", "subtype"])
    dut_requirements = self.__generate_indexed_requirements(dut_count,
                                                            default_values,
                                                            requirements)
    # Match groups of duts defined with 1..40 notation.
    for key in requirements["duts"].keys():
        if not isinstance(key, string_types):
            continue
        match = re.search(r'([\d]{1,})\.\.([\d]{1,})', key)
        if not match:
            continue
        first_dut_idx = int(match.group(1))
        last_dut_idx = int(match.group(2))
        for i in range(first_dut_idx, last_dut_idx + 1):
            for k in dut_keys:
                if k in requirements["duts"][key]:
                    dut_requirements[i - 1].set(k, copy.copy(requirements["duts"][key][k]))
    for idx, req in enumerate(dut_requirements):
        if isinstance(req.get("nick"), string_types):
            nick = req.get("nick")
            req.set("nick",
                    ResourceConfig.__replace_base_variables(nick, len(dut_requirements), idx))
        self._solve_location(req, len(dut_requirements), idx)
    self._dut_requirements = dut_requirements
    return None
Internal method for resolving requirements into resource configurations .
474
9
25,755
def _solve_location(self, req, dut_req_len, idx):
    """Helper function for resolving the location for a resource.

    String coordinates are template-expanded and evaluated; on syntax errors
    the coordinate falls back to 0.0.
    """
    if not req.get("location"):
        return
    if len(req.get("location")) != 2:
        self.logger.error("invalid location field!")
        req.set("location", [0.0, 0.0])
        return
    for x_and_y, coord in enumerate(req.get("location")):
        if not isinstance(coord, string_types):
            continue
        coord = ResourceConfig.__replace_coord_variables(coord, x_and_y, dut_req_len, idx)
        try:
            loc = req.get("location")
            # NOTE(review): eval on configuration data — only safe for
            # trusted test configurations.
            loc[x_and_y] = eval(coord)  # pylint: disable=eval-used
            req.set("location", loc)
        except SyntaxError as error:
            self.logger.error(error)
            loc = req.get("location")
            loc[x_and_y] = 0.0
            req.set("location", loc)
Helper function for resolving the location for a resource .
238
10
25,756
def __replace_base_variables(text, req_len, idx):
    """Replace {i} with idx + 1 and {n} with req_len in text."""
    return text.replace("{i}", str(idx + 1)).replace("{n}", str(req_len))
Replace i and n in text with index + 1 and req_len .
50
16
25,757
def __replace_coord_variables(text, x_and_y, req_len, idx):
    """Substitute {xy} and {pi} in *text*, after the base {i}/{n} pass."""
    base = ResourceConfig.__replace_base_variables(text, req_len, idx)
    base = base.replace("{xy}", str(x_and_y))
    return base.replace("{pi}", str(math.pi))
Replace x and y with their coordinates and replace pi with value of pi .
76
16
25,758
def __generate_indexed_requirements(dut_count, basekeys, requirements):
    """Generate indexed requirements from general requirements.

    Builds one ResourceRequirements per dut (1-based indexing); per-dut
    overrides may be keyed by int or by str index in requirements["duts"].

    :param dut_count: number of duts to generate requirements for.
    :param basekeys: dict of default values copied into each requirement.
    :param requirements: full requirements dict ("duts" sub-dict is read).
    :return: list of ResourceRequirements, length dut_count.
    """
    dut_requirements = []
    for i in range(1, dut_count + 1):
        dut_requirement = ResourceRequirements(basekeys.copy())
        if i in requirements["duts"]:
            for k in requirements["duts"][i]:
                dut_requirement.set(k, requirements["duts"][i][k])
        elif str(i) in requirements["duts"]:
            # JSON keys are strings: accept "1" as well as 1.
            i = str(i)
            for k in requirements["duts"][i]:
                dut_requirement.set(k, requirements["duts"][i][k])
        dut_requirements.append(dut_requirement)
    return dut_requirements
Generate indexed requirements from general requirements .
189
8
25,759
def _resolve_hardware_count(self):
    """Count requirements whose type is a physical device and store it."""
    hardware_types = ("hardware", "serial", "mbed")
    count = 0
    for requirement in self._dut_requirements:
        if requirement.get("type") in hardware_types:
            count += 1
    self._hardware_count = count
Calculate amount of hardware resources .
66
8
25,760
def _resolve_process_count(self):
    """Count requirements whose type is "process" and store the total."""
    count = 0
    for requirement in self._dut_requirements:
        if requirement.get("type") == "process":
            count += 1
    self._process_count = count
Calculate amount of process resources .
52
8
25,761
def _resolve_dut_count(self):
    """Calculate the total amount of resources required and their types.

    Updates self._dut_count, self._process_count and self._hardware_count.

    :raises ValueError: if some requirement has a type that is neither
        hardware-like nor "process" (counts do not add up).
    """
    self._dut_count = len(self._dut_requirements)
    self._resolve_process_count()
    self._resolve_hardware_count()
    if self._dut_count != self._hardware_count + self._process_count:
        raise ValueError("Missing or invalid type fields in dut configuration!")
Calculates total amount of resources required and their types .
95
12
25,762
def set_dut_configuration(self, ident, config):
    """Set requirements for dut *ident*.

    Accepts either a ready requirements object (anything exposing
    get_requirements) or a plain dict, which is wrapped.
    """
    if hasattr(config, "get_requirements"):
        self._dut_requirements[ident] = config
        return
    if isinstance(config, dict):
        self._dut_requirements[ident] = ResourceRequirements(config)
Set requirements for dut ident .
70
7
25,763
def flash(self, binary_location=None, forceflash=None):
    """Flash a binary to the target device using mbed-flasher.

    :param binary_location: build location passed to Build.init().
    :param forceflash: flash even if the device already has this binary.
    :return: True on success or when flashing was skipped, False when the
        device is missing or the flasher returned a non-zero code.
    :raises ImportError: mbed-flasher is not installed.
    :raises DutConnectionError: build resolution or flashing failed.
    """
    if not Flash:
        self.logger.error("Mbed-flasher not installed!")
        raise ImportError("Mbed-flasher not installed!")
    try:
        # create build object
        self.build = Build.init(binary_location)
    except NotImplementedError as error:
        self.logger.error("Build initialization failed. "
                          "Check your build location.")
        self.logger.debug(error)
        raise DutConnectionError(error)
    # check if need to flash - depend on forceflash -option
    if not self._flash_needed(forceflash=forceflash):
        self.logger.info("Skipping flash, not needed.")
        return True
    # initialize mbed-flasher with proper logger
    logger = get_external_logger("mbed-flasher", "FLS")
    flasher = Flash(logger=logger)
    if not self.device:
        self.logger.error("Trying to flash device but device is not there?")
        return False
    try:
        buildfile = self.build.get_file()
        if not buildfile:
            raise DutConnectionError("Binary {} not found".format(buildfile))
        self.logger.info('Flashing dev: %s', self.device['target_id'])
        target_id = self.device.get("target_id")
        retcode = flasher.flash(build=buildfile,
                                target_id=target_id,
                                device_mapping_table=[self.device])
    except FLASHER_ERRORS as error:
        # Translate known flasher failures into log messages before
        # re-raising as a DutConnectionError.
        if error.__class__ == NotImplementedError:
            self.logger.error("Flashing not supported for this platform!")
        elif error.__class__ == SyntaxError:
            self.logger.error("target_id required by mbed-flasher!")
        if FlashError is not None:
            if error.__class__ == FlashError:
                self.logger.error(
                    "Flasher raised the following error: %s Error code: %i",
                    error.message, error.return_code)
        raise DutConnectionError(error)
    if retcode == 0:
        # Remember what is now on the device so the next flash can be skipped.
        self.dutinformation.build_binary_sha1 = self.build.sha1
        return True
    self.dutinformation.build_binary_sha1 = None
    return False
Flash a binary to the target device using mbed - flasher .
515
14
25,764
def _flash_needed(self, **kwargs):
    """Return True if the device must be (re)flashed.

    Flashing is skipped only when forceflash is falsy and the build sha1
    matches the sha1 already recorded on the device.
    """
    force = kwargs.get("forceflash", False)
    current_sha1 = self.dutinformation.build_binary_sha1
    return bool(force or self.build.sha1 != current_sha1)
Check if flashing is needed . Flashing can be skipped if resource binary_sha1 attribute matches build sha1 and forceflash is not True .
74
30
25,765
def get_params(self):
    """Return serial parameters as a (timeout, xonxoff, rtscts, baudrate) tuple."""
    params = (self.timeout, self.xonxoff, self.rtscts, self.baudrate)
    return params
Get parameters as a tuple .
32
6
25,766
def open_connection(self):
    """Open the serial port connection and start the reader thread.

    :raises DutConnectionError: port already open or pyserial failed.
    :raises ValueError: invalid serial parameter values.
    """
    if self.readthread is not None:
        raise DutConnectionError("Trying to open serial port which was already open")
    self.logger.info("Open Connection "
                     "for '%s' using '%s' baudrate: %d" % (self.dut_name,
                                                           self.comport,
                                                           self.serial_baudrate),
                     extra={'type': '<->'})
    if self.serial_xonxoff:
        self.logger.debug("Use software flow control for dut: %s" % self.dut_name)
    if self.serial_rtscts:
        self.logger.debug("Use hardware flow control for dut: %s" % self.dut_name)
    try:
        # Configure the port after construction, then drop any stale bytes.
        self.port = EnhancedSerial(self.comport)
        self.port.baudrate = self.serial_baudrate
        self.port.timeout = self.serial_timeout
        self.port.xonxoff = self.serial_xonxoff
        self.port.rtscts = self.serial_rtscts
        self.port.flushInput()
        self.port.flushOutput()
    except SerialException as err:
        self.logger.warning(err)
        raise DutConnectionError(str(err))
    except ValueError as err:
        self.logger.warning(err)
        raise ValueError(str(err))
    if self.ch_mode:
        # Chunk mode: writes are split and paced; give the device a
        # start-up delay before the first write.
        self.logger.info("Use chunk-mode with size %d, delay: %.3f when write data" % (
            self.ch_mode_chunk_size, self.ch_mode_ch_delay),
                         extra={'type': '<->'})
        time.sleep(self.ch_mode_start_delay)
    else:
        self.logger.info("Use normal serial write mode", extra={'type': '<->'})
    if self.params.reset:
        self.reset()
    # Start the serial reading thread
    self.readthread = Thread(name=self.name, target=self.run)
    self.readthread.start()
Open serial port connection .
466
5
25,767
def close_connection(self):  # pylint: disable=C0103
    """Close the serial port connection and stop the reader thread."""
    if self.port:
        self.stop()
        self.logger.debug("Close port '%s'" % self.comport, extra={'type': '<->'})
        self.port.close()
        # False (not None) is the "closed" sentinel used elsewhere.
        self.port = False
Closes serial port connection .
72
6
25,768
def __send_break(self):
    """Send a serial break to the device to trigger a reboot.

    :return: result of safe_sendBreak() when the port is open,
        None when there is no open port.
    """
    if self.port:
        self.logger.debug("sendBreak to device to reboot", extra={'type': '<->'})
        result = self.port.safe_sendBreak()
        # Give the device a second to come back up after the break.
        time.sleep(1)
        if result:
            self.logger.debug("reset completed", extra={'type': '<->'})
        else:
            self.logger.warning("reset failed", extra={'type': '<->'})
        return result
    return None
Sends break to device .
117
6
25,769
def writeline(self, data):
    """Write one line (newline-terminated) to the serial port.

    In chunk mode the payload is split into fixed-size chunks with a
    delay between writes; otherwise it is written in one call.

    :param data: text to send, without trailing newline.
    :raises RuntimeError: on underlying SerialException.
    """
    try:
        if self.ch_mode:
            data += "\n"
            parts = split_by_n(data, self.ch_mode_chunk_size)
            for split_str in parts:
                self.port.write(split_str.encode())
                time.sleep(self.ch_mode_ch_delay)
        else:
            self.port.write((data + "\n").encode())
    except SerialException as err:
        self.logger.exception(
            "SerialError occured while trying to write data {}.".format(data))
        raise RuntimeError(str(err))
Writes data to serial port .
138
7
25,770
def _readline(self, timeout=1):
    """Read one line from the serial port; None on timeout.

    Escape sequences are stripped from the returned line.
    """
    raw = self.port.readline(timeout=timeout)
    if raw is None:
        return None
    return strip_escape(raw.strip())
Read line from serial port .
43
6
25,771
def run(self):
    """Reader-thread main loop.

    Reads lines while keep_reading is True; each non-empty line is
    queued and handed to Dut.process_dut.
    """
    self.keep_reading = True
    while self.keep_reading:
        line = self._readline()
        if line:
            # Newest lines go to the left; consumers pop from the right.
            self.input_queue.appendleft(line)
            Dut.process_dut(self)
Read lines while keep_reading is True . Calls process_dut for each received line .
53
19
25,772
def stop(self):
    """Stop the reader loop and join the reader thread, if any."""
    self.keep_reading = False
    thread = self.readthread
    if thread is None:
        return
    thread.join()
    self.readthread = None
Stops and joins readthread .
36
7
25,773
def print_info(self):
    """Print DUT information nicely formatted into a table.

    Columns are added only for attributes that are actually present
    (COM port, serial parameters, location).
    """
    table = PrettyTable()
    start_string = "DutSerial {} \n".format(self.name)
    row = []
    info_string = ""
    if self.config:
        info_string = info_string + "Configuration for this DUT:\n\n {} \n".format(self.config)
    if self.comport:
        table.add_column("COM port", [])
        row.append(self.comport)
    if self.port:
        # Serial parameters are optional depending on the port backend.
        if hasattr(self.port, "baudrate"):
            table.add_column("Baudrate", [])
            row.append(self.port.baudrate)
        if hasattr(self.port, "xonxoff"):
            table.add_column("XON/XOFF", [])
            row.append(self.port.xonxoff)
        if hasattr(self.port, "timeout"):
            table.add_column("Timeout", [])
            row.append(self.port.timeout)
        if hasattr(self.port, "rtscts"):
            table.add_column("RTSCTS", [])
            row.append(self.port.rtscts)
    if self.location:
        table.add_column("Location", [])
        row.append("X = {}, Y = {}".format(self.location.x_coord,
                                           self.location.y_coord))
    self.logger.info(start_string)
    self.logger.debug(info_string)
    table.add_row(row)
    print(table)
Prints Dut information nicely formatted into a table .
361
11
25,774
def append(self, data):
    """Append another Data instance's entries to this one, key by key."""
    for key, entry in self._entries.items():
        entry.append(data._entries[key])
Append a Data instance to self
41
7
25,775
def init_group(self, group, chunk_size, compression=None, compression_opts=None):
    """Initialize a HDF5 group compliant with the stored data.

    Creates the index dataset plus one dataset per entry (items,
    features, labels and, when present, properties).
    """
    create_index(group, chunk_size)
    self._entries['items'].create_dataset(
        group, chunk_size,
        compression=compression, compression_opts=compression_opts)
    self._entries['features'].create_dataset(
        group, chunk_size,
        compression=compression, compression_opts=compression_opts)
    # chunking the labels depends on features chunks
    self._entries['labels'].create_dataset(
        group, self._entries['features'].nb_per_chunk,
        compression=compression, compression_opts=compression_opts)
    if self.has_properties():
        self._entries['properties'].create_dataset(
            group, compression=compression, compression_opts=compression_opts)
Initializes a HDF5 group compliant with the stored data .
213
13
25,776
def is_appendable_to(self, group):
    """Return True if this data can be appended to the given group."""
    # Every entry name must already exist in the group...
    if any(name not in group for name in self._entries.keys()):
        return False
    # ...and every entry must itself be appendable.
    return all(entry.is_appendable_to(group)
               for entry in self._entries.values())
Returns True if the data can be appended in a given group .
91
14
25,777
def write_to(self, group, append=False):
    """Write the data (index and all entries) to the given group.

    :param group: target HDF5 group.
    :param append: append to existing datasets instead of overwriting.
    """
    write_index(self, group, append)
    self._entries['items'].write_to(group)
    self._entries['features'].write_to(group, append)
    self._entries['labels'].write_to(group)
    if self.has_properties():
        self._entries['properties'].write_to(group, append=append)
Write the data to the given group .
108
8
25,778
def check(labels):
    """Raise IOError if labels are not correct.

    Validates that *labels* is a non-empty list of numpy arrays, all of
    dimension 1 or 2 (consistent across the list, with matching second
    dimension for 2D labels), and that each array is sorted in
    increasing order.

    :param labels: list of 1D or 2D numpy arrays.
    :raises IOError: on any violation of the above.
    """
    # type checking
    if not isinstance(labels, list):
        raise IOError('labels are not in a list')
    if not len(labels):
        raise IOError('the labels list is empty')
    if not all([isinstance(l, np.ndarray) for l in labels]):
        raise IOError('all labels must be numpy arrays')
    # dimension checking
    ndim = labels[0].ndim
    if ndim not in [1, 2]:
        raise IOError('labels dimension must be 1 or 2')
    if not all([l.ndim == ndim for l in labels]):
        raise IOError('all labels dimensions must be equal')
    if ndim == 2:
        shape1 = labels[0].shape[1]
        if not all([l.shape[1] == shape1 for l in labels]):
            raise IOError('all labels must have same shape on 2nd dim')
    # sort checking: a sorted array's (lex)argsort is the identity
    for label in labels:
        index = (np.argsort(label) if label.ndim == 1
                 else np.lexsort(label.T))
        # internal sanity invariant: argsort/lexsort preserve length
        assert len(index) == label.shape[0]
        if not all(n == index[n] for n in range(label.shape[0] - 1)):
            raise IOError('labels are not sorted in increasing order')
Raise IOError if labels are not correct
322
9
25,779
def _write(self, item, labels, features):
    """Write a single (item, labels, features) triple to the owned file.

    Wraps the arguments into a one-element Data instance and appends it
    to self.groupname via the owned writer.
    """
    data = Data([item], [labels], [features])
    self._writer.write(data, self.groupname, append=True)
Writes the given item to the owned file .
47
10
25,780
def convert(self, infile, item=None):
    """Convert an input file to h5features, dispatching on its extension.

    :param infile: path to a .npz, .mat or .h5 file.
    :param item: item name; defaults to the file path without extension.
    :raises IOError: missing file or unsupported extension.
    """
    if not os.path.isfile(infile):
        raise IOError('{} is not a valid file'.format(infile))
    root, ext = os.path.splitext(infile)
    if item is None:
        item = root
    if ext == '.npz':
        self.npz_convert(infile, item)
    elif ext == '.mat':
        self.mat_convert(infile, item)
    elif ext == '.h5':
        self.h5features_convert(infile)
    else:
        raise IOError('Unknown file format for {}'.format(infile))
Convert an input file to h5features based on its extension .
165
14
25,781
def npz_convert(self, infile, item):
    """Convert a numpy NPZ file to h5features.

    Expects a 'features' array in the archive; labels are extracted by
    self._labels.
    """
    data = np.load(infile)
    labels = self._labels(data)
    features = data['features']
    self._write(item, labels, features)
Convert a numpy NPZ file to h5features .
53
13
25,782
def h5features_convert ( self , infile ) : with h5py . File ( infile , 'r' ) as f : groups = list ( f . keys ( ) ) for group in groups : self . _writer . write ( Reader ( infile , group ) . read ( ) , self . groupname , append = True )
Convert a h5features file to the latest h5features version .
74
15
25,783
def read(filename, groupname=None, from_item=None, to_item=None,
         from_time=None, to_time=None, index=None):
    """Read a h5features file and return its content as dictionaries.

    :param filename: path to the h5features file.
    :param groupname: group to read (reader default when None).
    :param from_item/to_item, from_time/to_time: optional read range.
    :param index: legacy parameter, must be None.
    :return: (labels, features) dicts, plus a properties dict when the
        file carries properties.
    :raises NotImplementedError: if index is given (legacy path).
    """
    # TODO legacy read from index not implemented
    if index is not None:
        raise NotImplementedError
    reader = Reader(filename, groupname)
    data = (reader.read(from_item, to_item, from_time, to_time)
            if index is None else reader.index_read(index))
    if data.has_properties():
        return data.dict_labels(), data.dict_features(), data.dict_properties()
    else:
        return data.dict_labels(), data.dict_features()
Reads in a h5features file .
159
9
25,784
def write(filename, groupname, items, times, features, properties=None,
          dformat='dense', chunk_size='auto', sparsity=0.1, mode='a'):
    """Write h5features data in a HDF5 file.

    :param filename: target HDF5 file.
    :param groupname: group to write into (appended).
    :param items/times/features/properties: data to store.
    :param dformat: 'dense' or 'sparse'; sparsity applies only to sparse.
    :param chunk_size: HDF5 chunking, 'auto' by default.
    """
    # Prepare the data, raise on error
    sparsity = sparsity if dformat == 'sparse' else None
    data = Data(items, times, features,
                properties=properties, sparsity=sparsity, check=True)
    # Write all that stuff in the HDF5 file's specified group
    Writer(filename, chunk_size=chunk_size).write(data, groupname, append=True)
Write h5features data in a HDF5 file .
133
12
25,785
def contains_empty(features):
    """Return True if *features* is falsy or any feature has zero frames.

    :param features: iterable of arrays with a .shape attribute (first
        axis is the frame axis), or a falsy value.
    :return: bool.
    """
    if not features:
        return True
    # Idiomatic any() instead of a manual search loop.
    return any(feature.shape[0] == 0 for feature in features)
Check that the features data are not empty .
33
6
25,786
def parse_dformat(dformat, check=True):
    """Return *dformat*, raising IOError unless it is 'dense' or 'sparse'.

    :param check: when False the value is returned unvalidated.
    """
    valid_formats = ('dense', 'sparse')
    if check and dformat not in valid_formats:
        message = "{} is a bad features format, please choose 'dense' or 'sparse'"
        raise IOError(message.format(dformat))
    return dformat
Return dformat or raise if it is not dense or sparse
66
12
25,787
def parse_dtype(features, check=True):
    """Return the scalar dtype shared by all features; raise on mismatch.

    :param check: when False the first feature's dtype is returned
        without validating homogeneity.
    :raises IOError: dtypes differ across features.
    """
    dtype = features[0].dtype
    if check and any(x.dtype != dtype for x in features):
        raise IOError('features must be homogeneous')
    return dtype
Return the features scalar type ; raise if error .
69
9
25,788
def parse_dim(features, check=True):
    """Return the common feature dimension (second axis of each array).

    :param features: list of 2D arrays.
    :param check: validate that the dimension is positive and shared.
    :raises IOError: non-positive or inconsistent dimension.
    """
    # Commented-out 1D fallback removed: features are required to be 2D.
    dim = features[0].shape[1]
    if check and not dim > 0:
        raise IOError('features dimension must be strictly positive')
    if check and not all([d == dim for d in [x.shape[1] for x in features]]):
        raise IOError('all files must have the same feature dimension')
    return dim
Return the features dimension ; raise if error .
101
7
25,789
def is_appendable_to(self, group):
    """Return True if these features are appendable to a HDF5 group.

    The group must match in storage format, dtype and dimension.
    """
    return (group.attrs['format'] == self.dformat
            and group[self.name].dtype == self.dtype
            # We use a method because dim differs in dense and sparse.
            and self._group_dim(group) == self.dim)
Return True if features are appendable to a HDF5 group
72
13
25,790
def create_dataset(self, group, chunk_size, compression=None, compression_opts=None):
    """Initialize the features subgroup and record the storage format."""
    group.attrs['format'] = self.dformat
    super(Features, self)._create_dataset(
        group, chunk_size, compression, compression_opts)
    # TODO attribute declared outside __init__ is not safe. Used
    # because Labels.create_dataset need it.
    if chunk_size != 'auto':
        self.nb_per_chunk = nb_per_chunk(
            self.dtype.itemsize, self.dim, chunk_size)
    else:
        self.nb_per_chunk = 'auto'
Initialize the features subgoup
153
7
25,791
def write_to(self, group, append=False):
    """Write stored features to a given group, optionally appending.

    Sparse matrices are densified first when sparsetodense is set.
    """
    if self.sparsetodense:
        self.data = [x.todense() if sp.issparse(x) else x for x in self.data]
    nframes = sum([d.shape[0] for d in self.data])
    dim = self._group_dim(group)
    feats = np.concatenate(self.data, axis=0)
    if append:
        # Grow the dataset and write the new frames after the old ones.
        nframes_group = group[self.name].shape[0]
        group[self.name].resize(nframes_group + nframes, axis=0)
        if dim == 1:
            group[self.name][nframes_group:] = feats
        else:
            group[self.name][nframes_group:, :] = feats
    else:
        group[self.name].resize(nframes, axis=0)
        # NOTE(review): both branches of this ternary are identical —
        # looks like a leftover; confirm intended 1D handling.
        group[self.name][...] = feats if dim == 1 else feats
Write stored features to a given group
213
7
25,792
def create_dataset(self, group, chunk_size):
    """Initialize the sparse-specific datasets (coordinates, data, frames)."""
    group.attrs['format'] = self.dformat
    group.attrs['dim'] = self.dim
    if chunk_size == 'auto':
        group.create_dataset('coordinates', (0, 2), dtype=np.float64,
                             chunks=True, maxshape=(None, 2))
        group.create_dataset(self.name, (0,), dtype=self.dtype,
                             chunks=True, maxshape=(None,))
    else:
        # for storing sparse data we don't use the self.nb_per_chunk,
        # which is only used by the Writer to determine times chunking.
        per_chunk = nb_per_chunk(self.dtype.itemsize, 1, chunk_size)
        group.create_dataset('coordinates', (0, 2), dtype=np.float64,
                             chunks=(per_chunk, 2), maxshape=(None, 2))
        group.create_dataset(self.name, (0,), dtype=self.dtype,
                             chunks=(per_chunk,), maxshape=(None,))
    dtype = np.int64
    if chunk_size == 'auto':
        chunks = True
        self.nb_per_chunk = 'auto'
    else:
        chunks = (nb_per_chunk(np.dtype(dtype).itemsize, 1, chunk_size),)
        # Needed by Times.create_dataset
        self.nb_per_chunk = nb_per_chunk(
            self.dtype.itemsize,
            int(round(self.sparsity * self.dim)), chunk_size)
    group.create_dataset('frames', (0,), dtype=dtype,
                         chunks=chunks, maxshape=(None,))
Initializes sparse specific datasets
437
5
25,793
def read_properties(group):
    """Return the properties stored in a h5features group.

    :param group: mapping with a 'properties' dataset.
    :raises IOError: the group has no properties.
    """
    if 'properties' not in group:
        raise IOError('no properties in group')
    raw = group['properties'][...][0]
    # Undo the NULL-byte escaping applied at write time.
    # NOTE: pickle.loads on file content — only open trusted files.
    return pickle.loads(raw.replace(b'__NULL__', b'\x00'))
Returns properties loaded from a group
66
6
25,794
def _eq_dicts(d1, d2):
    """Return True if d1 == d2, comparing numpy array values element-wise.

    Values must match in exact type; arrays are compared with
    np.array_equal, everything else with ==.
    """
    if d1.keys() != d2.keys():
        return False
    for key, v1 in d1.items():
        v2 = d2[key]
        # Strict type match is intended (identity check on type objects).
        if type(v1) is not type(v2):
            return False
        if isinstance(v1, np.ndarray):
            if not np.array_equal(v1, v2):
                return False
        elif v1 != v2:
            return False
    return True
Returns True if d1 == d2 False otherwise
114
10
25,795
def write_to(self, group, append=False):
    """Write the properties to a group, or append them to existing ones.

    :param group: target group with a 'properties' dataset.
    :param append: when True, existing properties are read back and the
        new ones are concatenated after them.
    """
    data = self.data
    if append is True:
        try:
            # concatenate original and new properties in a single list
            original = read_properties(group)
            data = original + data
        except EOFError:
            pass  # no former data to append on
    # h5py does not support embedded NULLs in strings ('\x00')
    data = pickle.dumps(data).replace(b'\x00', b'__NULL__')
    group['properties'][...] = np.void(data)
Writes the properties to a group or append it
123
10
25,796
def generate_data ( nitem , nfeat = 2 , dim = 10 , labeldim = 1 , base = 'item' ) : import numpy as np # A list of item names items = [ base + '_' + str ( i ) for i in range ( nitem ) ] # A list of features arrays features = [ np . random . randn ( nfeat , dim ) for _ in range ( nitem ) ] # A list on 1D or 2D times arrays if labeldim == 1 : labels = [ np . linspace ( 0 , 1 , nfeat ) ] * nitem else : t = np . linspace ( 0 , 1 , nfeat ) labels = [ np . array ( [ t + i for i in range ( labeldim ) ] ) ] * nitem # Format data as required by the writer return h5f . Data ( items , labels , features , check = True )
Returns a randomly generated h5f . Data instance .
202
11
25,797
def create_index(group, chunk_size, compression=None, compression_opts=None):
    """Create an empty, resizable int64 'index' dataset in the group.

    :param chunk_size: 'auto' for h5py-chosen chunks, otherwise a target
        chunk size in bytes translated via nb_per_chunk.
    """
    dtype = np.int64
    if chunk_size == 'auto':
        chunks = True
    else:
        chunks = (nb_per_chunk(np.dtype(dtype).itemsize, 1, chunk_size),)
    group.create_dataset('index', (0,), dtype=dtype, chunks=chunks,
                         maxshape=(None,), compression=compression,
                         compression_opts=compression_opts)
Create an empty index dataset in the given group .
124
10
25,798
def write_index(data, group, append):
    """Write the data index (cumulative frame offsets) to the group.

    :param data: Data instance whose 'features' entry is indexed.
    :param group: target group holding the 'index' dataset.
    :param append: extend the existing index instead of replacing it.
    """
    # build the index from data; continue from the last stored offset
    nitems = group['items'].shape[0] if 'items' in group else 0
    last_index = group['index'][-1] if nitems > 0 else -1
    index = last_index + cumindex(data._entries['features'])
    if append:
        nidx = group['index'].shape[0]
        # # in case we append to the end of an existing item
        # if data._entries['items']._continue_last_item(group):
        #     nidx -= 1
        group['index'].resize((nidx + index.shape[0],))
        group['index'][nidx:] = index
    else:
        group['index'].resize((index.shape[0],))
        group['index'][...] = index
Write the data index to the given group .
207
9
25,799
def read_index(group, version='1.1'):
    """Return the index stored in a h5features group.

    :param version: file format version; '0.1' and '1.0' use legacy
        dataset names/types, anything else reads 'index' directly.
    """
    if version == '0.1':
        # legacy format stored the index with a different dtype
        return np.int64(group['index'][...])
    if version == '1.0':
        return group['file_index'][...]
    return group['index'][...]
Return the index stored in a h5features group .
75
11