idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
16,600
def create_dir(path):
    """Creates a directory if it does not exist already.

    Args:
        path: The path of the directory to create.
    """
    full_path = abs_path(path)
    if not os.path.exists(full_path):
        # exist_ok=True guards against the race where another process
        # creates the directory between the exists() check and makedirs().
        # The original handler compared e.errno to os.errno.EEXIST, but
        # `os.errno` does not exist on modern Python 3, so the except
        # branch itself raised AttributeError.
        os.makedirs(full_path, exist_ok=True)
Creates a directory if it does not exist already .
16,601
def create_alias(target_path, alias_path):
    """Creates an alias at 'alias_path' pointing to 'target_path'.

    On Windows a .lnk shortcut file is created; elsewhere a symlink is used.
    """
    is_windows = platform.system() == 'Windows'
    if is_windows and not alias_path.endswith('.lnk'):
        alias_path += '.lnk'
    # Remove any pre-existing alias, including broken symlinks.
    if os.path.lexists(alias_path):
        os.remove(alias_path)
    if is_windows:
        from win32com import client
        shell = client.Dispatch('WScript.Shell')
        shortcut = shell.CreateShortCut(alias_path)
        shortcut.Targetpath = target_path
        shortcut.save()
    else:
        os.symlink(target_path, alias_path)
Creates an alias at alias_path pointing to the file target_path .
16,602
def epoch_to_human_time(epoch_time):
    """Converts an epoch timestamp to a human readable time string.

    Args:
        epoch_time: An integer epoch timestamp in milliseconds.

    Returns:
        A formatted time string, or None if the input is not an int or
        cannot be converted.
    """
    if not isinstance(epoch_time, int):
        return None
    try:
        converted = datetime.datetime.fromtimestamp(epoch_time / 1000)
        return converted.strftime("%m-%d-%Y %H:%M:%S ")
    except ValueError:
        return None
Converts an epoch timestamp to human readable time .
16,603
def find_files(paths, file_predicate):
    """Locate files whose names and extensions match the given predicate in
    the specified directories.

    Args:
        paths: A list of directory paths where to find the files.
        file_predicate: A function that returns True when given a desired
            (name, extension) pair.

    Returns:
        A list of (dir_path, name, ext) tuples for matching files.
    """
    results = []
    for root in paths:
        for dir_path, _, file_names in os.walk(abs_path(root)):
            for file_name in file_names:
                name, ext = os.path.splitext(file_name)
                if file_predicate(name, ext):
                    results.append((dir_path, name, ext))
    return results
Locate files whose names and extensions match the given predicate in the specified directories .
16,604
def load_file_to_base64_str(f_path):
    """Loads the content of a file into a base64 string.

    Args:
        f_path: full path to the file including the file name.

    Returns:
        A base64 string representing the file content.
    """
    with io.open(abs_path(f_path), 'rb') as f:
        return base64.b64encode(f.read()).decode("utf-8")
Loads the content of a file into a base64 string .
16,605
def find_field(item_list, cond, comparator, target_field):
    """Finds the value of a field in a dict object that satisfies certain
    conditions.

    Args:
        item_list: A list of dict objects.
        cond: A param that defines the condition.
        comparator: A function that checks if a dict satisfies the condition.
        target_field: Name of the field whose value is returned when an item
            satisfies the condition.

    Returns:
        The target value, or None if no item satisfies the condition.
    """
    hits = (item[target_field] for item in item_list
            if comparator(item, cond) and target_field in item)
    return next(hits, None)
Finds the value of a field in a dict object that satisfies certain conditions .
16,606
def rand_ascii_str(length):
    """Generates a random string of specified length composed of ascii
    letters and digits.

    Args:
        length: The number of characters in the generated string.

    Returns:
        The random string.
    """
    return ''.join(random.choice(ascii_letters_and_digits)
                   for _ in range(length))
Generates a random string of specified length composed of ascii letters and digits .
16,607
def concurrent_exec(func, param_list):
    """Executes a function with different parameters pseudo-concurrently.

    Args:
        func: The function to execute.
        param_list: A list of tuples, each tuple being one set of positional
            args for `func`.

    Returns:
        A list of return values from each invocation; if a call raised, the
        exception object takes the place of its return value.
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:
        # Map each future back to its params so failures can be attributed.
        running = {executor.submit(func, *params): params
                   for params in param_list}
        results = []
        for completed in concurrent.futures.as_completed(running):
            params = running[completed]
            try:
                results.append(completed.result())
            except Exception as exc:
                logging.exception("{} generated an exception: {}".format(
                    params, traceback.format_exc()))
                results.append(exc)
        return results
Executes a function with different parameters pseudo - concurrently .
16,608
def run_command(cmd, stdout=None, stderr=None, shell=False, timeout=None, cwd=None, env=None):
    """Runs a command in a subprocess.

    Args:
        cmd: the command to run (string or sequence, per Popen).
        stdout: destination for the child's stdout; defaults to PIPE.
        stderr: destination for the child's stderr; defaults to PIPE.
        shell: bool, True to run the command through the shell.
        timeout: float, seconds to wait before terminating the process.
        cwd: working directory for the child process.
        env: environment mapping for the child process.

    Returns:
        A (returncode, out, err) tuple.

    Raises:
        psutil.TimeoutExpired: if the process was terminated by the timeout.
    """
    import psutil
    if stdout is None:
        stdout = subprocess.PIPE
    if stderr is None:
        stderr = subprocess.PIPE
    process = psutil.Popen(cmd, stdout=stdout, stderr=stderr, shell=shell,
                           cwd=cwd, env=env)
    timer = None
    timer_triggered = threading.Event()
    if timeout and timeout > 0:
        # Terminate from a timer thread; the Event records that termination
        # was caused by the timeout rather than by normal process exit.
        def timeout_expired():
            timer_triggered.set()
            process.terminate()
        timer = threading.Timer(timeout, timeout_expired)
        timer.start()
    (out, err) = process.communicate()
    if timer is not None:
        timer.cancel()
    if timer_triggered.is_set():
        raise psutil.TimeoutExpired(timeout, pid=process.pid)
    return (process.returncode, out, err)
Runs a command in a subprocess .
16,609
def start_standing_subprocess(cmd, shell=False, env=None):
    """Starts a long-running subprocess.

    stdin is closed right after launch so the child cannot block on reads
    from it; stdout and stderr remain piped.

    Args:
        cmd: the command to start the subprocess with.
        shell: bool, True to run through the system shell.
        env: dict, a custom environment for the subprocess.

    Returns:
        The started subprocess.Popen object.
    """
    logging.debug('Starting standing subprocess with: %s', cmd)
    proc = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=shell,
        env=env)
    # Drop the stdin handle entirely after closing it.
    proc.stdin.close()
    proc.stdin = None
    logging.debug('Started standing subprocess %d', proc.pid)
    return proc
Starts a long - running subprocess .
16,610
def stop_standing_subprocess(proc):
    """Stops a subprocess started by start_standing_subprocess.

    Kills the whole process tree (children first, then the process itself),
    closes the pipe handles, and reaps the process.

    Args:
        proc: The subprocess to stop.

    Raises:
        Error: if any process in the tree could not be killed.
    """
    import psutil
    pid = proc.pid
    logging.debug('Stopping standing subprocess %d', pid)
    process = psutil.Process(pid)
    failed = []
    try:
        children = process.children(recursive=True)
    except AttributeError:
        # Older psutil versions expose get_children() instead of children().
        children = process.get_children(recursive=True)
    for child in children:
        try:
            child.kill()
            child.wait(timeout=10)
        except psutil.NoSuchProcess:
            # Process already exited on its own; nothing to do.
            pass
        except:
            failed.append(child.pid)
            logging.exception('Failed to kill standing subprocess %d',
                              child.pid)
    try:
        process.kill()
        process.wait(timeout=10)
    except psutil.NoSuchProcess:
        pass
    except:
        failed.append(pid)
        logging.exception('Failed to kill standing subprocess %d', pid)
    if failed:
        raise Error('Failed to kill standing subprocesses: %s' % failed)
    # Release the pipe file handles before reaping the process.
    if proc.stdout:
        proc.stdout.close()
    if proc.stderr:
        proc.stderr.close()
    proc.wait()
    logging.debug('Stopped standing subprocess %d', pid)
Stops a subprocess started by start_standing_subprocess .
16,611
def get_available_host_port():
    """Gets a host port number available for adb forward.

    Returns:
        An integer port number on the host available for adb forward.

    Raises:
        Error: when no port is found after MAX_PORT_ALLOCATION_RETRY tries.
    """
    from mobly.controllers.android_device_lib import adb
    for _ in range(MAX_PORT_ALLOCATION_RETRY):
        candidate = portpicker.PickUnusedPort()
        # Make sure adb is not already forwarding through this port.
        if candidate not in adb.list_occupied_adb_ports():
            return candidate
    raise Error('Failed to find available port after {} retries'.format(
        MAX_PORT_ALLOCATION_RETRY))
Gets a host port number available for adb forward .
16,612
def grep(regex, output):
    """Similar to Linux's grep: returns the lines in an output stream that
    match a given regex pattern.

    Args:
        regex: string, the regex pattern to search for.
        output: byte string to search through.

    Returns:
        A list of stripped lines that matched the pattern.
    """
    matched = []
    for raw_line in output.decode('utf-8').strip().splitlines():
        if re.search(regex, raw_line):
            matched.append(raw_line.strip())
    return matched
Similar to Linux's grep, this returns the lines in an output stream that match a given regex pattern.
16,613
def cli_cmd_to_string(args):
    """Converts a cmd arg list to string.

    Args:
        args: list of strings (the command's arguments), or a single string
            which is returned unchanged.

    Returns:
        String representation of the command.
    """
    if isinstance(args, str):
        return args
    # `basestring` and `pipes.quote` are Python 2 only (`pipes` was removed
    # in Python 3.13); `shlex.quote` is the supported equivalent.
    import shlex
    return ' '.join(shlex.quote(arg) for arg in args)
Converts a cmd arg list to string .
16,614
def exe_cmd(*cmds):
    """Executes commands in a new shell. Directing stderr to PIPE.

    Args:
        cmds: A sequence of commands and arguments, joined with spaces.

    Returns:
        The stdout of the command, or its stderr content if any was
        produced.
    """
    joined = ' '.join(cmds)
    proc = Popen(joined, stdout=PIPE, stderr=PIPE, shell=True)
    out, err = proc.communicate()
    return err if err else out
Executes commands in a new shell . Directing stderr to PIPE .
16,615
def verify_controller_module(module):
    """Verifies a module object follows the required interface for
    controllers.

    Controller modules must expose non-null `create`, `destroy` and
    `MOBLY_CONTROLLER_CONFIG_NAME` attributes.

    Args:
        module: An object that is a controller module.

    Raises:
        signals.ControllerError: if an attribute is missing or null.
    """
    for required in ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME'):
        if not hasattr(module, required):
            raise signals.ControllerError(
                'Module %s missing required controller module attribute'
                ' %s.' % (module.__name__, required))
        if not getattr(module, required):
            raise signals.ControllerError(
                'Controller interface %s in %s cannot be null.' %
                (required, module.__name__))
Verifies a module object follows the required interface for controllers .
16,616
def register_controller(self, module, required=True, min_number=1):
    """Loads a controller module and returns its loaded devices.

    Args:
        module: A module that follows the controller module interface.
        required: bool, False marks the controller as optional; a missing
            config then only logs a warning instead of raising.
        min_number: int, minimum number of controller objects expected.

    Returns:
        The list of controller objects, or None for an optional controller
        with no config.

    Raises:
        signals.ControllerError: on double registration, missing required
            config, a non-list return from `create`, or too few objects.
    """
    verify_controller_module(module)
    # The short module name (last dotted component) keys the registries.
    module_ref_name = module.__name__.split('.')[-1]
    if module_ref_name in self._controller_objects:
        raise signals.ControllerError(
            'Controller module %s has already been registered. It cannot '
            'be registered again.' % module_ref_name)
    module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME
    if module_config_name not in self.controller_configs:
        if required:
            raise signals.ControllerError(
                'No corresponding config found for %s' % module_config_name)
        logging.warning(
            'No corresponding config found for optional controller %s',
            module_config_name)
        return None
    try:
        # Pass a deep copy so the controller module can freely mutate its
        # config without affecting our stored configs.
        original_config = self.controller_configs[module_config_name]
        controller_config = copy.deepcopy(original_config)
        objects = module.create(controller_config)
    except:
        logging.exception(
            'Failed to initialize objects for controller %s, abort!',
            module_config_name)
        raise
    if not isinstance(objects, list):
        raise signals.ControllerError(
            'Controller module %s did not return a list of objects, abort.'
            % module_ref_name)
    actual_number = len(objects)
    if actual_number < min_number:
        # Tear down whatever was created before failing.
        module.destroy(objects)
        raise signals.ControllerError(
            'Expected to get at least %d controller objects, got %d.' %
            (min_number, actual_number))
    # Keep a shallow copy so later mutation of the returned list by user
    # code does not affect our registry.
    self._controller_objects[module_ref_name] = copy.copy(objects)
    logging.debug('Found %d objects for controller %s', len(objects),
                  module_config_name)
    self._controller_modules[module_ref_name] = module
    return objects
Loads a controller module and returns its loaded devices .
16,617
def unregister_controllers(self):
    """Destroy controller objects and clear internal registry."""
    for module_name, module in self._controller_modules.items():
        logging.debug('Destroying %s.', module_name)
        # expect_no_raises logs instead of propagating destroy failures, so
        # one bad controller does not block tearing down the rest.
        with expects.expect_no_raises(
                'Exception occurred destroying %s.' % module_name):
            module.destroy(self._controller_objects[module_name])
    self._controller_objects = collections.OrderedDict()
    self._controller_modules = {}
Destroy controller objects and clear internal registry .
16,618
def _create_controller_info_record(self, controller_module_name):
    """Creates controller info record for a particular controller type.

    Args:
        controller_module_name: string, name of the controller module whose
            info should be recorded.

    Returns:
        A records.ControllerInfoRecord object.
    """
    module = self._controller_modules[controller_module_name]
    controller_info = None
    try:
        # `get_info` is optional on controller modules; pass a shallow copy
        # of the object list so the module can't mutate our registry list.
        controller_info = module.get_info(
            copy.copy(self._controller_objects[controller_module_name]))
    except AttributeError:
        logging.warning('No optional debug info found for controller '
                        '%s. To provide it, implement `get_info`.',
                        controller_module_name)
    try:
        yaml.dump(controller_info)
    except TypeError:
        # Coerce to string so the record stays YAML serializable.
        logging.warning('The info of controller %s in class "%s" is not '
                        'YAML serializable! Coercing it to string.',
                        controller_module_name, self._class_name)
        controller_info = str(controller_info)
    return records.ControllerInfoRecord(
        self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME,
        controller_info)
Creates controller info record for a particular controller type .
16,619
def get_controller_info_records(self):
    """Get the info records for all the controller objects in the manager.

    Returns:
        A list of controller info records; controllers whose info
        collection fails are logged and skipped.
    """
    collected = []
    for module_name in self._controller_objects.keys():
        with expects.expect_no_raises(
                'Failed to collect controller info from %s' % module_name):
            record = self._create_controller_info_record(module_name)
            if record:
                collected.append(record)
    return collected
Get the info records for all the controller objects in the manager .
16,620
def dict_to_op(d, index_name, doc_type, op_type='index'):
    """Create a bulk-indexing operation from the given dictionary.

    Args:
        d: dict with an 'id' key plus document fields, or None.
        index_name: name of the target index.
        doc_type: document type for the operation.
        op_type: one of 'create', 'delete', 'index', 'update'.

    Returns:
        A dict describing the bulk operation, or None if `d` is None.

    Raises:
        Exception: for an unknown op_type or a missing 'id' key.
    """
    if d is None:
        return d
    op_types = ('create', 'delete', 'index', 'update')
    if op_type not in op_types:
        msg = 'Unknown operation type "{}", must be one of: {}'
        raise Exception(msg.format(op_type, ', '.join(op_types)))
    if 'id' not in d:
        raise Exception('"id" key not found')
    # Work on a copy: the original implementation popped 'id' out of the
    # caller's dict, mutating it as a side effect.
    payload = dict(d)
    operation = {
        '_op_type': op_type,
        '_index': index_name,
        '_type': doc_type,
        '_id': payload.pop('id'),
    }
    operation.update(payload)
    return operation
Create a bulk - indexing operation from the given dictionary .
16,621
def to_dict(obj):
    """Create a filtered dict from the given object.

    Keeps only a whitelisted subset of the fields produced by
    obj.to_dict(); returns None when obj.test is not a string.
    """
    if not isinstance(obj.test, str):
        return
    wanted = {
        'id',
        'job_guid',
        'test',
        'subtest',
        'status',
        'expected',
        'message',
        'best_classification',
        'best_is_verified',
    }
    return {key: value
            for key, value in obj.to_dict().items()
            if key in wanted}
Create a filtered dict from the given object .
16,622
def start_step(self, lineno, name="Unnamed step", timestamp=None):
    """Create a new step and update the state to reflect we're now in the
    middle of a step.
    """
    self.state = self.STATES['step_in_progress']
    self.stepnum += 1
    new_step = {
        "name": name,
        "started": timestamp,
        "started_linenumber": lineno,
        "errors": [],
    }
    self.steps.append(new_step)
Create a new step and update the state to reflect we're now in the middle of a step.
16,623
def end_step(self, lineno, timestamp=None, result_code=None):
    """Fill in the current step's summary and update the state to show the
    current step has ended.
    """
    self.state = self.STATES['step_finished']
    step_errors = self.sub_parser.get_artifact()
    if len(step_errors) > settings.PARSER_MAX_STEP_ERROR_LINES:
        # Cap the error list and flag the truncation in the artifact.
        step_errors = step_errors[:settings.PARSER_MAX_STEP_ERROR_LINES]
        self.artifact["errors_truncated"] = True
    self.current_step.update({
        "finished": timestamp,
        "finished_linenumber": lineno,
        "result": self.RESULT_DICT.get(result_code, "unknown"),
        "errors": step_errors,
    })
    # Reset the sub parser for the next step.
    self.sub_parser.clear()
Fill in the current step's summary and update the state to show the current step has ended.
16,624
def parse_line(self, line, lineno):
    """Parse a single line of the log.

    Tinderbox-print lines are matched against the known regexp tuple first;
    unmatched ones are captured as raw HTML artifacts.
    """
    match = self.RE_TINDERBOXPRINT.match(line) if line else None
    if match:
        line = match.group('line')
        for regexp_item in self.TINDERBOX_REGEXP_TUPLE:
            match = regexp_item['re'].match(line)
            if match:
                artifact = match.groupdict()
                # Fill any missing/None field from its designated
                # duplicate source field.
                for to_field, from_field in regexp_item['duplicates_fields'].items():
                    if to_field not in artifact or artifact[to_field] is None:
                        artifact[to_field] = artifact[from_field]
                artifact.update(regexp_item['base_dict'])
                self.artifact.append(artifact)
                return
        # No regexp matched: treat the line as raw html content.
        artifact = {"content_type": "raw_html", }
        if "<br/>" in line:
            title, value = line.split("<br/>", 1)
            artifact["title"] = title
            artifact["value"] = value
        elif "href" in line and "title" in line:
            # Pull url/title/text out of an anchor tag via HTMLParser; the
            # handlers write straight into the enclosing `artifact` dict.
            def parse_url_line(line_data):
                class TpLineParser(HTMLParser):
                    def handle_starttag(self, tag, attrs):
                        d = dict(attrs)
                        artifact["url"] = d['href']
                        artifact["title"] = d['title']
                    def handle_data(self, data):
                        artifact["value"] = data
                p = TpLineParser()
                p.feed(line_data)
                p.close()
            # Strip CRs before feeding the parser.
            parse_url_line(line.replace('\r', ''))
        else:
            artifact["value"] = line
        self.artifact.append(artifact)
Parse a single line of the log
16,625
def parse_line(self, line, lineno):
    """Check a single line for an error. Keeps track of the linenumber."""
    # Once a taskcluster marker is seen, all following lines are treated as
    # taskcluster output.
    if line.startswith('[taskcluster '):
        self.is_taskcluster = True
    cleaned = line
    if self.is_taskcluster:
        # Strip the taskcluster log prefix before error matching.
        cleaned = re.sub(self.RE_TASKCLUSTER_NORMAL_PREFIX, "", cleaned)
    if self.is_error_line(cleaned):
        self.add(cleaned, lineno)
Check a single line for an error . Keeps track of the linenumber
16,626
def retrieve(self, request, project, pk=None):
    """Returns a job_log_url object given its ID."""
    job_log = JobLog.objects.get(id=pk)
    return Response(self._log_as_dict(job_log))
Returns a job_log_url object given its ID
16,627
def linear_weights(i, n):
    """A window function that falls off arithmetically.

    Returns (n - i) / n for i < n, and 0.0 otherwise.
    """
    if i >= n:
        return 0.0
    return (n - i) / float(n)
A window function that falls off arithmetically .
16,628
def calc_t(w1, w2, weight_fn=None):
    """Perform a Student's t-test on the two sets of revision data.

    Returns 0 when either window is empty or the averages match, and
    infinity when both variances are zero.
    """
    if not w1 or not w2:
        return 0
    s1 = analyze(w1, weight_fn)
    s2 = analyze(w2, weight_fn)
    delta_s = s2['avg'] - s1['avg']
    if delta_s == 0:
        return 0
    if s1['variance'] == 0 and s2['variance'] == 0:
        return float('inf')
    pooled_error = ((s1['variance'] / s1['n']) +
                    (s2['variance'] / s2['n'])) ** 0.5
    return delta_s / pooled_error
Perform a Student's t-test on the two sets of revision data.
16,629
def _remove_existing_jobs(data):
    """Remove jobs from data where we already have them in the same state.

    Args:
        data: list of job datums, each with a 'job' dict containing
            'job_guid' and 'state'.

    Returns:
        A filtered list of datums that still need to be stored.
    """
    new_data = []
    guids = [datum['job']['job_guid'] for datum in data]
    # Single query mapping each existing guid to its stored state.
    state_map = {
        guid: state
        for (guid, state) in Job.objects.filter(
            guid__in=guids).values_list('guid', 'state')
    }
    for datum in data:
        job = datum['job']
        if not state_map.get(job['job_guid']):
            # Job not stored yet: keep it.
            new_data.append(datum)
        else:
            current_state = state_map[job['job_guid']]
            # Skip if the stored job is already complete, or if a stale
            # 'pending' update arrives for a job that is already running.
            if current_state == 'completed' or (
                    job['state'] == 'pending' and
                    current_state == 'running'):
                continue
            new_data.append(datum)
    return new_data
Remove jobs from data where we already have them in the same state .
16,630
def _schedule_log_parsing(job, job_logs, result):
    """Kick off the initial task that parses the log data."""
    # Importing here avoids a possible circular import at module load time.
    from treeherder.log_parser.tasks import parse_logs
    parseable = {"errorsummary_json", "buildbot_text", "builds-4h"}
    job_log_ids = [
        job_log.id for job_log in job_logs
        if job_log.status == JobLog.PENDING and job_log.name in parseable
    ]
    # Failed jobs are parsed on a separate queue at a different priority.
    if result != 'success':
        queue = 'log_parser_fail'
        priority = 'failures'
    else:
        queue = 'log_parser'
        priority = "normal"
    parse_logs.apply_async(queue=queue,
                           args=[job.id, job_log_ids, priority])
Kick off the initial task that parses the log data .
16,631
def store_job_data(repository, data):
    """Store job data instances into jobs db.

    Args:
        repository: repository the jobs belong to.
        data: list of job datums, each with 'job', 'revision' and an
            optional 'superseded' list of job guids.
    """
    if not data:
        return
    # Drop datums we already have stored in the same state.
    data = _remove_existing_jobs(data)
    if not data:
        return
    superseded_job_guid_placeholders = []
    for datum in data:
        try:
            job = datum['job']
            revision = datum['revision']
            superseded = datum.get('superseded', [])
            # Short revisions are matched by prefix; full 40-char ones
            # exactly.
            revision_field = 'revision__startswith' if len(revision) < 40 else 'revision'
            filter_kwargs = {'repository': repository, revision_field: revision}
            push_id = Push.objects.values_list('id', flat=True).get(**filter_kwargs)
            job_guid = _load_job(repository, job, push_id)
            for superseded_guid in superseded:
                superseded_job_guid_placeholders.append([job_guid, superseded_guid])
        except Exception as e:
            # Only swallow-and-report when DYNO is set (presumably running
            # on a Heroku dyno — confirm); re-raise elsewhere.
            if 'DYNO' not in os.environ:
                raise
            logger.exception(e)
            # Merge the job fields into the datum for the error report.
            datum.update(datum.get("job", {}))
            newrelic.agent.record_exception(params=datum)
            continue
    if superseded_job_guid_placeholders:
        # Mark every superseded job as completed/superseded.
        for (job_guid, superseded_by_guid) in superseded_job_guid_placeholders:
            Job.objects.filter(guid=superseded_by_guid).update(
                result='superseded', state='completed')
Store job data instances into jobs db
16,632
def get_exchange(connection, name, create=False):
    """Get a Kombu Exchange object using the passed in name.

    With create=False the declare is passive, i.e. it only verifies that
    the exchange already exists.
    """
    exchange = Exchange(name, type="topic", passive=not create)
    bound = exchange(connection)
    bound.declare()
    return bound
Get a Kombu Exchange object using the passed in name .
16,633
def _process(self, project, build_system, job_priorities):
    """Return list of ref_data_name for job_priorities.

    Args:
        project: project name used in the cache key and lookups.
        build_system: build system identifier used in the cache key.
        job_priorities: iterable of job priority objects.

    Returns:
        A list of ref_data_name values for accepted job priorities.
    """
    jobs = []
    # Cache key is scoped per project/build system.
    cache_key = '{}-{}-ref_data_names_cache'.format(project, build_system)
    ref_data_names_map = cache.get(cache_key)
    if not ref_data_names_map:
        # Rebuild and cache the unique-identifier -> ref_data_name map.
        ref_data_names_map = self._build_ref_data_names(project, build_system)
        cache.set(cache_key, ref_data_names_map,
                  SETA_REF_DATA_NAMES_CACHE_TIMEOUT)
    for jp in job_priorities:
        if not valid_platform(jp.platform):
            continue
        if is_job_blacklisted(jp.testtype):
            continue
        key = jp.unique_identifier()
        if key in ref_data_names_map:
            jobs.append(ref_data_names_map[key])
        else:
            logger.warning(
                'Job priority (%s) not found in accepted jobs list', jp)
    return jobs
Return list of ref_data_name for job_priorities
16,634
def _build_ref_data_names(self, project, build_system):
    """We want all reference data names for every task that runs on a
    specific project.

    Returns:
        dict mapping unique_key(...) -> ref_data_name for runnable jobs
        matching the requested build system ('*' accepts any).
    """
    ignored_jobs = []
    ref_data_names = {}
    runnable_jobs = list_runnable_jobs(project)
    for job in runnable_jobs:
        testtype = parse_testtype(
            build_system_type=job['build_system_type'],
            job_type_name=job['job_type_name'],
            platform_option=job['platform_option'],
            ref_data_name=job['ref_data_name']
        )
        if not valid_platform(job['platform']):
            continue
        if is_job_blacklisted(testtype):
            # Track ignored jobs only for logging below.
            ignored_jobs.append(job['ref_data_name'])
            continue
        key = unique_key(testtype=testtype,
                         buildtype=job['platform_option'],
                         platform=job['platform'])
        if build_system == '*':
            ref_data_names[key] = job['ref_data_name']
        elif job['build_system_type'] == build_system:
            ref_data_names[key] = job['ref_data_name']
    for ref_data_name in sorted(ignored_jobs):
        logger.info('Ignoring %s', ref_data_name)
    return ref_data_names
We want all reference data names for every task that runs on a specific project .
16,635
def collect_fields(node):
    """Get all the unique field names that are eligible for optimization.

    Walks the selection tree rooted at `node` (GraphQL-style dicts),
    gathering each Field leaf's name and recursing into any nested
    selection sets.
    """
    found = set()
    for leaf in node:
        if leaf.get('kind', None) == "Field":
            found.add(leaf["name"]["value"])
        nested = leaf.get("selection_set", None)
        if nested:
            found |= collect_fields(nested["selections"])
    return found
Get all the unique field names that are eligible for optimization
16,636
def optimize(qs, info_dict, field_map):
    """Add either select_related or prefetch_related to fields of the qs."""
    for field in collect_fields(info_dict):
        if field not in field_map:
            continue
        field_name, opt = field_map[field]
        if opt == "prefetch":
            qs = qs.prefetch_related(field_name)
        else:
            qs = qs.select_related(field_name)
    return qs
Add either select_related or prefetch_related to fields of the qs
16,637
def build_connection(url):
    """Build an Elasticsearch connection with the given url.

    Credentials are taken from ELASTICSEARCH_USERNAME and
    ELASTICSEARCH_PASSWORD when both are set.
    """
    credentials = (os.environ.get('ELASTICSEARCH_USERNAME'),
                   os.environ.get('ELASTICSEARCH_PASSWORD'))
    if all(credentials):
        return Elasticsearch(url, http_auth=credentials)
    return Elasticsearch(url)
Build an Elasticsearch connection with the given url
16,638
def get_artifact(self):
    """Return the job artifact built by the parser."""
    sub_artifact = self.parser.get_artifact()
    self.artifact[self.parser.name] = sub_artifact
    return self.artifact
Return the job artifact built by the parser .
16,639
def precise_matcher(text_log_error):
    """Query for TextLogErrorMatches identical to matches of the given
    TextLogError.

    NOTE(review): the ORM lookup strings were garbled in the source
    ("text_log_error metadata__..." is not valid Python); restored to the
    `text_log_error___metadata` spelling matching the
    TextLogError -> TextLogErrorMetadata relation traversal used by the
    intact `text_log_error__step__job` lookup — confirm against the models.
    """
    failure_line = text_log_error.metadata.failure_line
    logger.debug("Looking for test match in failure %d", failure_line.id)
    if failure_line.action != "test_result" or failure_line.message is None:
        return
    f = {
        'text_log_error___metadata__failure_line__action': 'test_result',
        'text_log_error___metadata__failure_line__test': failure_line.test,
        'text_log_error___metadata__failure_line__subtest': failure_line.subtest,
        'text_log_error___metadata__failure_line__status': failure_line.status,
        'text_log_error___metadata__failure_line__expected': failure_line.expected,
        'text_log_error___metadata__failure_line__message': failure_line.message,
    }
    # Exclude unclassified matches unless they belong to this very job.
    qwargs = (
        Q(text_log_error___metadata__best_classification=None)
        & (Q(text_log_error___metadata__best_is_verified=True)
           | Q(text_log_error__step__job=text_log_error.step.job)))
    qs = (TextLogErrorMatch.objects.filter(**f)
                                   .exclude(qwargs)
                                   .order_by('-score', '-classified_failure'))
    if not qs:
        return
    # Score chunks of the queryset until the time budget is exhausted.
    chunks = chunked_qs_reverse(qs, chunk_size=20000)
    return chain.from_iterable(
        time_boxed(score_matches, chunks, time_budget=500))
Query for TextLogErrorMatches identical to matches of the given TextLogError .
16,640
def elasticsearch_matcher(text_log_error):
    """Query Elasticsearch and score the results.

    Returns [] when Elasticsearch is not configured, None when the failure
    line is not matchable or nothing scores, otherwise a single-element
    list of (score, best_classification).
    """
    if not settings.ELASTICSEARCH_URL:
        return []
    failure_line = text_log_error.metadata.failure_line
    if failure_line.action != "test_result" or not failure_line.message:
        logger.debug("Skipped elasticsearch matching")
        return
    filters = [
        {'term': {'test': failure_line.test}},
        {'term': {'status': failure_line.status}},
        {'term': {'expected': failure_line.expected}},
        {'exists': {'field': 'best_classification'}}
    ]
    if failure_line.subtest:
        # NOTE(review): list.append returns None, so this `query =` binding
        # is dead; only the in-place append to `filters` matters.
        query = filters.append({'term': {'subtest': failure_line.subtest}})
    query = {
        'query': {
            'bool': {
                'filter': filters,
                'must': [{
                    'match_phrase': {
                        # Message is truncated to the first 1024 chars.
                        'message': failure_line.message[:1024],
                    },
                }],
            },
        },
    }
    try:
        results = search(query)
    except Exception:
        logger.error("Elasticsearch lookup failed: %s %s %s %s %s",
                     failure_line.test, failure_line.subtest,
                     failure_line.status, failure_line.expected,
                     failure_line.message)
        raise
    if len(results) > 1:
        # Record multi-result lookups for monitoring.
        args = (text_log_error.id, failure_line.id, len(results),)
        logger.info('text_log_error=%i failure_line=%i Elasticsearch produced %i results' % args)
        newrelic.agent.record_custom_event('es_matches', {
            'num_results': len(results),
            'text_log_error_id': text_log_error.id,
            'failure_line_id': failure_line.id,
        })
    scorer = MatchScorer(failure_line.message)
    matches = [(item, item['message']) for item in results]
    best_match = scorer.best_match(matches)
    if not best_match:
        return
    score, es_result = best_match
    return [(score, es_result['best_classification'])]
Query Elasticsearch and score the results .
16,641
def crash_signature_matcher(text_log_error):
    """Query for TextLogErrorMatches with the same crash signature.

    NOTE(review): the ORM lookup strings were garbled in the source
    ("text_log_error metadata__..." is not valid Python); restored to the
    `text_log_error___metadata` spelling consistent with the intact
    `text_log_error__step__job` lookup — confirm against the models.
    """
    failure_line = text_log_error.metadata.failure_line
    if (failure_line.action != "crash"
            or failure_line.signature is None
            or failure_line.signature == "None"):
        return
    f = {
        'text_log_error___metadata__failure_line__action': 'crash',
        'text_log_error___metadata__failure_line__signature': failure_line.signature,
    }
    # Exclude unclassified matches unless they belong to this very job.
    qwargs = (
        Q(text_log_error___metadata__best_classification=None)
        & (Q(text_log_error___metadata__best_is_verified=True)
           | Q(text_log_error__step__job=text_log_error.step.job)))
    qs = (TextLogErrorMatch.objects.filter(**f)
                                   .exclude(qwargs)
                                   .select_related('text_log_error',
                                                   'text_log_error___metadata')
                                   .order_by('-score', '-classified_failure'))
    size = 20000
    time_budget = 500
    # First pass: matches for the very same test.
    first_attempt = qs.filter(
        text_log_error___metadata__failure_line__test=failure_line.test)
    chunks = chunked_qs_reverse(first_attempt, chunk_size=size)
    scored_matches = chain.from_iterable(
        time_boxed(score_matches, chunks, time_budget))
    if scored_matches:
        return scored_matches
    # Second pass: any crash with this signature, at a reduced score.
    chunks = chunked_qs_reverse(qs, chunk_size=size)
    scored_matches = chain.from_iterable(
        time_boxed(score_matches, chunks, time_budget,
                   score_multiplier=(8, 10)))
    return scored_matches
Query for TextLogErrorMatches with the same crash signature .
16,642
def best_match(self, matches):
    """Find the most similar string to self.target.

    Args:
        matches: iterable of (match, message) pairs.

    Returns:
        The (ratio, match) pair with the highest full ratio, or None when
        `matches` is empty.
    """
    best = None
    for match, message in matches:
        self.matcher.set_seq1(message)
        # quick_ratio acts as a cheap pre-filter: skip the expensive full
        # ratio when it cannot beat the current best.
        if best is not None and self.matcher.quick_ratio() < best[0]:
            continue
        ratio = self.matcher.ratio()
        if best is None or ratio > best[0]:
            best = (ratio, match)
    return best
Find the most similar string to self . target .
16,643
def store_push_data(repository, pushes):
    """Stores push data in the treeherder database."""
    if not pushes:
        logger.info("No new pushes to store")
        return
    # Persist each push individually.
    for push in pushes:
        store_push(repository, push)
Stores push data in the treeherder database
16,644
def cycle_data(self, repository, cycle_interval, chunk_size, sleep_time):
    """Delete data older than cycle_interval, splitting the target data
    into chunks of chunk_size size.

    Args:
        repository: repository whose data is cycled.
        cycle_interval: timedelta; rows with push_timestamp older than
            now - cycle_interval are deleted.
        chunk_size: max number of rows deleted per iteration.
        sleep_time: seconds to pause between chunks (0/None to disable).
    """
    max_timestamp = datetime.datetime.now() - cycle_interval
    # Delete expired datums in bounded chunks.
    while True:
        perf_datums_to_cycle = list(self.filter(
            repository=repository,
            push_timestamp__lt=max_timestamp).values_list(
                'id', flat=True)[:chunk_size])
        if not perf_datums_to_cycle:
            break
        self.filter(id__in=perf_datums_to_cycle).delete()
        if sleep_time:
            # Give the database room to breathe between chunks.
            time.sleep(sleep_time)
    # Drop signatures that no longer have any datapoints.
    for signature in PerformanceSignature.objects.filter(repository=repository):
        if not self.filter(signature=signature).exists():
            signature.delete()
Delete data older than cycle_interval splitting the target data into chunks of chunk_size size .
16,645
def to_representation(self, failure_line):
    """Manually add matches, our wrapper of the TLEMetadata -> TLE relation."""
    try:
        matches = failure_line.error.matches.all()
    except AttributeError:
        # The failure line has no linked error.
        matches = []
    tle_serializer = TextLogErrorMatchSerializer(matches, many=True)
    classified_failures = models.ClassifiedFailure.objects.filter(
        error_matches__in=matches)
    cf_serializer = ClassifiedFailureSerializer(classified_failures, many=True)
    response = super().to_representation(failure_line)
    # Graft the serialized matches/classifications onto the base payload.
    response['matches'] = tle_serializer.data
    response['classified_failures'] = cf_serializer.data
    return response
Manually add matches our wrapper of the TLEMetadata - > TLE relation .
16,646
def check_whiteboard_status(self, whiteboard):
    """Extracts stockwell text from a bug's whiteboard status to determine
    whether it matches specified stockwell text; returns a boolean.
    """
    tag_match = re.search(r'\[stockwell (.+?)\]', whiteboard)
    if tag_match is None:
        return False
    label = tag_match.group(1).split(':')[0]
    return label in ('fixed', 'disable-recommended', 'infra', 'disabled')
Extracts stockwell text from a bug's whiteboard status to determine whether it matches specified stockwell text; returns a boolean.
16,647
def fetch_bug_details(self, bug_ids):
    """Fetches bug metadata from bugzilla and returns an encoded dict if
    successful, otherwise returns None.
    """
    params = {'include_fields': 'product, component, priority, whiteboard, id'}
    params['id'] = bug_ids
    try:
        response = self.session.get(settings.BZ_API_URL + '/rest/bug',
                                    headers=self.session.headers,
                                    params=params,
                                    timeout=30)
        response.raise_for_status()
    except RequestException as e:
        logger.warning('error fetching bugzilla metadata for bugs due to {}'.format(e))
        return None
    # Some failures come back as an HTML page instead of JSON.
    if response.headers['Content-Type'] == 'text/html; charset=UTF-8':
        return None
    data = response.json()
    if 'bugs' not in data:
        return None
    return data['bugs']
Fetches bug metadata from bugzilla and returns an encoded dict if successful otherwise returns None .
16,648
def get_alt_date_bug_totals(self, startday, endday, bug_ids):
    """Use previously fetched bug_ids to check for total failures exceeding
    150 in the given date window.
    """
    failure_counts = (BugJobMap.failures.by_date(startday, endday)
                                        .filter(bug_id__in=bug_ids)
                                        .values('bug_id')
                                        .annotate(total=Count('id'))
                                        .values('bug_id', 'total'))
    return {row['bug_id']: row['total']
            for row in failure_counts if row['total'] >= 150}
use previously fetched bug_ids to check for total failures exceeding 150 in 21 days
16,649
def transform(testtype):
    """Normalize a testtype string.

    Strips opt/debug/plain prefixes and rewrites a handful of legacy
    names; returns None for funsize tasks.
    """
    if testtype.startswith('[funsize'):
        return None
    testtype = testtype.split('/opt-')[-1]
    testtype = testtype.split('/debug-')[-1]
    testtype = testtype.replace('plain-', '').strip()
    # Legacy renames, applied in order.
    renames = (
        ('browser-chrome-e10s', 'e10s-browser-chrome'),
        ('devtools-chrome-e10s', 'e10s-devtools-chrome'),
        ('[TC] Android 4.3 API15+ ', ''),
        ('webgl-', 'gl-'),
    )
    for old, new in renames:
        testtype = testtype.replace(old, new)
    return testtype
Many of these transformations date from tasks that predate task labels; others apply when data is fetched directly from the Treeherder jobs endpoint instead of the runnable jobs API.
16,650
def _get_username_from_userinfo ( self , user_info ) : subject = user_info [ 'sub' ] email = user_info [ 'email' ] if "Mozilla-LDAP" in subject : return "mozilla-ldap/" + email elif "email" in subject : return "email/" + email elif "github" in subject : return "github/" + email elif "google" in subject : return "google/" + email elif "oauth2" in subject : return "oauth2/" + email else : raise AuthenticationFailed ( "Unrecognized identity" )
Get the user's username from the jwt sub property.
16,651
def _get_user_info(self, access_token, id_token):
    """Extracts the user info payload from the Id Token.

    Args:
        access_token: the OAuth access token (passed to jwt.decode so the
            library can check the token's at_hash claim).
        id_token: the JWT id token to verify and decode.

    Returns:
        The decoded user info claims dict.

    Raises:
        AuthError: when the token header is malformed, the signing key is
            unknown, or the token fails validation.
    """
    try:
        unverified_header = jwt.get_unverified_header(id_token)
    except jwt.JWTError:
        raise AuthError('Unable to decode the Id token header')
    if 'kid' not in unverified_header:
        raise AuthError('Id token header missing RSA key ID')
    # Locate the advertised signing key in the JWKS set (`jwks` is
    # module-level state fetched elsewhere — not visible in this block).
    rsa_key = None
    for key in jwks["keys"]:
        if key["kid"] == unverified_header["kid"]:
            rsa_key = {
                "kty": key["kty"],
                "kid": key["kid"],
                "use": key["use"],
                "n": key["n"],
                "e": key["e"]
            }
            break
    if not rsa_key:
        raise AuthError('Id token using unrecognised RSA key ID')
    try:
        user_info = jwt.decode(
            id_token,
            rsa_key,
            algorithms=['RS256'],
            audience=AUTH0_CLIENTID,
            access_token=access_token,
            issuer="https://" + AUTH0_DOMAIN + "/")
        return user_info
    except jwt.ExpiredSignatureError:
        raise AuthError('Id token is expired')
    except jwt.JWTClaimsError:
        raise AuthError("Incorrect claims: please check the audience and issuer")
    except jwt.JWTError:
        raise AuthError("Invalid header: Unable to parse authentication")
Extracts the user info payload from the Id Token .
16,652
def _calculate_session_expiry ( self , request , user_info ) : access_token_expiry_timestamp = self . _get_access_token_expiry ( request ) id_token_expiry_timestamp = self . _get_id_token_expiry ( user_info ) now_in_seconds = int ( time . time ( ) ) earliest_expiration_timestamp = min ( access_token_expiry_timestamp , id_token_expiry_timestamp ) seconds_until_expiry = earliest_expiration_timestamp - now_in_seconds if seconds_until_expiry <= 0 : raise AuthError ( 'Session expiry time has already passed!' ) return seconds_until_expiry
Returns the number of seconds after which the Django session should expire .
16,653
def _unique_key(job):
    """Return a key to query our uniqueness mapping system."""
    testtype = str(job['testtype'])
    buildtype = str(job['platform_option'])
    platform = str(job['platform'])
    return unique_key(testtype=testtype, buildtype=buildtype,
                      platform=platform)
Return a key to query our uniqueness mapping system .
16,654
def _sanitize_data(runnable_jobs_data):
    """Sanitize runnable-jobs API data down to the fields SETA needs.

    Returns a list of dicts with build_system_type, platform,
    platform_option and testtype. A job type seen under more than one
    build system ends up with build_system_type '*'.
    """
    job_build_system_type = {}
    sanitized_list = []
    for job in runnable_jobs_data:
        if not valid_platform(job['platform']):
            logger.info('Invalid platform %s', job['platform'])
            continue
        testtype = parse_testtype(
            build_system_type=job['build_system_type'],
            job_type_name=job['job_type_name'],
            platform_option=job['platform_option'],
            ref_data_name=job['ref_data_name']
        )
        if not testtype:
            continue
        new_job = {
            'build_system_type': job['build_system_type'],
            'platform': job['platform'],
            'platform_option': job['platform_option'],
            'testtype': testtype,
        }
        key = _unique_key(new_job)
        if key not in job_build_system_type:
            # First time this job type is seen: remember its build system
            # and keep it.
            job_build_system_type[key] = job['build_system_type']
            sanitized_list.append(new_job)
        elif new_job['build_system_type'] != job_build_system_type[key]:
            # Seen again under a different build system. Overwriting
            # new_job's build_system_type with the stored value makes it
            # compare equal to the dict already in sanitized_list, so
            # .index() can locate that stored entry, which is then marked
            # '*' (present in both build systems).
            new_job['build_system_type'] = job_build_system_type[key]
            sanitized_list[sanitized_list.index(new_job)]['build_system_type'] = '*'
    return sanitized_list
We receive data from runnable jobs api and return the sanitized data that meets our needs .
16,655
def _update_table(data):
    """Add new jobs to the priority table; widen build system where needed.

    Args:
        data: list of dicts describing job types (testtype, platform,
            platform_option, build_system_type).

    Returns:
        (new_jobs, failed_changes, updated_jobs) counts.
    """
    jp_index, priority, expiration_date = _initialize_values()
    total_jobs = len(data)
    new_jobs, failed_changes, updated_jobs = 0, 0, 0
    for job in data:
        key = _unique_key(job)
        if key in jp_index:
            # '*' already covers both build systems; otherwise, seeing the
            # same job type from a second build system widens it to '*'.
            if jp_index[key]['build_system_type'] != '*' and jp_index[key]['build_system_type'] != job["build_system_type"]:
                db_job = JobPriority.objects.get(pk=jp_index[key]['pk'])
                db_job.buildsystem = '*'
                db_job.save()
                logger.info('Updated %s/%s from %s to %s',
                            db_job.testtype, db_job.buildtype,
                            job['build_system_type'], db_job.buildsystem)
                updated_jobs += 1
        else:
            try:
                jobpriority = JobPriority(
                    testtype=str(job["testtype"]),
                    buildtype=str(job["platform_option"]),
                    platform=str(job["platform"]),
                    priority=priority,
                    expiration_date=expiration_date,
                    buildsystem=job["build_system_type"]
                )
                jobpriority.save()
                logger.info('New job was found (%s,%s,%s,%s)',
                            job['testtype'], job['platform_option'],
                            job['platform'], job["build_system_type"])
                new_jobs += 1
            except Exception as error:
                # Keep processing the rest of the batch; just count the
                # failure.
                logger.warning(str(error))
                failed_changes += 1
    logger.info('We have %s new jobs and %s updated jobs out of %s total jobs processed.',
                new_jobs, updated_jobs, total_jobs)
    if failed_changes != 0:
        logger.warning('We have failed %s changes out of %s total jobs processed.',
                       failed_changes, total_jobs)
    return new_jobs, failed_changes, updated_jobs
Add new jobs to the priority table and update the build system if required . data - it is a list of dictionaries that describe a job type
16,656
def load_preseed():
    """Update JobPriority information from preseed.json.

    NOTE(review): returns early when the JobPriority table is empty, so
    preseed entries are only applied on top of existing data — confirm
    this is intentional.
    """
    if not JobPriority.objects.exists():
        return
    preseed = preseed_data()
    for job in preseed:
        queryset = JobPriority.objects.all()
        # '*' in a preseed field acts as a wildcard; only concrete values
        # narrow the queryset.
        for field in ('testtype', 'buildtype', 'platform'):
            if job[field] != '*':
                queryset = queryset.filter(**{field: job[field]})
        if not queryset:
            create_new_entry(job)
        else:
            for jp in queryset:
                process_job_priority(jp, job)
Update JobPriority information from preseed . json
16,657
def all_valid_time_intervals():
    """Return every valid time interval for data stored by Perfherder."""
    intervals = (
        PerformanceTimeInterval.DAY,
        PerformanceTimeInterval.WEEK,
        PerformanceTimeInterval.TWO_WEEKS,
        PerformanceTimeInterval.SIXTY_DAYS,
        PerformanceTimeInterval.NINETY_DAYS,
        PerformanceTimeInterval.ONE_YEAR,
    )
    return list(intervals)
Helper method to return all possible valid time intervals for data stored by Perfherder
16,658
def get_property_names(self):
    """Return the set of all property names across these signatures."""
    return {
        name
        for signature_value in self.values()
        for name in signature_value.keys()
    }
Returns all property names in this collection of signatures
16,659
def get_property_values(self, property_name):
    """Return all truthy values of *property_name* across these signatures."""
    return {
        sig[property_name]
        for sig in self.values()
        if sig.get(property_name)
    }
Returns all property values for a particular property name in this collection
16,660
def get_performance_signatures(self, project, **params):
    """Fetch performance signatures for *project*, wrapped in a
    PerformanceSignatureCollection."""
    raw = self._get_json(self.PERFORMANCE_SIGNATURES_ENDPOINT, project, **params)
    return PerformanceSignatureCollection(raw)
Gets a set of performance signatures associated with a project and time range
16,661
def get_performance_data(self, project, **params):
    """Fetch performance series for *project*, keyed by signature."""
    raw = self._get_json(self.PERFORMANCE_DATA_ENDPOINT, project, **params)
    return {signature: PerformanceSeries(series)
            for signature, series in raw.items()}
Gets a dictionary of PerformanceSeries objects
16,662
def get_matchers():
    """Yield matcher functions from treeherder.autoclassify.matchers.

    A matcher is any module-level function whose name ends in ``_matcher``.
    """
    from . import matchers

    def _is_matcher(member):
        return inspect.isfunction(member) and member.__name__.endswith("_matcher")

    for _name, func in inspect.getmembers(matchers, _is_matcher):
        yield func
Get matcher functions from treeherder . autoclassify . matchers
16,663
def find_best_matches(errors, matchers):
    """Yield the single best TextLogErrorMatch for each error.

    For each TextLogError, candidate matches from every matcher are
    scored and the best one is yielded; errors with no matches are
    skipped.
    """
    for text_log_error in errors:
        matches = find_all_matches(text_log_error, matchers)  # generator
        # first() with a sort key: negating both fields makes the highest
        # score (ties broken by highest classified_failure_id) sort first.
        best_match = first(matches, key=lambda m: (-m.score, -m.classified_failure_id))
        if not best_match:
            continue
        newrelic.agent.record_custom_event('highest_scored_matcher', {
            'matcher': best_match.matcher_name,
            'score': best_match.score,
            'text_log_error': best_match.text_log_error_id,
        })
        yield best_match
Find the best match for each error
16,664
def find_all_matches(text_log_error, matchers):
    """Run every matcher against *text_log_error*, yielding unsaved
    TextLogErrorMatch instances."""
    for matcher_func in matchers:
        results = matcher_func(text_log_error) or ()
        for score, classified_failure_id in results:
            yield TextLogErrorMatch(
                score=score,
                matcher_name=matcher_func.__name__,
                classified_failure_id=classified_failure_id,
                text_log_error=text_log_error,
            )
Find matches for the given error using the given matcher classes
16,665
def get_best_match(text_log_error):
    """Return the best-scoring match (score > 0.7) for the given
    TextLogError, or None when nothing clears the cut-off."""
    score_cut_off = 0.7
    candidates = text_log_error.matches.filter(score__gt=score_cut_off)
    ordered = candidates.order_by("-score", "-classified_failure_id")
    return ordered.select_related('classified_failure').first()
Get the best TextLogErrorMatch for a given TextLogErrorMatch .
16,666
def mark_best_classification(text_log_error, classified_failure):
    """Set *classified_failure* as the best classification.

    Persists it on the TextLogError's metadata row and re-indexes the
    associated FailureLine in Elasticsearch so both stores agree.
    """
    text_log_error.metadata.best_classification = classified_failure
    text_log_error.metadata.save(update_fields=['best_classification'])
    text_log_error.metadata.failure_line.elastic_search_insert()
Wrapper for setting best_classification on both TextLogError and FailureLine .
16,667
def mark_best_classifications(errors):
    """For each error that has a best match, persist that match's
    classification."""
    for error in errors:
        match = get_best_match(error)
        if match:
            mark_best_classification(error, match.classified_failure)
Convenience wrapper around mark_best_classification .
16,668
def update_db(matches):
    """Save TextLogErrorMatch instances to the DB, skipping duplicates.

    Re-running matchers can collide with existing rows; such
    IntegrityErrors are logged and ignored rather than aborting the batch.
    """
    for match in matches:
        try:
            match.save()
        except IntegrityError:
            # Bug fix: the format string has three placeholders, so the
            # arguments must be passed individually — the original passed
            # one tuple, which raises a logging formatting error.
            logger.warning(
                "Tried to create duplicate match for TextLogError %i with matcher %s and classified_failure %i",
                match.text_log_error_id,
                match.matcher_name,
                match.classified_failure_id,
            )
Save TextLogErrorMatch instances to the DB
16,669
def get_json_schema(filename):
    """Load a JSON Schema (stored as YAML) from the local ``schemas`` dir.

    Args:
        filename: bare file name inside the ``schemas`` directory.

    Returns:
        The parsed schema as plain Python data structures.
    """
    file_path = os.path.join("schemas", filename)
    with open(file_path) as f:
        # safe_load: yaml.load without an explicit Loader is deprecated
        # and can construct arbitrary Python objects from the document.
        schema = yaml.safe_load(f)
    return schema
Get a JSON Schema by filename .
16,670
def store_job_info_artifact(job, job_info_artifact):
    """Store the contents of a job-info artifact as JobDetail rows.

    Each entry in the artifact's 'job_details' list creates or updates a
    JobDetail; over-long values are truncated to the model field length.
    """
    job_details = json.loads(job_info_artifact['blob'])['job_details']
    for job_detail in job_details:
        job_detail_dict = {
            'title': job_detail.get('title'),
            'value': job_detail['value'],
            'url': job_detail.get('url')
        }
        for (k, v) in job_detail_dict.items():
            max_field_length = JobDetail._meta.get_field(k).max_length
            if v is not None and len(v) > max_field_length:
                logger.warning("Job detail '%s' for job_guid %s too long, truncating",
                               v[:max_field_length], job.guid)
                job_detail_dict[k] = v[:max_field_length]
        # 'url' is not part of the lookup key: move it into
        # update_or_create's defaults so it is updated, not matched on.
        job_detail_dict['defaults'] = {'url': job_detail_dict['url']}
        del job_detail_dict['url']
        JobDetail.objects.update_or_create(
            job=job,
            **job_detail_dict)
Store the contents of the job info artifact in job details
16,671
def store_text_log_summary_artifact(job, text_log_summary_artifact):
    """Store the contents of the text log summary artifact.

    Creates TextLogStep and TextLogError rows for the job inside one
    transaction, then computes the error summary for the job.
    """
    step_data = json.loads(
        text_log_summary_artifact['blob'])['step_data']
    # Map human-readable result strings back to their stored codes.
    result_map = {v: k for (k, v) in TextLogStep.RESULTS}
    with transaction.atomic():
        for step in step_data['steps']:
            name = step['name'][:TextLogStep._meta.get_field('name').max_length]
            # Timestamps are optional; parse only those present.
            time_kwargs = {}
            for tkey in ('started', 'finished'):
                if step.get(tkey):
                    time_kwargs[tkey] = dateutil.parser.parse(
                        step[tkey], ignoretz=True)
            log_step = TextLogStep.objects.create(
                job=job,
                started_line_number=step['started_linenumber'],
                finished_line_number=step['finished_linenumber'],
                name=name,
                result=result_map[step['result']],
                **time_kwargs)
            if step.get('errors'):
                for error in step['errors']:
                    TextLogError.objects.create(
                        step=log_step,
                        line_number=error['linenumber'],
                        # filter the line text — presumably to strip
                        # astral-plane chars; confirm against astral_filter
                        line=astral_filter(error['line']))
    # Compute the error summary for this job eagerly.
    error_summary.get_error_summary(job)
Store the contents of the text log summary artifact
16,672
def serialize_artifact_json_blobs(artifacts):
    """Ensure JSON artifact blobs supplied as objects are stored as strings.

    Mutates the artifact dicts in place and returns the same list.
    """
    for artifact in artifacts:
        is_json = artifact['type'].lower() == 'json'
        if is_json and not isinstance(artifact['blob'], str):
            artifact['blob'] = json.dumps(artifact['blob'])
    return artifacts
Ensure that JSON artifact blobs passed as dicts are converted to JSON
16,673
def get_option_collection_hash(self):
    """Return a mapping of option collection hashes to their options."""
    resp = self._get_json(self.OPTION_COLLECTION_HASH_ENDPOINT)
    return {entry['option_collection_hash']: entry['options']
            for entry in resp}
Gets option collection hash a mapping of hash values to build properties
16,674
def get_pushes(self, project, **params):
    """Return pushes for *project*, filtered by the given query params."""
    endpoint = self.PUSH_ENDPOINT
    return self._get_json_list(endpoint, project, **params)
Gets pushes from project filtered by parameters
16,675
def get_jobs(self, project, **params):
    """Return jobs for *project*, filtered by the given query params."""
    endpoint = self.JOBS_ENDPOINT
    return self._get_json_list(endpoint, project, **params)
Gets jobs from project filtered by parameters
16,676
def get_job_log_url(self, project, **params):
    """Return job log URLs for *project*, filtered by the given params."""
    endpoint = self.JOB_LOG_URL_ENDPOINT
    return self._get_json(endpoint, project, **params)
Gets job log url filtered by parameters
16,677
def all_documents(index=INDEX_NAME):
    """Yield every document in *index* via a match_all query."""
    match_all = {'query': {'match_all': {}}}
    yield from raw_query(match_all, index=index)
Get all documents from the given index .
16,678
def bulk(iterable, index=INDEX_NAME, doc_type=DOC_TYPE, action='index'):
    """Wrapper of elasticsearch's bulk helper.

    Builds one op per model in *iterable* and submits them in a single
    bulk request. Returns the number of successfully processed items
    (0 when there is nothing to send).
    """
    # Bug fix: pass the caller-supplied index/doc_type through to
    # dict_to_op instead of always using the module-level defaults.
    actions = compact(
        dict_to_op(
            to_dict(model),
            index_name=index,
            doc_type=doc_type,
            op_type=action,
        )
        for model in iterable
    )
    # Filter out empty operations (compact drops falsy entries).
    if not actions:
        return 0
    items, _ = es_bulk(es_conn, actions, doc_type=doc_type, index=index)
    return items
Wrapper of elasticsearch s bulk method
16,679
def count_index(index=INDEX_NAME):
    """Return the number of documents in *index* (after a refresh)."""
    refresh_index()
    match_all = {'query': {'match_all': {}}}
    return es_conn.count(index=index, doc_type=DOC_TYPE, body=match_all)['count']
Return a document count for the given index .
16,680
def get_document(id, index=INDEX_NAME, doc_type=DOC_TYPE, **kwargs):
    """Fetch a single document by ID and return its _source payload."""
    response = es_conn.get(index=index, doc_type=doc_type, id=id, **kwargs)
    return response['_source']
Thin wrapper to get a single document by ID .
16,681
def index(obj, index=INDEX_NAME, doc_type=DOC_TYPE):
    """Index *obj*, using its 'id' field as the document ID.

    Returns None when the object cannot be converted to a document.
    """
    document = to_dict(obj)
    if document is None:
        return
    doc_id = document.pop('id')
    return es_conn.index(index, doc_type, document, id=doc_id)
Index the given document .
16,682
def raw_query(query, index=INDEX_NAME, doc_type=DOC_TYPE):
    """Thin wrapper of the search function, returning the raw hit dicts."""
    # Bug fix: honour the caller-supplied doc_type instead of always
    # passing the module-level DOC_TYPE constant.
    result = es_conn.search(index=index, doc_type=doc_type, body=query)
    return result['hits']['hits']
Thin wrapper of the search function to provide useful defaults
16,683
def reinit_index(index=INDEX_NAME):
    """Delete (if present) and then recreate the given index."""
    # ignore=404: deleting a missing index is fine on first run.
    es_conn.indices.delete(index, ignore=404)
    try:
        es_conn.indices.create(index, INDEX_SETTINGS.get(index, None))
    except TransportError as e:
        # Fixed message typo: "Failed to created" -> "Failed to create".
        raise Exception('Failed to create index, got: {}'.format(e.error))
Delete and then initialise the given index name
16,684
def search(query, index=INDEX_NAME, doc_type=DOC_TYPE):
    """Run *query* and return just the _source payload of each hit."""
    hits = raw_query(query, index=index, doc_type=doc_type)
    return [hit['_source'] for hit in hits]
Thin wrapper of the main query function to provide just the resulting objects
16,685
def clear_expiration_field_for_expired_jobs(self):
    """Clear the expiration date on every JobPriority that has expired."""
    # Only rows that still carry an expiration date can possibly expire.
    for job in JobPriority.objects.filter(expiration_date__isnull=False):
        if job.has_expired():
            job.expiration_date = None
            job.save()
Clear the expiration date of every job that has expired .
16,686
def adjust_jobs_priority(self, high_value_jobs, priority=1):
    """Raise or lower each stored job priority to match the new analysis.

    Jobs not in *high_value_jobs* drop to SETA_LOW_VALUE_PRIORITY; jobs
    in it are promoted to *priority*. Only non-expiring rows
    (expiration_date is NULL) are considered.
    """
    for jp in JobPriority.objects.filter(expiration_date__isnull=True):
        if jp.unique_identifier() not in high_value_jobs:
            # Only write when the value actually changes.
            if jp.priority != SETA_LOW_VALUE_PRIORITY:
                logger.warning('Decreasing priority of %s', jp.unique_identifier())
                jp.priority = SETA_LOW_VALUE_PRIORITY
                jp.save(update_fields=['priority'])
        elif jp.priority != priority:
            logger.warning('Increasing priority of %s', jp.unique_identifier())
            jp.priority = priority
            jp.save(update_fields=['priority'])
For every job priority determine if we need to increase or decrease the job priority
16,687
def _get_job_list_response(self, job_qs, offset, count, return_type):
    """Serialize and format one page of the jobs queryset.

    Args:
        job_qs: Job queryset to page over.
        offset, count: slice bounds for the page.
        return_type: 'dict' (one dict per job) or 'list' (raw value lists
            plus a separate 'job_property_names' key in the response).
    """
    option_collection_map = OptionCollection.objects.get_option_collection_map()
    results = []
    for values in job_qs[offset:(offset + count)].values_list(
            *[pq[1] for pq in self._property_query_mapping]):
        # Resolve the option collection hash into its platform option.
        platform_option = option_collection_map.get(
            values[self._option_collection_hash_idx], "")
        # Apply each mapping's optional post-processing function.
        values = list(values)
        for (i, _) in enumerate(values):
            func = self._property_query_mapping[i][2]
            if func:
                values[i] = func(values[i])
        if return_type == 'dict':
            results.append(dict(zip(
                [pq[0] for pq in self._property_query_mapping] + ['platform_option'],
                values + [platform_option])))
        else:
            results.append(values + [platform_option])
    response_dict = {'results': results}
    if return_type == 'list':
        # In list mode the property names are sent once, not per row.
        response_dict.update({
            'job_property_names':
                [pq[0] for pq in self._property_query_mapping] + ['platform_option']})
    return response_dict
custom method to serialize + format jobs information
16,688
def retrieve(self, request, project, pk=None):
    """GET detail view for a single job.

    Folds log URLs, the platform option and (when present) taskcluster
    metadata into the serialized payload.
    """
    try:
        job = Job.objects.select_related(
            *self._default_select_related + ['taskcluster_metadata']).get(
                repository__name=project, id=pk)
    except Job.DoesNotExist:
        return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
    resp = serializers.JobSerializer(job, read_only=True).data
    resp["resource_uri"] = reverse("jobs-detail",
                                   kwargs={"project": project, "pk": pk})
    resp["logs"] = []
    for (name, url) in JobLog.objects.filter(job=job).values_list('name', 'url'):
        resp["logs"].append({'name': name, 'url': url})
    platform_option = job.get_platform_option()
    if platform_option:
        resp["platform_option"] = platform_option
    try:
        resp['taskcluster_metadata'] = {
            'task_id': job.taskcluster_metadata.task_id,
            'retry_id': job.taskcluster_metadata.retry_id
        }
    except ObjectDoesNotExist:
        # Not every job has taskcluster metadata; omit the key.
        pass
    # Translate the stored status value into its display string.
    status_map = {k: v for k, v in Job.AUTOCLASSIFY_STATUSES}
    resp["autoclassify_status"] = status_map[job.autoclassify_status]
    return Response(resp)
GET method implementation for detail view
16,689
def bug_suggestions(self, request, project, pk=None):
    """Return the set of bug suggestions for this job, or a 404 when the
    job does not exist in the given project."""
    try:
        job = Job.objects.get(repository__name=project, id=pk)
    except ObjectDoesNotExist:
        return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
    return Response(get_error_summary(job))
Gets a set of bug suggestions for this job
16,690
def similar_jobs(self, request, project, pk=None):
    """Get a paged list of jobs similar to the one selected.

    "Similar" means the same job_type in the same repository, excluding
    the job itself, further narrowed by any JobFilter query params.
    """
    try:
        repository = Repository.objects.get(name=project)
    except Repository.DoesNotExist:
        return Response({
            "detail": "No project with name {}".format(project)
        }, status=HTTP_404_NOT_FOUND)
    try:
        job = Job.objects.get(repository=repository, id=pk)
    except ObjectDoesNotExist:
        return Response("No job with id: {0}".format(pk),
                        status=HTTP_404_NOT_FOUND)
    filter_params = request.query_params.copy()
    try:
        # Paging defaults: start at 0, 50 items per page.
        offset = int(filter_params.get("offset", 0))
        count = int(filter_params.get("count", 50))
    except ValueError:
        return Response("Invalid value for offset or count",
                        status=HTTP_400_BAD_REQUEST)
    return_type = filter_params.get("return_type", "dict").lower()
    jobs = JobFilter({k: v for (k, v) in filter_params.items()},
                     queryset=Job.objects.filter(
                         job_type_id=job.job_type_id,
                         repository=repository).exclude(
                             id=job.id).select_related(
                                 *self._default_select_related)).qs
    # Most recent similar jobs first.
    jobs = jobs.order_by('-start_time')
    response_body = self._get_job_list_response(jobs, offset, count,
                                                return_type)
    response_body["meta"] = dict(offset=offset, count=count,
                                 repository=project)
    return Response(response_body)
Get a list of jobs similar to the one selected .
16,691
def structured_iterator(failure_lines):
    """Yield (FailureLine, TBPL-formatted summary string) tuples.

    Lines whose summary cannot be produced are skipped. Once the input is
    exhausted the generator yields (None, None) forever, so callers can
    zip it against a longer sequence without exhausting it.
    """
    summary = partial(failure_line_summary, TbplFormatter())
    for failure_line in failure_lines:
        repr_str = summary(failure_line)
        if repr_str:
            yield failure_line, repr_str
    while True:
        yield None, None
Yield ( FailureLine , TBPL - formatted summary string ) tuples .
16,692
def failure_line_summary(formatter, failure_line):
    """Create a mozlog-formatted error summary string for *failure_line*.

    Returns None for truncated lines, unknown mozlog actions, or when the
    formatter produces no output.
    """
    if failure_line.action == "test_result":
        # mozlog splits test results into per-subtest and end-of-test
        # events; pick the matching formatter method.
        action = "test_status" if failure_line.subtest is not None else "test_end"
    elif failure_line.action == "truncated":
        return
    else:
        action = failure_line.action
    try:
        mozlog_func = getattr(formatter, action)
    except AttributeError:
        logger.warning('Unknown mozlog function "%s"', action)
        return
    formatted_log = mozlog_func(failure_line.to_mozlog_format())
    split_log = first(formatted_log.split("\n", 1))
    if not split_log:
        # Bug fix: the original passed formatted_log as a positional
        # logging arg with no matching placeholder in the format string.
        logger.debug('Failed to split log: %s', formatted_log)
        return
    return split_log.strip()
Create a mozlog formatted error summary string from the given failure_line .
16,693
def get_tls_redis_url(redis_url):
    """Convert a Heroku REDIS_URL into its TLS equivalent.

    The TLS endpoint lives on port+1 under the rediss:// scheme, with
    certificate verification disabled via ssl_cert_reqs=none.
    """
    tls_url = furl(redis_url)
    tls_url.scheme += 's'
    tls_url.port += 1
    tls_url.args['ssl_cert_reqs'] = 'none'
    return str(tls_url)
Returns the TLS version of a Heroku REDIS_URL string .
16,694
def parse(self):
    """Stream the log at self.url, feeding each line to every builder.

    After the stream ends, each builder is finalized and its artifact is
    collected into self.artifacts.

    Raises:
        LogSizeException: when the reported Content-Length exceeds
            MAX_DOWNLOAD_SIZE_IN_BYTES.
    """
    with make_request(self.url, stream=True) as response:
        download_size_in_bytes = int(response.headers.get('Content-Length', -1))
        # Annotate the transaction for monitoring.
        newrelic.agent.add_custom_parameter('unstructured_log_size', download_size_in_bytes)
        newrelic.agent.add_custom_parameter('unstructured_log_encoding',
                                            response.headers.get('Content-Encoding', 'None'))
        if download_size_in_bytes > MAX_DOWNLOAD_SIZE_IN_BYTES:
            raise LogSizeException('Download size of %i bytes exceeds limit' % download_size_in_bytes)
        for line in response.iter_lines():
            for builder in self.builders:
                # Decode (replacing bad bytes) so builders see text.
                builder.parse_line(line.decode('utf-8', 'replace'))
        # Gather the artifacts from all builders.
        for builder in self.builders:
            builder.finish_parse()
            name = builder.name
            artifact = builder.get_artifact()
            if name == 'performance_data' and not artifact[name]:
                # Skip empty performance artifacts entirely.
                continue
            self.artifacts[name] = artifact
Iterate over each line of the log running each parser against it .
16,695
def calculate_hash(options):
    """Return the option_collection_hash for an iterable of option names.

    The options are sorted first so the hash is order-independent.
    """
    # sorted() accepts any iterable directly; the intermediate list()
    # in the original was redundant.
    sha_hash = sha1(''.join(sorted(options)).encode('utf-8'))
    return sha_hash.hexdigest()
returns an option_collection_hash given a list of options
16,696
def cycle_data(self, repository, cycle_interval, chunk_size, sleep_time):
    """Delete job data older than *cycle_interval*, *chunk_size* at a time.

    Returns the number of jobs deleted. Optionally sleeps *sleep_time*
    seconds between chunks to reduce database pressure.
    """
    jobs_max_timestamp = datetime.datetime.now() - cycle_interval
    jobs_cycled = 0
    while True:
        jobs_chunk = list(self.filter(
            repository=repository,
            submit_time__lt=jobs_max_timestamp
        ).values_list('guid', flat=True)[:chunk_size])
        if not jobs_chunk:
            # Nothing older than the cut-off is left.
            return jobs_cycled
        lines = FailureLine.objects.filter(job_guid__in=jobs_chunk)
        if settings.ELASTICSEARCH_URL:
            # Delete the matching documents from Elasticsearch before
            # removing the DB rows they mirror.
            failures = itertools.chain.from_iterable(
                chunked_qs(
                    lines,
                    chunk_size=chunk_size,
                    fields=['id', 'test'],
                ),
            )
            bulk(failures, action='delete')
        lines.delete()
        try:
            self.filter(guid__in=jobs_chunk).delete()
        except UnicodeDecodeError as e:
            # NOTE(review): the cascading delete can hit undecodable
            # TextLogError rows — presumably bad bytes in stored lines;
            # delete those rows directly, then retry the job delete.
            newrelic.agent.record_custom_event('cycle_data UnicodeDecodeError workaround', {
                'exception': str(e),
            })
            TextLogError.objects.filter(
                step__job__guid__in=jobs_chunk).only('id').delete()
            self.filter(guid__in=jobs_chunk).delete()
        jobs_cycled += len(jobs_chunk)
        if sleep_time:
            # Allow some time for other queries to get through.
            time.sleep(sleep_time)
Delete data older than cycle_interval splitting the target data into chunks of chunk_size size . Returns the number of result sets deleted
16,697
def is_fully_verified(self):
    """Return True when every TextLogError of this job has a verified
    best classification."""
    unverified_errors = TextLogError.objects.filter(
        _metadata__best_is_verified=False,
        step__job=self).count()
    if unverified_errors:
        logger.error("Job %r has unverified TextLogErrors", self)
        return False
    logger.info("Job %r is fully verified", self)
    return True
Determine if this Job is fully verified based on the state of its Errors .
16,698
def update_after_verification(self, user):
    """Create an autoclassification JobNote once a sheriff verified the job.

    No-op unless the job is fully verified; also skipped when the job
    already has a note with some other (non-autoclassified)
    classification.
    """
    if not self.is_fully_verified():
        return
    classification = 'autoclassified intermittent'
    already_classified = (JobNote.objects
                          .filter(job=self)
                          .exclude(failure_classification__name=classification)
                          .exists())
    if already_classified:
        return
    JobNote.create_autoclassify_job_note(job=self, user=user)
Updates a job s state after being verified by a sheriff
16,699
def get_manual_classification_line(self):
    """If this Job has exactly one classifiable TextLogError, return it.

    Returns None unless all of the following hold: the job has a single
    TextLogError, a single useful search result, and a structured failure
    line that is a test result with test/status/expected populated.
    """
    try:
        text_log_error = TextLogError.objects.get(step__job=self)
    except (TextLogError.DoesNotExist, TextLogError.MultipleObjectsReturned):
        return None
    # Imported here — presumably to avoid a circular import; confirm.
    from treeherder.model.error_summary import get_useful_search_results
    search_results = get_useful_search_results(self)
    if len(search_results) != 1:
        return None
    # The error line must map onto a structured failure line...
    failure_line = text_log_error.get_failure_line()
    if failure_line is None:
        return None
    # ...and that line must be a fully-populated test result.
    if not (failure_line.action == "test_result" and
            failure_line.test and
            failure_line.status and
            failure_line.expected):
        return None
    return text_log_error
If this Job has a single TextLogError line return that TextLogError .