idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
17,000 | def calculate_weights ( correlation_matrix , min_wt ) : # fill diagonal of correlation_matrix with np.nan np . fill_diagonal ( correlation_matrix . values , np . nan ) # remove negative values correlation_matrix = correlation_matrix . clip ( lower = 0 ) # get average correlation for each profile (will ignore NaN) raw_weights = correlation_matrix . mean ( axis = 1 ) # threshold weights raw_weights = raw_weights . clip ( lower = min_wt ) # normalize raw_weights so that they add to 1 weights = raw_weights / sum ( raw_weights ) return raw_weights . round ( rounding_precision ) , weights . round ( rounding_precision ) | Calculate a weight for each profile based on its correlation to other replicates . Negative correlations are clipped to 0 and weights are clipped to be min_wt at the least . | 158 | 36 |
17,001 | def agg_wt_avg ( mat , min_wt = 0.01 , corr_metric = 'spearman' ) : assert mat . shape [ 1 ] > 0 , "mat is empty! mat: {}" . format ( mat ) if mat . shape [ 1 ] == 1 : out_sig = mat upper_tri_df = None raw_weights = None weights = None else : assert corr_metric in [ "spearman" , "pearson" ] # Make correlation matrix column wise corr_mat = mat . corr ( method = corr_metric ) # Save the values in the upper triangle upper_tri_df = get_upper_triangle ( corr_mat ) # Calculate weight per replicate raw_weights , weights = calculate_weights ( corr_mat , min_wt ) # Apply weights to values weighted_values = mat * weights out_sig = weighted_values . sum ( axis = 1 ) return out_sig , upper_tri_df , raw_weights , weights | Aggregate a set of replicate profiles into a single signature using a weighted average . | 226 | 16 |
17,002 | def get_file_list ( wildcard ) : files = glob . glob ( os . path . expanduser ( wildcard ) ) return files | Search for files to be concatenated . Currently very basic but could expand to be more sophisticated . | 30 | 20 |
17,003 | def hstack ( gctoos , remove_all_metadata_fields = False , error_report_file = None , fields_to_remove = [ ] , reset_ids = False ) : # Separate each gctoo into its component dfs row_meta_dfs = [ ] col_meta_dfs = [ ] data_dfs = [ ] srcs = [ ] for g in gctoos : row_meta_dfs . append ( g . row_metadata_df ) col_meta_dfs . append ( g . col_metadata_df ) data_dfs . append ( g . data_df ) srcs . append ( g . src ) logger . debug ( "shapes of row_meta_dfs: {}" . format ( [ x . shape for x in row_meta_dfs ] ) ) # Concatenate row metadata all_row_metadata_df = assemble_common_meta ( row_meta_dfs , fields_to_remove , srcs , remove_all_metadata_fields , error_report_file ) # Concatenate col metadata all_col_metadata_df = assemble_concatenated_meta ( col_meta_dfs , remove_all_metadata_fields ) # Concatenate the data_dfs all_data_df = assemble_data ( data_dfs , "horiz" ) # Make sure df shapes are correct assert all_data_df . shape [ 0 ] == all_row_metadata_df . shape [ 0 ] , "Number of rows in metadata does not match number of rows in data - all_data_df.shape[0]: {} all_row_metadata_df.shape[0]: {}" . format ( all_data_df . shape [ 0 ] , all_row_metadata_df . shape [ 0 ] ) assert all_data_df . shape [ 1 ] == all_col_metadata_df . shape [ 0 ] , "Number of columns in data does not match number of columns metadata - all_data_df.shape[1]: {} all_col_metadata_df.shape[0]: {}" . format ( all_data_df . shape [ 1 ] , all_col_metadata_df . shape [ 0 ] ) # If requested, reset sample ids to be unique integers and move old sample # ids into column metadata if reset_ids : do_reset_ids ( all_col_metadata_df , all_data_df , "horiz" ) logger . info ( "Build GCToo of all..." ) concated = GCToo . GCToo ( row_metadata_df = all_row_metadata_df , col_metadata_df = all_col_metadata_df , data_df = all_data_df ) return concated | Horizontally concatenate gctoos . | 610 | 10 |
17,004 | def assemble_concatenated_meta ( concated_meta_dfs , remove_all_metadata_fields ) : # Concatenate the concated_meta_dfs if remove_all_metadata_fields : for df in concated_meta_dfs : df . drop ( df . columns , axis = 1 , inplace = True ) all_concated_meta_df = pd . concat ( concated_meta_dfs , axis = 0 ) # Sanity check: the number of rows in all_concated_meta_df should correspond # to the sum of the number of rows in the input dfs n_rows = all_concated_meta_df . shape [ 0 ] logger . debug ( "all_concated_meta_df.shape[0]: {}" . format ( n_rows ) ) n_rows_cumulative = sum ( [ df . shape [ 0 ] for df in concated_meta_dfs ] ) assert n_rows == n_rows_cumulative # Sort the index and columns all_concated_meta_df_sorted = all_concated_meta_df . sort_index ( axis = 0 ) . sort_index ( axis = 1 ) return all_concated_meta_df_sorted | Assemble the concatenated metadata dfs together . For example if horizontally concatenating the concatenated metadata dfs are the column metadata dfs . Both indices are sorted . | 285 | 37 |
17,005 | def assemble_data ( data_dfs , concat_direction ) : if concat_direction == "horiz" : # Concatenate the data_dfs horizontally all_data_df = pd . concat ( data_dfs , axis = 1 ) # Sanity check: the number of columns in all_data_df should # correspond to the sum of the number of columns in the input dfs n_cols = all_data_df . shape [ 1 ] logger . debug ( "all_data_df.shape[1]: {}" . format ( n_cols ) ) n_cols_cumulative = sum ( [ df . shape [ 1 ] for df in data_dfs ] ) assert n_cols == n_cols_cumulative elif concat_direction == "vert" : # Concatenate the data_dfs vertically all_data_df = pd . concat ( data_dfs , axis = 0 ) # Sanity check: the number of rows in all_data_df should # correspond to the sum of the number of rows in the input dfs n_rows = all_data_df . shape [ 0 ] logger . debug ( "all_data_df.shape[0]: {}" . format ( n_rows ) ) n_rows_cumulative = sum ( [ df . shape [ 0 ] for df in data_dfs ] ) assert n_rows == n_rows_cumulative # Sort both indices all_data_df_sorted = all_data_df . sort_index ( axis = 0 ) . sort_index ( axis = 1 ) return all_data_df_sorted | Assemble the data dfs together . Both indices are sorted . | 364 | 13 |
17,006 | def do_reset_ids ( concatenated_meta_df , data_df , concat_direction ) : if concat_direction == "horiz" : # Make sure cids agree between data_df and concatenated_meta_df assert concatenated_meta_df . index . equals ( data_df . columns ) , ( "cids in concatenated_meta_df do not agree with cids in data_df." ) # Reset cids in concatenated_meta_df reset_ids_in_meta_df ( concatenated_meta_df ) # Replace cids in data_df with the new ones from concatenated_meta_df # (just an array of unique integers, zero-indexed) data_df . columns = pd . Index ( concatenated_meta_df . index . values ) elif concat_direction == "vert" : # Make sure rids agree between data_df and concatenated_meta_df assert concatenated_meta_df . index . equals ( data_df . index ) , ( "rids in concatenated_meta_df do not agree with rids in data_df." ) # Reset rids in concatenated_meta_df reset_ids_in_meta_df ( concatenated_meta_df ) # Replace rids in data_df with the new ones from concatenated_meta_df # (just an array of unique integers, zero-indexed) data_df . index = pd . Index ( concatenated_meta_df . index . values ) | Reset ids in concatenated metadata and data dfs to unique integers and save the old ids in a metadata column . | 345 | 27 |
17,007 | def reset_ids_in_meta_df ( meta_df ) : # Record original index name, and then change it so that the column that it # becomes will be appropriately named original_index_name = meta_df . index . name meta_df . index . name = "old_id" # Reset index meta_df . reset_index ( inplace = True ) # Change the index name back to what it was meta_df . index . name = original_index_name | Meta_df is modified inplace . | 103 | 8 |
17,008 | def subset_gctoo ( gctoo , row_bool = None , col_bool = None , rid = None , cid = None , ridx = None , cidx = None , exclude_rid = None , exclude_cid = None ) : assert sum ( [ ( rid is not None ) , ( row_bool is not None ) , ( ridx is not None ) ] ) <= 1 , ( "Only one of rid, row_bool, and ridx can be provided." ) assert sum ( [ ( cid is not None ) , ( col_bool is not None ) , ( cidx is not None ) ] ) <= 1 , ( "Only one of cid, col_bool, and cidx can be provided." ) # Figure out what rows and columns to keep rows_to_keep = get_rows_to_keep ( gctoo , rid , row_bool , ridx , exclude_rid ) cols_to_keep = get_cols_to_keep ( gctoo , cid , col_bool , cidx , exclude_cid ) # Convert labels to boolean array to preserve order rows_to_keep_bools = gctoo . data_df . index . isin ( rows_to_keep ) cols_to_keep_bools = gctoo . data_df . columns . isin ( cols_to_keep ) # Make the output gct out_gctoo = GCToo . GCToo ( src = gctoo . src , version = gctoo . version , data_df = gctoo . data_df . loc [ rows_to_keep_bools , cols_to_keep_bools ] , row_metadata_df = gctoo . row_metadata_df . loc [ rows_to_keep_bools , : ] , col_metadata_df = gctoo . col_metadata_df . loc [ cols_to_keep_bools , : ] ) assert out_gctoo . data_df . size > 0 , "Subsetting yielded an empty gct!" logger . info ( ( "Initial GCToo with {} rows and {} columns subsetted down to " + "{} rows and {} columns." ) . format ( gctoo . data_df . shape [ 0 ] , gctoo . data_df . shape [ 1 ] , out_gctoo . data_df . shape [ 0 ] , out_gctoo . data_df . shape [ 1 ] ) ) return out_gctoo | Extract a subset of data from a GCToo object in a variety of ways . The order of rows and columns will be preserved . | 557 | 28 |
17,009 | def get_rows_to_keep ( gctoo , rid = None , row_bool = None , ridx = None , exclude_rid = None ) : # Use rid if provided if rid is not None : assert type ( rid ) == list , "rid must be a list. rid: {}" . format ( rid ) rows_to_keep = [ gctoo_row for gctoo_row in gctoo . data_df . index if gctoo_row in rid ] # Tell user if some rids not found num_missing_rids = len ( rid ) - len ( rows_to_keep ) if num_missing_rids != 0 : logger . info ( "{} rids were not found in the GCT." . format ( num_missing_rids ) ) # Use row_bool if provided elif row_bool is not None : assert len ( row_bool ) == gctoo . data_df . shape [ 0 ] , ( "row_bool must have length equal to gctoo.data_df.shape[0]. " + "len(row_bool): {}, gctoo.data_df.shape[0]: {}" . format ( len ( row_bool ) , gctoo . data_df . shape [ 0 ] ) ) rows_to_keep = gctoo . data_df . index [ row_bool ] . values # Use ridx if provided elif ridx is not None : assert type ( ridx [ 0 ] ) is int , ( "ridx must be a list of integers. ridx[0]: {}, " + "type(ridx[0]): {}" ) . format ( ridx [ 0 ] , type ( ridx [ 0 ] ) ) assert max ( ridx ) <= gctoo . data_df . shape [ 0 ] , ( "ridx contains an integer larger than the number of rows in " + "the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}" ) . format ( max ( ridx ) , gctoo . data_df . shape [ 0 ] ) rows_to_keep = gctoo . data_df . index [ ridx ] . values # If rid, row_bool, and ridx are all None, return all rows else : rows_to_keep = gctoo . data_df . index . values # Use exclude_rid if provided if exclude_rid is not None : # Keep only those rows that are not in exclude_rid rows_to_keep = [ row_to_keep for row_to_keep in rows_to_keep if row_to_keep not in exclude_rid ] return rows_to_keep | Figure out based on the possible row inputs which rows to keep . | 592 | 13 |
17,010 | def get_cols_to_keep ( gctoo , cid = None , col_bool = None , cidx = None , exclude_cid = None ) : # Use cid if provided if cid is not None : assert type ( cid ) == list , "cid must be a list. cid: {}" . format ( cid ) cols_to_keep = [ gctoo_col for gctoo_col in gctoo . data_df . columns if gctoo_col in cid ] # Tell user if some cids not found num_missing_cids = len ( cid ) - len ( cols_to_keep ) if num_missing_cids != 0 : logger . info ( "{} cids were not found in the GCT." . format ( num_missing_cids ) ) # Use col_bool if provided elif col_bool is not None : assert len ( col_bool ) == gctoo . data_df . shape [ 1 ] , ( "col_bool must have length equal to gctoo.data_df.shape[1]. " + "len(col_bool): {}, gctoo.data_df.shape[1]: {}" . format ( len ( col_bool ) , gctoo . data_df . shape [ 1 ] ) ) cols_to_keep = gctoo . data_df . columns [ col_bool ] . values # Use cidx if provided elif cidx is not None : assert type ( cidx [ 0 ] ) is int , ( "cidx must be a list of integers. cidx[0]: {}, " + "type(cidx[0]): {}" ) . format ( cidx [ 0 ] , type ( cidx [ 0 ] ) ) assert max ( cidx ) <= gctoo . data_df . shape [ 1 ] , ( "cidx contains an integer larger than the number of columns in " + "the GCToo. max(cidx): {}, gctoo.data_df.shape[1]: {}" ) . format ( max ( cidx ) , gctoo . data_df . shape [ 1 ] ) cols_to_keep = gctoo . data_df . columns [ cidx ] . values # If cid, col_bool, and cidx are all None, return all columns else : cols_to_keep = gctoo . data_df . columns . values # Use exclude_cid if provided if exclude_cid is not None : # Keep only those columns that are not in exclude_cid cols_to_keep = [ col_to_keep for col_to_keep in cols_to_keep if col_to_keep not in exclude_cid ] return cols_to_keep | Figure out based on the possible columns inputs which columns to keep . | 631 | 13 |
17,011 | def read ( in_path ) : assert os . path . exists ( in_path ) , "The following GRP file can't be found. in_path: {}" . format ( in_path ) with open ( in_path , "r" ) as f : lines = f . readlines ( ) # need the second conditional to ignore comment lines grp = [ line . strip ( ) for line in lines if line and not re . match ( "^#" , line ) ] return grp | Read a grp file at the path specified by in_path . | 107 | 14 |
17,012 | def write ( grp , out_path ) : with open ( out_path , "w" ) as f : for x in grp : f . write ( str ( x ) + "\n" ) | Write a GRP to a text file . | 44 | 9 |
17,013 | def make_specified_size_gctoo ( og_gctoo , num_entries , dim ) : assert dim in [ "row" , "col" ] , "dim specified must be either 'row' or 'col'" dim_index = 0 if "row" == dim else 1 assert num_entries <= og_gctoo . data_df . shape [ dim_index ] , ( "number of entries must be smaller than dimension being " "subsetted - num_entries: {} dim: {} dim_index: {} og_gctoo.data_df.shape[dim_index]: {}" . format ( num_entries , dim , dim_index , og_gctoo . data_df . shape [ dim_index ] ) ) if dim == "col" : columns = [ x for x in og_gctoo . data_df . columns . values ] numpy . random . shuffle ( columns ) columns = columns [ 0 : num_entries ] rows = og_gctoo . data_df . index . values else : rows = [ x for x in og_gctoo . data_df . index . values ] numpy . random . shuffle ( rows ) rows = rows [ 0 : num_entries ] columns = og_gctoo . data_df . columns . values new_data_df = og_gctoo . data_df . loc [ rows , columns ] new_row_meta = og_gctoo . row_metadata_df . loc [ rows ] new_col_meta = og_gctoo . col_metadata_df . loc [ columns ] logger . debug ( "after slice - new_col_meta.shape: {} new_row_meta.shape: {}" . format ( new_col_meta . shape , new_row_meta . shape ) ) # make & return new gctoo instance new_gctoo = GCToo . GCToo ( data_df = new_data_df , row_metadata_df = new_row_meta , col_metadata_df = new_col_meta ) return new_gctoo | Subsets a GCToo instance along either rows or columns to obtain a specified size . | 476 | 18 |
17,014 | def write ( gctoo_object , out_file_name , convert_back_to_neg_666 = True , gzip_compression_level = 6 , max_chunk_kb = 1024 , matrix_dtype = numpy . float32 ) : # make sure out file has a .gctx suffix gctx_out_name = add_gctx_to_out_name ( out_file_name ) # open an hdf5 file to write to hdf5_out = h5py . File ( gctx_out_name , "w" ) # write version write_version ( hdf5_out ) # write src write_src ( hdf5_out , gctoo_object , gctx_out_name ) # set chunk size for data matrix elem_per_kb = calculate_elem_per_kb ( max_chunk_kb , matrix_dtype ) chunk_size = set_data_matrix_chunk_size ( gctoo_object . data_df . shape , max_chunk_kb , elem_per_kb ) # write data matrix hdf5_out . create_dataset ( data_matrix_node , data = gctoo_object . data_df . transpose ( ) . values , dtype = matrix_dtype ) # write col metadata write_metadata ( hdf5_out , "col" , gctoo_object . col_metadata_df , convert_back_to_neg_666 , gzip_compression = gzip_compression_level ) # write row metadata write_metadata ( hdf5_out , "row" , gctoo_object . row_metadata_df , convert_back_to_neg_666 , gzip_compression = gzip_compression_level ) # close gctx file hdf5_out . close ( ) | Writes a GCToo instance to specified file . | 414 | 11 |
17,015 | def write_src ( hdf5_out , gctoo_object , out_file_name ) : if gctoo_object . src == None : hdf5_out . attrs [ src_attr ] = out_file_name else : hdf5_out . attrs [ src_attr ] = gctoo_object . src | Writes src as attribute of gctx out file . | 77 | 11 |
17,016 | def calculate_elem_per_kb ( max_chunk_kb , matrix_dtype ) : if matrix_dtype == numpy . float32 : return ( max_chunk_kb * 8 ) / 32 elif matrix_dtype == numpy . float64 : return ( max_chunk_kb * 8 ) / 64 else : msg = "Invalid matrix_dtype: {}; only numpy.float32 and numpy.float64 are currently supported" . format ( matrix_dtype ) logger . error ( msg ) raise Exception ( "write_gctx.calculate_elem_per_kb " + msg ) | Calculates the number of elem per kb depending on the max chunk size set . | 140 | 18 |
17,017 | def set_data_matrix_chunk_size ( df_shape , max_chunk_kb , elem_per_kb ) : row_chunk_size = min ( df_shape [ 0 ] , 1000 ) col_chunk_size = min ( ( ( max_chunk_kb * elem_per_kb ) // row_chunk_size ) , df_shape [ 1 ] ) return ( row_chunk_size , col_chunk_size ) | Sets chunk size to use for writing data matrix . Note . Calculation used here is for compatibility with cmapM and cmapR . | 107 | 29 |
17,018 | def convert ( self , form ) : if not is_lazy_user ( form . instance ) : raise NotLazyError ( 'You cannot convert a non-lazy user' ) user = form . save ( ) # We need to remove the LazyUser instance assocated with the # newly-converted user self . filter ( user = user ) . delete ( ) converted . send ( self , user = user ) return user | Convert a lazy user to a non - lazy one . The form passed in is expected to be a ModelForm instance bound to the user to be converted . | 91 | 32 |
17,019 | def generate_username ( self , user_class ) : m = getattr ( user_class , 'generate_username' , None ) if m : return m ( ) else : max_length = user_class . _meta . get_field ( self . username_field ) . max_length return uuid . uuid4 ( ) . hex [ : max_length ] | Generate a new username for a user | 81 | 8 |
17,020 | def is_lazy_user ( user ) : # Anonymous users are not lazy. if user . is_anonymous : return False # Check the user backend. If the lazy signup backend # authenticated them, then the user is lazy. backend = getattr ( user , 'backend' , None ) if backend == 'lazysignup.backends.LazySignupBackend' : return True # Otherwise, we have to fall back to checking the database. from lazysignup . models import LazyUser return bool ( LazyUser . objects . filter ( user = user ) . count ( ) > 0 ) | Return True if the passed user is a lazy user . | 133 | 11 |
17,021 | def add ( queue_name , payload = None , content_type = None , source = None , task_id = None , build_id = None , release_id = None , run_id = None ) : if task_id : task = WorkQueue . query . filter_by ( task_id = task_id ) . first ( ) if task : return task . task_id else : task_id = uuid . uuid4 ( ) . hex if payload and not content_type and not isinstance ( payload , basestring ) : payload = json . dumps ( payload ) content_type = 'application/json' now = datetime . datetime . utcnow ( ) task = WorkQueue ( task_id = task_id , queue_name = queue_name , eta = now , source = source , build_id = build_id , release_id = release_id , run_id = run_id , payload = payload , content_type = content_type ) db . session . add ( task ) return task . task_id | Adds a work item to a queue . | 228 | 8 |
17,022 | def _task_to_dict ( task ) : payload = task . payload if payload and task . content_type == 'application/json' : payload = json . loads ( payload ) return dict ( task_id = task . task_id , queue_name = task . queue_name , eta = _datetime_to_epoch_seconds ( task . eta ) , source = task . source , created = _datetime_to_epoch_seconds ( task . created ) , lease_attempts = task . lease_attempts , last_lease = _datetime_to_epoch_seconds ( task . last_lease ) , payload = payload , content_type = task . content_type ) | Converts a WorkQueue to a JSON - able dictionary . | 156 | 12 |
17,023 | def lease ( queue_name , owner , count = 1 , timeout_seconds = 60 ) : now = datetime . datetime . utcnow ( ) query = ( WorkQueue . query . filter_by ( queue_name = queue_name , status = WorkQueue . LIVE ) . filter ( WorkQueue . eta <= now ) . order_by ( WorkQueue . eta ) . with_lockmode ( 'update' ) . limit ( count ) ) task_list = query . all ( ) if not task_list : return None next_eta = now + datetime . timedelta ( seconds = timeout_seconds ) for task in task_list : task . eta = next_eta task . lease_attempts += 1 task . last_owner = owner task . last_lease = now task . heartbeat = None task . heartbeat_number = 0 db . session . add ( task ) return [ _task_to_dict ( task ) for task in task_list ] | Leases a work item from a queue usually the oldest task available . | 209 | 14 |
17,024 | def _get_task_with_policy ( queue_name , task_id , owner ) : now = datetime . datetime . utcnow ( ) task = ( WorkQueue . query . filter_by ( queue_name = queue_name , task_id = task_id ) . with_lockmode ( 'update' ) . first ( ) ) if not task : raise TaskDoesNotExistError ( 'task_id=%r' % task_id ) # Lease delta should be positive, meaning it has not yet expired! lease_delta = now - task . eta if lease_delta > datetime . timedelta ( 0 ) : db . session . rollback ( ) raise LeaseExpiredError ( 'queue=%r, task_id=%r expired %s' % ( task . queue_name , task_id , lease_delta ) ) if task . last_owner != owner : db . session . rollback ( ) raise NotOwnerError ( 'queue=%r, task_id=%r, owner=%r' % ( task . queue_name , task_id , task . last_owner ) ) return task | Fetches the specified task and enforces ownership policy . | 253 | 12 |
17,025 | def heartbeat ( queue_name , task_id , owner , message , index ) : task = _get_task_with_policy ( queue_name , task_id , owner ) if task . heartbeat_number > index : return False task . heartbeat = message task . heartbeat_number = index # Extend the lease by the time of the last lease. now = datetime . datetime . utcnow ( ) timeout_delta = task . eta - task . last_lease task . eta = now + timeout_delta task . last_lease = now db . session . add ( task ) signals . task_updated . send ( app , task = task ) return True | Sets the heartbeat status of the task and extends its lease . | 144 | 13 |
17,026 | def finish ( queue_name , task_id , owner , error = False ) : task = _get_task_with_policy ( queue_name , task_id , owner ) if not task . status == WorkQueue . LIVE : logging . warning ( 'Finishing already dead task. queue=%r, task_id=%r, ' 'owner=%r, status=%r' , task . queue_name , task_id , owner , task . status ) return False if not error : task . status = WorkQueue . DONE else : task . status = WorkQueue . ERROR task . finished = datetime . datetime . utcnow ( ) db . session . add ( task ) signals . task_updated . send ( app , task = task ) return True | Marks a work item on a queue as finished . | 166 | 11 |
17,027 | def cancel ( * * kwargs ) : task_list = _query ( * * kwargs ) for task in task_list : task . status = WorkQueue . CANCELED task . finished = datetime . datetime . utcnow ( ) db . session . add ( task ) return len ( task_list ) | Cancels work items based on their criteria . | 71 | 10 |
17,028 | def handle_add ( queue_name ) : source = request . form . get ( 'source' , request . remote_addr , type = str ) try : task_id = work_queue . add ( queue_name , payload = request . form . get ( 'payload' , type = str ) , content_type = request . form . get ( 'content_type' , type = str ) , source = source , task_id = request . form . get ( 'task_id' , type = str ) ) except work_queue . Error , e : return utils . jsonify_error ( e ) db . session . commit ( ) logging . info ( 'Task added: queue=%r, task_id=%r, source=%r' , queue_name , task_id , source ) return flask . jsonify ( task_id = task_id ) | Adds a task to a queue . | 189 | 7 |
17,029 | def handle_lease ( queue_name ) : owner = request . form . get ( 'owner' , request . remote_addr , type = str ) try : task_list = work_queue . lease ( queue_name , owner , request . form . get ( 'count' , 1 , type = int ) , request . form . get ( 'timeout' , 60 , type = int ) ) except work_queue . Error , e : return utils . jsonify_error ( e ) if not task_list : return flask . jsonify ( tasks = [ ] ) db . session . commit ( ) task_ids = [ t [ 'task_id' ] for t in task_list ] logging . debug ( 'Task leased: queue=%r, task_ids=%r, owner=%r' , queue_name , task_ids , owner ) return flask . jsonify ( tasks = task_list ) | Leases a task from a queue . | 196 | 8 |
17,030 | def handle_heartbeat ( queue_name ) : task_id = request . form . get ( 'task_id' , type = str ) message = request . form . get ( 'message' , type = str ) index = request . form . get ( 'index' , type = int ) try : work_queue . heartbeat ( queue_name , task_id , request . form . get ( 'owner' , request . remote_addr , type = str ) , message , index ) except work_queue . Error , e : return utils . jsonify_error ( e ) db . session . commit ( ) logging . debug ( 'Task heartbeat: queue=%r, task_id=%r, message=%r, index=%d' , queue_name , task_id , message , index ) return flask . jsonify ( success = True ) | Updates the heartbeat message for a task . | 184 | 9 |
17,031 | def handle_finish ( queue_name ) : task_id = request . form . get ( 'task_id' , type = str ) owner = request . form . get ( 'owner' , request . remote_addr , type = str ) error = request . form . get ( 'error' , type = str ) is not None try : work_queue . finish ( queue_name , task_id , owner , error = error ) except work_queue . Error , e : return utils . jsonify_error ( e ) db . session . commit ( ) logging . debug ( 'Task finished: queue=%r, task_id=%r, owner=%r, error=%r' , queue_name , task_id , owner , error ) return flask . jsonify ( success = True ) | Marks a task on a queue as finished . | 174 | 10 |
17,032 | def view_all_work_queues ( ) : count_list = list ( db . session . query ( work_queue . WorkQueue . queue_name , work_queue . WorkQueue . status , func . count ( work_queue . WorkQueue . task_id ) ) . group_by ( work_queue . WorkQueue . queue_name , work_queue . WorkQueue . status ) ) queue_dict = { } for name , status , count in count_list : queue_dict [ ( name , status ) ] = dict ( name = name , status = status , count = count ) max_created_list = list ( db . session . query ( work_queue . WorkQueue . queue_name , work_queue . WorkQueue . status , func . max ( work_queue . WorkQueue . created ) ) . group_by ( work_queue . WorkQueue . queue_name , work_queue . WorkQueue . status ) ) for name , status , newest_created in max_created_list : queue_dict [ ( name , status ) ] [ 'newest_created' ] = newest_created min_eta_list = list ( db . session . query ( work_queue . WorkQueue . queue_name , work_queue . WorkQueue . status , func . min ( work_queue . WorkQueue . eta ) ) . group_by ( work_queue . WorkQueue . queue_name , work_queue . WorkQueue . status ) ) for name , status , oldest_eta in min_eta_list : queue_dict [ ( name , status ) ] [ 'oldest_eta' ] = oldest_eta queue_list = list ( queue_dict . values ( ) ) queue_list . sort ( key = lambda x : ( x [ 'name' ] , x [ 'status' ] ) ) context = dict ( queue_list = queue_list , ) return render_template ( 'view_work_queue_index.html' , * * context ) | Page for viewing the index of all active work queues . | 426 | 11 |
17,033 | def manage_work_queue ( queue_name ) : modify_form = forms . ModifyWorkQueueTaskForm ( ) if modify_form . validate_on_submit ( ) : primary_key = ( modify_form . task_id . data , queue_name ) task = work_queue . WorkQueue . query . get ( primary_key ) if task : logging . info ( 'Action: %s task_id=%r' , modify_form . action . data , modify_form . task_id . data ) if modify_form . action . data == 'retry' : task . status = work_queue . WorkQueue . LIVE task . lease_attempts = 0 task . heartbeat = 'Retrying ...' db . session . add ( task ) else : db . session . delete ( task ) db . session . commit ( ) else : logging . warning ( 'Could not find task_id=%r to delete' , modify_form . task_id . data ) return redirect ( url_for ( 'manage_work_queue' , queue_name = queue_name ) ) query = ( work_queue . WorkQueue . query . filter_by ( queue_name = queue_name ) . order_by ( work_queue . WorkQueue . created . desc ( ) ) ) status = request . args . get ( 'status' , '' , type = str ) . lower ( ) if status in work_queue . WorkQueue . STATES : query = query . filter_by ( status = status ) else : status = None item_list = list ( query . limit ( 100 ) ) work_list = [ ] for item in item_list : form = forms . ModifyWorkQueueTaskForm ( ) form . task_id . data = item . task_id form . delete . data = True work_list . append ( ( item , form ) ) context = dict ( queue_name = queue_name , status = status , work_list = work_list , ) return render_template ( 'view_work_queue.html' , * * context ) | Page for viewing the contents of a work queue . | 445 | 10 |
17,034 | def retryable_transaction ( attempts = 3 , exceptions = ( OperationalError , ) ) : assert len ( exceptions ) > 0 assert attempts > 0 def wrapper ( f ) : @ functools . wraps ( f ) def wrapped ( * args , * * kwargs ) : for i in xrange ( attempts ) : try : return f ( * args , * * kwargs ) except exceptions , e : if i == ( attempts - 1 ) : raise logging . warning ( 'Retryable error in transaction on attempt %d. %s: %s' , i + 1 , e . __class__ . __name__ , e ) db . session . rollback ( ) return wrapped return wrapper | Decorator retries a function when expected exceptions are raised . | 150 | 13 |
17,035 | def jsonify_assert ( asserted , message , status_code = 400 ) : if asserted : return try : raise AssertionError ( message ) except AssertionError , e : stack = traceback . extract_stack ( ) stack . pop ( ) logging . error ( 'Assertion failed: %s\n%s' , str ( e ) , '' . join ( traceback . format_list ( stack ) ) ) abort ( jsonify_error ( e , status_code = status_code ) ) | Asserts something is true aborts the request if not . | 110 | 13 |
17,036 | def jsonify_error ( message_or_exception , status_code = 400 ) : if isinstance ( message_or_exception , Exception ) : message = '%s: %s' % ( message_or_exception . __class__ . __name__ , message_or_exception ) else : message = message_or_exception logging . debug ( 'Returning status=%s, error message: %s' , status_code , message ) response = jsonify ( error = message ) response . status_code = status_code return response | Returns a JSON payload that indicates the request had an error . | 122 | 12 |
17,037 | def ignore_exceptions ( f ) : @ functools . wraps ( f ) def wrapped ( * args , * * kwargs ) : try : return f ( * args , * * kwargs ) except : logging . exception ( "Ignoring exception in %r" , f ) return wrapped | Decorator catches and ignores any exceptions raised by this function . | 64 | 13 |
17,038 | def timesince ( when ) : if not when : return '' now = datetime . datetime . utcnow ( ) if now > when : diff = now - when suffix = 'ago' else : diff = when - now suffix = 'from now' periods = ( ( diff . days / 365 , 'year' , 'years' ) , ( diff . days / 30 , 'month' , 'months' ) , ( diff . days / 7 , 'week' , 'weeks' ) , ( diff . days , 'day' , 'days' ) , ( diff . seconds / 3600 , 'hour' , 'hours' ) , ( diff . seconds / 60 , 'minute' , 'minutes' ) , ( diff . seconds , 'second' , 'seconds' ) , ) for period , singular , plural in periods : if period : return '%d %s %s' % ( period , singular if period == 1 else plural , suffix ) return 'now' | Returns string representing time since or time until . | 208 | 9 |
17,039 | def human_uuid ( ) : return base64 . b32encode ( hashlib . sha1 ( uuid . uuid4 ( ) . bytes ) . digest ( ) ) . lower ( ) . strip ( '=' ) | Returns a good UUID for using as a human readable string . | 50 | 13 |
17,040 | def get_deployment_timestamp ( ) : # TODO: Support other deployment situations. if os . environ . get ( 'SERVER_SOFTWARE' , '' ) . startswith ( 'Google App Engine' ) : version_id = os . environ . get ( 'CURRENT_VERSION_ID' ) major_version , timestamp = version_id . split ( '.' , 1 ) return timestamp return 'test' | Returns a unique string represeting the current deployment . | 94 | 11 |
17,041 | def real_main ( new_url = None , baseline_url = None , upload_build_id = None , upload_release_name = None ) : coordinator = workers . get_coordinator ( ) fetch_worker . register ( coordinator ) coordinator . start ( ) item = UrlPairDiff ( new_url , baseline_url , upload_build_id , upload_release_name = upload_release_name , heartbeat = workers . PrintWorkflow ) item . root = True coordinator . input_queue . put ( item ) coordinator . wait_one ( ) coordinator . stop ( ) coordinator . join ( ) | Runs the url_pair_diff . | 131 | 9 |
17,042 | def fetch_internal ( item , request ) : # Break client dependence on Flask if internal fetches aren't being used. from flask import make_response from werkzeug . test import EnvironBuilder # Break circular dependencies. from dpxdt . server import app # Attempt to create a Flask environment from a urllib2.Request object. environ_base = { 'REMOTE_ADDR' : '127.0.0.1' , } # The data object may be a generator from poster.multipart_encode, so we # need to convert that to raw bytes here. Unfortunately EnvironBuilder # only works with the whole request buffered in memory. data = request . get_data ( ) if data and not isinstance ( data , str ) : data = '' . join ( list ( data ) ) builder = EnvironBuilder ( path = request . get_selector ( ) , base_url = '%s://%s' % ( request . get_type ( ) , request . get_host ( ) ) , method = request . get_method ( ) , data = data , headers = request . header_items ( ) , environ_base = environ_base ) with app . request_context ( builder . get_environ ( ) ) : response = make_response ( app . dispatch_request ( ) ) LOGGER . info ( '"%s" %s via internal routing' , request . get_selector ( ) , response . status_code ) item . status_code = response . status_code item . content_type = response . mimetype if item . result_path : # TODO: Is there a better way to access the response stream? with open ( item . result_path , 'wb' ) as result_file : for piece in response . iter_encoded ( ) : result_file . write ( piece ) else : item . data = response . get_data ( ) return item | Fetches the given request by using the local Flask context . | 416 | 13 |
17,043 | def fetch_normal ( item , request ) : try : conn = urllib2 . urlopen ( request , timeout = item . timeout_seconds ) except urllib2 . HTTPError , e : conn = e except ( urllib2 . URLError , ssl . SSLError ) , e : # TODO: Make this status more clear item . status_code = 400 return item try : item . status_code = conn . getcode ( ) item . content_type = conn . info ( ) . gettype ( ) if item . result_path : with open ( item . result_path , 'wb' ) as result_file : shutil . copyfileobj ( conn , result_file ) else : item . data = conn . read ( ) except socket . timeout , e : # TODO: Make this status more clear item . status_code = 400 return item finally : conn . close ( ) return item | Fetches the given request over HTTP . | 200 | 9 |
17,044 | def json ( self ) : if self . _data_json : return self . _data_json if not self . data or self . content_type != 'application/json' : return None self . _data_json = json . loads ( self . data ) return self . _data_json | Returns de - JSONed data or None if it's a different content type . | 63 | 16 |
17,045 | def maybe_imgur ( self , path ) : if not FLAGS . imgur_client_id : return path im = pyimgur . Imgur ( FLAGS . imgur_client_id ) uploaded_image = im . upload_image ( path ) return '%s %s' % ( path , uploaded_image . link ) | Uploads a file to imgur if requested via command line flags . | 73 | 14 |
17,046 | def real_main ( release_url = None , tests_json_path = None , upload_build_id = None , upload_release_name = None ) : coordinator = workers . get_coordinator ( ) fetch_worker . register ( coordinator ) coordinator . start ( ) data = open ( FLAGS . tests_json_path ) . read ( ) tests = load_tests ( data ) item = DiffMyImages ( release_url , tests , upload_build_id , upload_release_name , heartbeat = workers . PrintWorkflow ) item . root = True coordinator . input_queue . put ( item ) coordinator . wait_one ( ) coordinator . stop ( ) coordinator . join ( ) | Runs diff_my_images . | 149 | 8 |
17,047 | def clean_url ( url , force_scheme = None ) : # URL should be ASCII according to RFC 3986 url = str ( url ) # Collapse ../../ and related url_parts = urlparse . urlparse ( url ) path_parts = [ ] for part in url_parts . path . split ( '/' ) : if part == '.' : continue elif part == '..' : if path_parts : path_parts . pop ( ) else : path_parts . append ( part ) url_parts = list ( url_parts ) if force_scheme : url_parts [ 0 ] = force_scheme url_parts [ 2 ] = '/' . join ( path_parts ) if FLAGS . keep_query_string == False : url_parts [ 4 ] = '' # No query string url_parts [ 5 ] = '' # No path # Always have a trailing slash if not url_parts [ 2 ] : url_parts [ 2 ] = '/' return urlparse . urlunparse ( url_parts ) | Cleans the given URL . | 224 | 6 |
17,048 | def extract_urls ( url , data , unescape = HTMLParser . HTMLParser ( ) . unescape ) : parts = urlparse . urlparse ( url ) prefix = '%s://%s' % ( parts . scheme , parts . netloc ) accessed_dir = os . path . dirname ( parts . path ) if not accessed_dir . endswith ( '/' ) : accessed_dir += '/' for pattern , replacement in REPLACEMENT_REGEXES : fixed = replacement % { 'base' : prefix , 'accessed_dir' : accessed_dir , } data = re . sub ( pattern , fixed , data ) result = set ( ) for match in re . finditer ( MAYBE_HTML_URL_REGEX , data ) : found_url = unescape ( match . groupdict ( ) [ 'absurl' ] ) found_url = clean_url ( found_url , force_scheme = parts [ 0 ] ) # Use the main page's scheme result . add ( found_url ) return result | Extracts the URLs from an HTML document . | 223 | 10 |
17,049 | def prune_urls ( url_set , start_url , allowed_list , ignored_list ) : result = set ( ) for url in url_set : allowed = False for allow_url in allowed_list : if url . startswith ( allow_url ) : allowed = True break if not allowed : continue ignored = False for ignore_url in ignored_list : if url . startswith ( ignore_url ) : ignored = True break if ignored : continue prefix , suffix = ( url . rsplit ( '.' , 1 ) + [ '' ] ) [ : 2 ] if suffix . lower ( ) in IGNORE_SUFFIXES : continue result . add ( url ) return result | Prunes URLs that should be ignored . | 149 | 8 |
17,050 | def real_main ( start_url = None , ignore_prefixes = None , upload_build_id = None , upload_release_name = None ) : coordinator = workers . get_coordinator ( ) fetch_worker . register ( coordinator ) coordinator . start ( ) item = SiteDiff ( start_url = start_url , ignore_prefixes = ignore_prefixes , upload_build_id = upload_build_id , upload_release_name = upload_release_name , heartbeat = workers . PrintWorkflow ) item . root = True coordinator . input_queue . put ( item ) coordinator . wait_one ( ) coordinator . stop ( ) coordinator . join ( ) | Runs the site_diff . | 145 | 7 |
17,051 | def render_or_send ( func , message ) : if request . endpoint != func . func_name : mail . send ( message ) if ( current_user . is_authenticated ( ) and current_user . superuser ) : return render_template ( 'debug_email.html' , message = message ) | Renders an email message for debugging or actually sends it . | 67 | 12 |
17,052 | def send_ready_for_review ( build_id , release_name , release_number ) : build = models . Build . query . get ( build_id ) if not build . send_email : logging . debug ( 'Not sending ready for review email because build does not have ' 'email enabled. build_id=%r' , build . id ) return ops = operations . BuildOps ( build_id ) release , run_list , stats_dict , _ = ops . get_release ( release_name , release_number ) if not run_list : logging . debug ( 'Not sending ready for review email because there are ' ' no runs. build_id=%r, release_name=%r, release_number=%d' , build . id , release . name , release . number ) return title = '%s: %s - Ready for review' % ( build . name , release . name ) email_body = render_template ( 'email_ready_for_review.html' , build = build , release = release , run_list = run_list , stats_dict = stats_dict ) recipients = [ ] if build . email_alias : recipients . append ( build . email_alias ) else : for user in build . owners : recipients . append ( user . email_address ) if not recipients : logging . debug ( 'Not sending ready for review email because there are no ' 'recipients. build_id=%r, release_name=%r, release_number=%d' , build . id , release . name , release . number ) return message = Message ( title , recipients = recipients ) message . html = email_body logging . info ( 'Sending ready for review email for build_id=%r, ' 'release_name=%r, release_number=%d to %r' , build . id , release . name , release . number , recipients ) return render_or_send ( send_ready_for_review , message ) | Sends an email indicating that the release is ready for review . | 429 | 13 |
17,053 | def homepage ( ) : if current_user . is_authenticated ( ) : if not login_fresh ( ) : logging . debug ( 'User needs a fresh token' ) abort ( login . needs_refresh ( ) ) auth . claim_invitations ( current_user ) build_list = operations . UserOps ( current_user . get_id ( ) ) . get_builds ( ) return render_template ( 'home.html' , build_list = build_list , show_video_and_promo_text = app . config [ 'SHOW_VIDEO_AND_PROMO_TEXT' ] ) | Renders the homepage . | 135 | 5 |
17,054 | def new_build ( ) : form = forms . BuildForm ( ) if form . validate_on_submit ( ) : build = models . Build ( ) form . populate_obj ( build ) build . owners . append ( current_user ) db . session . add ( build ) db . session . flush ( ) auth . save_admin_log ( build , created_build = True , message = build . name ) db . session . commit ( ) operations . UserOps ( current_user . get_id ( ) ) . evict ( ) logging . info ( 'Created build via UI: build_id=%r, name=%r' , build . id , build . name ) return redirect ( url_for ( 'view_build' , id = build . id ) ) return render_template ( 'new_build.html' , build_form = form ) | Page for creating or editing a build . | 184 | 9 |
17,055 | def view_build ( ) : build = g . build page_size = min ( request . args . get ( 'page_size' , 10 , type = int ) , 50 ) offset = request . args . get ( 'offset' , 0 , type = int ) ops = operations . BuildOps ( build . id ) has_next_page , candidate_list , stats_counts = ops . get_candidates ( page_size , offset ) # Collate by release name, order releases by latest creation. Init stats. release_dict = { } created_dict = { } run_stats_dict = { } for candidate in candidate_list : release_list = release_dict . setdefault ( candidate . name , [ ] ) release_list . append ( candidate ) max_created = created_dict . get ( candidate . name , candidate . created ) created_dict [ candidate . name ] = max ( candidate . created , max_created ) run_stats_dict [ candidate . id ] = dict ( runs_total = 0 , runs_complete = 0 , runs_successful = 0 , runs_failed = 0 , runs_baseline = 0 , runs_pending = 0 ) # Sort each release by candidate number descending for release_list in release_dict . itervalues ( ) : release_list . sort ( key = lambda x : x . number , reverse = True ) # Sort all releases by created time descending release_age_list = [ ( value , key ) for key , value in created_dict . iteritems ( ) ] release_age_list . sort ( reverse = True ) release_name_list = [ key for _ , key in release_age_list ] # Count totals for each run state within that release. for candidate_id , status , count in stats_counts : stats_dict = run_stats_dict [ candidate_id ] for key in ops . get_stats_keys ( status ) : stats_dict [ key ] += count return render_template ( 'view_build.html' , build = build , release_name_list = release_name_list , release_dict = release_dict , run_stats_dict = run_stats_dict , has_next_page = has_next_page , current_offset = offset , next_offset = offset + page_size , last_offset = max ( 0 , offset - page_size ) , page_size = page_size ) | Page for viewing all releases in a build . | 522 | 9 |
17,056 | def view_release ( ) : build = g . build if request . method == 'POST' : form = forms . ReleaseForm ( request . form ) else : form = forms . ReleaseForm ( request . args ) form . validate ( ) ops = operations . BuildOps ( build . id ) release , run_list , stats_dict , approval_log = ops . get_release ( form . name . data , form . number . data ) if not release : abort ( 404 ) if request . method == 'POST' : decision_states = ( models . Release . REVIEWING , models . Release . RECEIVING , models . Release . PROCESSING ) if form . good . data and release . status in decision_states : release . status = models . Release . GOOD auth . save_admin_log ( build , release_good = True , release = release ) elif form . bad . data and release . status in decision_states : release . status = models . Release . BAD auth . save_admin_log ( build , release_bad = True , release = release ) elif form . reviewing . data and release . status in ( models . Release . GOOD , models . Release . BAD ) : release . status = models . Release . REVIEWING auth . save_admin_log ( build , release_reviewing = True , release = release ) else : logging . warning ( 'Bad state transition for name=%r, number=%r, form=%r' , release . name , release . number , form . data ) abort ( 400 ) db . session . add ( release ) db . session . commit ( ) ops . evict ( ) return redirect ( url_for ( 'view_release' , id = build . id , name = release . name , number = release . number ) ) # Update form values for rendering form . good . data = True form . bad . data = True form . reviewing . data = True return render_template ( 'view_release.html' , build = build , release = release , run_list = run_list , release_form = form , approval_log = approval_log , stats_dict = stats_dict ) | Page for viewing all tests runs in a release . | 460 | 10 |
17,057 | def _get_artifact_context ( run , file_type ) : sha1sum = None image_file = False log_file = False config_file = False if request . path == '/image' : image_file = True if file_type == 'before' : sha1sum = run . ref_image elif file_type == 'diff' : sha1sum = run . diff_image elif file_type == 'after' : sha1sum = run . image else : abort ( 400 ) elif request . path == '/log' : log_file = True if file_type == 'before' : sha1sum = run . ref_log elif file_type == 'diff' : sha1sum = run . diff_log elif file_type == 'after' : sha1sum = run . log else : abort ( 400 ) elif request . path == '/config' : config_file = True if file_type == 'before' : sha1sum = run . ref_config elif file_type == 'after' : sha1sum = run . config else : abort ( 400 ) return image_file , log_file , config_file , sha1sum | Gets the artifact details for the given run and file_type . | 267 | 14 |
17,058 | def get_coordinator ( ) : workflow_queue = Queue . Queue ( ) complete_queue = Queue . Queue ( ) coordinator = WorkflowThread ( workflow_queue , complete_queue ) coordinator . register ( WorkflowItem , workflow_queue ) return coordinator | Creates a coordinator and returns it . | 58 | 8 |
17,059 | def _print_repr ( self , depth ) : if depth <= 0 : return '%s.%s#%d' % ( self . __class__ . __module__ , self . __class__ . __name__ , id ( self ) ) return '%s.%s(%s)#%d' % ( self . __class__ . __module__ , self . __class__ . __name__ , self . _print_tree ( self . _get_dict_for_repr ( ) , depth - 1 ) , id ( self ) ) | Print this WorkItem to the given stack depth . | 123 | 10 |
17,060 | def error ( self ) : # Copy the error from any failed item to be the error for the whole # barrier. The first error seen "wins". Also handles the case where # the WorkItems passed into the barrier have already completed and # been marked with errors. for item in self : if isinstance ( item , WorkItem ) and item . error : return item . error return None | Returns the error for this barrier and all work items if any . | 80 | 13 |
17,061 | def outstanding ( self ) : # Allow the same WorkItem to be yielded multiple times but not # count towards blocking the barrier. done_count = 0 for item in self : if not self . wait_any and item . fire_and_forget : # Only count fire_and_forget items as done if this is # *not* a WaitAny barrier. We only want to return control # to the caller when at least one of the blocking items # has completed. done_count += 1 elif item . done : done_count += 1 if self . wait_any and done_count > 0 : return False if done_count == len ( self ) : return False return True | Returns whether or not this barrier has pending work . | 144 | 10 |
17,062 | def get_item ( self ) : if self . was_list : result = ResultList ( ) for item in self : if isinstance ( item , WorkflowItem ) : if item . done and not item . error : result . append ( item . result ) else : # When there's an error or the workflow isn't done yet, # just return the original WorkflowItem so the caller # can inspect its entire state. result . append ( item ) else : result . append ( item ) return result else : return self [ 0 ] | Returns the item to send back into the workflow generator . | 112 | 11 |
17,063 | def start ( self ) : assert not self . interrupted for thread in self . worker_threads : thread . start ( ) WorkerThread . start ( self ) | Starts the coordinator thread and all related worker threads . | 33 | 11 |
17,064 | def stop ( self ) : if self . interrupted : return for thread in self . worker_threads : thread . interrupted = True self . interrupted = True | Stops the coordinator thread and all related threads . | 32 | 10 |
17,065 | def join ( self ) : for thread in self . worker_threads : thread . join ( ) WorkerThread . join ( self ) | Joins the coordinator thread and all worker threads . | 28 | 10 |
17,066 | def wait_one ( self ) : while True : try : item = self . output_queue . get ( True , self . polltime ) except Queue . Empty : continue except KeyboardInterrupt : LOGGER . debug ( 'Exiting' ) return else : item . check_result ( ) return | Waits until this worker has finished one work item or died . | 63 | 13 |
17,067 | def superuser_required ( f ) : @ functools . wraps ( f ) @ login_required def wrapped ( * args , * * kwargs ) : if not ( current_user . is_authenticated ( ) and current_user . superuser ) : abort ( 403 ) return f ( * args , * * kwargs ) return wrapped | Requires the requestor to be a super user . | 75 | 10 |
17,068 | def can_user_access_build ( param_name ) : build_id = ( request . args . get ( param_name , type = int ) or request . form . get ( param_name , type = int ) or request . json [ param_name ] ) if not build_id : logging . debug ( 'Build ID in param_name=%r was missing' , param_name ) abort ( 400 ) ops = operations . UserOps ( current_user . get_id ( ) ) build , user_is_owner = ops . owns_build ( build_id ) if not build : logging . debug ( 'Could not find build_id=%r' , build_id ) abort ( 404 ) if current_user . is_authenticated ( ) and not user_is_owner : # Assume the user should be able to access the build but can't because # the cache is out of date. This forces the cache to repopulate, any # outstanding user invitations to be completed, hopefully resulting in # the user having access to the build. ops . evict ( ) claim_invitations ( current_user ) build , user_is_owner = ops . owns_build ( build_id ) if not user_is_owner : if current_user . is_authenticated ( ) and current_user . superuser : pass elif request . method != 'GET' : logging . debug ( 'No way to log in user via modifying request' ) abort ( 403 ) elif build . public : pass elif current_user . is_authenticated ( ) : logging . debug ( 'User does not have access to this build' ) abort ( flask . Response ( 'You cannot access this build' , 403 ) ) else : logging . debug ( 'Redirecting user to login to get build access' ) abort ( login . unauthorized ( ) ) elif not login_fresh ( ) : logging . debug ( 'User login is old; forcing refresh' ) abort ( login . needs_refresh ( ) ) return build | Determines if the current user can access the build ID in the request . | 431 | 16 |
17,069 | def build_access_required ( function_or_param_name ) : def get_wrapper ( param_name , f ) : @ functools . wraps ( f ) def wrapped ( * args , * * kwargs ) : g . build = can_user_access_build ( param_name ) if not utils . is_production ( ) : # Insert a sleep to emulate page loading in production. time . sleep ( 0.5 ) return f ( * args , * * kwargs ) return wrapped if isinstance ( function_or_param_name , basestring ) : return lambda f : get_wrapper ( function_or_param_name , f ) else : return get_wrapper ( 'id' , function_or_param_name ) | Decorator ensures user has access to the build ID in the request . | 164 | 15 |
17,070 | def _get_api_key_ops ( ) : auth_header = request . authorization if not auth_header : logging . debug ( 'API request lacks authorization header' ) abort ( flask . Response ( 'API key required' , 401 , { 'WWW-Authenticate' : 'Basic realm="API key required"' } ) ) return operations . ApiKeyOps ( auth_header . username , auth_header . password ) | Gets the operations . ApiKeyOps instance for the current request . | 91 | 15 |
17,071 | def current_api_key ( ) : if app . config . get ( 'IGNORE_AUTH' ) : return models . ApiKey ( id = 'anonymous_superuser' , secret = '' , superuser = True ) ops = _get_api_key_ops ( ) api_key = ops . get ( ) logging . debug ( 'Authenticated as API key=%r' , api_key . id ) return api_key | Determines the API key for the current request . | 97 | 11 |
17,072 | def can_api_key_access_build ( param_name ) : build_id = ( request . args . get ( param_name , type = int ) or request . form . get ( param_name , type = int ) or request . json [ param_name ] ) utils . jsonify_assert ( build_id , 'build_id required' ) if app . config . get ( 'IGNORE_AUTH' ) : api_key = models . ApiKey ( id = 'anonymous_superuser' , secret = '' , superuser = True ) build = models . Build . query . get ( build_id ) utils . jsonify_assert ( build is not None , 'build must exist' , 404 ) else : ops = _get_api_key_ops ( ) api_key , build = ops . can_access_build ( build_id ) return api_key , build | Determines if the current API key can access the build in the request . | 197 | 16 |
17,073 | def build_api_access_required ( f ) : @ functools . wraps ( f ) def wrapped ( * args , * * kwargs ) : g . api_key , g . build = can_api_key_access_build ( 'build_id' ) return f ( * args , * * kwargs ) return wrapped | Decorator ensures API key has access to the build ID in the request . | 74 | 16 |
17,074 | def superuser_api_key_required ( f ) : @ functools . wraps ( f ) def wrapped ( * args , * * kwargs ) : api_key = current_api_key ( ) g . api_key = api_key utils . jsonify_assert ( api_key . superuser , 'API key=%r must be a super user' % api_key . id , 403 ) return f ( * args , * * kwargs ) return wrapped | Decorator ensures only superuser API keys can request this function . | 105 | 14 |
17,075 | def manage_api_keys ( ) : build = g . build create_form = forms . CreateApiKeyForm ( ) if create_form . validate_on_submit ( ) : api_key = models . ApiKey ( ) create_form . populate_obj ( api_key ) api_key . id = utils . human_uuid ( ) api_key . secret = utils . password_uuid ( ) save_admin_log ( build , created_api_key = True , message = api_key . id ) db . session . add ( api_key ) db . session . commit ( ) logging . info ( 'Created API key=%r for build_id=%r' , api_key . id , build . id ) return redirect ( url_for ( 'manage_api_keys' , build_id = build . id ) ) create_form . build_id . data = build . id api_key_query = ( models . ApiKey . query . filter_by ( build_id = build . id ) . order_by ( models . ApiKey . created . desc ( ) ) . limit ( 1000 ) ) revoke_form_list = [ ] for api_key in api_key_query : form = forms . RevokeApiKeyForm ( ) form . id . data = api_key . id form . build_id . data = build . id form . revoke . data = True revoke_form_list . append ( ( api_key , form ) ) return render_template ( 'view_api_keys.html' , build = build , create_form = create_form , revoke_form_list = revoke_form_list ) | Page for viewing and creating API keys . | 367 | 8 |
17,076 | def revoke_api_key ( ) : build = g . build form = forms . RevokeApiKeyForm ( ) if form . validate_on_submit ( ) : api_key = models . ApiKey . query . get ( form . id . data ) if api_key . build_id != build . id : logging . debug ( 'User does not have access to API key=%r' , api_key . id ) abort ( 403 ) api_key . active = False save_admin_log ( build , revoked_api_key = True , message = api_key . id ) db . session . add ( api_key ) db . session . commit ( ) ops = operations . ApiKeyOps ( api_key . id , api_key . secret ) ops . evict ( ) return redirect ( url_for ( 'manage_api_keys' , build_id = build . id ) ) | Form submission handler for revoking API keys . | 197 | 9 |
17,077 | def claim_invitations ( user ) : # See if there are any build invitations present for the user with this # email address. If so, replace all those invitations with the real user. invitation_user_id = '%s:%s' % ( models . User . EMAIL_INVITATION , user . email_address ) invitation_user = models . User . query . get ( invitation_user_id ) if invitation_user : invited_build_list = list ( invitation_user . builds ) if not invited_build_list : return db . session . add ( user ) logging . debug ( 'Found %d build admin invitations for id=%r, user=%r' , len ( invited_build_list ) , invitation_user_id , user ) for build in invited_build_list : build . owners . remove ( invitation_user ) if not build . is_owned_by ( user . id ) : build . owners . append ( user ) logging . debug ( 'Claiming invitation for build_id=%r' , build . id ) save_admin_log ( build , invite_accepted = True ) else : logging . debug ( 'User already owner of build. ' 'id=%r, build_id=%r' , user . id , build . id ) db . session . add ( build ) db . session . delete ( invitation_user ) db . session . commit ( ) # Re-add the user to the current session so we can query with it. db . session . add ( current_user ) | Claims any pending invitations for the given user s email address . | 333 | 13 |
17,078 | def manage_admins ( ) : build = g . build # Do not show cached data db . session . add ( build ) db . session . refresh ( build ) add_form = forms . AddAdminForm ( ) if add_form . validate_on_submit ( ) : invitation_user_id = '%s:%s' % ( models . User . EMAIL_INVITATION , add_form . email_address . data ) invitation_user = models . User . query . get ( invitation_user_id ) if not invitation_user : invitation_user = models . User ( id = invitation_user_id , email_address = add_form . email_address . data ) db . session . add ( invitation_user ) db . session . add ( build ) db . session . add ( invitation_user ) db . session . refresh ( build , lockmode = 'update' ) build . owners . append ( invitation_user ) save_admin_log ( build , invited_new_admin = True , message = invitation_user . email_address ) db . session . commit ( ) logging . info ( 'Added user=%r as owner to build_id=%r' , invitation_user . id , build . id ) return redirect ( url_for ( 'manage_admins' , build_id = build . id ) ) add_form . build_id . data = build . id revoke_form_list = [ ] for user in build . owners : form = forms . RemoveAdminForm ( ) form . user_id . data = user . id form . build_id . data = build . id form . revoke . data = True revoke_form_list . append ( ( user , form ) ) return render_template ( 'view_admins.html' , build = build , add_form = add_form , revoke_form_list = revoke_form_list ) | Page for viewing and managing build admins . | 410 | 8 |
17,079 | def revoke_admin ( ) : build = g . build form = forms . RemoveAdminForm ( ) if form . validate_on_submit ( ) : user = models . User . query . get ( form . user_id . data ) if not user : logging . debug ( 'User being revoked admin access does not exist.' 'id=%r, build_id=%r' , form . user_id . data , build . id ) abort ( 400 ) if user == current_user : logging . debug ( 'User trying to remove themself as admin. ' 'id=%r, build_id=%r' , user . id , build . id ) abort ( 400 ) db . session . add ( build ) db . session . add ( user ) db . session . refresh ( build , lockmode = 'update' ) db . session . refresh ( user , lockmode = 'update' ) user_is_owner = build . owners . filter_by ( id = user . id ) if not user_is_owner : logging . debug ( 'User being revoked admin access is not owner. ' 'id=%r, build_id=%r.' , user . id , build . id ) abort ( 400 ) build . owners . remove ( user ) save_admin_log ( build , revoked_admin = True , message = user . email_address ) db . session . commit ( ) operations . UserOps ( user . get_id ( ) ) . evict ( ) return redirect ( url_for ( 'manage_admins' , build_id = build . id ) ) | Form submission handler for revoking admin access to a build . | 339 | 12 |
17,080 | def save_admin_log ( build , * * kwargs ) : message = kwargs . pop ( 'message' , None ) release = kwargs . pop ( 'release' , None ) run = kwargs . pop ( 'run' , None ) if not len ( kwargs ) == 1 : raise TypeError ( 'Must specify a LOG_TYPE argument' ) log_enum = kwargs . keys ( ) [ 0 ] log_type = getattr ( models . AdminLog , log_enum . upper ( ) , None ) if not log_type : raise TypeError ( 'Bad log_type argument: %s' % log_enum ) if current_user . is_anonymous ( ) : user_id = None else : user_id = current_user . get_id ( ) log = models . AdminLog ( build_id = build . id , log_type = log_type , message = message , user_id = user_id ) if release : log . release_id = release . id if run : log . run_id = run . id log . release_id = run . release_id db . session . add ( log ) | Saves an action to the admin log . | 254 | 9 |
17,081 | def view_admin_log ( ) : build = g . build # TODO: Add paging log_list = ( models . AdminLog . query . filter_by ( build_id = build . id ) . order_by ( models . AdminLog . created . desc ( ) ) . all ( ) ) return render_template ( 'view_admin_log.html' , build = build , log_list = log_list ) | Page for viewing the log of admin activity . | 93 | 9 |
17,082 | def verify_binary ( flag_name , process_args = None ) : if process_args is None : process_args = [ ] path = getattr ( FLAGS , flag_name ) if not path : logging . error ( 'Flag %r not set' % flag_name ) sys . exit ( 1 ) with open ( os . devnull , 'w' ) as dev_null : try : subprocess . check_call ( [ path ] + process_args , stdout = dev_null , stderr = subprocess . STDOUT ) except : logging . exception ( '--%s binary at path %r does not work' , flag_name , path ) sys . exit ( 1 ) | Exits the program if the binary from the given flag doesn t run . | 151 | 15 |
17,083 | def create_release ( ) : build = g . build release_name = request . form . get ( 'release_name' ) utils . jsonify_assert ( release_name , 'release_name required' ) url = request . form . get ( 'url' ) utils . jsonify_assert ( release_name , 'url required' ) release = models . Release ( name = release_name , url = url , number = 1 , build_id = build . id ) last_candidate = ( models . Release . query . filter_by ( build_id = build . id , name = release_name ) . order_by ( models . Release . number . desc ( ) ) . first ( ) ) if last_candidate : release . number += last_candidate . number if last_candidate . status == models . Release . PROCESSING : canceled_task_count = work_queue . cancel ( release_id = last_candidate . id ) logging . info ( 'Canceling %d tasks for previous attempt ' 'build_id=%r, release_name=%r, release_number=%d' , canceled_task_count , build . id , last_candidate . name , last_candidate . number ) last_candidate . status = models . Release . BAD db . session . add ( last_candidate ) db . session . add ( release ) db . session . commit ( ) signals . release_updated_via_api . send ( app , build = build , release = release ) logging . info ( 'Created release: build_id=%r, release_name=%r, url=%r, ' 'release_number=%d' , build . id , release . name , url , release . number ) return flask . jsonify ( success = True , build_id = build . id , release_name = release . name , release_number = release . number , url = url ) | Creates a new release candidate for a build . | 420 | 10 |
17,084 | def _check_release_done_processing ( release ) : if release . status != models . Release . PROCESSING : # NOTE: This statement also guards for situations where the user has # prematurely specified that the release is good or bad. Once the user # has done that, the system will not automatically move the release # back into the 'reviewing' state or send the email notification below. logging . info ( 'Release not in processing state yet: build_id=%r, ' 'name=%r, number=%d' , release . build_id , release . name , release . number ) return False query = models . Run . query . filter_by ( release_id = release . id ) for run in query : if run . status == models . Run . NEEDS_DIFF : # Still waiting for the diff to finish. return False if run . ref_config and not run . ref_image : # Still waiting for the ref capture to process. return False if run . config and not run . image : # Still waiting for the run capture to process. return False logging . info ( 'Release done processing, now reviewing: build_id=%r, ' 'name=%r, number=%d' , release . build_id , release . name , release . number ) # Send the email at the end of this request so we know it's only # sent a single time (guarded by the release.status check above). build_id = release . build_id release_name = release . name release_number = release . number @ utils . after_this_request def send_notification_email ( response ) : emails . send_ready_for_review ( build_id , release_name , release_number ) release . status = models . Release . REVIEWING db . session . add ( release ) return True | Moves a release candidate to reviewing if all runs are done . | 393 | 13 |
17,085 | def _get_release_params ( ) : release_name = request . form . get ( 'release_name' ) utils . jsonify_assert ( release_name , 'release_name required' ) release_number = request . form . get ( 'release_number' , type = int ) utils . jsonify_assert ( release_number is not None , 'release_number required' ) return release_name , release_number | Gets the release params from the current request . | 95 | 10 |
17,086 | def _find_last_good_run ( build ) : run_name = request . form . get ( 'run_name' , type = str ) utils . jsonify_assert ( run_name , 'run_name required' ) last_good_release = ( models . Release . query . filter_by ( build_id = build . id , status = models . Release . GOOD ) . order_by ( models . Release . created . desc ( ) ) . first ( ) ) last_good_run = None if last_good_release : logging . debug ( 'Found last good release for: build_id=%r, ' 'release_name=%r, release_number=%d' , build . id , last_good_release . name , last_good_release . number ) last_good_run = ( models . Run . query . filter_by ( release_id = last_good_release . id , name = run_name ) . first ( ) ) if last_good_run : logging . debug ( 'Found last good run for: build_id=%r, ' 'release_name=%r, release_number=%d, ' 'run_name=%r' , build . id , last_good_release . name , last_good_release . number , last_good_run . name ) return last_good_release , last_good_run | Finds the last good release and run for a build . | 305 | 12 |
17,087 | def find_run ( ) : build = g . build last_good_release , last_good_run = _find_last_good_run ( build ) if last_good_run : return flask . jsonify ( success = True , build_id = build . id , release_name = last_good_release . name , release_number = last_good_release . number , run_name = last_good_run . name , url = last_good_run . url , image = last_good_run . image , log = last_good_run . log , config = last_good_run . config ) return utils . jsonify_error ( 'Run not found' ) | Finds the last good run of the given name for a release . | 151 | 14 |
17,088 | def _get_or_create_run ( build ) : release_name , release_number = _get_release_params ( ) run_name = request . form . get ( 'run_name' , type = str ) utils . jsonify_assert ( run_name , 'run_name required' ) release = ( models . Release . query . filter_by ( build_id = build . id , name = release_name , number = release_number ) . first ( ) ) utils . jsonify_assert ( release , 'release does not exist' ) run = ( models . Run . query . filter_by ( release_id = release . id , name = run_name ) . first ( ) ) if not run : # Ignore re-reports of the same run name for this release. logging . info ( 'Created run: build_id=%r, release_name=%r, ' 'release_number=%d, run_name=%r' , build . id , release . name , release . number , run_name ) run = models . Run ( release_id = release . id , name = run_name , status = models . Run . DATA_PENDING ) db . session . add ( run ) db . session . flush ( ) return release , run | Gets a run for a build or creates it if it does not exist . | 279 | 16 |
17,089 | def _enqueue_capture ( build , release , run , url , config_data , baseline = False ) : # Validate the JSON config parses. try : config_dict = json . loads ( config_data ) except Exception , e : abort ( utils . jsonify_error ( e ) ) # Rewrite the config JSON to include the URL specified in this request. # Blindly overwrite anything that was there. config_dict [ 'targetUrl' ] = url config_data = json . dumps ( config_dict ) config_artifact = _save_artifact ( build , config_data , 'application/json' ) db . session . add ( config_artifact ) db . session . flush ( ) suffix = '' if baseline : suffix = ':baseline' task_id = '%s:%s%s' % ( run . id , hashlib . sha1 ( url ) . hexdigest ( ) , suffix ) logging . info ( 'Enqueueing capture task=%r, baseline=%r' , task_id , baseline ) work_queue . add ( constants . CAPTURE_QUEUE_NAME , payload = dict ( build_id = build . id , release_name = release . name , release_number = release . number , run_name = run . name , url = url , config_sha1sum = config_artifact . id , baseline = baseline , ) , build_id = build . id , release_id = release . id , run_id = run . id , source = 'request_run' , task_id = task_id ) # Set the URL and config early to indicate to report_run that there is # still data pending even if 'image' and 'ref_image' are unset. if baseline : run . ref_url = url run . ref_config = config_artifact . id else : run . url = url run . config = config_artifact . id | Enqueues a task to run a capture process . | 421 | 11 |
17,090 | def request_run ( ) : build = g . build current_release , current_run = _get_or_create_run ( build ) current_url = request . form . get ( 'url' , type = str ) config_data = request . form . get ( 'config' , default = '{}' , type = str ) utils . jsonify_assert ( current_url , 'url to capture required' ) utils . jsonify_assert ( config_data , 'config document required' ) config_artifact = _enqueue_capture ( build , current_release , current_run , current_url , config_data ) ref_url = request . form . get ( 'ref_url' , type = str ) ref_config_data = request . form . get ( 'ref_config' , type = str ) utils . jsonify_assert ( bool ( ref_url ) == bool ( ref_config_data ) , 'ref_url and ref_config must both be specified or not specified' ) if ref_url and ref_config_data : ref_config_artifact = _enqueue_capture ( build , current_release , current_run , ref_url , ref_config_data , baseline = True ) else : _ , last_good_run = _find_last_good_run ( build ) if last_good_run : current_run . ref_url = last_good_run . url current_run . ref_image = last_good_run . image current_run . ref_log = last_good_run . log current_run . ref_config = last_good_run . config db . session . add ( current_run ) db . session . commit ( ) signals . run_updated_via_api . send ( app , build = build , release = current_release , run = current_run ) return flask . jsonify ( success = True , build_id = build . id , release_name = current_release . name , release_number = current_release . number , run_name = current_run . name , url = current_run . url , config = current_run . config , ref_url = current_run . ref_url , ref_config = current_run . ref_config ) | Requests a new run for a release candidate . | 496 | 10 |
17,091 | def runs_done ( ) : build = g . build release_name , release_number = _get_release_params ( ) release = ( models . Release . query . filter_by ( build_id = build . id , name = release_name , number = release_number ) . with_lockmode ( 'update' ) . first ( ) ) utils . jsonify_assert ( release , 'Release does not exist' ) release . status = models . Release . PROCESSING db . session . add ( release ) _check_release_done_processing ( release ) db . session . commit ( ) signals . release_updated_via_api . send ( app , build = build , release = release ) logging . info ( 'Runs done for release: build_id=%r, release_name=%r, ' 'release_number=%d' , build . id , release . name , release . number ) results_url = url_for ( 'view_release' , id = build . id , name = release . name , number = release . number , _external = True ) return flask . jsonify ( success = True , results_url = results_url ) | Marks a release candidate as having all runs reported . | 254 | 11 |
17,092 | def _save_artifact ( build , data , content_type ) : sha1sum = hashlib . sha1 ( data ) . hexdigest ( ) artifact = models . Artifact . query . filter_by ( id = sha1sum ) . first ( ) if artifact : logging . debug ( 'Upload already exists: artifact_id=%r' , sha1sum ) else : logging . info ( 'Upload received: artifact_id=%r, content_type=%r' , sha1sum , content_type ) artifact = models . Artifact ( id = sha1sum , content_type = content_type , data = data ) _artifact_created ( artifact ) artifact . owners . append ( build ) return artifact | Saves an artifact to the DB and returns it . | 161 | 11 |
17,093 | def upload ( ) : build = g . build utils . jsonify_assert ( len ( request . files ) == 1 , 'Need exactly one uploaded file' ) file_storage = request . files . values ( ) [ 0 ] data = file_storage . read ( ) content_type , _ = mimetypes . guess_type ( file_storage . filename ) artifact = _save_artifact ( build , data , content_type ) db . session . add ( artifact ) db . session . commit ( ) return flask . jsonify ( success = True , build_id = build . id , sha1sum = artifact . id , content_type = content_type ) | Uploads an artifact referenced by a run . | 144 | 9 |
17,094 | def _get_artifact_response ( artifact ) : response = flask . Response ( artifact . data , mimetype = artifact . content_type ) response . cache_control . public = True response . cache_control . max_age = 8640000 response . set_etag ( artifact . id ) return response | Gets the response object for the given artifact . | 67 | 10 |
17,095 | def download ( ) : # Allow users with access to the build to download the file. Falls back # to API keys with access to the build. Prefer user first for speed. try : build = auth . can_user_access_build ( 'build_id' ) except HTTPException : logging . debug ( 'User access to artifact failed. Trying API key.' ) _ , build = auth . can_api_key_access_build ( 'build_id' ) sha1sum = request . args . get ( 'sha1sum' , type = str ) if not sha1sum : logging . debug ( 'Artifact sha1sum=%r not supplied' , sha1sum ) abort ( 404 ) artifact = models . Artifact . query . get ( sha1sum ) if not artifact : logging . debug ( 'Artifact sha1sum=%r does not exist' , sha1sum ) abort ( 404 ) build_id = request . args . get ( 'build_id' , type = int ) if not build_id : logging . debug ( 'build_id missing for artifact sha1sum=%r' , sha1sum ) abort ( 404 ) is_owned = artifact . owners . filter_by ( id = build_id ) . first ( ) if not is_owned : logging . debug ( 'build_id=%r not owner of artifact sha1sum=%r' , build_id , sha1sum ) abort ( 403 ) # Make sure there are no Set-Cookie headers on the response so this # request is cachable by all HTTP frontends. @ utils . after_this_request def no_session ( response ) : if 'Set-Cookie' in response . headers : del response . headers [ 'Set-Cookie' ] if not utils . is_production ( ) : # Insert a sleep to emulate how the page loading looks in production. time . sleep ( 1.5 ) if request . if_none_match and request . if_none_match . contains ( sha1sum ) : response = flask . Response ( status = 304 ) return response return _get_artifact_response ( artifact ) | Downloads an artifact by it s content hash . | 470 | 10 |
17,096 | def evict ( self ) : logging . debug ( 'Evicting cache for %r' , self . cache_key ) _clear_version_cache ( self . cache_key ) # Cause the cache key to be refreshed next time any operation is # run to make sure we don't act on old cached data. self . versioned_cache_key = None | Evict all caches related to these operations . | 76 | 9 |
17,097 | def sort_run ( run ) : # Sort errors first, then by name. Also show errors that were manually # approved, so the paging sort order stays the same even after users # approve a diff on the run page. if run . status in models . Run . DIFF_NEEDED_STATES : return ( 0 , run . name ) return ( 1 , run . name ) | Sort function for runs within a release . | 82 | 8 |
17,098 | def parse ( obj , required_properties = None , additional_properties = None , ignore_optional_property_errors = None ) : if not ( required_properties is additional_properties is ignore_optional_property_errors is None ) : with parsing ( required_properties = required_properties , additional_properties = additional_properties , ignore_optional_property_errors = ignore_optional_property_errors ) : return parse ( obj ) validator = None if isinstance ( obj , Validator ) : validator = obj elif inspect . isclass ( obj ) and issubclass ( obj , Validator ) : validator = obj ( ) else : try : validator = _NAMED_VALIDATORS [ obj ] except ( KeyError , TypeError ) : for factory in _VALIDATOR_FACTORIES : validator = factory ( obj ) if validator is not None : break else : if inspect . isclass ( validator ) and issubclass ( validator , Validator ) : _NAMED_VALIDATORS [ obj ] = validator = validator ( ) if not isinstance ( validator , Validator ) : raise SchemaError ( "%r cannot be parsed as a Validator" % obj ) return validator | Try to parse the given obj as a validator instance . | 267 | 12 |
17,099 | def parsing ( * * kwargs ) : from . validators import Object with _VALIDATOR_FACTORIES_LOCK : old_values = { } for key , value in iteritems ( kwargs ) : if value is not None : attr = key . upper ( ) old_values [ key ] = getattr ( Object , attr ) setattr ( Object , attr , value ) try : yield finally : for key , value in iteritems ( kwargs ) : if value is not None : setattr ( Object , key . upper ( ) , old_values [ key ] ) | Context manager for overriding the default validator parsing rules for the following code block . | 129 | 16 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.