idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
18,100
def _compute_hline_scores(self):
    """Do the hard work of preparing ``self._hline_score``.

    For every row ``y``, wire ``k`` and inclusive x-range ``(x1, x)``,
    ``hline_score[y, x1, x]`` counts the wires whose horizontal line fully
    spans that range (i.e. all qubits present and consecutively coupled).
    """
    M, N, L = self.M, self.N, self.L
    hline_score = {}
    for y in range(N):
        # laststart[k]: leftmost x of the current unbroken run on wire k,
        # or None when the wire is absent at the current column.
        laststart = [0 if (0, y, 0, k) in self else None for k in range(L)]
        for x in range(M):
            # block[x1]: number of wires whose run covers [x1, x].
            block = [0] * (x + 1)
            for k in range(L):
                if (x, y, 0, k) not in self:
                    # qubit missing: the run on this wire is broken
                    laststart[k] = None
                elif laststart[k] is None:
                    # run (re)starts at x
                    laststart[k] = x
                    block[x] += 1
                elif x and (x, y, 0, k) not in self[x - 1, y, 0, k]:
                    # presumably self[node] is an adjacency set, so this
                    # detects a missing coupler to the previous column
                    # NOTE(review): unlike the branch above, this restart
                    # does not do ``block[x] += 1`` — confirm intended.
                    laststart[k] = x
                else:
                    # run continues: credit every start point it covers
                    for x1 in range(laststart[k], x + 1):
                        block[x1] += 1
            for x1 in range(x + 1):
                hline_score[y, x1, x] = block[x1]
    self._hline_score = hline_score
Does the hard work to prepare hline_score .
246
11
18,101
def biclique(self, xmin, xmax, ymin, ymax):
    """Compute a maximum complete bipartite graph in the given rectangle.

    One side of the biclique is built from horizontal-line chains (one
    bundle per row), the other from vertical-line chains (one bundle per
    column).  Returns the pair ``(Aside, Bside)`` of chain lists.
    """
    horizontal = [chain
                  for y in range(ymin, ymax + 1)
                  for chain in self.maximum_hline_bundle(y, xmin, xmax)]
    vertical = [chain
                for x in range(xmin, xmax + 1)
                for chain in self.maximum_vline_bundle(x, ymin, ymax)]
    return horizontal, vertical
Compute a maximum-sized complete bipartite graph contained in the rectangle defined by (xmin, xmax, ymin, ymax), where each chain of qubits is either a vertical line or a horizontal line.
108
41
18,102
def _contains_line(self, line):
    """Test if a chain of qubits is completely contained in self.

    Checks that every qubit in ``line`` is present and that each
    consecutive pair of qubits is connected by a coupler.
    """
    for qubit in line:
        if qubit not in self:
            return False
    for prev, nxt in zip(line, line[1:]):
        if prev not in self[nxt]:
            return False
    return True
Test if a chain of qubits is completely contained in self . In particular test if all qubits are present and the couplers connecting those qubits are also connected .
49
35
18,103
def maximum_ell_bundle(self, ell):
    """Return a maximum ell bundle in the rectangle bounded by ``ell``.

    ``ell`` is ``(x0, x1, y0, y1)``; each chain is a vertical line glued
    to a horizontal line at the corner ``(x0, y0)``.
    """
    x0, x1, y0, y1 = ell
    horizontal = self.maximum_hline_bundle(y0, x0, x1)
    vertical = self.maximum_vline_bundle(x0, y0, y1)
    if self.random_bundles:
        # randomize pairing so repeated calls don't always pick the same ells
        shuffle(horizontal)
        shuffle(vertical)
    ells = []
    for hline, vline in zip(horizontal, vertical):
        ells.append(vline + hline)
    return ells
Return a maximum ell bundle in the rectangle bounded by
110
10
18,104
def nativeCliqueEmbed(self, width):
    """Compute a maximum-sized native clique embedding in an induced
    subgraph of Chimera, with all chain lengths ``width + 1``.

    Scans every rectangle whose half-perimeter matches ``width``,
    scores it with ``maxCliqueWithRectangle``, and keeps a uniformly
    random best rectangle via ``_accumulate_random``.  The chosen chain
    of rectangles is then walked backwards to emit the ell bundles.
    """
    maxCWR = {}
    M, N = self.M, self.N
    maxscore = None
    count = 0
    key = None
    for w in range(width + 2):
        h = width - w - 2
        for ymin in range(N - h):
            ymax = ymin + h
            for xmin in range(M - w):
                xmax = xmin + w
                R = (xmin, xmax, ymin, ymax)
                score, best = self.maxCliqueWithRectangle(R, maxCWR)
                maxCWR[R] = best
                if maxscore is None or (score is not None and maxscore < score):
                    maxscore = score
                    key = None  # this gets overwritten immediately
                    count = 0  # this gets overwritten immediately
                if maxscore == score:
                    # reservoir-style uniform choice among equally good rectangles
                    count, key = _accumulate_random(count, best[3], key, R)
    # reconstruct the clique by following stored predecessor keys
    clique = []
    while key in maxCWR:
        score, ell, key, num = maxCWR[key]
        if ell is not None:
            meb = self.maximum_ell_bundle(ell)
            clique.extend(meb)
    return maxscore, clique
Compute a maximum - sized native clique embedding in an induced subgraph of chimera with all chainlengths width + 1 .
262
28
18,105
def _compute_all_deletions(self):
    """Return all minimal hitting sets of the evil edges.

    Each candidate picks one endpoint per evil edge; a candidate is kept
    only while no strictly smaller kept set is contained in it, and any
    kept set strictly containing the candidate is discarded.
    """
    minimal = []
    for candidate in map(set, product(*self._evil)):
        survivors = []
        dominated = False
        for existing in minimal:
            if existing < candidate:
                # a strictly smaller cover already exists; drop candidate
                dominated = True
                break
            if candidate < existing:
                # candidate supersedes this larger cover; drop existing
                continue
            survivors.append(existing)
        if not dominated:
            minimal = survivors + [candidate]
    return minimal
Returns all minimal edge covers of the set of evil edges .
97
12
18,106
def _compute_deletions(self):
    """If there are at most ``self._proc_limit`` possible deletion sets,
    compute all subprocessors obtained by deleting a minimal subset of
    qubits; otherwise leave ``self._processors`` as None so callers fall
    back to random subprocessor sampling.
    """
    # Fix: the original unpacked self.M, self.N, self.L and self._edgelist
    # into locals that were never used; they are removed here.
    # 2**len(evil) bounds the number of endpoint-choice combinations.
    if 2 ** len(self._evil) <= self._proc_limit:
        deletions = self._compute_all_deletions()
        self._processors = [self._subprocessor(d) for d in deletions]
    else:
        self._processors = None
If there are fewer than self . _proc_limit possible deletion sets compute all subprocessors obtained by deleting a minimal subset of qubits .
104
29
18,107
def _random_subprocessor(self):
    """Create a random subprocessor with no evil couplers remaining.

    Greedily walks the evil edges, deleting one randomly chosen endpoint
    of every edge not already covered.  The result is minimal (restoring
    any deleted qubit reintroduces a bad coupler) but not necessarily of
    minimum size.
    """
    deletion = set()
    for edge in self._evil:
        # skip edges already covered by an earlier deletion
        if deletion.isdisjoint(edge[:2]):
            deletion.add(choice(edge))
    return self._subprocessor(deletion)
Creates a random subprocessor where there is a coupler between every pair of working qubits on opposite sides of the same cell . This is guaranteed to be minimal in that adding a qubit back in will reintroduce a bad coupler but not to have minimum size .
57
55
18,108
def _objective_bestscore(self, old, new):
    """Return True when ``new`` has a better score than ``old``.

    Both arguments are ``(score, payload)`` pairs; a score of None in
    ``old`` always loses, a score of None in ``new`` always loses.
    """
    oldscore = old[0]
    newscore = new[0]
    if oldscore is None:
        return True
    if newscore is None:
        return False
    return oldscore < newscore
An objective function that returns True if new has a better score than old and False otherwise .
55
18
18,109
def nativeCliqueEmbed(self, width):
    """Compute a maximum-sized native clique embedding with chain size
    ``width + 1``, mapping the computation over all candidate
    subprocessors and keeping the best-scoring result.
    """
    def run_on_processor(proc):
        return proc.nativeCliqueEmbed(width)

    best = self._map_to_processors(run_on_processor, self._objective_bestscore)
    return self._translate(best)
Compute a maximum - sized native clique embedding in an induced subgraph of chimera with chainsize width + 1 . If possible returns a uniform choice among all largest cliques .
61
38
18,110
def _translate(self, embedding):
    """Translate an embedding back to linear coordinates if necessary.

    A None embedding (no solution) and non-linear mode both pass through
    unchanged.
    """
    if embedding is None or not self._linear:
        return embedding
    M, N, L = self.M, self.N, self.L
    return [_bulk_to_linear(M, N, L, chain) for chain in embedding]
Translates an embedding back to linear coordinates if necessary .
59
13
18,111
def _validate_chain_strength(sampler, chain_strength):
    """Validate the provided chain strength against the sampler's J-range.

    Args:
        sampler: a sampler exposing a ``properties`` dict with
            ``'extended_j_range'`` and/or ``'j_range'``.
        chain_strength: requested strength, or None to use the maximum.

    Returns:
        The validated chain strength.

    Raises:
        ValueError: if the sampler has no J-range property, or if the
            requested strength exceeds what the hardware allows.
    """
    properties = sampler.properties

    # prefer the extended range when the solver advertises it
    if 'extended_j_range' in properties:
        max_chain_strength = -min(properties['extended_j_range'])
    elif 'j_range' in properties:
        max_chain_strength = -min(properties['j_range'])
    else:
        raise ValueError("input sampler should have 'j_range' and/or 'extended_j_range' property.")

    if chain_strength is None:
        chain_strength = max_chain_strength
    elif chain_strength > max_chain_strength:
        # Fix: original message misspelled "exceeds" as "exceedds"
        raise ValueError("Provided chain strength exceeds the allowed range.")

    return chain_strength
Validate the provided chain strength, checking the J-ranges of the sampler's children.
163
17
18,112
def sample(self, bqm, apply_flux_bias_offsets=True, **kwargs):
    """Sample from the given Ising model.

    When ``apply_flux_bias_offsets`` is True and flux biases have been
    computed, they are forwarded to the child sampler via the
    ``FLUX_BIAS_KWARG`` keyword.
    """
    if apply_flux_bias_offsets and self.flux_biases is not None:
        kwargs[FLUX_BIAS_KWARG] = self.flux_biases
    return self.child.sample(bqm, **kwargs)
Sample from the given Ising model .
93
8
18,113
def get_flux_biases(sampler, embedding, chain_strength, num_reads=1000, max_age=3600):
    """Get the flux bias offsets for ``sampler`` and ``embedding``.

    Looks in the local cache first; on a miss, computes the biases with
    the optional ``dwave.drivers`` package and stores them back in the
    cache keyed by system name and chain strength.

    Raises:
        TypeError: if ``sampler`` is not a dimod Sampler.
        RuntimeError: if dwave-drivers is not installed.
    """
    if not isinstance(sampler, dimod.Sampler):
        raise TypeError("input sampler should be DWaveSampler")

    # try to read the chip_id, otherwise get the name
    system_name = sampler.properties.get('chip_id', str(sampler.__class__))

    try:
        with cache_connect() as cur:
            fbo = get_flux_biases_from_cache(cur, embedding.values(), system_name,
                                             chain_strength=chain_strength,
                                             max_age=max_age)
            return fbo
    except MissingFluxBias:
        # cache miss — fall through and compute fresh biases
        pass

    # if dwave-drivers is not available, then we can't calculate the biases
    try:
        import dwave.drivers as drivers
    except ImportError:
        msg = ("dwave-drivers not found, cannot calculate flux biases. dwave-drivers can be "
               "installed with "
               "'pip install dwave-drivers --extra-index-url https://pypi.dwavesys.com/simple'. "
               "See documentation for dwave-drivers license.")
        raise RuntimeError(msg)

    fbo = drivers.oneshot_flux_bias(sampler, embedding.values(),
                                    num_reads=num_reads,
                                    chain_strength=chain_strength)

    # store them in the cache
    with cache_connect() as cur:
        for chain in embedding.values():
            # one representative qubit per chain carries the bias value
            v = next(iter(chain))
            flux_bias = fbo.get(v, 0.0)
            insert_flux_bias(cur, chain, system_name, flux_bias, chain_strength)

    return fbo
Get the flux bias offsets for sampler and embedding .
391
12
18,114
def find_clique_embedding(k, m, n=None, t=None, target_edges=None):
    """Find an embedding for a clique in a Chimera graph.

    Args:
        k: pair whose second element is the sequence of clique nodes.
        m, n, t: Chimera lattice shape parameters.
        target_edges: edges of the target Chimera graph (optional).

    Returns:
        dict mapping each clique node to its chain of qubits.

    Raises:
        ValueError: if no embedding can be found.
    """
    import random

    _, nodes = k

    m, n, t, target_edges = _chimera_input(m, n, t, target_edges)

    # Special cases to return optimal embeddings for small k. The general
    # clique embedder uses chains of length at least 2, whereas cliques of
    # size 1 and 2 can be embedded with single-qubit chains.
    if len(nodes) == 1:
        # k == 1: a single chain consisting of a randomly sampled qubit.
        qubits = set().union(*target_edges)
        qubit = random.choice(tuple(qubits))
        embedding = [[qubit]]
    elif len(nodes) == 2:
        # k == 2: two one-qubit chains that are the endpoints of a randomly
        # sampled coupler.
        # BUG FIX: the original only bound `edges` when target_edges was NOT
        # a list, raising NameError when it already was one; materialize it
        # unconditionally.
        edges = target_edges if isinstance(target_edges, list) else list(target_edges)
        edge = edges[random.randrange(len(edges))]
        embedding = [[edge[0]], [edge[1]]]
    else:
        # General case for k > 2.
        embedding = processor(target_edges, M=m, N=n, L=t).tightestNativeClique(len(nodes))

    if not embedding:
        raise ValueError("cannot find a K{} embedding for given Chimera lattice".format(k))

    return dict(zip(nodes, embedding))
Find an embedding for a clique in a Chimera graph .
351
13
18,115
def find_biclique_embedding(a, b, m, n=None, t=None, target_edges=None):
    """Find an embedding for a biclique in a Chimera graph.

    ``a`` and ``b`` are pairs whose second elements are the node lists of
    the two sides.  Returns two dicts mapping each side's nodes to chains.

    Raises:
        ValueError: if no embedding can be found.
    """
    _, anodes = a
    _, bnodes = b

    m, n, t, target_edges = _chimera_input(m, n, t, target_edges)
    chimera = processor(target_edges, M=m, N=n, L=t)
    embedding = chimera.tightestNativeBiClique(len(anodes), len(bnodes))

    if not embedding:
        raise ValueError("cannot find a K{},{} embedding for given Chimera lattice".format(a, b))

    left, right = embedding
    return dict(zip(anodes, left)), dict(zip(bnodes, right))
Find an embedding for a biclique in a Chimera graph .
181
14
18,116
def find_grid_embedding(dim, m, n=None, t=4):
    """Find an embedding for a grid in a Chimera graph.

    ``dim`` gives the grid shape (1 to 3 dimensions); missing dimensions
    are padded with 1.  Each grid point is embedded as a two-qubit chain
    (the two shores of one Chimera cell aisle).

    Raises:
        ValueError: if ``dim`` has an unsupported rank or the grid does
            not fit in the lattice.
    """
    m, n, t, target_edges = _chimera_input(m, n, t, None)
    indexer = dnx.generators.chimera.chimera_coordinates(m, n, t)

    dim = list(dim)
    num_dim = len(dim)
    # the key function shapes the returned dict keys to match the
    # dimensionality the caller asked for
    if num_dim == 1:
        def _key(row, col, aisle):
            return row
        dim.extend([1, 1])
    elif num_dim == 2:
        def _key(row, col, aisle):
            return row, col
        dim.append(1)
    elif num_dim == 3:
        def _key(row, col, aisle):
            return row, col, aisle
    else:
        raise ValueError("find_grid_embedding supports between one and three dimensions")

    rows, cols, aisles = dim
    if rows > m or cols > n or aisles > t:
        msg = ("the largest grid that find_grid_embedding can fit in a ({}, {}, {}) Chimera-lattice "
               "is {}x{}x{}; given grid is {}x{}x{}").format(m, n, t, m, n, t, rows, cols, aisles)
        raise ValueError(msg)

    # chain = the 0-shore and 1-shore qubits of the same cell/aisle
    return {_key(row, col, aisle): [indexer.int((row, col, 0, aisle)),
                                    indexer.int((row, col, 1, aisle))]
            for row in range(dim[0]) for col in range(dim[1]) for aisle in range(dim[2])}
Find an embedding for a grid in a Chimera graph .
372
12
18,117
def sample(self, bqm, **parameters):
    """Cut off small interactions and sample from the reduced binary
    quadratic model, then restore the removed (isolated) variables in the
    returned sample set.
    """
    child = self.child
    cutoff = self._cutoff
    cutoff_vartype = self._cutoff_vartype
    comp = self._comparison

    # compare biases in the configured vartype
    if cutoff_vartype is dimod.SPIN:
        original = bqm.spin
    else:
        original = bqm.binary

    # remove all of the interactions less than cutoff
    new = type(bqm)(original.linear,
                    ((u, v, bias)
                     for (u, v), bias in original.quadratic.items()
                     if not comp(abs(bias), cutoff)),
                    original.offset,
                    original.vartype)

    # next we check for isolated qubits and remove them, we could do this as
    # part of the construction but the assumption is there should not be
    # a large number in the 'typical' case
    isolated = [v for v in new if not new.adj[v]]
    new.remove_variables_from(isolated)

    if isolated and len(new) == 0:
        # in this case all variables are isolated, so we just put one back
        # to serve as the basis
        v = isolated.pop()
        new.linear[v] = original.linear[v]

    # get the samples from the child sampler and put them into the original vartype
    sampleset = child.sample(new, **parameters).change_vartype(bqm.vartype, inplace=True)

    # we now need to add the isolated back in, in a way that minimizes
    # the energy. There are lots of ways to do this but for now we'll just
    # do one
    if isolated:
        samples, variables = _restore_isolated(sampleset, bqm, isolated)
    else:
        samples = sampleset.record.sample
        variables = sampleset.variables

    vectors = sampleset.data_vectors
    vectors.pop('energy')  # we're going to recalculate the energy anyway

    return dimod.SampleSet.from_samples_bqm((samples, variables), bqm, **vectors)
Cutoff and sample from the provided binary quadratic model .
455
13
18,118
def sample_poly(self, poly, **kwargs):
    """Cut off small higher-order interactions and sample from the reduced
    binary polynomial, then restore removed (isolated) variables in the
    returned sample set.
    """
    child = self.child
    cutoff = self._cutoff
    cutoff_vartype = self._cutoff_vartype
    comp = self._comparison

    # compare biases in the configured vartype
    if cutoff_vartype is dimod.SPIN:
        original = poly.to_spin(copy=False)
    else:
        original = poly.to_binary(copy=False)

    # remove all of the terms of order >= 2 that have a bias less than cutoff
    new = type(poly)(((term, bias)
                      for term, bias in original.items()
                      if len(term) > 1 and not comp(abs(bias), cutoff)),
                     cutoff_vartype)

    # also include the linear biases for the variables in new
    for v in new.variables:
        term = v,
        if term in original:
            new[term] = original[term]

    # everything else is isolated
    isolated = list(original.variables.difference(new.variables))

    if isolated and len(new) == 0:
        # in this case all variables are isolated, so we just put one back
        # to serve as the basis
        term = isolated.pop(),
        new[term] = original[term]

    # get the samples from the child sampler and put them into the original vartype
    sampleset = child.sample_poly(new, **kwargs).change_vartype(poly.vartype, inplace=True)

    # we now need to add the isolated back in, in a way that minimizes
    # the energy. There are lots of ways to do this but for now we'll just
    # do one
    if isolated:
        samples, variables = _restore_isolated_higherorder(sampleset, poly, isolated)
    else:
        samples = sampleset.record.sample
        variables = sampleset.variables

    vectors = sampleset.data_vectors
    vectors.pop('energy')  # we're going to recalculate the energy anyway

    return dimod.SampleSet.from_samples_bqm((samples, variables), poly, **vectors)
Cutoff and sample from the provided binary polynomial .
447
12
18,119
def diagnose_embedding(emb, source, target):
    """A detailed diagnostic generator for minor embeddings.

    Yields tuples describing each problem found: missing chains, chain
    overlaps, invalid target nodes, disconnected chains and missing
    source edges.  An empty iteration means the embedding is valid.
    """
    if not hasattr(source, 'edges'):
        source = nx.Graph(source)
    if not hasattr(target, 'edges'):
        target = nx.Graph(target)

    label = {}        # target qubit -> source node that owns it
    embedded = set()  # source nodes whose chains are fully present
    for x in source:
        try:
            embx = emb[x]
            missing_chain = len(embx) == 0
        except KeyError:
            missing_chain = True
        if missing_chain:
            yield MissingChainError, x
            continue
        all_present = True
        for q in embx:
            if label.get(q, x) != x:
                # qubit already claimed by another chain
                yield ChainOverlapError, q, x, label[q]
            elif q not in target:
                all_present = False
                yield InvalidNodeError, x, q
            else:
                label[q] = x
        if all_present:
            embedded.add(x)
            # only meaningful when every chain qubit exists in target
            if not nx.is_connected(target.subgraph(embx)):
                yield DisconnectedChainError, x

    # collect which source edges are actually realized by couplers
    yielded = nx.Graph()
    for p, q in target.subgraph(label).edges():
        yielded.add_edge(label[p], label[q])
    for x, y in source.edges():
        if x == y:
            # self-loops need no coupler
            continue
        if x in embedded and y in embedded and not yielded.has_edge(x, y):
            yield MissingEdgeError, x, y
A detailed diagnostic for minor embeddings .
301
9
18,120
def model(self, name=None, model=None, mask=None, **kwargs):
    """Model registration decorator.

    Marshmallow schemas/fields are wrapped in a patched ``Model`` and
    registered directly; anything else is delegated to the parent
    ``Namespace`` implementation.
    """
    is_schema = isinstance(model,
                           (flask_marshmallow.Schema, flask_marshmallow.base_fields.FieldABC))
    if not is_schema:
        return super(Namespace, self).model(name=name, model=model, **kwargs)
    if not name:
        # default the registered name to the schema class name
        name = model.__class__.__name__
    api_model = Model(name, model, mask=mask)
    api_model.__apidoc__ = kwargs
    return self.add_model(name, api_model)
Model registration decorator .
135
5
18,121
def parameters(self, parameters, locations=None):
    """Endpoint parameters registration decorator.

    Wires the webargs parser, a 422 error response and the OpenAPI
    parameter docs around the decorated view function.
    """
    def decorator(func):
        resolved = locations
        if resolved is None and parameters.many:
            # many-schemas default to the JSON body
            resolved = ('json',)
        if resolved is not None:
            parameters.context['in'] = resolved

        # innermost: argument parsing; then 422 handling; docs outermost
        wrapped = self.WEBARGS_PARSER.use_args(parameters, locations=resolved)(func)
        wrapped = self.response(code=HTTPStatus.UNPROCESSABLE_ENTITY)(wrapped)
        return self.doc(params=parameters)(wrapped)

    return decorator
Endpoint parameters registration decorator .
125
7
18,122
def response(self, model=None, code=HTTPStatus.OK, description=None, **kwargs):
    """Endpoint response OpenAPI documentation decorator.

    Documents the response for ``code`` and, when a ``model`` is given,
    wraps the view so its return value is serialized with that model.
    """
    code = HTTPStatus(code)
    if code is HTTPStatus.NO_CONTENT:
        assert model is None
    if model is None and code not in {HTTPStatus.ACCEPTED, HTTPStatus.NO_CONTENT}:
        if code.value not in http_exceptions.default_exceptions:
            raise ValueError("`model` parameter is required for code %d" % code)
        # default error codes get a generic HTTPError schema
        model = self.model(
            name='HTTPError%d' % code,
            model=DefaultHTTPErrorSchema(http_code=code))
    if description is None:
        description = code.description

    def response_serializer_decorator(func):
        """
        This decorator handles responses to serialize the returned value
        with a given model.
        """
        def dump_wrapper(*args, **kwargs):
            # pylint: disable=missing-docstring
            response = func(*args, **kwargs)
            extra_headers = None

            if response is None:
                if model is not None:
                    raise ValueError("Response cannot not be None with HTTP status %d" % code)
                return flask.Response(status=code)
            elif isinstance(response, flask.Response) or model is None:
                # already a full response, or nothing to serialize
                return response
            elif isinstance(response, tuple):
                response, _code, extra_headers = unpack(response)
            else:
                _code = code

            if HTTPStatus(_code) is code:
                # only serialize when the declared status matches
                response = model.dump(response).data
            return response, _code, extra_headers

        return dump_wrapper

    def decorator(func_or_class):
        if code.value in http_exceptions.default_exceptions:
            # If the code is handled by raising an exception, it will
            # produce a response later, so we don't need to apply a useless
            # wrapper.
            decorated_func_or_class = func_or_class
        elif isinstance(func_or_class, type):
            # Handle Resource classes decoration
            # pylint: disable=protected-access
            func_or_class._apply_decorator_to_methods(response_serializer_decorator)
            decorated_func_or_class = func_or_class
        else:
            decorated_func_or_class = wraps(func_or_class)(
                response_serializer_decorator(func_or_class))

        if model is None:
            api_model = None
        else:
            if isinstance(model, Model):
                api_model = model
            else:
                api_model = self.model(model=model)
            if getattr(model, 'many', False):
                # document list responses as an array of the model
                api_model = [api_model]

        doc_decorator = self.doc(responses={code.value: (description, api_model)})
        return doc_decorator(decorated_func_or_class)

    return decorator
Endpoint response OpenAPI documentation decorator .
626
9
18,123
def _apply_decorator_to_methods(cls, decorator):
    """Apply ``decorator`` to every HTTP method handler declared on this
    Resource class (``cls.methods``), rebinding each wrapped handler.
    """
    for http_method in cls.methods:
        handler_name = http_method.lower()
        original_handler = getattr(cls, handler_name)
        setattr(cls, handler_name, decorator(original_handler))
This helper can apply a given decorator to all methods on the current Resource .
73
16
18,124
def options(self, *args, **kwargs):
    """Check which methods are allowed for the current user.

    Returns a 204 response whose ``Allow`` header lists the methods the
    current user may call on this resource.
    """
    # This is a generic implementation of OPTIONS method for resources.
    # This method checks every permissions provided as decorators for other
    # methods to provide information about what methods `current_user` can
    # use.
    method_funcs = [getattr(self, m.lower()) for m in self.methods]
    allowed_methods = []
    request_oauth_backup = getattr(flask.request, 'oauth', None)
    for method_func in method_funcs:
        if getattr(method_func, '_access_restriction_decorators', None):
            if not hasattr(method_func, '_cached_fake_method_func'):
                fake_method_func = lambda *args, **kwargs: True
                # `__name__` is used in `login_required` decorator, so it
                # is required to fake this also
                fake_method_func.__name__ = 'options'
                # Decorate the fake method with the registered access
                # restriction decorators
                for decorator in method_func._access_restriction_decorators:
                    fake_method_func = decorator(fake_method_func)
                # Cache the `fake_method_func` to avoid redoing this over
                # and over again
                method_func.__dict__['_cached_fake_method_func'] = fake_method_func
            else:
                fake_method_func = method_func._cached_fake_method_func
            # probe the restrictions without a real oauth context
            flask.request.oauth = None
            try:
                fake_method_func(self, *args, **kwargs)
            except HTTPException:
                # This method is not allowed, so skip it
                continue
        allowed_methods.append(method_func.__name__.upper())
    flask.request.oauth = request_oauth_backup
    return flask.Response(
        status=HTTPStatus.NO_CONTENT,
        headers={'Allow': ", ".join(allowed_methods)})
Check which methods are allowed .
444
6
18,125
def validate_patch_structure(self, data):
    """Common validation of PATCH structure.

    Ensures a value is present for operations that require one, checks
    the path, and derives ``data['field_name']`` from it.
    """
    needs_value = data['op'] not in self.NO_VALUE_OPERATIONS
    if needs_value and 'value' not in data:
        raise ValidationError('value is required')
    if 'path' not in data:
        raise ValidationError('Path is required and must always begin with /')
    # strip the leading '/' to obtain the target field name
    data['field_name'] = data['path'][1:]
Common validation of PATCH structure
95
6
18,126
def perform_patch(cls, operations, obj, state=None):
    """Perform all PATCH operations by dispatching each to the class
    method of the corresponding name; raise if any operation is unknown.
    """
    state = {} if state is None else state
    for op in operations:
        handled = cls._process_patch_operation(op, obj=obj, state=state)
        if handled:
            continue
        log.info("%s patching has been stopped because of unknown operation %s",
                 obj.__class__.__name__, op)
        raise ValidationError("Failed to update %s details. Operation %s could not succeed." % (obj.__class__.__name__, op))
    return True
Performs all necessary operations by calling class methods with corresponding names .
123
13
18,127
def replace(cls, obj, field, value, state):
    """Handler for the 'replace' PATCH operation.

    Separated out so it can easily be overridden in your Parameters.
    """
    if hasattr(obj, field):
        obj_setter = setattr
        obj_setter(obj, field, value)
        return True
    raise ValidationError("Field '%s' does not exist, so it cannot be patched" % field)
This is method for replace operation . It is separated to provide a possibility to easily override it in your Parameters .
60
22
18,128
def __related_categories(self, category_id):
    """Get all categories related to the given one, i.e. the categories
    of every tree node whose children include ``category_id``.
    """
    tree = self.categories_tree
    return [self.categories[parent]
            for parent in tree
            if category_id in tree[parent]]
Get all related categories to a given one
54
8
18,129
def _create_projects_file(project_name, data_source, items):
    """Create a projects file from the items' origin data.

    Args:
        project_name: name under which repositories are grouped.
        data_source: data-source key inside the project entry.
        items: iterable of dicts, each with an 'origin' key.

    Returns:
        Path of the temporary JSON projects file.
    """
    import os

    repositories = []
    for item in items:
        if item['origin'] not in repositories:
            repositories.append(item['origin'])
    projects = {project_name: {data_source: repositories}}

    projects_fd, projects_file_path = tempfile.mkstemp(prefix='track_items_')
    # Fix: mkstemp returns an open OS-level file descriptor; the original
    # never closed it, leaking one descriptor per call.
    os.close(projects_fd)

    with open(projects_file_path, "w") as pfile:
        json.dump(projects, pfile, indent=True)

    return projects_file_path
Create a projects file from the items origin data
127
9
18,130
def enrich_items(self, ocean_backend, events=False):
    """A custom enrich_items: besides uploading the enriched events from
    raw items in bulk, it builds one "image" item per docker image holding
    the latest data seen for that image, and uploads those too (with ids
    suffixed ``_image``).
    """
    max_items = self.elastic.max_items_bulk
    current = 0
    total = 0
    bulk_json = ""
    items = ocean_backend.fetch()
    images_items = {}  # image id -> newest rich item seen for that image
    url = self.elastic.index_url + '/items/_bulk'
    logger.debug("Adding items to %s (in %i packs)",
                 self.elastic.anonymize_url(url), max_items)
    for item in items:
        if current >= max_items:
            # flush the current bulk pack
            total += self.elastic.safe_put_bulk(url, bulk_json)
            json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
            logger.debug("Added %i items to %s (%0.2f MB)", total,
                         self.elastic.anonymize_url(url), json_size)
            bulk_json = ""
            current = 0
        rich_item = self.get_rich_item(item)
        data_json = json.dumps(rich_item)
        bulk_json += '{"index" : {"_id" : "%s" } }\n' % (item[self.get_field_unique_id()])
        bulk_json += data_json + "\n"  # Bulk document
        current += 1
        if rich_item['id'] not in images_items:
            # Let's transform the rich_event in a rich_image
            rich_item['is_docker_image'] = 1
            rich_item['is_event'] = 0
            images_items[rich_item['id']] = rich_item
        else:
            image_date = images_items[rich_item['id']]['last_updated']
            if image_date <= rich_item['last_updated']:
                # This event is newer for the image
                rich_item['is_docker_image'] = 1
                rich_item['is_event'] = 0
                images_items[rich_item['id']] = rich_item
    if current > 0:
        total += self.elastic.safe_put_bulk(url, bulk_json)
    if total == 0:
        # No items enriched, nothing to upload to ES
        return total
    # Time to upload the images enriched items. The id is uuid+"_image"
    # Normally we are enriching events for a unique image so all images
    # data can be upload in one query
    # NOTE(review): bulk_json is not reset here, so the last event pack is
    # re-sent along with the images — presumably harmless because the ids
    # are identical (reindex), but confirm it is intended.
    for image in images_items:
        data = images_items[image]
        data_json = json.dumps(data)
        bulk_json += '{"index" : {"_id" : "%s" } }\n' % (data['id'] + "_image")
        bulk_json += data_json + "\n"  # Bulk document
    total += self.elastic.safe_put_bulk(url, bulk_json)
    return total
A custom enrich items is needed because apart from the enriched events from raw items a image item with the last data for an image must be created
627
28
18,131
def get_owner_repos_url(owner, token):
    """Return the GitHub repos API URL for ``owner``.

    The owner could be an org or a user; the org URL is probed first and
    the user URL is used as fallback.  On a 403 the function waits for
    the rate-limit window to reset.
    """
    url_org = GITHUB_API_URL + "/orgs/" + owner + "/repos"
    url_user = GITHUB_API_URL + "/users/" + owner + "/repos"

    url_owner = url_org  # Use org by default
    try:
        r = requests.get(url_org,
                         params=get_payload(),
                         headers=get_headers(token))
        r.raise_for_status()
    except requests.exceptions.HTTPError as e:
        if r.status_code == 403:
            # rate limit exhausted: wait until the reset timestamp
            rate_limit_reset_ts = datetime.fromtimestamp(int(r.headers['X-RateLimit-Reset']))
            seconds_to_reset = (rate_limit_reset_ts - datetime.utcnow()).seconds + 1
            logging.info("GitHub rate limit exhausted. Waiting %i secs for rate limit reset." % (seconds_to_reset))
            sleep(seconds_to_reset)
            # NOTE(review): after sleeping, the org request is NOT retried;
            # the org URL is returned untested — confirm this is intended.
        else:
            # owner is not an org, try with a user
            url_owner = url_user
    return url_owner
The owner could be an org or a user. It waits if needed for the rate limit to reset. Also it fixes a Django issue, changing - to _.
254
30
18,132
def get_repositores(owner_url, token, nrepos):
    """Return up to ``nrepos`` most recently updated non-fork repositories
    for the owner behind ``owner_url``, smallest repositories first.
    """
    all_repos = []
    url = owner_url
    while True:
        logging.debug("Getting repos from: %s" % (url))
        try:
            r = requests.get(url,
                             params=get_payload(),
                             headers=get_headers(token))
            r.raise_for_status()
            all_repos += r.json()
            logging.debug("Rate limit: %s" % (r.headers['X-RateLimit-Remaining']))
            # follow GitHub's pagination links until exhausted
            if 'next' not in r.links:
                break
            url = r.links['next']['url']  # Loving requests :)
        except requests.exceptions.ConnectionError:
            logging.error("Can not connect to GitHub")
            break

    # Remove forks
    nrepos_recent = [repo for repo in all_repos if not repo['fork']]
    # Sort by updated_at and limit to nrepos
    nrepos_sorted = sorted(nrepos_recent,
                           key=lambda repo: parser.parse(repo['updated_at']),
                           reverse=True)
    nrepos_sorted = nrepos_sorted[0:nrepos]
    # First the small repositories to feedback the user quickly
    nrepos_sorted = sorted(nrepos_sorted, key=lambda repo: repo['size'])
    for repo in nrepos_sorted:
        logging.debug("%s %i %s" % (repo['updated_at'], repo['size'], repo['name']))
    return nrepos_sorted
owner could be an org or a user
368
8
18,133
def publish_twitter(twitter_contact, owner):
    """Publish in Twitter that the dashboard for ``owner`` is ready,
    mentioning ``twitter_contact``.
    """
    dashboard_url = CAULDRON_DASH_URL + "/%s" % (owner)
    message = "@%s your http://cauldron.io dashboard for #%s at GitHub is ready: %s. Check it out! #oscon" % (twitter_contact, owner, dashboard_url)
    endpoint = "https://api.twitter.com/1.1/statuses/update.json?status=" + quote_plus(message)
    requests.post(url=endpoint, auth=get_oauth())
Publish in twitter the dashboard
134
6
18,134
def get_perceval_params_from_url(cls, urls):
    """Get the perceval params given the URLs for the data source.

    Delegates to ``get_arthur_params_from_url`` and extracts the URL.
    """
    arthur_params = cls.get_arthur_params_from_url(urls)
    return [arthur_params["url"]]
Get the perceval params given the URLs for the data source
58
12
18,135
def add_identity(cls, db, identity, backend):
    """Load one identity into SortingHat, returning its uuid.

    Also initializes the profile and, when a company is present, adds
    the organization and an open-ended enrollment.  All failures are
    logged and swallowed so a bad identity never aborts a load.
    """
    uuid = None
    try:
        uuid = api.add_identity(db, backend, identity['email'],
                                identity['name'], identity['username'])
        logger.debug("New sortinghat identity %s %s,%s,%s ",
                     uuid, identity['username'], identity['name'], identity['email'])
        # profile name falls back to the username when the name is empty
        profile = {"name": identity['name'] if identity['name'] else identity['username'],
                   "email": identity['email']}
        api.edit_profile(db, uuid, **profile)
    except AlreadyExistsError as ex:
        # identity already stored: reuse its uuid
        uuid = ex.eid
    except InvalidValueError as ex:
        logger.warning("Trying to add a None identity. Ignoring it.")
    except UnicodeEncodeError as ex:
        logger.warning("UnicodeEncodeError. Ignoring it. %s %s %s",
                       identity['email'], identity['name'], identity['username'])
    except Exception as ex:
        logger.warning("Unknown exception adding identity. Ignoring it. %s %s %s",
                       identity['email'], identity['name'], identity['username'],
                       exc_info=True)

    if 'company' in identity and identity['company'] is not None:
        try:
            api.add_organization(db, identity['company'])
            # effectively unbounded enrollment period
            api.add_enrollment(db, uuid, identity['company'],
                               datetime(1900, 1, 1), datetime(2100, 1, 1))
        except AlreadyExistsError:
            pass

    return uuid
Load an identity list from the backend into SortingHat.
387
10
18,136
def add_identities(cls, db, identities, backend):
    """Load an identities list from ``backend`` into SortingHat.

    Each identity is added independently; failures are logged and
    skipped so one bad identity never aborts the whole load.
    """
    logger.info("Adding the identities to SortingHat")

    total = 0
    for identity in identities:
        try:
            cls.add_identity(db, identity, backend)
            total += 1
        except Exception as e:
            # Fix: original message misspelled "Unexpected" as "Unexcepted"
            # and eagerly %-formatted the logger argument.
            logger.error("Unexpected error when adding identities: %s", e)
            continue

    logger.info("Total identities added to SH: %i", total)
Load identities list from backend in Sorting Hat
100
9
18,137
def remove_identity(cls, sh_db, ident_id):
    """Delete an identity from SortingHat.

    Returns True on success, False when the deletion failed.
    """
    try:
        api.delete_identity(sh_db, ident_id)
        logger.debug("Identity %s deleted", ident_id)
        return True
    except Exception as err:
        logger.debug("Identity not deleted due to %s", str(err))
        return False
Delete an identity from SortingHat .
84
8
18,138
def remove_unique_identity(cls, sh_db, uuid):
    """Delete a unique identity from SortingHat.

    Returns True on success, False when the deletion failed.
    """
    try:
        api.delete_unique_identity(sh_db, uuid)
        logger.debug("Unique identity %s deleted", uuid)
        return True
    except Exception as err:
        logger.debug("Unique identity not deleted due to %s", str(err))
        return False
Delete a unique identity from SortingHat .
85
9
18,139
def unique_identities(cls, sh_db):
    """Yield the unique identities available in SortingHat.

    Any SortingHat failure ends the iteration after logging it.
    """
    try:
        yield from api.unique_identities(sh_db)
    except Exception as err:
        logger.debug("Unique identities not returned from SortingHat due to %s", str(err))
List the unique identities available in SortingHat .
68
10
18,140
def get_rich_events(self, item):
    """Yield one enriched event per release of a Puppet module item."""
    if not item['data']['releases']:
        return []
    for release in item['data']['releases']:
        event = self.get_rich_item(item)
        release_meta = release['metadata']
        owner = release['module']['owner']
        # Update specific fields for this release
        event["uuid"] += "_" + release['slug']
        event["author_url"] = 'https://forge.puppet.com/' + owner['username']
        event["gravatar_id"] = owner['gravatar_id']
        event["downloads"] = release['downloads']
        event["slug"] = release['slug']
        event["version"] = release['version']
        event["uri"] = release['uri']
        event["validation_score"] = release['validation_score']
        event["homepage_url"] = None
        if 'project_page' in release_meta:
            event["homepage_url"] = release_meta['project_page']
        event["issues_url"] = None
        if "issues_url" in release_meta:
            event["issues_url"] = release_meta['issues_url']
        event["tags"] = release['tags']
        event["license"] = release_meta['license']
        event["source_url"] = release_meta['source']
        event["summary"] = release_meta['summary']
        event["metadata__updated_on"] = parser.parse(release['updated_at']).isoformat()
        if self.sortinghat:
            # Needed in get_item_sh logic
            release["metadata__updated_on"] = event["metadata__updated_on"]
            event.update(self.get_item_sh(release))
        if self.prjs_map:
            event.update(self.get_item_project(event))
        event.update(self.get_grimoire_fields(release["created_at"], "release"))
        yield event
Get the enriched events related to a module
531
8
18,141
def _connect(self):
    """Connect to the MySQL database.

    :returns: a (connection, cursor) pair
    :raises Exception: re-raised after logging on any connection failure
    """
    try:
        conn = pymysql.connect(user=self.user, passwd=self.passwd,
                               host=self.host, port=self.port,
                               db=self.shdb, use_unicode=True)
        return conn, conn.cursor()
    except Exception:
        logger.error("Database connection error")
        raise
Connect to the MySQL database .
80
6
18,142
def refresh_identities(enrich_backend, author_field=None, author_values=None):
    """Refresh identity fields in an enriched index, yielding updated items.

    When author_field/author_values are given, only items matching those
    author values are refreshed, in batches bounded by the backend's
    max_items_clause.
    """
    def _refreshed(author_filter):
        # Fetch items under the filter and merge fresh SH identity fields
        for eitem in enrich_backend.fetch(author_filter):
            roles = getattr(enrich_backend, 'roles', None)
            eitem.update(enrich_backend.get_item_sh_from_id(eitem, roles))
            yield eitem

    logger.debug("Refreshing identities fields from %s",
                 enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
    total = 0
    max_ids = enrich_backend.elastic.max_items_clause
    logger.debug('Refreshing identities')
    if author_field is None:
        # No filter, update all items
        for item in _refreshed(None):
            yield item
            total += 1
    else:
        batch = []
        for author_value in author_values:
            batch.append(author_value)
            if len(batch) > max_ids:
                for item in _refreshed({"name": author_field, "value": batch}):
                    yield item
                    total += 1
                batch = []
        if batch:
            for item in _refreshed({"name": author_field, "value": batch}):
                yield item
                total += 1
    logger.info("Total eitems refreshed for identities fields %i", total)
Refresh identities in enriched index .
366
7
18,143
def get_ocean_backend(backend_cmd, enrich_backend, no_incremental,
                      filter_raw=None, filter_raw_should=None):
    """Get the ocean backend configured to start from the last enriched date.

    The resume point (from_date or offset) is chosen from the backend.fetch
    signature; for non-perceval backends a --from-date backend param is used.
    """
    if no_incremental:
        last_enrich = None
    else:
        last_enrich = get_last_enrich(backend_cmd, enrich_backend, filter_raw=filter_raw)
    logger.debug("Last enrichment: %s", last_enrich)

    backend = None
    connector = get_connectors()[enrich_backend.get_connector_name()]

    if backend_cmd:
        backend_cmd = init_backend(backend_cmd)
        backend = backend_cmd.backend
        signature = inspect.signature(backend.fetch)
        if 'from_date' in signature.parameters:
            ocean_backend = connector[1](backend, from_date=last_enrich)
        elif 'offset' in signature.parameters:
            ocean_backend = connector[1](backend, offset=last_enrich)
        elif last_enrich:
            ocean_backend = connector[1](backend, from_date=last_enrich)
        else:
            ocean_backend = connector[1](backend)
    else:
        # We can have params for non perceval backends also
        params = enrich_backend.backend_params
        if params:
            try:
                date_pos = params.index('--from-date')
                last_enrich = parser.parse(params[date_pos + 1])
            except ValueError:
                pass
        if last_enrich:
            ocean_backend = connector[1](backend, from_date=last_enrich)
        else:
            ocean_backend = connector[1](backend)

    if filter_raw:
        ocean_backend.set_filter_raw(filter_raw)
    if filter_raw_should:
        ocean_backend.set_filter_raw_should(filter_raw_should)
    return ocean_backend
Get the ocean backend configured to start from the last enriched date
422
12
18,144
def do_studies(ocean_backend, enrich_backend, studies_args, retention_time=None):
    """Execute the studies of a given enrich backend.

    If retention_time is not None, data in the study output indexes older
    than that number of minutes is deleted afterwards.
    """
    for study in enrich_backend.studies:
        selected = [(s['name'], s['params'])
                    for s in studies_args if s['type'] == study.__name__]
        for name, params in selected:
            logger.info("Starting study: %s, params %s", name, str(params))
            try:
                study(ocean_backend, enrich_backend, **params)
            except Exception as e:
                logger.error("Problem executing study %s, %s", name, str(e))
                raise e
            # Onion recreates its index every week, so retention is skipped
            if name.startswith('enrich_onion'):
                continue
            # Apply retention to any output index the study created
            for index_param in [p for p in params if 'out_index' in p]:
                elastic = get_elastic(enrich_backend.elastic_url, params[index_param])
                elastic.delete_items(retention_time)
Execute studies related to a given enrich backend . If retention_time is not None the study data is deleted based on the number of minutes declared in retention_time .
268
34
18,145
def delete_inactive_unique_identities(es, sortinghat_db, before_date):
    """Select the unique identities with last_seen <= before_date and delete
    them from SortingHat, scrolling over the identities index.

    :param es: Elasticsearch client
    :param sortinghat_db: SortingHat database handler
    :param before_date: ISO date string; identities not seen after it are removed
    """
    page = es.search(index=IDENTITIES_INDEX, scroll="360m",
                     size=SIZE_SCROLL_IDENTITIES_INDEX,
                     body={"query": {"range": {"last_seen": {"lte": before_date}}}})
    sid = page['_scroll_id']
    scroll_size = page['hits']['total']
    if scroll_size == 0:
        # Use the module logger (the original used the root logger here,
        # inconsistently with every other message in this function)
        logger.warning("[identities retention] No inactive identities found in %s after %s!",
                       IDENTITIES_INDEX, before_date)
        return

    count = 0
    while scroll_size > 0:
        for item in page['hits']['hits']:
            to_delete = item['_source']['sh_uuid']
            # Count only identities actually deleted
            if SortingHat.remove_unique_identity(sortinghat_db, to_delete):
                count += 1
        page = es.scroll(scroll_id=sid, scroll='60m')
        sid = page['_scroll_id']
        scroll_size = len(page['hits']['hits'])

    logger.debug("[identities retention] Total inactive identities deleted from SH: %i", count)
Select the unique identities not seen after before_date and delete them from SortingHat .
323
18
18,146
def retain_identities(retention_time, es_enrichment_url, sortinghat_db,
                      data_source, active_data_sources):
    """Delete from SortingHat the unique identities older than retention_time
    (in minutes) plus the orphan unique identities of a data source (those
    stored in SortingHat but missing from IDENTITIES_INDEX).
    """
    before_date_str = get_diff_current_date(minutes=retention_time).isoformat()
    es = Elasticsearch([es_enrichment_url], timeout=120, max_retries=20,
                       retry_on_timeout=True, verify_certs=False)
    # delete the unique identities which have not been seen after `before_date`
    delete_inactive_unique_identities(es, sortinghat_db, before_date_str)
    # delete the unique identities for a given data source which are not in the IDENTITIES_INDEX
    delete_orphan_unique_identities(es, sortinghat_db, data_source, active_data_sources)
Select the unique identities not seen before retention_time and delete them from SortingHat . Furthermore it deletes also the orphan unique identities those ones stored in SortingHat but not in IDENTITIES_INDEX .
197
44
18,147
def init_backend(backend_cmd):
    """Instantiate the backend inside backend_cmd if it is not created yet."""
    if not hasattr(backend_cmd, 'backend'):
        parsed_args = vars(backend_cmd.parsed_args)
        init_args = find_signature_parameters(backend_cmd.BACKEND, parsed_args)
        backend_cmd.backend = backend_cmd.BACKEND(**init_args)
    return backend_cmd
Init backend within the backend_cmd
84
7
18,148
def safe_index(cls, unique_id):
    """Return a valid elastic index name generated from unique_id."""
    if not unique_id:
        return unique_id
    return unique_id.replace("/", "_").lower()
Return a valid elastic index generated from unique_id
43
10
18,149
def _check_instance(url, insecure):
    """Check that an Elasticsearch instance answers at url.

    :returns: the major version number as a string
    :raises ElasticConnectException: on non-200 or unreadable welcome message
    """
    res = grimoire_con(insecure).get(url)
    if res.status_code != 200:
        logger.error("Didn't get 200 OK from url %s", url)
        raise ElasticConnectException
    try:
        version_str = res.json()['version']['number']
        return version_str.split('.')[0]
    except Exception:
        logger.error("Could not read proper welcome message from url %s",
                     ElasticSearch.anonymize_url(url))
        logger.error("Message read: %s", res.text)
        raise ElasticConnectException
Checks if there is an instance of Elasticsearch in url .
151
13
18,150
def safe_put_bulk(self, url, bulk_json):
    """Bulk PUT items to ES, controlling unicode issues.

    :param url: bulk endpoint URL
    :param bulk_json: ndjson payload
    :returns: number of items successfully inserted
    """
    headers = {"Content-Type": "application/x-ndjson"}
    try:
        res = self.requests.put(url + '?refresh=true', data=bulk_json, headers=headers)
        res.raise_for_status()
    except UnicodeEncodeError:
        # Related to body.encode('iso-8859-1'). mbox data
        logger.error("Encondig error ... converting bulk to iso-8859-1")
        bulk_json = bulk_json.encode('iso-8859-1', 'ignore')
        res = self.requests.put(url, data=bulk_json, headers=headers)
        res.raise_for_status()

    result = res.json()
    failed_items = []
    if result['errors']:
        # Due to multiple errors that may be thrown when inserting bulk data,
        # only the first error is returned
        failed_items = [item['index'] for item in result['items']
                        if 'error' in item['index']]
        error = str(failed_items[0]['error'])
        logger.error("Failed to insert data to ES: %s, %s", error, self.anonymize_url(url))

    inserted_items = len(result['items']) - len(failed_items)

    # NOTE: failures are logged but no exception is raised, to avoid stopping
    # ocean uploading processes. (A raise-ELKError-then-swallow block that had
    # no effect was removed here.)
    logger.debug("%i items uploaded to ES (%s)", inserted_items, self.anonymize_url(url))
    return inserted_items
Bulk PUT controlling unicode issues
381
8
18,151
def all_es_aliases(self):
    """List all aliases used in ES.

    :returns: list of alias names, or None if the request failed
    """
    r = self.requests.get(self.url + "/_aliases", headers=HEADER_JSON, verify=False)
    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError as ex:
        logger.warning("Something went wrong when retrieving aliases on %s.",
                       self.anonymize_url(self.index_url))
        logger.warning(ex)
        return

    # Parse the response once; the original re-parsed r.json() on every
    # loop iteration (twice per index).
    indexes = r.json()
    aliases = set()
    for index in indexes:
        aliases.update(indexes[index]['aliases'].keys())
    return list(aliases)
List all aliases used in ES
152
6
18,152
def list_aliases(self):
    """List the aliases linked to the index.

    :returns: the aliases dict, or None if the request failed
    """
    r = self.requests.get(self.index_url + "/_alias", headers=HEADER_JSON, verify=False)
    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError as ex:
        logger.warning("Something went wrong when retrieving aliases on %s.",
                       self.anonymize_url(self.index_url))
        logger.warning(ex)
        return
    return r.json()[self.index]['aliases']
List aliases linked to the index
123
6
18,153
def bulk_upload(self, items, field_id):
    """Upload items to ES in controlled packs using the bulk API.

    :param items: iterable of dicts to index
    :param field_id: item key used as the document _id
    :returns: total number of items added
    """
    if not items:
        return 0

    new_items = 0  # total items added with bulk
    current = 0
    bulk_json = ""
    url = self.index_url + '/items/_bulk'
    logger.debug("Adding items to %s (in %i packs)",
                 self.anonymize_url(url), self.max_items_bulk)
    task_init = time()

    for item in items:
        if current >= self.max_items_bulk:
            task_init = time()
            new_items += self.safe_put_bulk(url, bulk_json)
            current = 0
            json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
            logger.debug("bulk packet sent (%.2f sec, %i total, %.2f MB)"
                         % (time() - task_init, new_items, json_size))
            bulk_json = ""
        bulk_json += '{"index" : {"_id" : "%s" } }\n' % (item[field_id])
        bulk_json += json.dumps(item) + "\n"  # Bulk document
        current += 1

    if current > 0:
        new_items += self.safe_put_bulk(url, bulk_json)
        json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
        logger.debug("bulk packet sent (%.2f sec prev, %i total, %.2f MB)"
                     % (time() - task_init, new_items, json_size))

    return new_items
Upload in controlled packs items to ES using bulk API
364
10
18,154
def all_properties(self):
    """Get all properties of the index mapping.

    :returns: properties dict ({} when missing), or None on HTTP error
    """
    r = self.requests.get(self.index_url + "/_mapping", headers=HEADER_JSON, verify=False)
    try:
        r.raise_for_status()
        mappings = r.json()[self.index]['mappings']
        if 'items' not in mappings or 'properties' not in mappings['items']:
            return {}
        return mappings['items']['properties']
    except requests.exceptions.HTTPError as ex:
        logger.error("Error all attributes for %s.", self.anonymize_url(self.index_url))
        logger.error(ex)
        return
Get all properties of a given index
186
7
18,155
def get_kibiter_version(url):
    """Return the kibiter major version number, or None if it can't be read."""
    # Avoid having // in the URL because ES will fail
    if url[-1] != '/':
        url += "/"
    r = requests.get(url + '.kibana/config/_search')
    r.raise_for_status()
    hits = r.json()['hits']['hits']
    if not hits:
        logger.error("Can not get the Kibiter version")
        return None
    version = hits[0]['_id']  # e.g. 5.4.0-SNAPSHOT
    return version.split(".", 1)[0]
Return kibiter major number version
172
7
18,156
def get_params():
    """Get params definition from ElasticOcean and from all the backends.

    Exits with an error when collecting from raw and no index name was given.
    """
    args = get_params_parser().parse_args()
    collecting_from_raw = not (args.enrich_only or args.only_identities or args.only_studies)
    if collecting_from_raw and not args.index:
        # The raw index name is mandatory in this mode
        print("[error] --index <name> param is required when collecting items from raw")
        sys.exit(1)
    return args
Get params definition from ElasticOcean and from all the backends
92
12
18,157
def get_time_diff_days(start_txt, end_txt):
    """Number of days between two date strings, rounded to 2 decimals.

    :returns: float number of days, or None when either input is None
    """
    if start_txt is None or end_txt is None:
        return None
    start = parser.parse(start_txt)
    end = parser.parse(end_txt)
    seconds_per_day = float(60 * 60 * 24)
    return float('%.2f' % ((end - start).total_seconds() / seconds_per_day))
Number of days between two days
104
6
18,158
def enrich_fields(cls, fields, eitem):
    """Enrich eitem with story points and sprint data taken from the
    customfield_* entries of a JIRA issue's fields property."""
    for field in fields:
        if not field.startswith('customfield_'):
            continue
        custom = fields[field]
        if type(custom) is not dict or 'name' not in custom:
            continue
        if custom['name'] == "Story Points":
            eitem['story_points'] = custom['value']
        elif custom['name'] == "Sprint":
            value = custom['value']
            if value:
                raw = value[0]
                eitem['sprint'] = raw.partition(",name=")[2].split(',')[0]
                eitem['sprint_start'] = cls.fix_value_null(
                    raw.partition(",startDate=")[2].split(',')[0])
                eitem['sprint_end'] = cls.fix_value_null(
                    raw.partition(",endDate=")[2].split(',')[0])
                eitem['sprint_complete'] = cls.fix_value_null(
                    raw.partition(",completeDate=")[2].split(',')[0])
Enrich the fields property of an issue .
311
9
18,159
def get_review_sh(self, revision, item):
    """Add sorting hat enrichment fields for the author of the revision."""
    identity = self.get_sh_identity(revision)
    update = parser.parse(item[self.get_field_date()])
    return self.get_item_sh_fields(identity, update)
Add sorting hat enrichment fields for the author of the revision
68
11
18,160
def get_github_cache(self, kind, key_):
    """Get cache data for github items of a kind from ES, keyed by key_.

    Pages through results using from/size parameters.
    """
    cache = {}
    res_size = 100  # best size?
    from_ = 0
    url = self.elastic.url + "/" + "github/" + kind + "/_search" + "?" + "size=%i" % res_size
    r = self.requests.get(url)
    type_items = r.json()
    if 'hits' not in type_items:
        logger.info("No github %s data in ES" % (kind))
        return cache
    while len(type_items['hits']['hits']) > 0:
        for hit in type_items['hits']['hits']:
            item = hit['_source']
            cache[item[key_]] = item
        from_ += res_size
        r = self.requests.get(url + "&from=%i" % from_)
        type_items = r.json()
        if 'hits' not in type_items:
            break
    return cache
Get cache data for items of _type using key_ as the cache dict key
235
16
18,161
def get_time_to_first_attention(self, item):
    """Get the first date at which a comment or reaction was made to the
    issue by someone other than the user who created the issue."""
    creator = item['user']['login']
    dates = [str_to_datetime(c['created_at']) for c in item['comments_data']
             if creator != c['user']['login']]
    dates += [str_to_datetime(r['created_at']) for r in item['reactions_data']
              if creator != r['user']['login']]
    return min(dates) if dates else None
Get the first date at which a comment or reaction was made to the issue by someone other than the user who created the issue
156
25
18,162
def get_time_to_merge_request_response(self, item):
    """Get the first date at which a review was made on the PR by someone
    other than the user who created the PR."""
    creator = item['user']['login']
    dates = [str_to_datetime(r['created_at']) for r in item['review_comments_data']
             if creator != r['user']['login']]
    return min(dates) if dates else None
Get the first date at which a review was made on the PR by someone other than the user who created the PR
92
23
18,163
def get_rich_events(self, item):
    """Yield one download event per version-download sample of a crate item.

    Common fields are copied from the enriched crate so the same filters
    work on both the crate and its download events.
    """
    if "version_downloads_data" not in item['data']:
        return []
    base = self.get_rich_item(item)  # values shared with the crate item
    for sample in item['data']["version_downloads_data"]["version_downloads"]:
        event = deepcopy(base)
        event['download_sample_id'] = sample['id']
        event['sample_date'] = sample['date']
        event['sample_version'] = sample['version']
        event['sample_downloads'] = sample['downloads']
        sample_date = parser.parse(event['sample_date'])
        event.update(self.get_grimoire_fields(sample_date.isoformat(), "downloads_event"))
        yield event
In the events there are some common fields with the crate . The name of the field must be the same in the create and in the downloads event so we can filer using it in crate and event at the same time .
203
45
18,164
def get_item_project(self, eitem):
    """Get the project mapping enrichment field from the item's hashtags.

    :returns: dict with "project" and project level fields, or {} when the
        data source has no entry in the projects map
    """
    eitem_project = {}
    ds_name = self.get_connector_name()  # data source name in projects map
    if ds_name not in self.prjs_map:
        return eitem_project
    # lcanas: hashtag provided in projects.json file should not be case sensitive T6876
    # Build the case-insensitive lookup once (the original rebuilt it on
    # every loop iteration).
    tags2project = CaseInsensitiveDict(self.prjs_map[ds_name])
    project = None
    for tag in eitem['hashtags_analyzed']:
        if tag in tags2project:
            project = tags2project[tag]
            break
    if project is None:
        project = DEFAULT_PROJECT
    eitem_project = {"project": project}
    eitem_project.update(self.add_project_levels(project))
    return eitem_project
Get project mapping enrichment field .
185
6
18,165
def get_fields_from_job_name(self, job_name):
    """Analyze a Jenkins job name, producing a dict of OPNFV-style fields
    (category, installer, scenario, testproject, pod, loop, branch)."""
    extra_fields = {
        'category': None,
        'installer': None,
        'scenario': None,
        'testproject': None,
        'pod': None,
        'loop': None,
        'branch': None
    }
    try:
        components = job_name.split('-')
        if len(components) < 2:
            return extra_fields
        kind = components[1]
        if kind == 'os':
            extra_fields['category'] = 'parent/main'
            extra_fields['installer'] = components[0]
            extra_fields['scenario'] = '-'.join(components[2:-3])
        elif kind == 'deploy':
            extra_fields['category'] = 'deploy'
            extra_fields['installer'] = components[0]
        else:
            extra_fields['category'] = 'test'
            extra_fields['testproject'] = components[0]
            extra_fields['installer'] = components[1]
        extra_fields['pod'] = components[-3]
        extra_fields['loop'] = components[-2]
        extra_fields['branch'] = components[-1]
    except IndexError as ex:
        # Just DEBUG level because it is just for OPNFV
        logger.debug('Problems parsing job name %s', job_name)
        logger.debug(ex)
    return extra_fields
Analyze a Jenkins job name producing a dictionary
326
9
18,166
def extract_builton(self, built_on, regex):
    """Extract the node name from built_on using a regular expression.

    The node name is expected to be capture group 1; when the pattern does
    not match, built_on is returned as-is (with a warning).
    """
    match = re.compile(regex, re.M | re.I).search(built_on)
    if match and len(match.groups()) >= 1:
        return match.group(1)
    msg = "Node name not extracted, using builtOn as it is: " + regex + ":" + built_on
    logger.warning(msg)
    return built_on
Extracts node name using a regular expression . Node name is expected to be group 1 .
109
19
18,167
def onion_study(in_conn, out_conn, data_source):
    """Build an onion index from a given Git index.

    :returns: number of documents written
    """
    study = OnionStudy(in_connector=in_conn, out_connector=out_conn,
                       data_source=data_source)
    return study.analyze()
Build an index for onion from a given Git index .
60
11
18,168
def read_block(self, size=None, from_date=None):
    """Read author commits by Quarter, Org and Project.

    Incremental mode is NOT supported: all quarters are always processed.
    Yields one DataFrame per (timeframe, org, project) combination.
    """
    for quarter in self.__quarters():
        logger.info(self.__log_prefix + " Quarter: " + str(quarter))
        date_range = {self._timeframe_field: {'gte': quarter.start_time,
                                              'lte': quarter.end_time}}
        orgs = self.__list_uniques(date_range, self.AUTHOR_ORG)
        projects = self.__list_uniques(date_range, self.PROJECT)

        # Global data
        response = self.__build_search(date_range).execute()
        for timing in response.aggregations[self.TIMEFRAME].buckets:
            yield self.__build_dataframe(timing).copy()

        # Global data split by Org
        for org_name in orgs:
            logger.info(self.__log_prefix + " Quarter: " + str(quarter)
                        + " Org: " + org_name)
            response = self.__build_search(date_range, org_name=org_name).execute()
            for timing in response.aggregations[self.TIMEFRAME].buckets:
                yield self.__build_dataframe(timing, org_name=org_name).copy()

        # Project specific data
        for project in projects:
            logger.info(self.__log_prefix + " Quarter: " + str(quarter)
                        + " Project: " + project)
            # Whole project
            response = self.__build_search(date_range, project_name=project).execute()
            for timing in response.aggregations[self.TIMEFRAME].buckets:
                yield self.__build_dataframe(timing, project_name=project).copy()

            # Project split by Org
            for org_name in orgs:
                logger.info(self.__log_prefix + " Quarter: " + str(quarter)
                            + " Project: " + project + " Org: " + org_name)
                response = self.__build_search(date_range, project_name=project,
                                               org_name=org_name).execute()
                for timing in response.aggregations[self.TIMEFRAME].buckets:
                    yield self.__build_dataframe(timing, project_name=project,
                                                 org_name=org_name).copy()
Read author commits by Quarter Org and Project .
551
10
18,169
def __quarters(self, from_date=None):
    """Get the quarters with available items from the index as pandas Periods."""
    s = Search(using=self._es_conn, index=self._es_index)
    if from_date:
        # Work around to solve conversion problem of '__' to '.' in field name
        q = Q('range')
        q.__setattr__(self._sort_on_field, {'gte': from_date})
        s = s.filter(q)
    s = s[0:0]  # from:to parameters (=> from: 0, size: 0)
    s.aggs.bucket(self.TIMEFRAME, 'date_histogram', field=self._timeframe_field,
                  interval='quarter', min_doc_count=1)
    response = s.execute()
    return [pandas.Period(bucket.key_as_string, 'Q')
            for bucket in response.aggregations[self.TIMEFRAME].buckets]
Get a set of quarters with available items from a given index date .
222
14
18,170
def __list_uniques(self, date_range, field_name):
    """Retrieve the unique values of a field within a date range."""
    s = Search(using=self._es_conn, index=self._es_index)
    s = s.filter('range', **date_range)
    s = s[0:0]  # from:to parameters (=> from: 0, size: 0)
    s.aggs.bucket('uniques', 'terms', field=field_name, size=1000)
    response = s.execute()
    return [bucket.key for bucket in response.aggregations.uniques.buckets]
Retrieve a list of unique values in a given field within a date range .
152
16
18,171
def __build_dataframe(self, timing, project_name=None, org_name=None):
    """Build a DataFrame with one row per author in a time bucket."""
    logger.debug(self.__log_prefix + " timing: " + timing.key_as_string)

    dates, uuids, names, contribs, latest_ts = [], [], [], [], []
    for author in timing[self.AUTHOR_UUID].buckets:
        latest_ts.append(timing[self.LATEST_TS].value_as_string)
        dates.append(timing.key_as_string)
        uuids.append(author.key)
        name_agg = author[self.AUTHOR_NAME]
        if name_agg and name_agg.buckets and len(name_agg.buckets) > 0:
            names.append(name_agg.buckets[0].key)
        else:
            names.append("Unknown")
        contribs.append(author[self.CONTRIBUTIONS].value)

    df = pandas.DataFrame()
    df[self.TIMEFRAME] = dates
    df[self.AUTHOR_UUID] = uuids
    df[self.AUTHOR_NAME] = names
    df[self.CONTRIBUTIONS] = contribs
    df[self.TIMESTAMP] = latest_ts
    df[self.PROJECT] = project_name if project_name else "_Global_"
    df[self.AUTHOR_ORG] = org_name if org_name else "_Global_"
    return df
Build a DataFrame from a time bucket .
368
9
18,172
def process(self, items_block):
    """Process a DataFrame of authors to compute Onion.

    :returns: ProcessResults with the number of processed rows and the
        enriched DataFrame
    """
    logger.info(self.__log_prefix + " Authors to process: " + str(len(items_block)))
    df_onion = Onion(items_block).enrich(member_column=ESOnionConnector.AUTHOR_UUID,
                                         events_column=ESOnionConnector.CONTRIBUTIONS)
    # Get and store Quarter as String
    df_onion['quarter'] = df_onion[ESOnionConnector.TIMEFRAME].map(
        lambda x: str(pandas.Period(x, 'Q')))
    # Add metadata: enriched on timestamp
    df_onion['metadata__enriched_on'] = datetime.utcnow().isoformat()
    df_onion['data_source'] = self.data_source
    df_onion['grimoire_creation_date'] = df_onion[ESOnionConnector.TIMEFRAME]
    logger.info(self.__log_prefix + " Final new events: " + str(len(df_onion)))
    return self.ProcessResults(processed=len(df_onion), out_items=df_onion)
Process a DataFrame to compute Onion .
277
8
18,173
def get_projects(self):
    """Get the projects list from the gerrit projects database."""
    db = Database(user="root", passwd="", host="localhost", port=3306,
                  scrdb=None, shdb=self.projects_db, prjdb=None)
    sql = """
        SELECT DISTINCT(repository_name)
        FROM project_repositories
        WHERE data_source='scr'
    """
    repos_list = []
    for repo in db.execute(sql):
        # Convert from review.openstack.org_openstack/rpm-packaging-tools to
        # openstack_rpm-packaging-tools
        repos_list.append(repo[0].replace(self.repository + "_", ""))
    return repos_list
Get the projects list from database
215
6
18,174
def metadata(func):
    """Decorator that adds enrichment metadata (gelk version, backend name
    and enrichment timestamp) to the item returned by func."""
    @functools.wraps(func)
    def decorator(self, *args, **kwargs):
        eitem = func(self, *args, **kwargs)
        eitem.update({
            'metadata__gelk_version': self.gelk_version,
            'metadata__gelk_backend_name': self.__class__.__name__,
            'metadata__enriched_on': datetime_utcnow().isoformat()
        })
        return eitem
    return decorator
Add metadata to an item .
124
6
18,175
def get_grimoire_fields(self, creation_date, item_name):
    """Return the common grimoire fields for all data sources:
    the creation date and an is_<connector>_<item_name> flag."""
    try:
        grimoire_date = str_to_datetime(creation_date).isoformat()
    except Exception:
        grimoire_date = None
    flag_name = "is_" + self.get_connector_name() + "_" + item_name
    return {"grimoire_creation_date": grimoire_date, flag_name: 1}
Return common grimoire fields for all data sources
100
9
18,176
def add_project_levels(cls, project):
    """Build the project sub-level extra fields for a dotted project name.

    'a.b.c' -> {'project_1': 'a', 'project_2': 'a.b', 'project_3': 'a.b.c'}
    """
    eitem_project_levels = {}
    if project is not None:
        path = ''
        for i, sub in enumerate(project.split('.')):
            path = sub if i == 0 else path + "." + sub
            eitem_project_levels['project_' + str(i + 1)] = path
    return eitem_project_levels
Add project sub levels extra items
111
6
18,177
def get_item_metadata(self, eitem):
    """Return the custom metadata fields defined for the item's project.

    In projects.json each project may carry a 'meta' dict whose fields are
    added (prefixed) to the enriched items of that project.
    """
    # Get the project entry for the item, which includes the metadata
    project = self.find_item_project(eitem)
    if not project or 'meta' not in self.json_projects[project]:
        return {}
    meta_fields = self.json_projects[project]['meta']
    if not isinstance(meta_fields, dict):
        return {}
    return {CUSTOM_META_PREFIX + "_" + field: value
            for field, value in meta_fields.items()}
In the projects . json file inside each project there is a field called meta which has a dictionary with fields to be added to the enriched items for this project .
131
32
18,178
def get_domain(self, identity):
    """Get the email domain from a SortingHat identity, or None."""
    if not identity['email']:
        return None
    try:
        return identity['email'].split("@")[1]
    except IndexError:
        # Bad email format: no '@' present
        return None
Get the domain from a SH identity
67
7
18,179
def get_enrollment(self, uuid, item_date):
    """Get the organization the uuid was enrolled in when the item was done.

    Falls back to the unaffiliated group when no enrollment matches.
    """
    # item_date must be offset-naive (utc)
    if item_date and item_date.tzinfo:
        item_date = (item_date - item_date.utcoffset()).replace(tzinfo=None)
    enrollments = self.get_enrollments(uuid)
    enroll = self.unaffiliated_group
    if enrollments:
        for enrollment in enrollments:
            if not item_date:
                enroll = enrollment.organization.name
                break
            if enrollment.start <= item_date <= enrollment.end:
                enroll = enrollment.organization.name
                break
    return enroll
Get the enrollment for the uuid when the item was done
146
12
18,180
def __get_item_sh_fields_empty(self, rol, undefined=False):
    """Return a SH identity dict with all fields set to an empty value.

    If the empty value were None the fields would not appear in index
    patterns; '-- UNDEFINED --' marks identities that could not be resolved.
    """
    empty_field = '-- UNDEFINED --' if undefined else ''
    return {
        rol + "_id": empty_field,
        rol + "_uuid": empty_field,
        rol + "_name": empty_field,
        rol + "_user_name": empty_field,
        rol + "_domain": empty_field,
        rol + "_gender": empty_field,
        rol + "_gender_acc": None,
        rol + "_org_name": empty_field,
        rol + "_bot": False
    }
Return a SH identity with all fields to empty_field
159
11
18,181
def get_item_sh_fields(self, identity=None, item_date=None, sh_id=None,
                       rol='author'):
    """Get standard SortingHat fields for one role from a SH identity.

    Produces '<rol>_id', '<rol>_uuid', '<rol>_name', '<rol>_user_name',
    '<rol>_domain', '<rol>_gender', '<rol>_gender_acc', '<rol>_org_name'
    and '<rol>_bot' fields.

    :param identity: identity dict (name/email/username) used to look up
        the SortingHat identity; takes precedence over sh_id
    :param item_date: datetime of the item, used to resolve the enrollment
    :param sh_id: SortingHat id, used when no identity dict is available
    :param rol: role prefix for the generated field names
    :returns: dict of role-prefixed fields; an all-UNDEFINED dict when the
        identity cannot be resolved to a uuid
    """
    # Start from a fully-populated dict of empty values
    eitem_sh = self.__get_item_sh_fields_empty(rol)

    if identity:
        # Use the identity to get the SortingHat identity
        sh_ids = self.get_sh_ids(identity, self.get_connector_name())
        eitem_sh[rol + "_id"] = sh_ids.get('id', '')
        eitem_sh[rol + "_uuid"] = sh_ids.get('uuid', '')
        eitem_sh[rol + "_name"] = identity.get('name', '')
        eitem_sh[rol + "_user_name"] = identity.get('username', '')
        eitem_sh[rol + "_domain"] = self.get_identity_domain(identity)
    elif sh_id:
        # Use the SortingHat id to get the identity
        eitem_sh[rol + "_id"] = sh_id
        eitem_sh[rol + "_uuid"] = self.get_uuid_from_id(sh_id)
    else:
        # No data to get a SH identity. Return an empty one.
        return eitem_sh

    # If the identity does not exist, return an 'undefined' identity
    if rol + "_uuid" not in eitem_sh or not eitem_sh[rol + "_uuid"]:
        return self.__get_item_sh_fields_empty(rol, undefined=True)

    # Prefer the SortingHat profile data over the raw identity data
    profile = self.get_profile_sh(eitem_sh[rol + "_uuid"])
    if profile:
        # If name not in profile, keep its old value (should be empty or
        # the identity's name field value)
        eitem_sh[rol + "_name"] = profile.get('name', eitem_sh[rol + "_name"])
        email = profile.get('email', None)
        if email:
            eitem_sh[rol + "_domain"] = self.get_email_domain(email)
        eitem_sh[rol + "_gender"] = profile.get('gender', self.unknown_gender)
        eitem_sh[rol + "_gender_acc"] = profile.get('gender_acc', 0)
    elif not profile and sh_id:
        logger.warning("Can't find SH identity profile: %s", sh_id)

    # Ensure we always write gender fields, even without a profile
    if not eitem_sh.get(rol + "_gender"):
        eitem_sh[rol + "_gender"] = self.unknown_gender
        eitem_sh[rol + "_gender_acc"] = 0

    eitem_sh[rol + "_org_name"] = self.get_enrollment(eitem_sh[rol + "_uuid"],
                                                      item_date)
    eitem_sh[rol + "_bot"] = self.is_bot(eitem_sh[rol + '_uuid'])

    return eitem_sh
Get standard SH fields from a SH identity
715
8
18,182
def get_item_sh ( self , item , roles = None , date_field = None ) : eitem_sh = { } # Item enriched author_field = self . get_field_author ( ) if not roles : roles = [ author_field ] if not date_field : item_date = str_to_datetime ( item [ self . get_field_date ( ) ] ) else : item_date = str_to_datetime ( item [ date_field ] ) users_data = self . get_users_data ( item ) for rol in roles : if rol in users_data : identity = self . get_sh_identity ( item , rol ) eitem_sh . update ( self . get_item_sh_fields ( identity , item_date , rol = rol ) ) if not eitem_sh [ rol + '_org_name' ] : eitem_sh [ rol + '_org_name' ] = SH_UNKNOWN_VALUE if not eitem_sh [ rol + '_name' ] : eitem_sh [ rol + '_name' ] = SH_UNKNOWN_VALUE if not eitem_sh [ rol + '_user_name' ] : eitem_sh [ rol + '_user_name' ] = SH_UNKNOWN_VALUE # Add the author field common in all data sources rol_author = 'author' if author_field in users_data and author_field != rol_author : identity = self . get_sh_identity ( item , author_field ) eitem_sh . update ( self . get_item_sh_fields ( identity , item_date , rol = rol_author ) ) if not eitem_sh [ 'author_org_name' ] : eitem_sh [ 'author_org_name' ] = SH_UNKNOWN_VALUE if not eitem_sh [ 'author_name' ] : eitem_sh [ 'author_name' ] = SH_UNKNOWN_VALUE if not eitem_sh [ 'author_user_name' ] : eitem_sh [ 'author_user_name' ] = SH_UNKNOWN_VALUE return eitem_sh
Add sorting hat enrichment fields for different roles
490
8
18,183
def get_sh_ids(self, identity, backend_name):
    """Return the SortingHat id and uuid for an identity.

    :param identity: identity dict (name/email/username)
    :param backend_name: name of the backend the identity comes from
    :returns: dict with the SortingHat 'id' and 'uuid'
    """
    # Dicts are unhashable; freeze the identity into a tuple so the
    # cached lookup can key on it
    frozen_identity = tuple(identity.items())
    return self.__get_sh_ids_cache(frozen_identity, backend_name)
Return the Sorting Hat id and uuid for an identity
68
12
18,184
def get_repository_filter_raw(self, term=False):
    """Return the filter used in queries against this repository's items.

    :param term: when True, build the filter as a term query
    """
    backend_name = self.get_connector_name()
    return get_repository_filter(self.perceval_backend, backend_name, term)
Returns the filter to be used in queries in a repository items
68
12
18,185
def set_filter_raw(self, filter_raw):
    """Set the raw filter applied when reading items from the Ocean index.

    The expression is split on FILTER_SEPARATOR and each chunk is parsed
    into a filter dict.
    """
    self.filter_raw = filter_raw
    self.filter_raw_dict = [self.__process_filter(chunk)
                            for chunk in re.split(FILTER_SEPARATOR, filter_raw)]
Filter to be used when getting items from Ocean index
91
10
18,186
def set_filter_raw_should(self, filter_raw_should):
    """Set the bool-should filter applied when reading from the Ocean index.

    The expression is split on FILTER_SEPARATOR and each chunk is parsed
    into a filter dict.
    """
    self.filter_raw_should = filter_raw_should
    self.filter_raw_should_dict = [self.__process_filter(chunk)
                                   for chunk in re.split(FILTER_SEPARATOR,
                                                         filter_raw_should)]
Bool filter should to be used when getting items from Ocean index
105
13
18,187
def fetch(self, _filter=None, ignore_incremental=False):
    """Generator over the items of a raw or enriched index.

    Pages through the index with the Elasticsearch scroll API and yields
    each hit's '_source'.

    :param _filter: optional extra filter applied when querying
    :param ignore_incremental: when True, do not limit results to items
        newer than the last processed date
    """
    logger.debug("Creating a elastic items generator.")

    scroll_id = None

    page = self.get_elastic_items(scroll_id, _filter=_filter,
                                  ignore_incremental=ignore_incremental)

    if not page:
        # Returning from a generator just ends the iteration
        return []

    scroll_id = page["_scroll_id"]
    # NOTE(review): assumes hits.total is an int (Elasticsearch < 7
    # response format) — confirm against the ES version in use
    scroll_size = page['hits']['total']

    if scroll_size == 0:
        logger.warning("No results found from %s",
                       self.elastic.anonymize_url(self.elastic.index_url))
        return

    while scroll_size > 0:
        logger.debug("Fetching from %s: %d received",
                     self.elastic.anonymize_url(self.elastic.index_url),
                     len(page['hits']['hits']))
        for item in page['hits']['hits']:
            eitem = item['_source']
            yield eitem

        # Continue scrolling until a page comes back empty
        page = self.get_elastic_items(scroll_id, _filter=_filter,
                                      ignore_incremental=ignore_incremental)

        if not page:
            break

        scroll_size = len(page['hits']['hits'])

    logger.debug("Fetching from %s: done receiving",
                 self.elastic.anonymize_url(self.elastic.index_url))
Fetch the items from raw or enriched index . An optional _filter could be provided to filter the data collected
319
22
18,188
def find_uuid(es_url, index):
    """Find the unique identifier field for a given index.

    For Perceval-produced indexes this is 'uuid'; otherwise the field
    whose value matches the document '_id' is used. Exits the program
    when no candidate field can be found.
    """
    # Get the first item to detect the data source and raw/enriched type
    res = requests.get('%s/%s/_search?size=1' % (es_url, index))
    first_hit = res.json()['hits']['hits'][0]
    first_item = first_hit['_source']
    fields = first_item.keys()

    uid_field = None
    if 'uuid' in fields:
        uid_field = 'uuid'
    else:
        # Non perceval backend: look for the field holding the doc id
        uuid_value = first_hit['_id']
        logging.debug("Finding unique id for %s with value %s",
                      index, uuid_value)
        for field in fields:
            if first_item[field] == uuid_value:
                logging.debug("Found unique id for %s: %s", index, field)
                uid_field = field
                break

    if not uid_field:
        logging.error("Can not find uid field for %s. Can not copy the index.",
                      index)
        logging.error("Try to copy it directly with elasticdump or similar.")
        sys.exit(1)

    return uid_field
Find the unique identifier field for a given index
285
9
18,189
def find_mapping(es_url, index):
    """Find the Elasticsearch mapping for an index.

    Detects the Perceval backend behind the index and returns its
    mapping, or None when no backend is recognized.
    """
    backend = find_perceval_backend(es_url, index)
    mapping = backend.get_elastic_mappings() if backend else None

    if mapping:
        logging.debug("MAPPING FOUND:\n%s",
                      json.dumps(json.loads(mapping['items']), indent=True))
    return mapping
Find the mapping given an index
91
6
18,190
def get_elastic_items(elastic, elastic_scroll_id=None, limit=None):
    """Fetch one page of items from the index.

    When a scroll id is given the existing scroll is continued; otherwise
    a new scrolled search is started.

    :returns: the parsed JSON response, or None when `elastic` is not set
        or the response body is not valid JSON.
    """
    page_size = limit if limit else DEFAULT_LIMIT

    if not elastic:
        return None

    scroll_keepalive = "5m"  # how long ES keeps the scroll context alive

    if elastic_scroll_id:
        # Just continue with the scrolling
        url = elastic.url + "/_search/scroll"
        scroll_data = {
            "scroll": scroll_keepalive,
            "scroll_id": elastic_scroll_id
        }
        res = requests.post(url, data=json.dumps(scroll_data))
    else:
        url = elastic.index_url + "/_search?scroll=%s&size=%i" % (scroll_keepalive,
                                                                  page_size)
        query = """
        {
            "query": {
                "bool": {
                    "must": []
                }
            }
        }
        """
        logging.debug("%s\n%s", url, json.dumps(json.loads(query), indent=4))
        res = requests.post(url, data=query)

    try:
        return res.json()
    except Exception:
        logging.error("No JSON found in %s", res.text)
        logging.error("No results found from %s", url)
        return None
Get the items from the index
296
6
18,191
def fetch(elastic, backend, limit=None, search_after_value=None, scroll=True):
    """Generator over the items of a raw or enriched index.

    Pages through the index with either the scroll API or search_after,
    letting the backend normalize each item before yielding it.
    """
    logging.debug("Creating a elastic items generator.")

    elastic_scroll_id = None
    search_after = search_after_value

    while True:
        if scroll:
            page = get_elastic_items(elastic, elastic_scroll_id, limit)
        else:
            page = get_elastic_items_search(elastic, search_after, limit)

        if page and "_scroll_id" in page:
            elastic_scroll_id = page["_scroll_id"]

        if not (page and "hits" in page):
            logging.error("No results found from %s", elastic.index_url)
            break

        hits = page["hits"]["hits"]
        if not hits:
            break

        for hit in hits:
            item = hit['_source']
            if 'sort' in hit:
                # Remember the cursor for search_after paging
                search_after = hit['sort']
            try:
                backend._fix_item(item)
            except Exception:
                # Best-effort fix-up; ignore failures and yield as-is
                pass
            yield item
    return
Fetch the items from raw or enriched index
242
9
18,192
def export_items(elastic_url, in_index, out_index, elastic_url_out=None,
                 search_after=False, search_after_value=None, limit=None,
                 copy=False):
    """Export items from in_index to out_index using the correct mapping.

    :param elastic_url: Elasticsearch URL hosting in_index (and out_index
        unless elastic_url_out is given)
    :param in_index: name of the source index
    :param out_index: name of the destination index
    :param elastic_url_out: optional Elasticsearch URL for out_index
    :param search_after: page with search_after instead of scrolling
    :param search_after_value: [timestamp, uuid] pair to resume paging from
    :param limit: page size (DEFAULT_LIMIT when not set)
    :param copy: reuse the input index mapping instead of detecting it
        from the data source
    """
    if not limit:
        limit = DEFAULT_LIMIT

    if search_after_value:
        # Normalize the resume cursor: [int timestamp, uuid]
        search_after_value_timestamp = int(search_after_value[0])
        search_after_value_uuid = search_after_value[1]
        search_after_value = [search_after_value_timestamp,
                              search_after_value_uuid]

    logging.info("Exporting items from %s/%s to %s",
                 elastic_url, in_index, out_index)

    count_res = requests.get('%s/%s/_count' % (elastic_url, in_index))
    try:
        count_res.raise_for_status()
    except requests.exceptions.HTTPError:
        if count_res.status_code == 404:
            logging.error("The index does not exists: %s", in_index)
        else:
            logging.error(count_res.text)
        sys.exit(1)
    logging.info("Total items to copy: %i", count_res.json()['count'])

    # Time to upload the items with the correct mapping
    elastic_in = ElasticSearch(elastic_url, in_index)
    if not copy:
        # Create the correct mapping for the data sources detected from
        # in_index
        ds_mapping = find_mapping(elastic_url, in_index)
    else:
        logging.debug('Using the input index mapping')
        ds_mapping = extract_mapping(elastic_url, in_index)

    if not elastic_url_out:
        elastic_out = ElasticSearch(elastic_url, out_index,
                                    mappings=ds_mapping)
    else:
        elastic_out = ElasticSearch(elastic_url_out, out_index,
                                    mappings=ds_mapping)

    # Time to just copy from in_index to our_index
    uid_field = find_uuid(elastic_url, in_index)
    backend = find_perceval_backend(elastic_url, in_index)
    if search_after:
        total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit,
                                              search_after_value,
                                              scroll=False), uid_field)
    else:
        total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit),
                                        uid_field)

    logging.info("Total items copied: %i", total)
Export items from in_index to out_index using the correct mapping
573
14
18,193
def _fix_review_dates(self, item):
    """Convert Unix timestamps in a review item to ISO 8601 strings.

    Rewrites the item in place so Elasticsearch detects the fields as
    dates: top-level dates, patch set dates (plus their approvals) and
    comment timestamps.
    """
    for date_field in ['timestamp', 'createdOn', 'lastUpdated']:
        if date_field in item:
            item[date_field] = unixtime_to_datetime(item[date_field]).isoformat()

    for patch in item.get('patchSets', []):
        patch['createdOn'] = unixtime_to_datetime(patch['createdOn']).isoformat()
        for approval in patch.get('approvals', []):
            approval['grantedOn'] = \
                unixtime_to_datetime(approval['grantedOn']).isoformat()

    for comment in item.get('comments', []):
        comment['timestamp'] = \
            unixtime_to_datetime(comment['timestamp']).isoformat()
Convert dates so ES detects them as date fields.
265
7
18,194
def get_sh_identity(self, item, identity_field=None):
    """Return a SortingHat identity built from Bugzilla user data.

    :param item: either a full Perceval item (dict with a 'data' key) or
        directly a user list as found in the raw data
    :param identity_field: field inside item['data'] holding the user list
    :returns: dict with 'name', 'email' and 'username' keys (None when
        the information is not available)
    """
    def fill_list_identity(identity, user_list_data):
        """Fill identity with user data in first item in list"""
        identity['username'] = user_list_data[0]['__text__']
        if '@' in identity['username']:
            # Bugzilla usernames are often email addresses
            identity['email'] = identity['username']
        if 'name' in user_list_data[0]:
            identity['name'] = user_list_data[0]['name']
        return identity

    identity = {}
    for field in ['name', 'email', 'username']:
        # Basic fields in Sorting Hat
        identity[field] = None

    user = item  # by default a specific user dict is used
    # isinstance instead of type() ==: idiomatic and subclass-safe;
    # check the type first so 'data' membership is only tested on dicts
    if isinstance(item, dict) and 'data' in item:
        user = item['data'][identity_field]
    identity = fill_list_identity(identity, user)

    return identity
Return a Sorting Hat identity using bugzilla user data
219
11
18,195
def analyze(self):
    """Populate the enriched index by processing input items in blocks.

    Reads items newer than the latest date already present in the
    output, processes them block by block and writes the results.

    :returns: total number of items written to the output.
    """
    from_date = self._out.latest_date()
    if from_date:
        logger.info("Reading items since " + from_date)
    else:
        logger.info("Reading items since the beginning of times")

    read_total = 0
    processed_total = 0
    written_total = 0
    for block in self._in.read_block(size=self._block_size,
                                     from_date=from_date):
        read_total += len(block)
        results = self.process(block)
        processed_total += results.processed

        if len(results.out_items) > 0:
            self._out.write(results.out_items)
            written_total += len(results.out_items)
        else:
            logger.info("No new items to be written this time.")

        logger.info(
            "Items read/to be written/total read/total processed/total written: "
            "{0}/{1}/{2}/{3}/{4}".format(str(len(block)),
                                         str(len(results.out_items)),
                                         str(read_total),
                                         str(processed_total),
                                         str(written_total)))

    logger.info("SUMMARY: Items total read/total processed/total written: "
                "{0}/{1}/{2}".format(str(read_total),
                                     str(processed_total),
                                     str(written_total)))
    logger.info("This is the end.")
    return written_total
Populate an enriched index by processing input items in blocks .
353
12
18,196
def read_item(self, from_date=None):
    """Yield items from the index one by one.

    :param from_date: when set, only items newer than this date are read.
    """
    query = self._build_search_query(from_date)
    yield from helpers.scan(self._es_conn, query, scroll='300m',
                            index=self._es_index, preserve_order=True)
Read items and return them one by one .
74
9
18,197
def read_block(self, size, from_date=None):
    """Yield items from the index in blocks.

    :param size: number of items per block (the last block may be smaller)
    :param from_date: when set, only items newer than this date are read.
    """
    query = self._build_search_query(from_date)

    block = []
    for hit in helpers.scan(self._es_conn, query, scroll='300m',
                            index=self._es_index, preserve_order=True):
        block.append(hit)
        if len(block) == size:
            yield block
            block = []

    # Flush the trailing partial block, if any
    if block:
        yield block
Read items and return them in blocks .
128
8
18,198
def write(self, items):
    """Upload items to ElasticSearch in one bulk request.

    :param items: iterable of hits as produced by the read methods
    :raises IOError: when the connector was created read-only.
    """
    if self._read_only:
        raise IOError("Cannot write, Connector created as Read Only")

    # Uploading info to the new ES
    docs = [{"_index": self._es_index,
             "_type": "item",
             "_id": item["_id"],
             "_source": item["_source"]}
            for item in items]
    # TODO exception and error handling
    helpers.bulk(self._es_conn, docs)
    logger.info(self.__log_prefix + " Written: " + str(len(docs)))
Upload items to ElasticSearch .
144
6
18,199
def create_alias(self, alias_name):
    """Create `alias_name` pointing to this connection's index.

    :returns: the Elasticsearch API response.
    """
    indices_api = self._es_conn.indices
    return indices_api.put_alias(index=self._es_index, name=alias_name)
Creates an alias pointing to the index configured in this connection
41
12