idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
23,100
def matches_glob_list ( path , glob_list ) : for glob in glob_list : try : if PurePath ( path ) . match ( glob ) : return True except TypeError : pass return False
Given a list of glob patterns returns a boolean indicating if a path matches any glob in the list
45
19
23,101
def get_files_in_current_directory(file_type):
    """Get the list of files in the current directory and subdirectories.

    Respects the .floydignore file if present.  Returns a tuple of
    (upload tuples, total size in bytes).  Files are opened in binary
    mode and intentionally left open for the subsequent upload.
    """
    ignore_list, whitelist = FloydIgnoreManager.get_lists()
    floyd_logger.debug("Ignoring: %s", ignore_list)
    floyd_logger.debug("Whitelisting: %s", whitelist)

    local_files = []
    total_file_size = 0
    for file_path in get_unignored_file_paths(ignore_list, whitelist):
        local_files.append((file_type,
                            (unix_style_path(file_path), open(file_path, 'rb'), 'text/plain')))
        total_file_size += os.path.getsize(file_path)
    return (local_files, total_file_size)
Gets the list of files in the current directory and subdirectories . Respects . floydignore file if present
191
24
23,102
def __get_nfiles_to_compress(self):
    """Count the files and directories to compress under ``self.source_dir``.

    Stores the count in ``self.__files_to_compress`` and exits with a
    readable message on a permission error.
    """
    floyd_logger.info(
        "Get number of files to compress... (this could take a few seconds)")
    pending = [self.source_dir]
    try:
        # Iteratively traverse each subdirectory and count files/dirs
        while pending:
            current = pending.pop()
            for entry in scandir(current):
                if entry.is_dir():
                    pending.append(entry.path)
                    self.__files_to_compress += 1
                elif entry.is_file():
                    self.__files_to_compress += 1
    except OSError as e:
        # OSError: [Errno 13] Permission denied
        if e.errno == errno.EACCES:
            # Expand cwd so the error message shows a real path
            self.source_dir = os.getcwd() if self.source_dir == '.' else self.source_dir
            sys.exit(("Permission denied. Make sure to have read permission "
                      "for all the files and directories in the path: %s")
                     % (self.source_dir))
    floyd_logger.info("Compressing %d files", self.__files_to_compress)
Return the number of files to compress
272
7
23,103
def create_tarfile(self):
    """Create a gzipped tar file with the contents of the source directory.

    Shows a progress bar and removes the partial tarball on permission
    errors, disk-full errors, or Ctrl-C.
    """
    floyd_logger.info("Compressing data...")
    # Show progress bar (files compressed / files to compress)
    self.__compression_bar = ProgressBar(
        expected_size=self.__files_to_compress, filled_char='=')

    # Auxiliary functions
    def dfilter_file_counter(tarinfo):
        """Dummy filter function used to track the progression at file level."""
        self.__compression_bar.show(self.__files_compressed)
        self.__files_compressed += 1
        return tarinfo

    def warn_purge_exit(info_msg, filename, progress_bar, exit_msg):
        """Warn the user that something went wrong, remove the tarball and
        provide an exit message."""
        progress_bar.done()
        floyd_logger.info(info_msg)
        rmtree(os.path.dirname(filename))
        sys.exit(exit_msg)

    try:
        # Define the default signal handler for catching: Ctrl-C
        signal.signal(signal.SIGINT, signal.default_int_handler)
        with tarfile.open(self.filename, "w:gz") as tar:
            tar.add(self.source_dir,
                    arcname=os.path.basename(self.source_dir),
                    filter=dfilter_file_counter)
        self.__compression_bar.done()
    except (OSError, IOError) as e:
        # OSError: [Errno 13] Permission denied
        if e.errno == errno.EACCES:
            # Expand cwd so the error message shows a real path
            self.source_dir = os.getcwd() if self.source_dir == '.' else self.source_dir
            warn_purge_exit(info_msg="Permission denied. Removing compressed data...",
                            filename=self.filename,
                            progress_bar=self.__compression_bar,
                            exit_msg=("Permission denied. Make sure to have read permission "
                                      "for all the files and directories in the path: %s")
                            % (self.source_dir))
        # OSError: [Errno 28] No Space Left on Device (IOError on python2.7)
        elif e.errno == errno.ENOSPC:
            dir_path = os.path.dirname(self.filename)
            warn_purge_exit(info_msg="No space left. Removing compressed data...",
                            filename=self.filename,
                            progress_bar=self.__compression_bar,
                            exit_msg=("No space left when compressing your data in: %s.\n"
                                      "Make sure to have enough space before uploading your data.")
                            % (os.path.abspath(dir_path)))
    except KeyboardInterrupt:
        # Purge tarball on Ctrl-C
        warn_purge_exit(info_msg="Ctrl-C signal detected: Removing compressed data...",
                        filename=self.filename,
                        progress_bar=self.__compression_bar,
                        exit_msg="Stopped the data upload gracefully.")
Create a tar file with the contents of the current directory
685
11
23,104
def create(self, data):
    """Make a create request to the server and return the parsed JSON response.

    Returns None on a request error (the error is logged).
    """
    try:
        floyd_logger.info("Making create request to server...")
        post_body = data.to_dict()
        post_body["resumable"] = True
        response = self.request("POST", self.url, json=post_body)
        return response.json()
    except BadRequestException as e:
        if 'Dataset not found, ID' in e.message:
            floyd_logger.error(
                'Data create: ERROR! Please run "floyd data init DATASET_NAME" before upload.')
        else:
            floyd_logger.error('Data create: ERROR! %s', e.message)
        return None
    except FloydException as e:
        floyd_logger.error("Data create: ERROR! %s", e.message)
        return None
Create a temporary directory for the tar file that will be removed at the end of the operation .
185
19
23,105
def get_command_line(instance_type, env, message, data, mode, open_notebook, command_str):
    """Return a string representing the full floyd command entered in the command line."""
    cmd_parts = ["floyd", "run"]

    if instance_type:
        cmd_parts.append('--' + INSTANCE_NAME_MAP[instance_type])
    if env and not env == DEFAULT_ENV:
        cmd_parts += ["--env", env]
    if message:
        cmd_parts += ["--message", shell_quote(message)]
    if data:
        for data_item in data:
            pieces = data_item.split(':')
            if len(pieces) > 1:
                # Expand the data name, keeping the mount point suffix
                data_item = normalize_data_name(pieces[0], use_data_config=False) + ':' + pieces[1]
            cmd_parts += ["--data", data_item]
    if mode and mode != "job":
        cmd_parts += ["--mode", mode]

    if mode == 'jupyter':
        if not open_notebook:
            cmd_parts.append("--no-open")
    else:
        if command_str:
            cmd_parts.append(shell_quote(command_str))

    return ' '.join(cmd_parts)
Return a string representing the full floyd command entered in the command line
281
14
23,106
def restart(ctx, job_name, data, open_notebook, env, message, gpu, cpu, gpup, cpup, command):
    """Restart a finished job as a new job."""
    # Error early if more than one --env is passed. Then get the first/only
    # --env out of the list so all other operations work normally (they
    # don't expect an iterable). For details on this approach, see the
    # comment above the --env click option.
    if len(env) > 1:
        floyd_logger.error(
            "You passed more than one environment: {}. Please specify a single environment.".format(env))
        sys.exit(1)
    env = env[0]

    parameters = {}
    expt_client = ExperimentClient()

    try:
        job = expt_client.get(normalize_job_name(job_name))
    except FloydException:
        job = expt_client.get(job_name)

    if gpu:
        instance_type = G1_INSTANCE_TYPE
    elif cpu:
        instance_type = C1_INSTANCE_TYPE
    else:
        instance_type = job.instance_type

    if instance_type is not None:
        parameters['instance_type'] = instance_type
    else:
        instance_type = job.instance_type

    if env is not None:
        arch = INSTANCE_ARCH_MAP[instance_type]
        if not validate_env(env, arch):
            sys.exit(1)
        parameters['env'] = env

    success, data_ids, show_data_info = process_data_ids(data)
    if not success:
        sys.exit(1)
    if data_ids:
        parameters['data_ids'] = data_ids

    if message:
        parameters['description'] = message
    if command:
        parameters['command'] = ' '.join(command)

    floyd_logger.info('Restarting job %s...', job_name)
    new_job_info = expt_client.restart(job.id, parameters=parameters)
    if not new_job_info:
        floyd_logger.error("Failed to restart job")
        sys.exit(1)

    show_new_job_info(expt_client, new_job_info['name'], new_job_info,
                      job.mode, open_notebook, show_data_info)
Restart a finished job as a new job .
504
10
23,107
def filter_user(user, using='records', interaction=None,
                part_of_week='allweek', part_of_day='allday'):
    """Filter records of a User object by interaction, part of week and part of day.

    Raises KeyError for invalid ``part_of_week`` / ``part_of_day`` values.
    Returns a list of the records that pass every filter.
    """
    records = user.recharges if using == 'recharges' else user.records

    if interaction == 'callandtext':
        records = [r for r in records if r.interaction in ['call', 'text']]
    elif interaction is not None:
        records = [r for r in records if r.interaction == interaction]

    if part_of_week == 'weekday':
        records = [r for r in records if r.datetime.isoweekday() not in user.weekend]
    elif part_of_week == 'weekend':
        records = [r for r in records if r.datetime.isoweekday() in user.weekend]
    elif part_of_week != 'allweek':
        raise KeyError("{} is not a valid value for part_of_week. it should be 'weekday', "
                       "'weekend' or 'allweek'.".format(part_of_week))

    # Night wraps around midnight when night_start > night_end
    if user.night_start < user.night_end:
        def night_filter(r):
            return user.night_end > r.datetime.time() > user.night_start
    else:
        def night_filter(r):
            return not (user.night_end < r.datetime.time() < user.night_start)

    if part_of_day == 'day':
        records = [r for r in records if not night_filter(r)]
    elif part_of_day == 'night':
        records = [r for r in records if night_filter(r)]
    elif part_of_day != 'allday':
        raise KeyError("{} is not a valid value for part_of_day. It should be 'day', 'night' or 'allday'.".format(part_of_day))

    return list(records)
Filter records of a User objects by interaction part of week and day .
437
14
23,108
def positions_binning(records):
    """Bin records by chunks of 30 minutes, yielding the most prevalent position.

    ``records`` is assumed chronologically ordered: ``itertools.groupby``
    only merges consecutive records sharing the same half-hour key.
    """
    def half_hour_key(d):
        # NOTE(review): the key omits the month, so (year, day, hour) could
        # collide across months — harmless here since only adjacent records
        # of sorted input are grouped, but worth confirming upstream.
        return (d.year, d.day, d.hour, d.minute // 30)

    for _, chunk in itertools.groupby(records, key=lambda r: half_hour_key(r.datetime)):
        positions = [r.position for r in chunk]
        # Given the low number of positions per chunk of 30 minutes, and
        # the need for a deterministic value, we use max and not Counter
        yield max(positions, key=positions.count)
Bin records by chunks of 30 minutes returning the most prevalent position .
121
14
23,109
def _group_range(records, method):
    """Yield the range of all dates between the extrema of a list of
    records, separated by the given grouping period."""
    start_date = records[0].datetime
    end_date = records[-1].datetime
    _fun = DATE_GROUPERS[method]
    d = start_date

    # Day and week use timedelta
    if method not in ["month", "year"]:
        def increment(i):
            return i + timedelta(**{method + 's': 1})
    elif method == "month":
        def increment(i):
            # divmod maps December (month 12 + 1 = 13) to the next year;
            # a remainder of 0 means December of the same year
            year, month = divmod(i.month + 1, 12)
            if month == 0:
                month = 12
                year = year - 1
            # NOTE(review): closes over the loop variable d instead of
            # using i; equivalent in practice since increment is only
            # ever called as increment(d).
            return d.replace(year=d.year + year, month=month)
    elif method == "year":
        def increment(i):
            return d.replace(year=d.year + 1)

    while _fun(d) <= _fun(end_date):
        yield d
        d = increment(d)
Yield the range of all dates between the extrema of a list of records separated by a given time delta .
204
24
23,110
def group_records(records, groupby='week'):
    """Group records by year, month, week or day.

    Returns a generator of lists, one list per chunk of consecutive
    records sharing the same group key.
    """
    def _chunks(records, keyfunc):
        for _, chunk in itertools.groupby(records, key=lambda r: keyfunc(r.datetime)):
            yield list(chunk)

    # Look up the grouper eagerly so an invalid groupby raises immediately
    return _chunks(records, DATE_GROUPERS[groupby])
Group records by year month week or day .
80
9
23,111
def infer_type(data):
    """Infer the type of objects returned by indicators.

    Returns one of 'scalar', 'summarystats', 'distribution_scalar' or
    'distribution_summarystats'; raises TypeError otherwise.
    """
    if isinstance(data, (type(None), numbers.Number)):
        return 'scalar'
    if isinstance(data, SummaryStats):
        return 'summarystats'
    if hasattr(data, "__len__"):  # list or numpy array
        values = [x for x in data if x is not None]
        if len(values) == 0 or isinstance(values[0], numbers.Number):
            return 'distribution_scalar'
        if isinstance(values[0], SummaryStats):
            return 'distribution_summarystats'
        raise TypeError("{} is not a valid input. It should be a number, a SummaryStats "
                        "object, or None".format(values[0]))
    raise TypeError("{} is not a valid input. It should be a number, a SummaryStats "
                    "object, or a list".format(data))
Infer the type of objects returned by indicators .
203
10
23,112
def grouping(f=None, interaction=['call', 'text'], summary='default', user_kwd=False):
    """Decorator for indicator functions, used to simplify the source code.

    May be used bare or with arguments; in the latter case it returns a
    partially-applied decorator.
    """
    if f is None:
        # Called with arguments: return a decorator awaiting the function
        return partial(grouping, user_kwd=user_kwd, interaction=interaction, summary=summary)

    def wrapper(user, groupby='week', interaction=interaction, summary=summary,
                split_week=False, split_day=False, filter_empty=True,
                datatype=None, **kwargs):
        if interaction is None:
            interaction = ['call', 'text']
        parameters = divide_parameters(split_week, split_day, interaction)
        operations = {
            'grouping': {'using': 'records',
                         'binning': False,
                         'groupby': groupby,
                         'filter_empty': filter_empty,
                         'divide_by': parameters},
            'apply': {'user_kwd': user_kwd, 'summary': summary, 'kwargs': kwargs}
        }
        for i in parameters['interaction']:
            if i not in ['callandtext', 'call', 'text', 'location']:
                raise ValueError("%s is not a valid interaction value. Only "
                                 "'call', 'text', and 'location' are accepted." % i)
        return _generic_wrapper(f, user, operations, datatype)

    return advanced_wrap(f, wrapper)
grouping is a decorator for indicator functions used to simplify the source code .
322
16
23,113
def kurtosis(data):
    """Return the kurtosis for data.

    Returns None when *data* is empty and 0 when the variance is zero.
    """
    if len(data) == 0:
        return None
    numerator = moment(data, 4)
    denominator = moment(data, 2) ** 2.
    return numerator / denominator if denominator != 0 else 0
Return the kurtosis for data .
50
8
23,114
def skewness(data):
    """Return the skewness of data.

    Returns None when *data* is empty and 0. when the variance is zero.
    """
    if len(data) == 0:
        return None
    numerator = moment(data, 3)
    denominator = moment(data, 2) ** 1.5
    return numerator / denominator if denominator != 0 else 0.
Returns the skewness of data .
52
8
23,115
def median(data):
    """Return the median of numeric data, using the "mean of middle two" method.

    Returns None when *data* is empty.
    """
    if len(data) == 0:
        return None
    ordered = sorted(data)
    n = len(ordered)
    # Average the two middle elements (they coincide for odd n)
    return float((ordered[n // 2] + ordered[(n - 1) // 2]) / 2.)
Return the median of numeric data using the mean of middle two method . If data is empty None is returned .
54
23
23,116
def entropy(data):
    """Compute the Shannon entropy (natural log), a measure of uncertainty.

    *data* is a list of counts; returns None when it is empty.
    """
    if len(data) == 0:
        return None
    total = sum(data)
    term = lambda f: f * math.log(f)
    return -sum(term(float(count) / total) for count in data)
Compute the Shannon entropy a measure of uncertainty .
55
10
23,117
def advanced_wrap(f, wrapper):
    """Wrap a decorated function while keeping the same keyword arguments.

    Builds, via eval, a lambda whose signature merges the wrapper's
    arguments with the wrapped function's keyword arguments.
    """
    f_sig = list(inspect.getargspec(f))
    wrap_sig = list(inspect.getargspec(wrapper))

    # Update the keyword arguments of the wrapper
    if f_sig[3] is None or f_sig[3] == []:
        f_sig[3], f_kwargs = [], []
    else:
        f_kwargs = f_sig[0][-len(f_sig[3]):]

    for key, default in zip(f_kwargs, f_sig[3]):
        wrap_sig[0].append(key)
        wrap_sig[3] = wrap_sig[3] + (default,)

    wrap_sig[2] = None  # Remove kwargs
    src = "lambda %s: " % (inspect.formatargspec(*wrap_sig)[1:-1])
    new_args = inspect.formatargspec(wrap_sig[0], wrap_sig[1], wrap_sig[2],
                                     f_kwargs, formatvalue=lambda x: '=' + x)
    src += 'wrapper%s\n' % new_args

    # NOTE(review): eval of a generated signature string; also relies on
    # inspect.getargspec/formatargspec, removed in modern Python — this
    # only runs on the Python versions the project originally targeted.
    decorated = eval(src, locals())
    decorated.func = f
    return update_wrapper(decorated, f)
Wrap a decorated function while keeping the same keyword arguments
301
11
23,118
def percent_records_missing_location(user, method=None):
    """Return the percentage of records missing a location parameter.

    Returns 0. when the user has no records.
    """
    if len(user.records) == 0:
        return 0.
    missing = sum(1 for r in user.records if r.position._get_location(user) is None)
    return float(missing) / len(user.records)
Return the percentage of records missing a location parameter .
76
10
23,119
def percent_overlapping_calls(records, min_gab=300):
    """Return the percentage of calls that overlap with the next call.

    A call counts as overlapping when it ends less than ``min_gab``
    seconds before the next call starts.  (``min_gab`` is kept as-is for
    backward compatibility; presumably a typo for ``min_gap``.)
    """
    calls = [r for r in records if r.interaction == "call"]
    if len(calls) == 0:
        return 0.
    overlaps = 0
    for current, following in zip(calls, calls[1:]):
        if current.datetime + timedelta(seconds=current.call_duration - min_gab) >= following.datetime:
            overlaps += 1
    return float(overlaps) / len(calls)
Return the percentage of calls that overlap with the next call .
129
12
23,120
def antennas_missing_locations(user, Method=None):
    """Return the number of antennas missing locations in the records of a given user."""
    unique_antennas = {r.position.antenna for r in user.records
                       if r.position.antenna is not None}
    return sum(1 for antenna in unique_antennas if user.antennas.get(antenna) is None)
Return the number of antennas missing locations in the records of a given user .
72
15
23,121
def bandicoot_code_signature():
    """Return a unique SHA-1 hash of the Python source code of the current
    bandicoot module."""
    checksum = hashlib.sha1()
    for root, dirs, files in os.walk(MAIN_DIRECTORY):
        for filename in sorted(files):
            if not filename.endswith('.py'):
                continue
            f_path = os.path.join(root, filename)
            with open(f_path, 'rb') as f:
                # Feed the file to the hash in 256 KB chunks
                for chunk in iter(lambda: f.read(0x40000), b''):
                    checksum.update(chunk)
    return checksum.hexdigest()
Returns a unique hash of the Python source code in the current bandicoot module using the cryptographic hash function SHA - 1 .
143
25
23,122
def supported(cls, stream=sys.stdout):
    """Return True if the current platform supports coloring terminal
    output using this method; False otherwise.

    Requires *stream* to be a TTY and curses to report more than two
    colors.  Any curses failure is treated as "no color support".
    """
    if not stream.isatty():
        return False  # auto color only on TTYs
    try:
        import curses
    except ImportError:
        # curses unavailable (e.g. some Windows builds)
        return False
    try:
        try:
            return curses.tigetnum("colors") > 2
        except curses.error:
            # Terminal not initialized yet: set it up and retry once
            curses.setupterm()
            return curses.tigetnum("colors") > 2
    except Exception:
        # Bug fix: the original did `except: raise`, which made the
        # trailing `return False` unreachable and contradicted its own
        # "guess false in case of error" comment. Guess False instead.
        return False
A class method that returns True if the current platform supports coloring terminal output using this method . Returns False otherwise .
103
22
23,123
def write(self, text, color):
    """Write *text* to the stream, wrapped in the ANSI escape sequence
    for *color* (looked up in ``self._colors``) and a reset code."""
    code = self._colors[color]
    self.stream.write('\x1b[{}m{}\x1b[0m'.format(code, text))
Write the given text to the stream in the given color .
52
12
23,124
def percent_at_home(positions, user):
    """Return the percentage of interactions the user had while at home.

    Returns None when the user's home is unknown, and 0 for an empty
    position list.
    """
    if not user.has_home:
        return None
    if len(positions) == 0:
        return 0
    at_home = sum(1 for p in positions if p == user.home)
    return float(at_home) / len(positions)
The percentage of interactions the user had while he was at home .
61
13
23,125
def entropy_of_antennas(positions, normalize=False):
    """Return the entropy of visited antennas.

    When *normalize* is True and more than one antenna was visited, the
    entropy is divided by log(n) so it falls in [0, 1].
    """
    counts = Counter(p for p in positions)
    raw_entropy = entropy(list(counts.values()))
    n_antennas = len(counts)
    if normalize and n_antennas > 1:
        return raw_entropy / math.log(n_antennas)
    return raw_entropy
The entropy of visited antennas .
76
6
23,126
def churn_rate(user, summary='default', **kwargs):
    """Compute the frequency spent at every tower each week, and return the
    distribution of the cosine distance between consecutive weeks."""
    if len(user.records) == 0:
        return statistics([], summary=summary)

    query = {
        'groupby': 'week',
        'divide_by': OrderedDict([('part_of_week', ['allweek']),
                                  ('part_of_day', ['allday'])]),
        'using': 'records',
        'filter_empty': True,
        'binning': True
    }
    rv = grouping_query(user, query)
    weekly_positions = rv[0][1]

    all_positions = list(set(p for l in weekly_positions for p in l))
    frequencies = {}
    cos_dist = []

    # Per-week relative frequency of each position
    for week, week_positions in enumerate(weekly_positions):
        count = Counter(week_positions)
        total = sum(count.values())
        frequencies[week] = [count.get(p, 0) / total for p in all_positions]

    # Cosine distance between each pair of consecutive weeks
    all_indexes = range(len(all_positions))
    for f_1, f_2 in pairwise(list(frequencies.values())):
        numerator = sum(f_1[a] * f_2[a] for a in all_indexes)
        denom_1 = sum(f ** 2 for f in f_1)
        denom_2 = sum(f ** 2 for f in f_2)
        cos_dist.append(1 - numerator / (denom_1 ** .5 * denom_2 ** .5))

    return statistics(cos_dist, summary=summary)
Computes the frequency spent at every towers each week and returns the distribution of the cosine similarity between two consecutives week .
378
25
23,127
def describe(self):
    """Generate a short description of the object and print it to standard
    output as a checklist."""
    def format_int(name, n):
        # Use the singular form for 0 or 1 (strip the trailing 's')
        if n == 0 or n == 1:
            return "%i %s" % (n, name[:-1])
        else:
            return "%i %s" % (n, name)

    empty_box = Colors.OKGREEN + '[ ]' + Colors.ENDC + ' '
    filled_box = Colors.OKGREEN + '[x]' + Colors.ENDC + ' '

    if self.start_time is None:
        print(empty_box + "No records stored")
    else:
        print((filled_box + format_int("records", len(self.records)) +
               " from %s to %s" % (self.start_time, self.end_time)))

    nb_contacts = bc.individual.number_of_contacts(
        self, interaction='callandtext', groupby=None)
    nb_contacts = nb_contacts['allweek']['allday']['callandtext']
    if nb_contacts:
        print(filled_box + format_int("contacts", nb_contacts))
    else:
        print(empty_box + "No contacts")

    if self.has_attributes:
        print(filled_box + format_int("attributes", len(self.attributes)))
    else:
        print(empty_box + "No attribute stored")

    if len(self.antennas) == 0:
        print(empty_box + "No antenna stored")
    else:
        print(filled_box + format_int("antennas", len(self.antennas)))

    if self.has_recharges:
        print(filled_box + format_int("recharges", len(self.recharges)))
    else:
        print(empty_box + "No recharges")

    if self.has_home:
        print(filled_box + "Has home")
    else:
        print(empty_box + "No home")

    if self.has_text:
        print(filled_box + "Has texts")
    else:
        print(empty_box + "No texts")

    if self.has_call:
        print(filled_box + "Has calls")
    else:
        print(empty_box + "No calls")

    if self.has_network:
        print(filled_box + "Has network")
    else:
        print(empty_box + "No network")
Generates a short description of the object and writes it to the standard output .
535
16
23,128
def recompute_home(self):
    """Return the antenna where the user spends most of his time at night.

    None is returned if there are no candidates for a home antenna.
    """
    # Night wraps around midnight when night_start > night_end
    if self.night_start < self.night_end:
        night_filter = lambda r: self.night_end > r.datetime.time() > self.night_start
    else:
        night_filter = lambda r: not (self.night_end < r.datetime.time() < self.night_start)

    # Bin positions by chunks of 30 minutes
    candidates = list(positions_binning(filter(night_filter, self._records)))

    if len(candidates) == 0:
        self.home = None
    else:
        self.home = Counter(candidates).most_common()[0][0]

    self.reset_cache()
    return self.home
Return the antenna where the user spends most of his time at night . None is returned if there are no candidates for a home antenna
158
26
23,129
def set_home(self, new_home):
    """Set the user's home.

    The argument can be a Position object, a (lat, lon) tuple, or an
    antenna identifier.
    """
    # Exact type checks (not isinstance) preserved from the original API
    if type(new_home) is Position:
        self.home = new_home
    elif type(new_home) is tuple:
        self.home = Position(location=new_home)
    else:
        self.home = Position(antenna=new_home)
    self.reset_cache()
Sets the user s home . The argument can be a Position object or a tuple containing location data .
73
21
23,130
def interevent_time_recharges(recharges):
    """Return the distribution of time (in seconds) between consecutive
    recharges of the user."""
    deltas = [(newer - older).total_seconds()
              for older, newer in pairwise(r.datetime for r in recharges)]
    return summary_stats(deltas)
Return the distribution of time between consecutive recharges of the user .
63
13
23,131
def percent_pareto_recharges(recharges, percentage=0.8):
    """Return the fraction of recharges that account for *percentage*
    (by default 80%) of the total recharged amount.

    Returns None for an empty recharge list.  (The original crashed on
    empty input: ``count`` was never bound and the final division was by
    zero.)
    """
    if not recharges:
        return None
    amounts = sorted((r.amount for r in recharges), reverse=True)
    total_sum = sum(amounts)
    partial_sum = 0
    # Walk the largest recharges first until the threshold is reached
    for count, amount in enumerate(amounts):
        partial_sum += amount
        if partial_sum >= percentage * total_sum:
            break
    return (count + 1) / len(recharges)
Percentage of recharges that account for 80% of total recharged amount .
92
16
23,132
def average_balance_recharges(user, **kwargs):
    """Return the average daily balance estimated from all recharges.

    We assume a linear usage between two recharges and an empty balance
    before a recharge.
    """
    balance = 0
    for earlier, later in pairwise(user.recharges):
        # If the range is less than 1 day, cap at 1
        # NOTE(review): min() bounds the day count ABOVE by 1, and the
        # final division also uses min(1, duration) — both look like they
        # may have been intended as max(); behavior preserved as-is,
        # confirm upstream before changing.
        balance += earlier.amount * min(1, (later.datetime - earlier.datetime).days) / 2

    duration = (user.recharges[-1].datetime - user.recharges[0].datetime).days
    return balance / min(1, duration)
Return the average daily balance estimated from all recharges . We assume a linear usage between two recharges and an empty balance before a recharge .
133
28
23,133
def _round_half_hour ( record ) : k = record . datetime + timedelta ( minutes = - ( record . datetime . minute % 30 ) ) return datetime ( k . year , k . month , k . day , k . hour , k . minute , 0 )
Round a time DOWN to the nearest half - hour .
61
11
23,134
def matrix_index(user):
    """Return the keys associated with each axis of the matrices.

    The ego (``user.name``) always comes first, followed by the other
    correspondents in sorted order.
    """
    others = sorted(k for k in user.network.keys() if k != user.name)
    return [user.name] + others
Returns the keys associated with each axis of the matrices .
44
12
23,135
def matrix_directed_unweighted(user):
    """Return a directed, unweighted matrix where an edge exists if there
    is at least one call or text."""
    matrix = _interaction_matrix(user, interaction=None)
    size = len(matrix)
    for row in range(size):
        for col in range(size):
            value = matrix[row][col]
            if value is not None and value > 0:
                matrix[row][col] = 1
    return matrix
Returns a directed unweighted matrix where an edge exists if there is at least one call or text .
82
21
23,136
def matrix_undirected_weighted(user, interaction=None):
    """Return an undirected, weighted matrix for call, text and call
    duration, where an edge exists only if the relationship is
    reciprocated (None marks unknown pairs)."""
    matrix = _interaction_matrix(user, interaction=interaction)
    size = len(matrix)
    result = [[0 for _ in range(size)] for _ in range(size)]
    for a in range(size):
        for b in range(size):
            if a != b and matrix[a][b] and matrix[b][a]:
                # Reciprocated: sum both directions
                result[a][b] = matrix[a][b] + matrix[b][a]
            elif matrix[a][b] is None or matrix[b][a] is None:
                result[a][b] = None
            else:
                result[a][b] = 0
    return result
Returns an undirected weighted matrix for call text and call duration where an edge exists if the relationship is reciprocated .
169
24
23,137
def matrix_undirected_unweighted(user):
    """Return an undirected, unweighted matrix where an edge exists if the
    relationship is reciprocated."""
    matrix = matrix_undirected_weighted(user, interaction=None)
    for a, b in combinations(range(len(matrix)), 2):
        forward, backward = matrix[a][b], matrix[b][a]
        if forward is None or backward is None:
            continue
        if forward > 0 and backward > 0:
            matrix[a][b] = matrix[b][a] = 1
    return matrix
Returns an undirected unweighted matrix where an edge exists if the relationship is reciprocated .
113
20
23,138
def clustering_coefficient_unweighted(user):
    """Return the clustering coefficient of the user in the unweighted,
    undirected ego network (row/column 0 is the ego)."""
    matrix = matrix_undirected_unweighted(user)
    closed_triplets = 0.
    for a, b in combinations(range(len(matrix)), 2):
        edge_ab = matrix[a][b]
        edge_a_ego = matrix[a][0]
        edge_b_ego = matrix[b][0]
        if edge_ab is None or edge_a_ego is None or edge_b_ego is None:
            continue
        # A closed triplet: a-b, a-ego and b-ego all connected
        if edge_ab > 0 and edge_a_ego > 0 and edge_b_ego > 0:
            closed_triplets += 1
    d_ego = sum(matrix[0])
    return 2 * closed_triplets / (d_ego * (d_ego - 1)) if d_ego > 1 else 0
The clustering coefficient of the user in the unweighted undirected ego network .
177
18
23,139
def clustering_coefficient_weighted(user, interaction=None):
    """Return the clustering coefficient of the user's weighted, undirected
    ego network (row/column 0 is the ego).

    Returns None when the matrix carries no known weights.
    """
    matrix = matrix_undirected_weighted(user, interaction=interaction)
    weights = [w for row in matrix for w in row if w is not None]
    if len(weights) == 0:
        return None
    max_weight = max(weights)

    triplet_weight = 0
    for a, b in combinations(range(len(matrix)), 2):
        a_b, a_ego, b_ego = matrix[a][b], matrix[a][0], matrix[b][0]
        if a_b is None or a_ego is None or b_ego is None:
            continue
        if a_b and a_ego and b_ego:
            # Normalized geometric mean of the triplet's weights
            triplet_weight += (a_b * a_ego * b_ego) ** (1 / 3) / max_weight

    d_ego = sum(1 for w in matrix[0] if w > 0)
    return 2 * triplet_weight / (d_ego * (d_ego - 1)) if d_ego > 1 else 0
The clustering coefficient of the user s weighted undirected network .
242
14
23,140
def assortativity_indicators(user):
    """Compute the assortativity of indicators: for each indicator, the
    average squared difference between the ego's value and each
    reciprocated correspondent's value."""
    matrix = matrix_undirected_unweighted(user)
    count_indicator = defaultdict(int)
    total_indicator = defaultdict(int)

    # Use all indicators except reporting variables and attributes
    # (`all` here is the project's utils.all, not the builtin)
    ego_indics = all(user, flatten=True)
    ego_indics = {key: value for key, value in ego_indics.items()
                  if key != "name" and key[:11] != "reporting__" and key[:10] != "attributes"}

    for i, u_name in enumerate(matrix_index(user)):
        correspondent = user.network.get(u_name, None)
        # Skip the ego itself and non-reciprocated edges
        if correspondent is None or u_name == user.name or matrix[0][i] == 0:
            continue
        neighbor_indics = all(correspondent, flatten=True)
        for key in ego_indics:
            if ego_indics[key] is not None and neighbor_indics[key] is not None:
                total_indicator[key] += 1
                count_indicator[key] += (ego_indics[key] - neighbor_indics[key]) ** 2

    return {key: count_indicator[key] / total_indicator[key]
            for key in count_indicator}
Computes the assortativity of indicators .
301
9
23,141
def assortativity_attributes(user):
    """Compute the assortativity of the nominal attributes: for each
    attribute, the fraction of reciprocated correspondents (with known
    attributes) sharing the ego's value."""
    matrix = matrix_undirected_unweighted(user)
    neighbors = [k for k in user.network.keys() if k != user.name]

    neighbors_attrbs = {}
    for i, u_name in enumerate(matrix_index(user)):
        correspondent = user.network.get(u_name, None)
        # Skip the ego itself and non-reciprocated edges
        if correspondent is None or u_name == user.name or matrix[0][i] == 0:
            continue
        if correspondent.has_attributes:
            neighbors_attrbs[correspondent.name] = correspondent.attributes

    assortativity = {}
    for a in user.attributes:
        matching = sum(1 for n in neighbors
                       if n in neighbors_attrbs and user.attributes[a] == neighbors_attrbs[n][a])
        den = sum(1 for n in neighbors if n in neighbors_attrbs)
        assortativity[a] = matching / den if den != 0 else None
    return assortativity
Computes the assortativity of the nominal attributes .
215
11
23,142
def network_sampling(n, filename, directory=None, snowball=False, user=None):
    """Select *n* users and export a CSV of indicators for them.

    With ``snowball=True`` the sample grows from *user* by following
    network edges; otherwise *n* user files are drawn at random from
    *directory*.  Raises ValueError when fewer than *n* users are found.
    """
    if snowball:
        if user is None:
            raise ValueError("Must specify a starting user from whom to initiate the snowball")
        else:
            users, agenda = [user], [user]
            while len(agenda) > 0:
                parent = agenda.pop()
                # Shuffle the network so the traversal is not alphabetical
                dealphebetized_network = sorted(parent.network.items(),
                                                key=lambda k: random.random())
                for neighbor in dealphebetized_network:
                    if neighbor[1] not in users and neighbor[1] is not None and len(users) < n:
                        users.append(neighbor[1])
                        if neighbor[1].network:
                            # Bug fix: Python lists have no ``push`` method;
                            # the original raised AttributeError here.
                            agenda.append(neighbor[1])
    else:
        files = [x for x in os.listdir(directory)
                 if os.path.isfile(os.path.join(directory, x))]
        shuffled_files = sorted(files, key=lambda k: random.random())
        user_names = shuffled_files[:n]
        users = [bc.read_csv(u[:-4], directory) for u in user_names]
    if len(users) < n:
        raise ValueError("Specified more users than records that exist, only {} records available".format(len(users)))
    bc.to_csv([bc.utils.all(u) for u in users], filename)
Selects a few users and exports a CSV of indicators for them .
318
14
23,143
def export(user, directory=None, warnings=True):
    """Build a directory with the visualization and return the local path
    where the files have been written.

    A temporary directory is created unless *directory* is given.
    """
    # Locate the dashboard sources next to this file
    current_file = os.path.realpath(__file__)
    current_path = os.path.dirname(current_file)
    dashboard_path = os.path.join(current_path, 'dashboard_src')

    # Create a temporary directory if needed and copy all files
    dirpath = directory if directory else tempfile.mkdtemp()

    # Copy all files except source code
    copy_tree(dashboard_path + '/public', dirpath, update=1)

    # Export indicators
    data = user_data(user)
    bc.io.to_json(data, dirpath + '/data/bc_export.json', warnings=False)

    if warnings:
        print("Successfully exported the visualization to %s" % dirpath)
    return dirpath
Build a temporary directory with the visualization . Returns the local path where files have been written .
186
18
23,144
def run(user, port=4242):
    """Build a temporary directory with a visualization and serve it over HTTP.

    Blocks until Ctrl-C; always restores the original working directory.
    """
    original_cwd = os.getcwd()
    serving_dir = export(user)
    os.chdir(serving_dir)
    handler = SimpleHTTPServer.SimpleHTTPRequestHandler
    try:
        httpd = SocketServer.TCPServer(("", port), handler)
        print("Serving bandicoot visualization at http://0.0.0.0:%i" % port)
        httpd.serve_forever()
    except KeyboardInterrupt:
        print("^C received, shutting down the web server")
        httpd.server_close()
    finally:
        os.chdir(original_cwd)
Build a temporary directory with a visualization and serve it over HTTP .
137
13
23,145
def to_csv(objects, filename, digits=5, warnings=True):
    """Export the flattened indicators of one or several users to CSV.

    Parameters
    ----------
    objects : dict or list of dict
        Exported-indicator dictionaries; each is flattened before writing.
    filename : str
        Destination path of the CSV file.
    digits : int
        Number of decimal digits kept for float values.
    warnings : bool
        If True, print a confirmation message on success.
    """
    if not isinstance(objects, list):
        objects = [objects]
    data = [flatten(obj) for obj in objects]

    # Keep columns in first-appearance order across all objects.
    all_keys = [d for datum in data for d in datum.keys()]
    field_names = sorted(set(all_keys), key=lambda x: all_keys.index(x))

    def make_repr(item):
        # None stays empty; floats are rounded to `digits`; others are str()'d.
        if item is None:
            return None
        elif isinstance(item, float):
            return repr(round(item, digits))
        else:
            return str(item)

    # newline='' prevents the csv module from emitting blank rows on Windows.
    with open(filename, 'w', newline='') as f:
        w = csv.writer(f)
        w.writerow(field_names)
        for row in data:
            # Single conversion pass (the original converted each value twice).
            w.writerow([make_repr(row.get(k, None)) for k in field_names])

    if warnings:
        print("Successfully exported {} object(s) to {}".format(len(objects), filename))
Export the flatten indicators of one or several users to CSV .
254
13
23,146
def to_json(objects, filename, warnings=True):
    """Export the indicators of one or several users to a JSON file."""
    if not isinstance(objects, list):
        objects = [objects]
    # Keyed by user name, preserving the order in which objects were given.
    keyed = OrderedDict((obj['name'], obj) for obj in objects)
    with open(filename, 'w') as f:
        f.write(dumps(keyed, indent=4, separators=(',', ': ')))
    if warnings:
        print("Successfully exported {} object(s) to {}".format(len(objects), filename))
Export the indicators of one or several users to JSON .
122
11
23,147
def _parse_record(data, duration_format='seconds'):
    """Parse one raw CSV row (a dict of strings) into a Record object.

    ``duration_format`` is either ``'seconds'`` or a ``time.strptime``
    format used to decode the ``call_duration`` field.
    """
    def _map_duration(s):
        # An empty string means the duration is unknown.
        if s == '':
            return None
        elif duration_format.lower() == 'seconds':
            return int(s)
        else:
            # Decode with the given strptime format, then convert to seconds.
            t = time.strptime(s, duration_format)
            return 3600 * t.tm_hour + 60 * t.tm_min + t.tm_sec

    def _map_position(data):
        antenna = Position()
        if 'antenna_id' in data and data['antenna_id']:
            antenna.antenna = data['antenna_id']
        if 'place_id' in data:
            # Reject the legacy field name outright.
            raise NameError("Use field name 'antenna_id' in input files. "
                            "'place_id' is deprecated.")
        if 'latitude' in data and 'longitude' in data:
            latitude = data['latitude']
            longitude = data['longitude']
            # latitude and longitude should not be empty strings.
            if latitude and longitude:
                antenna.location = float(latitude), float(longitude)
        return antenna

    # _tryto() presumably converts parse failures into a sentinel rather than
    # raising — confirm against its definition.
    return Record(interaction=data['interaction'] if data['interaction'] else None,
                  direction=data['direction'],
                  correspondent_id=data['correspondent_id'],
                  datetime=_tryto(lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"),
                                  data['datetime']),
                  call_duration=_tryto(_map_duration, data['call_duration']),
                  position=_tryto(_map_position, data))
Parse a raw data dictionary and return a Record object .
376
12
23,148
def filter_record(records):
    """Split *records* into valid ones and ones with missing or bad fields.

    Returns ``(kept_records, ignored_counts, bad_records)`` where
    ``ignored_counts`` maps each field name (plus 'all') to the number of
    records rejected because of it.
    """
    def scheme(r):
        # Only calls must carry a numeric duration.
        if r.interaction is None:
            call_duration_ok = True
        elif r.interaction == 'call':
            call_duration_ok = isinstance(r.call_duration, (int, float))
        else:
            call_duration_ok = True

        callandtext = r.interaction in ['call', 'text']
        not_callandtext = not callandtext
        # One boolean per validated field; False marks the field as bad.
        return {
            'interaction': r.interaction in ['call', 'text', 'gps', None],
            'direction': (not_callandtext and r.direction is None) or r.direction in ['in', 'out'],
            'correspondent_id': not_callandtext or (r.correspondent_id not in [None, '']),
            'datetime': isinstance(r.datetime, datetime),
            'call_duration': call_duration_ok,
            'location': callandtext or r.position.type() is not None
        }

    ignored = OrderedDict([
        ('all', 0),
        ('interaction', 0),
        ('direction', 0),
        ('correspondent_id', 0),
        ('datetime', 0),
        ('call_duration', 0),
        ('location', 0),
    ])
    bad_records = []

    def _filter(records):
        for r in records:
            valid = True
            for key, valid_key in scheme(r).items():
                if not valid_key:
                    ignored[key] += 1
                    # NOTE: a record with several bad fields is appended once
                    # per bad field.
                    bad_records.append(r)
                    # Not breaking, to count all fields with errors
                    valid = False
            if valid:
                yield r
            else:
                ignored['all'] += 1

    return list(_filter(records)), ignored, bad_records
Filter records and remove items with missing or inconsistent fields
409
10
23,149
def read_csv(user_id, records_path, antennas_path=None, attributes_path=None,
             recharges_path=None, network=False, duration_format='seconds',
             describe=True, warnings=True, errors=False, drop_duplicates=False):
    """Load one user's records (and optional antennas/attributes/recharges)
    from CSV files.

    Parameters
    ----------
    user_id : str
        Base name of the user's CSV file (without the ``.csv`` extension).
    records_path : str
        Directory containing ``<user_id>.csv`` record files.
    antennas_path, attributes_path, recharges_path : str, optional
        Directories (or file, for antennas) with auxiliary CSV data.
    network : bool
        If True, also load the user's neighbors recursively.
    duration_format : str
        Passed through to ``_parse_record`` ('seconds' or a strptime format).
    describe, warnings, errors, drop_duplicates : bool
        Presentation/behavior flags; ``errors=True`` also returns bad records.

    Returns the loaded user, or ``(user, bad_records)`` when ``errors``.
    """
    antennas = None
    if antennas_path is not None:
        try:
            with open(antennas_path, 'r') as csv_file:
                reader = csv.DictReader(csv_file)
                antennas = dict((d['antenna_id'], (float(d['latitude']),
                                                   float(d['longitude'])))
                                for d in reader)
        except IOError:
            # Best effort: a missing antennas file leaves antennas as None.
            pass

    user_records = os.path.join(records_path, user_id + '.csv')
    with open(user_records, 'r') as csv_file:
        reader = csv.DictReader(csv_file)
        records = [_parse_record(r, duration_format) for r in reader]

    attributes = None
    if attributes_path is not None:
        user_attributes = os.path.join(attributes_path, user_id + '.csv')
        attributes = _load_attributes(user_attributes)

    recharges = None
    if recharges_path is not None:
        user_recharges = os.path.join(recharges_path, user_id + '.csv')
        recharges = _load_recharges(user_recharges)

    user, bad_records = load(user_id, records, antennas, attributes, recharges,
                             antennas_path, attributes_path, recharges_path,
                             describe=False, warnings=warnings,
                             drop_duplicates=drop_duplicates)

    # Loads the network
    if network is True:
        user.network = _read_network(user, records_path, attributes_path,
                                     read_csv, antennas_path, warnings,
                                     drop_duplicates=drop_duplicates)
        user.recompute_missing_neighbors()

    if describe:
        user.describe()

    if errors:
        return user, bad_records
    return user
Load user records from a CSV file .
468
8
23,150
def interevent_time(records):
    """Summary statistics of the time (in seconds) between consecutive records."""
    gaps = [(later - earlier).total_seconds()
            for earlier, later in pairwise(r.datetime for r in records)]
    return summary_stats(gaps)
The interevent time between two records of the user .
56
12
23,151
def number_of_contacts(records, direction=None, more=0):
    """Number of distinct contacts with strictly more than *more* interactions,
    optionally restricted to one direction ('in' or 'out')."""
    if direction is None:
        relevant = (r.correspondent_id for r in records)
    else:
        relevant = (r.correspondent_id for r in records if r.direction == direction)
    tally = Counter(relevant)
    return sum(1 for count in tally.values() if count > more)
The number of contacts the user interacted with .
77
9
23,152
def entropy_of_contacts(records, normalize=False):
    """Entropy of the distribution of interactions across the user's contacts.

    With ``normalize=True`` (and more than one contact) the entropy is
    divided by ``log(number_of_contacts)``.
    """
    tally = Counter(r.correspondent_id for r in records)
    raw = entropy(tally.values())
    if normalize and len(tally) > 1:
        return raw / math.log(len(tally))
    return raw
The entropy of the user's contacts.
76
8
23,153
def interactions_per_contact(records, direction=None):
    """Summary statistics of the number of interactions per contact."""
    if direction is None:
        relevant = (r.correspondent_id for r in records)
    else:
        relevant = (r.correspondent_id for r in records if r.direction == direction)
    return summary_stats(Counter(relevant).values())
The number of interactions a user had with each of its contacts .
66
13
23,154
def percent_initiated_interactions(records, user):
    """Fraction of the records whose direction is 'out' (0 for no records)."""
    if not records:
        return 0
    outgoing = sum(1 for r in records if r.direction == 'out')
    return outgoing / len(records)
The percentage of calls initiated by the user .
50
9
23,155
def percent_nocturnal(records, user):
    """Fraction of records that fall within the user's night window."""
    if not records:
        return 0
    start, end = user.night_start, user.night_end
    if start < end:
        # Night does not cross midnight: strictly inside (start, end).
        def is_night(d):
            return end > d.time() > start
    else:
        # Night wraps past midnight: everything outside the (end, start) day.
        def is_night(d):
            return not (end < d.time() < start)
    return sum(1 for r in records if is_night(r.datetime)) / len(records)
The percentage of interactions the user had at night .
112
10
23,156
def call_duration(records, direction=None):
    """Summary statistics of call durations, optionally filtered by direction."""
    if direction is None:
        durations = [r.call_duration for r in records]
    else:
        durations = [r.call_duration for r in records if r.direction == direction]
    return summary_stats(durations)
The duration of the user's calls.
67
8
23,157
def _conversations(group, delta=datetime.timedelta(hours=1)):
    """Group the text records of one correspondent into conversations.

    Yields lists of text records. Consecutive texts less than ``delta``
    apart belong to the same conversation; a call, or a gap of at least
    ``delta``, closes the current conversation.
    """
    last_time = None
    results = []
    for g in group:
        if last_time is None or g.datetime - last_time < delta:
            if g.interaction == 'text':
                results.append(g)
            # A call always ends a conversation
            else:
                if len(results) != 0:
                    yield results
                    results = []
        else:
            # Too much time elapsed: flush the open conversation first.
            if len(results) != 0:
                yield results
            if g.interaction == 'call':
                results = []
            else:
                # A text after a long gap starts a new conversation.
                results = [g]
        last_time = g.datetime

    # Flush the trailing conversation, if any.
    if len(results) != 0:
        yield results
Group texts into conversations . The function returns an iterator over records grouped by conversations .
142
16
23,158
def percent_initiated_conversations(records):
    """Fraction of conversations whose first message was sent by the user."""
    by_contact = defaultdict(list)
    for r in records:
        by_contact[r.correspondent_id].append(r)

    # One (initiated?, 1) pair per conversation with each correspondent.
    pairs = [(1 if conv[0].direction == 'out' else 0, 1)
             for grouped in by_contact.values()
             for conv in _conversations(grouped)]

    if not pairs:
        return 0
    initiated, total = map(sum, zip(*pairs))
    return initiated / total if total != 0 else 0
The percentage of conversations that have been initiated by the user .
170
12
23,159
def active_days(records):
    """Number of distinct calendar days on which the user has any record."""
    return len({r.datetime.date() for r in records})
The number of days during which the user was active . A user is considered active if he sends a text receives a text initiates a call receives a call or has a mobility point .
30
37
23,160
def percent_pareto_interactions(records, percentage=0.8):
    """The share of contacts accounting for ``percentage`` of interactions.

    NOTE(review): the final division is by ``len(records)`` (number of
    interactions), while the stated contract suggests a fraction of
    contacts (``len(user_count)``) — confirm which denominator is intended.
    """
    if len(records) == 0:
        return None
    # Number of interactions per contact.
    user_count = Counter(r.correspondent_id for r in records)
    # How many interactions make up `percentage` of the total.
    target = int(math.ceil(sum(user_count.values()) * percentage))
    # Contacts ordered least-active first; pop() takes the most active.
    user_sort = sorted(user_count.keys(), key=lambda x: user_count[x])
    while target > 0 and len(user_sort) > 0:
        user_id = user_sort.pop()
        target -= user_count[user_id]
    return (len(user_count) - len(user_sort)) / len(records)
The percentage of the user's contacts that account for 80% of their interactions.
150
15
23,161
def number_of_interactions(records, direction=None):
    """Count the records, optionally restricted to one direction."""
    if direction is None:
        return len(records)
    return sum(1 for r in records if r.direction == direction)
The number of interactions .
44
5
23,162
def to_csv(weekmatrices, filename, digits=5):
    """Export a list of week-matrices to *filename* in CSV format."""
    def fmt(item):
        # None stays empty; floats are rounded to `digits`; others are str()'d.
        if item is None:
            return None
        if isinstance(item, float):
            return repr(round(item, digits))
        return str(item)

    with open(filename, 'w') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(['year_week', 'channel', 'weekday', 'section', 'value'])
        for row in weekmatrices:
            writer.writerow([fmt(cell) for cell in row])
Exports a list of week-matrices to a specified filename in the CSV format.
145
18
23,163
def read_csv(filename):
    """Read a list of week-matrices back from a CSV file."""
    with open(filename, 'r') as f:
        rows = csv.reader(f)
        next(rows)  # skip the header line
        wm = list(rows)

    # Convert columns 1-3 to int and column 4 to float, in place.
    for row in wm:
        row[1:4] = map(int, row[1:4])
        row[4] = float(row[4])
    return wm
Read a list of week-matrices from a CSV file.
95
13
23,164
def _extract_list_from_generator(generator):
    """Materialize a generator of iterables as a list of lists, so the
    contents can be traversed more than once."""
    return [list(item) for item in generator]
Iterates over a generator to extract all the objects and add them to a list . Useful when the objects have to be used multiple times .
35
28
23,165
def _seconds_to_section_split(record, sections):
    """Seconds from the record's datetime to the start of the next section."""
    weektime_minutes = _find_weektime(record.datetime)
    # First section boundary (in minutes) strictly after the record, as seconds.
    next_boundary = sections[bisect_right(sections, weektime_minutes)] * 60
    return next_boundary - _find_weektime(record.datetime, time_type='sec')
Finds the seconds to the next section from the datetime of a record .
67
16
23,166
def get_neighbors(distance_matrix, source, eps):
    """Indices of all points strictly closer than *eps* to *source*,
    according to the given distance matrix."""
    row = distance_matrix[source]
    return [idx for idx, dist in enumerate(row) if dist < eps]
Given a matrix of distance between couples of points return the list of every point closer than eps from a certain point .
43
24
23,167
def fix_location(records, max_elapsed_seconds=300):
    """Update record positions from the closest record in time.

    NOTE(review): records are grouped by ``r.direction`` and groups whose
    key is 'in' are skipped — confirm that position-bearing records carry
    the key expected here. Also, ``groups[i + 1]`` raises IndexError when
    the last group is not skipped; verify the input ordering guarantees a
    trailing 'in' group.
    """
    groups = itertools.groupby(records, lambda r: r.direction)
    groups = [(interaction, list(g)) for interaction, g in groups]

    def tdist(t1, t2):
        # Absolute time difference in seconds.
        return abs((t1 - t2).total_seconds())

    for i, (interaction, g) in enumerate(groups):
        if interaction == 'in':
            continue
        # Nearest records in the neighboring groups (before and after).
        prev_gps = groups[i - 1][1][-1]
        next_gps = groups[i + 1][1][0]
        for r in g:
            if tdist(r.datetime, prev_gps.datetime) <= max_elapsed_seconds:
                r.position = prev_gps.position
            elif tdist(r.datetime, next_gps.datetime) <= max_elapsed_seconds:
                r.position = next_gps.position
Update position of all records based on the position of the closest GPS record .
215
15
23,168
def fetch(cert, issuer, hash_algo='sha1', nonce=True, user_agent=None, timeout=10):
    """Fetch an OCSP response for *cert* from the certificate's OCSP URLs.

    :param cert: asn1crypto.x509.Certificate to check
    :param issuer: asn1crypto.x509.Certificate of the issuer
    :param hash_algo: 'sha1' or 'sha256', used for the CertId hashes
    :param nonce: whether to include (and verify) a nonce extension
    :param user_agent: unicode User-Agent header; defaults to certvalidator's
    :param timeout: request timeout in seconds
    :raises TypeError, ValueError: on invalid arguments
    :raises errors.OCSPValidationError: when the response nonce mismatches
    :return: an asn1crypto.ocsp.OCSPResponse
    """
    if not isinstance(cert, x509.Certificate):
        raise TypeError('cert must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(cert))
    if not isinstance(issuer, x509.Certificate):
        raise TypeError('issuer must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(issuer))
    if hash_algo not in set(['sha1', 'sha256']):
        raise ValueError('hash_algo must be one of "sha1", "sha256", not %s' % repr(hash_algo))
    if not isinstance(nonce, bool):
        raise TypeError('nonce must be a bool, not %s' % type_name(nonce))
    if user_agent is None:
        user_agent = 'certvalidator %s' % __version__
    elif not isinstance(user_agent, str_cls):
        raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))

    # Identify the certificate by hashes of its issuer name/key plus serial.
    cert_id = ocsp.CertId({
        'hash_algorithm': algos.DigestAlgorithm({'algorithm': hash_algo}),
        'issuer_name_hash': getattr(cert.issuer, hash_algo),
        'issuer_key_hash': getattr(issuer.public_key, hash_algo),
        'serial_number': cert.serial_number,
    })
    request = ocsp.Request({
        'req_cert': cert_id,
    })
    tbs_request = ocsp.TBSRequest({
        'request_list': ocsp.Requests([request]),
    })
    if nonce:
        # A random nonce ties the response to this request.
        nonce_extension = ocsp.TBSRequestExtension({
            'extn_id': 'nonce',
            'critical': False,
            'extn_value': core.OctetString(core.OctetString(os.urandom(16)).dump())
        })
        tbs_request['request_extensions'] = ocsp.TBSRequestExtensions([nonce_extension])
    ocsp_request = ocsp.OCSPRequest({
        'tbs_request': tbs_request,
    })

    # Try each responder URL; re-raise the last network error if all fail.
    last_e = None
    for ocsp_url in cert.ocsp_urls:
        try:
            request = Request(ocsp_url)
            request.add_header('Accept', 'application/ocsp-response')
            request.add_header('Content-Type', 'application/ocsp-request')
            request.add_header('User-Agent', user_agent)
            response = urlopen(request, ocsp_request.dump(), timeout)
            ocsp_response = ocsp.OCSPResponse.load(response.read())
            request_nonce = ocsp_request.nonce_value
            response_nonce = ocsp_response.nonce_value
            if request_nonce and response_nonce and request_nonce.native != response_nonce.native:
                raise errors.OCSPValidationError(
                    'Unable to verify OCSP response since the request and response nonces do not match'
                )
            return ocsp_response
        except (URLError) as e:
            last_e = e
    raise last_e
Fetches an OCSP response for a certificate
816
10
23,169
def _walk_issuers(self, path, paths, failed_paths):
    """Recursively extend *path* towards a trusted CA certificate.

    Complete paths (whose first cert is in ``self._ca_lookup``) are
    appended to *paths*; dead ends are appended to *failed_paths*.
    """
    if path.first.signature in self._ca_lookup:
        paths.append(path)
        return

    new_branches = 0
    for issuer in self._possible_issuers(path.first):
        try:
            self._walk_issuers(path.copy().prepend(issuer), paths, failed_paths)
            new_branches += 1
        except (DuplicateCertificateError):
            # The issuer is already in the path (a loop); try other issuers.
            pass
    if not new_branches:
        failed_paths.append(path)
Recursively looks through the list of known certificates for the issuer of the certificate specified stopping once the certificate in question is one contained within the CA certs list
125
31
23,170
def _possible_issuers(self, cert):
    """Yield every known certificate whose subject matches *cert*'s issuer."""
    issuer_hashable = cert.issuer.hashable
    if issuer_hashable not in self._subject_map:
        return

    for issuer in self._subject_map[issuer_hashable]:
        # Info from the authority key identifier extension can be used to
        # eliminate possible options when multiple keys with the same
        # subject exist, such as during a transition, or with cross-signing.
        if cert.authority_key_identifier and issuer.key_identifier:
            if cert.authority_key_identifier != issuer.key_identifier:
                continue
        elif cert.authority_issuer_serial:
            if cert.authority_issuer_serial != issuer.issuer_serial:
                continue
        yield issuer
Returns a generator that will list all possible issuers for the cert
159
13
23,171
def find_issuer(self, cert):
    """Return the entry in this path that issued *cert*.

    Raises LookupError when no entry matches.
    """
    for candidate in self:
        if candidate.subject != cert.issuer:
            continue
        # If both sides expose key identifiers, they must agree.
        if candidate.key_identifier and cert.authority_key_identifier:
            if candidate.key_identifier == cert.authority_key_identifier:
                return candidate
        else:
            return candidate
    raise LookupError('Unable to find the issuer of the certificate specified')
Return the issuer of the cert specified as defined by this path
83
12
23,172
def truncate_to(self, cert):
    """Drop every certificate in the path after *cert*; return self.

    Raises LookupError when *cert* is not in the path.
    """
    position = None
    for idx, entry in enumerate(self):
        if entry.issuer_serial == cert.issuer_serial:
            position = idx
            break
    if position is None:
        raise LookupError('Unable to find the certificate specified')
    # Pop from the end until `cert` is the last element.
    while len(self) > position + 1:
        self.pop()
    return self
Remove all certificates in the path after the cert specified
87
10
23,173
def truncate_to_issuer(self, cert):
    """Drop every certificate in the path after the issuer of *cert*;
    return self. Raises LookupError when no issuer is found."""
    position = None
    for idx, entry in enumerate(self):
        if entry.subject != cert.issuer:
            continue
        # When both key identifiers exist they must also match; otherwise
        # a subject match is sufficient.
        if entry.key_identifier and cert.authority_key_identifier:
            if entry.key_identifier == cert.authority_key_identifier:
                position = idx
                break
        else:
            position = idx
            break
    if position is None:
        raise LookupError('Unable to find the issuer of the certificate specified')
    while len(self) > position + 1:
        self.pop()
    return self
Remove all certificates in the path after the issuer of the cert specified as defined by this path
131
18
23,174
def copy(self):
    """Return a copy of this path with its certs list and hash set duplicated."""
    duplicate = self.__class__()
    duplicate._certs = self._certs[:]
    duplicate._cert_hashes = self._cert_hashes.copy()
    return duplicate
Creates a copy of this path
50
7
23,175
def pop(self):
    """Remove the last certificate from the path (and its hash); return self."""
    removed = self._certs.pop()
    self._cert_hashes.remove(removed.issuer_serial)
    return self
Removes the last certificate from the path
39
8
23,176
def fetch(cert, use_deltas=True, user_agent=None, timeout=10):
    """Download every CRL referenced by *cert*'s distribution points.

    With ``use_deltas`` the delta-CRL distribution points are included.
    Returns a list of parsed CRLs, one per distribution point.
    """
    if not isinstance(cert, x509.Certificate):
        raise TypeError('cert must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(cert))
    if user_agent is None:
        user_agent = 'certvalidator %s' % __version__
    elif not isinstance(user_agent, str_cls):
        raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))

    sources = cert.crl_distribution_points
    if use_deltas:
        sources.extend(cert.delta_crl_distribution_points)
    return [_grab_crl(user_agent, point.url, timeout) for point in sources]
Fetches the CRLs for a certificate
212
10
23,177
def _grab_crl(user_agent, url, timeout):
    """Fetch a single CRL from *url* and parse it into a CertificateList."""
    request = Request(url)
    request.add_header('Accept', 'application/pkix-crl')
    request.add_header('User-Agent', user_agent)
    response = urlopen(request, None, timeout)
    data = response.read()
    # The CRL may be served PEM-armored; unwrap to DER before parsing.
    if pem.detect(data):
        _, _, data = pem.unarmor(data)
    return crl.CertificateList.load(data)
Fetches a CRL and parses it
111
10
23,178
def fetch_certs(certificate_list, user_agent=None, timeout=10):
    """Download the issuer certificates referenced by a CertificateList.

    Each URL in ``certificate_list.issuer_cert_urls`` is fetched and the
    body parsed as either a bare certificate or a PKCS#7 bundle.
    Returns the list of asn1crypto.x509.Certificate objects found.
    """
    output = []

    if user_agent is None:
        user_agent = 'certvalidator %s' % __version__
    elif not isinstance(user_agent, str_cls):
        raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))

    for url in certificate_list.issuer_cert_urls:
        request = Request(url)
        request.add_header('Accept', 'application/pkix-cert,application/pkcs7-mime')
        request.add_header('User-Agent', user_agent)
        response = urlopen(request, None, timeout)

        content_type = response.headers['Content-Type'].strip()
        response_data = response.read()

        if content_type == 'application/pkix-cert':
            # A single DER certificate.
            output.append(x509.Certificate.load(response_data))
        elif content_type == 'application/pkcs7-mime':
            # A PKCS#7 SignedData structure possibly carrying several certs.
            signed_data = cms.SignedData.load(response_data)
            if isinstance(signed_data['certificates'], cms.CertificateSet):
                for cert_choice in signed_data['certificates']:
                    if cert_choice.name == 'certificate':
                        output.append(cert_choice.chosen)
        else:
            raise ValueError('Unknown content type of %s when fetching issuer certificate for CRL' % repr(content_type))

    return output
Fetches certificates from the authority information access extension of an asn1crypto . crl . CertificateList object and places them into the cert registry .
353
32
23,179
def validate_usage(self, key_usage, extended_key_usage=None, extended_optional=False):
    """Validate the certificate path, then check the key-usage and
    extended-key-usage constraints; return the validated path."""
    self._validate_path()
    validate_usage(self._context, self._certificate, key_usage,
                   extended_key_usage, extended_optional)
    return self._path
Validates the certificate path and that the certificate is valid for the key usage and extended key usage purposes specified .
69
22
23,180
def validate_tls(self, hostname):
    """Validate the certificate path and the certificate's validity for a
    TLS connection to *hostname*; return the validated path."""
    self._validate_path()
    validate_tls_hostname(self._context, self._certificate,
                          hostname)
    return self._path
Validates the certificate path that the certificate is valid for the hostname provided and that the certificate is valid for the purpose of a TLS connection .
48
29
23,181
def crls(self):
    """All cached CRL objects: the static ones when fetching is disabled,
    otherwise everything fetched so far."""
    if not self._allow_fetching:
        return self._crls
    collected = []
    for fetched in self._fetched_crls.values():
        collected.extend(fetched)
    return collected
A list of all cached asn1crypto.crl.CertificateList objects
60
17
23,182
def ocsps(self):
    """All cached OCSP response objects: the static ones when fetching is
    disabled, otherwise everything fetched so far."""
    if not self._allow_fetching:
        return self._ocsps
    collected = []
    for fetched in self._fetched_ocsps.values():
        collected.extend(fetched)
    return collected
A list of all cached asn1crypto.ocsp.OCSPResponse objects
64
19
23,183
def _extract_ocsp_certs(self, ocsp_response):
    """Register any certificates embedded in a successful OCSP response.

    Newly-seen certs are also remembered in ``self._revocation_certs``
    keyed by issuer_serial.
    """
    status = ocsp_response['response_status'].native
    if status == 'successful':
        response_bytes = ocsp_response['response_bytes']
        if response_bytes['response_type'].native == 'basic_ocsp_response':
            response = response_bytes['response'].parsed
            if response['certs']:
                for other_cert in response['certs']:
                    # add_other_cert() returns True only for newly-added certs.
                    if self.certificate_registry.add_other_cert(other_cert):
                        self._revocation_certs[other_cert.issuer_serial] = other_cert
Extracts any certificates included with an OCSP response and adds them to the certificate registry
156
18
23,184
def check_validation(self, cert):
    """Return the ValidationPath previously recorded for *cert*, or None.

    CA certificates from the trust list get a single-cert path recorded
    on first sight, so they always come back as validated.
    """
    # CA certs are automatically trusted since they are from the trust list
    if self.certificate_registry.is_ca(cert) and cert.signature not in self._validate_map:
        self._validate_map[cert.signature] = ValidationPath(cert)
    return self._validate_map.get(cert.signature)
Checks to see if a certificate has been validated and if so returns the ValidationPath used to validate it .
86
23
23,185
def clear_validation(self, cert):
    """Forget any recorded validation result for *cert* (no-op if absent)."""
    self._validate_map.pop(cert.signature, None)
Clears the record that a certificate has been validated
37
10
23,186
def _find_cert_in_list(cert, issuer, certificate_list, crl_issuer):
    """Search a CRL for *cert*; return ``(revocation_date, crl_reason)``.

    Returns ``(None, None)`` when the certificate is not listed.
    Raises NotImplementedError when an entry carries an unknown critical
    extension (such an entry cannot be interpreted safely).
    """
    revoked_certificates = certificate_list['tbs_cert_list']['revoked_certificates']

    cert_serial = cert.serial_number
    issuer_name = issuer.subject

    known_extensions = set(['crl_reason', 'hold_instruction_code', 'invalidity_date', 'certificate_issuer'])
    # Entries may switch the applicable issuer via issuer_name; track the
    # most recently seen one, starting from the CRL's own issuer.
    last_issuer_name = crl_issuer.subject

    for revoked_cert in revoked_certificates:
        # If any unknown critical extensions, the entry can not be used
        if revoked_cert.critical_extensions - known_extensions:
            raise NotImplementedError()

        if revoked_cert.issuer_name and revoked_cert.issuer_name != last_issuer_name:
            last_issuer_name = revoked_cert.issuer_name
        if last_issuer_name != issuer_name:
            continue

        if revoked_cert['user_certificate'].native != cert_serial:
            continue

        # Default to 'unspecified' when no reason is recorded.
        if not revoked_cert.crl_reason_value:
            crl_reason = crl.CRLReason('unspecified')
        else:
            crl_reason = revoked_cert.crl_reason_value

        return (revoked_cert['revocation_date'], crl_reason)

    return (None, None)
Looks for a cert in the list of revoked certificates
315
10
23,187
def add_child(self, valid_policy, qualifier_set, expected_policy_set):
    """Create a PolicyTreeNode from the given attributes and attach it as a
    child of this node."""
    node = PolicyTreeNode(valid_policy, qualifier_set, expected_policy_set)
    node.parent = self
    self.children.append(node)
Creates a new PolicyTreeNode as a child of this node
55
13
23,188
def at_depth(self, depth):
    """Yield every node *depth* levels below this one (0 = direct children)."""
    for child in list(self.children):
        if depth == 0:
            yield child
        else:
            yield from child.at_depth(depth - 1)
Returns a generator yielding all nodes in the tree at a specific depth
47
13
23,189
def walk_up(self, depth):
    """Yield nodes from *depth* levels down up to this node's children,
    deepest first (0 = direct children only)."""
    for child in list(self.children):
        if depth != 0:
            yield from child.walk_up(depth - 1)
        yield child
Returns a generator yielding all nodes in the tree at a specific depth or above . Yields nodes starting with leaves and traversing up to the root .
45
31
23,190
def clear(self):
    """Close every idle connection currently held in the pool."""
    while not self._pool.empty():
        conn = yield from self._pool.get()
        self._do_close(conn)
Clear pool connections .
38
4
23,191
def acquire(self):
    """Acquire a connection from the pool, growing it up to ``_minsize``
    and replacing broken connections; the returned connection is tracked
    in ``self._in_use``."""
    # Grow the pool until the minimum size is reached (or creation fails).
    while self.size() == 0 or self.size() < self._minsize:
        _conn = yield from self._create_new_conn()
        if _conn is None:
            break
        self._pool.put_nowait(_conn)

    conn = None
    while not conn:
        _conn = yield from self._pool.get()
        if _conn.reader.at_eof() or _conn.reader.exception():
            # Connection died while idle: close it and make a fresh one.
            self._do_close(_conn)
            conn = yield from self._create_new_conn()
        else:
            conn = _conn

    self._in_use.add(conn)
    return conn
Acquire connection from the pool or spawn new one if pool maxsize permits .
147
16
23,192
def release ( self , conn ) : self . _in_use . remove ( conn ) if conn . reader . at_eof ( ) or conn . reader . exception ( ) : self . _do_close ( conn ) else : self . _pool . put_nowait ( conn )
Releases connection back to the pool .
62
8
23,193
def get(self, conn, key, default=None):
    """Return the value stored at *key* on the server, or *default* when missing."""
    values, _ = yield from self._multi_get(conn, key)
    return values.get(key, default)
Gets a single value from the server .
40
9
23,194
def gets(self, conn, key, default=None):
    """Return ``(value, cas_token)`` for *key*; the value falls back to
    *default* and the token to None when the key is missing."""
    values, cas_tokens = yield from self._multi_get(conn, key, with_cas=True)
    return values.get(key, default), cas_tokens.get(key)
Gets a single value from the server together with the cas token .
61
14
23,195
def multi_get(self, conn, *keys):
    """Return a tuple of values for *keys*, with None for missing keys."""
    values, _ = yield from self._multi_get(conn, *keys)
    return tuple(values.get(key) for key in keys)
Takes a list of keys and returns a list of values .
45
13
23,196
def stats(self, conn, args=None):
    """Run a ``stats`` command on the server; return a dict of stat values."""
    # req  - stats [additional args]\r\n
    # resp - STAT <name> <value>\r\n (one per result)
    #        END\r\n
    if args is None:
        args = b''
    conn.writer.write(b''.join((b'stats ', args, b'\r\n')))

    result = {}
    resp = yield from conn.reader.readline()
    while resp != b'END\r\n':
        terms = resp.split()
        if len(terms) == 2 and terms[0] == b'STAT':
            # Stat with no value.
            result[terms[1]] = None
        elif len(terms) == 3 and terms[0] == b'STAT':
            result[terms[1]] = terms[2]
        elif len(terms) >= 3 and terms[0] == b'STAT':
            # The value contained spaces; re-join the remaining terms.
            result[terms[1]] = b' '.join(terms[2:])
        else:
            raise ClientException('stats failed', resp)
        resp = yield from conn.reader.readline()
    return result
Runs a stats command on the server .
248
9
23,197
def append(self, conn, key, value, exptime=0):
    """Append *value* to the data already stored at *key*."""
    flags = 0  # TODO: fix when exception removed
    result = yield from self._storage_command(conn, b'append', key, value,
                                              flags, exptime)
    return result
Add data to an existing key after existing data
58
9
23,198
def prepend(self, conn, key, value, exptime=0):
    """Prepend *value* to the data already stored at *key*."""
    flags = 0  # TODO: fix when exception removed
    result = yield from self._storage_command(conn, b'prepend', key, value,
                                              flags, exptime)
    return result
Add data to an existing key before existing data
60
9
23,199
def incr(self, conn, key, increment=1):
    """Increment the decimal value stored at *key* by *increment* in place."""
    assert self._validate_key(key)
    result = yield from self._incr_decr(conn, b'incr', key, increment)
    return result
Command is used to change data for some item in - place incrementing it . The data for the item is treated as decimal representation of a 64 - bit unsigned integer .
53
34