idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
9,800
def download_and_unpack(self, url, *paths, **kw):
    """Download a zipfile and immediately unpack selected content.

    :param url: URL of the zip archive to download.
    :param paths: archive member names to extract and copy into ``self``.
    :param kw: ``log`` is popped and forwarded to ``temp_download``.

    NOTE(review): relies on the project helpers ``temp_download``,
    ``as_posix`` and ``copy``; the downloaded archive lives only for the
    duration of the ``with`` block.
    """
    with self.temp_download(url, 'ds.zip', log=kw.pop('log', None)) as zipp:
        with TemporaryDirectory() as tmpdir:
            with zipfile.ZipFile(zipp.as_posix()) as zipf:
                for path in paths:
                    # Extract the requested member into the temporary
                    # directory, then copy it into this dataset.
                    zipf.extract(as_posix(path), path=tmpdir.as_posix())
                    copy(tmpdir.joinpath(path), self)
Download a zipfile and immediately unpack selected content .
127
11
9,801
def getRanking(self, profile, sampleFileName=None):
    """
    Return a list of all candidates ordered from best to worst according
    to their (approximated) Bayesian utilities for the given profile.

    :param profile: the election profile to rank candidates for.
    :param sampleFileName: optional file of pre-generated MCMC samples;
        when given, scores are read from the file instead of sampled.

    NOTE(review): the original docstring promised "a list of lists"
    (tiers), but tied candidates are flattened into one list in
    arbitrary order -- confirm against callers before changing.
    """
    if sampleFileName is not None:
        candScoresMap = self.getCandScoresMapFromSamplesFile(profile, sampleFileName)
    else:
        candScoresMap = self.getCandScoresMap(profile)

    # Invert the map: score -> list of candidates that have that score.
    reverseCandScoresMap = dict()
    for cand, score in candScoresMap.items():
        reverseCandScoresMap.setdefault(score, []).append(cand)

    # Sort scores best-first: descending when higher scores are better.
    if self.maximizeCandScore:
        sortedCandScores = sorted(reverseCandScoresMap.keys(), reverse=True)
    else:
        sortedCandScores = sorted(reverseCandScoresMap.keys())

    # Emit candidates in the order their scores appear.
    ranking = []
    for candScore in sortedCandScores:
        ranking.extend(reverseCandScoresMap[candScore])
    return ranking
Returns a list of lists that orders all candidates in tiers from best to worst when we use MCMC approximation to compute Bayesian utilities for an election profile .
258
31
9,802
def getCandScoresMap(self, profile):
    """Return a dict mapping each candidate's integer representation to
    the Bayesian utility approximated by MCMC sampling of the profile.

    Burn-in samples are discarded first; then n2 utility measurements
    are accumulated with n1 chain steps of thinning between them, and
    the totals are averaged over n2.
    """
    wmg = profile.getWmg(True)
    V = self.getInitialSample(wmg)
    # Accumulators for each candidate's utility.
    utilities = dict()
    for cand in profile.candMap.keys():
        utilities[cand] = 0.0
    # Discard burn-in samples so the chain can approach stationarity.
    for i in range(0, self.burnIn):
        V = self.sampleGenerator.getNextSample(V)
    for i in range(0, self.n2):
        # Advance the chain n1 steps between measurements (thinning).
        for j in range(0, self.n1):
            V = self.sampleGenerator.getNextSample(V)
        for cand in profile.candMap.keys():
            utilities[cand] += self.utilityFunction.getUtility([cand], V)
    # Average the accumulated utilities over the n2 measurements.
    for cand in profile.candMap.keys():
        utilities[cand] = utilities[cand] / self.n2
    return utilities
Returns a dictionary that associates the integer representation of each candidate with the Bayesian utilities we approximate from our sampling of the profile.
186
27
9,803
def getCandScoresMapFromSamplesFile(self, profile, sampleFileName):
    """Return a dict mapping each candidate's integer representation to
    the Bayesian utility approximated from samples stored in a file.

    :param profile: the election profile.
    :param sampleFileName: file produced by printMcmcSamplesToFile.

    Fix: the file is now opened via a ``with`` block so the handle is
    closed even when parsing raises (the original leaked it).
    """
    wmg = profile.getWmg(True)

    # Initialize our list of expected utilities.
    utilities = dict()
    for cand in wmg.keys():
        utilities[cand] = 0.0

    with open(sampleFileName) as sampleFile:
        # Skip the metadata lines and the burn-in samples.
        for i in range(0, SAMPLESFILEMETADATALINECOUNT):
            sampleFile.readline()
        for i in range(0, self.burnIn):
            sampleFile.readline()

        # Update utilities as we read; only every n1-th sample is used
        # (thinning), so numSamples ends up equal to self.n2.
        numSamples = 0
        for i in range(0, self.n2 * self.n1):
            line = sampleFile.readline()
            if i % self.n1 != 0:
                continue
            sample = json.loads(line)
            for cand in wmg.keys():
                utilities[cand] += self.utilityFunction.getUtility([cand], sample)
            numSamples += 1

    for key in utilities.keys():
        utilities[key] = utilities[key] / numSamples
    return utilities
Returns a dictionary that associates the integer representation of each candidate with the Bayesian utilities we approximate from the samples we generated into a file.
268
29
9,804
def printMcmcSamplesToFile(self, profile, numSamples, outFileName):
    """Generate MCMC samples and write them to a file.

    The header holds the number of candidates, phi and the sample count;
    each subsequent line is one JSON-encoded sample.

    Fix: the output file is now managed by a ``with`` block so it is
    closed even if sample generation raises mid-way.
    """
    wmg = profile.getWmg(True)
    V = self.getInitialSample(wmg)
    with open(outFileName, 'w') as outFile:
        # Print the number of candidates, phi, and the number of samples.
        outFile.write("m," + str(profile.numCands) + '\n')
        outFile.write("phi," + str(self.phi) + '\n')
        outFile.write("numSamples," + str(numSamples))
        for i in range(0, numSamples):
            V = self.sampleGenerator.getNextSample(V)
            outFile.write("\n" + json.dumps(V))
Generate samples to a file .
184
7
9,805
def kendallTau(self, orderVector, wmgMap):
    """Kendall-tau distance between a single vote and the election WMG.

    Counts (weighted) discordant pairs: for every ordered pair in the
    vote, a positive entry wmgMap[lower][higher] means the election
    disagrees with the vote's ordering. The vote is expected to be a
    strict complete ordering over the candidates.
    """
    total = 0.0
    for higher, lower in itertools.combinations(orderVector, 2):
        total += max(0, wmgMap[lower][higher])
    return total
Given a ranking for a single vote and a wmg for the entire election calculate the kendall - tau distance . a . k . a the number of discordant pairs between the wmg for the vote and the wmg for the election . Currently we expect the vote to be a strict complete ordering over the candidates .
75
66
9,806
def getInitialSample(self, wmg):
    """Generate an initial sample for the Markov chain.

    Returns a list of the integer representations of all candidates in a
    uniformly random strict order (first to last).

    Fix: ``copy.deepcopy(wmg.keys())`` breaks on Python 3 because dict
    views cannot be deep-copied or shuffled; materializing the keys into
    a fresh list preserves the original intent on both versions.
    """
    V = list(wmg.keys())
    random.shuffle(V)
    return V
Generate an initial sample for the Markov chain . This function will return a list containing integer representations of each candidate in order of their rank in the current vote from first to last . The list will be a complete strict ordering over the candidates . Initially we rank the candidates in random order .
33
58
9,807
def getInitialSample(self, wmg):
    """Build the initial Markov-chain state as a binary relation.

    Returns a two-dimensional array where entry [a][b] is 1 if more
    votes rank candidate a above candidate b, and 0 otherwise.
    """
    num_cands = len(wmg)
    V = self.createBinaryRelation(num_cands)
    for c1, c2 in itertools.combinations(range(num_cands), 2):
        # wmg is 1-indexed; the relation matrix is 0-indexed.
        if wmg[c1 + 1][c2 + 1] > 0:
            V[c1][c2], V[c2][c1] = 1, 0
        else:
            V[c1][c2], V[c2][c1] = 0, 1
    return V
Generate an initial sample for the Markov chain . This function will return a two - dimensional array of integers such that for each pair of candidates cand1 and cand2 the array contains 1 if more votes rank cand1 above cand2 and 0 otherwise .
146
51
9,808
def filter_input(keys, raw):
    """Map mouse-wheel events and VI-style keys onto 'up'/'down'.

    Adds fancy mouse-wheel functionality and VI navigation to ListBox:
    wheel buttons 4/5 and the configured UI up/down keys are rewritten
    in place; everything else passes through untouched.
    """
    if len(keys) != 1:
        return keys
    key = keys[0]
    if key in UI.keys['up']:
        keys[0] = 'up'
    elif key in UI.keys['down']:
        keys[0] = 'down'
    elif len(key) == 4 and key[0] == 'mouse press':
        button = key[1]
        if button == 4:
            keys[0] = 'up'
        elif button == 5:
            keys[0] = 'down'
    return keys
Adds fancy mouse wheel functionality and VI navigation to ListBox
131
11
9,809
def wordlist2cognates(wordlist, source, expert='expert', ref='cogid'):
    """Yield CLDF cognate-set dictionaries for every entry in `wordlist`.

    The cognate set ID combines the slugified concept with the value of
    the `ref` column (by default 'cogid').
    """
    for key in wordlist:
        cogset_id = '{0}-{1}'.format(slug(wordlist[key, 'concept']), wordlist[key, ref])
        yield dict(
            Form_ID=wordlist[key, 'lid'],
            ID=key,
            Form=wordlist[key, 'ipa'],
            Cognateset_ID=cogset_id,
            Cognate_Detection_Method=expert,
            Source=source,
        )
Turn a wordlist into a cognate set list using the cldf parameters .
124
17
9,810
def _cldf2wld(dataset):
    """Build a lingpy-compatible dictionary from the CLDF FormTable.

    Row 0 holds the (lower-cased) header with 'lid' prepended; rows
    1..n hold one form each, with 'Segments' joined into a string.
    """
    header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']
    D = {0: ['lid'] + [h.lower() for h in header]}
    for num, raw_row in enumerate(dataset.objects['FormTable'], start=1):
        row = deepcopy(raw_row)
        row['Segments'] = ' '.join(row['Segments'])
        D[num] = [row['ID']] + [row[h] for h in header]
    return D
Make a lingpy-compatible dictionary out of CLDF main data.
140
15
9,811
def _cldf2lexstat(dataset,
                  segments='segments',
                  transcription='value',
                  row='parameter_id',
                  col='language_id'):
    """Read a lingpy LexStat object from a CLDF dataset.

    The dataset is first converted to a lingpy wordlist dict via
    ``_cldf2wld``; the remaining keyword arguments are forwarded to
    ``lingpy.LexStat``.
    """
    D = _cldf2wld(dataset)
    return lingpy.LexStat(D, segments=segments, transcription=transcription, row=row, col=col)
Read LexStat object from cldf dataset .
79
10
9,812
def _cldf2wordlist(dataset, row='parameter_id', col='language_id'):
    """Read a lingpy Wordlist object from a CLDF dataset."""
    return lingpy.Wordlist(_cldf2wld(dataset), row=row, col=col)
Read a wordlist object from a CLDF dataset.
55
10
9,813
def iter_cognates(dataset, column='Segments', method='turchin', threshold=0.5, **kw):
    """Compute cognates automatically for a given dataset.

    :param method: 'turchin' (consonant-class matching), 'sca' or
        'lexstat' (lingpy clustering).
    :param threshold: clustering threshold for 'sca'/'lexstat'.
    :param kw: forwarded to ``lex.get_scorer`` for 'lexstat'.

    Yields plain dicts for 'turchin' and ``Cognate`` objects for the
    lingpy-based methods.
    """
    if method == 'turchin':
        for row in dataset.objects['FormTable']:
            # Dolgopolsky sound classes; a leading vowel is mapped to 'H'.
            sounds = ''.join(lingpy.tokens2class(row[column], 'dolgo'))
            if sounds.startswith('V'):
                sounds = 'H' + sounds
            # Keep the first two non-vowel classes as the CMM signature.
            sounds = '-'.join([s for s in sounds if s != 'V'][:2])
            cogid = slug(row['Parameter_ID']) + '-' + sounds
            # '0' marks an unconvertible token; skip those forms.
            if '0' not in sounds:
                yield dict(
                    Form_ID=row['ID'],
                    Form=row['Value'],
                    Cognateset_ID=cogid,
                    Cognate_Detection_Method='CMM')
    if method in ['sca', 'lexstat']:
        lex = _cldf2lexstat(dataset)
        if method == 'lexstat':
            lex.get_scorer(**kw)
        lex.cluster(method=method, threshold=threshold, ref='cogid')
        for k in lex:
            yield Cognate(
                Form_ID=lex[k, 'lid'],
                Form=lex[k, 'value'],
                Cognateset_ID=lex[k, 'cogid'],
                Cognate_Detection_Method=method + '-t{0:.2f}'.format(threshold))
Compute cognates automatically for a given dataset .
344
10
9,814
def iter_alignments(dataset, cognate_sets, column='Segments', method='library'):
    """Compute automatic alignments and write them into `cognate_sets`.

    When `dataset` is not already a lingpy parser it is converted to a
    wordlist and cognate IDs are injected from `cognate_sets`; otherwise
    the dataset is aligned directly. Each cognate dict gains
    'Alignment' and 'Alignment_Method' entries (mutated in place).
    """
    if not isinstance(dataset, lingpy.basic.parser.QLCParser):
        wordlist = _cldf2wordlist(dataset)
        cognates = {r['Form_ID']: r for r in cognate_sets}
        # Attach a cognate id to every wordlist row (0 when unknown).
        wordlist.add_entries(
            'cogid', 'lid',
            lambda x: cognates[x]['Cognateset_ID'] if x in cognates else 0)
        alm = lingpy.Alignments(
            wordlist, ref='cogid', row='parameter_id', col='language_id',
            segments=column.lower())
        alm.align(method=method)
        for k in alm:
            if alm[k, 'lid'] in cognates:
                cognate = cognates[alm[k, 'lid']]
                cognate['Alignment'] = alm[k, 'alignment']
                cognate['Alignment_Method'] = method
    else:
        alm = lingpy.Alignments(dataset, ref='cogid')
        alm.align(method=method)
        for cognate in cognate_sets:
            idx = cognate['ID'] or cognate['Form_ID']
            cognate['Alignment'] = alm[int(idx), 'alignment']
            cognate['Alignment_Method'] = 'SCA-' + method
Function computes automatic alignments and writes them to file .
350
12
9,815
def tohdf5(input_files, output_file, n_events, conv_times_to_jte, **kwargs):
    """Convert any supported file to an HDF5 file.

    :param input_files: list of input filenames; with more than one
        input, each output is written to "<input>.h5" and the given
        `output_file` is ignored (NOTE(review): confirm this is the
        intended CLI behavior).
    :param n_events: number of events to drain from the pipeline.
    :param conv_times_to_jte: when truthy, attach an MCTimeCorrector.
    :param kwargs: stored as HDF5 metadata and ALSO forwarded verbatim
        to both GenericPump and HDF5Sink -- presumably both accept the
        same option set; verify before adding new options.
    """
    if len(input_files) > 1:
        cprint("Preparing to convert {} files to HDF5.".format(len(input_files)))
    from km3pipe import Pipeline    # noqa
    from km3pipe.io import GenericPump, HDF5Sink, HDF5MetaData    # noqa
    for input_file in input_files:
        cprint("Converting '{}'...".format(input_file))
        if len(input_files) > 1:
            # Multiple inputs: derive each output name from the input.
            output_file = input_file + '.h5'
        meta_data = kwargs.copy()
        meta_data['origin'] = input_file
        pipe = Pipeline()
        pipe.attach(HDF5MetaData, data=meta_data)
        pipe.attach(GenericPump, filenames=input_file, **kwargs)
        pipe.attach(StatusBar, every=250)
        if conv_times_to_jte:
            from km3modules.mc import MCTimeCorrector
            pipe.attach(MCTimeCorrector)
        pipe.attach(HDF5Sink, filename=output_file, **kwargs)
        pipe.drain(n_events)
        cprint("File '{}' was converted.".format(input_file))
Convert Any file to HDF5 file
313
9
9,816
def update_channels(self):
    """Pull all stream definitions out of the database and populate the
    channels with stream references.

    Streams whose channel cannot be resolved are skipped with a warning;
    a stream that already exists in a non-assets channel raises
    StreamAlreadyExistsError.
    """
    logging.info("Updating channels")
    with switch_db(StreamDefinitionModel, 'hyperstream'):
        for s in StreamDefinitionModel.objects():
            try:
                stream_id = StreamId(name=s.stream_id.name, meta_data=s.stream_id.meta_data)
            except AttributeError as e:
                raise e
            logging.debug("Processing {}".format(stream_id))
            try:
                # This can fail if a plugin has been defined by a different instantiation of HyperStream on the same
                # database.
                channel = self.get_channel(s.channel_id)
            except ChannelNotFoundError as e:
                logging.warn(e)
                continue
            # calculated_intervals = TimeIntervals(map(lambda x: (x.start, x.end), s.calculated_intervals))
            last_accessed = utcnow()
            last_updated = s.last_updated if s.last_updated else utcnow()
            if stream_id in channel.streams:
                # Asset channels may legitimately re-declare streams.
                if isinstance(channel, (AssetsChannel, AssetsFileChannel)):
                    continue
                raise StreamAlreadyExistsError(stream_id)
            from . import MemoryChannel, DatabaseChannel
            if isinstance(channel, MemoryChannel):
                channel.create_stream(stream_id)
            elif isinstance(channel, DatabaseChannel):
                # The assets channel gets AssetStream, others DatabaseStream.
                if channel == self.assets:
                    stream_type = AssetStream
                else:
                    stream_type = DatabaseStream
                channel.streams[stream_id] = stream_type(
                    channel=channel,
                    stream_id=stream_id,
                    calculated_intervals=None,  # Not required since it's initialised from mongo_model in __init__
                    last_accessed=last_accessed,
                    last_updated=last_updated,
                    sandbox=s.sandbox,
                    mongo_model=s)
            else:
                logging.warn("Unable to parse stream {}".format(stream_id))
Pulls out all of the stream definitions from the database and populates the channels with stream references
421
19
9,817
def get_tool_class(self, tool):
    """Get the actual tool class, to be instantiated with its parameters.

    :param tool: tool name (string) or a StreamId.
    :raises TypeError: if `tool` is neither a string nor a StreamId.
    :raises ToolNotFoundError: if no tool stream, or no value in it,
        can be found.
    """
    if isinstance(tool, string_types):
        tool_id = StreamId(tool)
    elif isinstance(tool, StreamId):
        tool_id = tool
    else:
        raise TypeError(tool)

    tool_stream_view = None

    # Look in the main tool channel first
    if tool_id in self.tools:
        tool_stream_view = self.tools[tool_id].window((MIN_DATE, self.tools.up_to_timestamp))
    else:
        # Otherwise look through all the channels in the order they were
        # defined; a match in a later channel deliberately overrides
        # earlier ones (behavior preserved from the original).
        for tool_channel in self.tool_channels:
            if tool_channel == self.tools:
                continue
            if tool_id in tool_channel:
                # noinspection PyTypeChecker
                tool_stream_view = tool_channel[tool_id].window((MIN_DATE, tool_channel.up_to_timestamp))

    if tool_stream_view is None:
        raise ToolNotFoundError(tool)

    # TODO: Use tool versions - here we just take the latest one
    last = tool_stream_view.last()
    if last is None:
        raise ToolNotFoundError(tool)
    # Fix: reuse the fetched document instead of calling last() twice.
    return last.value
Gets the actual class which can then be instantiated with its parameters
272
14
9,818
def is_sub_plate(self, other):
    """Determine whether this plate is a sub-plate of `other`, i.e. it
    has the same meta data but a restricted set of values.

    Three conditions are tried in turn:
    1. every value of this plate appears verbatim among `other`'s values;
    2. every value of this plate is contained (as key/value pairs) in
       some value of `other`;
    3. `other` is one of this plate's ancestors.
    """
    if all(v in set(other.values) for v in self.values):
        return True
    if all(any(all(spv in m for spv in v) for m in map(set, other.values)) for v in self.values):
        return True
    if other in self.ancestor_plates:    # added by MK, but still not sure whether all cases are covered
        return True
    return False
Determines if this plate is a sub - plate of another plate - i . e . has the same meta data but a restricted set of values
99
30
9,819
def normalize_value(value):
    """Normalize a metadata value to improve match accuracy.

    Lower-cases (casefolds) the value and applies a fixed sequence of
    regex cleanups for track numbers, punctuation, a leading "the" and
    whitespace. Returns the normalized string.
    """
    text = str(value).casefold()
    rules = (
        (r'\/\s*\d+', ''),          # drop "/<totaltracks>" from track number
        (r'^0+([0-9]+)', r'\1'),    # strip leading zeros from track number
        (r'^(\d+)\.+', r'\1'),      # strip dots after a track number
        (r'[^\w\s]', ''),           # drop non-word characters
        (r'^the\s+', ''),           # drop a leading "the"
        (r'^\s+', ''),              # trim leading whitespace
        (r'\s+$', ''),              # trim trailing whitespace
        (r'\s+', ' '),              # collapse runs of whitespace
    )
    for pattern, replacement in rules:
        text = re.sub(pattern, replacement, text)
    return text
Normalize metadata value to improve match accuracy .
252
9
9,820
def _init_from_file(self, filename):
    """Create the detector from a detx file.

    :param filename: path to the file; must end in "detx".
    :raises NotImplementedError: for any other file extension.
    """
    if not filename.endswith("detx"):
        raise NotImplementedError('Only the detx format is supported.')
    self._open_file(filename)
    # Comments first (full-file scan), then header, then the DOM/PMT body.
    self._extract_comments()
    self._parse_header()
    self._parse_doms()
    self._det_file.close()
Create detector from detx file .
88
7
9,821
def _readline ( self , ignore_comments = True ) : while True : line = self . _det_file . readline ( ) if line == '' : return line # To conform the EOF behaviour of .readline() line = line . strip ( ) if line == '' : continue # white-space-only line if line . startswith ( '#' ) : if not ignore_comments : return line else : return line
The next line of the DETX file optionally ignores comments
93
11
9,822
def _extract_comments(self):
    """Collect every comment line ('#'-prefixed) from the file.

    Rewinds the file, then registers each comment (without the leading
    '#') via add_comment.
    """
    self._det_file.seek(0, 0)
    for raw in self._det_file.readlines():
        stripped = raw.strip()
        if stripped.startswith('#'):
            self.add_comment(stripped[1:])
Retrieve all comments from the file
69
7
9,823
def _parse_header(self):
    """Extract information from the header of the detector file.

    A v1 header is a single "<det_id> <n_doms>" line; newer versions
    start with "<det_id> <version>" followed by validity, UTM and DOM
    count lines.
    """
    self.print("Parsing the DETX header")
    self._det_file.seek(0, 0)
    first_line = self._readline()
    try:
        # v1 header: "<det_id> <n_doms>" on the first line.
        self.det_id, self.n_doms = split(first_line, int)
        self.version = 'v1'
    except ValueError:
        # Newer format: "<det_id> <version>", then extra header lines.
        det_id, self.version = first_line.split()
        self.det_id = int(det_id)
        validity = self._readline().strip()
        self.valid_from, self.valid_until = split(validity, float)
        raw_utm_info = self._readline().strip().split(' ')
        try:
            self.utm_info = UTMInfo(*raw_utm_info[1:])
        except TypeError:
            log.warning("Missing UTM information.")
        n_doms = self._readline()
        self.n_doms = int(n_doms)
Extract information from the header of the detector file
224
10
9,824
def dom_positions(self):
    """The positions of the DOMs, calculated from PMT directions (cached).

    Each DOM centre is estimated by intersecting the lines that run
    backwards (10 units) along each PMT direction from its position.
    """
    if not self._dom_positions:
        for dom_id in self.dom_ids:
            mask = self.pmts.dom_id == dom_id
            pmt_pos = self.pmts[mask].pos
            pmt_dir = self.pmts[mask].dir
            # Intersect lines through the PMTs pointing inwards.
            centre = intersect_3d(pmt_pos, pmt_pos - pmt_dir * 10)
            self._dom_positions[dom_id] = centre
    return self._dom_positions
The positions of the DOMs calculated from PMT directions .
118
12
9,825
def dom_table(self):
    """A Table containing DOM attributes (id, DU, floor, position); cached."""
    if self._dom_table is None:
        data = defaultdict(list)
        for dom_id, (du, floor, _) in self.doms.items():
            data['dom_id'].append(dom_id)
            data['du'].append(du)
            data['floor'].append(floor)
            # Positions come from the (lazily computed) dom_positions cache.
            dom_position = self.dom_positions[dom_id]
            data['pos_x'].append(dom_position[0])
            data['pos_y'].append(dom_position[1])
            data['pos_z'].append(dom_position[2])
        self._dom_table = Table(data, name='DOMs', h5loc='/dom_table')
    return self._dom_table
A Table containing DOM attributes
188
5
9,826
def com(self):
    """Center of mass, the mean of all PMT positions (lazily cached)."""
    cached = self._com
    if cached is None:
        cached = np.mean(self.pmts.pos, axis=0)
        self._com = cached
    return cached
Center of mass calculated from the mean of the PMT positions
39
12
9,827
def xy_positions(self):
    """XY positions of the DUs, given by the DOMs on floor 1 (cached)."""
    if self._xy_positions is None or len(self._xy_positions) == 0:
        # One (x, y) entry per floor-1 DOM.
        floor_one = [
            np.array([pos[0], pos[1]])
            for dom_id, pos in self.dom_positions.items()
            if self.domid2floor(dom_id) == 1
        ]
        self._xy_positions = np.array(floor_one)
    return self._xy_positions
XY positions of the DUs given by the DOMs on floor 1 .
123
15
9,828
def translate_detector(self, vector):
    """Translate the whole detector by a given 3D vector.

    :param vector: sequence of three numbers (dx, dy, dz).
    """
    dx, dy, dz = np.array(vector, dtype=float)
    self.pmts.pos_x += dx
    self.pmts.pos_y += dy
    self.pmts.pos_z += dz
    # Cached positions are stale after moving the detector.
    self.reset_caches()
Translate the detector by a given vector
71
8
9,829
def pmt_angles(self):
    """A list of PMT directions sorted by PMT channel, on DU-1 floor-1.

    NOTE(review): the cache test compares against the literal `[]`;
    presumably `_pmt_angles` is initialised to a plain list (a numpy
    array would make `== []` elementwise) -- confirm at the initializer.
    """
    if self._pmt_angles == []:
        mask = (self.pmts.du == 1) & (self.pmts.floor == 1)
        self._pmt_angles = self.pmts.dir[mask]
    return self._pmt_angles
A list of PMT directions sorted by PMT channel on DU - 1 floor - 1
69
18
9,830
def ascii(self):
    """The ASCII (DETX) representation of the detector.

    For v3 files the stored comments are re-emitted first; the header
    layout depends on the format version, and each DOM line is followed
    by one line per PMT (with a trailing status field in v3).
    """
    comments = ''
    if self.version == 'v3':
        for comment in self.comments:
            # Keep a space after the '#' for readability.
            if not comment.startswith(' '):
                comment = ' ' + comment
            comments += "#" + comment + "\n"
    if self.version == 'v1':
        # v1 header is a single "<det_id> <n_doms>" line.
        header = "{det.det_id} {det.n_doms}".format(det=self)
    else:
        header = "{det.det_id} {det.version}".format(det=self)
        header += "\n{0} {1}".format(self.valid_from, self.valid_until)
        header += "\n" + str(self.utm_info) + "\n"
        header += str(self.n_doms)
    doms = ""
    for dom_id, (line, floor, n_pmts) in self.doms.items():
        doms += "{0} {1} {2} {3}\n".format(dom_id, line, floor, n_pmts)
        for channel_id in range(n_pmts):
            pmt_idx = self._pmt_index_by_omkey[(line, floor, channel_id)]
            pmt = self.pmts[pmt_idx]
            doms += " {0} {1} {2} {3} {4} {5} {6} {7}".format(
                pmt.pmt_id, pmt.pos_x, pmt.pos_y, pmt.pos_z,
                pmt.dir_x, pmt.dir_y, pmt.dir_z, pmt.t0)
            if self.version == 'v3':
                doms += " {0}".format(pmt.status)
            doms += "\n"
    return comments + header + "\n" + doms
The ascii representation of the detector
413
8
9,831
def write(self, filename):
    """Save the detector as a DETX file and report the location."""
    with open(filename, 'w') as fobj:
        fobj.write(self.ascii)
    self.print("Detector file saved as '{0}'".format(filename))
Save detx file .
51
5
9,832
def pmt_with_id(self, pmt_id):
    """Return the PMT with the given global pmt_id.

    :raises KeyError: with a readable message when the ID is unknown.
    """
    try:
        index = self._pmt_index_by_pmt_id[pmt_id]
        return self.pmts[index]
    except KeyError:
        raise KeyError("No PMT found for ID: {0}".format(pmt_id))
Get PMT with global pmt_id
73
9
9,833
def get_pmt(self, dom_id, channel_id):
    """Return the PMT identified by a DOM ID and a DAQ channel ID."""
    du, floor, _ = self.doms[dom_id]
    omkey = (du, floor, channel_id)
    return self.pmts[self._pmt_index_by_omkey[omkey]]
Return PMT with DOM ID and DAQ channel ID
67
11
9,834
def convert_mc_times_to_jte_times(times_mc, evt_timestamp_in_ns, evt_mc_time):
    """Convert MC times to JTE times.

    :param times_mc: MC times (array-like).
    :param evt_timestamp_in_ns: event timestamp in nanoseconds.
    :param evt_mc_time: event MC time.
    :returns: numpy float array of JTE times.
    """
    # Cast to a plain float ndarray (not a recarray); otherwise the
    # arithmetic below triggers invalid type promotion.
    mc_times = np.array(times_mc).astype(float)
    return mc_times - evt_timestamp_in_ns + evt_mc_time
Function that converts MC times to JTE times .
108
10
9,835
def iexists(irods_path):
    """Return True if the iRODS path exists, otherwise False.

    Security fix: the path is now passed as an argv list instead of
    being interpolated into a ``shell=True`` command string, so paths
    containing shell metacharacters can no longer inject commands.
    OSError (e.g. `ils` not installed) is treated like a failed lookup,
    matching the original shell behavior (exit code 127).
    """
    try:
        subprocess.check_output(
            ['ils', irods_path],
            stderr=subprocess.PIPE,
        )
        return True
    except (subprocess.CalledProcessError, OSError):
        return False
Returns True of iRODS path exists otherwise False
63
11
9,836
def token_urlsafe(nbytes=32):
    """Return a random URL-safe text string in Base64 encoding.

    :param nbytes: number of random bytes drawn from os.urandom.
    :returns: an ASCII str with the base64 '=' padding stripped.
    """
    raw = os.urandom(nbytes)
    encoded = base64.urlsafe_b64encode(raw)
    return encoded.rstrip(b'=').decode('ascii')
Return a random URL - safe text string in Base64 encoding .
56
13
9,837
def prettyln(text, fill='-', align='^', prefix='[ ', suffix=' ]', length=69):
    """Print `text` wrapped in a decorated line of at most `length` chars."""
    decorated = '{prefix}{0}{suffix}'.format(text, prefix=prefix, suffix=suffix)
    line = "{0:{fill}{align}{length}}".format(
        decorated, fill=fill, align=align, length=length)
    print(line)
Wrap text in a pretty line with maximum length .
88
11
9,838
def unpack_nfirst(seq, nfirst):
    """Yield the first `nfirst` items of `seq` (padding with None when
    the sequence is too short), then a tuple holding whatever remains.
    """
    it = iter(seq)
    yield from (next(it, None) for _ in range(nfirst))
    yield tuple(it)
Unpack the nfirst items from the list and return the rest.
40
15
9,839
def split(string, callback=None, sep=None):
    """Split `string` on `sep` and optionally map `callback` over parts."""
    parts = string.split(sep)
    if callback is None:
        return parts
    return [callback(part) for part in parts]
Split the string and execute the callback function on each part .
45
12
9,840
def namedtuple_with_defaults(typename, field_names, default_values=()):
    """Create a namedtuple class with default values.

    :param typename: name of the new type.
    :param field_names: field names, as accepted by collections.namedtuple.
    :param default_values: either a mapping of field name -> default, or
        an iterable of defaults applied from the left; any field without
        an explicit default defaults to None.

    Fixes: ``collections.Mapping`` was removed in Python 3.10 (now uses
    ``collections.abc.Mapping``) and the mutable default argument ``[]``
    is replaced by an immutable tuple.
    """
    import collections.abc  # ensure the abc submodule is loaded
    the_tuple = collections.namedtuple(typename, field_names)
    # First make every field default to None ...
    the_tuple.__new__.__defaults__ = (None,) * len(the_tuple._fields)
    # ... then overlay caller-supplied defaults via a prototype instance.
    if isinstance(default_values, collections.abc.Mapping):
        prototype = the_tuple(**default_values)
    else:
        prototype = the_tuple(*default_values)
    the_tuple.__new__.__defaults__ = tuple(prototype)
    return the_tuple
Create a namedtuple with default values
136
8
9,841
def remain_file_pointer(function):
    """Decorator: restore the file pointer position after the call.

    The file object is expected to be the LAST positional argument of
    the decorated function; its position is saved before the call and
    restored afterwards, and the wrapped function's return value is
    passed through unchanged.
    """
    def wrapper(*args, **kwargs):
        """Call `function`, then rewind the file pointer."""
        fobj = args[-1]
        saved_position = fobj.tell()
        result = function(*args, **kwargs)
        fobj.seek(saved_position, 0)
        return result
    return wrapper
Remain the file pointer position after calling the decorated function
89
11
9,842
def decamelise(text):
    """Convert CamelCase to lower_and_underscore."""
    # Insert '_' before capitalized words, then before any upper-case
    # letter that follows a lower-case letter or digit.
    step_one = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', text)
    step_two = re.sub('([a-z0-9])([A-Z])', r'\1_\2', step_one)
    return step_two.lower()
Convert CamelCase to lower_and_underscore .
76
12
9,843
def camelise(text, capital_first=True):
    """Convert lower_underscore to CamelCase.

    When `capital_first` is False the first word stays lower-case.
    Empty segments (consecutive underscores) are preserved as '_'.
    """
    def case_funcs():
        # First word is lowered unless capital_first; all others capitalized.
        if not capital_first:
            yield str.lower
        while True:
            yield str.capitalize
    if istype(text, 'unicode'):
        text = text.encode('utf8')
    funcs = case_funcs()
    return "".join(next(funcs)(part) if part else '_' for part in text.split("_"))
Convert lower_underscore to CamelCase .
94
10
9,844
def colored(text, color=None, on_color=None, attrs=None, ansi_code=None):
    """Colorize text while stripping nested ANSI color sequences.

    :param color: foreground color name (key into COLORS).
    :param on_color: background highlight name (key into HIGHLIGHTS).
    :param attrs: iterable of attribute names (keys into ATTRIBUTES).
    :param ansi_code: explicit 256-color code; overrides everything else.

    Setting the ANSI_COLORS_DISABLED environment variable returns the
    text unchanged.
    """
    if os.getenv('ANSI_COLORS_DISABLED') is None:
        if ansi_code is not None:
            return "\033[38;5;{}m{}\033[0m".format(ansi_code, text)
        fmt_str = '\033[%dm%s'
        if color is not None:
            # Strip any nested color sequences before applying ours.
            text = re.sub(COLORS_RE + '(.*?)' + RESET_RE, r'\1', text)
            text = fmt_str % (COLORS[color], text)
        if on_color is not None:
            text = re.sub(HIGHLIGHTS_RE + '(.*?)' + RESET_RE, r'\1', text)
            text = fmt_str % (HIGHLIGHTS[on_color], text)
        if attrs is not None:
            text = re.sub(ATTRIBUTES_RE + '(.*?)' + RESET_RE, r'\1', text)
            for attr in attrs:
                text = fmt_str % (ATTRIBUTES[attr], text)
        return text + RESET
    else:
        return text
Colorize text while stripping nested ANSI color sequences .
281
11
9,845
def zero_pad(m, n=1):
    """Pad matrix `m` with `n` rows/columns of zeros on every side."""
    padding = (n, n)
    return np.pad(m, padding, mode='constant', constant_values=[0])
Pad a matrix with zeros on all sides .
40
10
9,846
def supports_color():
    """Check whether the terminal (or a notebook) supports color output.

    Notebooks always do; otherwise the platform must not be bare win32
    (unless ANSICON is set) and stdout must be a TTY.
    """
    if isnotebook():
        return True
    platform_ok = sys.platform != 'win32' or 'ANSICON' in os.environ
    is_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
    return platform_ok and is_tty
Checks if the terminal supports color .
87
8
9,847
def get_jpp_revision(via_command='JPrint'):
    """Retrieve the Jpp revision number, or None if unavailable.

    Runs ``<via_command> -v`` and parses the second token of the first
    output line. An exit code of 1 is treated as success (the tool
    still prints the version); a missing binary (OSError) yields None.
    """
    try:
        output = subprocess.check_output([via_command, '-v'], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        if e.returncode == 1:
            output = e.output
        else:
            return None
    except OSError:
        return None
    revision = output.decode().split('\n')[0].split()[1].strip()
    return revision
Retrieves the Jpp revision number
117
8
9,848
def timed_cache ( * * timed_cache_kwargs ) : def _wrapper ( f ) : maxsize = timed_cache_kwargs . pop ( 'maxsize' , 128 ) typed = timed_cache_kwargs . pop ( 'typed' , False ) update_delta = timedelta ( * * timed_cache_kwargs ) # nonlocal workaround to support Python 2 # https://technotroph.wordpress.com/2012/10/01/python-closures-and-the-python-2-7-nonlocal-solution/ d = { 'next_update' : datetime . utcnow ( ) - update_delta } try : f = functools . lru_cache ( maxsize = maxsize , typed = typed ) ( f ) except AttributeError : print ( "LRU caching is not available in Pyton 2.7, " "this will have no effect!" ) pass @ functools . wraps ( f ) def _wrapped ( * args , * * kwargs ) : now = datetime . utcnow ( ) if now >= d [ 'next_update' ] : try : f . cache_clear ( ) except AttributeError : pass d [ 'next_update' ] = now + update_delta return f ( * args , * * kwargs ) return _wrapped return _wrapper
LRU cache decorator with timeout .
297
8
9,849
def _get_point(self, profile, point):
    """Find `point` (matched by z coordinate) in the profile, or insert
    a new Point in sorted-z order and return it.
    """
    cur_points_z = [p.location.z for p in profile.elements]
    try:
        cur_idx = cur_points_z.index(point.z)
        return profile.elements[cur_idx]
    except ValueError:
        # Not present: insert a new element while keeping z order.
        new_idx = bisect_left(cur_points_z, point.z)
        new_point = Point()
        new_point.location = sPoint(point)
        new_point.time = profile.time
        profile.elements.insert(new_idx, new_point)
        return new_point
Finds the given point in the profile or adds it in sorted z order .
131
16
9,850
def _parse_data_array ( self , data_array ) : # decimalSeparator = data_array.encoding.decimalSeparator tokenSeparator = data_array . encoding . tokenSeparator blockSeparator = data_array . encoding . blockSeparator # collapseWhiteSpaces = data_array.encoding.collapseWhiteSpaces data_values = data_array . values lines = [ x for x in data_values . split ( blockSeparator ) if x != "" ] ret_val = [ ] for row in lines : values = row . split ( tokenSeparator ) ret_val . append ( [ float ( v ) if " " not in v . strip ( ) else [ float ( vv ) for vv in v . split ( ) ] for v in values ] ) # transpose into columns return [ list ( x ) for x in zip ( * ret_val ) ]
Parses a general DataArray .
201
8
9,851
def _parse_sensor_record ( self , sensor_data_rec , sensor_info , rem_values ) : val_idx = 0 # @TODO seems there is only a single field in each of these assert len ( sensor_data_rec . field ) == 1 sensor_data_array = sensor_data_rec . field [ 0 ] . content # there is probably not going to be a count in the def, it'll be in the data count = None count_text = sensor_data_array . elementCount . text if count_text : count = int ( count_text . strip ( ) ) if not count : count = int ( rem_values [ val_idx ] ) val_idx += 1 parsed = [ ] for recnum in range ( count ) : cur = [ ] for f in sensor_data_array . elementType . field : cur_val = rem_values [ val_idx ] val_idx += 1 m = Member ( name = f . name , standard = f . content . definition ) if hasattr ( f . content , "uom" ) : m [ "units" ] = f . content . uom try : m [ "value" ] = float ( cur_val ) except ValueError : m [ "value" ] = cur_val if len ( f . quality ) : m [ "quality" ] = [ ] for qual in f . quality : cur_qual = rem_values [ val_idx ] val_idx += 1 # @TODO check this against constraints m [ "quality" ] . append ( cur_qual ) cur . append ( m ) parsed . append ( cur ) return parsed , val_idx
Parses values via sensor data record passed in . Returns parsed values AND how many items it consumed out of rem_values .
365
26
9,852
def execute(self, debug=False):
    """Execute the engine - currently simply executes all workflows.

    In debug mode a fixed historical time interval is used and the
    watchdog alarm is disabled; otherwise the interval comes from the
    online-engine configuration and is advanced after every iteration.
    """
    if debug:
        # Set some default times for execution (debugging)
        start_time = datetime(year=2016, month=10, day=19, hour=12, minute=28, tzinfo=UTC)
        duration = timedelta(seconds=5)
        end_time = start_time + duration
        relative_interval = RelativeTimeInterval(0, 0)
        time_interval = TimeInterval(start_time, end_time)
        # workflow_id = "lda_localisation_model_predict"
    else:
        duration = 0  # not needed
        relative_interval = self.hyperstream.config.online_engine.interval
        time_interval = relative_interval.absolute(utcnow())
    for _ in range(self.hyperstream.config.online_engine.iterations):
        if not debug:
            # if this takes more than x minutes, kill myself
            signal.alarm(self.hyperstream.config.online_engine.alarm)
        logging.info("Online engine starting up.")
        # self.hyperstream.workflow_manager.set_requested_intervals(workflow_id, TimeIntervals([time_interval]))
        self.hyperstream.workflow_manager.set_all_requested_intervals(TimeIntervals([time_interval]))
        self.hyperstream.workflow_manager.execute_all()
        logging.info("Online engine shutting down.")
        logging.info("")
        sleep(self.hyperstream.config.online_engine.sleep)
        # Slide the window forward for the next iteration.
        if debug:
            time_interval += duration
        else:
            time_interval = TimeInterval(time_interval.end, utcnow() + timedelta(seconds=relative_interval.end))
Execute the engine - currently simple executes all workflows .
384
12
9,853
def show(movie):
    """Show the movie metadata.

    NOTE(review): Python 2 only -- `iteritems` and the `cmp=` argument
    to sorted() were removed in Python 3.
    """
    for key, value in sorted(movie.iteritems(), cmp=metadata_sorter, key=lambda x: x[0]):
        if isinstance(value, list):
            if not value:
                continue  # skip empty lists entirely
            # First list item goes on the key line, the rest below it.
            other = value[1:]
            value = value[0]
        else:
            other = []
        printer.p('<b>{key}</b>: {value}', key=key, value=value)
        for value in other:
            # Align continuation values under the first one.
            printer.p('{pad}{value}', value=value, pad=' ' * (len(key) + 2))
Show the movie metadata .
134
5
9,854
def metadata_sorter(x, y):
    """Sort metadata keys by priority (a Python 2 cmp-style comparator).

    Keys listed in METADATA_SORTER_FIRST come first (in list order),
    then regular keys alphabetically, then underscore-prefixed keys
    (sorted by their name without the underscore).
    NOTE(review): relies on the Python 2 builtin `cmp`.
    """
    if x == y:
        return 0
    if x in METADATA_SORTER_FIRST and y in METADATA_SORTER_FIRST:
        return -1 if METADATA_SORTER_FIRST.index(x) < METADATA_SORTER_FIRST.index(y) else 1
    elif x in METADATA_SORTER_FIRST:
        return -1
    elif y in METADATA_SORTER_FIRST:
        return 1
    else:
        if x.startswith('_') and y.startswith('_'):
            return cmp(x[1:], y[1:])
        elif x.startswith('_'):
            return 1
        elif y.startswith('_'):
            return -1
        else:
            return cmp(x, y)
Sort metadata keys by priority .
199
6
9,855
def parse_lines ( log_parsers , fileinp ) : while 1 : logentry = fileinp . readline ( ) if not logentry : break elif not logentry . rstrip ( ) : continue # skip newlines processed = False for lp in log_parsers : if lp . grok ( logentry ) : processed = True if not processed : # error: none of the logparsers worked on the line logger = logging . getLogger ( 'logparser' ) logger . warning ( #'Could not parse line %s, in file %s >>>%s<<<', #fileinp.lineno(), fileinp.filename(), line.rstrip()) 'Could not parse line >>>%s<<<' , logentry . rstrip ( ) ) print ( 'Could not parse line >>>%s<<<' % logentry . rstrip ( ) )
parse lines from the fileinput and send them to the log_parsers
194
16
9,856
def load_commands ( self , parser ) : entrypoints = self . _get_entrypoints ( ) already_loaded = set ( ) for entrypoint in entrypoints : if entrypoint . name not in already_loaded : command_class = entrypoint . load ( ) command_class ( entrypoint . name , self , parser ) . prepare ( ) already_loaded . add ( entrypoint . name )
Load commands of this profile .
86
6
9,857
def copy ( tree , source_filename ) : #_, ext = os.path.splitext(source_filename) filehash = sha1 ( ) with printer . progress ( os . path . getsize ( source_filename ) ) as update : with open ( source_filename , 'rb' ) as fsource : with NamedTemporaryFile ( dir = os . path . join ( tree , '.kolekto' , 'movies' ) , delete = False ) as fdestination : # Copy the source into the temporary destination: while True : buf = fsource . read ( 10 * 1024 ) if not buf : break filehash . update ( buf ) fdestination . write ( buf ) update ( len ( buf ) ) # Rename the file to its final name or raise an error if # the file already exists: dest = os . path . join ( tree , '.kolekto' , 'movies' , filehash . hexdigest ( ) ) if os . path . exists ( dest ) : raise IOError ( 'This file already exists in tree (%s)' % filehash . hexdigest ( ) ) else : os . rename ( fdestination . name , dest ) return filehash . hexdigest ( )
Copy file in tree show a progress bar during operations and return the sha1 sum of copied file .
266
21
9,858
def list_attachments ( fullname ) : parent , filename = os . path . split ( fullname ) filename_without_ext , ext = os . path . splitext ( filename ) attachments = [ ] for found_filename in os . listdir ( parent ) : found_filename_without_ext , _ = os . path . splitext ( found_filename ) if filename_without_ext == found_filename_without_ext and found_filename != filename : attachments . append ( os . path . join ( parent , found_filename ) ) return attachments
List attachment for the specified fullname .
120
8
9,859
def write_to_stream ( self , stream_id , data , sandbox = None ) : if sandbox is not None : raise NotImplementedError if stream_id not in self . streams : raise StreamNotFoundError ( "Stream with id '{}' does not exist" . format ( stream_id ) ) writer = self . get_stream_writer ( self . streams [ stream_id ] ) if isinstance ( data , StreamInstance ) : data = [ data ] for instance in data : if not isinstance ( instance , StreamInstance ) : raise ValueError ( "Expected StreamInstance, got {}" . format ( str ( type ( instance ) ) ) ) writer ( instance )
Write to the stream
147
4
9,860
def _startXTVDNode ( self , name , attrs ) : schemaVersion = attrs . get ( 'schemaVersion' ) validFrom = self . _parseDateTime ( attrs . get ( 'from' ) ) validTo = self . _parseDateTime ( attrs . get ( 'to' ) ) self . _progress . printMsg ( 'Parsing version %s data from %s to %s' % ( schemaVersion , validFrom . strftime ( '%Y/%m/%d' ) , validTo . strftime ( '%Y/%m/%d' ) ) )
Process the start of the top - level xtvd node
135
12
9,861
def startElement ( self , name , attrs ) : self . _contextStack . append ( self . _context ) self . _contentList = [ ] if name in self . _statusDict : self . _itemTag , itemType = self . _statusDict [ name ] self . _progress . startItem ( itemType ) elif name == self . _itemTag : self . _error = False self . _progress . newItem ( ) try : if self . _context == 'root' : if name == 'xtvd' : self . _context = 'xtvd' self . _startXTVDNode ( name , attrs ) elif self . _context == 'xtvd' : self . _context = name elif self . _context == 'stations' : self . _startStationsNode ( name , attrs ) elif self . _context == 'lineups' : self . _startLineupsNode ( name , attrs ) elif self . _context == 'schedules' : self . _startSchedulesNode ( name , attrs ) elif self . _context == 'programs' : self . _startProgramsNode ( name , attrs ) elif self . _context == 'productionCrew' : self . _startProductionCrewNode ( name , attrs ) elif self . _context == 'genres' : self . _startGenresNode ( name , attrs ) except Exception , e : self . _error = True self . _progress . printMsg ( str ( e ) , error = True )
Callback run at the start of each XML element
338
9
9,862
def endElement ( self , name ) : content = '' . join ( self . _contentList ) if name == 'xtvd' : self . _progress . endItems ( ) else : try : if self . _context == 'stations' : self . _endStationsNode ( name , content ) elif self . _context == 'lineups' : self . _endLineupsNode ( name , content ) elif self . _context == 'schedules' : self . _endSchedulesNode ( name , content ) elif self . _context == 'programs' : self . _endProgramsNode ( name , content ) elif self . _context == 'productionCrew' : self . _endProductionCrewNode ( name , content ) elif self . _context == 'genres' : self . _endGenresNode ( name , content ) except Exception , e : self . _error = True self . _progress . printMsg ( str ( e ) , error = True ) self . _context = self . _contextStack . pop ( )
Callback run at the end of each XML element
229
9
9,863
def error ( self , msg ) : self . _error = True self . _progress . printMsg ( 'XML parse error: %s' % msg , error = True )
Callback run when a recoverable parsing error occurs
38
9
9,864
def format_all ( format_string , env ) : prepared_env = parse_pattern ( format_string , env , lambda x , y : [ FormatWrapper ( x , z ) for z in y ] ) # Generate each possible combination, format the string with it and yield # the resulting string: for field_values in product ( * prepared_env . itervalues ( ) ) : format_env = dict ( izip ( prepared_env . iterkeys ( ) , field_values ) ) yield format_string . format ( * * format_env )
Format the input string using each possible combination of lists in the provided environment . Returns a list of formated strings .
122
23
9,865
def preloop ( self ) : Cmd . preloop ( self ) # sets up command completion self . _hist = [ ] # No history yet self . _locals = { } # Initialize execution namespace for user self . _globals = { }
Initialization before prompting user for commands .
55
8
9,866
def execute_tool ( self , stream , interval ) : if interval . end > self . up_to_timestamp : raise ValueError ( 'The stream is not available after ' + str ( self . up_to_timestamp ) + ' and cannot be calculated' ) required_intervals = TimeIntervals ( [ interval ] ) - stream . calculated_intervals if not required_intervals . is_empty : for interval in required_intervals : stream . tool . execute ( stream . input_streams , stream , interval ) stream . calculated_intervals += interval if not stream . required_intervals . is_empty : raise RuntimeError ( 'Tool execution did not cover the specified time interval.' )
Executes the stream s tool over the given time interval
151
11
9,867
def get_or_create_stream ( self , stream_id , try_create = True ) : stream_id = get_stream_id ( stream_id ) if stream_id in self . streams : logging . debug ( "found {}" . format ( stream_id ) ) return self . streams [ stream_id ] elif try_create : # Try to create the stream logging . debug ( "creating {}" . format ( stream_id ) ) return self . create_stream ( stream_id = stream_id )
Helper function to get a stream or create one if it s not already defined
114
15
9,868
def find_streams ( self , * * kwargs ) : found = { } if 'name' in kwargs : name = kwargs . pop ( 'name' ) else : name = None for stream_id , stream in self . streams . items ( ) : if name is not None and stream_id . name != name : continue d = dict ( stream_id . meta_data ) if all ( k in d and d [ k ] == str ( v ) for k , v in kwargs . items ( ) ) : found [ stream_id ] = stream return found
Finds streams with the given meta data values . Useful for debugging purposes .
127
15
9,869
def find_stream ( self , * * kwargs ) : found = list ( self . find_streams ( * * kwargs ) . values ( ) ) if not found : raise StreamNotFoundError ( kwargs ) if len ( found ) > 1 : raise MultipleStreamsFoundError ( kwargs ) return found [ 0 ]
Finds a single stream with the given meta data values . Useful for debugging purposes .
74
17
9,870
def next_blob ( self ) : blob_file = self . blob_file try : preamble = DAQPreamble ( file_obj = blob_file ) except struct . error : raise StopIteration try : data_type = DATA_TYPES [ preamble . data_type ] except KeyError : log . error ( "Unkown datatype: {0}" . format ( preamble . data_type ) ) data_type = 'Unknown' blob = Blob ( ) blob [ data_type ] = None blob [ 'DAQPreamble' ] = preamble if data_type == 'DAQSummaryslice' : daq_frame = DAQSummaryslice ( blob_file ) blob [ data_type ] = daq_frame blob [ 'DAQHeader' ] = daq_frame . header elif data_type == 'DAQEvent' : daq_frame = DAQEvent ( blob_file ) blob [ data_type ] = daq_frame blob [ 'DAQHeader' ] = daq_frame . header else : log . warning ( "Skipping DAQ frame with data type code '{0}'." . format ( preamble . data_type ) ) blob_file . seek ( preamble . length - DAQPreamble . size , 1 ) return blob
Get the next frame from file
289
6
9,871
def seek_to_frame ( self , index ) : pointer_position = self . frame_positions [ index ] self . blob_file . seek ( pointer_position , 0 )
Move file pointer to the frame with given index .
39
10
9,872
def _parse_file ( self , file_obj ) : byte_data = file_obj . read ( self . size ) self . _parse_byte_data ( byte_data )
Directly read from file handler .
40
7
9,873
def _parse_summary_frames ( self , file_obj ) : for _ in range ( self . n_summary_frames ) : dom_id = unpack ( '<i' , file_obj . read ( 4 ) ) [ 0 ] dq_status = file_obj . read ( 4 ) # probably dom status? # noqa dom_status = unpack ( '<iiii' , file_obj . read ( 16 ) ) raw_rates = unpack ( 'b' * 31 , file_obj . read ( 31 ) ) pmt_rates = [ self . _get_rate ( value ) for value in raw_rates ] self . summary_frames [ dom_id ] = pmt_rates self . dq_status [ dom_id ] = dq_status self . dom_status [ dom_id ] = dom_status self . dom_rates [ dom_id ] = np . sum ( pmt_rates )
Iterate through the byte data and fill the summary_frames
205
12
9,874
def _get_rate ( self , value ) : if value == 0 : return 0 else : return MINIMAL_RATE_HZ * math . exp ( value * self . _get_factor ( ) )
Return the rate in Hz from the short int value
46
10
9,875
def _parse_triggered_hits ( self , file_obj ) : for _ in range ( self . n_triggered_hits ) : dom_id , pmt_id = unpack ( '<ib' , file_obj . read ( 5 ) ) tdc_time = unpack ( '>I' , file_obj . read ( 4 ) ) [ 0 ] tot = unpack ( '<b' , file_obj . read ( 1 ) ) [ 0 ] trigger_mask = unpack ( '<Q' , file_obj . read ( 8 ) ) self . triggered_hits . append ( ( dom_id , pmt_id , tdc_time , tot , trigger_mask ) )
Parse and store triggered hits .
160
7
9,876
def _parse_snapshot_hits ( self , file_obj ) : for _ in range ( self . n_snapshot_hits ) : dom_id , pmt_id = unpack ( '<ib' , file_obj . read ( 5 ) ) tdc_time = unpack ( '>I' , file_obj . read ( 4 ) ) [ 0 ] tot = unpack ( '<b' , file_obj . read ( 1 ) ) [ 0 ] self . snapshot_hits . append ( ( dom_id , pmt_id , tdc_time , tot ) )
Parse and store snapshot hits .
133
7
9,877
def runtable ( det_id , n = 5 , run_range = None , compact = False , sep = '\t' , regex = None ) : db = kp . db . DBManager ( ) df = db . run_table ( det_id ) if run_range is not None : try : from_run , to_run = [ int ( r ) for r in run_range . split ( '-' ) ] except ValueError : log . critical ( "Please specify a valid range (e.g. 3100-3200)!" ) raise SystemExit else : df = df [ ( df . RUN >= from_run ) & ( df . RUN <= to_run ) ] if regex is not None : try : re . compile ( regex ) except re . error : log . error ( "Invalid regex!" ) return df = df [ df [ 'RUNSETUPNAME' ] . str . contains ( regex ) | df [ 'RUNSETUPID' ] . str . contains ( regex ) ] if n is not None : df = df . tail ( n ) if compact : df = df [ [ 'RUN' , 'DATETIME' , 'RUNSETUPNAME' ] ] df . to_csv ( sys . stdout , sep = sep )
Print the run table of the last n runs for given detector
276
12
9,878
def _extract_calibration ( xroot ) : names = [ c . text for c in xroot . findall ( ".//Name" ) ] val = [ [ i . text for i in c ] for c in xroot . findall ( ".//Values" ) ] # The fields has to be reindeced, these are the index mappings col_ic = [ int ( v ) for v in val [ names . index ( "AHRS_Matrix_Column(-)" ) ] ] try : row_ic = [ int ( v ) for v in val [ names . index ( "AHRS_Matrix_Row(-)" ) ] ] except ValueError : row_ic = [ 2 , 2 , 2 , 1 , 1 , 1 , 0 , 0 , 0 ] try : vec_ic = [ int ( v ) for v in val [ names . index ( "AHRS_Vector_Index(-)" ) ] ] except ValueError : vec_ic = [ 2 , 1 , 0 ] Aoff_ix = names . index ( "AHRS_Acceleration_Offset(g/ms^2-)" ) Arot_ix = names . index ( "AHRS_Acceleration_Rotation(-)" ) Hrot_ix = names . index ( "AHRS_Magnetic_Rotation(-)" ) Aoff = np . array ( val [ Aoff_ix ] ) [ vec_ic ] . astype ( float ) Arot = np . array ( val [ Arot_ix ] ) . reshape ( 3 , 3 ) [ col_ic , row_ic ] . reshape ( 3 , 3 ) . astype ( float ) Hrot = np . array ( val [ Hrot_ix ] ) . reshape ( 3 , 3 ) [ col_ic , row_ic ] . reshape ( 3 , 3 ) . astype ( float ) Hoff = [ ] for q in 'XYZ' : values = [ ] for t in ( 'Min' , 'Max' ) : ix = names . index ( "AHRS_Magnetic_{}{}(G-)" . format ( q , t ) ) values . append ( float ( val [ ix ] [ 0 ] ) ) Hoff . append ( sum ( values ) / 2. ) Hoff = np . array ( Hoff ) return Aoff , Arot , Hoff , Hrot
Extract AHRS calibration information from XML root .
506
10
9,879
def calibrate ( self ) : now = time . time ( ) dom_ids = self . A . keys ( ) print ( "Calibrating AHRS from median A and H for {} DOMs." . format ( len ( dom_ids ) ) ) calibrations = { } for dom_id in dom_ids : print ( "Calibrating DOM ID {}" . format ( dom_id ) ) clb_upi = self . db . doms . via_dom_id ( dom_id ) . clb_upi ahrs_calib = get_latest_ahrs_calibration ( clb_upi ) if ahrs_calib is None : log . warning ( "AHRS calibration missing for '{}'" . format ( dom_id ) ) continue du , floor , _ = self . detector . doms [ dom_id ] A = np . median ( self . A [ dom_id ] , axis = 0 ) H = np . median ( self . H [ dom_id ] , axis = 0 ) cyaw , cpitch , croll = fit_ahrs ( A , H , * ahrs_calib ) calibrations [ dom_id ] = ( now , du , floor , cyaw , cpitch , croll ) self . A = defaultdict ( list ) self . H = defaultdict ( list ) return calibrations
Calculate yaw pitch and roll from the median of A and H .
295
16
9,880
def humanize_filesize ( value ) : value = float ( value ) if value == 1 : return '1 Byte' elif value < 1024 : return '%d Bytes' % value elif value < 1024 : return '%dB' % value for i , s in enumerate ( SUFFIXES ) : unit = 1024 ** ( i + 2 ) if value < unit : return '%.1f %s' % ( ( 1024 * value / unit ) , s ) return '%.1f %s' % ( ( 1024 * value / unit ) , s )
Return an humanized file size .
122
7
9,881
def format_top ( counter , top = 3 ) : items = islice ( reversed ( sorted ( counter . iteritems ( ) , key = lambda x : x [ 1 ] ) ) , 0 , top ) return u'; ' . join ( u'{g} ({nb})' . format ( g = g , nb = nb ) for g , nb in items )
Format a top .
82
4
9,882
def check_input_stream_count ( expected_number_of_streams ) : def stream_count_decorator ( func ) : def func_wrapper ( * args , * * kwargs ) : self = args [ 0 ] sources = kwargs [ 'sources' ] if 'sources' in kwargs else args [ 1 ] if expected_number_of_streams == 0 : if sources : raise ValueError ( "No input streams expected" ) else : given_number_of_streams = len ( sources ) if sources else 0 if given_number_of_streams != expected_number_of_streams : raise ValueError ( "{} tool takes {} stream(s) as input ({} given)" . format ( self . __class__ . __name__ , expected_number_of_streams , given_number_of_streams ) ) return func ( * args , * * kwargs ) return func_wrapper return stream_count_decorator
Decorator for Tool . _execute that checks the number of input streams
216
15
9,883
def main ( ) : from docopt import docopt args = docopt ( __doc__ , version = kp . version ) kp . logger . set_level ( "km3pipe" , args [ '-d' ] ) pipe = kp . Pipeline ( ) pipe . attach ( kp . io . ch . CHPump , host = args [ 'SOURCE_IP' ] , port = int ( args [ '-p' ] ) , tags = args [ '-m' ] , timeout = int ( args [ '-x' ] ) , max_queue = int ( args [ '-s' ] ) ) pipe . attach ( LigierSender , target_ip = args [ '-t' ] , port = int ( args [ '-q' ] ) ) pipe . drain ( )
The main script
175
3
9,884
def parse ( filename ) : with open ( filename , "rb" ) as data : header , v_major , v_minor , chunk_count = struct . unpack ( "!4sHHI" , data . read ( 12 ) ) assert header == b"ASEF" assert ( v_major , v_minor ) == ( 1 , 0 ) return [ c for c in parser . parse_chunk ( data ) ]
parses a . ase file and returns a list of colors and color groups
94
17
9,885
def dumps ( obj ) : header = b'ASEF' v_major , v_minor = 1 , 0 chunk_count = writer . chunk_count ( obj ) head = struct . pack ( '!4sHHI' , header , v_major , v_minor , chunk_count ) body = b'' . join ( [ writer . chunk_for_object ( c ) for c in obj ] ) return head + body
converts a swatch to bytes suitable for writing
94
10
9,886
def isFullPreferenceOrder ( self , candList ) : # If a candidate is missing from the wmgMap or if there is a pair of candidates for which # there is no value in the wmgMap, then the wmgMap cannot be a full preference order. for cand1 in candList : if cand1 not in self . wmgMap . keys ( ) : return False for cand2 in candList : if cand1 == cand2 : continue if cand2 not in self . wmgMap [ cand1 ] . keys ( ) : return False return True
Returns True if the underlying weighted majority graph contains a comparision between every pair of candidate and returns False otherwise .
119
22
9,887
def containsTie ( self ) : # If a value of 0 is present in the wmgMap, we assume that it represents a tie. for cand in self . wmgMap . keys ( ) : if 0 in self . wmgMap [ cand ] . values ( ) : return True return False
Returns True if the underlying weighted majority graph contains a tie between any pair of candidates and returns False otherwise .
63
21
9,888
def getIncEdgesMap ( self ) : # We calculate the number of incoming edges for each candidate and store it into a dictionary # that associates the number of incoming edges with the candidates with that number. incEdgesMap = dict ( ) for cand1 in self . wmgMap . keys ( ) : incEdgesSum = 0 for cand2 in self . wmgMap [ cand1 ] . keys ( ) : if self . wmgMap [ cand1 ] [ cand2 ] > 0 : incEdgesSum += self . wmgMap [ cand1 ] [ cand2 ] # Check if this is the first candidate associated with this number of associated edges. if incEdgesSum in incEdgesMap . keys ( ) : incEdgesMap [ incEdgesSum ] . append ( cand1 ) else : incEdgesMap [ incEdgesSum ] = [ cand1 ] return incEdgesMap
Returns a dictionary that associates numbers of incoming edges in the weighted majority graph with the candidates that have that number of incoming edges .
192
25
9,889
def getRankMap ( self ) : # We sort the candidates based on the number of incoming edges they have in the graph. If # two candidates have the same number, we assume that they are tied. incEdgesMap = self . getIncEdgesMap ( ) sortedKeys = sorted ( incEdgesMap . keys ( ) , reverse = True ) rankMap = dict ( ) pos = 1 for key in sortedKeys : cands = incEdgesMap [ key ] for cand in cands : rankMap [ cand ] = pos pos += 1 return rankMap
Returns a dictionary that associates the integer representation of each candidate with its position in the ranking starting from 1 .
118
21
9,890
def getReverseRankMap ( self ) : # We sort the candidates based on the number of incoming edges they have in the graph. If # two candidates have the same number, we assume that they are tied. incEdgesMap = self . getIncEdgesMap ( ) sortedKeys = sorted ( incEdgesMap . keys ( ) , reverse = True ) reverseRankMap = dict ( ) pos = 1 for key in sortedKeys : cands = incEdgesMap [ key ] reverseRankMap [ pos ] = cands pos += 1 return reverseRankMap
Returns a dictionary that associates each position in the ranking with a list of integer representations of the candidates ranked at that position .
119
24
9,891
def histogram ( a , bins ) : if any ( map ( lambda x : x < 0 , diff ( bins ) ) ) : raise ValueError ( 'bins must increase monotonically.' ) try : sa = sorted ( a ) except TypeError : # Perhaps just a single value? Treat as a list and carry on sa = sorted ( [ a ] ) # import numpy as np # nl = np.searchsorted(sa, bins[:-1], 'left') # nr = np.searchsorted(sa, bins[-1], 'right') # nn = np.r_[nl, nr] # # # cl = list(accumulate(Counter(map(lambda x: bisect_left(bins[:-1], x), sa))) # # print("cl") # # print([cl[i] for i in range(len(bins))]) # print("nl") # print(list(nl)) # # print(Counter(map(lambda x: bisect_right([bins[-1]], x), sa))) # print("nr") # print([nr]) # print("nn") # print(list(nn)) # print("hist") # print(list(np.diff(nn))) # print(list(np.histogram(a, bins)[0])) nl = list ( accumulate ( [ Counter ( map ( lambda x : bisect_left ( bins [ : - 1 ] , x ) , sa ) ) [ i ] for i in range ( len ( bins ) - 1 ) ] ) ) # print("nl") # print(nl) nr = Counter ( map ( lambda x : bisect_right ( [ bins [ 1 ] ] , x ) , sa ) ) [ 1 ] # print(nl) # print(nr) n = list ( nl ) + [ nr ] return diff ( n ) , bins
Compute the histogram of a set of data .
409
11
9,892
def deprecation ( self , message , * args , * * kws ) : self . _log ( DEPRECATION , message , args , * * kws )
Show a deprecation warning .
37
7
9,893
def once ( self , message , * args , * * kws ) : # TODO: after py2 support drop, put this into # function signature: identifier=None (between *args and **kws) identifier = kws . pop ( 'identifier' , None ) if identifier is None : caller = getframeinfo ( stack ( ) [ 1 ] [ 0 ] ) identifier = "%s:%d" % ( caller . filename , caller . lineno ) if not hasattr ( self , 'once_dict' ) : self . once_dict = { } if identifier in self . once_dict : return self . once_dict [ identifier ] = True self . _log ( ONCE , message , args , * * kws )
Show a message only once determined by position in source or identifer .
158
14
9,894
def get_logger ( name ) : if name in loggers : return loggers [ name ] logger = logging . getLogger ( name ) logger . propagate = False pre1 , suf1 = hash_coloured_escapes ( name ) if supports_color ( ) else ( '' , '' ) pre2 , suf2 = hash_coloured_escapes ( name + 'salt' ) if supports_color ( ) else ( '' , '' ) formatter = logging . Formatter ( '%(levelname)s {}+{}+{} ' '%(name)s: %(message)s' . format ( pre1 , pre2 , suf1 ) ) ch = logging . StreamHandler ( ) ch . setFormatter ( formatter ) logger . addHandler ( ch ) loggers [ name ] = logger logger . once_dict = { } return logger
Helper function to get a logger
190
6
9,895
def get_printer ( name , color = None , ansi_code = None , force_color = False ) : if force_color or supports_color ( ) : if color is None and ansi_code is None : cpre_1 , csuf_1 = hash_coloured_escapes ( name ) cpre_2 , csuf_2 = hash_coloured_escapes ( name + 'salt' ) name = cpre_1 + '+' + cpre_2 + '+' + csuf_1 + ' ' + name else : name = colored ( name , color = color , ansi_code = ansi_code ) prefix = name + ': ' def printer ( text ) : print ( prefix + str ( text ) ) return printer
Return a function which prints a message with a coloured name prefix
172
12
9,896
def hash_coloured ( text ) : ansi_code = int ( sha256 ( text . encode ( 'utf-8' ) ) . hexdigest ( ) , 16 ) % 230 return colored ( text , ansi_code = ansi_code )
Return a ANSI coloured text based on its hash
57
10
9,897
def hash_coloured_escapes ( text ) : ansi_code = int ( sha256 ( text . encode ( 'utf-8' ) ) . hexdigest ( ) , 16 ) % 230 prefix , suffix = colored ( 'SPLIT' , ansi_code = ansi_code ) . split ( 'SPLIT' ) return prefix , suffix
Return the ANSI hash colour prefix and suffix for a given text
80
13
9,898
def tai_timestamp ( ) : timestamp = time . time ( ) date = datetime . utcfromtimestamp ( timestamp ) if date . year < 1972 : return timestamp offset = 10 + timestamp leap_seconds = [ ( 1972 , 1 , 1 ) , ( 1972 , 7 , 1 ) , ( 1973 , 1 , 1 ) , ( 1974 , 1 , 1 ) , ( 1975 , 1 , 1 ) , ( 1976 , 1 , 1 ) , ( 1977 , 1 , 1 ) , ( 1978 , 1 , 1 ) , ( 1979 , 1 , 1 ) , ( 1980 , 1 , 1 ) , ( 1981 , 7 , 1 ) , ( 1982 , 7 , 1 ) , ( 1983 , 7 , 1 ) , ( 1985 , 7 , 1 ) , ( 1988 , 1 , 1 ) , ( 1990 , 1 , 1 ) , ( 1991 , 1 , 1 ) , ( 1992 , 7 , 1 ) , ( 1993 , 7 , 1 ) , ( 1994 , 7 , 1 ) , ( 1996 , 1 , 1 ) , ( 1997 , 7 , 1 ) , ( 1999 , 1 , 1 ) , ( 2006 , 1 , 1 ) , ( 2009 , 1 , 1 ) , ( 2012 , 7 , 1 ) , ( 2015 , 7 , 1 ) , ( 2017 , 1 , 1 ) , ] for idx , leap_date in enumerate ( leap_seconds ) : if leap_date >= ( date . year , date . month , date . day ) : return idx - 1 + offset return len ( leap_seconds ) - 1 + offset
Return current TAI timestamp .
326
6
9,899
def msg ( self , * args , * * kwargs ) : if self . timestamp is None or self . _interval_reached ( ) : self . callback ( * args , * * kwargs ) self . reset ( )
Only execute callback when interval is reached .
51
8