idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
232,900
def from_event(cls, ion_event):
    """Construct the given native extension from the properties of an event."""
    if ion_event.value is not None:
        args, kwargs = cls._to_constructor_args(ion_event.value)
    else:
        # Container events carry no scalar payload; passing arguments to the
        # underlying container constructor would fail.
        args, kwargs = (), {}
    instance = cls(*args, **kwargs)
    instance.ion_event = ion_event
    instance.ion_type = ion_event.ion_type
    instance.ion_annotations = ion_event.annotations
    return instance
Constructs the given native extension from the properties of an event .
139
13
232,901
def from_value(cls, ion_type, value, annotations=()):
    """Construct a value as a copy with an associated Ion type and annotations."""
    if value is None:
        result = IonPyNull()
    else:
        args, kwargs = cls._to_constructor_args(value)
        result = cls(*args, **kwargs)
    result.ion_event = None
    result.ion_type = ion_type
    result.ion_annotations = annotations
    return result
Constructs a value as a copy with an associated Ion type and annotations .
93
15
232,902
def to_event(self, event_type, field_name=None, depth=None):
    """Construct (and memoize) an IonEvent from this _IonNature value."""
    if self.ion_event is None:
        payload = self
        if isinstance(self, IonPyNull):
            # Null values carry no payload in the event.
            payload = None
        self.ion_event = IonEvent(
            event_type, ion_type=self.ion_type, value=payload,
            field_name=field_name, annotations=self.ion_annotations,
            depth=depth)
    return self.ion_event
Constructs an IonEvent from this _IonNature value .
102
13
232,903
def _remove_sig(signature, idempotent=False):
    """Remove the signature node from its parent, keeping any tail text.

    This is needed for enveloped signatures. Raises ValueError when the
    node has no parent, unless ``idempotent`` is set.
    """
    try:
        parent = next(signature.iterancestors())
    except StopIteration:
        if idempotent:
            return
        raise ValueError("Can't remove the root signature node")
    if signature.tail is not None:
        # Reattach the tail text to the preceding sibling, or to the parent's
        # text when the signature is the first child.
        try:
            prev_sibling = next(signature.itersiblings(preceding=True))
        except StopIteration:
            parent.text = (parent.text or '') + signature.tail
        else:
            prev_sibling.tail = (prev_sibling.tail or '') + signature.tail
    parent.remove(signature)
Remove the signature node from its parent, keeping any tail text. This is needed for enveloped signatures.
158
22
232,904
def authorize(self, scope=None, redirect_uri=None, state=None):
    """Redirect to GitHub and request access to a user's data."""
    _logger.debug("Called authorize()")
    params = {'client_id': self.client_id}
    # Only include the optional OAuth parameters that were supplied.
    for key, val in (('scope', scope), ('redirect_uri', redirect_uri),
                     ('state', state)):
        if val:
            params[key] = val
    url = self.auth_url + 'authorize?' + urlencode(params)
    _logger.debug("Redirecting to %s", url)
    return redirect(url)
Redirect to GitHub and request access to a user's data.
129
13
232,905
def authorized_handler(self, f):
    """Decorator for the route used as the callback when authorizing with
    GitHub. The callback URL can be set in the app settings or passed in
    during authorization."""
    @wraps(f)
    def decorated(*args, **kwargs):
        # GitHub includes ``code`` in the query string on success.
        if 'code' in request.args:
            payload = self._handle_response()
        else:
            payload = self._handle_invalid_response()
        return f(*((payload,) + args), **kwargs)
    return decorated
Decorator for the route that is used as the callback for authorizing with GitHub . This callback URL can be set in the settings for the app or passed in during authorization .
83
35
232,906
def _handle_response(self):
    """Handle the response after the redirect back from GitHub.

    POSTs the received ``code`` to exchange it for an access token and
    returns the token string (or None when absent).
    """
    _logger.debug("Handling response from GitHub")
    payload = {
        'code': request.args.get('code'),
        'client_id': self.client_id,
        'client_secret': self.client_secret,
    }
    url = self.auth_url + 'access_token'
    _logger.debug("POSTing to %s", url)
    _logger.debug(payload)
    response = self.session.post(url, data=payload)
    data = parse_qs(response.content)
    _logger.debug("response.content = %s", data)
    # parse_qs yields lists; collapse single-element lists to their value.
    for key, values in data.items():
        if len(values) == 1:
            data[key] = values[0]
    token = data.get(b'access_token', None)
    if token is not None:
        token = token.decode('ascii')
    return token
Handles the response after the redirect back from GitHub. This response determines whether the user has allowed this application access. If so, we send a POST request for the access_key used to authenticate requests to GitHub.
206
44
232,907
def decode_lazy(rlp, sedes=None, **sedes_kwargs):
    """Decode an RLP encoded object in a lazy fashion."""
    item, end = consume_item_lazy(rlp, 0)
    if end != len(rlp):
        raise DecodingError('RLP length prefix announced wrong length', rlp)
    if isinstance(item, LazyList):
        # Defer deserialization: the LazyList carries the sedes along.
        item.sedes = sedes
        item.sedes_kwargs = sedes_kwargs
        return item
    if sedes:
        return sedes.deserialize(item, **sedes_kwargs)
    return item
Decode an RLP encoded object in a lazy fashion .
126
12
232,908
def consume_item_lazy(rlp, start):
    """Read an item from an RLP string lazily.

    Bytes payloads are consumed eagerly; list payloads are wrapped in a
    LazyList so elements are decoded on demand.
    """
    p, t, l, s = consume_length_prefix(rlp, start)
    if t is bytes:
        item, _, end = consume_payload(rlp, p, s, bytes, l)
        return item, end
    assert t is list
    payload_end = s + l
    return LazyList(rlp, s, payload_end), payload_end
Read an item from an RLP string lazily .
88
11
232,909
def peek(rlp, index, sedes=None):
    """Get a specific element from an RLP encoded nested list."""
    node = decode_lazy(rlp)
    indices = index if isinstance(index, Iterable) else [index]
    for i in indices:
        if isinstance(node, Atomic):
            raise IndexError('Too many indices given')
        node = node[i]
    return sedes.deserialize(node) if sedes else node
Get a specific element from an rlp encoded nested list .
89
12
232,910
def fixed_length(cls, l, allow_empty=False):
    """Create a sedes for text data with exactly ``l`` encoded characters."""
    min_length = max_length = l
    return cls(min_length, max_length, allow_empty=allow_empty)
Create a sedes for text data with exactly l encoded characters .
33
13
232,911
def _eq ( left , right ) : if isinstance ( left , ( tuple , list ) ) and isinstance ( right , ( tuple , list ) ) : return len ( left ) == len ( right ) and all ( _eq ( * pair ) for pair in zip ( left , right ) ) else : return left == right
Equality comparison that allows for equality between tuple and list types with equivalent elements .
69
16
232,912
def is_sequence(obj):
    """Check whether ``obj`` is a sequence but not a string or bytes."""
    if not isinstance(obj, Sequence):
        return False
    return not (isinstance(obj, str) or BinaryClass.is_valid_type(obj))
Check if obj is a sequence but not a string or bytes .
39
13
232,913
def encode(obj, sedes=None, infer_serializer=True, cache=True):
    """Encode a Python object in RLP format."""
    really_cache = False
    if isinstance(obj, Serializable):
        cached_rlp = obj._cached_rlp
        if sedes is None and cached_rlp:
            # Short-circuit: reuse the cached encoding.
            return cached_rlp
        really_cache = cache and sedes is None
    if sedes:
        item = sedes.serialize(obj)
    elif infer_serializer:
        item = infer_sedes(obj).serialize(obj)
    else:
        item = obj
    result = encode_raw(item)
    if really_cache:
        obj._cached_rlp = result
    return result
Encode a Python object in RLP format .
148
10
232,914
def consume_payload(rlp, prefix, start, type_, length):
    """Read the payload of an item from an RLP string.

    Returns ``(item, per_item_rlp, end)`` where ``end`` is the index just
    past the payload.
    """
    if type_ is bytes:
        item = rlp[start:start + length]
        return (item, [prefix + item], start + length)
    if type_ is list:
        items = []
        per_item_rlp = []
        list_rlp = prefix
        next_item_start = start
        end = next_item_start + length
        while next_item_start < end:
            p, t, l, s = consume_length_prefix(rlp, next_item_start)
            item, item_rlp, next_item_start = consume_payload(rlp, p, s, t, l)
            per_item_rlp.append(item_rlp)
            # item_rlp's first element is the RLP of the whole (sub)item,
            # for both single elements and nested lists.
            list_rlp += item_rlp[0]
            items.append(item)
        per_item_rlp.insert(0, list_rlp)
        if next_item_start > end:
            raise DecodingError('List length prefix announced a too small '
                                'length', rlp)
        return (items, per_item_rlp, next_item_start)
    raise TypeError('Type must be either list or bytes')
Read the payload of an item from an RLP string .
315
12
232,915
def consume_item(rlp, start):
    """Read one complete item from an RLP string beginning at ``start``."""
    prefix_end, item_type, length, payload_start = consume_length_prefix(rlp, start)
    return consume_payload(rlp, prefix_end, payload_start, item_type, length)
Read an item from an RLP string .
47
9
232,916
def decode(rlp, sedes=None, strict=True, recursive_cache=False, **kwargs):
    """Decode an RLP encoded object."""
    if not is_bytes(rlp):
        raise DecodingError(
            'Can only decode RLP bytes, got type %s' % type(rlp).__name__, rlp)
    try:
        item, per_item_rlp, end = consume_item(rlp, 0)
    except IndexError:
        raise DecodingError('RLP string too short', rlp)
    if end != len(rlp) and strict:
        msg = 'RLP string ends with {} superfluous bytes'.format(len(rlp) - end)
        raise DecodingError(msg, rlp)
    if not sedes:
        return item
    obj = sedes.deserialize(item, **kwargs)
    if is_sequence(obj) or hasattr(obj, '_cached_rlp'):
        _apply_rlp_cache(obj, per_item_rlp, recursive_cache)
    return obj
Decode an RLP encoded object .
228
8
232,917
def infer_sedes(obj):
    """Try to find a sedes object suitable for a given Python object.

    Raises TypeError when no suitable sedes exists.
    """
    # BUG FIX: ``collections.Sequence`` is a deprecated alias that was removed
    # in Python 3.10; prefer the collections.abc location with a fallback for
    # older interpreters.
    try:
        from collections.abc import Sequence as _Sequence
    except ImportError:
        _Sequence = collections.Sequence
    if is_sedes(obj.__class__):
        return obj.__class__
    elif not isinstance(obj, bool) and isinstance(obj, int) and obj >= 0:
        return big_endian_int
    elif BinaryClass.is_valid_type(obj):
        return binary
    elif not isinstance(obj, str) and isinstance(obj, _Sequence):
        return List(map(infer_sedes, obj))
    elif isinstance(obj, bool):
        return boolean
    elif isinstance(obj, str):
        return text
    msg = 'Did not find sedes handling type {}'.format(type(obj).__name__)
    raise TypeError(msg)
Try to find a sedes object suitable for a given Python object.
162
14
232,918
def destinations(self, cluster='main'):
    """Return the list of destinations for a cluster."""
    if not self.config.has_section(cluster):
        raise SystemExit("Cluster '%s' not defined in %s"
                         % (cluster, self.config_file))
    raw = self.config.get(cluster, 'destinations')
    # Strip whitespace before splitting the comma-separated list.
    return raw.replace(' ', '').split(',')
Return a list of destinations for a cluster .
84
9
232,919
def replication_factor(self, cluster='main'):
    """Return the replication factor for a cluster as an integer."""
    if not self.config.has_section(cluster):
        raise SystemExit("Cluster '%s' not defined in %s"
                         % (cluster, self.config_file))
    raw = self.config.get(cluster, 'replication_factor')
    return int(raw)
Return the replication factor for a cluster as an integer .
74
11
232,920
def ssh_user(self, cluster='main'):
    """Return the ssh user for a cluster, or the current user if undefined."""
    if not self.config.has_section(cluster):
        raise SystemExit("Cluster '%s' not defined in %s"
                         % (cluster, self.config_file))
    try:
        return self.config.get(cluster, 'ssh_user')
    except NoOptionError:
        # Fall back to the user running this process.
        return pwd.getpwuid(os.getuid()).pw_name
Return the ssh user for a cluster or current user if undefined .
98
13
232,921
def whisper_lock_writes(self, cluster='main'):
    """Lock whisper files during carbon-sync.

    Returns the boolean value of the cluster's ``whisper_lock_writes``
    option, defaulting to False when the option is absent.
    """
    if not self.config.has_section(cluster):
        raise SystemExit("Cluster '%s' not defined in %s"
                         % (cluster, self.config_file))
    try:
        raw = self.config.get(cluster, 'whisper_lock_writes')
    except NoOptionError:
        return False
    # BUG FIX: bool() on any non-empty string (including 'false') is True,
    # so interpret the common config spellings explicitly.
    if isinstance(raw, str):
        return raw.strip().lower() in ('1', 'true', 'yes', 'on')
    return bool(raw)
Lock whisper files during carbon - sync .
90
8
232,922
def hashing_type(self, cluster='main'):
    """Return the hashing type of a cluster, defaulting to 'carbon_ch'."""
    if not self.config.has_section(cluster):
        raise SystemExit("Cluster '%s' not defined in %s"
                         % (cluster, self.config_file))
    default_type = 'carbon_ch'
    try:
        return self.config.get(cluster, 'hashing_type')
    except NoOptionError:
        return default_type
Hashing type of cluster .
91
6
232,923
def fill_archives(src, dst, startFrom, endAt=0, overwrite=False,
                  lock_writes=False):
    """Fill gaps in ``dst`` using data from ``src``."""
    # Honour the caller's locking preference where whisper supports it.
    if lock_writes is False:
        whisper.LOCK = False
    elif whisper.CAN_LOCK and lock_writes is True:
        whisper.LOCK = True

    header = whisper.info(dst)
    archives = sorted(header['archives'], key=lambda a: a['retention'])

    for archive in archives:
        fromTime = max(endAt, time.time() - archive['retention'])
        if fromTime >= startFrom:
            continue

        (timeInfo, values) = whisper.fetch(dst, fromTime, untilTime=startFrom)
        (start, end, step) = timeInfo
        gapstart = None
        for value in values:
            has_value = bool(value and not overwrite)
            if not has_value and not gapstart:
                gapstart = start
            elif has_value and gapstart:
                # Only fill gaps at least one archive point wide.
                if (start - gapstart) >= archive['secondsPerPoint']:
                    fill(src, dst, gapstart - step, start)
                gapstart = None
            start += step

        # A gap that runs to the end of the fetched window still needs filling.
        if gapstart:
            fill(src, dst, gapstart - step, end - step)

        # The next (coarser) archive only needs filling up to the most
        # recent point in time updated here.
        startFrom = fromTime
Fills gaps in dst using data from src .
299
10
232,924
def data(path, hours, offset=0):
    """Return True when the metric at ``path`` has no whisper data newer
    than ``hours`` (i.e. every fetched value is None)."""
    now = time.time()
    end = now - _to_sec(offset)  # defaults to "now" when offset is 0
    start = end - _to_sec(hours)
    fetched = whisper.fetch(path, start, end)
    # whisper.fetch returns (timeInfo, values); inspect the values.
    return all(v is None for v in fetched[-1])
Does the metric at path have any whisper data newer than hours ?
75
13
232,925
def stat(path, hours, offset=None):
    """Return True when the file at ``path`` was last modified more than
    ``hours`` ago (``offset`` is accepted but unused)."""
    cutoff = time.time() - _to_sec(hours)
    return os.stat(path).st_mtime < cutoff
Has the metric file at path been modified since hours ago?
40
12
232,926
def short_path(path, cwd=None):
    """Return the relative or absolute path name, whichever is shorter.

    Non-string inputs are returned unchanged.
    """
    if not isinstance(path, str):
        return path
    base = os.getcwd() if cwd is None else cwd
    abspath = os.path.abspath(path)
    relpath = os.path.relpath(path, base)
    return abspath if len(abspath) <= len(relpath) else relpath
Return relative or absolute path name whichever is shortest .
87
10
232,927
def check_rest(module, names, dots=True):
    """Check reStructuredText formatting of docstrings.

    Validates the docstring of ``module`` itself (unless listed in
    OTHER_MODULE_DOCS) and of every attribute named in ``names``. Returns a
    list of result tuples as produced by ``validate_rst_syntax``.
    """
    try:
        skip_types = (dict, str, unicode, float, int)
    except NameError:
        # Python 3: ``unicode`` no longer exists.
        skip_types = (dict, str, float, int)

    results = []
    if module.__name__[6:] not in OTHER_MODULE_DOCS:
        results += [(module.__name__,) +
                    validate_rst_syntax(inspect.getdoc(module),
                                        module.__name__, dots=dots)]

    for name in names:
        full_name = module.__name__ + '.' + name
        obj = getattr(module, name, None)
        if obj is None:
            results.append((full_name, False,
                            "%s has no docstring" % (full_name,)))
            continue
        elif isinstance(obj, skip_types):
            continue

        if inspect.ismodule(obj):
            text = inspect.getdoc(obj)
        else:
            try:
                text = str(get_doc_object(obj))
            except Exception:
                # BUG FIX: narrowed from a bare ``except``, which would also
                # swallow KeyboardInterrupt/SystemExit.
                import traceback
                results.append((full_name, False,
                                "Error in docstring format!\n" +
                                traceback.format_exc()))
                continue

        # Control characters usually mean a forgotten r""" raw docstring.
        m = re.search("([\x00-\x09\x0b-\x1f])", text)
        if m:
            msg = ("Docstring contains a non-printable character %r! "
                   "Maybe forgot r\"\"\"?" % (m.group(1),))
            results.append((full_name, False, msg))
            continue

        try:
            src_file = short_path(inspect.getsourcefile(obj))
        except TypeError:
            src_file = None

        if src_file:
            file_full_name = src_file + ':' + full_name
        else:
            file_full_name = full_name

        results.append((full_name,) +
                       validate_rst_syntax(text, file_full_name, dots=dots))
    return results
Check reStructuredText formatting of docstrings
441
9
232,928
def update_header(self):
    """Write the current header fields through to the edflib file struct."""
    set_technician(self.handle, du(self.technician))
    set_recording_additional(self.handle, du(self.recording_additional))
    set_patientname(self.handle, du(self.patient_name))
    set_patientcode(self.handle, du(self.patient_code))
    set_patient_additional(self.handle, du(self.patient_additional))
    set_equipment(self.handle, du(self.equipment))
    set_admincode(self.handle, du(self.admincode))
    # Gender may be given numerically or as a string.
    if isinstance(self.gender, int):
        set_gender(self.handle, self.gender)
    elif self.gender == "Male":
        set_gender(self.handle, 0)
    elif self.gender == "Female":
        set_gender(self.handle, 1)
    set_datarecord_duration(self.handle, self.duration)
    set_number_of_annotation_signals(self.handle, self.number_of_annotations)
    rec_start = self.recording_start_time
    set_startdatetime(self.handle, rec_start.year, rec_start.month,
                      rec_start.day, rec_start.hour, rec_start.minute,
                      rec_start.second)
    # Birthdate may be a '%d %b %Y' string (possibly empty) or a date object.
    if isstr(self.birthdate):
        if self.birthdate != '':
            birthday = datetime.strptime(self.birthdate, '%d %b %Y').date()
            set_birthdate(self.handle, birthday.year, birthday.month,
                          birthday.day)
    else:
        set_birthdate(self.handle, self.birthdate.year,
                      self.birthdate.month, self.birthdate.day)
    for i in np.arange(self.n_channels):
        channel = self.channels[i]
        set_samplefrequency(self.handle, i, channel['sample_rate'])
        set_physical_maximum(self.handle, i, channel['physical_max'])
        set_physical_minimum(self.handle, i, channel['physical_min'])
        set_digital_maximum(self.handle, i, channel['digital_max'])
        set_digital_minimum(self.handle, i, channel['digital_min'])
        set_label(self.handle, i, du(channel['label']))
        set_physical_dimension(self.handle, i, du(channel['dimension']))
        set_transducer(self.handle, i, du(channel['transducer']))
        set_prefilter(self.handle, i, du(channel['prefilter']))
Updates header to edffile struct
662
8
232,929
def setHeader(self, fileHeader):
    """Set the file header from a dict and write it through."""
    self.technician = fileHeader["technician"]
    self.recording_additional = fileHeader["recording_additional"]
    self.patient_name = fileHeader["patientname"]
    self.patient_additional = fileHeader["patient_additional"]
    self.patient_code = fileHeader["patientcode"]
    self.equipment = fileHeader["equipment"]
    self.admincode = fileHeader["admincode"]
    self.gender = fileHeader["gender"]
    self.recording_start_time = fileHeader["startdate"]
    self.birthdate = fileHeader["birthdate"]
    self.update_header()
Sets the file header
157
5
232,930
def setSignalHeader(self, edfsignal, channel_info):
    """Set the parameters for signal ``edfsignal``.

    Raises ChannelDoesNotExist for an out-of-range channel index.
    """
    # BUG FIX: valid indices are 0 .. n_channels-1; the original used
    # ``> self.n_channels`` and then crashed on the container lookup for
    # edfsignal == n_channels.
    if edfsignal < 0 or edfsignal >= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal] = channel_info
    self.update_header()
Sets the parameter for signal edfsignal .
70
11
232,931
def setSignalHeaders(self, signalHeaders):
    """Set the parameters for all signals and write them through."""
    for edfsignal in np.arange(self.n_channels):
        self.channels[edfsignal] = signalHeaders[edfsignal]
    self.update_header()
Sets the parameter for all signals
59
7
232,932
def set_number_of_annotation_signals(self, number_of_annotations):
    """Set the number of annotation signals (clamped to the range 1..64).

    The default is 1; increase it only when more annotations must be
    stored than the recording's duration in seconds allows.
    """
    clamped = min(max(int(number_of_annotations), 1), 64)
    self.number_of_annotations = clamped
    self.update_header()
Sets the number of annotation signals . The default value is 1 This function is optional and can be called only after opening a file in writemode and before the first sample write action Normally you don t need to change the default value . Only when the number of annotations you want to write is more than the number of seconds of the duration of the recording you can use this function to increase the storage space for annotations Minimum is 1 maximum is 64
75
89
232,933
def setStartdatetime(self, recording_start_time):
    """Set the recording start time from a datetime or a
    '%d %b %Y %H:%M:%S' formatted string."""
    if not isinstance(recording_start_time, datetime):
        recording_start_time = datetime.strptime(recording_start_time,
                                                 "%d %b %Y %H:%M:%S")
    self.recording_start_time = recording_start_time
    self.update_header()
Sets the recording start Time
88
6
232,934
def setSamplefrequency(self, edfsignal, samplefrequency):
    """Set the sample frequency of signal ``edfsignal``.

    Raises ChannelDoesNotExist for an out-of-range channel index.
    """
    # BUG FIX: ``>`` accepted edfsignal == n_channels and then crashed on
    # the lookup below; valid indices are 0 .. n_channels-1.
    if edfsignal < 0 or edfsignal >= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['sample_rate'] = samplefrequency
    self.update_header()
Sets the samplefrequency of signal edfsignal .
74
12
232,935
def setPhysicalMaximum(self, edfsignal, physical_maximum):
    """Set the physical_max of signal ``edfsignal``.

    Raises ChannelDoesNotExist for an out-of-range channel index.
    """
    # BUG FIX: ``>`` accepted edfsignal == n_channels and then crashed on
    # the lookup below; valid indices are 0 .. n_channels-1.
    if edfsignal < 0 or edfsignal >= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['physical_max'] = physical_maximum
    self.update_header()
Sets the physical_maximum of signal edfsignal .
76
13
232,936
def setPhysicalMinimum(self, edfsignal, physical_minimum):
    """Set the physical_min of signal ``edfsignal``.

    Raises ChannelDoesNotExist for an out-of-range channel index.
    """
    # BUG FIX: ``>`` accepted edfsignal == n_channels and then crashed on
    # the lookup below; valid indices are 0 .. n_channels-1.
    if edfsignal < 0 or edfsignal >= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['physical_min'] = physical_minimum
    self.update_header()
Sets the physical_minimum of signal edfsignal .
78
13
232,937
def setDigitalMaximum(self, edfsignal, digital_maximum):
    """Set the digital_max of signal ``edfsignal``.

    Usually 32767 is used for EDF+ and 8388607 for BDF+. Raises
    ChannelDoesNotExist for an out-of-range channel index.
    """
    # BUG FIX: ``>`` accepted edfsignal == n_channels and then crashed on
    # the lookup below; valid indices are 0 .. n_channels-1.
    if edfsignal < 0 or edfsignal >= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['digital_max'] = digital_maximum
    self.update_header()
Sets the digital_maximum of signal edfsignal. Usually the value 32767 is used for EDF+ and 8388607 for BDF+.
78
32
232,938
def setTransducer(self, edfsignal, transducer):
    """Set the transducer description of signal ``edfsignal``.

    Raises ChannelDoesNotExist for an out-of-range channel index.
    """
    # BUG FIX: ``>`` accepted edfsignal == n_channels and then crashed on
    # the lookup below; valid indices are 0 .. n_channels-1.
    if edfsignal < 0 or edfsignal >= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['transducer'] = transducer
    self.update_header()
Sets the transducer of signal edfsignal
79
12
232,939
def readAnnotations(self):
    """Read the annotations from an EDF file.

    Returns a tuple ``(onset_seconds, durations, texts)`` of arrays.
    """
    annot = np.array(self.read_annotation())
    if annot.shape[0] == 0:
        return np.array([]), np.array([]), np.array([])
    onset = self._get_float(annot[:, 0])
    raw_texts = annot[:, 2]
    texts_out = ["" for _ in range(len(annot[:, 1]))]
    for i in np.arange(len(annot[:, 1])):
        texts_out[i] = self._convert_string(raw_texts[i])
        # Missing durations are encoded as '' and normalized to -1.
        if annot[i, 1] == '':
            annot[i, 1] = '-1'
    durations = self._get_float(annot[:, 1])
    # Onsets are stored in 1e-7 s units; convert to seconds.
    return onset / 10000000, durations, np.array(texts_out)
Annotations from a edf - file
218
8
232,940
def getHeader(self):
    """Return the file header as a dict."""
    return {
        "technician": self.getTechnician(),
        "recording_additional": self.getRecordingAdditional(),
        "patientname": self.getPatientName(),
        "patient_additional": self.getPatientAdditional(),
        "patientcode": self.getPatientCode(),
        "equipment": self.getEquipment(),
        "admincode": self.getAdmincode(),
        "gender": self.getGender(),
        "startdate": self.getStartdatetime(),
        "birthdate": self.getBirthdate(),
    }
Returns the file header as dict
147
6
232,941
def getSignalHeader(self, chn):
    """Return the header of one signal as a dict."""
    return {
        'label': self.getLabel(chn),
        'dimension': self.getPhysicalDimension(chn),
        'sample_rate': self.getSampleFrequency(chn),
        'physical_max': self.getPhysicalMaximum(chn),
        'physical_min': self.getPhysicalMinimum(chn),
        'digital_max': self.getDigitalMaximum(chn),
        'digital_min': self.getDigitalMinimum(chn),
        'prefilter': self.getPrefilter(chn),
        'transducer': self.getTransducer(chn),
    }
Returns the header of one signal as dicts
156
9
232,942
def getSignalHeaders(self):
    """Return the headers of all signals as a list of dicts."""
    return [self.getSignalHeader(chn)
            for chn in np.arange(self.signals_in_file)]
Returns the header of all signals as array of dicts
52
11
232,943
def getStartdatetime(self):
    """Return the recording start date and time as a datetime object."""
    return datetime(self.startdate_year, self.startdate_month,
                    self.startdate_day, self.starttime_hour,
                    self.starttime_minute, self.starttime_second)
Returns the date and starttime as datetime object
55
10
232,944
def getBirthdate(self, string=True):
    """Return the birthdate as a string, or as a datetime when
    ``string`` is False (parsed with '%d %b %Y')."""
    text = self._convert_string(self.birthdate.rstrip())
    if string:
        return text
    return datetime.strptime(text, "%d %b %Y")
Returns the birthdate as string object
71
7
232,945
def getSampleFrequencies(self):
    """Return the (rounded) sample frequencies of all signals as an array."""
    frequencies = [round(self.samplefrequency(chn))
                   for chn in np.arange(self.signals_in_file)]
    return np.array(frequencies)
Returns samplefrequencies of all signals .
46
9
232,946
def getSampleFrequency(self, chn):
    """Return the (rounded) sample frequency of signal ``chn``,
    or 0 for an invalid channel index."""
    if not (0 <= chn < self.signals_in_file):
        return 0
    return round(self.samplefrequency(chn))
Returns the samplefrequency of signal edfsignal .
42
11
232,947
def getPhysicalMaximum(self, chn=None):
    """Return the physical maximum of signal ``chn`` (0 when out of range),
    or an array of all signals' physical maxima when ``chn`` is None."""
    if chn is not None:
        if 0 <= chn < self.signals_in_file:
            return self.physical_max(chn)
        return 0
    physMax = np.zeros(self.signals_in_file)
    for i in np.arange(self.signals_in_file):
        physMax[i] = self.physical_max(i)
    return physMax
Returns the maximum physical value of signal edfsignal .
100
12
232,948
def getPhysicalMinimum(self, chn=None):
    """Return the physical minimum of signal ``chn`` (0 when out of range),
    or an array of all signals' physical minima when ``chn`` is None."""
    if chn is not None:
        if 0 <= chn < self.signals_in_file:
            return self.physical_min(chn)
        return 0
    physMin = np.zeros(self.signals_in_file)
    for i in np.arange(self.signals_in_file):
        physMin[i] = self.physical_min(i)
    return physMin
Returns the minimum physical value of signal edfsignal .
100
12
232,949
def getDigitalMaximum(self, chn=None):
    """Return the digital maximum of signal ``chn`` (0 when out of range),
    or an array of all signals' digital maxima when ``chn`` is None."""
    if chn is not None:
        if 0 <= chn < self.signals_in_file:
            return self.digital_max(chn)
        return 0
    digMax = np.zeros(self.signals_in_file)
    for i in np.arange(self.signals_in_file):
        digMax[i] = self.digital_max(i)
    return digMax
Returns the maximum digital value of signal edfsignal .
100
12
232,950
def getDigitalMinimum(self, chn=None):
    """Return the digital minimum of signal ``chn`` (0 when out of range),
    or an array of all signals' digital minima when ``chn`` is None."""
    if chn is not None:
        if 0 <= chn < self.signals_in_file:
            return self.digital_min(chn)
        return 0
    digMin = np.zeros(self.signals_in_file)
    for i in np.arange(self.signals_in_file):
        digMin[i] = self.digital_min(i)
    return digMin
Returns the minimum digital value of signal edfsignal .
100
12
232,951
def readSignal(self, chn, start=0, n=None):
    """Return the physical data of signal ``chn``.

    With ``start``/``n`` a subset is returned; invalid arguments yield an
    empty array.
    """
    empty = np.array([])
    if start < 0:
        return empty
    if n is not None and n < 0:
        return empty
    nsamples = self.getNSamples()
    if chn >= len(nsamples):
        return empty
    if n is None:
        n = nsamples[chn]
    elif n > nsamples[chn]:
        return empty
    out = np.zeros(n, dtype=np.float64)
    # The C layer fills ``out`` in place.
    self.readsignal(chn, start, n, out)
    return out
Returns the physical data of signal chn . When start and n is set a subset is returned
146
19
232,952
def stackplot(marray, seconds=None, start_time=None, ylabels=None):
    """Plot a stack of traces one above the other.

    Assumes ``marray.shape == (numRows, numSamples)``.
    """
    transposed = np.transpose(marray)
    stackplot_t(transposed, seconds=seconds, start_time=start_time,
                ylabels=ylabels)
    plt.show()
will plot a stack of traces one above the other assuming marray . shape = numRows numSamples
69
22
232,953
def stackplot_t(tarray, seconds=None, start_time=None, ylabels=None):
    """Plot a stack of traces one above the other.

    Assumes ``tarray.shape == (numSamples, numRows)``.
    """
    data = tarray
    numSamples, numRows = tarray.shape
    if seconds:
        t = seconds * np.arange(numSamples, dtype=float) / numSamples
        if start_time:
            t = t + start_time
            xlm = (start_time, start_time + seconds)
        else:
            xlm = (0, seconds)
    else:
        t = np.arange(numSamples, dtype=float)
        xlm = (0, numSamples)

    ticklocs = []
    ax = plt.subplot(111)
    plt.xlim(*xlm)
    dmin = data.min()
    dmax = data.max()
    dr = (dmax - dmin) * 0.7  # Crowd the traces a bit.
    y0 = dmin
    y1 = (numRows - 1) * dr + dmax
    plt.ylim(y0, y1)

    segs = []
    for i in range(numRows):
        segs.append(np.hstack((t[:, np.newaxis], data[:, i, np.newaxis])))
        ticklocs.append(i * dr)

    offsets = np.zeros((numRows, 2), dtype=float)
    offsets[:, 1] = ticklocs
    lines = LineCollection(segs, offsets=offsets, transOffset=None)
    ax.add_collection(lines)

    # Put y ticks at each trace's offset.
    ax.set_yticks(ticklocs)
    # BUG FIX: the original unconditionally assigned a default label list to
    # ``plt.ylabels`` (polluting the pyplot module) while still passing the
    # possibly-None ``ylabels`` argument to set_yticklabels. Default the
    # labels properly instead.
    if ylabels is None:
        ylabels = ["%d" % ii for ii in range(numRows)]
    ax.set_yticklabels(ylabels)
    plt.xlabel('time (s)')
will plot a stack of traces one above the other assuming tarray . shape = numSamples numRows
535
22
232,954
def find_path(start, goal, neighbors_fnct, reversePath=False,
              heuristic_cost_estimate_fnct=lambda a, b: Infinite,
              distance_between_fnct=lambda a, b: 1.0,
              is_goal_reached_fnct=lambda a, b: a == b):
    """A non-class version of the path finding algorithm."""
    class FindPath(AStar):
        # Thin adapter forwarding each AStar hook to the supplied callables.
        def heuristic_cost_estimate(self, current, goal):
            return heuristic_cost_estimate_fnct(current, goal)

        def distance_between(self, n1, n2):
            return distance_between_fnct(n1, n2)

        def neighbors(self, node):
            return neighbors_fnct(node)

        def is_goal_reached(self, current, goal):
            return is_goal_reached_fnct(current, goal)

    return FindPath().astar(start, goal, reversePath)
A non - class version of the path finding algorithm
203
10
232,955
def validate(source, **options):
    """Validate a source file and return a report."""
    source, options, inspector_settings = _parse_arguments(source, **options)
    inspector = Inspector(**inspector_settings)
    return inspector.inspect(source, **options)
Validates a source file and returns a report .
57
10
232,956
def init_datapackage(resource_paths):
    """Create a tabular data package with the given resources."""
    package = datapackage.Package({
        'name': 'change-me',
        'schema': 'tabular-data-package',
    })
    for path in resource_paths:
        package.infer(path)
    return package
Create tabular data package with resources .
69
8
232,957
def init(paths, output, **kwargs):
    """Init a data package from a list of files and print its descriptor."""
    package = goodtables.init_datapackage(paths)
    click.secho(json_module.dumps(package.descriptor, indent=4), file=output)
    # Exit status reflects descriptor validity.
    exit(package.valid)
Init data package from list of files .
62
8
232,958
def _clean_empty ( d ) : if not isinstance ( d , ( dict , list ) ) : return d if isinstance ( d , list ) : return [ v for v in ( _clean_empty ( v ) for v in d ) if v is not None ] return { k : v for k , v in ( ( k , _clean_empty ( v ) ) for k , v in d . items ( ) ) if v is not None }
Remove None values from a dict .
98
7
232,959
def create_cells(headers, schema_fields, values=None, row_number=None):
    """Create a list of cells from headers, schema fields and values.

    For a header row (``values is None``) each cell's value is the header
    itself. Positions missing from any input become None (or '' for a
    missing value in a data row).
    """
    fillvalue = '_fillvalue'
    is_header_row = values is None
    cells = []
    padded = zip_longest(headers, schema_fields, values or [],
                         fillvalue=fillvalue)
    for column_number, (header, field, value) in enumerate(padded, start=1):
        if header == fillvalue:
            header = None
        elif is_header_row:
            value = header
        if field == fillvalue:
            field = None
        if value == fillvalue:
            value = None
        elif value is None:
            value = ''
        cells.append(create_cell(header, value, field, column_number,
                                 row_number))
    return cells
Create list of cells from headers fields and values .
167
10
232,960
def __impl_read_chain(self, start, read_sector_f, read_fat_f):
    """Return the entire contents of a chain starting at the given sector.

    Follows the FAT from ``start``, concatenating each sector's contents
    until ENDOFCHAIN. If a sector is visited twice the loop is reported
    and whatever was read so far is returned.
    """
    sector = start
    seen = [sector]  # sectors already visited, to detect FAT loops
    buffer = StringIO()
    while sector != ENDOFCHAIN:
        buffer.write(read_sector_f(sector))
        next_sector = read_fat_f(sector)
        if next_sector in seen:
            # BUG FIX: the original referenced an undefined name
            # ``sector_start`` here, raising NameError instead of logging.
            # Also renamed ``next`` to avoid shadowing the builtin.
            logging.error(
                'infinite loop detected at {0} to {1} starting at {2}'.format(
                    sector, next_sector, start))
            return buffer.getvalue()
        seen.append(next_sector)
        sector = next_sector
    return buffer.getvalue()
Returns the entire contents of a chain starting at the given sector .
143
13
232,961
def get_charm_url(self):
    """Get the charm URL for the bracket this rank is in."""
    # Upper rank_id bound (inclusive) for each of the first five brackets.
    thresholds = (4, 8, 12, 16, 19)
    for bracket, limit in enumerate(thresholds):
        if self.rank_id <= limit:
            return self.RANK_CHARMS[bracket]
    return self.RANK_CHARMS[5]
Get charm URL for the bracket this rank is in
122
10
232,962
def load_rank(self, region, season=-1):
    """|coro| Load the player's rank for this region and season."""
    data = yield from self.auth.get(
        "https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s&region_id=%s&season_id=%s"
        % (self.spaceid, self.platform_url, self.id, region, season))
    if "players" in data and self.id in data["players"]:
        regionkey = "%s:%s" % (region, season)
        self.ranks[regionkey] = Rank(data["players"][self.id])
        return self.ranks[regionkey]
    raise InvalidRequest("Missing players key in returned JSON object %s"
                         % str(data))
|coro| Loads the player's rank for this region and season.
198
14
232,963
def libdmtx_function(fname, restype, *args):
    """Return a foreign function exported by libdmtx.

    :param fname: name of the exported symbol
    :param restype: ctypes result type
    :param args: ctypes argument types
    """
    signature = CFUNCTYPE(restype, *args)
    return signature((fname, load_libdmtx()))
Returns a foreign function exported by libdmtx .
49
11
232,964
def _image(pixels, width, height, pack):
    """Context manager wrapping dmtxImageCreate / dmtxImageDestroy."""
    img = dmtxImageCreate(pixels, width, height, pack)
    if not img:
        raise PyLibDMTXError('Could not create image')
    try:
        yield img
    finally:
        dmtxImageDestroy(byref(img))
A context manager for DmtxImage created and destroyed by dmtxImageCreate and dmtxImageDestroy .
68
24
232,965
def _decoder(image, shrink):
    """Context manager wrapping dmtxDecodeCreate / dmtxDecodeDestroy."""
    dec = dmtxDecodeCreate(image, shrink)
    if not dec:
        raise PyLibDMTXError('Could not create decoder')
    try:
        yield dec
    finally:
        dmtxDecodeDestroy(byref(dec))
A context manager for DmtxDecode created and destroyed by dmtxDecodeCreate and dmtxDecodeDestroy .
68
27
232,966
def _region(decoder, timeout):
    """Context manager wrapping dmtxRegionFindNext / dmtxRegionDestroy.

    The yielded region may be falsy when no region was found; it is only
    destroyed when it was actually created.
    """
    reg = dmtxRegionFindNext(decoder, timeout)
    try:
        yield reg
    finally:
        if reg:
            dmtxRegionDestroy(byref(reg))
A context manager for DmtxRegion created and destroyed by dmtxRegionFindNext and dmtxRegionDestroy .
45
25
232,967
def _decoded_matrix_region(decoder, region, corrections):
    """Context manager wrapping dmtxDecodeMatrixRegion / dmtxMessageDestroy.

    Yields the decoded message (possibly falsy on failure) and destroys it
    only when one was produced.
    """
    msg = dmtxDecodeMatrixRegion(decoder, region, corrections)
    try:
        yield msg
    finally:
        if msg:
            dmtxMessageDestroy(byref(msg))
A context manager for DmtxMessage created and destroyed by dmtxDecodeMatrixRegion and dmtxMessageDestroy .
56
28
232,968
def _decode_region(decoder, region, corrections, shrink):
    """Decode ``region`` and return Decoded(value, Rect), or None on failure.

    :param decoder: DmtxDecode pointer
    :param region: DmtxRegion pointer
    :param corrections: max error corrections to apply
    :param shrink: shrink factor the decoder was created with, used to map
        coordinates back to the original image scale
    """
    with _decoded_matrix_region(decoder, region, corrections) as msg:
        if msg:
            # Coordinates: map the symbol corners (0,0) and (1,1) from
            # "fit" space back to raw pixel coordinates via fit2raw.
            p00 = DmtxVector2()
            p11 = DmtxVector2(1.0, 1.0)
            dmtxMatrix3VMultiplyBy(p00, region.contents.fit2raw)
            dmtxMatrix3VMultiplyBy(p11, region.contents.fit2raw)
            # +0.5 rounds to the nearest integer pixel after undoing shrink.
            x0 = int((shrink * p00.X) + 0.5)
            y0 = int((shrink * p00.Y) + 0.5)
            x1 = int((shrink * p11.X) + 0.5)
            y1 = int((shrink * p11.Y) + 0.5)
            return Decoded(
                string_at(msg.contents.output),
                Rect(x0, y0, x1 - x0, y1 - y0)
            )
        else:
            return None
Decodes and returns the value in a region .
226
10
232,969
def encode(data, scheme=None, size=None):
    """Encode ``data`` in a DataMatrix image.

    :param data: bytes to encode
    :param scheme: encoding scheme name (defaults to 'Ascii')
    :param size: symbol size name (defaults to 'ShapeAuto')
    :returns: Encoded(width, height, bpp, pixels)
    :raises PyLibDMTXError: on an invalid scheme/size name, or when the
        data cannot be encoded
    """
    # Resolve the size name to its DmtxSymbolSize enum value.
    size = size if size else 'ShapeAuto'
    size_name = '{0}{1}'.format(ENCODING_SIZE_PREFIX, size)
    if not hasattr(DmtxSymbolSize, size_name):
        raise PyLibDMTXError(
            'Invalid size [{0}]: should be one of {1}'.format(
                size, ENCODING_SIZE_NAMES))
    size = getattr(DmtxSymbolSize, size_name)
    # Resolve the scheme name to its DmtxScheme enum value.
    scheme = scheme if scheme else 'Ascii'
    scheme_name = '{0}{1}'.format(ENCODING_SCHEME_PREFIX, scheme.capitalize())
    if not hasattr(DmtxScheme, scheme_name):
        raise PyLibDMTXError(
            'Invalid scheme [{0}]: should be one of {1}'.format(
                scheme, ENCODING_SCHEME_NAMES))
    scheme = getattr(DmtxScheme, scheme_name)
    with _encoder() as encoder:
        dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropScheme, scheme)
        dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropSizeRequest, size)
        if dmtxEncodeDataMatrix(encoder, len(data), cast(data, c_ubyte_p)) == 0:
            raise PyLibDMTXError(
                'Could not encode data, possibly because the image is not '
                'large enough to contain the data')
        # Read back the dimensions of the rendered symbol.
        w, h, bpp = map(
            partial(dmtxImageGetProp, encoder[0].image),
            (DmtxProperty.DmtxPropWidth,
             DmtxProperty.DmtxPropHeight,
             DmtxProperty.DmtxPropBitsPerPixel))
        # Copy the pixel buffer out before the encoder is destroyed.
        size = w * h * bpp // 8
        pixels = cast(
            encoder[0].image[0].pxl, ctypes.POINTER(ctypes.c_ubyte * size))
        return Encoded(
            width=w, height=h, bpp=bpp, pixels=ctypes.string_at(pixels, size))
Encodes data in a DataMatrix image .
510
9
232,970
def add_edge(edges, edge_points, coords, i, j):
    """Record the line between the i-th and j-th points unless present.

    Mutates ``edges`` (a set of index pairs) and ``edge_points`` (a list of
    coordinate pairs) in place; always returns None.

    FIX: the original returned a ``(None, None)`` tuple (built from the
    side-effecting expression tuple) on the add path but a bare ``None``
    on the duplicate path — an accidental inconsistency. Both paths now
    return None, and the side effects are plain statements.
    """
    if (i, j) in edges or (j, i) in edges:
        # already added (in either orientation)
        return
    edges.add((i, j))
    edge_points.append(coords[[i, j]])
Add a line between the i - th and j - th points if not in the list already
69
19
232,971
def sequence(self):
    """Sort the points in the line according to the ``Sort`` option.

    Supports 2D and 3D points. ``Sort`` may be 'X'/'x', 'Y'/'y' or (for
    3D points) 'Z'/'z'; any other value leaves the ordering to ``order``.
    ``order`` is always called on the (possibly sorted) points.
    """
    dimension = len(self.Points[0])
    if dimension not in (2, 3):
        # Original behavior: points of any other dimensionality are ignored.
        return
    # Map the sort option to the coordinate index it selects.
    axes = {'x': 0, 'y': 1}
    if dimension == 3:
        # BUG FIX: the original tested "Sort == 'Z' or Sort == 'Z'", so a
        # lowercase 'z' never triggered a sort on the third coordinate.
        axes['z'] = 2
    axis = axes.get(str(self.Sort).lower())
    if axis is not None:
        self.Points.sort(key=lambda point: point[axis])
    self.order(self.Points)
Sort the points in the line with the given option.
274
9
232,972
def resample(df, rule, time_index, groupby=None, aggregation='mean'):
    """pd.DataFrame.resample adapter.

    Resample ``df`` over ``time_index`` at frequency ``rule``, optionally
    grouping by ``groupby`` columns first, then aggregate.

    :param df: DataFrame to resample
    :param rule: resampling frequency (pandas offset alias)
    :param time_index: name of the datetime column to resample on
    :param groupby: optional list of column names to group by
    :param aggregation: name of the aggregation method (e.g. 'mean')
    :returns: the resampled, aggregated DataFrame
    """
    if groupby:
        df = df.groupby(groupby)
    df = df.resample(rule, on=time_index)
    df = getattr(df, aggregation)()
    # BUG FIX: the original iterated ``groupby`` unconditionally, raising
    # TypeError when it was None (the default).
    if groupby:
        for column in groupby:
            del df[column]
    return df
pd . DataFrame . resample adapter .
77
9
232,973
def _join_names ( names ) : levels = ( str ( name ) for name in names if name != '' ) return '_' . join ( levels )
Join the names of a multi - level index with an underscore .
34
13
232,974
def unstack(df, level=-1, reset_index=True):
    """pd.DataFrame.unstack adapter.

    Unstack ``level`` and, when ``reset_index`` is set, flatten the
    resulting multi-level column names into underscore-joined strings.
    """
    unstacked = df.unstack(level=level)
    if not reset_index:
        return unstacked
    unstacked = unstacked.reset_index()
    unstacked.columns = unstacked.columns.map(_join_names)
    return unstacked
pd . DataFrame . unstack adapter .
60
9
232,975
def load_boston_multitask():
    """Boston House Prices Dataset with a synthetic multitask output.

    The second output dimension is a linear transformation (2*y + 5) of
    the original target, producing a two-column regression target.
    """
    dataset = datasets.load_boston()
    y = dataset.target
    # Stack the real target with a synthetic, linearly-related second task.
    target = np.column_stack([y, 2 * y + 5])
    # NOTE(review): the docstring passed here comes from ``load_boston``
    # (presumably a sibling loader in this module), not from this
    # function — confirm that is intentional.
    return Dataset(load_boston.__doc__, dataset.data, target, r2_score)
Boston House Prices Dataset with a synthetic multitask output .
70
13
232,976
def energy(data):
    """Compute the signal energy of ``data`` after averaging channels."""
    mono = np.mean(data, axis=1)
    return np.sum(mono ** 2) / np.float64(len(mono))
Computes signal energy of data
38
6
232,977
def zcr(data):
    """Compute the zero crossing rate of the channel-averaged segment."""
    mono = np.mean(data, axis=1)
    # Each sign flip contributes |diff| == 2, hence the division by 2.
    crossings = np.sum(np.abs(np.diff(np.sign(mono)))) / 2
    return np.float64(crossings) / np.float64(len(mono) - 1.0)
Computes zero crossing rate of segment
74
7
232,978
def spectral_flux(d0, d1):
    """Compute the spectral flux between the current and previous frame.

    The flux is the sum of squared differences between the two
    normalized magnitude spectra.
    """
    frame = np.mean(d0, axis=1)
    prev_frame = np.mean(d1, axis=1)
    n_fft = min(len(frame) // 2, len(prev_frame) // 2)
    spectrum = FFT(frame, n_fft)
    prev_spectrum = FFT(prev_frame, n_fft)
    # EPSILON keeps the normalizing sums away from zero.
    total = np.sum(spectrum + EPSILON)
    prev_total = np.sum(prev_spectrum + EPSILON)
    return np.sum((spectrum / total - prev_spectrum / prev_total) ** 2)
Computes the spectral flux feature of the current frame
163
10
232,979
def rolling_window_sequences(X, index, window_size, target_size, target_column):
    """Create rolling window sequences out of timeseries data.

    Each window of ``window_size`` rows is paired with the following
    ``target_size`` values of ``target_column``, along with the index
    values at the window start and target start.

    :returns: tuple (windows, targets, window_index, target_index)
    """
    target = X[:, target_column]
    windows, targets, window_index, target_index = [], [], [], []
    num_windows = len(X) - window_size - target_size + 1
    for start in range(num_windows):
        end = start + window_size
        windows.append(X[start:end])
        targets.append(target[end:end + target_size])
        window_index.append(index[start])
        target_index.append(index[end])
    return (np.asarray(windows), np.asarray(targets),
            np.asarray(window_index), np.asarray(target_index))
Create rolling window sequences out of timeseries data .
183
10
232,980
def time_segments_average(X, interval, time_column):
    """Compute the average of values over fixed-length time segments.

    Deprecated: emits a DeprecationWarning; use the aggregate variant.

    :returns: tuple (averages, segment start timestamps) as numpy arrays
    """
    warnings.warn(_TIME_SEGMENTS_AVERAGE_DEPRECATION_WARNING, DeprecationWarning)
    if isinstance(X, np.ndarray):
        X = pd.DataFrame(X)
    X = X.sort_values(time_column).set_index(time_column)
    first_ts = X.index.values[0]
    last_ts = X.index.values[-1]
    averages = []
    timestamps = []
    segment_start = first_ts
    while segment_start <= last_ts:
        segment_end = segment_start + interval
        # end is exclusive: slice up to (but not including) segment_end
        segment = X.loc[segment_start:segment_end - 1]
        averages.append(segment.mean(skipna=True).values)
        timestamps.append(segment_start)
        segment_start = segment_end
    return np.asarray(averages), np.asarray(timestamps)
Compute average of values over fixed length time segments .
206
11
232,981
def time_segments_aggregate(X, interval, time_column, method='mean'):
    """Aggregate values over fixed-length time segments.

    :param X: DataFrame or 2d numpy array containing a time column
    :param interval: length of each time segment
    :param time_column: name/index of the time column
    :param method: aggregation name or list of names (default 'mean');
        FIX: the default was the mutable list ``['mean']`` — a plain
        string is equivalent (strings are wrapped below) and avoids the
        shared-mutable-default pitfall.
    :returns: tuple (values, index) as numpy arrays
    """
    if isinstance(X, np.ndarray):
        X = pd.DataFrame(X)
    X = X.sort_values(time_column).set_index(time_column)
    if isinstance(method, str):
        method = [method]
    start_ts = X.index.values[0]
    max_ts = X.index.values[-1]
    values = list()
    index = list()
    while start_ts <= max_ts:
        end_ts = start_ts + interval
        # end is exclusive: slice up to (but not including) end_ts
        subset = X.loc[start_ts:end_ts - 1]
        aggregated = [getattr(subset, agg)(skipna=True).values for agg in method]
        values.append(np.concatenate(aggregated))
        index.append(start_ts)
        start_ts = end_ts
    return np.asarray(values), np.asarray(index)
Aggregate values over fixed length time segments .
222
9
232,982
def image_transform(X, function, reshape_before=False, reshape_after=False,
                    width=None, height=None, **kwargs):
    """Apply a function image by image.

    :param X: array of images (flat 1d vectors or 2d arrays)
    :param function: callable, or dotted import path of one
    :param reshape_before: reshape each flat image to (width, height) first
    :param reshape_after: reshape each result back to the flat input length
    :param width: target width; inferred for square flat images
    :param height: target height; inferred for square flat images
    :param kwargs: extra keyword arguments passed to ``function``
    :raises ValueError: if ``function`` is not a str or callable, or the
        shape of a non-square flat image cannot be inferred
    """
    # BUG FIX: the original's "elif not callable" branch was unreachable
    # (it was only evaluated when the value WAS callable), so a
    # non-string non-callable crashed inside import_object instead of
    # raising the intended ValueError.
    if isinstance(function, str):
        function = import_object(function)
    if not callable(function):
        raise ValueError("function must be a str or a callable")
    flat_image = len(X[0].shape) == 1
    if reshape_before and flat_image:
        if not (width and height):
            # Infer a square shape from the flat length when possible.
            side_length = math.sqrt(X.shape[1])
            if side_length.is_integer():
                side_length = int(side_length)
                width = side_length
                height = side_length
            else:
                raise ValueError("Image sizes must be given for non-square images")
    else:
        reshape_before = False
    new_X = []
    for image in X:
        if reshape_before:
            image = image.reshape((width, height))
        features = function(image, **kwargs)
        if reshape_after:
            features = np.reshape(features, X.shape[1])
        new_X.append(features)
    return np.array(new_X)
Apply a function image by image .
267
7
232,983
def regression_errors(y, y_hat, smoothing_window=0.01, smooth=True):
    """Compute absolute errors between predictions and expected output.

    When ``smooth`` is set, the errors are smoothed with an exponentially
    weighted moving average whose span is ``smoothing_window`` expressed
    as a fraction of the series length.
    """
    errors = np.abs(y - y_hat)[:, 0]
    if not smooth:
        return errors
    span = int(smoothing_window * len(y))
    return pd.Series(errors).ewm(span=span).mean().values
Compute an array of absolute errors comparing predictions and expected output .
90
13
232,984
def deltas(errors, epsilon, mean, std):
    """Return (mean delta, std delta) of the errors at or below epsilon.

    Returns (0, 0) when no errors fall at or below epsilon.
    """
    below = errors[errors <= epsilon]
    if len(below) == 0:
        return 0, 0
    return mean - below.mean(), std - below.std()
Compute mean and std deltas .
53
9
232,985
def count_above(errors, epsilon):
    """Count errors above epsilon and the number of consecutive runs of them."""
    above = errors > epsilon
    total_above = len(errors[above])
    above_series = pd.Series(above)
    # A run starts wherever the above-flag flips relative to its predecessor
    # while currently being True.
    starts = above_series & (above_series != above_series.shift(1))
    return total_above, sum(starts)
Count number of errors and continuous sequences above epsilon .
74
12
232,986
def z_cost(z, errors, mean, std):
    """Score how bad a z value is — lower is better; np.inf when nothing
    is above the implied epsilon."""
    epsilon = mean + z * std
    delta_mean, delta_std = deltas(errors, epsilon, mean, std)
    above, consecutive = count_above(errors, epsilon)
    denominator = above + consecutive ** 2
    if denominator == 0:
        return np.inf
    numerator = -(delta_mean / mean + delta_std / std)
    return numerator / denominator
Compute how bad a z value is .
100
9
232,987
def find_threshold(errors, z_range=(0, 10)):
    """Find the ideal threshold as ``mean + best_z * std``.

    Runs a local optimization (``fmin``) of ``z_cost`` from every integer
    starting point in ``z_range`` and keeps the z with the lowest cost.
    """
    mean = errors.mean()
    std = errors.std()
    min_z, max_z = z_range
    best_z = min_z
    best_cost = np.inf
    for z in range(min_z, max_z):
        best = fmin(z_cost, z, args=(errors, mean, std), full_output=True, disp=False)
        z, cost = best[0:2]
        if cost < best_cost:
            best_z = z[0]
            # BUG FIX: the original never updated best_cost, so every
            # finite-cost candidate overwrote best_z and the last start
            # point won regardless of its cost.
            best_cost = cost
    return mean + best_z * std
Find the ideal threshold .
136
5
232,988
def find_sequences(errors, epsilon):
    """Return (start, stop) index pairs of contiguous runs where errors > epsilon."""
    above = pd.Series(errors > epsilon)
    flipped = above != above.shift(1).fillna(False)
    index = above.index
    starts = index[above & flipped].tolist()
    ends = (index[~above & flipped] - 1).tolist()
    # A run still open at the end of the series closes at the last index.
    if len(ends) == len(starts) - 1:
        ends.append(len(above) - 1)
    return list(zip(starts, ends))
Find sequences of values that are above epsilon .
115
11
232,989
def find_anomalies(errors, index, z_range=(0, 10)):
    """Return [start, stop, score] triples for anomalous sequences.

    The score is the sequence's peak error above the threshold, scaled by
    mean + std of all errors.
    """
    threshold = find_threshold(errors, z_range)
    denominator = errors.mean() + errors.std()
    anomalies = []
    for start, stop in find_sequences(errors, threshold):
        max_error = errors[start:stop + 1].max()
        severity = (max_error - threshold) / denominator
        anomalies.append([index[start], index[stop], severity])
    return np.asarray(anomalies)
Find sequences of values that are anomalous .
126
9
232,990
def GaussianBlur(X, ksize_width, ksize_height, sigma_x, sigma_y):
    """Apply Gaussian blur to the given data, image by image."""
    kernel_size = (ksize_width, ksize_height)
    return image_transform(X, cv2.GaussianBlur, ksize=kernel_size,
                           sigmaX=sigma_x, sigmaY=sigma_y)
Apply Gaussian blur to the given data .
78
9
232,991
def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer):
    """Helper to get indices of anomalous values in ``smoothed_errors``.

    The anomaly threshold is ``mean + z * std`` of the smoothed errors;
    detected sequences are pruned before the indices are returned.
    """
    mu = np.mean(smoothed_errors)
    sigma = np.std(smoothed_errors)
    epsilon = mu + (z * sigma)
    # compare to epsilon
    errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies(
        smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies)
    if len(errors_seq) > 0:
        anomaly_indices = prune_anomalies(
            errors_seq, smoothed_errors, max_error_below_e, anomaly_indices)
    return anomaly_indices
Helper method to get anomalies .
173
6
232,992
def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices):
    """Drop anomalous sequences whose peak error does not decrease enough
    relative to the next-highest peak, then keep only the anomaly indices
    that still fall inside a surviving sequence.

    Mutates ``e_seq`` in place and returns the pruned index list.
    """
    # min accepted perc decrease btwn max errors in anomalous sequences
    MIN_PERCENT_DECREASE = 0.05
    e_seq_max, smoothed_errors_max = [], []
    for error_seq in e_seq:
        if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0:
            sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]]
            e_seq_max.append(max(sliced_errors))
            smoothed_errors_max.append(max(sliced_errors))
    smoothed_errors_max.sort(reverse=True)
    if max_error_below_e > 0:
        # include the largest sub-threshold error as the baseline peak
        smoothed_errors_max.append(max_error_below_e)
    indices_remove = []
    # Walk the peaks from highest to lowest; a peak whose drop to the next
    # one is below the threshold is considered noise and marked for removal.
    for i in range(len(smoothed_errors_max)):
        if i < len(smoothed_errors_max) - 1:
            delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1]
            perc_change = delta / smoothed_errors_max[i]
            if perc_change < MIN_PERCENT_DECREASE:
                # NOTE(review): list.index returns the FIRST matching peak,
                # so sequences with identical peak values may be mis-mapped
                # here — confirm duplicates cannot occur upstream.
                indices_remove.append(e_seq_max.index(smoothed_errors_max[i]))
    for index in sorted(indices_remove, reverse=True):
        del e_seq[index]
    pruned_indices = []
    for i in anomaly_indices:
        for error_seq in e_seq:
            if i >= error_seq[0] and i <= error_seq[1]:
                pruned_indices.append(i)
    return pruned_indices
Helper method that removes anomalies which don't meet a minimum separation from the next anomaly.
403
16
232,993
def _configure_nodes(self, nodes):
    """Parse and set up the given nodes.

    ``nodes`` may be a single hostname, a list of hostnames, or a dict
    mapping hostname -> weight (int) or hostname -> config dict.

    :returns: True if the ring needs rebuilding (a node was added, or a
        ring-affecting option changed), False otherwise.
    :raises ValueError: on an unsupported ``nodes`` or per-node config type.
    """
    if isinstance(nodes, str):
        nodes = [nodes]
    elif not isinstance(nodes, (dict, list)):
        raise ValueError(
            'nodes configuration should be a list or a dict,'
            ' got {}'.format(type(nodes)))
    conf_changed = False
    for node in nodes:
        # per-node defaults, overridden below by any explicit config
        conf = {
            'hostname': node,
            'instance': None,
            'nodename': node,
            'port': None,
            'vnodes': self._default_vnodes,
            'weight': 1
        }
        current_conf = self.runtime._nodes.get(node, {})
        nodename = node
        # new node, trigger a ring update
        if not current_conf:
            conf_changed = True
        # complex config
        if isinstance(nodes, dict):
            node_conf = nodes[node]
            if isinstance(node_conf, int):
                # a bare int is shorthand for the node's weight
                conf['weight'] = node_conf
            elif isinstance(node_conf, dict):
                for k, v in node_conf.items():
                    if k in conf:
                        conf[k] = v
                        # changing those config trigger a ring update
                        if k in ['nodename', 'vnodes', 'weight']:
                            if current_conf.get(k) != v:
                                conf_changed = True
            else:
                raise ValueError(
                    'node configuration should be a dict or an int,'
                    ' got {}'.format(type(node_conf)))
        if self._weight_fn:
            # a user-supplied function may override the configured weight
            conf['weight'] = self._weight_fn(**conf)
        # changing the weight of a node trigger a ring update
        if current_conf.get('weight') != conf['weight']:
            conf_changed = True
        self.runtime._nodes[nodename] = conf
    return conf_changed
Parse and set up the given nodes .
407
9
232,994
def _get_pos ( self , key ) : p = bisect ( self . runtime . _keys , self . hashi ( key ) ) if p == len ( self . runtime . _keys ) : return 0 else : return p
Get the index of the given key in the sorted key list .
50
13
232,995
def _get ( self , key , what ) : if not self . runtime . _ring : return None pos = self . _get_pos ( key ) if what == 'pos' : return pos nodename = self . runtime . _ring [ self . runtime . _keys [ pos ] ] if what in [ 'hostname' , 'instance' , 'port' , 'weight' ] : return self . runtime . _nodes [ nodename ] [ what ] elif what == 'dict' : return self . runtime . _nodes [ nodename ] elif what == 'nodename' : return nodename elif what == 'tuple' : return ( self . runtime . _keys [ pos ] , nodename )
Generic getter magic method .
156
6
232,996
def get_instances(self):
    """Return the instances of all configured nodes that have one set."""
    instances = []
    for conf in self.runtime._nodes.values():
        instance = conf.get('instance')
        if instance:
            instances.append(instance)
    return instances
Returns a list of the instances of all the configured nodes .
43
12
232,997
def iterate_nodes(self, key, distinct=True):
    """hash_ring compatibility: yield node names for ``key``.

    Yields a single None when the ring is empty.
    """
    if not self.runtime._ring:
        yield None
        return
    for node in self.range(key, unique=distinct):
        yield node['nodename']
hash_ring compatibility implementation .
52
6
232,998
def print_continuum(self):
    """Print a ketama-compatible continuum report to stdout."""
    numpoints = len(self.runtime._keys)
    if numpoints:
        print('Numpoints in continuum: {}'.format(numpoints))
    else:
        print('Continuum empty')
    for point, nodename in self.get_points():
        print('{} ({})'.format(nodename, point))
Prints a ketama compatible continuum report .
92
9
232,999
def patch_memcache():
    """Monkey patch python-memcached to use our consistent hash ring for
    its node selection and operations.

    Replaces ``memcache.Client.__init__`` (to build a HashRing over the
    configured servers) and ``memcache.Client._get_server`` (to pick a
    server by walking the ring). The original implementations remain
    reachable as ``_old_init`` and ``_old_get_server``.
    """
    def _init(self, servers, *k, **kw):
        # Run the original constructor first, then build the ring from
        # the parsed server objects it created.
        self._old_init(servers, *k, **kw)
        nodes = {}
        for server in self.servers:
            conf = {
                'hostname': server.ip,
                'instance': server,
                'port': server.port,
                'weight': server.weight
            }
            nodes[server.ip] = conf
        self.uhashring = HashRing(nodes)

    def _get_server(self, key):
        # Tuple keys carry an explicit server index — defer to the
        # original lookup for those.
        if isinstance(key, tuple):
            return self._old_get_server(key)
        for i in range(self._SERVER_RETRIES):
            for node in self.uhashring.range(key):
                # first connectable node on the ring wins
                if node['instance'].connect():
                    return node['instance'], key
        return None, None

    memcache = __import__('memcache')
    # keep the originals around, then swap in the patched versions
    memcache.Client._old_get_server = memcache.Client._get_server
    memcache.Client._old_init = memcache.Client.__init__
    memcache.Client.__init__ = _init
    memcache.Client._get_server = _get_server
Monkey patch python - memcached to implement our consistent hashring in its node selection and operations .
269
21