idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
237,800
def _decodeTimestamp(byteIter):
    """Decode a 7-octet SMS-PDU timestamp into a timezone-aware datetime.

    The last two semi-octet characters carry the timezone, which is
    wrapped in an SmsPduTzInfo instance.
    """
    stamp = decodeSemiOctets(byteIter, 7)
    zone = SmsPduTzInfo(stamp[-2:])
    parsed = datetime.strptime(stamp[:-2], '%y%m%d%H%M%S')
    return parsed.replace(tzinfo=zone)
Decodes a 7 - octet timestamp
87
8
237,801
def decodeUcs2(byteIter, numBytes):
    """Decode UCS2-encoded text from byteIter, reading at most numBytes.

    Stops early (returning what was decoded so far) if the iterator is
    exhausted before numBytes have been consumed.
    """
    decodedChars = []
    consumed = 0
    try:
        while consumed < numBytes:
            high = next(byteIter)
            low = next(byteIter)
            decodedChars.append(unichr((high << 8) | low))
            consumed += 2
    except StopIteration:
        # Not enough bytes in iterator to reach numBytes; return what we have
        pass
    return ''.join(decodedChars)
Decodes UCS2 - encoded text from the specified byte iterator up to a maximum of numBytes
87
19
237,802
def encode(self):
    """Serialize this IE as a bytearray: ID byte, length byte, then the data."""
    buf = bytearray((self.id, self.dataLength))
    buf.extend(self.data)
    return buf
Encodes this IE and returns the resulting bytes
41
9
237,803
def parseArgsPy26():
    """Build and run the optparse-based CLI parser (Python 2.6 fallback).

    Returns the parsed options object, with the positional DESTINATION
    argument stored on options.destination. Calls parser.error() (which
    exits) when the positional argument is missing.
    """
    from gsmtermlib.posoptparse import PosOptionParser, Option
    parser = PosOptionParser(description='Simple script for sending SMS messages')
    parser.add_option('-i', '--port', metavar='PORT',
                      help='port to which the GSM modem is connected; a number or a device name.')
    parser.add_option('-b', '--baud', metavar='BAUDRATE', default=115200,
                      help='set baud rate')
    parser.add_option('-p', '--pin', metavar='PIN', default=None,
                      help='SIM card PIN')
    parser.add_option('-d', '--deliver', action='store_true',
                      help='wait for SMS delivery report')
    parser.add_positional_argument(
        Option('--destination', metavar='DESTINATION',
               help='destination mobile number'))
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error('Incorrect number of arguments - please specify a DESTINATION to send to, e.g. {0} 012789456'.format(sys.argv[0]))
    else:
        options.destination = args[0]
    return options
Argument parser for Python 2 . 6
314
8
237,804
def lineMatching(regexStr, lines):
    """Return the regex match for the first line matching regexStr, or None.

    The regex string is compiled and matched (anchored at line start)
    against each line in turn.
    """
    compiled = re.compile(regexStr)
    return next((match for match in map(compiled.match, lines) if match), None)
Searches through the specified list of strings and returns the regular expression match for the first line that matches the specified regex string or None if no match was found
42
32
237,805
def lineMatchingPattern(pattern, lines):
    """Return the match for the first line matching the pre-compiled pattern, or None."""
    for candidate in lines:
        match = pattern.match(candidate)
        if match is not None:
            return match
    return None
Searches through the specified list of strings and returns the regular expression match for the first line that matches the specified pre - compiled regex pattern or None if no match was found
33
35
237,806
def allLinesMatchingPattern(pattern, lines):
    """Return match objects for every line matching the pre-compiled pattern.

    Like lineMatchingPattern, but collects all matches instead of
    stopping at the first.
    """
    candidates = (pattern.match(line) for line in lines)
    return [match for match in candidates if match]
Like lineMatchingPattern but returns all lines that match the specified pattern
41
14
237,807
def clean(decrypted: bytes) -> str:
    """Strip trailing pad bytes from a decrypted value and decode it as UTF-8.

    The final element gives the pad length. On Python 3 indexing bytes
    yields an int; on Python 2 it yields a one-character str, hence the
    isinstance branch.
    """
    pad = decrypted[-1]
    padLength = pad if isinstance(pad, int) else ord(pad)
    return decrypted[:-padLength].decode('utf8')
Strip padding from the decrypted value.
63
8
237,808
def set_pattern_step_setpoint(self, patternnumber, stepnumber, setpointvalue):
    """Set the setpoint value for a step of a pattern (written with 1 decimal).

    The setpoint is validated against self.setpoint_max before writing.
    """
    _checkPatternNumber(patternnumber)
    _checkStepNumber(stepnumber)
    _checkSetpointValue(setpointvalue, self.setpoint_max)
    register = _calculateRegisterAddress('setpoint', patternnumber, stepnumber)
    self.write_register(register, setpointvalue, 1)
Set the setpoint value for a step .
90
9
237,809
def get_pattern_step_time(self, patternnumber, stepnumber):
    """Return the step time for a given step of a pattern (no decimals)."""
    _checkPatternNumber(patternnumber)
    _checkStepNumber(stepnumber)
    register = _calculateRegisterAddress('time', patternnumber, stepnumber)
    return self.read_register(register, 0)
Get the step time .
64
5
237,810
def set_pattern_step_time(self, patternnumber, stepnumber, timevalue):
    """Set the step time for a given step of a pattern.

    The time value is validated against self.time_max before writing.
    """
    _checkPatternNumber(patternnumber)
    _checkStepNumber(stepnumber)
    _checkTimeValue(timevalue, self.time_max)
    register = _calculateRegisterAddress('time', patternnumber, stepnumber)
    self.write_register(register, timevalue, 0)
Set the step time .
83
5
237,811
def get_pattern_actual_step(self, patternnumber):
    """Return the 'actual step' parameter for the given pattern."""
    _checkPatternNumber(patternnumber)
    register = _calculateRegisterAddress('actualstep', patternnumber)
    return self.read_register(register, 0)
Get the actual step parameter for a given pattern .
51
10
237,812
def set_pattern_actual_step(self, patternnumber, value):
    """Set the 'actual step' parameter for the given pattern.

    The value is validated with the step-number check, since it names a step.
    """
    _checkPatternNumber(patternnumber)
    _checkStepNumber(value)
    register = _calculateRegisterAddress('actualstep', patternnumber)
    self.write_register(register, value, 0)
Set the actual step parameter for a given pattern .
61
10
237,813
def get_pattern_additional_cycles(self, patternnumber):
    """Return the number of additional cycles for the given pattern."""
    _checkPatternNumber(patternnumber)
    register = _calculateRegisterAddress('cycles', patternnumber)
    return self.read_register(register)
Get the number of additional cycles for a given pattern .
49
11
237,814
def set_pattern_additional_cycles(self, patternnumber, value):
    """Set the number of additional cycles (0-99) for the given pattern."""
    _checkPatternNumber(patternnumber)
    minimalmodbus._checkInt(value, minvalue=0, maxvalue=99,
                            description='number of additional cycles')
    register = _calculateRegisterAddress('cycles', patternnumber)
    self.write_register(register, value, 0)
Set the number of additional cycles for a given pattern .
83
11
237,815
def get_pattern_link_topattern(self, patternnumber):
    """Return the linked-pattern value for the given pattern."""
    _checkPatternNumber(patternnumber)
    register = _calculateRegisterAddress('linkpattern', patternnumber)
    return self.read_register(register)
Get the linked pattern value for a given pattern .
51
10
237,816
def get_all_pattern_variables(self, patternnumber):
    """Return a human-readable, newline-separated summary of all pattern variables.

    Reads the eight setpoint/time pairs plus the actual-step,
    additional-cycles and linked-pattern parameters from the instrument.
    """
    _checkPatternNumber(patternnumber)
    pieces = []
    for step in range(8):
        pieces.append('SP{0}: {1} Time{0}: {2}\n'.format(
            step,
            self.get_pattern_step_setpoint(patternnumber, step),
            self.get_pattern_step_time(patternnumber, step)))
    pieces.append('Actual step: {0}\n'.format(self.get_pattern_actual_step(patternnumber)))
    pieces.append('Additional cycles: {0}\n'.format(self.get_pattern_additional_cycles(patternnumber)))
    pieces.append('Linked pattern: {0}\n'.format(self.get_pattern_link_topattern(patternnumber)))
    return ''.join(pieces)
Get all variables for a given pattern at one time .
193
11
237,817
def set_all_pattern_variables(self, patternnumber, sp0, ti0, sp1, ti1, sp2, ti2, sp3, ti3,
                              sp4, ti4, sp5, ti5, sp6, ti6, sp7, ti7,
                              actual_step, additional_cycles, link_pattern):
    """Set all variables for a given pattern at one time.

    Writes the eight setpoints, then the eight step times, then the
    additional-cycles, link-pattern and actual-step parameters — the
    same write order as the original unrolled implementation, now
    expressed as loops instead of 19 copy-pasted calls.
    """
    _checkPatternNumber(patternnumber)
    setpoints = (sp0, sp1, sp2, sp3, sp4, sp5, sp6, sp7)
    steptimes = (ti0, ti1, ti2, ti3, ti4, ti5, ti6, ti7)
    for step, setpoint in enumerate(setpoints):
        self.set_pattern_step_setpoint(patternnumber, step, setpoint)
    for step, steptime in enumerate(steptimes):
        self.set_pattern_step_time(patternnumber, step, steptime)
    self.set_pattern_additional_cycles(patternnumber, additional_cycles)
    self.set_pattern_link_topattern(patternnumber, link_pattern)
    self.set_pattern_actual_step(patternnumber, actual_step)
Set all variables for a given pattern at one time .
434
11
237,818
def close(self):
    """Close this dummy serial port.

    Raises:
        IOError: if the port is already closed.
    """
    if VERBOSE:
        _print_out('\nDummy_serial: Closing port\n')
    if not self._isOpen:
        raise IOError('Dummy_serial: The port is already closed')
    # Mark closed and drop the port name, mirroring pySerial behaviour
    self._isOpen = False
    self.port = None
Close a port on dummy_serial .
67
8
237,819
def write(self, inputdata):
    """Write to the dummy serial port.

    Stores the canned response (looked up by the written string in the
    module-level RESPONSES dict) that subsequent read() calls will return.

    Raises:
        TypeError: on Python 3 if inputdata is not bytes.
        IOError: if the port is not open.
    """
    if VERBOSE:
        _print_out('\nDummy_serial: Writing to port. Given:' + repr(inputdata) + '\n')
    if sys.version_info[0] > 2:
        if not type(inputdata) == bytes:
            raise TypeError('The input must be type bytes. Given:' + repr(inputdata))
        inputstring = str(inputdata, encoding='latin1')
    else:
        inputstring = inputdata
    if not self._isOpen:
        raise IOError('Dummy_serial: Trying to write, but the port is not open. Given:' + repr(inputdata))
    # Look up which data that should be waiting for subsequent read commands.
    # Was a bare 'except:' that swallowed every exception; only a missing
    # key should fall back to the default response.
    self._waiting_data = RESPONSES.get(inputstring, DEFAULT_RESPONSE)
Write to a port on dummy_serial .
193
9
237,820
def read(self, numberOfBytes):
    """Read from the dummy serial port.

    Returns the data stored by the previous write() call, honouring
    numberOfBytes: an exact-size request consumes everything; a smaller
    request keeps the remainder for later; a larger request sleeps for
    self.timeout (simulating a real port) and then returns everything.

    Raises:
        IOError: if numberOfBytes is negative or the port is not open.
    """
    if VERBOSE:
        _print_out('\nDummy_serial: Reading from port (max length {!r} bytes)'.format(numberOfBytes))
    if numberOfBytes < 0:
        raise IOError('Dummy_serial: The numberOfBytes to read must not be negative. Given: {!r}'.format(numberOfBytes))
    if not self._isOpen:
        raise IOError('Dummy_serial: Trying to read, but the port is not open.')
    # Do the actual reading from the waiting data, and simulate the influence of numberOfBytes
    if self._waiting_data == DEFAULT_RESPONSE:
        # The default response is sticky: it is not consumed by reads
        returnstring = self._waiting_data
    elif numberOfBytes == len(self._waiting_data):
        returnstring = self._waiting_data
        self._waiting_data = NO_DATA_PRESENT
    elif numberOfBytes < len(self._waiting_data):
        if VERBOSE:
            _print_out('Dummy_serial: The numberOfBytes to read is smaller than the available data. ' +
                       'Some bytes will be kept for later. Available data: {!r} (length = {}), numberOfBytes: {}'.format(
                           self._waiting_data, len(self._waiting_data), numberOfBytes))
        returnstring = self._waiting_data[:numberOfBytes]
        self._waiting_data = self._waiting_data[numberOfBytes:]
    else:
        # Wait for timeout, as we have asked for more data than available
        if VERBOSE:
            _print_out('Dummy_serial: The numberOfBytes to read is larger than the available data. ' +
                       'Will sleep until timeout. Available data: {!r} (length = {}), numberOfBytes: {}'.format(
                           self._waiting_data, len(self._waiting_data), numberOfBytes))
        time.sleep(self.timeout)
        returnstring = self._waiting_data
        self._waiting_data = NO_DATA_PRESENT
    # TODO Adapt the behavior to better mimic the Windows behavior
    if VERBOSE:
        _print_out('Dummy_serial read return data: {!r} (has length {})\n'.format(returnstring, len(returnstring)))
    if sys.version_info[0] > 2:
        # Convert types to make it python3 compatible
        return bytes(returnstring, encoding='latin1')
    else:
        return returnstring
Read from a port on dummy_serial .
571
9
237,821
def _embedPayload(slaveaddress, mode, functioncode, payloaddata):
    """Build a request string from slave address, function code and payload.

    ASCII mode hex-encodes the message, wraps it in the ASCII
    header/footer and appends an LRC checksum; RTU mode appends a CRC.
    """
    _checkSlaveaddress(slaveaddress)
    _checkMode(mode)
    _checkFunctioncode(functioncode, None)
    _checkString(payloaddata, description='payload')
    core = _numToOneByteString(slaveaddress) + _numToOneByteString(functioncode) + payloaddata
    if mode == MODE_ASCII:
        return _ASCII_HEADER + _hexencode(core) + _hexencode(_calculateLrcString(core)) + _ASCII_FOOTER
    return core + _calculateCrcString(core)
Build a request from the slaveaddress the function code and the payload data .
156
15
237,822
def _predictResponseSize ( mode , functioncode , payloadToSlave ) : MIN_PAYLOAD_LENGTH = 4 # For implemented functioncodes here BYTERANGE_FOR_GIVEN_SIZE = slice ( 2 , 4 ) # Within the payload NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION = 4 NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD = 1 RTU_TO_ASCII_PAYLOAD_FACTOR = 2 NUMBER_OF_RTU_RESPONSE_STARTBYTES = 2 NUMBER_OF_RTU_RESPONSE_ENDBYTES = 2 NUMBER_OF_ASCII_RESPONSE_STARTBYTES = 5 NUMBER_OF_ASCII_RESPONSE_ENDBYTES = 4 # Argument validity testing _checkMode ( mode ) _checkFunctioncode ( functioncode , None ) _checkString ( payloadToSlave , description = 'payload' , minlength = MIN_PAYLOAD_LENGTH ) # Calculate payload size if functioncode in [ 5 , 6 , 15 , 16 ] : response_payload_size = NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION elif functioncode in [ 1 , 2 , 3 , 4 ] : given_size = _twoByteStringToNum ( payloadToSlave [ BYTERANGE_FOR_GIVEN_SIZE ] ) if functioncode == 1 or functioncode == 2 : # Algorithm from MODBUS APPLICATION PROTOCOL SPECIFICATION V1.1b number_of_inputs = given_size response_payload_size = NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD + number_of_inputs // 8 + ( 1 if number_of_inputs % 8 else 0 ) elif functioncode == 3 or functioncode == 4 : number_of_registers = given_size response_payload_size = NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD + number_of_registers * _NUMBER_OF_BYTES_PER_REGISTER else : raise ValueError ( 'Wrong functioncode: {}. The payload is: {!r}' . format ( functioncode , payloadToSlave ) ) # Calculate number of bytes to read if mode == MODE_ASCII : return NUMBER_OF_ASCII_RESPONSE_STARTBYTES + response_payload_size * RTU_TO_ASCII_PAYLOAD_FACTOR + NUMBER_OF_ASCII_RESPONSE_ENDBYTES else : return NUMBER_OF_RTU_RESPONSE_STARTBYTES + response_payload_size + NUMBER_OF_RTU_RESPONSE_ENDBYTES
Calculate the number of bytes that should be received from the slave .
660
15
237,823
def _calculate_minimum_silent_period(baudrate):
    """Return the inter-message silent period (in seconds) for a baudrate.

    The Modbus spec requires at least 3.5 character times of silence
    between messages; one character is 11 bit times.
    """
    _checkNumerical(baudrate, minvalue=1, description='baudrate')  # Avoid division by zero
    BITTIMES_PER_CHARACTERTIME = 11
    MINIMUM_SILENT_CHARACTERTIMES = 3.5
    bittime = 1 / float(baudrate)
    return bittime * BITTIMES_PER_CHARACTERTIME * MINIMUM_SILENT_CHARACTERTIMES
Calculate the silent period length to comply with the 3 . 5 character silence between messages .
120
19
237,824
def _numToTwoByteString(value, numberOfDecimals=0, LsbFirst=False, signed=False):
    """Convert a numerical value to a two-byte string, possibly scaling it.

    The value is multiplied by 10**numberOfDecimals, truncated to int,
    and packed as a (un)signed short with the chosen byte order.
    """
    _checkNumerical(value, description='inputvalue')
    _checkInt(numberOfDecimals, minvalue=0, description='number of decimals')
    _checkBool(LsbFirst, description='LsbFirst')
    _checkBool(signed, description='signed parameter')
    integer = int(float(value) * 10 ** numberOfDecimals)
    byteorder = '<' if LsbFirst else '>'   # Little- vs big-endian
    typecode = 'h' if signed else 'H'      # (Un)signed short (2 bytes)
    packed = _pack(byteorder + typecode, integer)
    assert len(packed) == 2
    return packed
Convert a numerical value to a two - byte string possibly scaling it .
206
15
237,825
def _twoByteStringToNum(bytestring, numberOfDecimals=0, signed=False):
    """Convert a two-byte string to a numerical value, possibly scaling it.

    Returns an int when numberOfDecimals is 0, otherwise a float divided
    by 10**numberOfDecimals.
    """
    _checkString(bytestring, minlength=2, maxlength=2, description='bytestring')
    _checkInt(numberOfDecimals, minvalue=0, description='number of decimals')
    _checkBool(signed, description='signed parameter')
    # Big-endian (un)signed short
    formatcode = '>h' if signed else '>H'
    raw = _unpack(formatcode, bytestring)
    if numberOfDecimals == 0:
        return raw
    return raw / float(10 ** numberOfDecimals)
Convert a two - byte string to a numerical value possibly scaling it .
183
15
237,826
def _pack(formatstring, value):
    """Pack a value into a bytestring via struct, returned as a latin-1 str.

    Raises:
        ValueError: if the conversion fails (out-of-range value, wrong type).
    """
    _checkString(formatstring, description='formatstring', minlength=1)
    try:
        result = struct.pack(formatstring, value)
    except Exception:
        # Was a bare 'except:' which also swallowed KeyboardInterrupt and
        # SystemExit; 'except Exception' keeps the conversion-failure
        # behaviour without catching those.
        errortext = 'The value to send is probably out of range, as the num-to-bytestring conversion failed.'
        errortext += ' Value: {0!r} Struct format code is: {1}'
        raise ValueError(errortext.format(value, formatstring))
    if sys.version_info[0] > 2:
        return str(result, encoding='latin1')  # Convert types to make it Python3 compatible
    return result
Pack a value into a bytestring .
144
9
237,827
def _unpack(formatstring, packed):
    """Unpack a (latin-1 str) bytestring into a value via struct.

    Raises:
        ValueError: if the conversion fails (wrong length, bad format).
    """
    _checkString(formatstring, description='formatstring', minlength=1)
    _checkString(packed, description='packed string', minlength=1)
    if sys.version_info[0] > 2:
        packed = bytes(packed, encoding='latin1')  # Convert types to make it Python3 compatible
    try:
        value = struct.unpack(formatstring, packed)[0]
    except Exception:
        # Was a bare 'except:' which also swallowed KeyboardInterrupt and
        # SystemExit; 'except Exception' keeps the conversion-failure
        # behaviour without catching those.
        errortext = 'The received bytestring is probably wrong, as the bytestring-to-num conversion failed.'
        errortext += ' Bytestring: {0!r} Struct format code is: {1}'
        raise ValueError(errortext.format(packed, formatstring))
    return value
Unpack a bytestring into a value .
169
10
237,828
def _hexencode(bytestring, insert_spaces=False):
    """Convert a byte string to an uppercase hex-encoded string.

    Optionally separates the byte pairs with spaces.
    """
    _checkString(bytestring, description='byte string')
    separator = ' ' if insert_spaces else ''
    # Use plain string formatting instead of binhex.hexlify,
    # in order to have it Python 2.x and 3.x compatible
    return separator.join('{0:02X}'.format(ord(ch)) for ch in bytestring).strip()
Convert a byte string to a hex encoded string .
128
11
237,829
def _hexdecode(hexstring):
    """Convert a hex-encoded string to a byte string.

    Note: for Python 3 the appropriate idiom would be
    'raise TypeError(new_error_message) from err', but the Python 2
    interpreter rejects that syntax, so we live with the
    'During handling of the above exception...' warning in Python 3.

    Raises:
        ValueError: if the input has odd length.
        TypeError: if the input contains non-hex characters.
    """
    _checkString(hexstring, description='hexstring')
    if len(hexstring) % 2 != 0:
        raise ValueError('The input hexstring must be of even length. Given: {!r}'.format(hexstring))
    if sys.version_info[0] > 2:
        by = bytes(hexstring, 'latin1')
        try:
            return str(binascii.unhexlify(by), encoding='latin1')
        except binascii.Error as err:
            new_error_message = 'Hexdecode reported an error: {!s}. Input hexstring: {}'.format(err.args[0], hexstring)
            raise TypeError(new_error_message)
    try:
        return hexstring.decode('hex')
    except TypeError as err:
        raise TypeError('Hexdecode reported an error: {}. Input hexstring: {}'.format(err.message, hexstring))
Convert a hex encoded string to a byte string .
286
11
237,830
def _bitResponseToValue(bytestring):
    """Convert a one-byte bit-response string to 0 or 1.

    Raises:
        ValueError: if the byte is neither '\\x00' nor '\\x01'.
    """
    _checkString(bytestring, description='bytestring', minlength=1, maxlength=1)
    if bytestring == '\x01':
        return 1
    if bytestring == '\x00':
        return 0
    raise ValueError('Could not convert bit response to a value. Input: {0!r}'.format(bytestring))
Convert a response string to a numerical value .
119
10
237,831
def _createBitpattern(functioncode, value):
    """Create the bit pattern used for writing single bits.

    Function code 5 ('write single coil') uses a two-byte pattern;
    function code 15 ('write multiple coils') uses a single byte.
    """
    _checkFunctioncode(functioncode, [5, 15])
    _checkInt(value, minvalue=0, maxvalue=1, description='inputvalue')
    if functioncode == 5:
        return '\xff\x00' if value else '\x00\x00'
    # functioncode == 15 (the only other value allowed above)
    return '\x01' if value else '\x00'
Create the bit pattern that is used for writing single bits .
105
12
237,832
def _twosComplement(x, bits=16):
    """Return the two's-complement (unsigned) representation of an integer.

    Raises:
        ValueError: if x does not fit in a signed value of the given bit width.
    """
    _checkInt(bits, minvalue=0, description='number of bits')
    _checkInt(x, description='input')
    upperlimit = 2 ** (bits - 1) - 1
    lowerlimit = -2 ** (bits - 1)
    if not lowerlimit <= x <= upperlimit:
        raise ValueError(
            'The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.'.format(
                x, lowerlimit, upperlimit, bits))
    # Negative values wrap around into the upper half of the unsigned range
    return x if x >= 0 else x + 2 ** bits
Calculate the two's complement of an integer.
150
11
237,833
def _setBitOn(x, bitNum):
    """Return x with bit number bitNum set to 1."""
    _checkInt(x, minvalue=0, description='input value')
    _checkInt(bitNum, minvalue=0, description='bitnumber')
    mask = 1 << bitNum
    return x | mask
Set bit bitNum to True .
58
7
237,834
def _calculateCrcString(inputstring):
    """Return the Modbus CRC-16 of inputstring as a two-byte string, LSB first."""
    _checkString(inputstring, description='input CRC string')
    crc = 0xFFFF  # Preload a 16-bit register with ones
    for character in inputstring:
        # Table-driven CRC: shift out a byte and mix in the next one
        crc = (crc >> 8) ^ _CRC16TABLE[(crc ^ ord(character)) & 0xFF]
    return _numToTwoByteString(crc, LsbFirst=True)
Calculate CRC - 16 for Modbus .
92
10
237,835
def _calculateLrcString(inputstring):
    """Return the Modbus ASCII LRC of inputstring as a one-byte string."""
    _checkString(inputstring, description='input LRC string')
    total = sum(ord(character) for character in inputstring)
    # LRC is the two's complement of the low byte of the sum
    lrc = ((total ^ 0xFF) + 1) & 0xFF
    return _numToOneByteString(lrc)
Calculate LRC for Modbus .
80
9
237,836
def _checkMode(mode):
    """Check that the Modbus mode is valid ('rtu' or 'ascii').

    Raises:
        TypeError: if mode is not a string.
        ValueError: if mode is neither MODE_RTU nor MODE_ASCII.
    """
    if not isinstance(mode, str):
        raise TypeError('The {0} should be a string. Given: {1!r}'.format("mode", mode))
    if mode not in [MODE_RTU, MODE_ASCII]:
        # Fixed spelling in the error message: 'Unreconized' -> 'Unrecognized'
        raise ValueError("Unrecognized Modbus mode given. Must be 'rtu' or 'ascii' but {0!r} was given.".format(mode))
Check that the Modbus mode is valid.
109
10
237,837
def _checkFunctioncode(functioncode, listOfAllowedValues=[]):
    """Check that functioncode is a valid function code within listOfAllowedValues.

    Passing None for listOfAllowedValues skips the membership check.
    NOTE(review): the mutable default list is never mutated here, so it
    is safe; changing it to None would alter the call-with-default
    behaviour (an empty list rejects every function code).
    """
    FUNCTIONCODE_MIN = 1
    FUNCTIONCODE_MAX = 127
    _checkInt(functioncode, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode')
    if listOfAllowedValues is None:
        return
    if not isinstance(listOfAllowedValues, list):
        raise TypeError('The listOfAllowedValues should be a list. Given: {0!r}'.format(listOfAllowedValues))
    for allowed in listOfAllowedValues:
        _checkInt(allowed, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode inside listOfAllowedValues')
    if functioncode not in listOfAllowedValues:
        raise ValueError('Wrong function code: {0}, allowed values are {1!r}'.format(functioncode, listOfAllowedValues))
Check that the given functioncode is in the listOfAllowedValues .
215
15
237,838
def _checkResponseByteCount(payload):
    """Check that the byte count in the response matches the payload's actual length.

    The first payload byte announces the number of data bytes that follow.
    """
    POSITION_FOR_GIVEN_NUMBER = 0
    NUMBER_OF_BYTES_TO_SKIP = 1
    _checkString(payload, minlength=1, description='payload')
    givenNumberOfDatabytes = ord(payload[POSITION_FOR_GIVEN_NUMBER])
    countedNumberOfDatabytes = len(payload) - NUMBER_OF_BYTES_TO_SKIP
    if givenNumberOfDatabytes != countedNumberOfDatabytes:
        errortemplate = 'Wrong given number of bytes in the response: {0}, but counted is {1} as data payload length is {2}.' + ' The data payload is: {3!r}'
        raise ValueError(errortemplate.format(givenNumberOfDatabytes, countedNumberOfDatabytes, len(payload), payload))
Check that the number of bytes as given in the response is correct .
201
14
237,839
def _checkResponseRegisterAddress(payload, registeraddress):
    """Check that the start address echoed in the response equals the commanded one.

    Raises:
        ValueError: if the two-byte start address in the payload differs
            from registeraddress.
    """
    _checkString(payload, minlength=2, description='payload')
    _checkRegisteraddress(registeraddress)
    BYTERANGE_FOR_STARTADDRESS = slice(0, 2)
    bytesForStartAddress = payload[BYTERANGE_FOR_STARTADDRESS]
    receivedStartAddress = _twoByteStringToNum(bytesForStartAddress)
    if receivedStartAddress != registeraddress:
        # Fixed spelling in the error message: 'adress' -> 'address'
        raise ValueError('Wrong given write start address: {0}, but commanded is {1}. The data payload is: {2!r}'.format(
            receivedStartAddress, registeraddress, payload))
Check that the start address as given in the response is correct.
146
14
237,840
def _checkResponseNumberOfRegisters(payload, numberOfRegisters):
    """Check that the register count echoed in the response equals the commanded one."""
    _checkString(payload, minlength=4, description='payload')
    _checkInt(numberOfRegisters, minvalue=1, maxvalue=0xFFFF, description='numberOfRegisters')
    BYTERANGE_FOR_NUMBER_OF_REGISTERS = slice(2, 4)
    # Renamed local (was misspelled 'receivedNumberOfWrittenReisters')
    receivedNumberOfWrittenRegisters = _twoByteStringToNum(payload[BYTERANGE_FOR_NUMBER_OF_REGISTERS])
    if receivedNumberOfWrittenRegisters != numberOfRegisters:
        raise ValueError('Wrong number of registers to write in the response: {0}, but commanded is {1}. The data payload is: {2!r}'.format(
            receivedNumberOfWrittenRegisters, numberOfRegisters, payload))
Check that the number of written registers as given in the response is correct .
198
15
237,841
def _checkResponseWriteData(payload, writedata):
    """Check that the write data echoed in the response equals the commanded data."""
    _checkString(payload, minlength=4, description='payload')
    _checkString(writedata, minlength=2, maxlength=2, description='writedata')
    # The echoed write data sits in payload bytes 2-3
    receivedWritedata = payload[2:4]
    if receivedWritedata != writedata:
        raise ValueError('Wrong write data in the response: {0!r}, but commanded is {1!r}. The data payload is: {2!r}'.format(
            receivedWritedata, writedata, payload))
Check that the write data as given in the response is correct .
153
13
237,842
def _checkString(inputstring, description, minlength=0, maxlength=None):
    """Check that inputstring is a str whose length is within [minlength, maxlength].

    Raises:
        TypeError: if any argument has the wrong type.
        ValueError: on a length violation or inconsistent length limits.
    """
    # Type checking
    if not isinstance(description, str):
        raise TypeError('The description should be a string. Given: {0!r}'.format(description))
    if not isinstance(inputstring, str):
        raise TypeError('The {0} should be a string. Given: {1!r}'.format(description, inputstring))
    if not isinstance(maxlength, (int, type(None))):
        raise TypeError('The maxlength must be an integer or None. Given: {0!r}'.format(maxlength))
    # Check values
    _checkInt(minlength, minvalue=0, maxvalue=None, description='minlength')
    if len(inputstring) < minlength:
        raise ValueError('The {0} is too short: {1}, but minimum value is {2}. Given: {3!r}'.format(
            description, len(inputstring), minlength, inputstring))
    if maxlength is not None:
        if maxlength < 0:
            raise ValueError('The maxlength must be positive. Given: {0}'.format(maxlength))
        if maxlength < minlength:
            raise ValueError('The maxlength must not be smaller than minlength. Given: {0} and {1}'.format(maxlength, minlength))
        if len(inputstring) > maxlength:
            raise ValueError('The {0} is too long: {1}, but maximum value is {2}. Given: {3!r}'.format(
                description, len(inputstring), maxlength, inputstring))
Check that the given string is valid .
376
8
237,843
def _checkInt(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):
    """Check that inputvalue is an integer within the given bounds.

    Uses the name 'long' for Python 2 compatibility — presumably aliased
    to int at module level on Python 3 (verify against the file header).
    Delegates the range checking to _checkNumerical.
    """
    if not isinstance(description, str):
        raise TypeError('The description should be a string. Given: {0!r}'.format(description))
    if not isinstance(inputvalue, (int, long)):
        raise TypeError('The {0} must be an integer. Given: {1!r}'.format(description, inputvalue))
    if not isinstance(minvalue, (int, long, type(None))):
        raise TypeError('The minvalue must be an integer or None. Given: {0!r}'.format(minvalue))
    if not isinstance(maxvalue, (int, long, type(None))):
        raise TypeError('The maxvalue must be an integer or None. Given: {0!r}'.format(maxvalue))
    _checkNumerical(inputvalue, minvalue, maxvalue, description)
Check that the given integer is valid .
225
8
237,844
def _checkNumerical(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):
    """Check that inputvalue is numerical and within the given bounds.

    Uses the name 'long' for Python 2 compatibility — presumably aliased
    to int at module level on Python 3 (verify against the file header).
    """
    # Type checking
    if not isinstance(description, str):
        raise TypeError('The description should be a string. Given: {0!r}'.format(description))
    if not isinstance(inputvalue, (int, long, float)):
        raise TypeError('The {0} must be numerical. Given: {1!r}'.format(description, inputvalue))
    if not isinstance(minvalue, (int, float, long, type(None))):
        raise TypeError('The minvalue must be numeric or None. Given: {0!r}'.format(minvalue))
    if not isinstance(maxvalue, (int, float, long, type(None))):
        raise TypeError('The maxvalue must be numeric or None. Given: {0!r}'.format(maxvalue))
    # Consistency checking
    if minvalue is not None and maxvalue is not None and maxvalue < minvalue:
        raise ValueError('The maxvalue must not be smaller than minvalue. Given: {0} and {1}, respectively.'.format(maxvalue, minvalue))
    # Value checking
    if minvalue is not None and inputvalue < minvalue:
        raise ValueError('The {0} is too small: {1}, but minimum value is {2}.'.format(description, inputvalue, minvalue))
    if maxvalue is not None and inputvalue > maxvalue:
        raise ValueError('The {0} is too large: {1}, but maximum value is {2}.'.format(description, inputvalue, maxvalue))
Check that the given numerical value is valid .
385
9
237,845
def _checkBool(inputvalue, description='inputvalue'):
    """Check that inputvalue is a boolean."""
    _checkString(description, minlength=1, description='description string')
    if not isinstance(inputvalue, bool):
        raise TypeError('The {0} must be boolean. Given: {1!r}'.format(description, inputvalue))
Check that the given inputvalue is a boolean .
76
10
237,846
def _getDiagnosticString():
    """Generate a diagnostic string: module version, platform, paths etc.

    Intended for bug reports; reads only module/interpreter metadata.
    """
    text = '\n## Diagnostic output from minimalmodbus ## \n\n'
    text += 'Minimalmodbus version: ' + __version__ + '\n'
    text += 'Minimalmodbus status: ' + __status__ + '\n'
    text += 'File name (with relative path): ' + __file__ + '\n'
    text += 'Full file path: ' + os.path.abspath(__file__) + '\n\n'
    text += 'pySerial version: ' + serial.VERSION + '\n'
    text += 'pySerial full file path: ' + os.path.abspath(serial.__file__) + '\n\n'
    text += 'Platform: ' + sys.platform + '\n'
    text += 'Filesystem encoding: ' + repr(sys.getfilesystemencoding()) + '\n'
    text += 'Byteorder: ' + sys.byteorder + '\n'
    text += 'Python version: ' + sys.version + '\n'
    text += 'Python version info: ' + repr(sys.version_info) + '\n'
    text += 'Python flags: ' + repr(sys.flags) + '\n'
    text += 'Python argv: ' + repr(sys.argv) + '\n'
    text += 'Python prefix: ' + repr(sys.prefix) + '\n'
    text += 'Python exec prefix: ' + repr(sys.exec_prefix) + '\n'
    text += 'Python executable: ' + repr(sys.executable) + '\n'
    # Was a bare 'except:'; only the attribute being absent should be tolerated
    try:
        text += 'Long info: ' + repr(sys.long_info) + '\n'
    except AttributeError:
        text += 'Long info: (none)\n'  # For Python3 compatibility
    try:
        text += 'Float repr style: ' + repr(sys.float_repr_style) + '\n\n'
    except AttributeError:
        text += 'Float repr style: (none) \n\n'  # For Python 2.6 compatibility
    text += 'Variable __name__: ' + __name__ + '\n'
    text += 'Current directory: ' + os.getcwd() + '\n\n'
    text += 'Python path: \n'
    text += '\n'.join(sys.path) + '\n'
    text += '\n## End of diagnostic output ## \n'
    return text
Generate a diagnostic string showing the module version the platform current directory etc .
536
15
237,847
def read_bit(self, registeraddress, functioncode=2):
    """Read one bit from the slave.

    functioncode must be 1 ('read coils') or 2 ('read discrete inputs').
    Returns the value from the generic command handler.
    """
    _checkFunctioncode(functioncode, [1, 2])
    return self._genericCommand(functioncode, registeraddress)
Read one bit from the slave .
43
7
237,848
def write_bit(self, registeraddress, value, functioncode=5):
    """Write one bit (value 0 or 1) to the slave.

    functioncode must be 5 ('write single coil') or 15 ('write multiple coils').
    """
    _checkFunctioncode(functioncode, [5, 15])
    _checkInt(value, minvalue=0, maxvalue=1, description='input value')
    self._genericCommand(functioncode, registeraddress, value)
Write one bit to the slave .
69
7
237,849
def read_register(self, registeraddress, numberOfDecimals=0, functioncode=3, signed=False):
    """Read an integer from one 16-bit register in the slave, possibly scaling it.

    functioncode must be 3 or 4. numberOfDecimals (0-10) divides the raw
    register content by 10**numberOfDecimals; signed selects two's-
    complement interpretation.
    """
    _checkFunctioncode(functioncode, [3, 4])
    _checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals')
    _checkBool(signed, description='signed')
    return self._genericCommand(functioncode, registeraddress, numberOfDecimals=numberOfDecimals, signed=signed)
Read an integer from one 16 - bit register in the slave possibly scaling it .
114
16
237,850
def write_register(self, registeraddress, value, numberOfDecimals=0, functioncode=16, signed=False):
    """Write a numerical value to one 16-bit register in the slave, possibly scaling it.

    functioncode must be 6 or 16. The value is multiplied by
    10**numberOfDecimals (0-10) before writing; signed selects two's-
    complement encoding.
    """
    _checkFunctioncode(functioncode, [6, 16])
    _checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals')
    _checkBool(signed, description='signed')
    _checkNumerical(value, description='input value')
    self._genericCommand(functioncode, registeraddress, value, numberOfDecimals, signed=signed)
Write an integer to one 16 - bit register in the slave possibly scaling it .
126
16
237,851
def read_float ( self , registeraddress , functioncode = 3 , numberOfRegisters = 2 ) : _checkFunctioncode ( functioncode , [ 3 , 4 ] ) _checkInt ( numberOfRegisters , minvalue = 2 , maxvalue = 4 , description = 'number of registers' ) return self . _genericCommand ( functioncode , registeraddress , numberOfRegisters = numberOfRegisters , payloadformat = 'float' )
Read a floating point number from the slave .
94
9
237,852
def write_float ( self , registeraddress , value , numberOfRegisters = 2 ) : _checkNumerical ( value , description = 'input value' ) _checkInt ( numberOfRegisters , minvalue = 2 , maxvalue = 4 , description = 'number of registers' ) self . _genericCommand ( 16 , registeraddress , value , numberOfRegisters = numberOfRegisters , payloadformat = 'float' )
Write a floating point number to the slave .
92
9
237,853
def read_string ( self , registeraddress , numberOfRegisters = 16 , functioncode = 3 ) : _checkFunctioncode ( functioncode , [ 3 , 4 ] ) _checkInt ( numberOfRegisters , minvalue = 1 , description = 'number of registers for read string' ) return self . _genericCommand ( functioncode , registeraddress , numberOfRegisters = numberOfRegisters , payloadformat = 'string' )
Read a string from the slave .
92
7
237,854
def write_string ( self , registeraddress , textstring , numberOfRegisters = 16 ) : _checkInt ( numberOfRegisters , minvalue = 1 , description = 'number of registers for write string' ) _checkString ( textstring , 'input string' , minlength = 1 , maxlength = 2 * numberOfRegisters ) self . _genericCommand ( 16 , registeraddress , textstring , numberOfRegisters = numberOfRegisters , payloadformat = 'string' )
Write a string to the slave .
104
7
237,855
def write_registers ( self , registeraddress , values ) : if not isinstance ( values , list ) : raise TypeError ( 'The "values parameter" must be a list. Given: {0!r}' . format ( values ) ) _checkInt ( len ( values ) , minvalue = 1 , description = 'length of input list' ) # Note: The content of the list is checked at content conversion. self . _genericCommand ( 16 , registeraddress , values , numberOfRegisters = len ( values ) , payloadformat = 'registers' )
Write integers to 16 - bit registers in the slave .
121
11
237,856
def _playsoundWin ( sound , block = True ) : from ctypes import c_buffer , windll from random import random from time import sleep from sys import getfilesystemencoding def winCommand ( * command ) : buf = c_buffer ( 255 ) command = ' ' . join ( command ) . encode ( getfilesystemencoding ( ) ) errorCode = int ( windll . winmm . mciSendStringA ( command , buf , 254 , 0 ) ) if errorCode : errorBuffer = c_buffer ( 255 ) windll . winmm . mciGetErrorStringA ( errorCode , errorBuffer , 254 ) exceptionMessage = ( '\n Error ' + str ( errorCode ) + ' for command:' '\n ' + command . decode ( ) + '\n ' + errorBuffer . value . decode ( ) ) raise PlaysoundException ( exceptionMessage ) return buf . value alias = 'playsound_' + str ( random ( ) ) winCommand ( 'open "' + sound + '" alias' , alias ) winCommand ( 'set' , alias , 'time format milliseconds' ) durationInMS = winCommand ( 'status' , alias , 'length' ) winCommand ( 'play' , alias , 'from 0 to' , durationInMS . decode ( ) ) if block : sleep ( float ( durationInMS ) / 1000.0 )
Utilizes windll . winmm . Tested and known to work with MP3 and WAVE on Windows 7 with Python 2 . 7 . Probably works with more file formats . Probably works on Windows XP thru Windows 10 . Probably works with all versions of Python .
293
54
237,857
def _playsoundOSX ( sound , block = True ) : from AppKit import NSSound from Foundation import NSURL from time import sleep if '://' not in sound : if not sound . startswith ( '/' ) : from os import getcwd sound = getcwd ( ) + '/' + sound sound = 'file://' + sound url = NSURL . URLWithString_ ( sound ) nssound = NSSound . alloc ( ) . initWithContentsOfURL_byReference_ ( url , True ) if not nssound : raise IOError ( 'Unable to load sound named: ' + sound ) nssound . play ( ) if block : sleep ( nssound . duration ( ) )
Utilizes AppKit . NSSound . Tested and known to work with MP3 and WAVE on OS X 10 . 11 with Python 2 . 7 . Probably works with anything QuickTime supports . Probably works on OS X 10 . 5 and newer . Probably works with all versions of Python .
156
61
237,858
def _playsoundNix ( sound , block = True ) : if not block : raise NotImplementedError ( "block=False cannot be used on this platform yet" ) # pathname2url escapes non-URL-safe characters import os try : from urllib . request import pathname2url except ImportError : # python 2 from urllib import pathname2url import gi gi . require_version ( 'Gst' , '1.0' ) from gi . repository import Gst Gst . init ( None ) playbin = Gst . ElementFactory . make ( 'playbin' , 'playbin' ) if sound . startswith ( ( 'http://' , 'https://' ) ) : playbin . props . uri = sound else : playbin . props . uri = 'file://' + pathname2url ( os . path . abspath ( sound ) ) set_result = playbin . set_state ( Gst . State . PLAYING ) if set_result != Gst . StateChangeReturn . ASYNC : raise PlaysoundException ( "playbin.set_state returned " + repr ( set_result ) ) # FIXME: use some other bus method than poll() with block=False # https://lazka.github.io/pgi-docs/#Gst-1.0/classes/Bus.html bus = playbin . get_bus ( ) bus . poll ( Gst . MessageType . EOS , Gst . CLOCK_TIME_NONE ) playbin . set_state ( Gst . State . NULL )
Play a sound using GStreamer .
346
7
237,859
def remove_rows_matching ( df , column , match ) : df = df . copy ( ) mask = df [ column ] . values != match return df . iloc [ mask , : ]
Return a DataFrame with rows where column values match match are removed .
42
14
237,860
def remove_rows_containing ( df , column , match ) : df = df . copy ( ) mask = [ match not in str ( v ) for v in df [ column ] . values ] return df . iloc [ mask , : ]
Return a DataFrame with rows where column values containing match are removed .
51
14
237,861
def filter_localization_probability ( df , threshold = 0.75 ) : df = df . copy ( ) localization_probability_mask = df [ 'Localization prob' ] . values >= threshold return df . iloc [ localization_probability_mask , : ]
Remove rows with a localization probability below 0 . 75
62
10
237,862
def minimum_valid_values_in_any_group ( df , levels = None , n = 1 , invalid = np . nan ) : df = df . copy ( ) if levels is None : if 'Group' in df . columns . names : levels = [ df . columns . names . index ( 'Group' ) ] # Filter by at least 7 (values in class:timepoint) at least in at least one group if invalid is np . nan : dfx = ~ np . isnan ( df ) else : dfx = df != invalid dfc = dfx . astype ( int ) . sum ( axis = 1 , level = levels ) dfm = dfc . max ( axis = 1 ) >= n mask = dfm . values return df . iloc [ mask , : ]
Filter DataFrame by at least n valid values in at least one group .
168
15
237,863
def search ( df , match , columns = [ 'Proteins' , 'Protein names' , 'Gene names' ] ) : df = df . copy ( ) dft = df . reset_index ( ) mask = np . zeros ( ( dft . shape [ 0 ] , ) , dtype = bool ) idx = [ 'Proteins' , 'Protein names' , 'Gene names' ] for i in idx : if i in dft . columns : mask = mask | np . array ( [ match in str ( l ) for l in dft [ i ] . values ] ) return df . iloc [ mask ]
Search for a given string in a set of columns in a processed DataFrame .
139
16
237,864
def filter_select_columns_intensity ( df , prefix , columns ) : # Note: I use %s.+ (not %s.*) so it forces a match with the prefix string, ONLY if it is followed by something. return df . filter ( regex = '^(%s.+|%s)$' % ( prefix , '|' . join ( columns ) ) )
Filter dataframe to include specified columns retaining any Intensity columns .
84
13
237,865
def filter_intensity ( df , label = "" , with_multiplicity = False ) : label += ".*__\d" if with_multiplicity else "" dft = df . filter ( regex = "^(?!Intensity).*$" ) dfi = df . filter ( regex = '^(.*Intensity.*%s.*__\d)$' % label ) return pd . concat ( [ dft , dfi ] , axis = 1 )
Filter to include only the Intensity values with optional specified label excluding other Intensity measurements but retaining all other columns .
101
23
237,866
def filter_ratio ( df , label = "" , with_multiplicity = False ) : label += ".*__\d" if with_multiplicity else "" dft = df . filter ( regex = "^(?!Ratio).*$" ) dfr = df . filter ( regex = '^(.*Ratio.*%s)$' % label ) return pd . concat ( [ dft , dfr ] , axis = 1 )
Filter to include only the Ratio values with optional specified label excluding other Ratio measurements but retaining all other columns .
98
22
237,867
def read_perseus ( f ) : df = pd . read_csv ( f , delimiter = '\t' , header = [ 0 , 1 , 2 , 3 ] , low_memory = False ) df . columns = pd . MultiIndex . from_tuples ( [ ( x , ) for x in df . columns . get_level_values ( 0 ) ] ) return df
Load a Perseus processed data table
85
7
237,868
def write_perseus ( f , df ) : ### Generate the Perseus like type index FIELD_TYPE_MAP = { 'Amino acid' : 'C' , 'Charge' : 'C' , 'Reverse' : 'C' , 'Potential contaminant' : 'C' , 'Multiplicity' : 'C' , 'Localization prob' : 'N' , 'PEP' : 'N' , 'Score' : 'N' , 'Delta score' : 'N' , 'Score for localization' : 'N' , 'Mass error [ppm]' : 'N' , 'Intensity' : 'N' , 'Position' : 'N' , 'Proteins' : 'T' , 'Positions within proteins' : 'T' , 'Leading proteins' : 'T' , 'Protein names' : 'T' , 'Gene names' : 'T' , 'Sequence window' : 'T' , 'Unique identifier' : 'T' , } def map_field_type ( n , c ) : try : t = FIELD_TYPE_MAP [ c ] except : t = "E" # In the first element, add type indicator if n == 0 : t = "#!{Type}%s" % t return t df = df . copy ( ) df . columns = pd . MultiIndex . from_tuples ( [ ( k , map_field_type ( n , k ) ) for n , k in enumerate ( df . columns ) ] , names = [ "Label" , "Type" ] ) df = df . transpose ( ) . reset_index ( ) . transpose ( ) df . to_csv ( f , index = False , header = False )
Export a dataframe to Perseus ; recreating the format
381
12
237,869
def write_phosphopath_ratio ( df , f , a , * args , * * kwargs ) : timepoint_idx = kwargs . get ( 'timepoint_idx' , None ) proteins = [ get_protein_id ( k ) for k in df . index . get_level_values ( 'Proteins' ) ] amino_acids = df . index . get_level_values ( 'Amino acid' ) positions = _get_positions ( df ) multiplicity = [ int ( k [ - 1 ] ) for k in df . index . get_level_values ( 'Multiplicity' ) ] apos = [ "%s%s" % x for x in zip ( amino_acids , positions ) ] phdfs = [ ] # Convert timepoints to 1-based ordinal. tp_map = set ( ) for c in args : tp_map . add ( c [ timepoint_idx ] ) tp_map = sorted ( tp_map ) for c in args : v = df [ a ] . mean ( axis = 1 ) . values / df [ c ] . mean ( axis = 1 ) . values tp = [ 1 + tp_map . index ( c [ timepoint_idx ] ) ] tps = tp * len ( proteins ) if timepoint_idx else [ 1 ] * len ( proteins ) prar = [ "%s-%s-%d-%d" % x for x in zip ( proteins , apos , multiplicity , tps ) ] phdf = pd . DataFrame ( np . array ( list ( zip ( prar , v ) ) ) ) phdf . columns = [ "ID" , "Ratio" ] phdfs . append ( phdf ) pd . concat ( phdfs ) . to_csv ( f , sep = '\t' , index = None )
Write out the data frame ratio between two groups protein - Rsite - multiplicity - timepoint ID Ratio Q13619 - S10 - 1 - 1 0 . 5 Q9H3Z4 - S10 - 1 - 1 0 . 502 Q6GQQ9 - S100 - 1 - 1 0 . 504 Q86YP4 - S100 - 1 - 1 0 . 506 Q9H307 - S100 - 1 - 1 0 . 508 Q8NEY1 - S1000 - 1 - 1 0 . 51 Q13541 - S101 - 1 - 1 0 . 512 O95785 - S1012 - 2 - 1 0 . 514 O95785 - S1017 - 2 - 1 0 . 516 Q9Y4G8 - S1022 - 1 - 1 0 . 518 P35658 - S1023 - 1 - 1 0 . 52
419
187
237,870
def write_r ( df , f , sep = "," , index_join = "@" , columns_join = "." ) : df = df . copy ( ) df . index = [ "@" . join ( [ str ( s ) for s in v ] ) for v in df . index . values ] df . columns = [ "." . join ( [ str ( s ) for s in v ] ) for v in df . index . values ] df . to_csv ( f , sep = sep )
Export dataframe in a format easily importable to R
108
11
237,871
def gaussian ( df , width = 0.3 , downshift = - 1.8 , prefix = None ) : df = df . copy ( ) imputed = df . isnull ( ) # Keep track of what's real if prefix : mask = np . array ( [ l . startswith ( prefix ) for l in df . columns . values ] ) mycols = np . arange ( 0 , df . shape [ 1 ] ) [ mask ] else : mycols = np . arange ( 0 , df . shape [ 1 ] ) if type ( width ) is not list : width = [ width ] * len ( mycols ) elif len ( mycols ) != len ( width ) : raise ValueError ( "Length of iterable 'width' does not match # of columns" ) if type ( downshift ) is not list : downshift = [ downshift ] * len ( mycols ) elif len ( mycols ) != len ( downshift ) : raise ValueError ( "Length of iterable 'downshift' does not match # of columns" ) for i in mycols : data = df . iloc [ : , i ] mask = data . isnull ( ) . values mean = data . mean ( axis = 0 ) stddev = data . std ( axis = 0 ) m = mean + downshift [ i ] * stddev s = stddev * width [ i ] # Generate a list of random numbers for filling in values = np . random . normal ( loc = m , scale = s , size = df . shape [ 0 ] ) # Now fill them in df . iloc [ mask , i ] = values [ mask ] return df , imputed
Impute missing values by drawing from a normal distribution
365
10
237,872
def _pca_scores ( scores , pc1 = 0 , pc2 = 1 , fcol = None , ecol = None , marker = 'o' , markersize = 30 , label_scores = None , show_covariance_ellipse = True , optimize_label_iter = OPTIMIZE_LABEL_ITER_DEFAULT , * * kwargs ) : fig = plt . figure ( figsize = ( 8 , 8 ) ) ax = fig . add_subplot ( 1 , 1 , 1 ) levels = [ 0 , 1 ] for c in set ( scores . columns . values ) : try : data = scores [ c ] . values . reshape ( 2 , - 1 ) except : continue fc = hierarchical_match ( fcol , c , 'k' ) ec = hierarchical_match ( ecol , c ) if ec is None : ec = fc if type ( markersize ) == str : # Use as a key vs. index value in this levels idx = scores . columns . names . index ( markersize ) s = c [ idx ] elif callable ( markersize ) : s = markersize ( c ) else : s = markersize ax . scatter ( data [ pc1 , : ] , data [ pc2 , : ] , s = s , marker = marker , edgecolors = ec , c = fc ) if show_covariance_ellipse and data . shape [ 1 ] > 2 : cov = data [ [ pc1 , pc2 ] , : ] . T ellip = plot_point_cov ( cov , nstd = 2 , linestyle = 'dashed' , linewidth = 0.5 , edgecolor = ec or fc , alpha = 0.8 ) #**kwargs for ellipse styling ax . add_artist ( ellip ) if label_scores : scores_f = scores . iloc [ [ pc1 , pc2 ] ] idxs = get_index_list ( scores_f . columns . names , label_scores ) texts = [ ] for n , ( x , y ) in enumerate ( scores_f . T . values ) : t = ax . text ( x , y , build_combined_label ( scores_f . columns . values [ n ] , idxs , ', ' ) , bbox = dict ( boxstyle = 'round,pad=0.3' , fc = '#ffffff' , ec = 'none' , alpha = 0.6 ) ) texts . append ( t ) if texts and optimize_label_iter : adjust_text ( texts , lim = optimize_label_iter ) ax . set_xlabel ( scores . index [ pc1 ] , fontsize = 16 ) ax . set_ylabel ( scores . index [ pc2 ] , fontsize = 16 ) fig . tight_layout ( ) return ax
Plot a scores plot for two principal components as AxB scatter plot .
623
14
237,873
def modifiedaminoacids ( df , kind = 'pie' ) : colors = [ '#6baed6' , '#c6dbef' , '#bdbdbd' ] total_aas , quants = analysis . modifiedaminoacids ( df ) df = pd . DataFrame ( ) for a , n in quants . items ( ) : df [ a ] = [ n ] df . sort_index ( axis = 1 , inplace = True ) if kind == 'bar' or kind == 'both' : ax1 = df . plot ( kind = 'bar' , figsize = ( 7 , 7 ) , color = colors ) ax1 . set_ylabel ( 'Number of phosphorylated amino acids' ) ax1 . set_xlabel ( 'Amino acid' ) ax1 . set_xticks ( [ ] ) ylim = np . max ( df . values ) + 1000 ax1 . set_ylim ( 0 , ylim ) _bartoplabel ( ax1 , 100 * df . values [ 0 ] , total_aas , ylim ) ax1 . set_xlim ( ( - 0.3 , 0.3 ) ) return ax if kind == 'pie' or kind == 'both' : dfp = df . T residues = dfp . index . values dfp . index = [ "%.2f%% (%d)" % ( 100 * df [ i ] . values [ 0 ] / total_aas , df [ i ] . values [ 0 ] ) for i in dfp . index . values ] ax2 = dfp . plot ( kind = 'pie' , y = 0 , colors = colors ) ax2 . legend ( residues , loc = 'upper left' , bbox_to_anchor = ( 1.0 , 1.0 ) ) ax2 . set_ylabel ( '' ) ax2 . set_xlabel ( '' ) ax2 . figure . set_size_inches ( 6 , 6 ) for t in ax2 . texts : t . set_fontsize ( 15 ) return ax2 return ax1 , ax2
Generate a plot of relative numbers of modified amino acids in source DataFrame .
455
16
237,874
def venn ( df1 , df2 , df3 = None , labels = None , ix1 = None , ix2 = None , ix3 = None , return_intersection = False , fcols = None ) : try : import matplotlib_venn as mplv except ImportError : raise ImportError ( "To plot venn diagrams, install matplotlib-venn package: pip install matplotlib-venn" ) plt . gcf ( ) . clear ( ) if labels is None : labels = [ "A" , "B" , "C" ] s1 = _process_ix ( df1 . index , ix1 ) s2 = _process_ix ( df2 . index , ix2 ) if df3 is not None : s3 = _process_ix ( df3 . index , ix3 ) kwargs = { } if fcols : kwargs [ 'set_colors' ] = [ fcols [ l ] for l in labels ] if df3 is not None : vn = mplv . venn3 ( [ s1 , s2 , s3 ] , set_labels = labels , * * kwargs ) intersection = s1 & s2 & s3 else : vn = mplv . venn2 ( [ s1 , s2 ] , set_labels = labels , * * kwargs ) intersection = s1 & s2 ax = plt . gca ( ) if return_intersection : return ax , list ( intersection ) else : return ax
Plot a 2 or 3 - part venn diagram showing the overlap between 2 or 3 pandas DataFrames .
341
22
237,875
def sitespeptidesproteins ( df , labels = None , colors = None , site_localization_probability = 0.75 ) : fig = plt . figure ( figsize = ( 4 , 6 ) ) ax = fig . add_subplot ( 1 , 1 , 1 ) shift = 0.5 values = analysis . sitespeptidesproteins ( df , site_localization_probability ) if labels is None : labels = [ 'Sites (Class I)' , 'Peptides' , 'Proteins' ] if colors is None : colors = [ '#756bb1' , '#bcbddc' , '#dadaeb' ] for n , ( c , l , v ) in enumerate ( zip ( colors , labels , values ) ) : ax . fill_between ( [ 0 , 1 , 2 ] , np . array ( [ shift , 0 , shift ] ) + n , np . array ( [ 1 + shift , 1 , 1 + shift ] ) + n , color = c , alpha = 0.5 ) ax . text ( 1 , 0.5 + n , "{}\n{:,}" . format ( l , v ) , ha = 'center' , color = 'k' , fontsize = 16 ) ax . set_xticks ( [ ] ) ax . set_yticks ( [ ] ) ax . set_axis_off ( ) return ax
Plot the number of sites peptides and proteins in the dataset .
306
13
237,876
def _areadist ( ax , v , xr , c , bins = 100 , by = None , alpha = 1 , label = None ) : y , x = np . histogram ( v [ ~ np . isnan ( v ) ] , bins ) x = x [ : - 1 ] if by is None : by = np . zeros ( ( bins , ) ) ax . fill_between ( x , y , by , facecolor = c , alpha = alpha , label = label ) return y
Plot the histogram distribution but as an area plot
107
10
237,877
def hierarchical_timecourse ( df , cluster_cols = True , cluster_rows = False , n_col_clusters = False , n_row_clusters = False , fcol = None , z_score = 0 , method = 'ward' , cmap = cm . PuOr_r , return_clusters = False , rdistance_fn = distance . pdist , cdistance_fn = distance . pdist , xlabel = 'Timepoint' , ylabel = 'log$_2$ Fold Change' ) : dfc , row_clusters , row_denD , col_clusters , col_denD , edges = _cluster ( df , cluster_cols = cluster_cols , cluster_rows = cluster_rows , n_col_clusters = n_col_clusters , n_row_clusters = n_row_clusters , z_score = z_score , method = 'ward' , rdistance_fn = rdistance_fn , cdistance_fn = cdistance_fn ) # FIXME: Need to apply a sort function to the DataFrame to order by the clustering # so we can slice the edges. dfh = dfc . iloc [ row_denD [ 'leaves' ] , col_denD [ 'leaves' ] ] dfh = dfh . mean ( axis = 0 , level = [ 0 , 1 ] ) vmax = np . max ( dfh . values ) color = ScalarMappable ( norm = Normalize ( vmin = 0 , vmax = vmax ) , cmap = viridis ) fig = plt . figure ( figsize = ( 12 , 6 ) ) edges = [ 0 ] + edges + [ dfh . shape [ 1 ] ] for n in range ( len ( edges ) - 1 ) : ax = fig . add_subplot ( 2 , 4 , n + 1 ) dfhf = dfh . iloc [ : , edges [ n ] : edges [ n + 1 ] ] xpos = dfhf . index . get_level_values ( 1 ) mv = dfhf . mean ( axis = 1 ) distances = [ distance . euclidean ( mv , dfhf . values [ : , n ] ) for n in range ( dfhf . shape [ 1 ] ) ] colors = [ color . to_rgba ( v ) for v in distances ] order = np . argsort ( distances ) [ : : - 1 ] for y in order : ax . plot ( xpos , dfhf . values [ : , y ] , c = colors [ y ] , alpha = 0.5 , lw = 1 ) # dfhf.index.get_level_values(1), ax . set_xticks ( xpos ) if n > 3 : ax . set_xticklabels ( xpos ) ax . set_xlabel ( xlabel ) else : ax . set_xticklabels ( [ ] ) if n % 4 != 0 : ax . set_yticklabels ( [ ] ) else : ax . set_ylabel ( ylabel ) ax . set_ylim ( ( - 3 , + 3 ) ) fig . 
subplots_adjust ( hspace = 0.15 , wspace = 0.15 ) if return_clusters : return fig , dfh , edges else : return fig
Hierarchical clustering of samples across timecourse experiment .
728
13
237,878
def subtract_column_median ( df , prefix = 'Intensity ' ) : df = df . copy ( ) df . replace ( [ np . inf , - np . inf ] , np . nan , inplace = True ) mask = [ l . startswith ( prefix ) for l in df . columns . values ] df . iloc [ : , mask ] = df . iloc [ : , mask ] - df . iloc [ : , mask ] . median ( axis = 0 ) return df
Apply column - wise normalisation to expression columns .
107
10
237,879
def get_protein_id_list ( df , level = 0 ) : protein_list = [ ] for s in df . index . get_level_values ( level ) : protein_list . extend ( get_protein_ids ( s ) ) return list ( set ( protein_list ) )
Return a complete list of shortform IDs from a DataFrame
63
12
237,880
def hierarchical_match ( d , k , default = None ) : if d is None : return default if type ( k ) != list and type ( k ) != tuple : k = [ k ] for n , _ in enumerate ( k ) : key = tuple ( k [ 0 : len ( k ) - n ] ) if len ( key ) == 1 : key = key [ 0 ] try : d [ key ] except : pass else : return d [ key ] return default
Match a key against a dict simplifying element at a time
100
12
237,881
def calculate_s0_curve ( s0 , minpval , maxpval , minratio , maxratio , curve_interval = 0.1 ) : mminpval = - np . log10 ( minpval ) mmaxpval = - np . log10 ( maxpval ) maxpval_adjust = mmaxpval - mminpval ax0 = ( s0 + maxpval_adjust * minratio ) / maxpval_adjust edge_offset = ( maxratio - ax0 ) % curve_interval max_x = maxratio - edge_offset if ( max_x > ax0 ) : x = np . arange ( ax0 , max_x , curve_interval ) else : x = np . arange ( max_x , ax0 , curve_interval ) fn = lambda x : 10 ** ( - s0 / ( x - minratio ) - mminpval ) y = fn ( x ) return x , y , fn
Calculate s0 curve for volcano plot .
221
10
237,882
def correlation ( df , rowvar = False ) : # Create a correlation matrix for all correlations # of the columns (filled with na for all values) df = df . copy ( ) maskv = np . ma . masked_where ( np . isnan ( df . values ) , df . values ) cdf = np . ma . corrcoef ( maskv , rowvar = False ) cdf = pd . DataFrame ( np . array ( cdf ) ) cdf . columns = df . columns cdf . index = df . columns cdf = cdf . sort_index ( level = 0 , axis = 1 ) cdf = cdf . sort_index ( level = 0 ) return cdf
Calculate column - wise Pearson correlations using numpy . ma . corrcoef
151
18
237,883
def pca ( df , n_components = 2 , mean_center = False , * * kwargs ) : if not sklearn : assert ( 'This library depends on scikit-learn (sklearn) to perform PCA analysis' ) from sklearn . decomposition import PCA df = df . copy ( ) # We have to zero fill, nan errors in PCA df [ np . isnan ( df ) ] = 0 if mean_center : mean = np . mean ( df . values , axis = 0 ) df = df - mean pca = PCA ( n_components = n_components , * * kwargs ) pca . fit ( df . values . T ) scores = pd . DataFrame ( pca . transform ( df . values . T ) ) . T scores . index = [ 'Principal Component %d (%.2f%%)' % ( ( n + 1 ) , pca . explained_variance_ratio_ [ n ] * 100 ) for n in range ( 0 , scores . shape [ 0 ] ) ] scores . columns = df . columns weights = pd . DataFrame ( pca . components_ ) . T weights . index = df . index weights . columns = [ 'Weights on Principal Component %d' % ( n + 1 ) for n in range ( 0 , weights . shape [ 1 ] ) ] return scores , weights
Principal Component Analysis based on sklearn . decomposition . PCA
300
14
237,884
def plsda ( df , a , b , n_components = 2 , mean_center = False , scale = True , * * kwargs ) : if not sklearn : assert ( 'This library depends on scikit-learn (sklearn) to perform PLS-DA' ) from sklearn . cross_decomposition import PLSRegression df = df . copy ( ) # We have to zero fill, nan errors in PLSRegression df [ np . isnan ( df ) ] = 0 if mean_center : mean = np . mean ( df . values , axis = 0 ) df = df - mean sxa , _ = df . columns . get_loc_level ( a ) sxb , _ = df . columns . get_loc_level ( b ) dfa = df . iloc [ : , sxa ] dfb = df . iloc [ : , sxb ] dff = pd . concat ( [ dfa , dfb ] , axis = 1 ) y = np . ones ( dff . shape [ 1 ] ) y [ np . arange ( dfa . shape [ 1 ] ) ] = 0 plsr = PLSRegression ( n_components = n_components , scale = scale , * * kwargs ) plsr . fit ( dff . values . T , y ) # Apply the generated model to the original data x_scores = plsr . transform ( df . values . T ) scores = pd . DataFrame ( x_scores . T ) scores . index = [ 'Latent Variable %d' % ( n + 1 ) for n in range ( 0 , scores . shape [ 0 ] ) ] scores . columns = df . columns weights = pd . DataFrame ( plsr . x_weights_ ) weights . index = df . index weights . columns = [ 'Weights on Latent Variable %d' % ( n + 1 ) for n in range ( 0 , weights . shape [ 1 ] ) ] loadings = pd . DataFrame ( plsr . x_loadings_ ) loadings . index = df . index loadings . columns = [ 'Loadings on Latent Variable %d' % ( n + 1 ) for n in range ( 0 , loadings . shape [ 1 ] ) ] return scores , weights , loadings
Partial Least Squares Discriminant Analysis based on sklearn . cross_decomposition . PLSRegression
500
25
237,885
def enrichment_from_evidence ( dfe , modification = "Phospho (STY)" ) : dfe = dfe . reset_index ( ) . set_index ( 'Experiment' ) dfe [ 'Modifications' ] = np . array ( [ modification in m for m in dfe [ 'Modifications' ] ] ) dfe = dfe . set_index ( 'Modifications' , append = True ) dfes = dfe . sum ( axis = 0 , level = [ 0 , 1 ] ) . T columns = dfes . sum ( axis = 1 , level = 0 ) . columns total = dfes . sum ( axis = 1 , level = 0 ) . values . flatten ( ) # Total values modified = dfes . iloc [ 0 , dfes . columns . get_level_values ( 'Modifications' ) . values ] . values # Modified enrichment = modified / total return pd . DataFrame ( [ enrichment ] , columns = columns , index = [ '% Enrichment' ] )
Calculate relative enrichment of peptide modifications from evidence . txt .
220
15
237,886
def enrichment_from_msp ( dfmsp , modification = "Phospho (STY)" ) : dfmsp [ 'Modifications' ] = np . array ( [ modification in m for m in dfmsp [ 'Modifications' ] ] ) dfmsp = dfmsp . set_index ( [ 'Modifications' ] ) dfmsp = dfmsp . filter ( regex = 'Intensity ' ) dfmsp [ dfmsp == 0 ] = np . nan df_r = dfmsp . sum ( axis = 0 , level = 0 ) modified = df_r . loc [ True ] . values total = df_r . sum ( axis = 0 ) . values enrichment = modified / total return pd . DataFrame ( [ enrichment ] , columns = dfmsp . columns , index = [ '% Enrichment' ] )
Calculate relative enrichment of peptide modifications from modificationSpecificPeptides . txt .
185
19
237,887
def sitespeptidesproteins ( df , site_localization_probability = 0.75 ) : sites = filters . filter_localization_probability ( df , site_localization_probability ) [ 'Sequence window' ] peptides = set ( df [ 'Sequence window' ] ) proteins = set ( [ str ( p ) . split ( ';' ) [ 0 ] for p in df [ 'Proteins' ] ] ) return len ( sites ) , len ( peptides ) , len ( proteins )
Generate summary count of modified sites peptides and proteins in a processed dataset DataFrame .
118
18
237,888
def modifiedaminoacids ( df ) : amino_acids = list ( df [ 'Amino acid' ] . values ) aas = set ( amino_acids ) quants = { } for aa in aas : quants [ aa ] = amino_acids . count ( aa ) total_aas = len ( amino_acids ) return total_aas , quants
Calculate the number of modified amino acids in supplied DataFrame .
88
14
237,889
def build_index_from_design ( df , design , remove_prefix = None , types = None , axis = 1 , auto_convert_numeric = True , unmatched_columns = 'index' ) : df = df . copy ( ) if 'Label' not in design . index . names : design = design . set_index ( 'Label' ) if remove_prefix is None : remove_prefix = [ ] if type ( remove_prefix ) is str : remove_prefix = [ remove_prefix ] unmatched_for_index = [ ] names = design . columns . values idx_levels = len ( names ) indexes = [ ] # Convert numeric only columns_to_combine; except index if auto_convert_numeric : design = design . apply ( pd . to_numeric , errors = "ignore" ) # The match columns are always strings, so the index must also be design . index = design . index . astype ( str ) # Apply type settings if types : for n , t in types . items ( ) : if n in design . columns . values : design [ n ] = design [ n ] . astype ( t ) # Build the index for lo in df . columns . values : l = copy ( lo ) for s in remove_prefix : l = l . replace ( s , '' ) # Remove trailing/forward spaces l = l . strip ( ) # Convert to numeric if possible l = numeric ( l ) # Attempt to match to the labels try : # Index idx = design . loc [ str ( l ) ] except : if unmatched_columns : unmatched_for_index . append ( lo ) else : # No match, fill with None idx = tuple ( [ None ] * idx_levels ) indexes . append ( idx ) else : # We have a matched row, store it idx = tuple ( idx . values ) indexes . append ( idx ) if axis == 0 : df . index = pd . MultiIndex . from_tuples ( indexes , names = names ) else : # If using unmatched for index, append if unmatched_columns == 'index' : df = df . set_index ( unmatched_for_index , append = True ) elif unmatched_columns == 'drop' : df = df . drop ( unmatched_for_index , axis = 1 ) df . columns = pd . MultiIndex . from_tuples ( indexes , names = names ) df = df . sort_index ( axis = 1 ) return df
Build a MultiIndex from a design table .
535
9
237,890
def build_index_from_labels ( df , indices , remove_prefix = None , types = None , axis = 1 ) : df = df . copy ( ) if remove_prefix is None : remove_prefix = [ ] if types is None : types = { } idx = [ df . index , df . columns ] [ axis ] indexes = [ ] for l in idx . get_level_values ( 0 ) : for s in remove_prefix : l = l . replace ( s + " " , '' ) ixr = [ ] for n , m in indices : m = re . search ( m , l ) if m : r = m . group ( 1 ) if n in types : # Map this value to a new type r = types [ n ] ( r ) else : r = None ixr . append ( r ) indexes . append ( tuple ( ixr ) ) if axis == 0 : df . index = pd . MultiIndex . from_tuples ( indexes , names = [ n for n , _ in indices ] ) else : df . columns = pd . MultiIndex . from_tuples ( indexes , names = [ n for n , _ in indices ] ) return df
Build a MultiIndex from a list of labels and matching regex
259
12
237,891
def combine_expression_columns ( df , columns_to_combine , remove_combined = True ) : df = df . copy ( ) for ca , cb in columns_to_combine : df [ "%s_(x+y)/2_%s" % ( ca , cb ) ] = ( df [ ca ] + df [ cb ] ) / 2 if remove_combined : for ca , cb in columns_to_combine : df . drop ( [ ca , cb ] , inplace = True , axis = 1 ) return df
Combine expression columns calculating the mean for 2 columns
123
10
237,892
def expand_side_table(df):
    """Perform the equivalent of Perseus' "expand side table": fold the
    MaxQuant multiplicity columns (``...___1``/``...___2``/``...___3``) down
    onto duplicate rows, one row per multiplicity.

    Requires an ``id`` column; row ids are made unique again by appending the
    multiplicity tag. The original index is restored at the end if it was
    named.  NOTE(review): assumes multiplicity suffixes are exactly the
    4-character strings ___1/___2/___3 — confirm for other multiplicities.
    """
    df = df.copy()
    idx = df.index.names
    df.reset_index(inplace=True)

    def strip_multiplicity(df):
        # Drop the 4-character ___N suffix from every column label.
        df.columns = [c[:-4] for c in df.columns]
        return df

    def strip_multiple(s):
        # Remove a trailing ___1/___2/___3 suffix, if present.
        for sr in ['___1', '___2', '___3']:
            if s.endswith(sr):
                s = s[:-4]
        return s

    # Columns whose label does NOT end in a multiplicity suffix.
    base = df.filter(regex='.*(?<!___\d)$')
    # Remove columns that will match ripped multiplicity columns
    for c in df.columns.values:
        if strip_multiple(c) != c and strip_multiple(c) in list(base.columns.values):
            base.drop(strip_multiple(c), axis=1, inplace=True)
    # One frame per multiplicity: suffix columns stripped, tag recorded,
    # shared base columns re-attached.
    multi1 = df.filter(regex='^.*___1$')
    multi1 = strip_multiplicity(multi1)
    multi1['Multiplicity'] = '___1'
    multi1 = pd.concat([multi1, base], axis=1)
    multi2 = df.filter(regex='^.*___2$')
    multi2 = strip_multiplicity(multi2)
    multi2['Multiplicity'] = '___2'
    multi2 = pd.concat([multi2, base], axis=1)
    multi3 = df.filter(regex='^.*___3$')
    multi3 = strip_multiplicity(multi3)
    multi3['Multiplicity'] = '___3'
    multi3 = pd.concat([multi3, base], axis=1)
    # Stack the three multiplicity frames as rows.
    df = pd.concat([multi1, multi2, multi3], axis=0)
    # Re-uniquify row ids by appending the multiplicity tag.
    df['id'] = ["%s%s" % (a, b) for a, b in zip(df['id'], df['Multiplicity'])]
    if idx[0] is not None:
        df.set_index(idx, inplace=True)
    return df
Perform equivalent of expand side table in Perseus by folding Multiplicity columns down onto duplicate rows
477
20
237,893
def apply_experimental_design(df, f, prefix='Intensity '):
    """Load a MaxQuant experimental-design template and use it to rename the
    data columns.

    Each column label has ``prefix`` stripped and is looked up in the
    template's ``Experiment`` column; matching columns are renamed to the
    template's ``Name`` value, unmatched columns keep their original label.

    :param df: DataFrame whose columns are renamed
    :param f: path (or buffer) of the tab-separated design template
    :param prefix: prefix separating the experiment id from the column label
    :return: a copy of ``df`` with renamed columns
    """
    df = df.copy()
    design = pd.read_csv(f, sep='\t', header=0).set_index('Experiment')

    def _lookup(label):
        # Map "<prefix><experiment>" to the template Name; pass through
        # anything that is not a known experiment.
        try:
            return design.loc[label.replace(prefix, '')]['Name']
        except (IndexError, KeyError):
            return label

    df.columns = [_lookup(label) for label in df.columns.values]
    return df
Load the experimental design template from MaxQuant and use it to apply the label names to the data columns .
141
21
237,894
def transform_expression_columns(df, fn=np.log2, prefix='Intensity '):
    """Apply ``fn`` to every expression column (labels starting with
    ``prefix``) and normalise any resulting infinities to NaN.

    :param df: DataFrame with expression columns
    :param fn: vectorised transformation (default: log2)
    :param prefix: label prefix identifying expression columns
    :return: a transformed copy of ``df``
    """
    df = df.copy()
    labels = df.columns.values
    selected = np.fromiter((c.startswith(prefix) for c in labels), dtype=bool, count=len(labels))
    df.iloc[:, selected] = fn(df.iloc[:, selected])
    # e.g. log2 of zero gives -inf; treat all infinities as missing values.
    df.replace([np.inf, -np.inf], np.nan, inplace=True)
    return df
Apply transformation to expression columns .
105
6
237,895
def fold_columns_to_rows(df, levels_from=2):
    """Take the first ``levels_from`` levels of the column index and fold them
    down into the row index.

    This destroys the existing row index; the previous rows appear as columns
    under the remaining column-index level(s).

    :param df: DataFrame whose columns form a MultiIndex with more than
        ``levels_from`` levels
    :param levels_from: number of leading column-index levels to fold down
    :return: the restructured DataFrame
    """
    df = df.copy()
    df.reset_index(inplace=True, drop=True)  # Wipe out the current index
    df = df.T
    # Build all index combinations from the first `levels_from` levels.
    a = [list(set(df.index.get_level_values(i))) for i in range(0, levels_from)]
    combinations = list(itertools.product(*a))
    names = df.index.names[:levels_from]
    concats = []
    for c in combinations:
        try:
            dfcc = df.loc[c]
        except KeyError:
            # This combination does not occur in the data; skip it.
            continue
        else:
            # Silly pandas: a 1-D selection (the lookup collapsed to a
            # Series) cannot take the MultiIndex columns below — skip it.
            if len(dfcc.shape) == 1:
                continue
            dfcc.columns = pd.MultiIndex.from_tuples([c] * dfcc.shape[1], names=names)
            concats.append(dfcc)
    # Concatenate
    dfc = pd.concat(concats, axis=1)
    dfc.sort_index(axis=1, inplace=True)
    # Fix name if collapsed
    if dfc.index.name is None:
        dfc.index.name = df.index.names[-1]
    return dfc
Take levels from the columns and fold them down into the row index . This destroys the existing index ; existing rows will appear as columns under the new column index
274
31
237,896
def args(self, args):
    """Store additional arguments to forward to the fitness function."""
    self._args = args
    message = 'Args set to {}'.format(args)
    self._logger.log('debug', message)
Set additional arguments to be passed to the fitness function
38
10
237,897
def minimize(self, minimize):
    """Configure whether the ABC minimizes (True) or maximizes the fitness
    function's return value or derived score."""
    self._minimize = minimize
    message = 'Minimize set to {}'.format(minimize)
    self._logger.log('debug', message)
Configures the ABC to minimize fitness function return value or derived score
40
13
237,898
def num_employers(self, num_employers):
    """Set the employer-bee count (floored at two) and derive the scout
    limit from it (employers * number of value ranges)."""
    count = num_employers
    if count < 2:
        # At least two employers are required for the algorithm to work.
        self._logger.log('warn', 'Two employers are needed: setting to two')
        count = 2
    self._num_employers = count
    self._logger.log('debug', 'Number of employers set to {}'.format(count))
    self._limit = count * len(self._value_ranges)
    self._logger.log('debug', 'Limit set to {}'.format(self._limit))
Sets the number of employer bees ; at least two are required
139
13
237,899
def processes(self, processes):
    """Set the number of concurrent processes used for fitness evaluation.

    A multiprocessing pool is created only when ``processes`` is greater
    than one; otherwise evaluation runs in the main process and
    ``self._pool`` is None.

    BUG FIX: the previous code branched on the stale ``self._processes``
    value instead of the new argument (so switching 1 -> N never created a
    pool and N -> 1 created a useless Pool(1)) and never recorded the new
    setting on the instance.
    """
    # Tear down any existing pool before reconfiguring.
    if getattr(self, '_pool', None) is not None:
        self._pool.close()
        self._pool.join()
    if processes > 1:
        self._pool = multiprocessing.Pool(processes)
    else:
        self._pool = None
    self._processes = processes
    self._logger.log('debug', 'Number of processes set to {}'.format(processes))
Set the number of concurrent processes the ABC will utilize for fitness function evaluation ; if < = 1 single process is used
81
23