| repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
|---|---|---|---|---|---|---|---|---|---|---|---|
pyhys/minimalmodbus
|
minimalmodbus.py
|
_createBitpattern
|
def _createBitpattern(functioncode, value):
"""Create the bit pattern that is used for writing single bits.
This is basically a storage of numerical constants.
Args:
* functioncode (int): can be 5 or 15
* value (int): can be 0 or 1
Returns:
The bit pattern (string).
Raises:
TypeError, ValueError
"""
_checkFunctioncode(functioncode, [5, 15])
_checkInt(value, minvalue=0, maxvalue=1, description='inputvalue')
if functioncode == 5:
if value == 0:
return '\x00\x00'
else:
return '\xff\x00'
elif functioncode == 15:
if value == 0:
return '\x00'
else:
return '\x01'
|
python
|
def _createBitpattern(functioncode, value):
"""Create the bit pattern that is used for writing single bits.
This is basically a storage of numerical constants.
Args:
* functioncode (int): can be 5 or 15
* value (int): can be 0 or 1
Returns:
The bit pattern (string).
Raises:
TypeError, ValueError
"""
_checkFunctioncode(functioncode, [5, 15])
_checkInt(value, minvalue=0, maxvalue=1, description='inputvalue')
if functioncode == 5:
if value == 0:
return '\x00\x00'
else:
return '\xff\x00'
elif functioncode == 15:
if value == 0:
return '\x00'
else:
return '\x01'
|
[
"def",
"_createBitpattern",
"(",
"functioncode",
",",
"value",
")",
":",
"_checkFunctioncode",
"(",
"functioncode",
",",
"[",
"5",
",",
"15",
"]",
")",
"_checkInt",
"(",
"value",
",",
"minvalue",
"=",
"0",
",",
"maxvalue",
"=",
"1",
",",
"description",
"=",
"'inputvalue'",
")",
"if",
"functioncode",
"==",
"5",
":",
"if",
"value",
"==",
"0",
":",
"return",
"'\\x00\\x00'",
"else",
":",
"return",
"'\\xff\\x00'",
"elif",
"functioncode",
"==",
"15",
":",
"if",
"value",
"==",
"0",
":",
"return",
"'\\x00'",
"else",
":",
"return",
"'\\x01'"
] |
Create the bit pattern that is used for writing single bits.
This is basically a storage of numerical constants.
Args:
* functioncode (int): can be 5 or 15
* value (int): can be 0 or 1
Returns:
The bit pattern (string).
Raises:
TypeError, ValueError
|
[
"Create",
"the",
"bit",
"pattern",
"that",
"is",
"used",
"for",
"writing",
"single",
"bits",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L1773-L1802
|
train
|
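The bit patterns above are fixed constants, so they can be spot-checked without the module's private helpers. A minimal standalone sketch, with the `_checkFunctioncode` and `_checkInt` guards replaced by plain `if` tests:

```python
# Standalone sketch of the single-bit write patterns.
# Function code 5 (Write Single Coil) uses a two-byte value field;
# function code 15 (Write Multiple Coils) packs one byte per coil here.
def create_bitpattern(functioncode, value):
    if functioncode not in (5, 15):
        raise ValueError('functioncode must be 5 or 15')
    if value not in (0, 1):
        raise ValueError('value must be 0 or 1')
    if functioncode == 5:
        return '\xff\x00' if value else '\x00\x00'
    return '\x01' if value else '\x00'

assert create_bitpattern(5, 1) == '\xff\x00'
assert create_bitpattern(15, 0) == '\x00'
```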
pyhys/minimalmodbus
|
minimalmodbus.py
|
_twosComplement
|
def _twosComplement(x, bits=16):
"""Calculate the two's complement of an integer.
Negative values can then be represented by an upper range of positive values.
See https://en.wikipedia.org/wiki/Two%27s_complement
Args:
* x (int): input integer.
* bits (int): number of bits, must be > 0.
Returns:
An int that represents the two's complement of the input.
Example for bits=8:
==== =======
x returns
==== =======
0 0
1 1
127 127
-128 128
-127 129
-1 255
==== =======
"""
_checkInt(bits, minvalue=0, description='number of bits')
_checkInt(x, description='input')
upperlimit = 2 ** (bits - 1) - 1
lowerlimit = -2 ** (bits - 1)
if x > upperlimit or x < lowerlimit:
raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \
.format(x, lowerlimit, upperlimit, bits))
# Calculate two's complement
if x >= 0:
return x
return x + 2 ** bits
|
python
|
def _twosComplement(x, bits=16):
"""Calculate the two's complement of an integer.
Negative values can then be represented by an upper range of positive values.
See https://en.wikipedia.org/wiki/Two%27s_complement
Args:
* x (int): input integer.
* bits (int): number of bits, must be > 0.
Returns:
An int that represents the two's complement of the input.
Example for bits=8:
==== =======
x returns
==== =======
0 0
1 1
127 127
-128 128
-127 129
-1 255
==== =======
"""
_checkInt(bits, minvalue=0, description='number of bits')
_checkInt(x, description='input')
upperlimit = 2 ** (bits - 1) - 1
lowerlimit = -2 ** (bits - 1)
if x > upperlimit or x < lowerlimit:
raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \
.format(x, lowerlimit, upperlimit, bits))
# Calculate two's complement
if x >= 0:
return x
return x + 2 ** bits
|
[
"def",
"_twosComplement",
"(",
"x",
",",
"bits",
"=",
"16",
")",
":",
"_checkInt",
"(",
"bits",
",",
"minvalue",
"=",
"0",
",",
"description",
"=",
"'number of bits'",
")",
"_checkInt",
"(",
"x",
",",
"description",
"=",
"'input'",
")",
"upperlimit",
"=",
"2",
"**",
"(",
"bits",
"-",
"1",
")",
"-",
"1",
"lowerlimit",
"=",
"-",
"2",
"**",
"(",
"bits",
"-",
"1",
")",
"if",
"x",
">",
"upperlimit",
"or",
"x",
"<",
"lowerlimit",
":",
"raise",
"ValueError",
"(",
"'The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.'",
".",
"format",
"(",
"x",
",",
"lowerlimit",
",",
"upperlimit",
",",
"bits",
")",
")",
"# Calculate two'2 complement",
"if",
"x",
">=",
"0",
":",
"return",
"x",
"return",
"x",
"+",
"2",
"**",
"bits"
] |
Calculate the two's complement of an integer.
Negative values can then be represented by an upper range of positive values.
See https://en.wikipedia.org/wiki/Two%27s_complement
Args:
* x (int): input integer.
* bits (int): number of bits, must be > 0.
Returns:
An int that represents the two's complement of the input.
Example for bits=8:
==== =======
x returns
==== =======
0 0
1 1
127 127
-128 128
-127 129
-1 255
==== =======
|
[
"Calculate",
"the",
"two",
"s",
"complement",
"of",
"an",
"integer",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L1809-L1847
|
train
|
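The docstring's example table doubles as a test fixture. A minimal sketch of the same mapping, with the table replayed as assertions:

```python
def twos_complement(x, bits=16):
    # Map a signed integer onto the unsigned range 0 .. 2**bits - 1.
    if not -(2 ** (bits - 1)) <= x <= 2 ** (bits - 1) - 1:
        raise ValueError('value {0} does not fit in {1} bits'.format(x, bits))
    return x if x >= 0 else x + 2 ** bits

# The rows of the docstring table for bits=8:
for signed, unsigned in [(0, 0), (1, 1), (127, 127),
                         (-128, 128), (-127, 129), (-1, 255)]:
    assert twos_complement(signed, bits=8) == unsigned
```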
pyhys/minimalmodbus
|
minimalmodbus.py
|
_setBitOn
|
def _setBitOn(x, bitNum):
"""Set bit 'bitNum' to True.
Args:
* x (int): The value before.
* bitNum (int): The bit number that should be set to True.
Returns:
The value after setting the bit. This is an integer.
For example:
For x = 4 (dec) = 0100 (bin), setting bit number 0 results in 0101 (bin) = 5 (dec).
"""
_checkInt(x, minvalue=0, description='input value')
_checkInt(bitNum, minvalue=0, description='bitnumber')
return x | (1 << bitNum)
|
python
|
def _setBitOn(x, bitNum):
"""Set bit 'bitNum' to True.
Args:
* x (int): The value before.
* bitNum (int): The bit number that should be set to True.
Returns:
The value after setting the bit. This is an integer.
For example:
For x = 4 (dec) = 0100 (bin), setting bit number 0 results in 0101 (bin) = 5 (dec).
"""
_checkInt(x, minvalue=0, description='input value')
_checkInt(bitNum, minvalue=0, description='bitnumber')
return x | (1 << bitNum)
|
[
"def",
"_setBitOn",
"(",
"x",
",",
"bitNum",
")",
":",
"_checkInt",
"(",
"x",
",",
"minvalue",
"=",
"0",
",",
"description",
"=",
"'input value'",
")",
"_checkInt",
"(",
"bitNum",
",",
"minvalue",
"=",
"0",
",",
"description",
"=",
"'bitnumber'",
")",
"return",
"x",
"|",
"(",
"1",
"<<",
"bitNum",
")"
] |
Set bit 'bitNum' to True.
Args:
* x (int): The value before.
* bitNum (int): The bit number that should be set to True.
Returns:
The value after setting the bit. This is an integer.
For example:
For x = 4 (dec) = 0100 (bin), setting bit number 0 results in 0101 (bin) = 5 (dec).
|
[
"Set",
"bit",
"bitNum",
"to",
"True",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L1893-L1910
|
train
|
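The docstring's worked example translates directly into a check:

```python
# Setting a bit ORs in a mask with only that bit position set.
assert 4 | (1 << 0) == 5   # 0100 -> 0101, the docstring example
assert 4 | (1 << 1) == 6   # 0100 -> 0110
assert 5 | (1 << 0) == 5   # setting an already-set bit is a no-op
```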
pyhys/minimalmodbus
|
minimalmodbus.py
|
_calculateCrcString
|
def _calculateCrcString(inputstring):
"""Calculate CRC-16 for Modbus.
Args:
inputstring (str): An arbitrary-length message (without the CRC).
Returns:
A two-byte CRC string, where the least significant byte is first.
"""
_checkString(inputstring, description='input CRC string')
# Preload a 16-bit register with ones
register = 0xFFFF
for char in inputstring:
register = (register >> 8) ^ _CRC16TABLE[(register ^ ord(char)) & 0xFF]
return _numToTwoByteString(register, LsbFirst=True)
|
python
|
def _calculateCrcString(inputstring):
"""Calculate CRC-16 for Modbus.
Args:
inputstring (str): An arbitrary-length message (without the CRC).
Returns:
A two-byte CRC string, where the least significant byte is first.
"""
_checkString(inputstring, description='input CRC string')
# Preload a 16-bit register with ones
register = 0xFFFF
for char in inputstring:
register = (register >> 8) ^ _CRC16TABLE[(register ^ ord(char)) & 0xFF]
return _numToTwoByteString(register, LsbFirst=True)
|
[
"def",
"_calculateCrcString",
"(",
"inputstring",
")",
":",
"_checkString",
"(",
"inputstring",
",",
"description",
"=",
"'input CRC string'",
")",
"# Preload a 16-bit register with ones",
"register",
"=",
"0xFFFF",
"for",
"char",
"in",
"inputstring",
":",
"register",
"=",
"(",
"register",
">>",
"8",
")",
"^",
"_CRC16TABLE",
"[",
"(",
"register",
"^",
"ord",
"(",
"char",
")",
")",
"&",
"0xFF",
"]",
"return",
"_numToTwoByteString",
"(",
"register",
",",
"LsbFirst",
"=",
"True",
")"
] |
Calculate CRC-16 for Modbus.
Args:
inputstring (str): An arbitrary-length message (without the CRC).
Returns:
A two-byte CRC string, where the least significant byte is first.
|
[
"Calculate",
"CRC",
"-",
"16",
"for",
"Modbus",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L1965-L1983
|
train
|
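`_CRC16TABLE` is a 256-entry lookup table (not shown here), but the same CRC-16/Modbus checksum can be computed bit by bit. A sketch under the usual Modbus parameters (initial register 0xFFFF, reflected polynomial 0xA001); the table-driven loop above should yield the same register value, which it then emits least-significant byte first:

```python
def crc16_modbus(data):
    # Bitwise CRC-16/Modbus: init 0xFFFF, reflected polynomial 0xA001.
    register = 0xFFFF
    for byte in data:
        register ^= byte
        for _ in range(8):
            lsb = register & 1
            register >>= 1
            if lsb:
                register ^= 0xA001
    return register

# Standard catalogue check value for the ASCII bytes '123456789':
assert crc16_modbus(b'123456789') == 0x4B37
```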
pyhys/minimalmodbus
|
minimalmodbus.py
|
_calculateLrcString
|
def _calculateLrcString(inputstring):
"""Calculate LRC for Modbus.
Args:
inputstring (str): An arbitrary-length message (without the beginning
colon and terminating CRLF). It should already be decoded from hex-string.
Returns:
A one-byte LRC bytestring (not encoded to hex-string)
Algorithm from the document 'MODBUS over serial line specification and implementation guide V1.02'.
The LRC is calculated as 8 bits (one byte).
For example, an LRC 0110 0001 (bin) = 61 (hex) = 97 (dec) = 'a'. This function will
then return 'a'.
In Modbus ASCII mode, this should be transmitted using two characters. This
example should be transmitted '61', which is a string of length two. This function
does not handle that conversion for transmission.
"""
_checkString(inputstring, description='input LRC string')
register = 0
for character in inputstring:
register += ord(character)
lrc = ((register ^ 0xFF) + 1) & 0xFF
lrcString = _numToOneByteString(lrc)
return lrcString
|
python
|
def _calculateLrcString(inputstring):
"""Calculate LRC for Modbus.
Args:
inputstring (str): An arbitrary-length message (without the beginning
colon and terminating CRLF). It should already be decoded from hex-string.
Returns:
A one-byte LRC bytestring (not encoded to hex-string)
Algorithm from the document 'MODBUS over serial line specification and implementation guide V1.02'.
The LRC is calculated as 8 bits (one byte).
For example, an LRC 0110 0001 (bin) = 61 (hex) = 97 (dec) = 'a'. This function will
then return 'a'.
In Modbus ASCII mode, this should be transmitted using two characters. This
example should be transmitted '61', which is a string of length two. This function
does not handle that conversion for transmission.
"""
_checkString(inputstring, description='input LRC string')
register = 0
for character in inputstring:
register += ord(character)
lrc = ((register ^ 0xFF) + 1) & 0xFF
lrcString = _numToOneByteString(lrc)
return lrcString
|
[
"def",
"_calculateLrcString",
"(",
"inputstring",
")",
":",
"_checkString",
"(",
"inputstring",
",",
"description",
"=",
"'input LRC string'",
")",
"register",
"=",
"0",
"for",
"character",
"in",
"inputstring",
":",
"register",
"+=",
"ord",
"(",
"character",
")",
"lrc",
"=",
"(",
"(",
"register",
"^",
"0xFF",
")",
"+",
"1",
")",
"&",
"0xFF",
"lrcString",
"=",
"_numToOneByteString",
"(",
"lrc",
")",
"return",
"lrcString"
] |
Calculate LRC for Modbus.
Args:
inputstring (str): An arbitrary-length message (without the beginning
colon and terminating CRLF). It should already be decoded from hex-string.
Returns:
A one-byte LRC bytestring (not encoded to hex-string)
Algorithm from the document 'MODBUS over serial line specification and implementation guide V1.02'.
The LRC is calculated as 8 bits (one byte).
For example, an LRC 0110 0001 (bin) = 61 (hex) = 97 (dec) = 'a'. This function will
then return 'a'.
In Modbus ASCII mode, this should be transmitted using two characters. This
example should be transmitted '61', which is a string of length two. This function
does not handle that conversion for transmission.
|
[
"Calculate",
"LRC",
"for",
"Modbus",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L1986-L2016
|
train
|
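The LRC computation `((register ^ 0xFF) + 1) & 0xFF` is the 8-bit two's complement of the byte sum, so it can be written more compactly. A sketch demonstrating the equivalence:

```python
def lrc_modbus(data):
    # Two's complement of the byte sum, truncated to 8 bits.
    return (-sum(data)) & 0xFF

assert lrc_modbus(b'\x01\x02') == 0xFD   # 0x100 - 0x03

# Equivalent to the invert-add-one form used in the code above,
# including byte sums larger than 255:
for n in range(2000):
    assert (-n) & 0xFF == ((n ^ 0xFF) + 1) & 0xFF
```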
pyhys/minimalmodbus
|
minimalmodbus.py
|
_checkMode
|
def _checkMode(mode):
"""Check that the Modbus mode is valie.
Args:
mode (string): The Modbus mode (MODE_RTU or MODE_ASCII)
Raises:
TypeError, ValueError
"""
if not isinstance(mode, str):
raise TypeError('The {0} should be a string. Given: {1!r}'.format("mode", mode))
if mode not in [MODE_RTU, MODE_ASCII]:
raise ValueError("Unreconized Modbus mode given. Must be 'rtu' or 'ascii' but {0!r} was given.".format(mode))
|
python
|
def _checkMode(mode):
"""Check that the Modbus mode is valie.
Args:
mode (string): The Modbus mode (MODE_RTU or MODE_ASCII)
Raises:
TypeError, ValueError
"""
if not isinstance(mode, str):
raise TypeError('The {0} should be a string. Given: {1!r}'.format("mode", mode))
if mode not in [MODE_RTU, MODE_ASCII]:
raise ValueError("Unreconized Modbus mode given. Must be 'rtu' or 'ascii' but {0!r} was given.".format(mode))
|
[
"def",
"_checkMode",
"(",
"mode",
")",
":",
"if",
"not",
"isinstance",
"(",
"mode",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'The {0} should be a string. Given: {1!r}'",
".",
"format",
"(",
"\"mode\"",
",",
"mode",
")",
")",
"if",
"mode",
"not",
"in",
"[",
"MODE_RTU",
",",
"MODE_ASCII",
"]",
":",
"raise",
"ValueError",
"(",
"\"Unreconized Modbus mode given. Must be 'rtu' or 'ascii' but {0!r} was given.\"",
".",
"format",
"(",
"mode",
")",
")"
] |
Check that the Modbus mode is valid.
Args:
mode (string): The Modbus mode (MODE_RTU or MODE_ASCII)
Raises:
TypeError, ValueError
|
[
"Check",
"that",
"the",
"Modbus",
"mode",
"is",
"valie",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2019-L2034
|
train
|
pyhys/minimalmodbus
|
minimalmodbus.py
|
_checkFunctioncode
|
def _checkFunctioncode(functioncode, listOfAllowedValues=[]):
"""Check that the given functioncode is in the listOfAllowedValues.
Also verifies that 1 <= function code <= 127.
Args:
* functioncode (int): The function code
* listOfAllowedValues (list of int): Allowed values. Use *None* to bypass this part of the checking.
Raises:
TypeError, ValueError
"""
FUNCTIONCODE_MIN = 1
FUNCTIONCODE_MAX = 127
_checkInt(functioncode, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode')
if listOfAllowedValues is None:
return
if not isinstance(listOfAllowedValues, list):
raise TypeError('The listOfAllowedValues should be a list. Given: {0!r}'.format(listOfAllowedValues))
for value in listOfAllowedValues:
_checkInt(value, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode inside listOfAllowedValues')
if functioncode not in listOfAllowedValues:
raise ValueError('Wrong function code: {0}, allowed values are {1!r}'.format(functioncode, listOfAllowedValues))
|
python
|
def _checkFunctioncode(functioncode, listOfAllowedValues=[]):
"""Check that the given functioncode is in the listOfAllowedValues.
Also verifies that 1 <= function code <= 127.
Args:
* functioncode (int): The function code
* listOfAllowedValues (list of int): Allowed values. Use *None* to bypass this part of the checking.
Raises:
TypeError, ValueError
"""
FUNCTIONCODE_MIN = 1
FUNCTIONCODE_MAX = 127
_checkInt(functioncode, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode')
if listOfAllowedValues is None:
return
if not isinstance(listOfAllowedValues, list):
raise TypeError('The listOfAllowedValues should be a list. Given: {0!r}'.format(listOfAllowedValues))
for value in listOfAllowedValues:
_checkInt(value, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode inside listOfAllowedValues')
if functioncode not in listOfAllowedValues:
raise ValueError('Wrong function code: {0}, allowed values are {1!r}'.format(functioncode, listOfAllowedValues))
|
[
"def",
"_checkFunctioncode",
"(",
"functioncode",
",",
"listOfAllowedValues",
"=",
"[",
"]",
")",
":",
"FUNCTIONCODE_MIN",
"=",
"1",
"FUNCTIONCODE_MAX",
"=",
"127",
"_checkInt",
"(",
"functioncode",
",",
"FUNCTIONCODE_MIN",
",",
"FUNCTIONCODE_MAX",
",",
"description",
"=",
"'functioncode'",
")",
"if",
"listOfAllowedValues",
"is",
"None",
":",
"return",
"if",
"not",
"isinstance",
"(",
"listOfAllowedValues",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"'The listOfAllowedValues should be a list. Given: {0!r}'",
".",
"format",
"(",
"listOfAllowedValues",
")",
")",
"for",
"value",
"in",
"listOfAllowedValues",
":",
"_checkInt",
"(",
"value",
",",
"FUNCTIONCODE_MIN",
",",
"FUNCTIONCODE_MAX",
",",
"description",
"=",
"'functioncode inside listOfAllowedValues'",
")",
"if",
"functioncode",
"not",
"in",
"listOfAllowedValues",
":",
"raise",
"ValueError",
"(",
"'Wrong function code: {0}, allowed values are {1!r}'",
".",
"format",
"(",
"functioncode",
",",
"listOfAllowedValues",
")",
")"
] |
Check that the given functioncode is in the listOfAllowedValues.
Also verifies that 1 <= function code <= 127.
Args:
* functioncode (int): The function code
* listOfAllowedValues (list of int): Allowed values. Use *None* to bypass this part of the checking.
Raises:
TypeError, ValueError
|
[
"Check",
"that",
"the",
"given",
"functioncode",
"is",
"in",
"the",
"listOfAllowedValues",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2037-L2065
|
train
|
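Note that the default argument above is an empty list rather than None, so as written a bare `_checkFunctioncode(functioncode)` call always raises ValueError (no code is a member of `[]`); passing `None` is the documented way to bypass the membership test. A condensed sketch of the same logic with `None` as the default, to make the bypass explicit:

```python
def check_functioncode(functioncode, allowed=None):
    # Valid Modbus function codes occupy 1..127 (the high bit marks an
    # exception response).
    if not 1 <= functioncode <= 127:
        raise ValueError('Function code out of range: {0}'.format(functioncode))
    if allowed is not None and functioncode not in allowed:
        raise ValueError('Wrong function code: {0}, allowed values are {1!r}'
                         .format(functioncode, allowed))

check_functioncode(3, [3, 4])   # passes
check_functioncode(3)           # passes: None bypasses the membership test
```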
pyhys/minimalmodbus
|
minimalmodbus.py
|
_checkResponseByteCount
|
def _checkResponseByteCount(payload):
"""Check that the number of bytes as given in the response is correct.
The first byte in the payload indicates the length of the payload (first byte not counted).
Args:
payload (string): The payload
Raises:
TypeError, ValueError
"""
POSITION_FOR_GIVEN_NUMBER = 0
NUMBER_OF_BYTES_TO_SKIP = 1
_checkString(payload, minlength=1, description='payload')
givenNumberOfDatabytes = ord(payload[POSITION_FOR_GIVEN_NUMBER])
countedNumberOfDatabytes = len(payload) - NUMBER_OF_BYTES_TO_SKIP
if givenNumberOfDatabytes != countedNumberOfDatabytes:
errortemplate = 'Wrong given number of bytes in the response: {0}, but counted is {1} as data payload length is {2}.' + \
' The data payload is: {3!r}'
errortext = errortemplate.format(givenNumberOfDatabytes, countedNumberOfDatabytes, len(payload), payload)
raise ValueError(errortext)
|
python
|
def _checkResponseByteCount(payload):
"""Check that the number of bytes as given in the response is correct.
The first byte in the payload indicates the length of the payload (first byte not counted).
Args:
payload (string): The payload
Raises:
TypeError, ValueError
"""
POSITION_FOR_GIVEN_NUMBER = 0
NUMBER_OF_BYTES_TO_SKIP = 1
_checkString(payload, minlength=1, description='payload')
givenNumberOfDatabytes = ord(payload[POSITION_FOR_GIVEN_NUMBER])
countedNumberOfDatabytes = len(payload) - NUMBER_OF_BYTES_TO_SKIP
if givenNumberOfDatabytes != countedNumberOfDatabytes:
errortemplate = 'Wrong given number of bytes in the response: {0}, but counted is {1} as data payload length is {2}.' + \
' The data payload is: {3!r}'
errortext = errortemplate.format(givenNumberOfDatabytes, countedNumberOfDatabytes, len(payload), payload)
raise ValueError(errortext)
|
[
"def",
"_checkResponseByteCount",
"(",
"payload",
")",
":",
"POSITION_FOR_GIVEN_NUMBER",
"=",
"0",
"NUMBER_OF_BYTES_TO_SKIP",
"=",
"1",
"_checkString",
"(",
"payload",
",",
"minlength",
"=",
"1",
",",
"description",
"=",
"'payload'",
")",
"givenNumberOfDatabytes",
"=",
"ord",
"(",
"payload",
"[",
"POSITION_FOR_GIVEN_NUMBER",
"]",
")",
"countedNumberOfDatabytes",
"=",
"len",
"(",
"payload",
")",
"-",
"NUMBER_OF_BYTES_TO_SKIP",
"if",
"givenNumberOfDatabytes",
"!=",
"countedNumberOfDatabytes",
":",
"errortemplate",
"=",
"'Wrong given number of bytes in the response: {0}, but counted is {1} as data payload length is {2}.'",
"+",
"' The data payload is: {3!r}'",
"errortext",
"=",
"errortemplate",
".",
"format",
"(",
"givenNumberOfDatabytes",
",",
"countedNumberOfDatabytes",
",",
"len",
"(",
"payload",
")",
",",
"payload",
")",
"raise",
"ValueError",
"(",
"errortext",
")"
] |
Check that the number of bytes as given in the response is correct.
The first byte in the payload indicates the length of the payload (first byte not counted).
Args:
payload (string): The payload
Raises:
TypeError, ValueError
|
[
"Check",
"that",
"the",
"number",
"of",
"bytes",
"as",
"given",
"in",
"the",
"response",
"is",
"correct",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2100-L2124
|
train
|
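A quick illustration of the byte-count rule, using a hypothetical three-byte payload:

```python
# First byte is the count of the data bytes that follow it.
payload = '\x02\x12\x34'                      # count 0x02, then two data bytes
assert ord(payload[0]) == len(payload) - 1    # the check performed above passes

bad = '\x03\x12\x34'                          # claims three bytes, carries two
assert ord(bad[0]) != len(bad) - 1            # this payload would raise ValueError
```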
pyhys/minimalmodbus
|
minimalmodbus.py
|
_checkResponseRegisterAddress
|
def _checkResponseRegisterAddress(payload, registeraddress):
"""Check that the start adress as given in the response is correct.
The first two bytes in the payload holds the address value.
Args:
* payload (string): The payload
* registeraddress (int): The register address (use decimal numbers, not hex).
Raises:
TypeError, ValueError
"""
_checkString(payload, minlength=2, description='payload')
_checkRegisteraddress(registeraddress)
BYTERANGE_FOR_STARTADDRESS = slice(0, 2)
bytesForStartAddress = payload[BYTERANGE_FOR_STARTADDRESS]
receivedStartAddress = _twoByteStringToNum(bytesForStartAddress)
if receivedStartAddress != registeraddress:
raise ValueError('Wrong given write start address: {0}, but commanded is {1}. The data payload is: {2!r}'.format( \
receivedStartAddress, registeraddress, payload))
|
python
|
def _checkResponseRegisterAddress(payload, registeraddress):
"""Check that the start adress as given in the response is correct.
The first two bytes in the payload holds the address value.
Args:
* payload (string): The payload
* registeraddress (int): The register address (use decimal numbers, not hex).
Raises:
TypeError, ValueError
"""
_checkString(payload, minlength=2, description='payload')
_checkRegisteraddress(registeraddress)
BYTERANGE_FOR_STARTADDRESS = slice(0, 2)
bytesForStartAddress = payload[BYTERANGE_FOR_STARTADDRESS]
receivedStartAddress = _twoByteStringToNum(bytesForStartAddress)
if receivedStartAddress != registeraddress:
raise ValueError('Wrong given write start address: {0}, but commanded is {1}. The data payload is: {2!r}'.format( \
receivedStartAddress, registeraddress, payload))
|
[
"def",
"_checkResponseRegisterAddress",
"(",
"payload",
",",
"registeraddress",
")",
":",
"_checkString",
"(",
"payload",
",",
"minlength",
"=",
"2",
",",
"description",
"=",
"'payload'",
")",
"_checkRegisteraddress",
"(",
"registeraddress",
")",
"BYTERANGE_FOR_STARTADDRESS",
"=",
"slice",
"(",
"0",
",",
"2",
")",
"bytesForStartAddress",
"=",
"payload",
"[",
"BYTERANGE_FOR_STARTADDRESS",
"]",
"receivedStartAddress",
"=",
"_twoByteStringToNum",
"(",
"bytesForStartAddress",
")",
"if",
"receivedStartAddress",
"!=",
"registeraddress",
":",
"raise",
"ValueError",
"(",
"'Wrong given write start adress: {0}, but commanded is {1}. The data payload is: {2!r}'",
".",
"format",
"(",
"receivedStartAddress",
",",
"registeraddress",
",",
"payload",
")",
")"
] |
Check that the start address as given in the response is correct.
The first two bytes in the payload hold the address value.
Args:
* payload (string): The payload
* registeraddress (int): The register address (use decimal numbers, not hex).
Raises:
TypeError, ValueError
|
[
"Check",
"that",
"the",
"start",
"adress",
"as",
"given",
"in",
"the",
"response",
"is",
"correct",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2127-L2150
|
train
|
pyhys/minimalmodbus
|
minimalmodbus.py
|
_checkResponseNumberOfRegisters
|
def _checkResponseNumberOfRegisters(payload, numberOfRegisters):
"""Check that the number of written registers as given in the response is correct.
The bytes 2 and 3 (zero-based counting) in the payload hold the value.
Args:
* payload (string): The payload
* numberOfRegisters (int): Number of registers that have been written
Raises:
TypeError, ValueError
"""
_checkString(payload, minlength=4, description='payload')
_checkInt(numberOfRegisters, minvalue=1, maxvalue=0xFFFF, description='numberOfRegisters')
BYTERANGE_FOR_NUMBER_OF_REGISTERS = slice(2, 4)
bytesForNumberOfRegisters = payload[BYTERANGE_FOR_NUMBER_OF_REGISTERS]
receivedNumberOfWrittenRegisters = _twoByteStringToNum(bytesForNumberOfRegisters)
if receivedNumberOfWrittenRegisters != numberOfRegisters:
raise ValueError('Wrong number of registers to write in the response: {0}, but commanded is {1}. The data payload is: {2!r}'.format( \
receivedNumberOfWrittenRegisters, numberOfRegisters, payload))
|
python
|
def _checkResponseNumberOfRegisters(payload, numberOfRegisters):
"""Check that the number of written registers as given in the response is correct.
The bytes 2 and 3 (zero-based counting) in the payload hold the value.
Args:
* payload (string): The payload
* numberOfRegisters (int): Number of registers that have been written
Raises:
TypeError, ValueError
"""
_checkString(payload, minlength=4, description='payload')
_checkInt(numberOfRegisters, minvalue=1, maxvalue=0xFFFF, description='numberOfRegisters')
BYTERANGE_FOR_NUMBER_OF_REGISTERS = slice(2, 4)
bytesForNumberOfRegisters = payload[BYTERANGE_FOR_NUMBER_OF_REGISTERS]
receivedNumberOfWrittenRegisters = _twoByteStringToNum(bytesForNumberOfRegisters)
if receivedNumberOfWrittenRegisters != numberOfRegisters:
raise ValueError('Wrong number of registers to write in the response: {0}, but commanded is {1}. The data payload is: {2!r}'.format( \
receivedNumberOfWrittenRegisters, numberOfRegisters, payload))
|
[
"def",
"_checkResponseNumberOfRegisters",
"(",
"payload",
",",
"numberOfRegisters",
")",
":",
"_checkString",
"(",
"payload",
",",
"minlength",
"=",
"4",
",",
"description",
"=",
"'payload'",
")",
"_checkInt",
"(",
"numberOfRegisters",
",",
"minvalue",
"=",
"1",
",",
"maxvalue",
"=",
"0xFFFF",
",",
"description",
"=",
"'numberOfRegisters'",
")",
"BYTERANGE_FOR_NUMBER_OF_REGISTERS",
"=",
"slice",
"(",
"2",
",",
"4",
")",
"bytesForNumberOfRegisters",
"=",
"payload",
"[",
"BYTERANGE_FOR_NUMBER_OF_REGISTERS",
"]",
"receivedNumberOfWrittenReisters",
"=",
"_twoByteStringToNum",
"(",
"bytesForNumberOfRegisters",
")",
"if",
"receivedNumberOfWrittenReisters",
"!=",
"numberOfRegisters",
":",
"raise",
"ValueError",
"(",
"'Wrong number of registers to write in the response: {0}, but commanded is {1}. The data payload is: {2!r}'",
".",
"format",
"(",
"receivedNumberOfWrittenReisters",
",",
"numberOfRegisters",
",",
"payload",
")",
")"
] |
Check that the number of written registers as given in the response is correct.
The bytes 2 and 3 (zero-based counting) in the payload hold the value.
Args:
* payload (string): The payload
* numberOfRegisters (int): Number of registers that have been written
Raises:
TypeError, ValueError
|
[
"Check",
"that",
"the",
"number",
"of",
"written",
"registers",
"as",
"given",
"in",
"the",
"response",
"is",
"correct",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2153-L2176
|
train
|
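The `slice(2, 4)` picks the register count out of the echoed payload; `_twoByteStringToNum` is assumed to decode most-significant byte first, the Modbus byte order. A small illustration with a hypothetical payload:

```python
# Write-response payload layout: start address (bytes 0-1), count (bytes 2-3).
payload = '\x00\x10\x00\x03'                    # address 0x0010, count 3
count_bytes = payload[slice(2, 4)]
count = (ord(count_bytes[0]) << 8) | ord(count_bytes[1])   # big-endian decode
assert count == 3
```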
pyhys/minimalmodbus
|
minimalmodbus.py
|
_checkResponseWriteData
|
def _checkResponseWriteData(payload, writedata):
"""Check that the write data as given in the response is correct.
The bytes 2 and 3 (zero-based counting) in the payload hold the write data.
Args:
* payload (string): The payload
* writedata (string): The data to write, length should be 2 bytes.
Raises:
TypeError, ValueError
"""
_checkString(payload, minlength=4, description='payload')
_checkString(writedata, minlength=2, maxlength=2, description='writedata')
BYTERANGE_FOR_WRITEDATA = slice(2, 4)
receivedWritedata = payload[BYTERANGE_FOR_WRITEDATA]
if receivedWritedata != writedata:
raise ValueError('Wrong write data in the response: {0!r}, but commanded is {1!r}. The data payload is: {2!r}'.format( \
receivedWritedata, writedata, payload))
|
python
|
def _checkResponseWriteData(payload, writedata):
"""Check that the write data as given in the response is correct.
The bytes 2 and 3 (zero-based counting) in the payload hold the write data.
Args:
* payload (string): The payload
* writedata (string): The data to write, length should be 2 bytes.
Raises:
TypeError, ValueError
"""
_checkString(payload, minlength=4, description='payload')
_checkString(writedata, minlength=2, maxlength=2, description='writedata')
BYTERANGE_FOR_WRITEDATA = slice(2, 4)
receivedWritedata = payload[BYTERANGE_FOR_WRITEDATA]
if receivedWritedata != writedata:
raise ValueError('Wrong write data in the response: {0!r}, but commanded is {1!r}. The data payload is: {2!r}'.format( \
receivedWritedata, writedata, payload))
|
[
"def",
"_checkResponseWriteData",
"(",
"payload",
",",
"writedata",
")",
":",
"_checkString",
"(",
"payload",
",",
"minlength",
"=",
"4",
",",
"description",
"=",
"'payload'",
")",
"_checkString",
"(",
"writedata",
",",
"minlength",
"=",
"2",
",",
"maxlength",
"=",
"2",
",",
"description",
"=",
"'writedata'",
")",
"BYTERANGE_FOR_WRITEDATA",
"=",
"slice",
"(",
"2",
",",
"4",
")",
"receivedWritedata",
"=",
"payload",
"[",
"BYTERANGE_FOR_WRITEDATA",
"]",
"if",
"receivedWritedata",
"!=",
"writedata",
":",
"raise",
"ValueError",
"(",
"'Wrong write data in the response: {0!r}, but commanded is {1!r}. The data payload is: {2!r}'",
".",
"format",
"(",
"receivedWritedata",
",",
"writedata",
",",
"payload",
")",
")"
] |
Check that the write data as given in the response is correct.
The bytes 2 and 3 (zero-based counting) in the payload hold the write data.
Args:
* payload (string): The payload
* writedata (string): The data to write, length should be 2 bytes.
Raises:
TypeError, ValueError
|
[
"Check",
"that",
"the",
"write",
"data",
"as",
"given",
"in",
"the",
"response",
"is",
"correct",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2179-L2201
|
train
|
pyhys/minimalmodbus
|
minimalmodbus.py
|
_checkString
|
def _checkString(inputstring, description, minlength=0, maxlength=None):
"""Check that the given string is valid.
Args:
* inputstring (string): The string to be checked
* description (string): Used in error messages for the checked inputstring
* minlength (int): Minimum length of the string
* maxlength (int or None): Maximum length of the string
Raises:
TypeError, ValueError
Uses the function :func:`_checkInt` internally.
"""
# Type checking
if not isinstance(description, str):
raise TypeError('The description should be a string. Given: {0!r}'.format(description))
if not isinstance(inputstring, str):
raise TypeError('The {0} should be a string. Given: {1!r}'.format(description, inputstring))
if not isinstance(maxlength, (int, type(None))):
raise TypeError('The maxlength must be an integer or None. Given: {0!r}'.format(maxlength))
# Check values
_checkInt(minlength, minvalue=0, maxvalue=None, description='minlength')
if len(inputstring) < minlength:
raise ValueError('The {0} is too short: {1}, but minimum value is {2}. Given: {3!r}'.format( \
description, len(inputstring), minlength, inputstring))
if not maxlength is None:
if maxlength < 0:
raise ValueError('The maxlength must be non-negative. Given: {0}'.format(maxlength))
if maxlength < minlength:
raise ValueError('The maxlength must not be smaller than minlength. Given: {0} and {1}'.format( \
maxlength, minlength))
if len(inputstring) > maxlength:
raise ValueError('The {0} is too long: {1}, but maximum value is {2}. Given: {3!r}'.format( \
description, len(inputstring), maxlength, inputstring))
|
python
|
def _checkString(inputstring, description, minlength=0, maxlength=None):
"""Check that the given string is valid.
Args:
* inputstring (string): The string to be checked
* description (string): Used in error messages for the checked inputstring
* minlength (int): Minimum length of the string
* maxlength (int or None): Maximum length of the string
Raises:
TypeError, ValueError
Uses the function :func:`_checkInt` internally.
"""
# Type checking
if not isinstance(description, str):
raise TypeError('The description should be a string. Given: {0!r}'.format(description))
if not isinstance(inputstring, str):
raise TypeError('The {0} should be a string. Given: {1!r}'.format(description, inputstring))
if not isinstance(maxlength, (int, type(None))):
raise TypeError('The maxlength must be an integer or None. Given: {0!r}'.format(maxlength))
# Check values
_checkInt(minlength, minvalue=0, maxvalue=None, description='minlength')
if len(inputstring) < minlength:
raise ValueError('The {0} is too short: {1}, but minimum value is {2}. Given: {3!r}'.format( \
description, len(inputstring), minlength, inputstring))
if not maxlength is None:
if maxlength < 0:
raise ValueError('The maxlength must be non-negative. Given: {0}'.format(maxlength))
if maxlength < minlength:
raise ValueError('The maxlength must not be smaller than minlength. Given: {0} and {1}'.format( \
maxlength, minlength))
if len(inputstring) > maxlength:
raise ValueError('The {0} is too long: {1}, but maximum value is {2}. Given: {3!r}'.format( \
description, len(inputstring), maxlength, inputstring))
|
[
"def",
"_checkString",
"(",
"inputstring",
",",
"description",
",",
"minlength",
"=",
"0",
",",
"maxlength",
"=",
"None",
")",
":",
"# Type checking",
"if",
"not",
"isinstance",
"(",
"description",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'The description should be a string. Given: {0!r}'",
".",
"format",
"(",
"description",
")",
")",
"if",
"not",
"isinstance",
"(",
"inputstring",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'The {0} should be a string. Given: {1!r}'",
".",
"format",
"(",
"description",
",",
"inputstring",
")",
")",
"if",
"not",
"isinstance",
"(",
"maxlength",
",",
"(",
"int",
",",
"type",
"(",
"None",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'The maxlength must be an integer or None. Given: {0!r}'",
".",
"format",
"(",
"maxlength",
")",
")",
"# Check values",
"_checkInt",
"(",
"minlength",
",",
"minvalue",
"=",
"0",
",",
"maxvalue",
"=",
"None",
",",
"description",
"=",
"'minlength'",
")",
"if",
"len",
"(",
"inputstring",
")",
"<",
"minlength",
":",
"raise",
"ValueError",
"(",
"'The {0} is too short: {1}, but minimum value is {2}. Given: {3!r}'",
".",
"format",
"(",
"description",
",",
"len",
"(",
"inputstring",
")",
",",
"minlength",
",",
"inputstring",
")",
")",
"if",
"not",
"maxlength",
"is",
"None",
":",
"if",
"maxlength",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'The maxlength must be positive. Given: {0}'",
".",
"format",
"(",
"maxlength",
")",
")",
"if",
"maxlength",
"<",
"minlength",
":",
"raise",
"ValueError",
"(",
"'The maxlength must not be smaller than minlength. Given: {0} and {1}'",
".",
"format",
"(",
"maxlength",
",",
"minlength",
")",
")",
"if",
"len",
"(",
"inputstring",
")",
">",
"maxlength",
":",
"raise",
"ValueError",
"(",
"'The {0} is too long: {1}, but maximum value is {2}. Given: {3!r}'",
".",
"format",
"(",
"description",
",",
"len",
"(",
"inputstring",
")",
",",
"maxlength",
",",
"inputstring",
")",
")"
] |
Check that the given string is valid.
Args:
* inputstring (string): The string to be checked
* description (string): Used in error messages for the checked inputstring
* minlength (int): Minimum length of the string
* maxlength (int or None): Maximum length of the string
Raises:
TypeError, ValueError
Uses the function :func:`_checkInt` internally.
|
[
"Check",
"that",
"the",
"given",
"string",
"is",
"valid",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2204-L2246
|
train
|
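The validator runs its type checks (raising TypeError) before its length checks (raising ValueError). A condensed sketch of the same ordering, assuming Python 3 where all text strings are `str`:

```python
def check_string(inputstring, description, minlength=0, maxlength=None):
    # Type errors first, then limit consistency, then the length itself.
    if not isinstance(inputstring, str):
        raise TypeError('The {0} should be a string. Given: {1!r}'
                        .format(description, inputstring))
    if maxlength is not None and maxlength < minlength:
        raise ValueError('maxlength {0} is smaller than minlength {1}'
                         .format(maxlength, minlength))
    if len(inputstring) < minlength or \
            (maxlength is not None and len(inputstring) > maxlength):
        raise ValueError('The {0} has wrong length: {1}'
                         .format(description, len(inputstring)))

check_string('\x00\x10', 'payload', minlength=2, maxlength=2)   # passes silently
```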
pyhys/minimalmodbus
|
minimalmodbus.py
|
_checkInt
|
def _checkInt(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):
"""Check that the given integer is valid.
Args:
* inputvalue (int or long): The integer to be checked
* minvalue (int or long, or None): Minimum value of the integer
* maxvalue (int or long, or None): Maximum value of the integer
* description (string): Used in error messages for the checked inputvalue
Raises:
TypeError, ValueError
Note: Can not use the function :func:`_checkString`, as that function uses this function internally.
"""
if not isinstance(description, str):
raise TypeError('The description should be a string. Given: {0!r}'.format(description))
if not isinstance(inputvalue, (int, long)):
raise TypeError('The {0} must be an integer. Given: {1!r}'.format(description, inputvalue))
if not isinstance(minvalue, (int, long, type(None))):
raise TypeError('The minvalue must be an integer or None. Given: {0!r}'.format(minvalue))
if not isinstance(maxvalue, (int, long, type(None))):
raise TypeError('The maxvalue must be an integer or None. Given: {0!r}'.format(maxvalue))
_checkNumerical(inputvalue, minvalue, maxvalue, description)
|
python
|
def _checkInt(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):
"""Check that the given integer is valid.
Args:
* inputvalue (int or long): The integer to be checked
* minvalue (int or long, or None): Minimum value of the integer
* maxvalue (int or long, or None): Maximum value of the integer
* description (string): Used in error messages for the checked inputvalue
Raises:
TypeError, ValueError
Note: Can not use the function :func:`_checkString`, as that function uses this function internally.
"""
if not isinstance(description, str):
raise TypeError('The description should be a string. Given: {0!r}'.format(description))
if not isinstance(inputvalue, (int, long)):
raise TypeError('The {0} must be an integer. Given: {1!r}'.format(description, inputvalue))
if not isinstance(minvalue, (int, long, type(None))):
raise TypeError('The minvalue must be an integer or None. Given: {0!r}'.format(minvalue))
if not isinstance(maxvalue, (int, long, type(None))):
raise TypeError('The maxvalue must be an integer or None. Given: {0!r}'.format(maxvalue))
_checkNumerical(inputvalue, minvalue, maxvalue, description)
|
[
"def",
"_checkInt",
"(",
"inputvalue",
",",
"minvalue",
"=",
"None",
",",
"maxvalue",
"=",
"None",
",",
"description",
"=",
"'inputvalue'",
")",
":",
"if",
"not",
"isinstance",
"(",
"description",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'The description should be a string. Given: {0!r}'",
".",
"format",
"(",
"description",
")",
")",
"if",
"not",
"isinstance",
"(",
"inputvalue",
",",
"(",
"int",
",",
"long",
")",
")",
":",
"raise",
"TypeError",
"(",
"'The {0} must be an integer. Given: {1!r}'",
".",
"format",
"(",
"description",
",",
"inputvalue",
")",
")",
"if",
"not",
"isinstance",
"(",
"minvalue",
",",
"(",
"int",
",",
"long",
",",
"type",
"(",
"None",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'The minvalue must be an integer or None. Given: {0!r}'",
".",
"format",
"(",
"minvalue",
")",
")",
"if",
"not",
"isinstance",
"(",
"maxvalue",
",",
"(",
"int",
",",
"long",
",",
"type",
"(",
"None",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'The maxvalue must be an integer or None. Given: {0!r}'",
".",
"format",
"(",
"maxvalue",
")",
")",
"_checkNumerical",
"(",
"inputvalue",
",",
"minvalue",
",",
"maxvalue",
",",
"description",
")"
] |
Check that the given integer is valid.
Args:
* inputvalue (int or long): The integer to be checked
* minvalue (int or long, or None): Minimum value of the integer
* maxvalue (int or long, or None): Maximum value of the integer
* description (string): Used in error messages for the checked inputvalue
Raises:
TypeError, ValueError
Note: Can not use the function :func:`_checkString`, as that function uses this function internally.
|
[
"Check",
"that",
"the",
"given",
"integer",
"is",
"valid",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2249-L2276
|
train
|
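The `(int, long)` tuples date this code to Python 2, where plain and arbitrary-precision integers were distinct types; under Python 3 the bare name `long` raises NameError. A sketch of a version-agnostic guard (the `bool` exclusion is an extra strictness not present in the original, since `bool` subclasses `int`):

```python
try:
    _INTTYPES = (int, long)        # Python 2: int and long are separate types
except NameError:
    _INTTYPES = (int,)             # Python 3: int is arbitrary precision

def check_int_type(value, description='inputvalue'):
    if isinstance(value, bool) or not isinstance(value, _INTTYPES):
        raise TypeError('The {0} must be an integer. Given: {1!r}'
                        .format(description, value))

check_int_type(42)                 # passes on both Python 2 and 3
```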
pyhys/minimalmodbus
|
minimalmodbus.py
|
_checkNumerical
|
def _checkNumerical(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):
"""Check that the given numerical value is valid.
Args:
* inputvalue (numerical): The value to be checked.
* minvalue (numerical): Minimum value. Use None to skip this part of the test.
* maxvalue (numerical): Maximum value. Use None to skip this part of the test.
* description (string): Used in error messages for the checked inputvalue
Raises:
TypeError, ValueError
Note: Can not use the function :func:`_checkString`, as it uses this function internally.
"""
# Type checking
if not isinstance(description, str):
raise TypeError('The description should be a string. Given: {0!r}'.format(description))
if not isinstance(inputvalue, (int, long, float)):
raise TypeError('The {0} must be numerical. Given: {1!r}'.format(description, inputvalue))
if not isinstance(minvalue, (int, float, long, type(None))):
raise TypeError('The minvalue must be numeric or None. Given: {0!r}'.format(minvalue))
if not isinstance(maxvalue, (int, float, long, type(None))):
raise TypeError('The maxvalue must be numeric or None. Given: {0!r}'.format(maxvalue))
# Consistency checking
if (not minvalue is None) and (not maxvalue is None):
if maxvalue < minvalue:
raise ValueError('The maxvalue must not be smaller than minvalue. Given: {0} and {1}, respectively.'.format( \
maxvalue, minvalue))
# Value checking
if not minvalue is None:
if inputvalue < minvalue:
raise ValueError('The {0} is too small: {1}, but minimum value is {2}.'.format( \
description, inputvalue, minvalue))
if not maxvalue is None:
if inputvalue > maxvalue:
raise ValueError('The {0} is too large: {1}, but maximum value is {2}.'.format( \
description, inputvalue, maxvalue))
|
python
|
def _checkNumerical(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):
"""Check that the given numerical value is valid.
Args:
* inputvalue (numerical): The value to be checked.
* minvalue (numerical): Minimum value. Use None to skip this part of the test.
* maxvalue (numerical): Maximum value. Use None to skip this part of the test.
* description (string): Used in error messages for the checked inputvalue
Raises:
TypeError, ValueError
Note: Can not use the function :func:`_checkString`, as it uses this function internally.
"""
# Type checking
if not isinstance(description, str):
raise TypeError('The description should be a string. Given: {0!r}'.format(description))
if not isinstance(inputvalue, (int, long, float)):
raise TypeError('The {0} must be numerical. Given: {1!r}'.format(description, inputvalue))
if not isinstance(minvalue, (int, float, long, type(None))):
raise TypeError('The minvalue must be numeric or None. Given: {0!r}'.format(minvalue))
if not isinstance(maxvalue, (int, float, long, type(None))):
raise TypeError('The maxvalue must be numeric or None. Given: {0!r}'.format(maxvalue))
# Consistency checking
if (not minvalue is None) and (not maxvalue is None):
if maxvalue < minvalue:
raise ValueError('The maxvalue must not be smaller than minvalue. Given: {0} and {1}, respectively.'.format( \
maxvalue, minvalue))
# Value checking
if not minvalue is None:
if inputvalue < minvalue:
raise ValueError('The {0} is too small: {1}, but minimum value is {2}.'.format( \
description, inputvalue, minvalue))
if not maxvalue is None:
if inputvalue > maxvalue:
raise ValueError('The {0} is too large: {1}, but maximum value is {2}.'.format( \
description, inputvalue, maxvalue))
|
[
"def",
"_checkNumerical",
"(",
"inputvalue",
",",
"minvalue",
"=",
"None",
",",
"maxvalue",
"=",
"None",
",",
"description",
"=",
"'inputvalue'",
")",
":",
"# Type checking",
"if",
"not",
"isinstance",
"(",
"description",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'The description should be a string. Given: {0!r}'",
".",
"format",
"(",
"description",
")",
")",
"if",
"not",
"isinstance",
"(",
"inputvalue",
",",
"(",
"int",
",",
"long",
",",
"float",
")",
")",
":",
"raise",
"TypeError",
"(",
"'The {0} must be numerical. Given: {1!r}'",
".",
"format",
"(",
"description",
",",
"inputvalue",
")",
")",
"if",
"not",
"isinstance",
"(",
"minvalue",
",",
"(",
"int",
",",
"float",
",",
"long",
",",
"type",
"(",
"None",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'The minvalue must be numeric or None. Given: {0!r}'",
".",
"format",
"(",
"minvalue",
")",
")",
"if",
"not",
"isinstance",
"(",
"maxvalue",
",",
"(",
"int",
",",
"float",
",",
"long",
",",
"type",
"(",
"None",
")",
")",
")",
":",
"raise",
"TypeError",
"(",
"'The maxvalue must be numeric or None. Given: {0!r}'",
".",
"format",
"(",
"maxvalue",
")",
")",
"# Consistency checking",
"if",
"(",
"not",
"minvalue",
"is",
"None",
")",
"and",
"(",
"not",
"maxvalue",
"is",
"None",
")",
":",
"if",
"maxvalue",
"<",
"minvalue",
":",
"raise",
"ValueError",
"(",
"'The maxvalue must not be smaller than minvalue. Given: {0} and {1}, respectively.'",
".",
"format",
"(",
"maxvalue",
",",
"minvalue",
")",
")",
"# Value checking",
"if",
"not",
"minvalue",
"is",
"None",
":",
"if",
"inputvalue",
"<",
"minvalue",
":",
"raise",
"ValueError",
"(",
"'The {0} is too small: {1}, but minimum value is {2}.'",
".",
"format",
"(",
"description",
",",
"inputvalue",
",",
"minvalue",
")",
")",
"if",
"not",
"maxvalue",
"is",
"None",
":",
"if",
"inputvalue",
">",
"maxvalue",
":",
"raise",
"ValueError",
"(",
"'The {0} is too large: {1}, but maximum value is {2}.'",
".",
"format",
"(",
"description",
",",
"inputvalue",
",",
"maxvalue",
")",
")"
] |
Check that the given numerical value is valid.
Args:
* inputvalue (numerical): The value to be checked.
* minvalue (numerical): Minimum value. Use None to skip this part of the test.
* maxvalue (numerical): Maximum value. Use None to skip this part of the test.
* description (string): Used in error messages for the checked inputvalue
Raises:
TypeError, ValueError
Note: Can not use the function :func:`_checkString`, as it uses this function internally.
|
[
"Check",
"that",
"the",
"given",
"numerical",
"value",
"is",
"valid",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2279-L2322
|
train
|
pyhys/minimalmodbus
|
minimalmodbus.py
|
_checkBool
|
def _checkBool(inputvalue, description='inputvalue'):
"""Check that the given inputvalue is a boolean.
Args:
* inputvalue (boolean): The value to be checked.
* description (string): Used in error messages for the checked inputvalue.
Raises:
TypeError, ValueError
"""
_checkString(description, minlength=1, description='description string')
if not isinstance(inputvalue, bool):
raise TypeError('The {0} must be boolean. Given: {1!r}'.format(description, inputvalue))
|
python
|
def _checkBool(inputvalue, description='inputvalue'):
"""Check that the given inputvalue is a boolean.
Args:
* inputvalue (boolean): The value to be checked.
* description (string): Used in error messages for the checked inputvalue.
Raises:
TypeError, ValueError
"""
_checkString(description, minlength=1, description='description string')
if not isinstance(inputvalue, bool):
raise TypeError('The {0} must be boolean. Given: {1!r}'.format(description, inputvalue))
|
[
"def",
"_checkBool",
"(",
"inputvalue",
",",
"description",
"=",
"'inputvalue'",
")",
":",
"_checkString",
"(",
"description",
",",
"minlength",
"=",
"1",
",",
"description",
"=",
"'description string'",
")",
"if",
"not",
"isinstance",
"(",
"inputvalue",
",",
"bool",
")",
":",
"raise",
"TypeError",
"(",
"'The {0} must be boolean. Given: {1!r}'",
".",
"format",
"(",
"description",
",",
"inputvalue",
")",
")"
] |
Check that the given inputvalue is a boolean.
Args:
* inputvalue (boolean): The value to be checked.
* description (string): Used in error messages for the checked inputvalue.
Raises:
TypeError, ValueError
|
[
"Check",
"that",
"the",
"given",
"inputvalue",
"is",
"a",
"boolean",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2325-L2338
|
train
|
pyhys/minimalmodbus
|
minimalmodbus.py
|
_getDiagnosticString
|
def _getDiagnosticString():
"""Generate a diagnostic string, showing the module version, the platform, current directory etc.
Returns:
A descriptive string.
"""
text = '\n## Diagnostic output from minimalmodbus ## \n\n'
text += 'Minimalmodbus version: ' + __version__ + '\n'
text += 'Minimalmodbus status: ' + __status__ + '\n'
text += 'File name (with relative path): ' + __file__ + '\n'
text += 'Full file path: ' + os.path.abspath(__file__) + '\n\n'
text += 'pySerial version: ' + serial.VERSION + '\n'
text += 'pySerial full file path: ' + os.path.abspath(serial.__file__) + '\n\n'
text += 'Platform: ' + sys.platform + '\n'
text += 'Filesystem encoding: ' + repr(sys.getfilesystemencoding()) + '\n'
text += 'Byteorder: ' + sys.byteorder + '\n'
text += 'Python version: ' + sys.version + '\n'
text += 'Python version info: ' + repr(sys.version_info) + '\n'
text += 'Python flags: ' + repr(sys.flags) + '\n'
text += 'Python argv: ' + repr(sys.argv) + '\n'
text += 'Python prefix: ' + repr(sys.prefix) + '\n'
text += 'Python exec prefix: ' + repr(sys.exec_prefix) + '\n'
text += 'Python executable: ' + repr(sys.executable) + '\n'
try:
text += 'Long info: ' + repr(sys.long_info) + '\n'
except:
text += 'Long info: (none)\n' # For Python3 compatibility
try:
text += 'Float repr style: ' + repr(sys.float_repr_style) + '\n\n'
except:
text += 'Float repr style: (none) \n\n' # For Python 2.6 compatibility
text += 'Variable __name__: ' + __name__ + '\n'
text += 'Current directory: ' + os.getcwd() + '\n\n'
text += 'Python path: \n'
text += '\n'.join(sys.path) + '\n'
text += '\n## End of diagnostic output ## \n'
return text
|
python
|
def _getDiagnosticString():
"""Generate a diagnostic string, showing the module version, the platform, current directory etc.
Returns:
A descriptive string.
"""
text = '\n## Diagnostic output from minimalmodbus ## \n\n'
text += 'Minimalmodbus version: ' + __version__ + '\n'
text += 'Minimalmodbus status: ' + __status__ + '\n'
text += 'File name (with relative path): ' + __file__ + '\n'
text += 'Full file path: ' + os.path.abspath(__file__) + '\n\n'
text += 'pySerial version: ' + serial.VERSION + '\n'
text += 'pySerial full file path: ' + os.path.abspath(serial.__file__) + '\n\n'
text += 'Platform: ' + sys.platform + '\n'
text += 'Filesystem encoding: ' + repr(sys.getfilesystemencoding()) + '\n'
text += 'Byteorder: ' + sys.byteorder + '\n'
text += 'Python version: ' + sys.version + '\n'
text += 'Python version info: ' + repr(sys.version_info) + '\n'
text += 'Python flags: ' + repr(sys.flags) + '\n'
text += 'Python argv: ' + repr(sys.argv) + '\n'
text += 'Python prefix: ' + repr(sys.prefix) + '\n'
text += 'Python exec prefix: ' + repr(sys.exec_prefix) + '\n'
text += 'Python executable: ' + repr(sys.executable) + '\n'
try:
text += 'Long info: ' + repr(sys.long_info) + '\n'
except:
text += 'Long info: (none)\n' # For Python3 compatibility
try:
text += 'Float repr style: ' + repr(sys.float_repr_style) + '\n\n'
except:
text += 'Float repr style: (none) \n\n' # For Python 2.6 compatibility
text += 'Variable __name__: ' + __name__ + '\n'
text += 'Current directory: ' + os.getcwd() + '\n\n'
text += 'Python path: \n'
text += '\n'.join(sys.path) + '\n'
text += '\n## End of diagnostic output ## \n'
return text
|
[
"def",
"_getDiagnosticString",
"(",
")",
":",
"text",
"=",
"'\\n## Diagnostic output from minimalmodbus ## \\n\\n'",
"text",
"+=",
"'Minimalmodbus version: '",
"+",
"__version__",
"+",
"'\\n'",
"text",
"+=",
"'Minimalmodbus status: '",
"+",
"__status__",
"+",
"'\\n'",
"text",
"+=",
"'File name (with relative path): '",
"+",
"__file__",
"+",
"'\\n'",
"text",
"+=",
"'Full file path: '",
"+",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
"+",
"'\\n\\n'",
"text",
"+=",
"'pySerial version: '",
"+",
"serial",
".",
"VERSION",
"+",
"'\\n'",
"text",
"+=",
"'pySerial full file path: '",
"+",
"os",
".",
"path",
".",
"abspath",
"(",
"serial",
".",
"__file__",
")",
"+",
"'\\n\\n'",
"text",
"+=",
"'Platform: '",
"+",
"sys",
".",
"platform",
"+",
"'\\n'",
"text",
"+=",
"'Filesystem encoding: '",
"+",
"repr",
"(",
"sys",
".",
"getfilesystemencoding",
"(",
")",
")",
"+",
"'\\n'",
"text",
"+=",
"'Byteorder: '",
"+",
"sys",
".",
"byteorder",
"+",
"'\\n'",
"text",
"+=",
"'Python version: '",
"+",
"sys",
".",
"version",
"+",
"'\\n'",
"text",
"+=",
"'Python version info: '",
"+",
"repr",
"(",
"sys",
".",
"version_info",
")",
"+",
"'\\n'",
"text",
"+=",
"'Python flags: '",
"+",
"repr",
"(",
"sys",
".",
"flags",
")",
"+",
"'\\n'",
"text",
"+=",
"'Python argv: '",
"+",
"repr",
"(",
"sys",
".",
"argv",
")",
"+",
"'\\n'",
"text",
"+=",
"'Python prefix: '",
"+",
"repr",
"(",
"sys",
".",
"prefix",
")",
"+",
"'\\n'",
"text",
"+=",
"'Python exec prefix: '",
"+",
"repr",
"(",
"sys",
".",
"exec_prefix",
")",
"+",
"'\\n'",
"text",
"+=",
"'Python executable: '",
"+",
"repr",
"(",
"sys",
".",
"executable",
")",
"+",
"'\\n'",
"try",
":",
"text",
"+=",
"'Long info: '",
"+",
"repr",
"(",
"sys",
".",
"long_info",
")",
"+",
"'\\n'",
"except",
":",
"text",
"+=",
"'Long info: (none)\\n'",
"# For Python3 compatibility",
"try",
":",
"text",
"+=",
"'Float repr style: '",
"+",
"repr",
"(",
"sys",
".",
"float_repr_style",
")",
"+",
"'\\n\\n'",
"except",
":",
"text",
"+=",
"'Float repr style: (none) \\n\\n'",
"# For Python 2.6 compatibility",
"text",
"+=",
"'Variable __name__: '",
"+",
"__name__",
"+",
"'\\n'",
"text",
"+=",
"'Current directory: '",
"+",
"os",
".",
"getcwd",
"(",
")",
"+",
"'\\n\\n'",
"text",
"+=",
"'Python path: \\n'",
"text",
"+=",
"'\\n'",
".",
"join",
"(",
"sys",
".",
"path",
")",
"+",
"'\\n'",
"text",
"+=",
"'\\n## End of diagnostic output ## \\n'",
"return",
"text"
] |
Generate a diagnostic string, showing the module version, the platform, current directory etc.
Returns:
A descriptive string.
|
[
"Generate",
"a",
"diagnostic",
"string",
"showing",
"the",
"module",
"version",
"the",
"platform",
"current",
"directory",
"etc",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L2513-L2550
|
train
|
pyhys/minimalmodbus
|
minimalmodbus.py
|
Instrument.read_bit
|
def read_bit(self, registeraddress, functioncode=2):
"""Read one bit from the slave.
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* functioncode (int): Modbus function code. Can be 1 or 2.
Returns:
The bit value 0 or 1 (int).
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [1, 2])
return self._genericCommand(functioncode, registeraddress)
|
python
|
def read_bit(self, registeraddress, functioncode=2):
"""Read one bit from the slave.
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* functioncode (int): Modbus function code. Can be 1 or 2.
Returns:
The bit value 0 or 1 (int).
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [1, 2])
return self._genericCommand(functioncode, registeraddress)
|
[
"def",
"read_bit",
"(",
"self",
",",
"registeraddress",
",",
"functioncode",
"=",
"2",
")",
":",
"_checkFunctioncode",
"(",
"functioncode",
",",
"[",
"1",
",",
"2",
"]",
")",
"return",
"self",
".",
"_genericCommand",
"(",
"functioncode",
",",
"registeraddress",
")"
] |
Read one bit from the slave.
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* functioncode (int): Modbus function code. Can be 1 or 2.
Returns:
The bit value 0 or 1 (int).
Raises:
ValueError, TypeError, IOError
|
[
"Read",
"one",
"bit",
"from",
"the",
"slave",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L178-L193
|
train
|
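Typical usage goes through an `Instrument` object bound to a serial port and slave address. A sketch with hypothetical port name and register numbers; adjust both for the actual hardware:

```python
import minimalmodbus

# Hypothetical port and slave address.
instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 1)

state = instrument.read_bit(61)                  # discrete input, function code 2
coil = instrument.read_bit(71, functioncode=1)   # coil, function code 1
print(state, coil)                               # each is 0 or 1
```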
pyhys/minimalmodbus
|
minimalmodbus.py
|
Instrument.write_bit
|
def write_bit(self, registeraddress, value, functioncode=5):
"""Write one bit to the slave.
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* value (int): 0 or 1
* functioncode (int): Modbus function code. Can be 5 or 15.
Returns:
None
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [5, 15])
_checkInt(value, minvalue=0, maxvalue=1, description='input value')
self._genericCommand(functioncode, registeraddress, value)
|
python
|
def write_bit(self, registeraddress, value, functioncode=5):
"""Write one bit to the slave.
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* value (int): 0 or 1
* functioncode (int): Modbus function code. Can be 5 or 15.
Returns:
None
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [5, 15])
_checkInt(value, minvalue=0, maxvalue=1, description='input value')
self._genericCommand(functioncode, registeraddress, value)
|
[
"def",
"write_bit",
"(",
"self",
",",
"registeraddress",
",",
"value",
",",
"functioncode",
"=",
"5",
")",
":",
"_checkFunctioncode",
"(",
"functioncode",
",",
"[",
"5",
",",
"15",
"]",
")",
"_checkInt",
"(",
"value",
",",
"minvalue",
"=",
"0",
",",
"maxvalue",
"=",
"1",
",",
"description",
"=",
"'input value'",
")",
"self",
".",
"_genericCommand",
"(",
"functioncode",
",",
"registeraddress",
",",
"value",
")"
] |
Write one bit to the slave.
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* value (int): 0 or 1
* functioncode (int): Modbus function code. Can be 5 or 15.
Returns:
None
Raises:
ValueError, TypeError, IOError
|
[
"Write",
"one",
"bit",
"to",
"the",
"slave",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L196-L213
|
train
|
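The matching sketch for write_bit(), again with a hypothetical port, slave address and coil address.

import minimalmodbus

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 1)  # hypothetical setup

# Set coil 71 via function code 5 (the default). Pass functioncode=15
# if the slave only implements 'write multiple coils'.
instrument.write_bit(71, 1)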
pyhys/minimalmodbus
|
minimalmodbus.py
|
Instrument.read_register
|
def read_register(self, registeraddress, numberOfDecimals=0, functioncode=3, signed=False):
"""Read an integer from one 16-bit register in the slave, possibly scaling it.
The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16").
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* numberOfDecimals (int): The number of decimals for content conversion.
* functioncode (int): Modbus function code. Can be 3 or 4.
* signed (bool): Whether the data should be interpreted as unsigned or signed.
If a value of 77.0 is stored internally in the slave register as 770, then use ``numberOfDecimals=1``
which will divide the received data by 10 before returning the value.
Similarly ``numberOfDecimals=2`` will divide the received data by 100 before returning the value.
Some manufacturers allow negative values for some registers. Instead of
an allowed integer range 0 to 65535, a range -32768 to 32767 is allowed. This is
implemented by interpreting any received value in the upper range (32768 to 65535)
as a negative value (in the range -32768 to -1).
Use the parameter ``signed=True`` if reading from a register that can hold
negative values. Then upper range data will be automatically converted into
negative return values (two's complement).
============== ================== ================ ===============
``signed`` Data type in slave Alternative name Range
============== ================== ================ ===============
:const:`False` Unsigned INT16 Unsigned short 0 to 65535
:const:`True` INT16 Short -32768 to 32767
============== ================== ================ ===============
Returns:
The register data in numerical value (int or float).
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [3, 4])
_checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals')
_checkBool(signed, description='signed')
return self._genericCommand(functioncode, registeraddress, numberOfDecimals=numberOfDecimals, signed=signed)
|
python
|
def read_register(self, registeraddress, numberOfDecimals=0, functioncode=3, signed=False):
"""Read an integer from one 16-bit register in the slave, possibly scaling it.
The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16").
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* numberOfDecimals (int): The number of decimals for content conversion.
* functioncode (int): Modbus function code. Can be 3 or 4.
* signed (bool): Whether the data should be interpreted as unsigned or signed.
If a value of 77.0 is stored internally in the slave register as 770, then use ``numberOfDecimals=1``
which will divide the received data by 10 before returning the value.
Similarly ``numberOfDecimals=2`` will divide the received data by 100 before returning the value.
Some manufacturers allow negative values for some registers. Instead of
an allowed integer range 0 to 65535, a range -32768 to 32767 is allowed. This is
implemented by interpreting any received value in the upper range (32768 to 65535)
as a negative value (in the range -32768 to -1).
Use the parameter ``signed=True`` if reading from a register that can hold
negative values. Then upper range data will be automatically converted into
negative return values (two's complement).
============== ================== ================ ===============
``signed`` Data type in slave Alternative name Range
============== ================== ================ ===============
:const:`False` Unsigned INT16 Unsigned short 0 to 65535
:const:`True` INT16 Short -32768 to 32767
============== ================== ================ ===============
Returns:
The register data in numerical value (int or float).
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [3, 4])
_checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals')
_checkBool(signed, description='signed')
return self._genericCommand(functioncode, registeraddress, numberOfDecimals=numberOfDecimals, signed=signed)
|
[
"def",
"read_register",
"(",
"self",
",",
"registeraddress",
",",
"numberOfDecimals",
"=",
"0",
",",
"functioncode",
"=",
"3",
",",
"signed",
"=",
"False",
")",
":",
"_checkFunctioncode",
"(",
"functioncode",
",",
"[",
"3",
",",
"4",
"]",
")",
"_checkInt",
"(",
"numberOfDecimals",
",",
"minvalue",
"=",
"0",
",",
"maxvalue",
"=",
"10",
",",
"description",
"=",
"'number of decimals'",
")",
"_checkBool",
"(",
"signed",
",",
"description",
"=",
"'signed'",
")",
"return",
"self",
".",
"_genericCommand",
"(",
"functioncode",
",",
"registeraddress",
",",
"numberOfDecimals",
"=",
"numberOfDecimals",
",",
"signed",
"=",
"signed",
")"
] |
Read an integer from one 16-bit register in the slave, possibly scaling it.
The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16").
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* numberOfDecimals (int): The number of decimals for content conversion.
* functioncode (int): Modbus function code. Can be 3 or 4.
* signed (bool): Whether the data should be interpreted as unsigned or signed.
If a value of 77.0 is stored internally in the slave register as 770, then use ``numberOfDecimals=1``
which will divide the received data by 10 before returning the value.
Similarly ``numberOfDecimals=2`` will divide the received data by 100 before returning the value.
Some manufacturers allow negative values for some registers. Instead of
an allowed integer range 0 to 65535, a range -32768 to 32767 is allowed. This is
implemented by interpreting any received value in the upper range (32768 to 65535)
as a negative value (in the range -32768 to -1).
Use the parameter ``signed=True`` if reading from a register that can hold
negative values. Then upper range data will be automatically converted into
negative return values (two's complement).
============== ================== ================ ===============
``signed`` Data type in slave Alternative name Range
============== ================== ================ ===============
:const:`False` Unsigned INT16 Unsigned short 0 to 65535
:const:`True` INT16 Short -32768 to 32767
============== ================== ================ ===============
Returns:
The register data in numerical value (int or float).
Raises:
ValueError, TypeError, IOError
|
[
"Read",
"an",
"integer",
"from",
"one",
"16",
"-",
"bit",
"register",
"in",
"the",
"slave",
"possibly",
"scaling",
"it",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L216-L258
|
train
|
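A short sketch of the two conversions read_register() can apply, using hypothetical register addresses.

import minimalmodbus

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 1)  # hypothetical setup

# If the slave stores 77.0 as the raw integer 770, numberOfDecimals=1
# divides the received value by 10, so this returns 77.0.
temperature = instrument.read_register(289, numberOfDecimals=1)

# signed=True applies two's complement: raw 65535 is returned as -1.
offset = instrument.read_register(290, signed=True)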
pyhys/minimalmodbus
|
minimalmodbus.py
|
Instrument.write_register
|
def write_register(self, registeraddress, value, numberOfDecimals=0, functioncode=16, signed=False):
"""Write an integer to one 16-bit register in the slave, possibly scaling it.
The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16").
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* value (int or float): The value to store in the slave register (might be scaled before sending).
* numberOfDecimals (int): The number of decimals for content conversion.
* functioncode (int): Modbus function code. Can be 6 or 16.
* signed (bool): Whether the data should be interpreted as unsigned or signed.
To store for example ``value=77.0``, use ``numberOfDecimals=1`` if the slave register will hold it as 770 internally.
This will multiply ``value`` by 10 before sending it to the slave register.
Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register.
For a discussion on negative values, the allowed range and alternative names, see :meth:`.read_register`.
Use the parameter ``signed=True`` if writing to a register that can hold
negative values. Then negative input will be automatically converted into
upper range data (two's complement).
Returns:
None
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [6, 16])
_checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals')
_checkBool(signed, description='signed')
_checkNumerical(value, description='input value')
self._genericCommand(functioncode, registeraddress, value, numberOfDecimals, signed=signed)
|
python
|
def write_register(self, registeraddress, value, numberOfDecimals=0, functioncode=16, signed=False):
"""Write an integer to one 16-bit register in the slave, possibly scaling it.
The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16").
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* value (int or float): The value to store in the slave register (might be scaled before sending).
* numberOfDecimals (int): The number of decimals for content conversion.
* functioncode (int): Modbus function code. Can be 6 or 16.
* signed (bool): Whether the data should be interpreted as unsigned or signed.
To store for example ``value=77.0``, use ``numberOfDecimals=1`` if the slave register will hold it as 770 internally.
This will multiply ``value`` by 10 before sending it to the slave register.
Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register.
For a discussion on negative values, the allowed range and alternative names, see :meth:`.read_register`.
Use the parameter ``signed=True`` if writing to a register that can hold
negative values. Then negative input will be automatically converted into
upper range data (two's complement).
Returns:
None
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [6, 16])
_checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals')
_checkBool(signed, description='signed')
_checkNumerical(value, description='input value')
self._genericCommand(functioncode, registeraddress, value, numberOfDecimals, signed=signed)
|
[
"def",
"write_register",
"(",
"self",
",",
"registeraddress",
",",
"value",
",",
"numberOfDecimals",
"=",
"0",
",",
"functioncode",
"=",
"16",
",",
"signed",
"=",
"False",
")",
":",
"_checkFunctioncode",
"(",
"functioncode",
",",
"[",
"6",
",",
"16",
"]",
")",
"_checkInt",
"(",
"numberOfDecimals",
",",
"minvalue",
"=",
"0",
",",
"maxvalue",
"=",
"10",
",",
"description",
"=",
"'number of decimals'",
")",
"_checkBool",
"(",
"signed",
",",
"description",
"=",
"'signed'",
")",
"_checkNumerical",
"(",
"value",
",",
"description",
"=",
"'input value'",
")",
"self",
".",
"_genericCommand",
"(",
"functioncode",
",",
"registeraddress",
",",
"value",
",",
"numberOfDecimals",
",",
"signed",
"=",
"signed",
")"
] |
Write an integer to one 16-bit register in the slave, possibly scaling it.
The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16").
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* value (int or float): The value to store in the slave register (might be scaled before sending).
* numberOfDecimals (int): The number of decimals for content conversion.
* functioncode (int): Modbus function code. Can be 6 or 16.
* signed (bool): Whether the data should be interpreted as unsigned or signed.
To store for example ``value=77.0``, use ``numberOfDecimals=1`` if the slave register will hold it as 770 internally.
This will multiply ``value`` by 10 before sending it to the slave register.
Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register.
For a discussion on negative values, the allowed range and alternative names, see :meth:`.read_register`.
Use the parameter ``signed=True`` if writing to a register that can hold
negative values. Then negative input will be automatically converted into
upper range data (two's complement).
Returns:
None
Raises:
ValueError, TypeError, IOError
|
[
"Write",
"an",
"integer",
"to",
"one",
"16",
"-",
"bit",
"register",
"in",
"the",
"slave",
"possibly",
"scaling",
"it",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L261-L296
|
train
|
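The mirror image for write_register(), with the same hypothetical addresses.

import minimalmodbus

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 1)  # hypothetical setup

# 77.0 is multiplied by 10 and sent as the raw integer 770.
instrument.write_register(289, 77.0, numberOfDecimals=1)

# With signed=True, -1 is converted to raw 65535 before sending.
instrument.write_register(290, -1, signed=True)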
pyhys/minimalmodbus
|
minimalmodbus.py
|
Instrument.read_float
|
def read_float(self, registeraddress, functioncode=3, numberOfRegisters=2):
"""Read a floating point number from the slave.
Floats are stored in two or more consecutive 16-bit registers in the slave. The
encoding is according to the standard IEEE 754.
There are differences in the byte order used by different manufacturers. A floating
point value of 1.0 is encoded (in single precision) as 3f800000 (hex). In this
implementation the data will be sent as ``'\\x3f\\x80'`` and ``'\\x00\\x00'``
to two consecutetive registers . Make sure to test that it makes sense for your instrument.
It is pretty straight-forward to change this code if some other byte order is
required by anyone (see support section).
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* functioncode (int): Modbus function code. Can be 3 or 4.
* numberOfRegisters (int): The number of registers allocated for the float. Can be 2 or 4.
====================================== ================= =========== =================
Type of floating point number in slave Size Registers Range
====================================== ================= =========== =================
Single precision (binary32) 32 bits (4 bytes) 2 registers 1.4E-45 to 3.4E38
Double precision (binary64) 64 bits (8 bytes) 4 registers 5E-324 to 1.8E308
====================================== ================= =========== =================
Returns:
The numerical value (float).
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [3, 4])
_checkInt(numberOfRegisters, minvalue=2, maxvalue=4, description='number of registers')
return self._genericCommand(functioncode, registeraddress, numberOfRegisters=numberOfRegisters, payloadformat='float')
|
python
|
def read_float(self, registeraddress, functioncode=3, numberOfRegisters=2):
"""Read a floating point number from the slave.
Floats are stored in two or more consecutive 16-bit registers in the slave. The
encoding is according to the standard IEEE 754.
There are differences in the byte order used by different manufacturers. A floating
point value of 1.0 is encoded (in single precision) as 3f800000 (hex). In this
implementation the data will be sent as ``'\\x3f\\x80'`` and ``'\\x00\\x00'``
to two consecutive registers. Make sure to test that it makes sense for your instrument.
It is pretty straightforward to change this code if some other byte order is
required (see the support section).
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* functioncode (int): Modbus function code. Can be 3 or 4.
* numberOfRegisters (int): The number of registers allocated for the float. Can be 2 or 4.
====================================== ================= =========== =================
Type of floating point number in slave Size Registers Range
====================================== ================= =========== =================
Single precision (binary32) 32 bits (4 bytes) 2 registers 1.4E-45 to 3.4E38
Double precision (binary64) 64 bits (8 bytes) 4 registers 5E-324 to 1.8E308
====================================== ================= =========== =================
Returns:
The numerical value (float).
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [3, 4])
_checkInt(numberOfRegisters, minvalue=2, maxvalue=4, description='number of registers')
return self._genericCommand(functioncode, registeraddress, numberOfRegisters=numberOfRegisters, payloadformat='float')
|
[
"def",
"read_float",
"(",
"self",
",",
"registeraddress",
",",
"functioncode",
"=",
"3",
",",
"numberOfRegisters",
"=",
"2",
")",
":",
"_checkFunctioncode",
"(",
"functioncode",
",",
"[",
"3",
",",
"4",
"]",
")",
"_checkInt",
"(",
"numberOfRegisters",
",",
"minvalue",
"=",
"2",
",",
"maxvalue",
"=",
"4",
",",
"description",
"=",
"'number of registers'",
")",
"return",
"self",
".",
"_genericCommand",
"(",
"functioncode",
",",
"registeraddress",
",",
"numberOfRegisters",
"=",
"numberOfRegisters",
",",
"payloadformat",
"=",
"'float'",
")"
] |
Read a floating point number from the slave.
Floats are stored in two or more consecutive 16-bit registers in the slave. The
encoding is according to the standard IEEE 754.
There are differences in the byte order used by different manufacturers. A floating
point value of 1.0 is encoded (in single precision) as 3f800000 (hex). In this
implementation the data will be sent as ``'\\x3f\\x80'`` and ``'\\x00\\x00'``
to two consecutive registers. Make sure to test that it makes sense for your instrument.
It is pretty straightforward to change this code if some other byte order is
required (see the support section).
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* functioncode (int): Modbus function code. Can be 3 or 4.
* numberOfRegisters (int): The number of registers allocated for the float. Can be 2 or 4.
====================================== ================= =========== =================
Type of floating point number in slave Size Registers Range
====================================== ================= =========== =================
Single precision (binary32) 32 bits (4 bytes) 2 registers 1.4E-45 to 3.4E38
Double precision (binary64) 64 bits (8 bytes) 4 registers 5E-324 to 1.8E308
====================================== ================= =========== =================
Returns:
The numerical value (float).
Raises:
ValueError, TypeError, IOError
|
[
"Read",
"a",
"floating",
"point",
"number",
"from",
"the",
"slave",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L358-L392
|
train
|
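A sketch for read_float(); the register address is a placeholder, and as the docstring warns, verify the byte order against your instrument's manual.

import minimalmodbus

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 1)  # hypothetical setup

# Single precision occupies 2 registers (the default).
value = instrument.read_float(103)

# Double precision occupies 4 registers.
value_dp = instrument.read_float(103, numberOfRegisters=4)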
pyhys/minimalmodbus
|
minimalmodbus.py
|
Instrument.write_float
|
def write_float(self, registeraddress, value, numberOfRegisters=2):
"""Write a floating point number to the slave.
Floats are stored in two or more consecutive 16-bit registers in the slave.
Uses Modbus function code 16.
For a discussion on precision, the number of registers and byte order, see :meth:`.read_float`.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* value (float or int): The value to store in the slave
* numberOfRegisters (int): The number of registers allocated for the float. Can be 2 or 4.
Returns:
None
Raises:
ValueError, TypeError, IOError
"""
_checkNumerical(value, description='input value')
_checkInt(numberOfRegisters, minvalue=2, maxvalue=4, description='number of registers')
self._genericCommand(16, registeraddress, value, \
numberOfRegisters=numberOfRegisters, payloadformat='float')
|
python
|
def write_float(self, registeraddress, value, numberOfRegisters=2):
"""Write a floating point number to the slave.
Floats are stored in two or more consecutive 16-bit registers in the slave.
Uses Modbus function code 16.
For a discussion on precision, the number of registers and byte order, see :meth:`.read_float`.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* value (float or int): The value to store in the slave
* numberOfRegisters (int): The number of registers allocated for the float. Can be 2 or 4.
Returns:
None
Raises:
ValueError, TypeError, IOError
"""
_checkNumerical(value, description='input value')
_checkInt(numberOfRegisters, minvalue=2, maxvalue=4, description='number of registers')
self._genericCommand(16, registeraddress, value, \
numberOfRegisters=numberOfRegisters, payloadformat='float')
|
[
"def",
"write_float",
"(",
"self",
",",
"registeraddress",
",",
"value",
",",
"numberOfRegisters",
"=",
"2",
")",
":",
"_checkNumerical",
"(",
"value",
",",
"description",
"=",
"'input value'",
")",
"_checkInt",
"(",
"numberOfRegisters",
",",
"minvalue",
"=",
"2",
",",
"maxvalue",
"=",
"4",
",",
"description",
"=",
"'number of registers'",
")",
"self",
".",
"_genericCommand",
"(",
"16",
",",
"registeraddress",
",",
"value",
",",
"numberOfRegisters",
"=",
"numberOfRegisters",
",",
"payloadformat",
"=",
"'float'",
")"
] |
Write a floating point number to the slave.
Floats are stored in two or more consecutive 16-bit registers in the slave.
Uses Modbus function code 16.
For a discussion on precision, the number of registers and byte order, see :meth:`.read_float`.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* value (float or int): The value to store in the slave
* numberOfRegisters (int): The number of registers allocated for the float. Can be 2 or 4.
Returns:
None
Raises:
ValueError, TypeError, IOError
|
[
"Write",
"a",
"floating",
"point",
"number",
"to",
"the",
"slave",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L395-L419
|
train
|
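The corresponding write, again with a placeholder address.

import minimalmodbus

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 1)  # hypothetical setup

# Sends 3.14 as an IEEE 754 single into two consecutive registers
# starting at address 103, via Modbus function code 16.
instrument.write_float(103, 3.14)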
pyhys/minimalmodbus
|
minimalmodbus.py
|
Instrument.read_string
|
def read_string(self, registeraddress, numberOfRegisters=16, functioncode=3):
"""Read a string from the slave.
Each 16-bit register in the slave is interpreted as two characters (1 byte = 8 bits).
For example 16 consecutive registers can hold 32 characters (32 bytes).
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* numberOfRegisters (int): The number of registers allocated for the string.
* functioncode (int): Modbus function code. Can be 3 or 4.
Returns:
The string (str).
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [3, 4])
_checkInt(numberOfRegisters, minvalue=1, description='number of registers for read string')
return self._genericCommand(functioncode, registeraddress, \
numberOfRegisters=numberOfRegisters, payloadformat='string')
|
python
|
def read_string(self, registeraddress, numberOfRegisters=16, functioncode=3):
"""Read a string from the slave.
Each 16-bit register in the slave is interpreted as two characters (1 byte = 8 bits).
For example 16 consecutive registers can hold 32 characters (32 bytes).
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* numberOfRegisters (int): The number of registers allocated for the string.
* functioncode (int): Modbus function code. Can be 3 or 4.
Returns:
The string (str).
Raises:
ValueError, TypeError, IOError
"""
_checkFunctioncode(functioncode, [3, 4])
_checkInt(numberOfRegisters, minvalue=1, description='number of registers for read string')
return self._genericCommand(functioncode, registeraddress, \
numberOfRegisters=numberOfRegisters, payloadformat='string')
|
[
"def",
"read_string",
"(",
"self",
",",
"registeraddress",
",",
"numberOfRegisters",
"=",
"16",
",",
"functioncode",
"=",
"3",
")",
":",
"_checkFunctioncode",
"(",
"functioncode",
",",
"[",
"3",
",",
"4",
"]",
")",
"_checkInt",
"(",
"numberOfRegisters",
",",
"minvalue",
"=",
"1",
",",
"description",
"=",
"'number of registers for read string'",
")",
"return",
"self",
".",
"_genericCommand",
"(",
"functioncode",
",",
"registeraddress",
",",
"numberOfRegisters",
"=",
"numberOfRegisters",
",",
"payloadformat",
"=",
"'string'",
")"
] |
Read a string from the slave.
Each 16-bit register in the slave is interpreted as two characters (1 byte = 8 bits).
For example 16 consecutive registers can hold 32 characters (32 bytes).
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* numberOfRegisters (int): The number of registers allocated for the string.
* functioncode (int): Modbus function code. Can be 3 or 4.
Returns:
The string (str).
Raises:
ValueError, TypeError, IOError
|
[
"Read",
"a",
"string",
"from",
"the",
"slave",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L422-L443
|
train
|
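A sketch for read_string(), illustrating the two-characters-per-register packing; the addresses are placeholders.

import minimalmodbus

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 1)  # hypothetical setup

# 8 registers hold at most 16 characters (2 per register).
device_name = instrument.read_string(1, numberOfRegisters=8)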
pyhys/minimalmodbus
|
minimalmodbus.py
|
Instrument.write_string
|
def write_string(self, registeraddress, textstring, numberOfRegisters=16):
"""Write a string to the slave.
Each 16-bit register in the slave is interpreted as two characters (1 byte = 8 bits).
For example 16 consecutive registers can hold 32 characters (32 bytes).
Uses Modbus function code 16.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* textstring (str): The string to store in the slave
* numberOfRegisters (int): The number of registers allocated for the string.
If the textstring is longer than 2*numberOfRegisters, an error is raised.
Shorter strings are padded with spaces.
Returns:
None
Raises:
ValueError, TypeError, IOError
"""
_checkInt(numberOfRegisters, minvalue=1, description='number of registers for write string')
_checkString(textstring, 'input string', minlength=1, maxlength=2 * numberOfRegisters)
self._genericCommand(16, registeraddress, textstring, \
numberOfRegisters=numberOfRegisters, payloadformat='string')
|
python
|
def write_string(self, registeraddress, textstring, numberOfRegisters=16):
"""Write a string to the slave.
Each 16-bit register in the slave is interpreted as two characters (1 byte = 8 bits).
For example 16 consecutive registers can hold 32 characters (32 bytes).
Uses Modbus function code 16.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* textstring (str): The string to store in the slave
* numberOfRegisters (int): The number of registers allocated for the string.
If the textstring is longer than 2*numberOfRegisters, an error is raised.
Shorter strings are padded with spaces.
Returns:
None
Raises:
ValueError, TypeError, IOError
"""
_checkInt(numberOfRegisters, minvalue=1, description='number of registers for write string')
_checkString(textstring, 'input string', minlength=1, maxlength=2 * numberOfRegisters)
self._genericCommand(16, registeraddress, textstring, \
numberOfRegisters=numberOfRegisters, payloadformat='string')
|
[
"def",
"write_string",
"(",
"self",
",",
"registeraddress",
",",
"textstring",
",",
"numberOfRegisters",
"=",
"16",
")",
":",
"_checkInt",
"(",
"numberOfRegisters",
",",
"minvalue",
"=",
"1",
",",
"description",
"=",
"'number of registers for write string'",
")",
"_checkString",
"(",
"textstring",
",",
"'input string'",
",",
"minlength",
"=",
"1",
",",
"maxlength",
"=",
"2",
"*",
"numberOfRegisters",
")",
"self",
".",
"_genericCommand",
"(",
"16",
",",
"registeraddress",
",",
"textstring",
",",
"numberOfRegisters",
"=",
"numberOfRegisters",
",",
"payloadformat",
"=",
"'string'",
")"
] |
Write a string to the slave.
Each 16-bit register in the slave is interpreted as two characters (1 byte = 8 bits).
For example 16 consecutive registers can hold 32 characters (32 bytes).
Uses Modbus function code 16.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* textstring (str): The string to store in the slave
* numberOfRegisters (int): The number of registers allocated for the string.
If the textstring is longer than 2*numberOfRegisters, an error is raised.
Shorter strings are padded with spaces.
Returns:
None
Raises:
ValueError, TypeError, IOError
|
[
"Write",
"a",
"string",
"to",
"the",
"slave",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L446-L472
|
train
|
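The write counterpart; the string and address are placeholders.

import minimalmodbus

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 1)  # hypothetical setup

# 'ABC' fits in 4 registers (at most 8 characters); shorter strings
# are padded with spaces before sending, as the docstring states.
instrument.write_string(1, 'ABC', numberOfRegisters=4)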
pyhys/minimalmodbus
|
minimalmodbus.py
|
Instrument.write_registers
|
def write_registers(self, registeraddress, values):
"""Write integers to 16-bit registers in the slave.
The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16").
Uses Modbus function code 16.
The number of registers that will be written is defined by the length of the ``values`` list.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* values (list of int): The values to store in the slave registers.
Any scaling of the register data, or converting it to a negative number (two's complement)
must be done manually.
Returns:
None
Raises:
ValueError, TypeError, IOError
"""
if not isinstance(values, list):
raise TypeError('The "values parameter" must be a list. Given: {0!r}'.format(values))
_checkInt(len(values), minvalue=1, description='length of input list')
# Note: The content of the list is checked at content conversion.
self._genericCommand(16, registeraddress, values, numberOfRegisters=len(values), payloadformat='registers')
|
python
|
def write_registers(self, registeraddress, values):
"""Write integers to 16-bit registers in the slave.
The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16").
Uses Modbus function code 16.
The number of registers that will be written is defined by the length of the ``values`` list.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* values (list of int): The values to store in the slave registers.
Any scaling of the register data, or converting it to a negative number (two's complement)
must be done manually.
Returns:
None
Raises:
ValueError, TypeError, IOError
"""
if not isinstance(values, list):
raise TypeError('The "values parameter" must be a list. Given: {0!r}'.format(values))
_checkInt(len(values), minvalue=1, description='length of input list')
# Note: The content of the list is checked at content conversion.
self._genericCommand(16, registeraddress, values, numberOfRegisters=len(values), payloadformat='registers')
|
[
"def",
"write_registers",
"(",
"self",
",",
"registeraddress",
",",
"values",
")",
":",
"if",
"not",
"isinstance",
"(",
"values",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"'The \"values parameter\" must be a list. Given: {0!r}'",
".",
"format",
"(",
"values",
")",
")",
"_checkInt",
"(",
"len",
"(",
"values",
")",
",",
"minvalue",
"=",
"1",
",",
"description",
"=",
"'length of input list'",
")",
"# Note: The content of the list is checked at content conversion.",
"self",
".",
"_genericCommand",
"(",
"16",
",",
"registeraddress",
",",
"values",
",",
"numberOfRegisters",
"=",
"len",
"(",
"values",
")",
",",
"payloadformat",
"=",
"'registers'",
")"
] |
Write integers to 16-bit registers in the slave.
The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16").
Uses Modbus function code 16.
The number of registers that will be written is defined by the length of the ``values`` list.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* values (list of int): The values to store in the slave registers.
Any scaling of the register data, or converting it to a negative number (two's complement)
must be done manually.
Returns:
None
Raises:
ValueError, TypeError, IOError
|
[
"Write",
"integers",
"to",
"16",
"-",
"bit",
"registers",
"in",
"the",
"slave",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L501-L529
|
train
|
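A sketch for write_registers(); unlike write_register(), scaling and two's complement are the caller's job here.

import minimalmodbus

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 1)  # hypothetical setup

# Writes three consecutive registers starting at address 10.
# Convert negative values manually: -1 must be passed as 65535.
instrument.write_registers(10, [10, 20, 65535])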
pyhys/minimalmodbus
|
minimalmodbus.py
|
Instrument._communicate
|
def _communicate(self, request, number_of_bytes_to_read):
"""Talk to the slave via a serial port.
Args:
request (str): The raw request that is to be sent to the slave.
number_of_bytes_to_read (int): number of bytes to read
Returns:
The raw data (string) returned from the slave.
Raises:
TypeError, ValueError, IOError
Note that the answer might contain ASCII control characters, which
makes it difficult to print in the prompt (the output gets garbled).
Use repr() to make the string printable (it shows ASCII values for control characters).
Will block until reaching *number_of_bytes_to_read* or timeout.
If the attribute :attr:`Instrument.debug` is :const:`True`, the communication details are printed.
If the attribute :attr:`Instrument.close_port_after_each_call` is :const:`True` the
serial port is closed after each call.
Timing::
Request from master (Master is writing)
|
| Response from slave (Master is reading)
| |
----W----R----------------------------W-------R----------------------------------------
| | |
|<----- Silent period ------>| |
| |
Roundtrip time ---->|-------|<--
The resolution for Python's time.time() is lower on Windows than on Linux.
It is about 16 ms on Windows according to
http://stackoverflow.com/questions/157359/accurate-timestamping-in-python
For Python3, the information sent to and from pySerial should be of the type bytes.
This is taken care of automatically by MinimalModbus.
"""
_checkString(request, minlength=1, description='request')
_checkInt(number_of_bytes_to_read)
if self.debug:
_print_out('\nMinimalModbus debug mode. Writing to instrument (expecting {} bytes back): {!r} ({})'. \
format(number_of_bytes_to_read, request, _hexlify(request)))
if self.close_port_after_each_call:
self.serial.open()
#self.serial.flushInput() TODO
if sys.version_info[0] > 2:
request = bytes(request, encoding='latin1') # Convert types to make it Python3 compatible
# Sleep to make sure 3.5 character times have passed
minimum_silent_period = _calculate_minimum_silent_period(self.serial.baudrate)
time_since_read = time.time() - _LATEST_READ_TIMES.get(self.serial.port, 0)
if time_since_read < minimum_silent_period:
sleep_time = minimum_silent_period - time_since_read
if self.debug:
template = 'MinimalModbus debug mode. Sleeping for {:.1f} ms. ' + \
'Minimum silent period: {:.1f} ms, time since read: {:.1f} ms.'
text = template.format(
sleep_time * _SECONDS_TO_MILLISECONDS,
minimum_silent_period * _SECONDS_TO_MILLISECONDS,
time_since_read * _SECONDS_TO_MILLISECONDS)
_print_out(text)
time.sleep(sleep_time)
elif self.debug:
template = 'MinimalModbus debug mode. No sleep required before write. ' + \
'Time since previous read: {:.1f} ms, minimum silent period: {:.2f} ms.'
text = template.format(
time_since_read * _SECONDS_TO_MILLISECONDS,
minimum_silent_period * _SECONDS_TO_MILLISECONDS)
_print_out(text)
# Write request
latest_write_time = time.time()
self.serial.write(request)
# Read and discard local echo
if self.handle_local_echo:
localEchoToDiscard = self.serial.read(len(request))
if self.debug:
template = 'MinimalModbus debug mode. Discarding this local echo: {!r} ({} bytes).'
text = template.format(localEchoToDiscard, len(localEchoToDiscard))
_print_out(text)
if localEchoToDiscard != request:
template = 'Local echo handling is enabled, but the local echo does not match the sent request. ' + \
'Request: {!r} ({} bytes), local echo: {!r} ({} bytes).'
text = template.format(request, len(request), localEchoToDiscard, len(localEchoToDiscard))
raise IOError(text)
# Read response
answer = self.serial.read(number_of_bytes_to_read)
_LATEST_READ_TIMES[self.serial.port] = time.time()
if self.close_port_after_each_call:
self.serial.close()
if sys.version_info[0] > 2:
answer = str(answer, encoding='latin1') # Convert types to make it Python3 compatible
if self.debug:
template = 'MinimalModbus debug mode. Response from instrument: {!r} ({}) ({} bytes), ' + \
'roundtrip time: {:.1f} ms. Timeout setting: {:.1f} ms.\n'
text = template.format(
answer,
_hexlify(answer),
len(answer),
(_LATEST_READ_TIMES.get(self.serial.port, 0) - latest_write_time) * _SECONDS_TO_MILLISECONDS,
self.serial.timeout * _SECONDS_TO_MILLISECONDS)
_print_out(text)
if len(answer) == 0:
raise IOError('No communication with the instrument (no answer)')
return answer
|
python
|
def _communicate(self, request, number_of_bytes_to_read):
"""Talk to the slave via a serial port.
Args:
request (str): The raw request that is to be sent to the slave.
number_of_bytes_to_read (int): number of bytes to read
Returns:
The raw data (string) returned from the slave.
Raises:
TypeError, ValueError, IOError
Note that the answer might contain ASCII control characters, which
makes it difficult to print in the prompt (the output gets garbled).
Use repr() to make the string printable (it shows ASCII values for control characters).
Will block until reaching *number_of_bytes_to_read* or timeout.
If the attribute :attr:`Instrument.debug` is :const:`True`, the communication details are printed.
If the attribute :attr:`Instrument.close_port_after_each_call` is :const:`True` the
serial port is closed after each call.
Timing::
Request from master (Master is writing)
|
| Response from slave (Master is reading)
| |
----W----R----------------------------W-------R----------------------------------------
| | |
|<----- Silent period ------>| |
| |
Roundtrip time ---->|-------|<--
The resolution for Python's time.time() is lower on Windows than on Linux.
It is about 16 ms on Windows according to
http://stackoverflow.com/questions/157359/accurate-timestamping-in-python
For Python3, the information sent to and from pySerial should be of the type bytes.
This is taken care of automatically by MinimalModbus.
"""
_checkString(request, minlength=1, description='request')
_checkInt(number_of_bytes_to_read)
if self.debug:
_print_out('\nMinimalModbus debug mode. Writing to instrument (expecting {} bytes back): {!r} ({})'. \
format(number_of_bytes_to_read, request, _hexlify(request)))
if self.close_port_after_each_call:
self.serial.open()
#self.serial.flushInput() TODO
if sys.version_info[0] > 2:
request = bytes(request, encoding='latin1') # Convert types to make it Python3 compatible
# Sleep to make sure 3.5 character times have passed
minimum_silent_period = _calculate_minimum_silent_period(self.serial.baudrate)
time_since_read = time.time() - _LATEST_READ_TIMES.get(self.serial.port, 0)
if time_since_read < minimum_silent_period:
sleep_time = minimum_silent_period - time_since_read
if self.debug:
template = 'MinimalModbus debug mode. Sleeping for {:.1f} ms. ' + \
'Minimum silent period: {:.1f} ms, time since read: {:.1f} ms.'
text = template.format(
sleep_time * _SECONDS_TO_MILLISECONDS,
minimum_silent_period * _SECONDS_TO_MILLISECONDS,
time_since_read * _SECONDS_TO_MILLISECONDS)
_print_out(text)
time.sleep(sleep_time)
elif self.debug:
template = 'MinimalModbus debug mode. No sleep required before write. ' + \
'Time since previous read: {:.1f} ms, minimum silent period: {:.2f} ms.'
text = template.format(
time_since_read * _SECONDS_TO_MILLISECONDS,
minimum_silent_period * _SECONDS_TO_MILLISECONDS)
_print_out(text)
# Write request
latest_write_time = time.time()
self.serial.write(request)
# Read and discard local echo
if self.handle_local_echo:
localEchoToDiscard = self.serial.read(len(request))
if self.debug:
template = 'MinimalModbus debug mode. Discarding this local echo: {!r} ({} bytes).'
text = template.format(localEchoToDiscard, len(localEchoToDiscard))
_print_out(text)
if localEchoToDiscard != request:
template = 'Local echo handling is enabled, but the local echo does not match the sent request. ' + \
'Request: {!r} ({} bytes), local echo: {!r} ({} bytes).'
text = template.format(request, len(request), localEchoToDiscard, len(localEchoToDiscard))
raise IOError(text)
# Read response
answer = self.serial.read(number_of_bytes_to_read)
_LATEST_READ_TIMES[self.serial.port] = time.time()
if self.close_port_after_each_call:
self.serial.close()
if sys.version_info[0] > 2:
answer = str(answer, encoding='latin1') # Convert types to make it Python3 compatible
if self.debug:
template = 'MinimalModbus debug mode. Response from instrument: {!r} ({}) ({} bytes), ' + \
'roundtrip time: {:.1f} ms. Timeout setting: {:.1f} ms.\n'
text = template.format(
answer,
_hexlify(answer),
len(answer),
(_LATEST_READ_TIMES.get(self.serial.port, 0) - latest_write_time) * _SECONDS_TO_MILLISECONDS,
self.serial.timeout * _SECONDS_TO_MILLISECONDS)
_print_out(text)
if len(answer) == 0:
raise IOError('No communication with the instrument (no answer)')
return answer
|
[
"def",
"_communicate",
"(",
"self",
",",
"request",
",",
"number_of_bytes_to_read",
")",
":",
"_checkString",
"(",
"request",
",",
"minlength",
"=",
"1",
",",
"description",
"=",
"'request'",
")",
"_checkInt",
"(",
"number_of_bytes_to_read",
")",
"if",
"self",
".",
"debug",
":",
"_print_out",
"(",
"'\\nMinimalModbus debug mode. Writing to instrument (expecting {} bytes back): {!r} ({})'",
".",
"format",
"(",
"number_of_bytes_to_read",
",",
"request",
",",
"_hexlify",
"(",
"request",
")",
")",
")",
"if",
"self",
".",
"close_port_after_each_call",
":",
"self",
".",
"serial",
".",
"open",
"(",
")",
"#self.serial.flushInput() TODO",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">",
"2",
":",
"request",
"=",
"bytes",
"(",
"request",
",",
"encoding",
"=",
"'latin1'",
")",
"# Convert types to make it Python3 compatible",
"# Sleep to make sure 3.5 character times have passed",
"minimum_silent_period",
"=",
"_calculate_minimum_silent_period",
"(",
"self",
".",
"serial",
".",
"baudrate",
")",
"time_since_read",
"=",
"time",
".",
"time",
"(",
")",
"-",
"_LATEST_READ_TIMES",
".",
"get",
"(",
"self",
".",
"serial",
".",
"port",
",",
"0",
")",
"if",
"time_since_read",
"<",
"minimum_silent_period",
":",
"sleep_time",
"=",
"minimum_silent_period",
"-",
"time_since_read",
"if",
"self",
".",
"debug",
":",
"template",
"=",
"'MinimalModbus debug mode. Sleeping for {:.1f} ms. '",
"+",
"'Minimum silent period: {:.1f} ms, time since read: {:.1f} ms.'",
"text",
"=",
"template",
".",
"format",
"(",
"sleep_time",
"*",
"_SECONDS_TO_MILLISECONDS",
",",
"minimum_silent_period",
"*",
"_SECONDS_TO_MILLISECONDS",
",",
"time_since_read",
"*",
"_SECONDS_TO_MILLISECONDS",
")",
"_print_out",
"(",
"text",
")",
"time",
".",
"sleep",
"(",
"sleep_time",
")",
"elif",
"self",
".",
"debug",
":",
"template",
"=",
"'MinimalModbus debug mode. No sleep required before write. '",
"+",
"'Time since previous read: {:.1f} ms, minimum silent period: {:.2f} ms.'",
"text",
"=",
"template",
".",
"format",
"(",
"time_since_read",
"*",
"_SECONDS_TO_MILLISECONDS",
",",
"minimum_silent_period",
"*",
"_SECONDS_TO_MILLISECONDS",
")",
"_print_out",
"(",
"text",
")",
"# Write request",
"latest_write_time",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"serial",
".",
"write",
"(",
"request",
")",
"# Read and discard local echo",
"if",
"self",
".",
"handle_local_echo",
":",
"localEchoToDiscard",
"=",
"self",
".",
"serial",
".",
"read",
"(",
"len",
"(",
"request",
")",
")",
"if",
"self",
".",
"debug",
":",
"template",
"=",
"'MinimalModbus debug mode. Discarding this local echo: {!r} ({} bytes).'",
"text",
"=",
"template",
".",
"format",
"(",
"localEchoToDiscard",
",",
"len",
"(",
"localEchoToDiscard",
")",
")",
"_print_out",
"(",
"text",
")",
"if",
"localEchoToDiscard",
"!=",
"request",
":",
"template",
"=",
"'Local echo handling is enabled, but the local echo does not match the sent request. '",
"+",
"'Request: {!r} ({} bytes), local echo: {!r} ({} bytes).'",
"text",
"=",
"template",
".",
"format",
"(",
"request",
",",
"len",
"(",
"request",
")",
",",
"localEchoToDiscard",
",",
"len",
"(",
"localEchoToDiscard",
")",
")",
"raise",
"IOError",
"(",
"text",
")",
"# Read response",
"answer",
"=",
"self",
".",
"serial",
".",
"read",
"(",
"number_of_bytes_to_read",
")",
"_LATEST_READ_TIMES",
"[",
"self",
".",
"serial",
".",
"port",
"]",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"close_port_after_each_call",
":",
"self",
".",
"serial",
".",
"close",
"(",
")",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">",
"2",
":",
"answer",
"=",
"str",
"(",
"answer",
",",
"encoding",
"=",
"'latin1'",
")",
"# Convert types to make it Python3 compatible",
"if",
"self",
".",
"debug",
":",
"template",
"=",
"'MinimalModbus debug mode. Response from instrument: {!r} ({}) ({} bytes), '",
"+",
"'roundtrip time: {:.1f} ms. Timeout setting: {:.1f} ms.\\n'",
"text",
"=",
"template",
".",
"format",
"(",
"answer",
",",
"_hexlify",
"(",
"answer",
")",
",",
"len",
"(",
"answer",
")",
",",
"(",
"_LATEST_READ_TIMES",
".",
"get",
"(",
"self",
".",
"serial",
".",
"port",
",",
"0",
")",
"-",
"latest_write_time",
")",
"*",
"_SECONDS_TO_MILLISECONDS",
",",
"self",
".",
"serial",
".",
"timeout",
"*",
"_SECONDS_TO_MILLISECONDS",
")",
"_print_out",
"(",
"text",
")",
"if",
"len",
"(",
"answer",
")",
"==",
"0",
":",
"raise",
"IOError",
"(",
"'No communication with the instrument (no answer)'",
")",
"return",
"answer"
] |
Talk to the slave via a serial port.
Args:
request (str): The raw request that is to be sent to the slave.
number_of_bytes_to_read (int): number of bytes to read
Returns:
The raw data (string) returned from the slave.
Raises:
TypeError, ValueError, IOError
Note that the answer might contain ASCII control characters, which
makes it difficult to print in the prompt (the output gets garbled).
Use repr() to make the string printable (it shows ASCII values for control characters).
Will block until reaching *number_of_bytes_to_read* or timeout.
If the attribute :attr:`Instrument.debug` is :const:`True`, the communication details are printed.
If the attribute :attr:`Instrument.close_port_after_each_call` is :const:`True` the
serial port is closed after each call.
Timing::
Request from master (Master is writing)
|
| Response from slave (Master is reading)
| |
----W----R----------------------------W-------R----------------------------------------
| | |
|<----- Silent period ------>| |
| |
Roundtrip time ---->|-------|<--
The resolution for Python's time.time() is lower on Windows than on Linux.
It is about 16 ms on Windows according to
http://stackoverflow.com/questions/157359/accurate-timestamping-in-python
For Python3, the information sent to and from pySerial should be of the type bytes.
This is taken care of automatically by MinimalModbus.
|
[
"Talk",
"to",
"the",
"slave",
"via",
"a",
"serial",
"port",
"."
] |
e99f4d74c83258c6039073082955ac9bed3f2155
|
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L802-L932
|
train
|
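_communicate() sleeps so that at least 3.5 character times pass between frames (Modbus RTU framing). A back-of-the-envelope sketch of that arithmetic, assuming the conventional 11 bits per character; this helper is illustrative only and is not the library's internal _calculate_minimum_silent_period.

def minimum_silent_period(baudrate, bits_per_char=11, char_times=3.5):
    # 11 bits per character: start bit + 8 data bits + parity + stop bit.
    return char_times * bits_per_char / float(baudrate)

print(minimum_silent_period(19200))  # ~0.0020 s, i.e. about 2 ms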
TaylorSMarks/playsound
|
playsound.py
|
_playsoundWin
|
def _playsoundWin(sound, block = True):
'''
Utilizes windll.winmm. Tested and known to work with MP3 and WAVE on
Windows 7 with Python 2.7. Probably works with more file formats.
Probably works on Windows XP thru Windows 10. Probably works with all
versions of Python.
Inspired by (but not copied from) Michael Gundlach <gundlach@gmail.com>'s mp3play:
https://github.com/michaelgundlach/mp3play
I never would have tried using windll.winmm without seeing his code.
'''
from ctypes import c_buffer, windll
from random import random
from time import sleep
from sys import getfilesystemencoding
def winCommand(*command):
buf = c_buffer(255)
command = ' '.join(command).encode(getfilesystemencoding())
errorCode = int(windll.winmm.mciSendStringA(command, buf, 254, 0))
if errorCode:
errorBuffer = c_buffer(255)
windll.winmm.mciGetErrorStringA(errorCode, errorBuffer, 254)
exceptionMessage = ('\n Error ' + str(errorCode) + ' for command:'
'\n ' + command.decode() +
'\n ' + errorBuffer.value.decode())
raise PlaysoundException(exceptionMessage)
return buf.value
alias = 'playsound_' + str(random())
winCommand('open "' + sound + '" alias', alias)
winCommand('set', alias, 'time format milliseconds')
durationInMS = winCommand('status', alias, 'length')
winCommand('play', alias, 'from 0 to', durationInMS.decode())
if block:
sleep(float(durationInMS) / 1000.0)
|
python
|
def _playsoundWin(sound, block = True):
'''
Utilizes windll.winmm. Tested and known to work with MP3 and WAVE on
Windows 7 with Python 2.7. Probably works with more file formats.
Probably works on Windows XP thru Windows 10. Probably works with all
versions of Python.
Inspired by (but not copied from) Michael Gundlach <gundlach@gmail.com>'s mp3play:
https://github.com/michaelgundlach/mp3play
I never would have tried using windll.winmm without seeing his code.
'''
from ctypes import c_buffer, windll
from random import random
from time import sleep
from sys import getfilesystemencoding
def winCommand(*command):
buf = c_buffer(255)
command = ' '.join(command).encode(getfilesystemencoding())
errorCode = int(windll.winmm.mciSendStringA(command, buf, 254, 0))
if errorCode:
errorBuffer = c_buffer(255)
windll.winmm.mciGetErrorStringA(errorCode, errorBuffer, 254)
exceptionMessage = ('\n Error ' + str(errorCode) + ' for command:'
'\n ' + command.decode() +
'\n ' + errorBuffer.value.decode())
raise PlaysoundException(exceptionMessage)
return buf.value
alias = 'playsound_' + str(random())
winCommand('open "' + sound + '" alias', alias)
winCommand('set', alias, 'time format milliseconds')
durationInMS = winCommand('status', alias, 'length')
winCommand('play', alias, 'from 0 to', durationInMS.decode())
if block:
sleep(float(durationInMS) / 1000.0)
|
[
"def",
"_playsoundWin",
"(",
"sound",
",",
"block",
"=",
"True",
")",
":",
"from",
"ctypes",
"import",
"c_buffer",
",",
"windll",
"from",
"random",
"import",
"random",
"from",
"time",
"import",
"sleep",
"from",
"sys",
"import",
"getfilesystemencoding",
"def",
"winCommand",
"(",
"*",
"command",
")",
":",
"buf",
"=",
"c_buffer",
"(",
"255",
")",
"command",
"=",
"' '",
".",
"join",
"(",
"command",
")",
".",
"encode",
"(",
"getfilesystemencoding",
"(",
")",
")",
"errorCode",
"=",
"int",
"(",
"windll",
".",
"winmm",
".",
"mciSendStringA",
"(",
"command",
",",
"buf",
",",
"254",
",",
"0",
")",
")",
"if",
"errorCode",
":",
"errorBuffer",
"=",
"c_buffer",
"(",
"255",
")",
"windll",
".",
"winmm",
".",
"mciGetErrorStringA",
"(",
"errorCode",
",",
"errorBuffer",
",",
"254",
")",
"exceptionMessage",
"=",
"(",
"'\\n Error '",
"+",
"str",
"(",
"errorCode",
")",
"+",
"' for command:'",
"'\\n '",
"+",
"command",
".",
"decode",
"(",
")",
"+",
"'\\n '",
"+",
"errorBuffer",
".",
"value",
".",
"decode",
"(",
")",
")",
"raise",
"PlaysoundException",
"(",
"exceptionMessage",
")",
"return",
"buf",
".",
"value",
"alias",
"=",
"'playsound_'",
"+",
"str",
"(",
"random",
"(",
")",
")",
"winCommand",
"(",
"'open \"'",
"+",
"sound",
"+",
"'\" alias'",
",",
"alias",
")",
"winCommand",
"(",
"'set'",
",",
"alias",
",",
"'time format milliseconds'",
")",
"durationInMS",
"=",
"winCommand",
"(",
"'status'",
",",
"alias",
",",
"'length'",
")",
"winCommand",
"(",
"'play'",
",",
"alias",
",",
"'from 0 to'",
",",
"durationInMS",
".",
"decode",
"(",
")",
")",
"if",
"block",
":",
"sleep",
"(",
"float",
"(",
"durationInMS",
")",
"/",
"1000.0",
")"
] |
Utilizes windll.winmm. Tested and known to work with MP3 and WAVE on
Windows 7 with Python 2.7. Probably works with more file formats.
Probably works on Windows XP thru Windows 10. Probably works with all
versions of Python.
Inspired by (but not copied from) Michael Gundlach <gundlach@gmail.com>'s mp3play:
https://github.com/michaelgundlach/mp3play
I never would have tried using windll.winmm without seeing his code.
|
[
"Utilizes",
"windll",
".",
"winmm",
".",
"Tested",
"and",
"known",
"to",
"work",
"with",
"MP3",
"and",
"WAVE",
"on",
"Windows",
"7",
"with",
"Python",
"2",
".",
"7",
".",
"Probably",
"works",
"with",
"more",
"file",
"formats",
".",
"Probably",
"works",
"on",
"Windows",
"XP",
"thru",
"Windows",
"10",
".",
"Probably",
"works",
"with",
"all",
"versions",
"of",
"Python",
"."
] |
907f1fe73375a2156f7e0900c4b42c0a60fa1d00
|
https://github.com/TaylorSMarks/playsound/blob/907f1fe73375a2156f7e0900c4b42c0a60fa1d00/playsound.py#L4-L41
|
train
|
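A usage sketch through playsound's public entry point, which dispatches to the Windows backend above; the file path is a placeholder.

from playsound import playsound

playsound('C:\\sounds\\chime.mp3')         # blocks until playback finishes
playsound('C:\\sounds\\chime.mp3', False)  # returns immediately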
TaylorSMarks/playsound
|
playsound.py
|
_playsoundOSX
|
def _playsoundOSX(sound, block = True):
'''
Utilizes AppKit.NSSound. Tested and known to work with MP3 and WAVE on
OS X 10.11 with Python 2.7. Probably works with anything QuickTime supports.
Probably works on OS X 10.5 and newer. Probably works with all versions of
Python.
Inspired by (but not copied from) Aaron's Stack Overflow answer here:
http://stackoverflow.com/a/34568298/901641
I never would have tried using AppKit.NSSound without seeing his code.
'''
from AppKit import NSSound
from Foundation import NSURL
from time import sleep
if '://' not in sound:
if not sound.startswith('/'):
from os import getcwd
sound = getcwd() + '/' + sound
sound = 'file://' + sound
url = NSURL.URLWithString_(sound)
nssound = NSSound.alloc().initWithContentsOfURL_byReference_(url, True)
if not nssound:
raise IOError('Unable to load sound named: ' + sound)
nssound.play()
if block:
sleep(nssound.duration())
|
python
|
def _playsoundOSX(sound, block = True):
'''
Utilizes AppKit.NSSound. Tested and known to work with MP3 and WAVE on
OS X 10.11 with Python 2.7. Probably works with anything QuickTime supports.
Probably works on OS X 10.5 and newer. Probably works with all versions of
Python.
Inspired by (but not copied from) Aaron's Stack Overflow answer here:
http://stackoverflow.com/a/34568298/901641
I never would have tried using AppKit.NSSound without seeing his code.
'''
from AppKit import NSSound
from Foundation import NSURL
from time import sleep
if '://' not in sound:
if not sound.startswith('/'):
from os import getcwd
sound = getcwd() + '/' + sound
sound = 'file://' + sound
url = NSURL.URLWithString_(sound)
nssound = NSSound.alloc().initWithContentsOfURL_byReference_(url, True)
if not nssound:
raise IOError('Unable to load sound named: ' + sound)
nssound.play()
if block:
sleep(nssound.duration())
|
[
"def",
"_playsoundOSX",
"(",
"sound",
",",
"block",
"=",
"True",
")",
":",
"from",
"AppKit",
"import",
"NSSound",
"from",
"Foundation",
"import",
"NSURL",
"from",
"time",
"import",
"sleep",
"if",
"'://'",
"not",
"in",
"sound",
":",
"if",
"not",
"sound",
".",
"startswith",
"(",
"'/'",
")",
":",
"from",
"os",
"import",
"getcwd",
"sound",
"=",
"getcwd",
"(",
")",
"+",
"'/'",
"+",
"sound",
"sound",
"=",
"'file://'",
"+",
"sound",
"url",
"=",
"NSURL",
".",
"URLWithString_",
"(",
"sound",
")",
"nssound",
"=",
"NSSound",
".",
"alloc",
"(",
")",
".",
"initWithContentsOfURL_byReference_",
"(",
"url",
",",
"True",
")",
"if",
"not",
"nssound",
":",
"raise",
"IOError",
"(",
"'Unable to load sound named: '",
"+",
"sound",
")",
"nssound",
".",
"play",
"(",
")",
"if",
"block",
":",
"sleep",
"(",
"nssound",
".",
"duration",
"(",
")",
")"
] |
Utilizes AppKit.NSSound. Tested and known to work with MP3 and WAVE on
OS X 10.11 with Python 2.7. Probably works with anything QuickTime supports.
Probably works on OS X 10.5 and newer. Probably works with all versions of
Python.
Inspired by (but not copied from) Aaron's Stack Overflow answer here:
http://stackoverflow.com/a/34568298/901641
I never would have tried using AppKit.NSSound without seeing his code.
|
[
"Utilizes",
"AppKit",
".",
"NSSound",
".",
"Tested",
"and",
"known",
"to",
"work",
"with",
"MP3",
"and",
"WAVE",
"on",
"OS",
"X",
"10",
".",
"11",
"with",
"Python",
"2",
".",
"7",
".",
"Probably",
"works",
"with",
"anything",
"QuickTime",
"supports",
".",
"Probably",
"works",
"on",
"OS",
"X",
"10",
".",
"5",
"and",
"newer",
".",
"Probably",
"works",
"with",
"all",
"versions",
"of",
"Python",
"."
] |
907f1fe73375a2156f7e0900c4b42c0a60fa1d00
|
https://github.com/TaylorSMarks/playsound/blob/907f1fe73375a2156f7e0900c4b42c0a60fa1d00/playsound.py#L43-L71
|
train
|
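The path normalization that _playsoundOSX applies before handing the sound to NSSound, extracted here so it can be run standalone.

from os import getcwd

def to_osx_sound_url(sound):
    # Mirrors the logic in _playsoundOSX above: relative paths are
    # anchored at the current working directory, then given a file:// scheme.
    if '://' not in sound:
        if not sound.startswith('/'):
            sound = getcwd() + '/' + sound
        sound = 'file://' + sound
    return sound

print(to_osx_sound_url('chime.wav'))  # e.g. file:///current/dir/chime.wav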
TaylorSMarks/playsound
|
playsound.py
|
_playsoundNix
|
def _playsoundNix(sound, block=True):
"""Play a sound using GStreamer.
Inspired by this:
https://gstreamer.freedesktop.org/documentation/tutorials/playback/playbin-usage.html
"""
if not block:
raise NotImplementedError(
"block=False cannot be used on this platform yet")
# pathname2url escapes non-URL-safe characters
import os
try:
from urllib.request import pathname2url
except ImportError:
# python 2
from urllib import pathname2url
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
Gst.init(None)
playbin = Gst.ElementFactory.make('playbin', 'playbin')
if sound.startswith(('http://', 'https://')):
playbin.props.uri = sound
else:
playbin.props.uri = 'file://' + pathname2url(os.path.abspath(sound))
set_result = playbin.set_state(Gst.State.PLAYING)
if set_result != Gst.StateChangeReturn.ASYNC:
raise PlaysoundException(
"playbin.set_state returned " + repr(set_result))
# FIXME: use some other bus method than poll() with block=False
# https://lazka.github.io/pgi-docs/#Gst-1.0/classes/Bus.html
bus = playbin.get_bus()
bus.poll(Gst.MessageType.EOS, Gst.CLOCK_TIME_NONE)
playbin.set_state(Gst.State.NULL)
|
python
|
def _playsoundNix(sound, block=True):
"""Play a sound using GStreamer.
Inspired by this:
https://gstreamer.freedesktop.org/documentation/tutorials/playback/playbin-usage.html
"""
if not block:
raise NotImplementedError(
"block=False cannot be used on this platform yet")
# pathname2url escapes non-URL-safe characters
import os
try:
from urllib.request import pathname2url
except ImportError:
# python 2
from urllib import pathname2url
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
Gst.init(None)
playbin = Gst.ElementFactory.make('playbin', 'playbin')
if sound.startswith(('http://', 'https://')):
playbin.props.uri = sound
else:
playbin.props.uri = 'file://' + pathname2url(os.path.abspath(sound))
set_result = playbin.set_state(Gst.State.PLAYING)
if set_result != Gst.StateChangeReturn.ASYNC:
raise PlaysoundException(
"playbin.set_state returned " + repr(set_result))
# FIXME: use some other bus method than poll() with block=False
# https://lazka.github.io/pgi-docs/#Gst-1.0/classes/Bus.html
bus = playbin.get_bus()
bus.poll(Gst.MessageType.EOS, Gst.CLOCK_TIME_NONE)
playbin.set_state(Gst.State.NULL)
|
[
"def",
"_playsoundNix",
"(",
"sound",
",",
"block",
"=",
"True",
")",
":",
"if",
"not",
"block",
":",
"raise",
"NotImplementedError",
"(",
"\"block=False cannot be used on this platform yet\"",
")",
"# pathname2url escapes non-URL-safe characters",
"import",
"os",
"try",
":",
"from",
"urllib",
".",
"request",
"import",
"pathname2url",
"except",
"ImportError",
":",
"# python 2",
"from",
"urllib",
"import",
"pathname2url",
"import",
"gi",
"gi",
".",
"require_version",
"(",
"'Gst'",
",",
"'1.0'",
")",
"from",
"gi",
".",
"repository",
"import",
"Gst",
"Gst",
".",
"init",
"(",
"None",
")",
"playbin",
"=",
"Gst",
".",
"ElementFactory",
".",
"make",
"(",
"'playbin'",
",",
"'playbin'",
")",
"if",
"sound",
".",
"startswith",
"(",
"(",
"'http://'",
",",
"'https://'",
")",
")",
":",
"playbin",
".",
"props",
".",
"uri",
"=",
"sound",
"else",
":",
"playbin",
".",
"props",
".",
"uri",
"=",
"'file://'",
"+",
"pathname2url",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"sound",
")",
")",
"set_result",
"=",
"playbin",
".",
"set_state",
"(",
"Gst",
".",
"State",
".",
"PLAYING",
")",
"if",
"set_result",
"!=",
"Gst",
".",
"StateChangeReturn",
".",
"ASYNC",
":",
"raise",
"PlaysoundException",
"(",
"\"playbin.set_state returned \"",
"+",
"repr",
"(",
"set_result",
")",
")",
"# FIXME: use some other bus method than poll() with block=False",
"# https://lazka.github.io/pgi-docs/#Gst-1.0/classes/Bus.html",
"bus",
"=",
"playbin",
".",
"get_bus",
"(",
")",
"bus",
".",
"poll",
"(",
"Gst",
".",
"MessageType",
".",
"EOS",
",",
"Gst",
".",
"CLOCK_TIME_NONE",
")",
"playbin",
".",
"set_state",
"(",
"Gst",
".",
"State",
".",
"NULL",
")"
] |
Play a sound using GStreamer.
Inspired by this:
https://gstreamer.freedesktop.org/documentation/tutorials/playback/playbin-usage.html
|
[
"Play",
"a",
"sound",
"using",
"GStreamer",
"."
] |
907f1fe73375a2156f7e0900c4b42c0a60fa1d00
|
https://github.com/TaylorSMarks/playsound/blob/907f1fe73375a2156f7e0900c4b42c0a60fa1d00/playsound.py#L73-L112
|
train
|
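For reference, a standalone sketch of the same playbin pattern the function wraps; it assumes GStreamer 1.0 with PyGObject installed, and the file path is invented.

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)
playbin = Gst.ElementFactory.make('playbin', 'playbin')
playbin.props.uri = 'file:///tmp/alert.wav'  # hypothetical local file
playbin.set_state(Gst.State.PLAYING)
# block until end-of-stream, then release the pipeline
playbin.get_bus().poll(Gst.MessageType.EOS, Gst.CLOCK_TIME_NONE)
playbin.set_state(Gst.State.NULL)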
mfitzp/padua
|
padua/filters.py
|
remove_rows_matching
|
def remove_rows_matching(df, column, match):
"""
    Return a ``DataFrame`` with the rows whose `column` values match `match` removed.
The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared
to `match`, and those rows that match are removed from the DataFrame.
:param df: Pandas ``DataFrame``
:param column: Column indexer
:param match: ``str`` match target
:return: Pandas ``DataFrame`` filtered
"""
df = df.copy()
mask = df[column].values != match
return df.iloc[mask, :]
|
python
|
def remove_rows_matching(df, column, match):
"""
    Return a ``DataFrame`` with the rows whose `column` values match `match` removed.
The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared
to `match`, and those rows that match are removed from the DataFrame.
:param df: Pandas ``DataFrame``
:param column: Column indexer
:param match: ``str`` match target
:return: Pandas ``DataFrame`` filtered
"""
df = df.copy()
mask = df[column].values != match
return df.iloc[mask, :]
|
[
"def",
"remove_rows_matching",
"(",
"df",
",",
"column",
",",
"match",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"mask",
"=",
"df",
"[",
"column",
"]",
".",
"values",
"!=",
"match",
"return",
"df",
".",
"iloc",
"[",
"mask",
",",
":",
"]"
] |
Return a ``DataFrame`` with the rows whose `column` values match `match` removed.
The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared
to `match`, and those rows that match are removed from the DataFrame.
:param df: Pandas ``DataFrame``
:param column: Column indexer
:param match: ``str`` match target
:return: Pandas ``DataFrame`` filtered
|
[
"Return",
"a",
"DataFrame",
"with",
"rows",
"where",
"column",
"values",
"match",
"match",
"are",
"removed",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/filters.py#L4-L18
|
train
|
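A toy sketch of the exact-match filter above; the 'Reverse' column and its '+' flag follow MaxQuant conventions but the values are invented.

import pandas as pd
from padua.filters import remove_rows_matching

df = pd.DataFrame({'Reverse': ['+', '', ''], 'Score': [1, 2, 3]})
filtered = remove_rows_matching(df, 'Reverse', '+')
# keeps the two rows whose 'Reverse' value is not '+'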
mfitzp/padua
|
padua/filters.py
|
remove_rows_containing
|
def remove_rows_containing(df, column, match):
"""
    Return a ``DataFrame`` with the rows whose `column` values contain `match` removed.
The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared
to `match`, and those rows that contain it are removed from the DataFrame.
:param df: Pandas ``DataFrame``
:param column: Column indexer
:param match: ``str`` match target
:return: Pandas ``DataFrame`` filtered
"""
df = df.copy()
mask = [match not in str(v) for v in df[column].values]
return df.iloc[mask, :]
|
python
|
def remove_rows_containing(df, column, match):
"""
    Return a ``DataFrame`` with the rows whose `column` values contain `match` removed.
The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared
to `match`, and those rows that contain it are removed from the DataFrame.
:param df: Pandas ``DataFrame``
:param column: Column indexer
:param match: ``str`` match target
:return: Pandas ``DataFrame`` filtered
"""
df = df.copy()
mask = [match not in str(v) for v in df[column].values]
return df.iloc[mask, :]
|
[
"def",
"remove_rows_containing",
"(",
"df",
",",
"column",
",",
"match",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"mask",
"=",
"[",
"match",
"not",
"in",
"str",
"(",
"v",
")",
"for",
"v",
"in",
"df",
"[",
"column",
"]",
".",
"values",
"]",
"return",
"df",
".",
"iloc",
"[",
"mask",
",",
":",
"]"
] |
Return a ``DataFrame`` with the rows whose `column` values contain `match` removed.
The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared
to `match`, and those rows that contain it are removed from the DataFrame.
:param df: Pandas ``DataFrame``
:param column: Column indexer
:param match: ``str`` match target
:return: Pandas ``DataFrame`` filtered
|
[
"Return",
"a",
"DataFrame",
"with",
"rows",
"where",
"column",
"values",
"containing",
"match",
"are",
"removed",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/filters.py#L21-L35
|
train
|
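A companion sketch for the substring variant; 'CON__' is MaxQuant's contaminant prefix, used here purely as an example match string.

import pandas as pd
from padua.filters import remove_rows_containing

df = pd.DataFrame({'Proteins': ['CON__P02768', 'P12345'], 'Intensity': [10, 20]})
filtered = remove_rows_containing(df, 'Proteins', 'CON__')
# only the P12345 row survives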
mfitzp/padua
|
padua/filters.py
|
filter_localization_probability
|
def filter_localization_probability(df, threshold=0.75):
"""
Remove rows with a localization probability below 0.75
Return a ``DataFrame`` where the rows with a value < `threshold` (default 0.75) in column 'Localization prob' are removed.
Filters data to remove poorly localized peptides (non Class-I by default).
:param df: Pandas ``DataFrame``
:param threshold: Cut-off below which rows are discarded (default 0.75)
:return: Pandas ``DataFrame``
"""
df = df.copy()
localization_probability_mask = df['Localization prob'].values >= threshold
return df.iloc[localization_probability_mask, :]
|
python
|
def filter_localization_probability(df, threshold=0.75):
"""
Remove rows with a localization probability below 0.75
Return a ``DataFrame`` where the rows with a value < `threshold` (default 0.75) in column 'Localization prob' are removed.
Filters data to remove poorly localized peptides (non Class-I by default).
:param df: Pandas ``DataFrame``
:param threshold: Cut-off below which rows are discarded (default 0.75)
:return: Pandas ``DataFrame``
"""
df = df.copy()
localization_probability_mask = df['Localization prob'].values >= threshold
return df.iloc[localization_probability_mask, :]
|
[
"def",
"filter_localization_probability",
"(",
"df",
",",
"threshold",
"=",
"0.75",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"localization_probability_mask",
"=",
"df",
"[",
"'Localization prob'",
"]",
".",
"values",
">=",
"threshold",
"return",
"df",
".",
"iloc",
"[",
"localization_probability_mask",
",",
":",
"]"
] |
Remove rows with a localization probability below 0.75
Return a ``DataFrame`` where the rows with a value < `threshold` (default 0.75) in column 'Localization prob' are removed.
Filters data to remove poorly localized peptides (non Class-I by default).
:param df: Pandas ``DataFrame``
:param threshold: Cut-off below which rows are discarded (default 0.75)
:return: Pandas ``DataFrame``
|
[
"Remove",
"rows",
"with",
"a",
"localization",
"probability",
"below",
"0",
".",
"75"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/filters.py#L77-L90
|
train
|
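A minimal sketch of the Class-I cut-off; the single-column frame is invented and assumes the 'Localization prob' column name the function expects.

import pandas as pd
from padua.filters import filter_localization_probability

df = pd.DataFrame({'Localization prob': [0.99, 0.60, 0.80]})
class_i = filter_localization_probability(df, threshold=0.75)
# the rows with probabilities 0.99 and 0.80 remain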
mfitzp/padua
|
padua/filters.py
|
minimum_valid_values_in_any_group
|
def minimum_valid_values_in_any_group(df, levels=None, n=1, invalid=np.nan):
"""
Filter ``DataFrame`` by at least n valid values in at least one group.
    Taking a Pandas ``DataFrame`` with a ``MultiIndex`` column index, this filters out rows
    that lack at least `n` valid values in any single group. Groups are defined by the `levels` parameter indexing
into the column index. For example, a ``MultiIndex`` with top and second level Group (A,B,C) and Replicate (1,2,3) using
``levels=[0,1]`` would filter on `n` valid values per replicate. Alternatively, ``levels=[0]`` would filter on `n`
valid values at the Group level only, e.g. A, B or C.
    By default, missing values are identified as `np.nan`; an alternative invalid-value marker can be supplied via `invalid`.
:param df: Pandas ``DataFrame``
:param levels: ``list`` of ``int`` specifying levels of column ``MultiIndex`` to group by
:param n: ``int`` minimum number of valid values threshold
:param invalid: matching invalid value
:return: filtered Pandas ``DataFrame``
"""
df = df.copy()
if levels is None:
if 'Group' in df.columns.names:
levels = [df.columns.names.index('Group')]
    # Count valid values per group and keep rows with at least n valid values in any one group
if invalid is np.nan:
dfx = ~np.isnan(df)
else:
dfx = df != invalid
dfc = dfx.astype(int).sum(axis=1, level=levels)
dfm = dfc.max(axis=1) >= n
mask = dfm.values
return df.iloc[mask, :]
|
python
|
def minimum_valid_values_in_any_group(df, levels=None, n=1, invalid=np.nan):
"""
Filter ``DataFrame`` by at least n valid values in at least one group.
    Taking a Pandas ``DataFrame`` with a ``MultiIndex`` column index, this filters out rows
    that lack at least `n` valid values in any single group. Groups are defined by the `levels` parameter indexing
into the column index. For example, a ``MultiIndex`` with top and second level Group (A,B,C) and Replicate (1,2,3) using
``levels=[0,1]`` would filter on `n` valid values per replicate. Alternatively, ``levels=[0]`` would filter on `n`
valid values at the Group level only, e.g. A, B or C.
    By default, missing values are identified as `np.nan`; an alternative invalid-value marker can be supplied via `invalid`.
:param df: Pandas ``DataFrame``
:param levels: ``list`` of ``int`` specifying levels of column ``MultiIndex`` to group by
:param n: ``int`` minimum number of valid values threshold
:param invalid: matching invalid value
:return: filtered Pandas ``DataFrame``
"""
df = df.copy()
if levels is None:
if 'Group' in df.columns.names:
levels = [df.columns.names.index('Group')]
    # Count valid values per group and keep rows with at least n valid values in any one group
if invalid is np.nan:
dfx = ~np.isnan(df)
else:
dfx = df != invalid
dfc = dfx.astype(int).sum(axis=1, level=levels)
dfm = dfc.max(axis=1) >= n
mask = dfm.values
return df.iloc[mask, :]
|
[
"def",
"minimum_valid_values_in_any_group",
"(",
"df",
",",
"levels",
"=",
"None",
",",
"n",
"=",
"1",
",",
"invalid",
"=",
"np",
".",
"nan",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"if",
"levels",
"is",
"None",
":",
"if",
"'Group'",
"in",
"df",
".",
"columns",
".",
"names",
":",
"levels",
"=",
"[",
"df",
".",
"columns",
".",
"names",
".",
"index",
"(",
"'Group'",
")",
"]",
"# Filter by at least 7 (values in class:timepoint) at least in at least one group",
"if",
"invalid",
"is",
"np",
".",
"nan",
":",
"dfx",
"=",
"~",
"np",
".",
"isnan",
"(",
"df",
")",
"else",
":",
"dfx",
"=",
"df",
"!=",
"invalid",
"dfc",
"=",
"dfx",
".",
"astype",
"(",
"int",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
",",
"level",
"=",
"levels",
")",
"dfm",
"=",
"dfc",
".",
"max",
"(",
"axis",
"=",
"1",
")",
">=",
"n",
"mask",
"=",
"dfm",
".",
"values",
"return",
"df",
".",
"iloc",
"[",
"mask",
",",
":",
"]"
] |
Filter ``DataFrame`` by at least n valid values in at least one group.
Taking a Pandas ``DataFrame`` with a ``MultiIndex`` column index, this filters out rows
that lack at least `n` valid values in any single group. Groups are defined by the `levels` parameter indexing
into the column index. For example, a ``MultiIndex`` with top and second level Group (A,B,C) and Replicate (1,2,3) using
``levels=[0,1]`` would filter on `n` valid values per replicate. Alternatively, ``levels=[0]`` would filter on `n`
valid values at the Group level only, e.g. A, B or C.
By default, missing values are identified as `np.nan`; an alternative invalid-value marker can be supplied via `invalid`.
:param df: Pandas ``DataFrame``
:param levels: ``list`` of ``int`` specifying levels of column ``MultiIndex`` to group by
:param n: ``int`` minimum number of valid values threshold
:param invalid: matching invalid value
:return: filtered Pandas ``DataFrame``
|
[
"Filter",
"DataFrame",
"by",
"at",
"least",
"n",
"valid",
"values",
"in",
"at",
"least",
"one",
"group",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/filters.py#L93-L129
|
train
|
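A shape sketch for the group filter; it assumes an older pandas (the level= keyword to sum() was removed in pandas 2.0) and a two-level column MultiIndex with invented values.

import numpy as np
import pandas as pd
from padua.filters import minimum_valid_values_in_any_group

cols = pd.MultiIndex.from_product([['A', 'B'], [1, 2]], names=['Group', 'Replicate'])
df = pd.DataFrame([[1.0, np.nan, np.nan, np.nan],
                   [1.0, 2.0, np.nan, np.nan]], columns=cols)
kept = minimum_valid_values_in_any_group(df, levels=[0], n=2)
# only the second row reaches 2 valid values within a single group (A)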
mfitzp/padua
|
padua/filters.py
|
search
|
def search(df, match, columns=['Proteins','Protein names','Gene names']):
"""
Search for a given string in a set of columns in a processed ``DataFrame``.
Returns a filtered ``DataFrame`` where `match` is contained in one of the `columns`.
:param df: Pandas ``DataFrame``
:param match: ``str`` to search for in columns
:param columns: ``list`` of ``str`` to search for match
:return: filtered Pandas ``DataFrame``
"""
df = df.copy()
dft = df.reset_index()
mask = np.zeros((dft.shape[0],), dtype=bool)
    idx = columns  # bugfix: honour the supplied columns rather than a hardcoded list
for i in idx:
if i in dft.columns:
mask = mask | np.array([match in str(l) for l in dft[i].values])
return df.iloc[mask]
|
python
|
def search(df, match, columns=['Proteins','Protein names','Gene names']):
"""
Search for a given string in a set of columns in a processed ``DataFrame``.
Returns a filtered ``DataFrame`` where `match` is contained in one of the `columns`.
:param df: Pandas ``DataFrame``
:param match: ``str`` to search for in columns
:param columns: ``list`` of ``str`` to search for match
:return: filtered Pandas ``DataFrame``
"""
df = df.copy()
dft = df.reset_index()
mask = np.zeros((dft.shape[0],), dtype=bool)
    idx = columns  # bugfix: honour the supplied columns rather than a hardcoded list
for i in idx:
if i in dft.columns:
mask = mask | np.array([match in str(l) for l in dft[i].values])
return df.iloc[mask]
|
[
"def",
"search",
"(",
"df",
",",
"match",
",",
"columns",
"=",
"[",
"'Proteins'",
",",
"'Protein names'",
",",
"'Gene names'",
"]",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"dft",
"=",
"df",
".",
"reset_index",
"(",
")",
"mask",
"=",
"np",
".",
"zeros",
"(",
"(",
"dft",
".",
"shape",
"[",
"0",
"]",
",",
")",
",",
"dtype",
"=",
"bool",
")",
"idx",
"=",
"[",
"'Proteins'",
",",
"'Protein names'",
",",
"'Gene names'",
"]",
"for",
"i",
"in",
"idx",
":",
"if",
"i",
"in",
"dft",
".",
"columns",
":",
"mask",
"=",
"mask",
"|",
"np",
".",
"array",
"(",
"[",
"match",
"in",
"str",
"(",
"l",
")",
"for",
"l",
"in",
"dft",
"[",
"i",
"]",
".",
"values",
"]",
")",
"return",
"df",
".",
"iloc",
"[",
"mask",
"]"
] |
Search for a given string in a set of columns in a processed ``DataFrame``.
Returns a filtered ``DataFrame`` where `match` is contained in one of the `columns`.
:param df: Pandas ``DataFrame``
:param match: ``str`` to search for in columns
:param columns: ``list`` of ``str`` to search for match
:return: filtered Pandas ``DataFrame``
|
[
"Search",
"for",
"a",
"given",
"string",
"in",
"a",
"set",
"of",
"columns",
"in",
"a",
"processed",
"DataFrame",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/filters.py#L132-L152
|
train
|
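A toy sketch of the text search; the gene names are arbitrary examples.

import pandas as pd
from padua.filters import search

df = pd.DataFrame({'Gene names': ['EGFR', 'MAPK1'], 'Score': [5, 7]})
hits = search(df, 'EGFR')
# returns the single row whose 'Gene names' value contains 'EGFR'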
mfitzp/padua
|
padua/filters.py
|
filter_select_columns_intensity
|
def filter_select_columns_intensity(df, prefix, columns):
"""
Filter dataframe to include specified columns, retaining any Intensity columns.
"""
# Note: I use %s.+ (not %s.*) so it forces a match with the prefix string, ONLY if it is followed by something.
return df.filter(regex='^(%s.+|%s)$' % (prefix, '|'.join(columns)) )
|
python
|
def filter_select_columns_intensity(df, prefix, columns):
"""
Filter dataframe to include specified columns, retaining any Intensity columns.
"""
# Note: I use %s.+ (not %s.*) so it forces a match with the prefix string, ONLY if it is followed by something.
return df.filter(regex='^(%s.+|%s)$' % (prefix, '|'.join(columns)) )
|
[
"def",
"filter_select_columns_intensity",
"(",
"df",
",",
"prefix",
",",
"columns",
")",
":",
"# Note: I use %s.+ (not %s.*) so it forces a match with the prefix string, ONLY if it is followed by something.",
"return",
"df",
".",
"filter",
"(",
"regex",
"=",
"'^(%s.+|%s)$'",
"%",
"(",
"prefix",
",",
"'|'",
".",
"join",
"(",
"columns",
")",
")",
")"
] |
Filter dataframe to include specified columns, retaining any Intensity columns.
|
[
"Filter",
"dataframe",
"to",
"include",
"specified",
"columns",
"retaining",
"any",
"Intensity",
"columns",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/filters.py#L163-L168
|
train
|
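A sketch of the regex this builds, '^(Intensity .+|Proteins)$' for the call below; the 'Intensity ' prefix follows MaxQuant output, the rest is invented.

import pandas as pd
from padua.filters import filter_select_columns_intensity

df = pd.DataFrame(columns=['Intensity A', 'Intensity B', 'Proteins', 'Score'])
kept = filter_select_columns_intensity(df, 'Intensity ', ['Proteins'])
# keeps 'Intensity A', 'Intensity B' and 'Proteins'; drops 'Score'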
mfitzp/padua
|
padua/filters.py
|
filter_intensity
|
def filter_intensity(df, label="", with_multiplicity=False):
"""
    Filter to include only the Intensity values with an optional specified label, excluding other
    Intensity measurements, but retaining all other columns.
"""
label += ".*__\d" if with_multiplicity else ""
dft = df.filter(regex="^(?!Intensity).*$")
    dfi = df.filter(regex='^(.*Intensity.*%s)$' % label)  # fix: '__\d' is appended via label only when with_multiplicity is set
return pd.concat([dft,dfi], axis=1)
|
python
|
def filter_intensity(df, label="", with_multiplicity=False):
"""
    Filter to include only the Intensity values with an optional specified label, excluding other
    Intensity measurements, but retaining all other columns.
"""
label += ".*__\d" if with_multiplicity else ""
dft = df.filter(regex="^(?!Intensity).*$")
    dfi = df.filter(regex='^(.*Intensity.*%s)$' % label)  # fix: '__\d' is appended via label only when with_multiplicity is set
return pd.concat([dft,dfi], axis=1)
|
[
"def",
"filter_intensity",
"(",
"df",
",",
"label",
"=",
"\"\"",
",",
"with_multiplicity",
"=",
"False",
")",
":",
"label",
"+=",
"\".*__\\d\"",
"if",
"with_multiplicity",
"else",
"\"\"",
"dft",
"=",
"df",
".",
"filter",
"(",
"regex",
"=",
"\"^(?!Intensity).*$\"",
")",
"dfi",
"=",
"df",
".",
"filter",
"(",
"regex",
"=",
"'^(.*Intensity.*%s.*__\\d)$'",
"%",
"label",
")",
"return",
"pd",
".",
"concat",
"(",
"[",
"dft",
",",
"dfi",
"]",
",",
"axis",
"=",
"1",
")"
] |
Filter to include only the Intensity values with an optional specified label, excluding other
Intensity measurements, but retaining all other columns.
|
[
"Filter",
"to",
"include",
"only",
"the",
"Intensity",
"values",
"with",
"optional",
"specified",
"label",
"excluding",
"other",
"Intensity",
"measurements",
"but",
"retaining",
"all",
"other",
"columns",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/filters.py#L177-L187
|
train
|
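A call-shape sketch with invented SILAC-style column names; it relies on the regex fix noted in the record above, without which columns lacking a '__N' multiplicity suffix would never match.

import pandas as pd
from padua.filters import filter_intensity

df = pd.DataFrame(columns=['Proteins', 'Intensity H S1__1', 'Intensity L S1__1'])
dfh = filter_intensity(df, label='H', with_multiplicity=True)
# retains 'Proteins' plus only the 'Intensity H S1__1' column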
mfitzp/padua
|
padua/filters.py
|
filter_ratio
|
def filter_ratio(df, label="", with_multiplicity=False):
"""
    Filter to include only the Ratio values with an optional specified label, excluding other
    Ratio measurements, but retaining all other columns.
"""
label += ".*__\d" if with_multiplicity else ""
dft = df.filter(regex="^(?!Ratio).*$")
dfr = df.filter(regex='^(.*Ratio.*%s)$' % label)
return pd.concat([dft,dfr], axis=1)
|
python
|
def filter_ratio(df, label="", with_multiplicity=False):
"""
    Filter to include only the Ratio values with an optional specified label, excluding other
    Ratio measurements, but retaining all other columns.
"""
label += ".*__\d" if with_multiplicity else ""
dft = df.filter(regex="^(?!Ratio).*$")
dfr = df.filter(regex='^(.*Ratio.*%s)$' % label)
return pd.concat([dft,dfr], axis=1)
|
[
"def",
"filter_ratio",
"(",
"df",
",",
"label",
"=",
"\"\"",
",",
"with_multiplicity",
"=",
"False",
")",
":",
"label",
"+=",
"\".*__\\d\"",
"if",
"with_multiplicity",
"else",
"\"\"",
"dft",
"=",
"df",
".",
"filter",
"(",
"regex",
"=",
"\"^(?!Ratio).*$\"",
")",
"dfr",
"=",
"df",
".",
"filter",
"(",
"regex",
"=",
"'^(.*Ratio.*%s)$'",
"%",
"label",
")",
"return",
"pd",
".",
"concat",
"(",
"[",
"dft",
",",
"dfr",
"]",
",",
"axis",
"=",
"1",
")"
] |
Filter to include only the Ratio values with an optional specified label, excluding other
Ratio measurements, but retaining all other columns.
|
[
"Filter",
"to",
"include",
"only",
"the",
"Ratio",
"values",
"with",
"optional",
"specified",
"label",
"excluding",
"other",
"Intensity",
"measurements",
"but",
"retaining",
"all",
"other",
"columns",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/filters.py#L201-L211
|
train
|
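The Ratio counterpart follows the same pattern; the column names are invented, and note the regex anchors at the end of the name, so the label must terminate the column.

import pandas as pd
from padua.filters import filter_ratio

df = pd.DataFrame(columns=['Proteins', 'Ratio H/L', 'Ratio M/L'])
dfr = filter_ratio(df, label='H/L')
# retains 'Proteins' plus the 'Ratio H/L' column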
mfitzp/padua
|
padua/io.py
|
read_perseus
|
def read_perseus(f):
"""
Load a Perseus processed data table
:param f: Source file
:return: Pandas dataframe of imported data
"""
df = pd.read_csv(f, delimiter='\t', header=[0,1,2,3], low_memory=False)
df.columns = pd.MultiIndex.from_tuples([(x,) for x in df.columns.get_level_values(0)])
return df
|
python
|
def read_perseus(f):
"""
Load a Perseus processed data table
:param f: Source file
:return: Pandas dataframe of imported data
"""
df = pd.read_csv(f, delimiter='\t', header=[0,1,2,3], low_memory=False)
df.columns = pd.MultiIndex.from_tuples([(x,) for x in df.columns.get_level_values(0)])
return df
|
[
"def",
"read_perseus",
"(",
"f",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"f",
",",
"delimiter",
"=",
"'\\t'",
",",
"header",
"=",
"[",
"0",
",",
"1",
",",
"2",
",",
"3",
"]",
",",
"low_memory",
"=",
"False",
")",
"df",
".",
"columns",
"=",
"pd",
".",
"MultiIndex",
".",
"from_tuples",
"(",
"[",
"(",
"x",
",",
")",
"for",
"x",
"in",
"df",
".",
"columns",
".",
"get_level_values",
"(",
"0",
")",
"]",
")",
"return",
"df"
] |
Load a Perseus processed data table
:param f: Source file
:return: Pandas dataframe of imported data
|
[
"Load",
"a",
"Perseus",
"processed",
"data",
"table"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/io.py#L21-L30
|
train
|
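A loading sketch; 'perseus_export.txt' is a placeholder for a tab-separated Perseus export carrying the four header rows this reader expects.

from padua.io import read_perseus

df = read_perseus('perseus_export.txt')  # hypothetical file on disk
print(df.shape)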
mfitzp/padua
|
padua/io.py
|
write_perseus
|
def write_perseus(f, df):
"""
    Export a dataframe to Perseus, recreating its file format
:param f:
:param df:
:return:
"""
### Generate the Perseus like type index
FIELD_TYPE_MAP = {
'Amino acid':'C',
'Charge':'C',
'Reverse':'C',
'Potential contaminant':'C',
'Multiplicity':'C',
'Localization prob':'N',
'PEP':'N',
'Score':'N',
'Delta score':'N',
'Score for localization':'N',
'Mass error [ppm]':'N',
'Intensity':'N',
'Position':'N',
'Proteins':'T',
'Positions within proteins':'T',
'Leading proteins':'T',
'Protein names':'T',
'Gene names':'T',
'Sequence window':'T',
'Unique identifier':'T',
}
def map_field_type(n, c):
try:
t = FIELD_TYPE_MAP[c]
        except KeyError:  # unknown columns fall back to type 'E'
t = "E"
# In the first element, add type indicator
if n == 0:
t = "#!{Type}%s" % t
return t
df = df.copy()
df.columns = pd.MultiIndex.from_tuples([(k, map_field_type(n, k)) for n, k in enumerate(df.columns)], names=["Label","Type"])
df = df.transpose().reset_index().transpose()
df.to_csv(f, index=False, header=False)
|
python
|
def write_perseus(f, df):
"""
    Export a dataframe to Perseus, recreating its file format
:param f:
:param df:
:return:
"""
### Generate the Perseus like type index
FIELD_TYPE_MAP = {
'Amino acid':'C',
'Charge':'C',
'Reverse':'C',
'Potential contaminant':'C',
'Multiplicity':'C',
'Localization prob':'N',
'PEP':'N',
'Score':'N',
'Delta score':'N',
'Score for localization':'N',
'Mass error [ppm]':'N',
'Intensity':'N',
'Position':'N',
'Proteins':'T',
'Positions within proteins':'T',
'Leading proteins':'T',
'Protein names':'T',
'Gene names':'T',
'Sequence window':'T',
'Unique identifier':'T',
}
def map_field_type(n, c):
try:
t = FIELD_TYPE_MAP[c]
        except KeyError:  # unknown columns fall back to type 'E'
t = "E"
# In the first element, add type indicator
if n == 0:
t = "#!{Type}%s" % t
return t
df = df.copy()
df.columns = pd.MultiIndex.from_tuples([(k, map_field_type(n, k)) for n, k in enumerate(df.columns)], names=["Label","Type"])
df = df.transpose().reset_index().transpose()
df.to_csv(f, index=False, header=False)
|
[
"def",
"write_perseus",
"(",
"f",
",",
"df",
")",
":",
"### Generate the Perseus like type index",
"FIELD_TYPE_MAP",
"=",
"{",
"'Amino acid'",
":",
"'C'",
",",
"'Charge'",
":",
"'C'",
",",
"'Reverse'",
":",
"'C'",
",",
"'Potential contaminant'",
":",
"'C'",
",",
"'Multiplicity'",
":",
"'C'",
",",
"'Localization prob'",
":",
"'N'",
",",
"'PEP'",
":",
"'N'",
",",
"'Score'",
":",
"'N'",
",",
"'Delta score'",
":",
"'N'",
",",
"'Score for localization'",
":",
"'N'",
",",
"'Mass error [ppm]'",
":",
"'N'",
",",
"'Intensity'",
":",
"'N'",
",",
"'Position'",
":",
"'N'",
",",
"'Proteins'",
":",
"'T'",
",",
"'Positions within proteins'",
":",
"'T'",
",",
"'Leading proteins'",
":",
"'T'",
",",
"'Protein names'",
":",
"'T'",
",",
"'Gene names'",
":",
"'T'",
",",
"'Sequence window'",
":",
"'T'",
",",
"'Unique identifier'",
":",
"'T'",
",",
"}",
"def",
"map_field_type",
"(",
"n",
",",
"c",
")",
":",
"try",
":",
"t",
"=",
"FIELD_TYPE_MAP",
"[",
"c",
"]",
"except",
":",
"t",
"=",
"\"E\"",
"# In the first element, add type indicator",
"if",
"n",
"==",
"0",
":",
"t",
"=",
"\"#!{Type}%s\"",
"%",
"t",
"return",
"t",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"df",
".",
"columns",
"=",
"pd",
".",
"MultiIndex",
".",
"from_tuples",
"(",
"[",
"(",
"k",
",",
"map_field_type",
"(",
"n",
",",
"k",
")",
")",
"for",
"n",
",",
"k",
"in",
"enumerate",
"(",
"df",
".",
"columns",
")",
"]",
",",
"names",
"=",
"[",
"\"Label\"",
",",
"\"Type\"",
"]",
")",
"df",
"=",
"df",
".",
"transpose",
"(",
")",
".",
"reset_index",
"(",
")",
".",
"transpose",
"(",
")",
"df",
".",
"to_csv",
"(",
"f",
",",
"index",
"=",
"False",
",",
"header",
"=",
"False",
")"
] |
Export a dataframe to Perseus, recreating its file format
:param f:
:param df:
:return:
|
[
"Export",
"a",
"dataframe",
"to",
"Perseus",
";",
"recreating",
"the",
"format"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/io.py#L33-L82
|
train
|
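A minimal write-out sketch; the frame is invented, and any column missing from FIELD_TYPE_MAP falls back to type 'E'.

import pandas as pd
from padua.io import write_perseus

df = pd.DataFrame({'Proteins': ['P12345'], 'Score': [42.0]})
write_perseus('perseus_out.txt', df)  # placeholder output path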
mfitzp/padua
|
padua/io.py
|
write_phosphopath_ratio
|
def write_phosphopath_ratio(df, f, a, *args, **kwargs):
"""
Write out the data frame ratio between two groups
protein-Rsite-multiplicity-timepoint
ID Ratio
Q13619-S10-1-1 0.5
Q9H3Z4-S10-1-1 0.502
Q6GQQ9-S100-1-1 0.504
Q86YP4-S100-1-1 0.506
Q9H307-S100-1-1 0.508
Q8NEY1-S1000-1-1 0.51
Q13541-S101-1-1 0.512
O95785-S1012-2-1 0.514
O95785-S1017-2-1 0.516
Q9Y4G8-S1022-1-1 0.518
P35658-S1023-1-1 0.52
    Provide a dataframe, a filename for output and a control selector. Each
    selector that follows is compared (ratio of means) against the control. If you
    provide a timepoint_idx keyword argument, the timepoint for each selection is
    read from the selector tuple, e.g. timepoint_idx=1 will use the second element
    of each selector as the timepoint, so ("Control", 30) would give timepoint 30.
    :param df: Pandas ``DataFrame``
    :param f: output file path
    :param a: control selector
    :param *args: comparison selectors
    :param **kwargs: use timepoint_idx= to define the tuple index for timepoint information, extracted from args.
:return:
"""
timepoint_idx = kwargs.get('timepoint_idx', None)
proteins = [get_protein_id(k) for k in df.index.get_level_values('Proteins')]
amino_acids = df.index.get_level_values('Amino acid')
positions = _get_positions(df)
multiplicity = [int(k[-1]) for k in df.index.get_level_values('Multiplicity')]
apos = ["%s%s" % x for x in zip(amino_acids, positions)]
phdfs = []
# Convert timepoints to 1-based ordinal.
    tp_map = set()
    if timepoint_idx is not None:  # guard: c[None] would raise a TypeError
        for c in args:
            tp_map.add(c[timepoint_idx])
    tp_map = sorted(tp_map)
    for c in args:
        v = df[a].mean(axis=1).values / df[c].mean(axis=1).values
        if timepoint_idx is not None:
            tps = [1 + tp_map.index(c[timepoint_idx])] * len(proteins)
        else:
            tps = [1] * len(proteins)
prar = ["%s-%s-%d-%d" % x for x in zip(proteins, apos, multiplicity, tps)]
phdf = pd.DataFrame(np.array(list(zip(prar, v))))
phdf.columns = ["ID", "Ratio"]
phdfs.append(phdf)
pd.concat(phdfs).to_csv(f, sep='\t', index=None)
|
python
|
def write_phosphopath_ratio(df, f, a, *args, **kwargs):
"""
Write out the data frame ratio between two groups
protein-Rsite-multiplicity-timepoint
ID Ratio
Q13619-S10-1-1 0.5
Q9H3Z4-S10-1-1 0.502
Q6GQQ9-S100-1-1 0.504
Q86YP4-S100-1-1 0.506
Q9H307-S100-1-1 0.508
Q8NEY1-S1000-1-1 0.51
Q13541-S101-1-1 0.512
O95785-S1012-2-1 0.514
O95785-S1017-2-1 0.516
Q9Y4G8-S1022-1-1 0.518
P35658-S1023-1-1 0.52
    Provide a dataframe, a filename for output and a control selector. Each
    selector that follows is compared (ratio of means) against the control. If you
    provide a timepoint_idx keyword argument, the timepoint for each selection is
    read from the selector tuple, e.g. timepoint_idx=1 will use the second element
    of each selector as the timepoint, so ("Control", 30) would give timepoint 30.
    :param df: Pandas ``DataFrame``
    :param f: output file path
    :param a: control selector
    :param *args: comparison selectors
    :param **kwargs: use timepoint_idx= to define the tuple index for timepoint information, extracted from args.
:return:
"""
timepoint_idx = kwargs.get('timepoint_idx', None)
proteins = [get_protein_id(k) for k in df.index.get_level_values('Proteins')]
amino_acids = df.index.get_level_values('Amino acid')
positions = _get_positions(df)
multiplicity = [int(k[-1]) for k in df.index.get_level_values('Multiplicity')]
apos = ["%s%s" % x for x in zip(amino_acids, positions)]
phdfs = []
# Convert timepoints to 1-based ordinal.
    tp_map = set()
    if timepoint_idx is not None:  # guard: c[None] would raise a TypeError
        for c in args:
            tp_map.add(c[timepoint_idx])
    tp_map = sorted(tp_map)
    for c in args:
        v = df[a].mean(axis=1).values / df[c].mean(axis=1).values
        if timepoint_idx is not None:
            tps = [1 + tp_map.index(c[timepoint_idx])] * len(proteins)
        else:
            tps = [1] * len(proteins)
prar = ["%s-%s-%d-%d" % x for x in zip(proteins, apos, multiplicity, tps)]
phdf = pd.DataFrame(np.array(list(zip(prar, v))))
phdf.columns = ["ID", "Ratio"]
phdfs.append(phdf)
pd.concat(phdfs).to_csv(f, sep='\t', index=None)
|
[
"def",
"write_phosphopath_ratio",
"(",
"df",
",",
"f",
",",
"a",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"timepoint_idx",
"=",
"kwargs",
".",
"get",
"(",
"'timepoint_idx'",
",",
"None",
")",
"proteins",
"=",
"[",
"get_protein_id",
"(",
"k",
")",
"for",
"k",
"in",
"df",
".",
"index",
".",
"get_level_values",
"(",
"'Proteins'",
")",
"]",
"amino_acids",
"=",
"df",
".",
"index",
".",
"get_level_values",
"(",
"'Amino acid'",
")",
"positions",
"=",
"_get_positions",
"(",
"df",
")",
"multiplicity",
"=",
"[",
"int",
"(",
"k",
"[",
"-",
"1",
"]",
")",
"for",
"k",
"in",
"df",
".",
"index",
".",
"get_level_values",
"(",
"'Multiplicity'",
")",
"]",
"apos",
"=",
"[",
"\"%s%s\"",
"%",
"x",
"for",
"x",
"in",
"zip",
"(",
"amino_acids",
",",
"positions",
")",
"]",
"phdfs",
"=",
"[",
"]",
"# Convert timepoints to 1-based ordinal.",
"tp_map",
"=",
"set",
"(",
")",
"for",
"c",
"in",
"args",
":",
"tp_map",
".",
"add",
"(",
"c",
"[",
"timepoint_idx",
"]",
")",
"tp_map",
"=",
"sorted",
"(",
"tp_map",
")",
"for",
"c",
"in",
"args",
":",
"v",
"=",
"df",
"[",
"a",
"]",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
".",
"values",
"/",
"df",
"[",
"c",
"]",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
".",
"values",
"tp",
"=",
"[",
"1",
"+",
"tp_map",
".",
"index",
"(",
"c",
"[",
"timepoint_idx",
"]",
")",
"]",
"tps",
"=",
"tp",
"*",
"len",
"(",
"proteins",
")",
"if",
"timepoint_idx",
"else",
"[",
"1",
"]",
"*",
"len",
"(",
"proteins",
")",
"prar",
"=",
"[",
"\"%s-%s-%d-%d\"",
"%",
"x",
"for",
"x",
"in",
"zip",
"(",
"proteins",
",",
"apos",
",",
"multiplicity",
",",
"tps",
")",
"]",
"phdf",
"=",
"pd",
".",
"DataFrame",
"(",
"np",
".",
"array",
"(",
"list",
"(",
"zip",
"(",
"prar",
",",
"v",
")",
")",
")",
")",
"phdf",
".",
"columns",
"=",
"[",
"\"ID\"",
",",
"\"Ratio\"",
"]",
"phdfs",
".",
"append",
"(",
"phdf",
")",
"pd",
".",
"concat",
"(",
"phdfs",
")",
".",
"to_csv",
"(",
"f",
",",
"sep",
"=",
"'\\t'",
",",
"index",
"=",
"None",
")"
] |
Write out the data frame ratio between two groups
protein-Rsite-multiplicity-timepoint
ID Ratio
Q13619-S10-1-1 0.5
Q9H3Z4-S10-1-1 0.502
Q6GQQ9-S100-1-1 0.504
Q86YP4-S100-1-1 0.506
Q9H307-S100-1-1 0.508
Q8NEY1-S1000-1-1 0.51
Q13541-S101-1-1 0.512
O95785-S1012-2-1 0.514
O95785-S1017-2-1 0.516
Q9Y4G8-S1022-1-1 0.518
P35658-S1023-1-1 0.52
Provide a dataframe, a filename for output and a control selector. Each
selector that follows is compared (ratio of means) against the control. If you
provide a timepoint_idx keyword argument, the timepoint for each selection is
read from the selector tuple, e.g. timepoint_idx=1 will use the second element
of each selector as the timepoint, so ("Control", 30) would give timepoint 30.
:param df: Pandas ``DataFrame``
:param f: output file path
:param a: control selector
:param *args: comparison selectors
:param **kwargs: use timepoint_idx= to define the tuple index for timepoint information, extracted from args.
:return:
|
[
"Write",
"out",
"the",
"data",
"frame",
"ratio",
"between",
"two",
"groups",
"protein",
"-",
"Rsite",
"-",
"multiplicity",
"-",
"timepoint",
"ID",
"Ratio",
"Q13619",
"-",
"S10",
"-",
"1",
"-",
"1",
"0",
".",
"5",
"Q9H3Z4",
"-",
"S10",
"-",
"1",
"-",
"1",
"0",
".",
"502",
"Q6GQQ9",
"-",
"S100",
"-",
"1",
"-",
"1",
"0",
".",
"504",
"Q86YP4",
"-",
"S100",
"-",
"1",
"-",
"1",
"0",
".",
"506",
"Q9H307",
"-",
"S100",
"-",
"1",
"-",
"1",
"0",
".",
"508",
"Q8NEY1",
"-",
"S1000",
"-",
"1",
"-",
"1",
"0",
".",
"51",
"Q13541",
"-",
"S101",
"-",
"1",
"-",
"1",
"0",
".",
"512",
"O95785",
"-",
"S1012",
"-",
"2",
"-",
"1",
"0",
".",
"514",
"O95785",
"-",
"S1017",
"-",
"2",
"-",
"1",
"0",
".",
"516",
"Q9Y4G8",
"-",
"S1022",
"-",
"1",
"-",
"1",
"0",
".",
"518",
"P35658",
"-",
"S1023",
"-",
"1",
"-",
"1",
"0",
".",
"52"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/io.py#L129-L185
|
train
|
mfitzp/padua
|
padua/io.py
|
write_r
|
def write_r(df, f, sep=",", index_join="@", columns_join="."):
"""
Export dataframe in a format easily importable to R
Index fields are joined with "@" and column fields by "." by default.
:param df:
:param f:
:param index_join:
:param columns_join:
:return:
"""
df = df.copy()
df.index = ["@".join([str(s) for s in v]) for v in df.index.values]
df.columns = [".".join([str(s) for s in v]) for v in df.index.values]
df.to_csv(f, sep=sep)
|
python
|
def write_r(df, f, sep=",", index_join="@", columns_join="."):
"""
Export dataframe in a format easily importable to R
Index fields are joined with "@" and column fields by "." by default.
:param df:
:param f:
:param index_join:
:param columns_join:
:return:
"""
df = df.copy()
df.index = ["@".join([str(s) for s in v]) for v in df.index.values]
df.columns = [".".join([str(s) for s in v]) for v in df.index.values]
df.to_csv(f, sep=sep)
|
[
"def",
"write_r",
"(",
"df",
",",
"f",
",",
"sep",
"=",
"\",\"",
",",
"index_join",
"=",
"\"@\"",
",",
"columns_join",
"=",
"\".\"",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"df",
".",
"index",
"=",
"[",
"\"@\"",
".",
"join",
"(",
"[",
"str",
"(",
"s",
")",
"for",
"s",
"in",
"v",
"]",
")",
"for",
"v",
"in",
"df",
".",
"index",
".",
"values",
"]",
"df",
".",
"columns",
"=",
"[",
"\".\"",
".",
"join",
"(",
"[",
"str",
"(",
"s",
")",
"for",
"s",
"in",
"v",
"]",
")",
"for",
"v",
"in",
"df",
".",
"index",
".",
"values",
"]",
"df",
".",
"to_csv",
"(",
"f",
",",
"sep",
"=",
"sep",
")"
] |
Export dataframe in a format easily importable to R
Index fields are joined with "@" and column fields by "." by default.
:param df:
:param f:
:param index_join:
:param columns_join:
:return:
|
[
"Export",
"dataframe",
"in",
"a",
"format",
"easily",
"importable",
"to",
"R"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/io.py#L188-L203
|
train
|
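A toy sketch of write_r, assuming the column/join fix applied above; both axes carry two-level MultiIndexes with invented labels.

import pandas as pd
from padua.io import write_r

df = pd.DataFrame([[1.0, 2.0]],
                  index=pd.MultiIndex.from_tuples([('A', 1)]),
                  columns=pd.MultiIndex.from_tuples([('Ctrl', 1), ('Ctrl', 2)]))
write_r(df, 'for_r.csv')  # the index label becomes 'A@1'; columns become 'Ctrl.1' and 'Ctrl.2'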
mfitzp/padua
|
padua/imputation.py
|
gaussian
|
def gaussian(df, width=0.3, downshift=-1.8, prefix=None):
"""
Impute missing values by drawing from a normal distribution
:param df:
:param width: Scale factor for the imputed distribution relative to the standard deviation of measured values. Can be a single number or list of one per column.
:param downshift: Shift the imputed values down, in units of std. dev. Can be a single number or list of one per column
:param prefix: The column prefix for imputed columns
:return:
"""
df = df.copy()
imputed = df.isnull() # Keep track of what's real
if prefix:
mask = np.array([l.startswith(prefix) for l in df.columns.values])
mycols = np.arange(0, df.shape[1])[mask]
else:
mycols = np.arange(0, df.shape[1])
if type(width) is not list:
width = [width] * len(mycols)
elif len(mycols) != len(width):
raise ValueError("Length of iterable 'width' does not match # of columns")
if type(downshift) is not list:
downshift = [downshift] * len(mycols)
elif len(mycols) != len(downshift):
raise ValueError("Length of iterable 'downshift' does not match # of columns")
    for n, i in enumerate(mycols):
        data = df.iloc[:, i]
        mask = data.isnull().values
        mean = data.mean(axis=0)
        stddev = data.std(axis=0)
        # bugfix: width/downshift have one entry per selected column, so index
        # them by position n rather than by the absolute column number i
        m = mean + downshift[n]*stddev
        s = stddev*width[n]
# Generate a list of random numbers for filling in
values = np.random.normal(loc=m, scale=s, size=df.shape[0])
# Now fill them in
df.iloc[mask, i] = values[mask]
return df, imputed
|
python
|
def gaussian(df, width=0.3, downshift=-1.8, prefix=None):
"""
Impute missing values by drawing from a normal distribution
:param df:
:param width: Scale factor for the imputed distribution relative to the standard deviation of measured values. Can be a single number or list of one per column.
:param downshift: Shift the imputed values down, in units of std. dev. Can be a single number or list of one per column
:param prefix: The column prefix for imputed columns
:return:
"""
df = df.copy()
imputed = df.isnull() # Keep track of what's real
if prefix:
mask = np.array([l.startswith(prefix) for l in df.columns.values])
mycols = np.arange(0, df.shape[1])[mask]
else:
mycols = np.arange(0, df.shape[1])
if type(width) is not list:
width = [width] * len(mycols)
elif len(mycols) != len(width):
raise ValueError("Length of iterable 'width' does not match # of columns")
if type(downshift) is not list:
downshift = [downshift] * len(mycols)
elif len(mycols) != len(downshift):
raise ValueError("Length of iterable 'downshift' does not match # of columns")
    for n, i in enumerate(mycols):
        data = df.iloc[:, i]
        mask = data.isnull().values
        mean = data.mean(axis=0)
        stddev = data.std(axis=0)
        # bugfix: width/downshift have one entry per selected column, so index
        # them by position n rather than by the absolute column number i
        m = mean + downshift[n]*stddev
        s = stddev*width[n]
# Generate a list of random numbers for filling in
values = np.random.normal(loc=m, scale=s, size=df.shape[0])
# Now fill them in
df.iloc[mask, i] = values[mask]
return df, imputed
|
[
"def",
"gaussian",
"(",
"df",
",",
"width",
"=",
"0.3",
",",
"downshift",
"=",
"-",
"1.8",
",",
"prefix",
"=",
"None",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"imputed",
"=",
"df",
".",
"isnull",
"(",
")",
"# Keep track of what's real",
"if",
"prefix",
":",
"mask",
"=",
"np",
".",
"array",
"(",
"[",
"l",
".",
"startswith",
"(",
"prefix",
")",
"for",
"l",
"in",
"df",
".",
"columns",
".",
"values",
"]",
")",
"mycols",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"df",
".",
"shape",
"[",
"1",
"]",
")",
"[",
"mask",
"]",
"else",
":",
"mycols",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"df",
".",
"shape",
"[",
"1",
"]",
")",
"if",
"type",
"(",
"width",
")",
"is",
"not",
"list",
":",
"width",
"=",
"[",
"width",
"]",
"*",
"len",
"(",
"mycols",
")",
"elif",
"len",
"(",
"mycols",
")",
"!=",
"len",
"(",
"width",
")",
":",
"raise",
"ValueError",
"(",
"\"Length of iterable 'width' does not match # of columns\"",
")",
"if",
"type",
"(",
"downshift",
")",
"is",
"not",
"list",
":",
"downshift",
"=",
"[",
"downshift",
"]",
"*",
"len",
"(",
"mycols",
")",
"elif",
"len",
"(",
"mycols",
")",
"!=",
"len",
"(",
"downshift",
")",
":",
"raise",
"ValueError",
"(",
"\"Length of iterable 'downshift' does not match # of columns\"",
")",
"for",
"i",
"in",
"mycols",
":",
"data",
"=",
"df",
".",
"iloc",
"[",
":",
",",
"i",
"]",
"mask",
"=",
"data",
".",
"isnull",
"(",
")",
".",
"values",
"mean",
"=",
"data",
".",
"mean",
"(",
"axis",
"=",
"0",
")",
"stddev",
"=",
"data",
".",
"std",
"(",
"axis",
"=",
"0",
")",
"m",
"=",
"mean",
"+",
"downshift",
"[",
"i",
"]",
"*",
"stddev",
"s",
"=",
"stddev",
"*",
"width",
"[",
"i",
"]",
"# Generate a list of random numbers for filling in",
"values",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"loc",
"=",
"m",
",",
"scale",
"=",
"s",
",",
"size",
"=",
"df",
".",
"shape",
"[",
"0",
"]",
")",
"# Now fill them in",
"df",
".",
"iloc",
"[",
"mask",
",",
"i",
"]",
"=",
"values",
"[",
"mask",
"]",
"return",
"df",
",",
"imputed"
] |
Impute missing values by drawing from a normal distribution
:param df:
:param width: Scale factor for the imputed distribution relative to the standard deviation of measured values. Can be a single number or list of one per column.
:param downshift: Shift the imputed values down, in units of std. dev. Can be a single number or list of one per column
:param prefix: The column prefix for imputed columns
:return:
|
[
"Impute",
"missing",
"values",
"by",
"drawing",
"from",
"a",
"normal",
"distribution"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/imputation.py#L14-L63
|
train
|
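A minimal imputation sketch; the values are invented log2 intensities, and the default width/downshift mirror the Perseus-style missing-not-at-random model.

import numpy as np
import pandas as pd
from padua.imputation import gaussian

np.random.seed(0)  # reproducible draws
df = pd.DataFrame({'Intensity A': [25.0, np.nan, 24.0, 26.0],
                   'Intensity B': [23.0, 22.5, np.nan, 23.5]})
imputed_df, was_missing = gaussian(df, width=0.3, downshift=-1.8)
# was_missing is a boolean frame marking the cells drawn from the shifted normal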
mfitzp/padua
|
padua/visualize.py
|
_pca_scores
|
def _pca_scores(
scores,
pc1=0,
pc2=1,
fcol=None,
ecol=None,
marker='o',
markersize=30,
label_scores=None,
show_covariance_ellipse=True,
optimize_label_iter=OPTIMIZE_LABEL_ITER_DEFAULT,
**kwargs
):
"""
Plot a scores plot for two principal components as AxB scatter plot.
Returns the plotted axis.
:param scores: DataFrame containing scores
:param pc1: Column indexer into scores for PC1
:param pc2: Column indexer into scores for PC2
:param fcol: Face (fill) color definition
:param ecol: Edge color definition
:param marker: Marker style (matplotlib; default 'o')
    :param markersize: marker size; an int, a column index level name (str), or a callable taking the column key
:param label_scores: Index level to label markers with
:param show_covariance_ellipse: Plot covariance (2*std) ellipse around each grouping
:param optimize_label_iter: Number of iterations to run label adjustment algorithm
:return: Generated axes
"""
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1,1,1)
levels = [0,1]
for c in set(scores.columns.values):
try:
data = scores[c].values.reshape(2,-1)
        except (KeyError, ValueError):  # skip column groupings that cannot be reshaped
continue
fc = hierarchical_match(fcol, c, 'k')
ec = hierarchical_match(ecol, c)
if ec is None:
ec = fc
if type(markersize) == str:
# Use as a key vs. index value in this levels
idx = scores.columns.names.index(markersize)
s = c[idx]
elif callable(markersize):
s = markersize(c)
else:
s = markersize
ax.scatter(data[pc1,:], data[pc2,:], s=s, marker=marker, edgecolors=ec, c=fc)
if show_covariance_ellipse and data.shape[1] > 2:
cov = data[[pc1, pc2], :].T
ellip = plot_point_cov(cov, nstd=2, linestyle='dashed', linewidth=0.5, edgecolor=ec or fc,
alpha=0.8) #**kwargs for ellipse styling
ax.add_artist(ellip)
if label_scores:
scores_f = scores.iloc[ [pc1, pc2] ]
idxs = get_index_list( scores_f.columns.names, label_scores )
texts = []
for n, (x, y) in enumerate(scores_f.T.values):
t = ax.text(x, y, build_combined_label( scores_f.columns.values[n], idxs, ', '), bbox=dict(boxstyle='round,pad=0.3', fc='#ffffff', ec='none', alpha=0.6))
texts.append(t)
if texts and optimize_label_iter:
adjust_text(
texts,
lim=optimize_label_iter
)
ax.set_xlabel(scores.index[pc1], fontsize=16)
ax.set_ylabel(scores.index[pc2], fontsize=16)
fig.tight_layout()
return ax
|
python
|
def _pca_scores(
scores,
pc1=0,
pc2=1,
fcol=None,
ecol=None,
marker='o',
markersize=30,
label_scores=None,
show_covariance_ellipse=True,
optimize_label_iter=OPTIMIZE_LABEL_ITER_DEFAULT,
**kwargs
):
"""
Plot a scores plot for two principal components as AxB scatter plot.
Returns the plotted axis.
:param scores: DataFrame containing scores
:param pc1: Column indexer into scores for PC1
:param pc2: Column indexer into scores for PC2
:param fcol: Face (fill) color definition
:param ecol: Edge color definition
:param marker: Marker style (matplotlib; default 'o')
    :param markersize: marker size; an int, a column index level name (str), or a callable taking the column key
:param label_scores: Index level to label markers with
:param show_covariance_ellipse: Plot covariance (2*std) ellipse around each grouping
:param optimize_label_iter: Number of iterations to run label adjustment algorithm
:return: Generated axes
"""
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1,1,1)
levels = [0,1]
for c in set(scores.columns.values):
try:
data = scores[c].values.reshape(2,-1)
        except (KeyError, ValueError):  # skip column groupings that cannot be reshaped
continue
fc = hierarchical_match(fcol, c, 'k')
ec = hierarchical_match(ecol, c)
if ec is None:
ec = fc
if type(markersize) == str:
# Use as a key vs. index value in this levels
idx = scores.columns.names.index(markersize)
s = c[idx]
elif callable(markersize):
s = markersize(c)
else:
s = markersize
ax.scatter(data[pc1,:], data[pc2,:], s=s, marker=marker, edgecolors=ec, c=fc)
if show_covariance_ellipse and data.shape[1] > 2:
cov = data[[pc1, pc2], :].T
ellip = plot_point_cov(cov, nstd=2, linestyle='dashed', linewidth=0.5, edgecolor=ec or fc,
alpha=0.8) #**kwargs for ellipse styling
ax.add_artist(ellip)
if label_scores:
scores_f = scores.iloc[ [pc1, pc2] ]
idxs = get_index_list( scores_f.columns.names, label_scores )
texts = []
for n, (x, y) in enumerate(scores_f.T.values):
t = ax.text(x, y, build_combined_label( scores_f.columns.values[n], idxs, ', '), bbox=dict(boxstyle='round,pad=0.3', fc='#ffffff', ec='none', alpha=0.6))
texts.append(t)
if texts and optimize_label_iter:
adjust_text(
texts,
lim=optimize_label_iter
)
ax.set_xlabel(scores.index[pc1], fontsize=16)
ax.set_ylabel(scores.index[pc2], fontsize=16)
fig.tight_layout()
return ax
|
[
"def",
"_pca_scores",
"(",
"scores",
",",
"pc1",
"=",
"0",
",",
"pc2",
"=",
"1",
",",
"fcol",
"=",
"None",
",",
"ecol",
"=",
"None",
",",
"marker",
"=",
"'o'",
",",
"markersize",
"=",
"30",
",",
"label_scores",
"=",
"None",
",",
"show_covariance_ellipse",
"=",
"True",
",",
"optimize_label_iter",
"=",
"OPTIMIZE_LABEL_ITER_DEFAULT",
",",
"*",
"*",
"kwargs",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"8",
",",
"8",
")",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"1",
",",
"1",
",",
"1",
")",
"levels",
"=",
"[",
"0",
",",
"1",
"]",
"for",
"c",
"in",
"set",
"(",
"scores",
".",
"columns",
".",
"values",
")",
":",
"try",
":",
"data",
"=",
"scores",
"[",
"c",
"]",
".",
"values",
".",
"reshape",
"(",
"2",
",",
"-",
"1",
")",
"except",
":",
"continue",
"fc",
"=",
"hierarchical_match",
"(",
"fcol",
",",
"c",
",",
"'k'",
")",
"ec",
"=",
"hierarchical_match",
"(",
"ecol",
",",
"c",
")",
"if",
"ec",
"is",
"None",
":",
"ec",
"=",
"fc",
"if",
"type",
"(",
"markersize",
")",
"==",
"str",
":",
"# Use as a key vs. index value in this levels",
"idx",
"=",
"scores",
".",
"columns",
".",
"names",
".",
"index",
"(",
"markersize",
")",
"s",
"=",
"c",
"[",
"idx",
"]",
"elif",
"callable",
"(",
"markersize",
")",
":",
"s",
"=",
"markersize",
"(",
"c",
")",
"else",
":",
"s",
"=",
"markersize",
"ax",
".",
"scatter",
"(",
"data",
"[",
"pc1",
",",
":",
"]",
",",
"data",
"[",
"pc2",
",",
":",
"]",
",",
"s",
"=",
"s",
",",
"marker",
"=",
"marker",
",",
"edgecolors",
"=",
"ec",
",",
"c",
"=",
"fc",
")",
"if",
"show_covariance_ellipse",
"and",
"data",
".",
"shape",
"[",
"1",
"]",
">",
"2",
":",
"cov",
"=",
"data",
"[",
"[",
"pc1",
",",
"pc2",
"]",
",",
":",
"]",
".",
"T",
"ellip",
"=",
"plot_point_cov",
"(",
"cov",
",",
"nstd",
"=",
"2",
",",
"linestyle",
"=",
"'dashed'",
",",
"linewidth",
"=",
"0.5",
",",
"edgecolor",
"=",
"ec",
"or",
"fc",
",",
"alpha",
"=",
"0.8",
")",
"#**kwargs for ellipse styling",
"ax",
".",
"add_artist",
"(",
"ellip",
")",
"if",
"label_scores",
":",
"scores_f",
"=",
"scores",
".",
"iloc",
"[",
"[",
"pc1",
",",
"pc2",
"]",
"]",
"idxs",
"=",
"get_index_list",
"(",
"scores_f",
".",
"columns",
".",
"names",
",",
"label_scores",
")",
"texts",
"=",
"[",
"]",
"for",
"n",
",",
"(",
"x",
",",
"y",
")",
"in",
"enumerate",
"(",
"scores_f",
".",
"T",
".",
"values",
")",
":",
"t",
"=",
"ax",
".",
"text",
"(",
"x",
",",
"y",
",",
"build_combined_label",
"(",
"scores_f",
".",
"columns",
".",
"values",
"[",
"n",
"]",
",",
"idxs",
",",
"', '",
")",
",",
"bbox",
"=",
"dict",
"(",
"boxstyle",
"=",
"'round,pad=0.3'",
",",
"fc",
"=",
"'#ffffff'",
",",
"ec",
"=",
"'none'",
",",
"alpha",
"=",
"0.6",
")",
")",
"texts",
".",
"append",
"(",
"t",
")",
"if",
"texts",
"and",
"optimize_label_iter",
":",
"adjust_text",
"(",
"texts",
",",
"lim",
"=",
"optimize_label_iter",
")",
"ax",
".",
"set_xlabel",
"(",
"scores",
".",
"index",
"[",
"pc1",
"]",
",",
"fontsize",
"=",
"16",
")",
"ax",
".",
"set_ylabel",
"(",
"scores",
".",
"index",
"[",
"pc2",
"]",
",",
"fontsize",
"=",
"16",
")",
"fig",
".",
"tight_layout",
"(",
")",
"return",
"ax"
] |
Plot a scores plot for two principal components as AxB scatter plot.
Returns the plotted axis.
:param scores: DataFrame containing scores
:param pc1: Column indexer into scores for PC1
:param pc2: Column indexer into scores for PC2
:param fcol: Face (fill) color definition
:param ecol: Edge color definition
:param marker: Marker style (matplotlib; default 'o')
:param markersize: marker size; an int, a column index level name (str), or a callable taking the column key
:param label_scores: Index level to label markers with
:param show_covariance_ellipse: Plot covariance (2*std) ellipse around each grouping
:param optimize_label_iter: Number of iterations to run label adjustment algorithm
:return: Generated axes
|
[
"Plot",
"a",
"scores",
"plot",
"for",
"two",
"principal",
"components",
"as",
"AxB",
"scatter",
"plot",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/visualize.py#L117-L200
|
train
|
mfitzp/padua
|
padua/visualize.py
|
modifiedaminoacids
|
def modifiedaminoacids(df, kind='pie'):
"""
Generate a plot of relative numbers of modified amino acids in source DataFrame.
Plot a pie or bar chart showing the number and percentage of modified amino
acids in the supplied data frame. The amino acids displayed will be
determined from the supplied data/modification type.
:param df: processed DataFrame
:param kind: `str` type of plot; either "pie" or "bar"
:return: matplotlib ax
"""
colors = ['#6baed6','#c6dbef','#bdbdbd']
total_aas, quants = analysis.modifiedaminoacids(df)
df = pd.DataFrame()
for a, n in quants.items():
df[a] = [n]
df.sort_index(axis=1, inplace=True)
if kind == 'bar' or kind == 'both':
ax1 = df.plot(kind='bar', figsize=(7,7), color=colors)
ax1.set_ylabel('Number of phosphorylated amino acids')
ax1.set_xlabel('Amino acid')
ax1.set_xticks([])
ylim = np.max(df.values)+1000
ax1.set_ylim(0, ylim )
_bartoplabel(ax1, 100*df.values[0], total_aas, ylim )
ax1.set_xlim((-0.3, 0.3))
        if kind == 'bar':
            # bugfix: 'ax' was undefined here, and the unconditional return
            # meant kind='both' never reached the pie chart
            return ax1
    if kind == 'pie' or kind == 'both':
        dfp = df.T
residues = dfp.index.values
dfp.index = ["%.2f%% (%d)" % (100*df[i].values[0]/total_aas, df[i].values[0]) for i in dfp.index.values ]
ax2 = dfp.plot(kind='pie', y=0, colors=colors)
ax2.legend(residues, loc='upper left', bbox_to_anchor=(1.0, 1.0))
ax2.set_ylabel('')
ax2.set_xlabel('')
ax2.figure.set_size_inches(6,6)
for t in ax2.texts:
t.set_fontsize(15)
        if kind == 'pie':
            return ax2
return ax1, ax2
|
python
|
def modifiedaminoacids(df, kind='pie'):
"""
Generate a plot of relative numbers of modified amino acids in source DataFrame.
Plot a pie or bar chart showing the number and percentage of modified amino
acids in the supplied data frame. The amino acids displayed will be
determined from the supplied data/modification type.
:param df: processed DataFrame
:param kind: `str` type of plot; either "pie" or "bar"
:return: matplotlib ax
"""
colors = ['#6baed6','#c6dbef','#bdbdbd']
total_aas, quants = analysis.modifiedaminoacids(df)
df = pd.DataFrame()
for a, n in quants.items():
df[a] = [n]
df.sort_index(axis=1, inplace=True)
if kind == 'bar' or kind == 'both':
ax1 = df.plot(kind='bar', figsize=(7,7), color=colors)
ax1.set_ylabel('Number of phosphorylated amino acids')
ax1.set_xlabel('Amino acid')
ax1.set_xticks([])
ylim = np.max(df.values)+1000
ax1.set_ylim(0, ylim )
_bartoplabel(ax1, 100*df.values[0], total_aas, ylim )
ax1.set_xlim((-0.3, 0.3))
        if kind == 'bar':
            # bugfix: 'ax' was undefined here, and the unconditional return
            # meant kind='both' never reached the pie chart
            return ax1
    if kind == 'pie' or kind == 'both':
        dfp = df.T
residues = dfp.index.values
dfp.index = ["%.2f%% (%d)" % (100*df[i].values[0]/total_aas, df[i].values[0]) for i in dfp.index.values ]
ax2 = dfp.plot(kind='pie', y=0, colors=colors)
ax2.legend(residues, loc='upper left', bbox_to_anchor=(1.0, 1.0))
ax2.set_ylabel('')
ax2.set_xlabel('')
ax2.figure.set_size_inches(6,6)
for t in ax2.texts:
t.set_fontsize(15)
        if kind == 'pie':
            return ax2
return ax1, ax2
|
[
"def",
"modifiedaminoacids",
"(",
"df",
",",
"kind",
"=",
"'pie'",
")",
":",
"colors",
"=",
"[",
"'#6baed6'",
",",
"'#c6dbef'",
",",
"'#bdbdbd'",
"]",
"total_aas",
",",
"quants",
"=",
"analysis",
".",
"modifiedaminoacids",
"(",
"df",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"for",
"a",
",",
"n",
"in",
"quants",
".",
"items",
"(",
")",
":",
"df",
"[",
"a",
"]",
"=",
"[",
"n",
"]",
"df",
".",
"sort_index",
"(",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"if",
"kind",
"==",
"'bar'",
"or",
"kind",
"==",
"'both'",
":",
"ax1",
"=",
"df",
".",
"plot",
"(",
"kind",
"=",
"'bar'",
",",
"figsize",
"=",
"(",
"7",
",",
"7",
")",
",",
"color",
"=",
"colors",
")",
"ax1",
".",
"set_ylabel",
"(",
"'Number of phosphorylated amino acids'",
")",
"ax1",
".",
"set_xlabel",
"(",
"'Amino acid'",
")",
"ax1",
".",
"set_xticks",
"(",
"[",
"]",
")",
"ylim",
"=",
"np",
".",
"max",
"(",
"df",
".",
"values",
")",
"+",
"1000",
"ax1",
".",
"set_ylim",
"(",
"0",
",",
"ylim",
")",
"_bartoplabel",
"(",
"ax1",
",",
"100",
"*",
"df",
".",
"values",
"[",
"0",
"]",
",",
"total_aas",
",",
"ylim",
")",
"ax1",
".",
"set_xlim",
"(",
"(",
"-",
"0.3",
",",
"0.3",
")",
")",
"return",
"ax",
"if",
"kind",
"==",
"'pie'",
"or",
"kind",
"==",
"'both'",
":",
"dfp",
"=",
"df",
".",
"T",
"residues",
"=",
"dfp",
".",
"index",
".",
"values",
"dfp",
".",
"index",
"=",
"[",
"\"%.2f%% (%d)\"",
"%",
"(",
"100",
"*",
"df",
"[",
"i",
"]",
".",
"values",
"[",
"0",
"]",
"/",
"total_aas",
",",
"df",
"[",
"i",
"]",
".",
"values",
"[",
"0",
"]",
")",
"for",
"i",
"in",
"dfp",
".",
"index",
".",
"values",
"]",
"ax2",
"=",
"dfp",
".",
"plot",
"(",
"kind",
"=",
"'pie'",
",",
"y",
"=",
"0",
",",
"colors",
"=",
"colors",
")",
"ax2",
".",
"legend",
"(",
"residues",
",",
"loc",
"=",
"'upper left'",
",",
"bbox_to_anchor",
"=",
"(",
"1.0",
",",
"1.0",
")",
")",
"ax2",
".",
"set_ylabel",
"(",
"''",
")",
"ax2",
".",
"set_xlabel",
"(",
"''",
")",
"ax2",
".",
"figure",
".",
"set_size_inches",
"(",
"6",
",",
"6",
")",
"for",
"t",
"in",
"ax2",
".",
"texts",
":",
"t",
".",
"set_fontsize",
"(",
"15",
")",
"return",
"ax2",
"return",
"ax1",
",",
"ax2"
] |
Generate a plot of relative numbers of modified amino acids in source DataFrame.
Plot a pie or bar chart showing the number and percentage of modified amino
acids in the supplied data frame. The amino acids displayed will be
determined from the supplied data/modification type.
:param df: processed DataFrame
:param kind: `str` type of plot; either "pie" or "bar"
:return: matplotlib ax
|
[
"Generate",
"a",
"plot",
"of",
"relative",
"numbers",
"of",
"modified",
"amino",
"acids",
"in",
"source",
"DataFrame",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/visualize.py#L697-L748
|
train
|
mfitzp/padua
|
padua/visualize.py
|
venn
|
def venn(df1, df2, df3=None, labels=None, ix1=None, ix2=None, ix3=None, return_intersection=False, fcols=None):
"""
Plot a 2 or 3-part venn diagram showing the overlap between 2 or 3 pandas DataFrames.
Provided with two or three Pandas DataFrames, this will return a venn diagram showing the overlap calculated between
the DataFrame indexes provided as ix1, ix2, ix3. Labels for each DataFrame can be provided as a list in the same order,
while `fcols` can be used to specify the colors of each section.
:param df1: Pandas DataFrame
:param df2: Pandas DataFrame
:param df3: Pandas DataFrame (optional)
:param labels: List of labels for the provided dataframes
:param ix1: Index level name of Dataframe 1 to use for comparison
:param ix2: Index level name of Dataframe 2 to use for comparison
:param ix3: Index level name of Dataframe 3 to use for comparison
:param return_intersection: Return the intersection of the supplied indices
:param fcols: Dict mapping each label to a color for that dataframe's set
:return: ax, or ax with intersection
"""
try:
import matplotlib_venn as mplv
except ImportError:
raise ImportError("To plot venn diagrams, install matplotlib-venn package: pip install matplotlib-venn")
plt.gcf().clear()
if labels is None:
labels = ["A", "B", "C"]
s1 = _process_ix(df1.index, ix1)
s2 = _process_ix(df2.index, ix2)
if df3 is not None:
s3 = _process_ix(df3.index, ix3)
kwargs = {}
if fcols:
kwargs['set_colors'] = [fcols[l] for l in labels]
if df3 is not None:
vn = mplv.venn3([s1,s2,s3], set_labels=labels, **kwargs)
intersection = s1 & s2 & s3
else:
vn = mplv.venn2([s1,s2], set_labels=labels, **kwargs)
intersection = s1 & s2
ax = plt.gca()
if return_intersection:
return ax, list(intersection)
else:
return ax
|
python
|
def venn(df1, df2, df3=None, labels=None, ix1=None, ix2=None, ix3=None, return_intersection=False, fcols=None):
"""
Plot a 2 or 3-part venn diagram showing the overlap between 2 or 3 pandas DataFrames.
Provided with two or three Pandas DataFrames, this will return a venn diagram showing the overlap calculated between
the DataFrame indexes provided as ix1, ix2, ix3. Labels for each DataFrame can be provided as a list in the same order,
while `fcols` can be used to specify the colors of each section.
:param df1: Pandas DataFrame
:param df2: Pandas DataFrame
:param df3: Pandas DataFrame (optional)
:param labels: List of labels for the provided dataframes
:param ix1: Index level name of Dataframe 1 to use for comparison
:param ix2: Index level name of Dataframe 2 to use for comparison
:param ix3: Index level name of Dataframe 3 to use for comparison
:param return_intersection: Return the intersection of the supplied indices
:param fcols: Dict mapping each label to a color for that dataframe's set
:return: ax, or ax with intersection
"""
try:
import matplotlib_venn as mplv
except ImportError:
raise ImportError("To plot venn diagrams, install matplotlib-venn package: pip install matplotlib-venn")
plt.gcf().clear()
if labels is None:
labels = ["A", "B", "C"]
s1 = _process_ix(df1.index, ix1)
s2 = _process_ix(df2.index, ix2)
if df3 is not None:
s3 = _process_ix(df3.index, ix3)
kwargs = {}
if fcols:
kwargs['set_colors'] = [fcols[l] for l in labels]
if df3 is not None:
vn = mplv.venn3([s1,s2,s3], set_labels=labels, **kwargs)
intersection = s1 & s2 & s3
else:
vn = mplv.venn2([s1,s2], set_labels=labels, **kwargs)
intersection = s1 & s2
ax = plt.gca()
if return_intersection:
return ax, list(intersection)
else:
return ax
|
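A minimal usage sketch for venn, using two toy DataFrames (hypothetical identifiers; with ix1/ix2 left as None, the full index of each frame is assumed to be compared):

import pandas as pd
from padua.visualize import venn

df1 = pd.DataFrame(index=['P07830', 'P63267', 'Q54A44'])
df2 = pd.DataFrame(index=['P63267', 'Q54A44', 'P63268'])

# Returns the matplotlib axis and, optionally, the shared identifiers.
ax, shared = venn(df1, df2, labels=['Control', 'Treated'],
                  return_intersection=True)
print(shared)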
[
"def",
"venn",
"(",
"df1",
",",
"df2",
",",
"df3",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"ix1",
"=",
"None",
",",
"ix2",
"=",
"None",
",",
"ix3",
"=",
"None",
",",
"return_intersection",
"=",
"False",
",",
"fcols",
"=",
"None",
")",
":",
"try",
":",
"import",
"matplotlib_venn",
"as",
"mplv",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"To plot venn diagrams, install matplotlib-venn package: pip install matplotlib-venn\"",
")",
"plt",
".",
"gcf",
"(",
")",
".",
"clear",
"(",
")",
"if",
"labels",
"is",
"None",
":",
"labels",
"=",
"[",
"\"A\"",
",",
"\"B\"",
",",
"\"C\"",
"]",
"s1",
"=",
"_process_ix",
"(",
"df1",
".",
"index",
",",
"ix1",
")",
"s2",
"=",
"_process_ix",
"(",
"df2",
".",
"index",
",",
"ix2",
")",
"if",
"df3",
"is",
"not",
"None",
":",
"s3",
"=",
"_process_ix",
"(",
"df3",
".",
"index",
",",
"ix3",
")",
"kwargs",
"=",
"{",
"}",
"if",
"fcols",
":",
"kwargs",
"[",
"'set_colors'",
"]",
"=",
"[",
"fcols",
"[",
"l",
"]",
"for",
"l",
"in",
"labels",
"]",
"if",
"df3",
"is",
"not",
"None",
":",
"vn",
"=",
"mplv",
".",
"venn3",
"(",
"[",
"s1",
",",
"s2",
",",
"s3",
"]",
",",
"set_labels",
"=",
"labels",
",",
"*",
"*",
"kwargs",
")",
"intersection",
"=",
"s1",
"&",
"s2",
"&",
"s3",
"else",
":",
"vn",
"=",
"mplv",
".",
"venn2",
"(",
"[",
"s1",
",",
"s2",
"]",
",",
"set_labels",
"=",
"labels",
",",
"*",
"*",
"kwargs",
")",
"intersection",
"=",
"s1",
"&",
"s2",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"if",
"return_intersection",
":",
"return",
"ax",
",",
"list",
"(",
"intersection",
")",
"else",
":",
"return",
"ax"
] |
Plot a 2 or 3-part venn diagram showing the overlap between 2 or 3 pandas DataFrames.
Provided with two or three Pandas DataFrames, this will return a venn diagram showing the overlap calculated between
the DataFrame indexes provided as ix1, ix2, ix3. Labels for each DataFrame can be provided as a list in the same order,
while `fcols` can be used to specify the colors of each section.
:param df1: Pandas DataFrame
:param df2: Pandas DataFrame
:param df3: Pandas DataFrame (optional)
:param labels: List of labels for the provided dataframes
:param ix1: Index level name of Dataframe 1 to use for comparison
:param ix2: Index level name of Dataframe 2 to use for comparison
:param ix3: Index level name of Dataframe 3 to use for comparison
:param return_intersection: Return the intersection of the supplied indices
:param fcols: Dict mapping each label to a color for that dataframe's set
:return: ax, or ax with intersection
|
[
"Plot",
"a",
"2",
"or",
"3",
"-",
"part",
"venn",
"diagram",
"showing",
"the",
"overlap",
"between",
"2",
"or",
"3",
"pandas",
"DataFrames",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/visualize.py#L979-L1033
|
train
|
mfitzp/padua
|
padua/visualize.py
|
sitespeptidesproteins
|
def sitespeptidesproteins(df, labels=None, colors=None, site_localization_probability=0.75):
"""
Plot the number of sites, peptides and proteins in the dataset.
Generates a plot with sites, peptides and proteins displayed hierarchically in chevrons.
The site count is limited to Class I (>=0.75 site localization probability) by default
but may be altered using the `site_localization_probability` parameter.
Labels and alternate colours may be supplied as a 3-entry iterable.
:param df: pandas DataFrame to calculate numbers from
:param labels: list/tuple of 3 strings containing labels
:param colors: list/tuple of 3 colours as hex codes or matplotlib color codes
:param site_localization_probability: the cut-off for site inclusion (default=0.75; Class I)
:return:
"""
fig = plt.figure(figsize=(4,6))
ax = fig.add_subplot(1,1,1)
shift = 0.5
values = analysis.sitespeptidesproteins(df, site_localization_probability)
if labels is None:
labels = ['Sites (Class I)', 'Peptides', 'Proteins']
if colors is None:
colors = ['#756bb1', '#bcbddc', '#dadaeb']
for n, (c, l, v) in enumerate(zip(colors, labels, values)):
ax.fill_between([0,1,2], np.array([shift,0,shift]) + n, np.array([1+shift,1,1+shift]) + n, color=c, alpha=0.5 )
ax.text(1, 0.5 + n, "{}\n{:,}".format(l, v), ha='center', color='k', fontsize=16 )
ax.set_xticks([])
ax.set_yticks([])
ax.set_axis_off()
return ax
|
python
|
def sitespeptidesproteins(df, labels=None, colors=None, site_localization_probability=0.75):
"""
Plot the number of sites, peptides and proteins in the dataset.
Generates a plot with sites, peptides and proteins displayed hierarchically in chevrons.
The site count is limited to Class I (>=0.75 site localization probability) by default
but may be altered using the `site_localization_probability` parameter.
Labels and alternate colours may be supplied as a 3-entry iterable.
:param df: pandas DataFrame to calculate numbers from
:param labels: list/tuple of 3 strings containing labels
:param colors: list/tuple of 3 colours as hex codes or matplotlib color codes
:param site_localization_probability: the cut-off for site inclusion (default=0.75; Class I)
:return:
"""
fig = plt.figure(figsize=(4,6))
ax = fig.add_subplot(1,1,1)
shift = 0.5
values = analysis.sitespeptidesproteins(df, site_localization_probability)
if labels is None:
labels = ['Sites (Class I)', 'Peptides', 'Proteins']
if colors is None:
colors = ['#756bb1', '#bcbddc', '#dadaeb']
for n, (c, l, v) in enumerate(zip(colors, labels, values)):
ax.fill_between([0,1,2], np.array([shift,0,shift]) + n, np.array([1+shift,1,1+shift]) + n, color=c, alpha=0.5 )
ax.text(1, 0.5 + n, "{}\n{:,}".format(l, v), ha='center', color='k', fontsize=16 )
ax.set_xticks([])
ax.set_yticks([])
ax.set_axis_off()
return ax
|
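A minimal usage sketch for sitespeptidesproteins (editorial example; `df` is assumed to be a MaxQuant Phospho (STY) sites table with the columns padua.analysis.sitespeptidesproteins expects):

from padua.visualize import sitespeptidesproteins

ax = sitespeptidesproteins(df, site_localization_probability=0.75)
ax.figure.savefig('sites_peptides_proteins.svg')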
[
"def",
"sitespeptidesproteins",
"(",
"df",
",",
"labels",
"=",
"None",
",",
"colors",
"=",
"None",
",",
"site_localization_probability",
"=",
"0.75",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"4",
",",
"6",
")",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"1",
",",
"1",
",",
"1",
")",
"shift",
"=",
"0.5",
"values",
"=",
"analysis",
".",
"sitespeptidesproteins",
"(",
"df",
",",
"site_localization_probability",
")",
"if",
"labels",
"is",
"None",
":",
"labels",
"=",
"[",
"'Sites (Class I)'",
",",
"'Peptides'",
",",
"'Proteins'",
"]",
"if",
"colors",
"is",
"None",
":",
"colors",
"=",
"[",
"'#756bb1'",
",",
"'#bcbddc'",
",",
"'#dadaeb'",
"]",
"for",
"n",
",",
"(",
"c",
",",
"l",
",",
"v",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"colors",
",",
"labels",
",",
"values",
")",
")",
":",
"ax",
".",
"fill_between",
"(",
"[",
"0",
",",
"1",
",",
"2",
"]",
",",
"np",
".",
"array",
"(",
"[",
"shift",
",",
"0",
",",
"shift",
"]",
")",
"+",
"n",
",",
"np",
".",
"array",
"(",
"[",
"1",
"+",
"shift",
",",
"1",
",",
"1",
"+",
"shift",
"]",
")",
"+",
"n",
",",
"color",
"=",
"c",
",",
"alpha",
"=",
"0.5",
")",
"ax",
".",
"text",
"(",
"1",
",",
"0.5",
"+",
"n",
",",
"\"{}\\n{:,}\"",
".",
"format",
"(",
"l",
",",
"v",
")",
",",
"ha",
"=",
"'center'",
",",
"color",
"=",
"'k'",
",",
"fontsize",
"=",
"16",
")",
"ax",
".",
"set_xticks",
"(",
"[",
"]",
")",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
")",
"ax",
".",
"set_axis_off",
"(",
")",
"return",
"ax"
] |
Plot the number of sites, peptides and proteins in the dataset.
Generates a plot with sites, peptides and proteins displayed hierarchically in chevrons.
The site count is limited to Class I (>=0.75 site localization probability) by default
but may be altered using the `site_localization_probability` parameter.
Labels and alternate colours may be supplied as a 3-entry iterable.
:param df: pandas DataFrame to calculate numbers from
:param labels: list/tuple of 3 strings containing labels
:param colors: list/tuple of 3 colours as hex codes or matplotlib color codes
:param site_localization_probability: the cut-off for site inclusion (default=0.75; Class I)
:return:
|
[
"Plot",
"the",
"number",
"of",
"sites",
"peptides",
"and",
"proteins",
"in",
"the",
"dataset",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/visualize.py#L1036-L1071
|
train
|
mfitzp/padua
|
padua/visualize.py
|
_areadist
|
def _areadist(ax, v, xr, c, bins=100, by=None, alpha=1, label=None):
"""
Plot the histogram distribution but as an area plot
"""
y, x = np.histogram(v[~np.isnan(v)], bins)
x = x[:-1]
if by is None:
by = np.zeros((bins,))
ax.fill_between(x, y, by, facecolor=c, alpha=alpha, label=label)
return y
|
python
|
def _areadist(ax, v, xr, c, bins=100, by=None, alpha=1, label=None):
"""
Plot the histogram distribution but as an area plot
"""
y, x = np.histogram(v[~np.isnan(v)], bins)
x = x[:-1]
if by is None:
by = np.zeros((bins,))
ax.fill_between(x, y, by, facecolor=c, alpha=alpha, label=label)
return y
|
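A minimal usage sketch for _areadist (a private helper, so the call signature may change; the sample distribution is hypothetical, and None is passed for the unused `xr` argument):

import numpy as np
import matplotlib.pyplot as plt
from padua.visualize import _areadist

fig, ax = plt.subplots()
values = np.random.normal(size=1000)  # hypothetical data
y = _areadist(ax, values, None, 'steelblue', bins=50, label='sample')
ax.legend()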
[
"def",
"_areadist",
"(",
"ax",
",",
"v",
",",
"xr",
",",
"c",
",",
"bins",
"=",
"100",
",",
"by",
"=",
"None",
",",
"alpha",
"=",
"1",
",",
"label",
"=",
"None",
")",
":",
"y",
",",
"x",
"=",
"np",
".",
"histogram",
"(",
"v",
"[",
"~",
"np",
".",
"isnan",
"(",
"v",
")",
"]",
",",
"bins",
")",
"x",
"=",
"x",
"[",
":",
"-",
"1",
"]",
"if",
"by",
"is",
"None",
":",
"by",
"=",
"np",
".",
"zeros",
"(",
"(",
"bins",
",",
")",
")",
"ax",
".",
"fill_between",
"(",
"x",
",",
"y",
",",
"by",
",",
"facecolor",
"=",
"c",
",",
"alpha",
"=",
"alpha",
",",
"label",
"=",
"label",
")",
"return",
"y"
] |
Plot the histogram distribution but as an area plot
|
[
"Plot",
"the",
"histogram",
"distribution",
"but",
"as",
"an",
"area",
"plot"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/visualize.py#L1374-L1385
|
train
|
mfitzp/padua
|
padua/visualize.py
|
hierarchical_timecourse
|
def hierarchical_timecourse(
df,
cluster_cols=True,
cluster_rows=False,
n_col_clusters=False,
n_row_clusters=False,
fcol=None,
z_score=0,
method='ward',
cmap=cm.PuOr_r,
return_clusters=False,
rdistance_fn=distance.pdist,
cdistance_fn=distance.pdist,
xlabel='Timepoint',
ylabel='log$_2$ Fold Change'
):
"""
Hierarchical clustering of samples across timecourse experiment.
    Perform a hierarchical clustering on a pandas DataFrame and display the resulting clustering as a
    timecourse density plot.
    Samples are z-scored along the 0-axis (y) by default. To override this, pass the axis to z-score
    via the `z_score` param, or `None` to turn it off.
If a `n_col_clusters` or `n_row_clusters` is specified, this defines the number of clusters to identify and highlight
in the resulting heatmap. At *least* this number of clusters will be selected, in some instances there will be more
if 2 clusters rank equally at the determined cutoff.
If specified `fcol` will be used to colour the axes for matching samples.
:param df: Pandas ``DataFrame`` to cluster
:param n_col_clusters: ``int`` the ideal number of highlighted clusters in cols
:param n_row_clusters: ``int`` the ideal number of highlighted clusters in rows
:param fcol: ``dict`` of label:colors to be applied along the axes
:param z_score: ``int`` to specify the axis to Z score or `None` to disable
:param method: ``str`` describing cluster method, default ward
:param cmap: matplotlib colourmap for heatmap
:param return_clusters: ``bool`` return clusters in addition to axis
:return: matplotlib axis, or axis and cluster data
"""
dfc, row_clusters, row_denD, col_clusters, col_denD, edges = _cluster(df,
cluster_cols=cluster_cols, cluster_rows=cluster_rows, n_col_clusters=n_col_clusters,
n_row_clusters=n_row_clusters, z_score=z_score, method='ward',
rdistance_fn=rdistance_fn, cdistance_fn=cdistance_fn
)
# FIXME: Need to apply a sort function to the DataFrame to order by the clustering
# so we can slice the edges.
dfh = dfc.iloc[row_denD['leaves'], col_denD['leaves']]
dfh = dfh.mean(axis=0, level=[0, 1])
vmax = np.max(dfh.values)
color = ScalarMappable(norm=Normalize(vmin=0, vmax=vmax), cmap=viridis)
fig = plt.figure(figsize=(12, 6))
edges = [0] + edges + [dfh.shape[1]]
for n in range(len(edges) - 1):
ax = fig.add_subplot(2, 4, n + 1)
dfhf = dfh.iloc[:, edges[n]:edges[n + 1]]
xpos = dfhf.index.get_level_values(1)
mv = dfhf.mean(axis=1)
distances = [distance.euclidean(mv, dfhf.values[:, n]) for n in range(dfhf.shape[1])]
colors = [color.to_rgba(v) for v in distances]
order = np.argsort(distances)[::-1]
for y in order:
ax.plot(xpos, dfhf.values[:, y], c=colors[y], alpha=0.5, lw=1) # dfhf.index.get_level_values(1),
ax.set_xticks(xpos)
if n > 3:
ax.set_xticklabels(xpos)
ax.set_xlabel(xlabel)
else:
ax.set_xticklabels([])
if n % 4 != 0:
ax.set_yticklabels([])
else:
ax.set_ylabel(ylabel)
ax.set_ylim((-3, +3))
fig.subplots_adjust(hspace=0.15, wspace=0.15)
if return_clusters:
return fig, dfh, edges
else:
return fig
|
python
|
def hierarchical_timecourse(
df,
cluster_cols=True,
cluster_rows=False,
n_col_clusters=False,
n_row_clusters=False,
fcol=None,
z_score=0,
method='ward',
cmap=cm.PuOr_r,
return_clusters=False,
rdistance_fn=distance.pdist,
cdistance_fn=distance.pdist,
xlabel='Timepoint',
ylabel='log$_2$ Fold Change'
):
"""
Hierarchical clustering of samples across timecourse experiment.
    Perform a hierarchical clustering on a pandas DataFrame and display the resulting clustering as a
    timecourse density plot.
    Samples are z-scored along the 0-axis (y) by default. To override this, pass the axis to z-score
    via the `z_score` param, or `None` to turn it off.
If a `n_col_clusters` or `n_row_clusters` is specified, this defines the number of clusters to identify and highlight
in the resulting heatmap. At *least* this number of clusters will be selected, in some instances there will be more
if 2 clusters rank equally at the determined cutoff.
If specified `fcol` will be used to colour the axes for matching samples.
:param df: Pandas ``DataFrame`` to cluster
:param n_col_clusters: ``int`` the ideal number of highlighted clusters in cols
:param n_row_clusters: ``int`` the ideal number of highlighted clusters in rows
:param fcol: ``dict`` of label:colors to be applied along the axes
:param z_score: ``int`` to specify the axis to Z score or `None` to disable
:param method: ``str`` describing cluster method, default ward
:param cmap: matplotlib colourmap for heatmap
:param return_clusters: ``bool`` return clusters in addition to axis
:return: matplotlib axis, or axis and cluster data
"""
dfc, row_clusters, row_denD, col_clusters, col_denD, edges = _cluster(df,
cluster_cols=cluster_cols, cluster_rows=cluster_rows, n_col_clusters=n_col_clusters,
n_row_clusters=n_row_clusters, z_score=z_score, method='ward',
rdistance_fn=rdistance_fn, cdistance_fn=cdistance_fn
)
# FIXME: Need to apply a sort function to the DataFrame to order by the clustering
# so we can slice the edges.
dfh = dfc.iloc[row_denD['leaves'], col_denD['leaves']]
dfh = dfh.mean(axis=0, level=[0, 1])
vmax = np.max(dfh.values)
color = ScalarMappable(norm=Normalize(vmin=0, vmax=vmax), cmap=viridis)
fig = plt.figure(figsize=(12, 6))
edges = [0] + edges + [dfh.shape[1]]
for n in range(len(edges) - 1):
ax = fig.add_subplot(2, 4, n + 1)
dfhf = dfh.iloc[:, edges[n]:edges[n + 1]]
xpos = dfhf.index.get_level_values(1)
mv = dfhf.mean(axis=1)
distances = [distance.euclidean(mv, dfhf.values[:, n]) for n in range(dfhf.shape[1])]
colors = [color.to_rgba(v) for v in distances]
order = np.argsort(distances)[::-1]
for y in order:
ax.plot(xpos, dfhf.values[:, y], c=colors[y], alpha=0.5, lw=1) # dfhf.index.get_level_values(1),
ax.set_xticks(xpos)
if n > 3:
ax.set_xticklabels(xpos)
ax.set_xlabel(xlabel)
else:
ax.set_xticklabels([])
if n % 4 != 0:
ax.set_yticklabels([])
else:
ax.set_ylabel(ylabel)
ax.set_ylim((-3, +3))
fig.subplots_adjust(hspace=0.15, wspace=0.15)
if return_clusters:
return fig, dfh, edges
else:
return fig
|
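A minimal usage sketch for hierarchical_timecourse (sketch only: `df` is assumed to carry a MultiIndex with timepoints at index level 1, since the function reads them via index.get_level_values(1)):

from padua.visualize import hierarchical_timecourse

fig, dfh, edges = hierarchical_timecourse(df, n_col_clusters=8,
                                          return_clusters=True)
fig.savefig('timecourse_clusters.png')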
[
"def",
"hierarchical_timecourse",
"(",
"df",
",",
"cluster_cols",
"=",
"True",
",",
"cluster_rows",
"=",
"False",
",",
"n_col_clusters",
"=",
"False",
",",
"n_row_clusters",
"=",
"False",
",",
"fcol",
"=",
"None",
",",
"z_score",
"=",
"0",
",",
"method",
"=",
"'ward'",
",",
"cmap",
"=",
"cm",
".",
"PuOr_r",
",",
"return_clusters",
"=",
"False",
",",
"rdistance_fn",
"=",
"distance",
".",
"pdist",
",",
"cdistance_fn",
"=",
"distance",
".",
"pdist",
",",
"xlabel",
"=",
"'Timepoint'",
",",
"ylabel",
"=",
"'log$_2$ Fold Change'",
")",
":",
"dfc",
",",
"row_clusters",
",",
"row_denD",
",",
"col_clusters",
",",
"col_denD",
",",
"edges",
"=",
"_cluster",
"(",
"df",
",",
"cluster_cols",
"=",
"cluster_cols",
",",
"cluster_rows",
"=",
"cluster_rows",
",",
"n_col_clusters",
"=",
"n_col_clusters",
",",
"n_row_clusters",
"=",
"n_row_clusters",
",",
"z_score",
"=",
"z_score",
",",
"method",
"=",
"'ward'",
",",
"rdistance_fn",
"=",
"rdistance_fn",
",",
"cdistance_fn",
"=",
"cdistance_fn",
")",
"# FIXME: Need to apply a sort function to the DataFrame to order by the clustering",
"# so we can slice the edges.",
"dfh",
"=",
"dfc",
".",
"iloc",
"[",
"row_denD",
"[",
"'leaves'",
"]",
",",
"col_denD",
"[",
"'leaves'",
"]",
"]",
"dfh",
"=",
"dfh",
".",
"mean",
"(",
"axis",
"=",
"0",
",",
"level",
"=",
"[",
"0",
",",
"1",
"]",
")",
"vmax",
"=",
"np",
".",
"max",
"(",
"dfh",
".",
"values",
")",
"color",
"=",
"ScalarMappable",
"(",
"norm",
"=",
"Normalize",
"(",
"vmin",
"=",
"0",
",",
"vmax",
"=",
"vmax",
")",
",",
"cmap",
"=",
"viridis",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"12",
",",
"6",
")",
")",
"edges",
"=",
"[",
"0",
"]",
"+",
"edges",
"+",
"[",
"dfh",
".",
"shape",
"[",
"1",
"]",
"]",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"edges",
")",
"-",
"1",
")",
":",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"2",
",",
"4",
",",
"n",
"+",
"1",
")",
"dfhf",
"=",
"dfh",
".",
"iloc",
"[",
":",
",",
"edges",
"[",
"n",
"]",
":",
"edges",
"[",
"n",
"+",
"1",
"]",
"]",
"xpos",
"=",
"dfhf",
".",
"index",
".",
"get_level_values",
"(",
"1",
")",
"mv",
"=",
"dfhf",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"distances",
"=",
"[",
"distance",
".",
"euclidean",
"(",
"mv",
",",
"dfhf",
".",
"values",
"[",
":",
",",
"n",
"]",
")",
"for",
"n",
"in",
"range",
"(",
"dfhf",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"colors",
"=",
"[",
"color",
".",
"to_rgba",
"(",
"v",
")",
"for",
"v",
"in",
"distances",
"]",
"order",
"=",
"np",
".",
"argsort",
"(",
"distances",
")",
"[",
":",
":",
"-",
"1",
"]",
"for",
"y",
"in",
"order",
":",
"ax",
".",
"plot",
"(",
"xpos",
",",
"dfhf",
".",
"values",
"[",
":",
",",
"y",
"]",
",",
"c",
"=",
"colors",
"[",
"y",
"]",
",",
"alpha",
"=",
"0.5",
",",
"lw",
"=",
"1",
")",
"# dfhf.index.get_level_values(1),",
"ax",
".",
"set_xticks",
"(",
"xpos",
")",
"if",
"n",
">",
"3",
":",
"ax",
".",
"set_xticklabels",
"(",
"xpos",
")",
"ax",
".",
"set_xlabel",
"(",
"xlabel",
")",
"else",
":",
"ax",
".",
"set_xticklabels",
"(",
"[",
"]",
")",
"if",
"n",
"%",
"4",
"!=",
"0",
":",
"ax",
".",
"set_yticklabels",
"(",
"[",
"]",
")",
"else",
":",
"ax",
".",
"set_ylabel",
"(",
"ylabel",
")",
"ax",
".",
"set_ylim",
"(",
"(",
"-",
"3",
",",
"+",
"3",
")",
")",
"fig",
".",
"subplots_adjust",
"(",
"hspace",
"=",
"0.15",
",",
"wspace",
"=",
"0.15",
")",
"if",
"return_clusters",
":",
"return",
"fig",
",",
"dfh",
",",
"edges",
"else",
":",
"return",
"fig"
] |
Hierarchical clustering of samples across timecourse experiment.
Perform a hierarchical clustering on a pandas DataFrame and display the resulting clustering as a
timecourse density plot.
Samples are z-scored along the 0-axis (y) by default. To override this, pass the axis to z-score
via the `z_score` param, or `None` to turn it off.
If a `n_col_clusters` or `n_row_clusters` is specified, this defines the number of clusters to identify and highlight
in the resulting heatmap. At *least* this number of clusters will be selected, in some instances there will be more
if 2 clusters rank equally at the determined cutoff.
If specified `fcol` will be used to colour the axes for matching samples.
:param df: Pandas ``DataFrame`` to cluster
:param n_col_clusters: ``int`` the ideal number of highlighted clusters in cols
:param n_row_clusters: ``int`` the ideal number of highlighted clusters in rows
:param fcol: ``dict`` of label:colors to be applied along the axes
:param z_score: ``int`` to specify the axis to Z score or `None` to disable
:param method: ``str`` describing cluster method, default ward
:param cmap: matplotlib colourmap for heatmap
:param return_clusters: ``bool`` return clusters in addition to axis
:return: matplotlib axis, or axis and cluster data
|
[
"Hierarchical",
"clustering",
"of",
"samples",
"across",
"timecourse",
"experiment",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/visualize.py#L1872-L1965
|
train
|
mfitzp/padua
|
padua/normalization.py
|
subtract_column_median
|
def subtract_column_median(df, prefix='Intensity '):
"""
Apply column-wise normalisation to expression columns.
    By default, the column median is subtracted from expression columns beginning with 'Intensity '.
:param df:
:param prefix: The column prefix for expression columns
:return:
"""
df = df.copy()
df.replace([np.inf, -np.inf], np.nan, inplace=True)
mask = [l.startswith(prefix) for l in df.columns.values]
df.iloc[:, mask] = df.iloc[:, mask] - df.iloc[:, mask].median(axis=0)
return df
|
python
|
def subtract_column_median(df, prefix='Intensity '):
"""
Apply column-wise normalisation to expression columns.
    By default, the column median is subtracted from expression columns beginning with 'Intensity '.
:param df:
:param prefix: The column prefix for expression columns
:return:
"""
df = df.copy()
df.replace([np.inf, -np.inf], np.nan, inplace=True)
mask = [l.startswith(prefix) for l in df.columns.values]
df.iloc[:, mask] = df.iloc[:, mask] - df.iloc[:, mask].median(axis=0)
return df
|
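A minimal usage sketch for subtract_column_median, on a toy table (hypothetical values; columns not matching the 'Intensity ' prefix pass through untouched):

import pandas as pd
from padua.normalization import subtract_column_median

df = pd.DataFrame({
    'Intensity A': [1.0, 2.0, 3.0],
    'Intensity B': [10.0, 20.0, 30.0],
})
normed = subtract_column_median(df)
print(normed)  # each 'Intensity' column now has median 0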
[
"def",
"subtract_column_median",
"(",
"df",
",",
"prefix",
"=",
"'Intensity '",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"df",
".",
"replace",
"(",
"[",
"np",
".",
"inf",
",",
"-",
"np",
".",
"inf",
"]",
",",
"np",
".",
"nan",
",",
"inplace",
"=",
"True",
")",
"mask",
"=",
"[",
"l",
".",
"startswith",
"(",
"prefix",
")",
"for",
"l",
"in",
"df",
".",
"columns",
".",
"values",
"]",
"df",
".",
"iloc",
"[",
":",
",",
"mask",
"]",
"=",
"df",
".",
"iloc",
"[",
":",
",",
"mask",
"]",
"-",
"df",
".",
"iloc",
"[",
":",
",",
"mask",
"]",
".",
"median",
"(",
"axis",
"=",
"0",
")",
"return",
"df"
] |
Apply column-wise normalisation to expression columns.
By default, the column median is subtracted from expression columns beginning with 'Intensity '.
:param df:
:param prefix: The column prefix for expression columns
:return:
|
[
"Apply",
"column",
"-",
"wise",
"normalisation",
"to",
"expression",
"columns",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/normalization.py#L4-L22
|
train
|
mfitzp/padua
|
padua/utils.py
|
get_protein_id_list
|
def get_protein_id_list(df, level=0):
"""
Return a complete list of shortform IDs from a DataFrame
Extract all protein IDs from a dataframe from multiple rows containing
protein IDs in MaxQuant output format: e.g. P07830;P63267;Q54A44;P63268
Long names (containing species information) are eliminated (split on ' ') and
isoforms are removed (split on '_').
:param df: DataFrame
:type df: pandas.DataFrame
:param level: Level of DataFrame index to extract IDs from
:type level: int or str
:return: list of string ids
"""
protein_list = []
for s in df.index.get_level_values(level):
protein_list.extend( get_protein_ids(s) )
return list(set(protein_list))
|
python
|
def get_protein_id_list(df, level=0):
"""
Return a complete list of shortform IDs from a DataFrame
Extract all protein IDs from a dataframe from multiple rows containing
protein IDs in MaxQuant output format: e.g. P07830;P63267;Q54A44;P63268
Long names (containing species information) are eliminated (split on ' ') and
isoforms are removed (split on '_').
:param df: DataFrame
:type df: pandas.DataFrame
:param level: Level of DataFrame index to extract IDs from
:type level: int or str
:return: list of string ids
"""
protein_list = []
for s in df.index.get_level_values(level):
protein_list.extend( get_protein_ids(s) )
return list(set(protein_list))
|
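A minimal usage sketch for get_protein_id_list (the semicolon-separated IDs are hypothetical; the per-row splitting is delegated to get_protein_ids in the same module):

import pandas as pd
from padua.utils import get_protein_id_list

df = pd.DataFrame(index=pd.Index(['P07830;P63267', 'Q54A44;P63268']))
ids = get_protein_id_list(df, level=0)  # deduplicated list of IDs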
[
"def",
"get_protein_id_list",
"(",
"df",
",",
"level",
"=",
"0",
")",
":",
"protein_list",
"=",
"[",
"]",
"for",
"s",
"in",
"df",
".",
"index",
".",
"get_level_values",
"(",
"level",
")",
":",
"protein_list",
".",
"extend",
"(",
"get_protein_ids",
"(",
"s",
")",
")",
"return",
"list",
"(",
"set",
"(",
"protein_list",
")",
")"
] |
Return a complete list of shortform IDs from a DataFrame
Extract all protein IDs from a dataframe from multiple rows containing
protein IDs in MaxQuant output format: e.g. P07830;P63267;Q54A44;P63268
Long names (containing species information) are eliminated (split on ' ') and
isoforms are removed (split on '_').
:param df: DataFrame
:type df: pandas.DataFrame
:param level: Level of DataFrame index to extract IDs from
:type level: int or str
:return: list of string ids
|
[
"Return",
"a",
"complete",
"list",
"of",
"shortform",
"IDs",
"from",
"a",
"DataFrame"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/utils.py#L142-L162
|
train
|
mfitzp/padua
|
padua/utils.py
|
hierarchical_match
|
def hierarchical_match(d, k, default=None):
"""
    Match a key against a dict, simplifying the key one element at a time.
    :param d: dict to match the key against
    :param k: key, or tuple of key elements; trailing elements are dropped until a match is found
    :param default: value to return when no match is found
    :return: hierarchically matched value or default
"""
if d is None:
return default
if type(k) != list and type(k) != tuple:
k = [k]
for n, _ in enumerate(k):
key = tuple(k[0:len(k)-n])
if len(key) == 1:
key = key[0]
try:
d[key]
        except KeyError:
pass
else:
return d[key]
return default
|
python
|
def hierarchical_match(d, k, default=None):
"""
    Match a key against a dict, simplifying the key one element at a time.
    :param d: dict to match the key against
    :param k: key, or tuple of key elements; trailing elements are dropped until a match is found
    :param default: value to return when no match is found
    :return: hierarchically matched value or default
"""
if d is None:
return default
if type(k) != list and type(k) != tuple:
k = [k]
for n, _ in enumerate(k):
key = tuple(k[0:len(k)-n])
if len(key) == 1:
key = key[0]
try:
d[key]
        except KeyError:
pass
else:
return d[key]
return default
|
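A minimal usage sketch for hierarchical_match, showing the fall-back behaviour on a toy dict (hypothetical keys):

from padua.utils import hierarchical_match

d = {('style', 'marker'): 'o', 'style': '-'}
hierarchical_match(d, ('style', 'marker'))       # -> 'o' (exact match)
hierarchical_match(d, ('style', 'missing'))      # -> '-' (falls back to 'style')
hierarchical_match(d, 'absent', default='none')  # -> 'none'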
[
"def",
"hierarchical_match",
"(",
"d",
",",
"k",
",",
"default",
"=",
"None",
")",
":",
"if",
"d",
"is",
"None",
":",
"return",
"default",
"if",
"type",
"(",
"k",
")",
"!=",
"list",
"and",
"type",
"(",
"k",
")",
"!=",
"tuple",
":",
"k",
"=",
"[",
"k",
"]",
"for",
"n",
",",
"_",
"in",
"enumerate",
"(",
"k",
")",
":",
"key",
"=",
"tuple",
"(",
"k",
"[",
"0",
":",
"len",
"(",
"k",
")",
"-",
"n",
"]",
")",
"if",
"len",
"(",
"key",
")",
"==",
"1",
":",
"key",
"=",
"key",
"[",
"0",
"]",
"try",
":",
"d",
"[",
"key",
"]",
"except",
":",
"pass",
"else",
":",
"return",
"d",
"[",
"key",
"]",
"return",
"default"
] |
Match a key against a dict, simplifying the key one element at a time.
:param d: dict to match the key against
:param k: key, or tuple of key elements; trailing elements are dropped until a match is found
:param default: value to return when no match is found
:return: hierarchically matched value or default
|
[
"Match",
"a",
"key",
"against",
"a",
"dict",
"simplifying",
"element",
"at",
"a",
"time"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/utils.py#L228-L256
|
train
|
mfitzp/padua
|
padua/utils.py
|
calculate_s0_curve
|
def calculate_s0_curve(s0, minpval, maxpval, minratio, maxratio, curve_interval=0.1):
"""
Calculate s0 curve for volcano plot.
    Taking a min and max p value, and a min and max ratio, calculate a smooth
    curve starting from parameter `s0` in each direction.
    The `curve_interval` parameter defines the smoothness of the resulting curve.
    :param s0: `float` offset of the curve from the intercept
    :param minpval: `float` minimum p value
    :param maxpval: `float` maximum p value
    :param minratio: `float` minimum ratio
    :param maxratio: `float` maximum ratio
    :param curve_interval: `float` stepsize (smoothness) of curve generator
    :return: x, y, fn: the x, y points of the curve, and the generating function fn
"""
mminpval = -np.log10(minpval)
mmaxpval = -np.log10(maxpval)
maxpval_adjust = mmaxpval - mminpval
ax0 = (s0 + maxpval_adjust * minratio) / maxpval_adjust
edge_offset = (maxratio-ax0) % curve_interval
max_x = maxratio-edge_offset
if (max_x > ax0):
x = np.arange(ax0, max_x, curve_interval)
else:
x = np.arange(max_x, ax0, curve_interval)
fn = lambda x: 10 ** (-s0/(x-minratio) - mminpval)
y = fn(x)
return x, y, fn
|
python
|
def calculate_s0_curve(s0, minpval, maxpval, minratio, maxratio, curve_interval=0.1):
"""
Calculate s0 curve for volcano plot.
    Taking a min and max p value, and a min and max ratio, calculate a smooth
    curve starting from parameter `s0` in each direction.
    The `curve_interval` parameter defines the smoothness of the resulting curve.
    :param s0: `float` offset of the curve from the intercept
    :param minpval: `float` minimum p value
    :param maxpval: `float` maximum p value
    :param minratio: `float` minimum ratio
    :param maxratio: `float` maximum ratio
    :param curve_interval: `float` stepsize (smoothness) of curve generator
    :return: x, y, fn: the x, y points of the curve, and the generating function fn
"""
mminpval = -np.log10(minpval)
mmaxpval = -np.log10(maxpval)
maxpval_adjust = mmaxpval - mminpval
ax0 = (s0 + maxpval_adjust * minratio) / maxpval_adjust
edge_offset = (maxratio-ax0) % curve_interval
max_x = maxratio-edge_offset
if (max_x > ax0):
x = np.arange(ax0, max_x, curve_interval)
else:
x = np.arange(max_x, ax0, curve_interval)
fn = lambda x: 10 ** (-s0/(x-minratio) - mminpval)
y = fn(x)
return x, y, fn
|
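A minimal usage sketch for calculate_s0_curve (the thresholds are hypothetical: p values spanning 0.05 to 1e-6 and ratios spanning 0.5 to 4 on one arm of a volcano plot):

from padua.utils import calculate_s0_curve

x, y, fn = calculate_s0_curve(s0=1.0, minpval=0.05, maxpval=1e-6,
                              minratio=0.5, maxratio=4.0)
# ax.plot(x, y) would overlay the significance cut-off on a volcano plot.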
[
"def",
"calculate_s0_curve",
"(",
"s0",
",",
"minpval",
",",
"maxpval",
",",
"minratio",
",",
"maxratio",
",",
"curve_interval",
"=",
"0.1",
")",
":",
"mminpval",
"=",
"-",
"np",
".",
"log10",
"(",
"minpval",
")",
"mmaxpval",
"=",
"-",
"np",
".",
"log10",
"(",
"maxpval",
")",
"maxpval_adjust",
"=",
"mmaxpval",
"-",
"mminpval",
"ax0",
"=",
"(",
"s0",
"+",
"maxpval_adjust",
"*",
"minratio",
")",
"/",
"maxpval_adjust",
"edge_offset",
"=",
"(",
"maxratio",
"-",
"ax0",
")",
"%",
"curve_interval",
"max_x",
"=",
"maxratio",
"-",
"edge_offset",
"if",
"(",
"max_x",
">",
"ax0",
")",
":",
"x",
"=",
"np",
".",
"arange",
"(",
"ax0",
",",
"max_x",
",",
"curve_interval",
")",
"else",
":",
"x",
"=",
"np",
".",
"arange",
"(",
"max_x",
",",
"ax0",
",",
"curve_interval",
")",
"fn",
"=",
"lambda",
"x",
":",
"10",
"**",
"(",
"-",
"s0",
"/",
"(",
"x",
"-",
"minratio",
")",
"-",
"mminpval",
")",
"y",
"=",
"fn",
"(",
"x",
")",
"return",
"x",
",",
"y",
",",
"fn"
] |
Calculate s0 curve for volcano plot.
Taking a min and max p value, and a min and max ratio, calculate a smooth
curve starting from parameter `s0` in each direction.
The `curve_interval` parameter defines the smoothness of the resulting curve.
:param s0: `float` offset of the curve from the intercept
:param minpval: `float` minimum p value
:param maxpval: `float` maximum p value
:param minratio: `float` minimum ratio
:param maxratio: `float` maximum ratio
:param curve_interval: `float` stepsize (smoothness) of curve generator
:return: x, y, fn: the x, y points of the curve, and the generating function fn
|
[
"Calculate",
"s0",
"curve",
"for",
"volcano",
"plot",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/utils.py#L282-L317
|
train
|
mfitzp/padua
|
padua/analysis.py
|
correlation
|
def correlation(df, rowvar=False):
"""
Calculate column-wise Pearson correlations using ``numpy.ma.corrcoef``
Input data is masked to ignore NaNs when calculating correlations. Data is returned as
a Pandas ``DataFrame`` of column_n x column_n dimensions, with column index copied to
both axes.
:param df: Pandas DataFrame
:return: Pandas DataFrame (n_columns x n_columns) of column-wise correlations
"""
# Create a correlation matrix for all correlations
# of the columns (filled with na for all values)
df = df.copy()
maskv = np.ma.masked_where(np.isnan(df.values), df.values)
cdf = np.ma.corrcoef(maskv, rowvar=False)
cdf = pd.DataFrame(np.array(cdf))
cdf.columns = df.columns
cdf.index = df.columns
cdf = cdf.sort_index(level=0, axis=1)
cdf = cdf.sort_index(level=0)
return cdf
|
python
|
def correlation(df, rowvar=False):
"""
Calculate column-wise Pearson correlations using ``numpy.ma.corrcoef``
Input data is masked to ignore NaNs when calculating correlations. Data is returned as
a Pandas ``DataFrame`` of column_n x column_n dimensions, with column index copied to
both axes.
:param df: Pandas DataFrame
:return: Pandas DataFrame (n_columns x n_columns) of column-wise correlations
"""
# Create a correlation matrix for all correlations
# of the columns (filled with na for all values)
df = df.copy()
maskv = np.ma.masked_where(np.isnan(df.values), df.values)
cdf = np.ma.corrcoef(maskv, rowvar=False)
cdf = pd.DataFrame(np.array(cdf))
cdf.columns = df.columns
cdf.index = df.columns
cdf = cdf.sort_index(level=0, axis=1)
cdf = cdf.sort_index(level=0)
return cdf
|
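A minimal usage sketch for correlation, on a toy matrix with a missing value (NaNs are masked rather than dropped):

import numpy as np
import pandas as pd
from padua.analysis import correlation

df = pd.DataFrame({
    'A': [1.0, 2.0, 3.0, 4.0],
    'B': [1.1, 2.1, np.nan, 4.2],
    'C': [4.0, 3.0, 2.0, 1.0],
})
cdf = correlation(df)  # 3x3 column-wise Pearson correlation matrix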
[
"def",
"correlation",
"(",
"df",
",",
"rowvar",
"=",
"False",
")",
":",
"# Create a correlation matrix for all correlations",
"# of the columns (filled with na for all values)",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"maskv",
"=",
"np",
".",
"ma",
".",
"masked_where",
"(",
"np",
".",
"isnan",
"(",
"df",
".",
"values",
")",
",",
"df",
".",
"values",
")",
"cdf",
"=",
"np",
".",
"ma",
".",
"corrcoef",
"(",
"maskv",
",",
"rowvar",
"=",
"False",
")",
"cdf",
"=",
"pd",
".",
"DataFrame",
"(",
"np",
".",
"array",
"(",
"cdf",
")",
")",
"cdf",
".",
"columns",
"=",
"df",
".",
"columns",
"cdf",
".",
"index",
"=",
"df",
".",
"columns",
"cdf",
"=",
"cdf",
".",
"sort_index",
"(",
"level",
"=",
"0",
",",
"axis",
"=",
"1",
")",
"cdf",
"=",
"cdf",
".",
"sort_index",
"(",
"level",
"=",
"0",
")",
"return",
"cdf"
] |
Calculate column-wise Pearson correlations using ``numpy.ma.corrcoef``
Input data is masked to ignore NaNs when calculating correlations. Data is returned as
a Pandas ``DataFrame`` of column_n x column_n dimensions, with column index copied to
both axes.
:param df: Pandas DataFrame
:return: Pandas DataFrame (n_columns x n_columns) of column-wise correlations
|
[
"Calculate",
"column",
"-",
"wise",
"Pearson",
"correlations",
"using",
"numpy",
".",
"ma",
".",
"corrcoef"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/analysis.py#L26-L48
|
train
|
mfitzp/padua
|
padua/analysis.py
|
pca
|
def pca(df, n_components=2, mean_center=False, **kwargs):
"""
Principal Component Analysis, based on `sklearn.decomposition.PCA`
Performs a principal component analysis (PCA) on the supplied dataframe, selecting the first ``n_components`` components
in the resulting model. The model scores and weights are returned.
For more information on PCA and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PCA
:param kwargs: additional keyword arguments to `sklearn.decomposition.PCA`
:return: scores ``DataFrame`` of PCA scores n_components x n_samples
weights ``DataFrame`` of PCA weights n_variables x n_components
"""
if not sklearn:
        raise ImportError('This library depends on scikit-learn (sklearn) to perform PCA analysis')
from sklearn.decomposition import PCA
df = df.copy()
# We have to zero fill, nan errors in PCA
df[ np.isnan(df) ] = 0
if mean_center:
mean = np.mean(df.values, axis=0)
df = df - mean
pca = PCA(n_components=n_components, **kwargs)
pca.fit(df.values.T)
scores = pd.DataFrame(pca.transform(df.values.T)).T
scores.index = ['Principal Component %d (%.2f%%)' % ( (n+1), pca.explained_variance_ratio_[n]*100 ) for n in range(0, scores.shape[0])]
scores.columns = df.columns
weights = pd.DataFrame(pca.components_).T
weights.index = df.index
weights.columns = ['Weights on Principal Component %d' % (n+1) for n in range(0, weights.shape[1])]
return scores, weights
|
python
|
def pca(df, n_components=2, mean_center=False, **kwargs):
"""
Principal Component Analysis, based on `sklearn.decomposition.PCA`
Performs a principal component analysis (PCA) on the supplied dataframe, selecting the first ``n_components`` components
in the resulting model. The model scores and weights are returned.
For more information on PCA and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PCA
:param kwargs: additional keyword arguments to `sklearn.decomposition.PCA`
:return: scores ``DataFrame`` of PCA scores n_components x n_samples
weights ``DataFrame`` of PCA weights n_variables x n_components
"""
if not sklearn:
        raise ImportError('This library depends on scikit-learn (sklearn) to perform PCA analysis')
from sklearn.decomposition import PCA
df = df.copy()
# We have to zero fill, nan errors in PCA
df[ np.isnan(df) ] = 0
if mean_center:
mean = np.mean(df.values, axis=0)
df = df - mean
pca = PCA(n_components=n_components, **kwargs)
pca.fit(df.values.T)
scores = pd.DataFrame(pca.transform(df.values.T)).T
scores.index = ['Principal Component %d (%.2f%%)' % ( (n+1), pca.explained_variance_ratio_[n]*100 ) for n in range(0, scores.shape[0])]
scores.columns = df.columns
weights = pd.DataFrame(pca.components_).T
weights.index = df.index
weights.columns = ['Weights on Principal Component %d' % (n+1) for n in range(0, weights.shape[1])]
return scores, weights
|
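A minimal usage sketch for pca, on a toy matrix (hypothetical values; rows are variables and columns are samples, since the function transposes internally):

import pandas as pd
from padua.analysis import pca

df = pd.DataFrame([[1.0, 2.0, 3.0],
                   [2.0, 4.1, 6.2],
                   [9.0, 5.0, 1.0]],
                  columns=['S1', 'S2', 'S3'])
scores, weights = pca(df, n_components=2, mean_center=True)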
[
"def",
"pca",
"(",
"df",
",",
"n_components",
"=",
"2",
",",
"mean_center",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"sklearn",
":",
"assert",
"(",
"'This library depends on scikit-learn (sklearn) to perform PCA analysis'",
")",
"from",
"sklearn",
".",
"decomposition",
"import",
"PCA",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"# We have to zero fill, nan errors in PCA",
"df",
"[",
"np",
".",
"isnan",
"(",
"df",
")",
"]",
"=",
"0",
"if",
"mean_center",
":",
"mean",
"=",
"np",
".",
"mean",
"(",
"df",
".",
"values",
",",
"axis",
"=",
"0",
")",
"df",
"=",
"df",
"-",
"mean",
"pca",
"=",
"PCA",
"(",
"n_components",
"=",
"n_components",
",",
"*",
"*",
"kwargs",
")",
"pca",
".",
"fit",
"(",
"df",
".",
"values",
".",
"T",
")",
"scores",
"=",
"pd",
".",
"DataFrame",
"(",
"pca",
".",
"transform",
"(",
"df",
".",
"values",
".",
"T",
")",
")",
".",
"T",
"scores",
".",
"index",
"=",
"[",
"'Principal Component %d (%.2f%%)'",
"%",
"(",
"(",
"n",
"+",
"1",
")",
",",
"pca",
".",
"explained_variance_ratio_",
"[",
"n",
"]",
"*",
"100",
")",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"scores",
".",
"shape",
"[",
"0",
"]",
")",
"]",
"scores",
".",
"columns",
"=",
"df",
".",
"columns",
"weights",
"=",
"pd",
".",
"DataFrame",
"(",
"pca",
".",
"components_",
")",
".",
"T",
"weights",
".",
"index",
"=",
"df",
".",
"index",
"weights",
".",
"columns",
"=",
"[",
"'Weights on Principal Component %d'",
"%",
"(",
"n",
"+",
"1",
")",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"weights",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"return",
"scores",
",",
"weights"
] |
Principal Component Analysis, based on `sklearn.decomposition.PCA`
Performs a principal component analysis (PCA) on the supplied dataframe, selecting the first ``n_components`` components
in the resulting model. The model scores and weights are returned.
For more information on PCA and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PCA
:param kwargs: additional keyword arguments to `sklearn.decomposition.PCA`
:return: scores ``DataFrame`` of PCA scores n_components x n_samples
weights ``DataFrame`` of PCA weights n_variables x n_components
|
[
"Principal",
"Component",
"Analysis",
"based",
"on",
"sklearn",
".",
"decomposition",
".",
"PCA"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/analysis.py#L51-L93
|
train
|
mfitzp/padua
|
padua/analysis.py
|
plsda
|
def plsda(df, a, b, n_components=2, mean_center=False, scale=True, **kwargs):
"""
Partial Least Squares Discriminant Analysis, based on `sklearn.cross_decomposition.PLSRegression`
Performs a binary group partial least squares discriminant analysis (PLS-DA) on the supplied
dataframe, selecting the first ``n_components``.
Sample groups are defined by the selectors ``a`` and ``b`` which are used to select columns
from the supplied dataframe. The resulting model is applied to the entire dataset,
projecting non-selected samples into the same space.
For more information on PLS regression and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.cross_decomposition.PLSRegression.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param a: Column selector for group a
:param b: Column selector for group b
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PLS regression
:param scale: ``bool`` scale the X and Y data before fitting (passed to PLSRegression; default True)
:param kwargs: additional keyword arguments to `sklearn.cross_decomposition.PLSRegression`
:return: scores ``DataFrame`` of PLSDA scores n_components x n_samples
weights ``DataFrame`` of PLSDA weights n_variables x n_components
"""
if not sklearn:
        raise ImportError('This library depends on scikit-learn (sklearn) to perform PLS-DA')
from sklearn.cross_decomposition import PLSRegression
df = df.copy()
# We have to zero fill, nan errors in PLSRegression
df[ np.isnan(df) ] = 0
if mean_center:
mean = np.mean(df.values, axis=0)
df = df - mean
sxa, _ = df.columns.get_loc_level(a)
sxb, _ = df.columns.get_loc_level(b)
dfa = df.iloc[:, sxa]
dfb = df.iloc[:, sxb]
dff = pd.concat([dfa, dfb], axis=1)
y = np.ones(dff.shape[1])
y[np.arange(dfa.shape[1])] = 0
plsr = PLSRegression(n_components=n_components, scale=scale, **kwargs)
plsr.fit(dff.values.T, y)
# Apply the generated model to the original data
x_scores = plsr.transform(df.values.T)
scores = pd.DataFrame(x_scores.T)
scores.index = ['Latent Variable %d' % (n+1) for n in range(0, scores.shape[0])]
scores.columns = df.columns
weights = pd.DataFrame(plsr.x_weights_)
weights.index = df.index
weights.columns = ['Weights on Latent Variable %d' % (n+1) for n in range(0, weights.shape[1])]
loadings = pd.DataFrame(plsr.x_loadings_)
loadings.index = df.index
loadings.columns = ['Loadings on Latent Variable %d' % (n+1) for n in range(0, loadings.shape[1])]
return scores, weights, loadings
|
python
|
def plsda(df, a, b, n_components=2, mean_center=False, scale=True, **kwargs):
"""
Partial Least Squares Discriminant Analysis, based on `sklearn.cross_decomposition.PLSRegression`
Performs a binary group partial least squares discriminant analysis (PLS-DA) on the supplied
dataframe, selecting the first ``n_components``.
Sample groups are defined by the selectors ``a`` and ``b`` which are used to select columns
from the supplied dataframe. The resulting model is applied to the entire dataset,
projecting non-selected samples into the same space.
For more information on PLS regression and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.cross_decomposition.PLSRegression.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param a: Column selector for group a
:param b: Column selector for group b
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PLS regression
:param scale: ``bool`` scale the X and Y data before fitting (passed to PLSRegression; default True)
:param kwargs: additional keyword arguments to `sklearn.cross_decomposition.PLSRegression`
:return: scores ``DataFrame`` of PLSDA scores n_components x n_samples
weights ``DataFrame`` of PLSDA weights n_variables x n_components
"""
if not sklearn:
        raise ImportError('This library depends on scikit-learn (sklearn) to perform PLS-DA')
from sklearn.cross_decomposition import PLSRegression
df = df.copy()
# We have to zero fill, nan errors in PLSRegression
df[ np.isnan(df) ] = 0
if mean_center:
mean = np.mean(df.values, axis=0)
df = df - mean
sxa, _ = df.columns.get_loc_level(a)
sxb, _ = df.columns.get_loc_level(b)
dfa = df.iloc[:, sxa]
dfb = df.iloc[:, sxb]
dff = pd.concat([dfa, dfb], axis=1)
y = np.ones(dff.shape[1])
y[np.arange(dfa.shape[1])] = 0
plsr = PLSRegression(n_components=n_components, scale=scale, **kwargs)
plsr.fit(dff.values.T, y)
# Apply the generated model to the original data
x_scores = plsr.transform(df.values.T)
scores = pd.DataFrame(x_scores.T)
scores.index = ['Latent Variable %d' % (n+1) for n in range(0, scores.shape[0])]
scores.columns = df.columns
weights = pd.DataFrame(plsr.x_weights_)
weights.index = df.index
weights.columns = ['Weights on Latent Variable %d' % (n+1) for n in range(0, weights.shape[1])]
loadings = pd.DataFrame(plsr.x_loadings_)
loadings.index = df.index
loadings.columns = ['Loadings on Latent Variable %d' % (n+1) for n in range(0, loadings.shape[1])]
return scores, weights, loadings
|
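A minimal usage sketch for plsda (sketch only: `df` is assumed to have MultiIndex columns whose top level holds the group labels, so get_loc_level('Control') can select them):

from padua.analysis import plsda

scores, weights, loadings = plsda(df, 'Control', 'Treated',
                                  n_components=2)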
[
"def",
"plsda",
"(",
"df",
",",
"a",
",",
"b",
",",
"n_components",
"=",
"2",
",",
"mean_center",
"=",
"False",
",",
"scale",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"sklearn",
":",
"assert",
"(",
"'This library depends on scikit-learn (sklearn) to perform PLS-DA'",
")",
"from",
"sklearn",
".",
"cross_decomposition",
"import",
"PLSRegression",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"# We have to zero fill, nan errors in PLSRegression",
"df",
"[",
"np",
".",
"isnan",
"(",
"df",
")",
"]",
"=",
"0",
"if",
"mean_center",
":",
"mean",
"=",
"np",
".",
"mean",
"(",
"df",
".",
"values",
",",
"axis",
"=",
"0",
")",
"df",
"=",
"df",
"-",
"mean",
"sxa",
",",
"_",
"=",
"df",
".",
"columns",
".",
"get_loc_level",
"(",
"a",
")",
"sxb",
",",
"_",
"=",
"df",
".",
"columns",
".",
"get_loc_level",
"(",
"b",
")",
"dfa",
"=",
"df",
".",
"iloc",
"[",
":",
",",
"sxa",
"]",
"dfb",
"=",
"df",
".",
"iloc",
"[",
":",
",",
"sxb",
"]",
"dff",
"=",
"pd",
".",
"concat",
"(",
"[",
"dfa",
",",
"dfb",
"]",
",",
"axis",
"=",
"1",
")",
"y",
"=",
"np",
".",
"ones",
"(",
"dff",
".",
"shape",
"[",
"1",
"]",
")",
"y",
"[",
"np",
".",
"arange",
"(",
"dfa",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"=",
"0",
"plsr",
"=",
"PLSRegression",
"(",
"n_components",
"=",
"n_components",
",",
"scale",
"=",
"scale",
",",
"*",
"*",
"kwargs",
")",
"plsr",
".",
"fit",
"(",
"dff",
".",
"values",
".",
"T",
",",
"y",
")",
"# Apply the generated model to the original data",
"x_scores",
"=",
"plsr",
".",
"transform",
"(",
"df",
".",
"values",
".",
"T",
")",
"scores",
"=",
"pd",
".",
"DataFrame",
"(",
"x_scores",
".",
"T",
")",
"scores",
".",
"index",
"=",
"[",
"'Latent Variable %d'",
"%",
"(",
"n",
"+",
"1",
")",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"scores",
".",
"shape",
"[",
"0",
"]",
")",
"]",
"scores",
".",
"columns",
"=",
"df",
".",
"columns",
"weights",
"=",
"pd",
".",
"DataFrame",
"(",
"plsr",
".",
"x_weights_",
")",
"weights",
".",
"index",
"=",
"df",
".",
"index",
"weights",
".",
"columns",
"=",
"[",
"'Weights on Latent Variable %d'",
"%",
"(",
"n",
"+",
"1",
")",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"weights",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"loadings",
"=",
"pd",
".",
"DataFrame",
"(",
"plsr",
".",
"x_loadings_",
")",
"loadings",
".",
"index",
"=",
"df",
".",
"index",
"loadings",
".",
"columns",
"=",
"[",
"'Loadings on Latent Variable %d'",
"%",
"(",
"n",
"+",
"1",
")",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"loadings",
".",
"shape",
"[",
"1",
"]",
")",
"]",
"return",
"scores",
",",
"weights",
",",
"loadings"
] |
Partial Least Squares Discriminant Analysis, based on `sklearn.cross_decomposition.PLSRegression`
Performs a binary group partial least squares discriminant analysis (PLS-DA) on the supplied
dataframe, selecting the first ``n_components``.
Sample groups are defined by the selectors ``a`` and ``b`` which are used to select columns
from the supplied dataframe. The resulting model is applied to the entire dataset,
projecting non-selected samples into the same space.
For more information on PLS regression and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.cross_decomposition.PLSRegression.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param a: Column selector for group a
:param b: Column selector for group b
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PLS regression
:param scale: ``bool`` scale the X and Y data before fitting (passed to PLSRegression; default True)
:param kwargs: additional keyword arguments to `sklearn.cross_decomposition.PLSRegression`
:return: scores ``DataFrame`` of PLSDA scores n_components x n_samples
weights ``DataFrame`` of PLSDA weights n_variables x n_components
|
[
"Partial",
"Least",
"Squares",
"Discriminant",
"Analysis",
"based",
"on",
"sklearn",
".",
"cross_decomposition",
".",
"PLSRegression"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/analysis.py#L96-L161
|
train
|
mfitzp/padua
|
padua/analysis.py
|
enrichment_from_evidence
|
def enrichment_from_evidence(dfe, modification="Phospho (STY)"):
"""
Calculate relative enrichment of peptide modifications from evidence.txt.
    Taking an evidence ``DataFrame``, returns the relative enrichment of the specified
    modification in the table.
    The returned data columns are generated from the input data columns.
    :param dfe: Pandas ``DataFrame`` of evidence
:return: Pandas ``DataFrame`` of percentage modifications in the supplied data.
"""
dfe = dfe.reset_index().set_index('Experiment')
dfe['Modifications'] = np.array([modification in m for m in dfe['Modifications']])
dfe = dfe.set_index('Modifications', append=True)
dfes = dfe.sum(axis=0, level=[0,1]).T
columns = dfes.sum(axis=1, level=0).columns
total = dfes.sum(axis=1, level=0).values.flatten() # Total values
modified = dfes.iloc[0, dfes.columns.get_level_values('Modifications').values ].values # Modified
enrichment = modified / total
return pd.DataFrame([enrichment], columns=columns, index=['% Enrichment'])
|
python
|
def enrichment_from_evidence(dfe, modification="Phospho (STY)"):
"""
Calculate relative enrichment of peptide modifications from evidence.txt.
    Taking an evidence ``DataFrame``, returns the relative enrichment of the specified
    modification in the table.
    The returned data columns are generated from the input data columns.
    :param dfe: Pandas ``DataFrame`` of evidence
:return: Pandas ``DataFrame`` of percentage modifications in the supplied data.
"""
dfe = dfe.reset_index().set_index('Experiment')
dfe['Modifications'] = np.array([modification in m for m in dfe['Modifications']])
dfe = dfe.set_index('Modifications', append=True)
dfes = dfe.sum(axis=0, level=[0,1]).T
columns = dfes.sum(axis=1, level=0).columns
total = dfes.sum(axis=1, level=0).values.flatten() # Total values
modified = dfes.iloc[0, dfes.columns.get_level_values('Modifications').values ].values # Modified
enrichment = modified / total
return pd.DataFrame([enrichment], columns=columns, index=['% Enrichment'])
|
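A minimal usage sketch for enrichment_from_evidence (sketch only: `dfe` is assumed to be an evidence.txt table exposing 'Experiment' and 'Modifications' columns):

from padua.analysis import enrichment_from_evidence

enrichment = enrichment_from_evidence(dfe, modification="Phospho (STY)")
print(enrichment)  # one '% Enrichment' value per experiment column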
[
"def",
"enrichment_from_evidence",
"(",
"dfe",
",",
"modification",
"=",
"\"Phospho (STY)\"",
")",
":",
"dfe",
"=",
"dfe",
".",
"reset_index",
"(",
")",
".",
"set_index",
"(",
"'Experiment'",
")",
"dfe",
"[",
"'Modifications'",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"modification",
"in",
"m",
"for",
"m",
"in",
"dfe",
"[",
"'Modifications'",
"]",
"]",
")",
"dfe",
"=",
"dfe",
".",
"set_index",
"(",
"'Modifications'",
",",
"append",
"=",
"True",
")",
"dfes",
"=",
"dfe",
".",
"sum",
"(",
"axis",
"=",
"0",
",",
"level",
"=",
"[",
"0",
",",
"1",
"]",
")",
".",
"T",
"columns",
"=",
"dfes",
".",
"sum",
"(",
"axis",
"=",
"1",
",",
"level",
"=",
"0",
")",
".",
"columns",
"total",
"=",
"dfes",
".",
"sum",
"(",
"axis",
"=",
"1",
",",
"level",
"=",
"0",
")",
".",
"values",
".",
"flatten",
"(",
")",
"# Total values",
"modified",
"=",
"dfes",
".",
"iloc",
"[",
"0",
",",
"dfes",
".",
"columns",
".",
"get_level_values",
"(",
"'Modifications'",
")",
".",
"values",
"]",
".",
"values",
"# Modified",
"enrichment",
"=",
"modified",
"/",
"total",
"return",
"pd",
".",
"DataFrame",
"(",
"[",
"enrichment",
"]",
",",
"columns",
"=",
"columns",
",",
"index",
"=",
"[",
"'% Enrichment'",
"]",
")"
] |
Calculate relative enrichment of peptide modifications from evidence.txt.
Taking an evidence ``DataFrame`` returns the relative enrichment of the specified
modification in the table.
The returned data columns are generated from the input data columns.
:param dfe: Pandas ``DataFrame`` of evidence
:return: Pandas ``DataFrame`` of percentage modifications in the supplied data.
|
[
"Calculate",
"relative",
"enrichment",
"of",
"peptide",
"modifications",
"from",
"evidence",
".",
"txt",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/analysis.py#L232-L258
|
train
|
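A minimal sketch of the calculation above using plain groupby operations, on a toy evidence-like table (the column names here are illustrative stand-ins; a real MaxQuant evidence.txt has many more columns):

import pandas as pd

dfe = pd.DataFrame({
    'Experiment': ['A', 'A', 'B', 'B'],
    'Modifications': ['Phospho (STY)', 'Unmodified', 'Phospho (STY)', 'Unmodified'],
    'Intensity': [30.0, 70.0, 10.0, 90.0],
})

is_mod = dfe['Modifications'].str.contains('Phospho (STY)', regex=False)
total = dfe.groupby('Experiment')['Intensity'].sum()
modified = dfe[is_mod].groupby('Experiment')['Intensity'].sum()
print(modified / total)  # A: 0.3, B: 0.1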
mfitzp/padua
|
padua/analysis.py
|
enrichment_from_msp
|
def enrichment_from_msp(dfmsp, modification="Phospho (STY)"):
"""
Calculate relative enrichment of peptide modifications from modificationSpecificPeptides.txt.
Taking a modificationSpecificPeptides ``DataFrame`` returns the relative enrichment of the specified
modification in the table.
The returned data columns are generated from the input data columns.
:param dfmsp: Pandas ``DataFrame`` of modificationSpecificPeptides
:return: Pandas ``DataFrame`` of percentage modifications in the supplied data.
"""
dfmsp['Modifications'] = np.array([modification in m for m in dfmsp['Modifications']])
dfmsp = dfmsp.set_index(['Modifications'])
dfmsp = dfmsp.filter(regex='Intensity ')
dfmsp[ dfmsp == 0] = np.nan
df_r = dfmsp.sum(axis=0, level=0)
modified = df_r.loc[True].values
total = df_r.sum(axis=0).values
enrichment = modified / total
return pd.DataFrame([enrichment], columns=dfmsp.columns, index=['% Enrichment'])
|
python
|
def enrichment_from_msp(dfmsp, modification="Phospho (STY)"):
"""
Calculate relative enrichment of peptide modifications from modificationSpecificPeptides.txt.
Taking a modificationSpecificPeptides ``DataFrame`` returns the relative enrichment of the specified
modification in the table.
The returned data columns are generated from the input data columns.
:param dfmsp: Pandas ``DataFrame`` of modificationSpecificPeptides
:return: Pandas ``DataFrame`` of percentage modifications in the supplied data.
"""
dfmsp['Modifications'] = np.array([modification in m for m in dfmsp['Modifications']])
dfmsp = dfmsp.set_index(['Modifications'])
dfmsp = dfmsp.filter(regex='Intensity ')
dfmsp[ dfmsp == 0] = np.nan
df_r = dfmsp.sum(axis=0, level=0)
modified = df_r.loc[True].values
total = df_r.sum(axis=0).values
enrichment = modified / total
return pd.DataFrame([enrichment], columns=dfmsp.columns, index=['% Enrichment'])
|
[
"def",
"enrichment_from_msp",
"(",
"dfmsp",
",",
"modification",
"=",
"\"Phospho (STY)\"",
")",
":",
"dfmsp",
"[",
"'Modifications'",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"modification",
"in",
"m",
"for",
"m",
"in",
"dfmsp",
"[",
"'Modifications'",
"]",
"]",
")",
"dfmsp",
"=",
"dfmsp",
".",
"set_index",
"(",
"[",
"'Modifications'",
"]",
")",
"dfmsp",
"=",
"dfmsp",
".",
"filter",
"(",
"regex",
"=",
"'Intensity '",
")",
"dfmsp",
"[",
"dfmsp",
"==",
"0",
"]",
"=",
"np",
".",
"nan",
"df_r",
"=",
"dfmsp",
".",
"sum",
"(",
"axis",
"=",
"0",
",",
"level",
"=",
"0",
")",
"modified",
"=",
"df_r",
".",
"loc",
"[",
"True",
"]",
".",
"values",
"total",
"=",
"df_r",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
".",
"values",
"enrichment",
"=",
"modified",
"/",
"total",
"return",
"pd",
".",
"DataFrame",
"(",
"[",
"enrichment",
"]",
",",
"columns",
"=",
"dfmsp",
".",
"columns",
",",
"index",
"=",
"[",
"'% Enrichment'",
"]",
")"
] |
Calculate relative enrichment of peptide modifications from modificationSpecificPeptides.txt.
Taking a modificationSpecificPeptides ``DataFrame`` returns the relative enrichment of the specified
modification in the table.
The returned data columns are generated from the input data columns.
:param dfmsp: Pandas ``DataFrame`` of modificationSpecificPeptides
:return: Pandas ``DataFrame`` of percentage modifications in the supplied data.
|
[
"Calculate",
"relative",
"enrichment",
"of",
"peptide",
"modifications",
"from",
"modificationSpecificPeptides",
".",
"txt",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/analysis.py#L263-L287
|
train
|
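The msp variant differs in two details worth noting: expression columns are selected with filter(regex='Intensity ') and zero intensities are treated as missing before summing. A small sketch of those two steps on invented columns:

import pandas as pd

df = pd.DataFrame({
    'Modifications': ['Phospho (STY)', 'Unmodified'],
    'Intensity A': [25.0, 75.0],
    'Intensity B': [0.0, 50.0],  # zero intensity means "not observed"
})

mod = df['Modifications'].str.contains('Phospho (STY)', regex=False)
intensities = df.filter(regex='Intensity ')       # keep only expression columns
intensities = intensities.mask(intensities == 0)  # zeros -> NaN, excluded from sums

print(intensities[mod].sum() / intensities.sum())  # A: 0.25, B: 0.0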
mfitzp/padua
|
padua/analysis.py
|
sitespeptidesproteins
|
def sitespeptidesproteins(df, site_localization_probability=0.75):
"""
Generate summary count of modified sites, peptides and proteins in a processed dataset ``DataFrame``.
Returns the number of sites, peptides and proteins as calculated as follows:
- `sites` (>0.75; or specified site localization probability) count of all sites > threshold
- `peptides` the set of `Sequence windows` in the dataset (unique peptides)
- `proteins` the set of unique leading proteins in the dataset
:param df: Pandas ``DataFrame`` of processed data
:param site_localization_probability: ``float`` site localization probability threshold (for sites calculation)
:return: ``tuple`` of ``int``, containing sites, peptides, proteins
"""
sites = filters.filter_localization_probability(df, site_localization_probability)['Sequence window']
peptides = set(df['Sequence window'])
proteins = set([str(p).split(';')[0] for p in df['Proteins']])
return len(sites), len(peptides), len(proteins)
|
python
|
def sitespeptidesproteins(df, site_localization_probability=0.75):
"""
Generate summary count of modified sites, peptides and proteins in a processed dataset ``DataFrame``.
Returns the number of sites, peptides and proteins as calculated as follows:
- `sites` (>0.75; or specified site localization probability) count of all sites > threshold
- `peptides` the set of `Sequence windows` in the dataset (unique peptides)
- `proteins` the set of unique leading proteins in the dataset
:param df: Pandas ``DataFrame`` of processed data
:param site_localization_probability: ``float`` site localization probability threshold (for sites calculation)
:return: ``tuple`` of ``int``, containing sites, peptides, proteins
"""
sites = filters.filter_localization_probability(df, site_localization_probability)['Sequence window']
peptides = set(df['Sequence window'])
proteins = set([str(p).split(';')[0] for p in df['Proteins']])
return len(sites), len(peptides), len(proteins)
|
[
"def",
"sitespeptidesproteins",
"(",
"df",
",",
"site_localization_probability",
"=",
"0.75",
")",
":",
"sites",
"=",
"filters",
".",
"filter_localization_probability",
"(",
"df",
",",
"site_localization_probability",
")",
"[",
"'Sequence window'",
"]",
"peptides",
"=",
"set",
"(",
"df",
"[",
"'Sequence window'",
"]",
")",
"proteins",
"=",
"set",
"(",
"[",
"str",
"(",
"p",
")",
".",
"split",
"(",
"';'",
")",
"[",
"0",
"]",
"for",
"p",
"in",
"df",
"[",
"'Proteins'",
"]",
"]",
")",
"return",
"len",
"(",
"sites",
")",
",",
"len",
"(",
"peptides",
")",
",",
"len",
"(",
"proteins",
")"
] |
Generate summary count of modified sites, peptides and proteins in a processed dataset ``DataFrame``.
Returns the number of sites, peptides and proteins as calculated as follows:
- `sites` (>0.75; or specified site localization probability) count of all sites > threshold
- `peptides` the set of `Sequence windows` in the dataset (unique peptides)
- `proteins` the set of unique leading proteins in the dataset
:param df: Pandas ``DataFrame`` of processed data
:param site_localization_probability: ``float`` site localization probability threshold (for sites calculation)
:return: ``tuple`` of ``int``, containing sites, peptides, proteins
|
[
"Generate",
"summary",
"count",
"of",
"modified",
"sites",
"peptides",
"and",
"proteins",
"in",
"a",
"processed",
"dataset",
"DataFrame",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/analysis.py#L291-L309
|
train
|
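The filtering step above lives in padua's filters module. A self-contained sketch of the same three counts, assuming a 'Localization prob' column holds the site localization probability (the actual column name used by the filter is not shown in this record):

import pandas as pd

df = pd.DataFrame({
    'Sequence window': ['AAASAAA', 'BBBTBBB', 'AAASAAA'],
    'Localization prob': [0.95, 0.60, 0.80],  # assumed column name
    'Proteins': ['P1;P2', 'P3', 'P1'],
})

sites = df[df['Localization prob'] > 0.75]['Sequence window']
peptides = set(df['Sequence window'])
proteins = {str(p).split(';')[0] for p in df['Proteins']}
print(len(sites), len(peptides), len(proteins))  # 2 2 2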
mfitzp/padua
|
padua/analysis.py
|
modifiedaminoacids
|
def modifiedaminoacids(df):
"""
Calculate the number of modified amino acids in supplied ``DataFrame``.
Returns the total of all modifications and the total for each amino acid individually, as an ``int`` and a
``dict`` of ``int``, keyed by amino acid, respectively.
:param df: Pandas ``DataFrame`` containing processed data.
:return: total_aas ``int`` the total number of all modified amino acids
quants ``dict`` of ``int`` keyed by amino acid, giving individual counts for each aa.
"""
amino_acids = list(df['Amino acid'].values)
aas = set(amino_acids)
quants = {}
for aa in aas:
quants[aa] = amino_acids.count(aa)
total_aas = len(amino_acids)
return total_aas, quants
|
python
|
def modifiedaminoacids(df):
"""
Calculate the number of modified amino acids in supplied ``DataFrame``.
Returns the total of all modifications and the total for each amino acid individually, as an ``int`` and a
``dict`` of ``int``, keyed by amino acid, respectively.
:param df: Pandas ``DataFrame`` containing processed data.
:return: total_aas ``int`` the total number of all modified amino acids
quants ``dict`` of ``int`` keyed by amino acid, giving individual counts for each aa.
"""
amino_acids = list(df['Amino acid'].values)
aas = set(amino_acids)
quants = {}
for aa in aas:
quants[aa] = amino_acids.count(aa)
total_aas = len(amino_acids)
return total_aas, quants
|
[
"def",
"modifiedaminoacids",
"(",
"df",
")",
":",
"amino_acids",
"=",
"list",
"(",
"df",
"[",
"'Amino acid'",
"]",
".",
"values",
")",
"aas",
"=",
"set",
"(",
"amino_acids",
")",
"quants",
"=",
"{",
"}",
"for",
"aa",
"in",
"aas",
":",
"quants",
"[",
"aa",
"]",
"=",
"amino_acids",
".",
"count",
"(",
"aa",
")",
"total_aas",
"=",
"len",
"(",
"amino_acids",
")",
"return",
"total_aas",
",",
"quants"
] |
Calculate the number of modified amino acids in supplied ``DataFrame``.
Returns the total of all modifications and the total for each amino acid individually, as an ``int`` and a
``dict`` of ``int``, keyed by amino acid, respectively.
:param df: Pandas ``DataFrame`` containing processed data.
:return: total_aas ``int`` the total number of all modified amino acids
quants ``dict`` of ``int`` keyed by amino acid, giving individual counts for each aa.
|
[
"Calculate",
"the",
"number",
"of",
"modified",
"amino",
"acids",
"in",
"supplied",
"DataFrame",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/analysis.py#L312-L333
|
train
|
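The per-residue tally above is a plain frequency count; collections.Counter produces the same totals in one call:

from collections import Counter

import pandas as pd

df = pd.DataFrame({'Amino acid': ['S', 'T', 'S', 'Y', 'S']})
quants = Counter(df['Amino acid'])
print(sum(quants.values()), dict(quants))  # 5 {'S': 3, 'T': 1, 'Y': 1}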
mfitzp/padua
|
padua/process.py
|
build_index_from_design
|
def build_index_from_design(df, design, remove_prefix=None, types=None, axis=1, auto_convert_numeric=True, unmatched_columns='index'):
"""
Build a MultiIndex from a design table.
Supply a table with column headings for the new multiindex
and an index containing the labels to search for in the data.
:param df:
:param design:
:param remove_prefix:
:param types:
:param axis:
:param auto_convert_numeric:
:return:
"""
df = df.copy()
if 'Label' not in design.index.names:
design = design.set_index('Label')
if remove_prefix is None:
remove_prefix = []
if type(remove_prefix) is str:
remove_prefix=[remove_prefix]
unmatched_for_index = []
names = design.columns.values
idx_levels = len(names)
indexes = []
# Convert numeric only columns_to_combine; except index
if auto_convert_numeric:
design = design.apply(pd.to_numeric, errors="ignore")
# The match columns are always strings, so the index must also be
design.index = design.index.astype(str)
# Apply type settings
if types:
for n, t in types.items():
if n in design.columns.values:
design[n] = design[n].astype(t)
# Build the index
for lo in df.columns.values:
l = copy(lo)
for s in remove_prefix:
l = l.replace(s, '')
# Remove trailing/forward spaces
l = l.strip()
# Convert to numeric if possible
l = numeric(l)
# Attempt to match to the labels
try:
# Index
idx = design.loc[str(l)]
except:
if unmatched_columns:
unmatched_for_index.append(lo)
else:
# No match, fill with None
idx = tuple([None] * idx_levels)
indexes.append(idx)
else:
# We have a matched row, store it
idx = tuple(idx.values)
indexes.append(idx)
if axis == 0:
df.index = pd.MultiIndex.from_tuples(indexes, names=names)
else:
# If using unmatched for index, append
if unmatched_columns == 'index':
df = df.set_index(unmatched_for_index, append=True)
elif unmatched_columns == 'drop':
df = df.drop(unmatched_for_index, axis=1)
df.columns = pd.MultiIndex.from_tuples(indexes, names=names)
df = df.sort_index(axis=1)
return df
|
python
|
def build_index_from_design(df, design, remove_prefix=None, types=None, axis=1, auto_convert_numeric=True, unmatched_columns='index'):
"""
Build a MultiIndex from a design table.
Supply a table with column headings for the new multiindex
and an index containing the labels to search for in the data.
:param df:
:param design:
:param remove_prefix:
:param types:
:param axis:
:param auto_convert_numeric:
:return:
"""
df = df.copy()
if 'Label' not in design.index.names:
design = design.set_index('Label')
if remove_prefix is None:
remove_prefix = []
if type(remove_prefix) is str:
remove_prefix=[remove_prefix]
unmatched_for_index = []
names = design.columns.values
idx_levels = len(names)
indexes = []
# Convert numeric only columns_to_combine; except index
if auto_convert_numeric:
design = design.apply(pd.to_numeric, errors="ignore")
# The match columns are always strings, so the index must also be
design.index = design.index.astype(str)
# Apply type settings
if types:
for n, t in types.items():
if n in design.columns.values:
design[n] = design[n].astype(t)
# Build the index
for lo in df.columns.values:
l = copy(lo)
for s in remove_prefix:
l = l.replace(s, '')
# Remove trailing/forward spaces
l = l.strip()
# Convert to numeric if possible
l = numeric(l)
# Attempt to match to the labels
try:
# Index
idx = design.loc[str(l)]
except:
if unmatched_columns:
unmatched_for_index.append(lo)
else:
# No match, fill with None
idx = tuple([None] * idx_levels)
indexes.append(idx)
else:
# We have a matched row, store it
idx = tuple(idx.values)
indexes.append(idx)
if axis == 0:
df.index = pd.MultiIndex.from_tuples(indexes, names=names)
else:
# If using unmatched for index, append
if unmatched_columns == 'index':
df = df.set_index(unmatched_for_index, append=True)
elif unmatched_columns == 'drop':
df = df.drop(unmatched_for_index, axis=1)
df.columns = pd.MultiIndex.from_tuples(indexes, names=names)
df = df.sort_index(axis=1)
return df
|
[
"def",
"build_index_from_design",
"(",
"df",
",",
"design",
",",
"remove_prefix",
"=",
"None",
",",
"types",
"=",
"None",
",",
"axis",
"=",
"1",
",",
"auto_convert_numeric",
"=",
"True",
",",
"unmatched_columns",
"=",
"'index'",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"if",
"'Label'",
"not",
"in",
"design",
".",
"index",
".",
"names",
":",
"design",
"=",
"design",
".",
"set_index",
"(",
"'Label'",
")",
"if",
"remove_prefix",
"is",
"None",
":",
"remove_prefix",
"=",
"[",
"]",
"if",
"type",
"(",
"remove_prefix",
")",
"is",
"str",
":",
"remove_prefix",
"=",
"[",
"remove_prefix",
"]",
"unmatched_for_index",
"=",
"[",
"]",
"names",
"=",
"design",
".",
"columns",
".",
"values",
"idx_levels",
"=",
"len",
"(",
"names",
")",
"indexes",
"=",
"[",
"]",
"# Convert numeric only columns_to_combine; except index",
"if",
"auto_convert_numeric",
":",
"design",
"=",
"design",
".",
"apply",
"(",
"pd",
".",
"to_numeric",
",",
"errors",
"=",
"\"ignore\"",
")",
"# The match columns are always strings, so the index must also be",
"design",
".",
"index",
"=",
"design",
".",
"index",
".",
"astype",
"(",
"str",
")",
"# Apply type settings",
"if",
"types",
":",
"for",
"n",
",",
"t",
"in",
"types",
".",
"items",
"(",
")",
":",
"if",
"n",
"in",
"design",
".",
"columns",
".",
"values",
":",
"design",
"[",
"n",
"]",
"=",
"design",
"[",
"n",
"]",
".",
"astype",
"(",
"t",
")",
"# Build the index",
"for",
"lo",
"in",
"df",
".",
"columns",
".",
"values",
":",
"l",
"=",
"copy",
"(",
"lo",
")",
"for",
"s",
"in",
"remove_prefix",
":",
"l",
"=",
"l",
".",
"replace",
"(",
"s",
",",
"''",
")",
"# Remove trailing/forward spaces",
"l",
"=",
"l",
".",
"strip",
"(",
")",
"# Convert to numeric if possible",
"l",
"=",
"numeric",
"(",
"l",
")",
"# Attempt to match to the labels",
"try",
":",
"# Index",
"idx",
"=",
"design",
".",
"loc",
"[",
"str",
"(",
"l",
")",
"]",
"except",
":",
"if",
"unmatched_columns",
":",
"unmatched_for_index",
".",
"append",
"(",
"lo",
")",
"else",
":",
"# No match, fill with None",
"idx",
"=",
"tuple",
"(",
"[",
"None",
"]",
"*",
"idx_levels",
")",
"indexes",
".",
"append",
"(",
"idx",
")",
"else",
":",
"# We have a matched row, store it",
"idx",
"=",
"tuple",
"(",
"idx",
".",
"values",
")",
"indexes",
".",
"append",
"(",
"idx",
")",
"if",
"axis",
"==",
"0",
":",
"df",
".",
"index",
"=",
"pd",
".",
"MultiIndex",
".",
"from_tuples",
"(",
"indexes",
",",
"names",
"=",
"names",
")",
"else",
":",
"# If using unmatched for index, append",
"if",
"unmatched_columns",
"==",
"'index'",
":",
"df",
"=",
"df",
".",
"set_index",
"(",
"unmatched_for_index",
",",
"append",
"=",
"True",
")",
"elif",
"unmatched_columns",
"==",
"'drop'",
":",
"df",
"=",
"df",
".",
"drop",
"(",
"unmatched_for_index",
",",
"axis",
"=",
"1",
")",
"df",
".",
"columns",
"=",
"pd",
".",
"MultiIndex",
".",
"from_tuples",
"(",
"indexes",
",",
"names",
"=",
"names",
")",
"df",
"=",
"df",
".",
"sort_index",
"(",
"axis",
"=",
"1",
")",
"return",
"df"
] |
Build a MultiIndex from a design table.
Supply a table with column headings for the new multiindex
and an index containing the labels to search for in the data.
:param df:
:param design:
:param remove_prefix:
:param types:
:param axis:
:param auto_convert_numeric:
:return:
|
[
"Build",
"a",
"MultiIndex",
"from",
"a",
"design",
"table",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/process.py#L23-L111
|
train
|
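A stripped-down sketch of the lookup the function performs: each data column, with its prefix removed, is matched against the design table's 'Label' index, and the matching row becomes that column's MultiIndex tuple (column names below are invented for illustration):

import pandas as pd

data = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
                    columns=['Intensity ctrl_1', 'Intensity drug_1'])

design = pd.DataFrame({
    'Label': ['ctrl_1', 'drug_1'],
    'Condition': ['Control', 'Drug'],
    'Replicate': [1, 1],
}).set_index('Label')

tuples = [tuple(design.loc[c.replace('Intensity ', '')])
          for c in data.columns]
data.columns = pd.MultiIndex.from_tuples(tuples, names=list(design.columns))
print(data.columns.tolist())  # [('Control', 1), ('Drug', 1)]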
mfitzp/padua
|
padua/process.py
|
build_index_from_labels
|
def build_index_from_labels(df, indices, remove_prefix=None, types=None, axis=1):
"""
Build a MultiIndex from a list of labels and matching regex
Supply a list of ('name', regex) tuples, one per hierarchy level; each
regex extracts that level from the sample label.
:param df:
:param indices: Tuples of ('label', 'regex') matches, one per index level
:param remove_prefix: Strip these strings from labels before matching (e.g. headers)
:param types: Optional dict mapping a level name to a callable applied to its matched value
:param axis: Axis (1 = columns, 0 = rows)
:return:
"""
df = df.copy()
if remove_prefix is None:
remove_prefix = []
if types is None:
types = {}
idx = [df.index, df.columns][axis]
indexes = []
for l in idx.get_level_values(0):
for s in remove_prefix:
l = l.replace(s+" ", '')
ixr = []
for n, m in indices:
m = re.search(m, l)
if m:
r = m.group(1)
if n in types:
# Map this value to a new type
r = types[n](r)
else:
r = None
ixr.append(r)
indexes.append( tuple(ixr) )
if axis == 0:
df.index = pd.MultiIndex.from_tuples(indexes, names=[n for n, _ in indices])
else:
df.columns = pd.MultiIndex.from_tuples(indexes, names=[n for n, _ in indices])
return df
|
python
|
def build_index_from_labels(df, indices, remove_prefix=None, types=None, axis=1):
"""
Build a MultiIndex from a list of labels and matching regex
Supply a list of ('name', regex) tuples, one per hierarchy level; each
regex extracts that level from the sample label.
:param df:
:param indices: Tuples of ('label', 'regex') matches, one per index level
:param remove_prefix: Strip these strings from labels before matching (e.g. headers)
:param types: Optional dict mapping a level name to a callable applied to its matched value
:param axis: Axis (1 = columns, 0 = rows)
:return:
"""
df = df.copy()
if remove_prefix is None:
remove_prefix = []
if types is None:
types = {}
idx = [df.index, df.columns][axis]
indexes = []
for l in idx.get_level_values(0):
for s in remove_prefix:
l = l.replace(s+" ", '')
ixr = []
for n, m in indices:
m = re.search(m, l)
if m:
r = m.group(1)
if n in types:
# Map this value to a new type
r = types[n](r)
else:
r = None
ixr.append(r)
indexes.append( tuple(ixr) )
if axis == 0:
df.index = pd.MultiIndex.from_tuples(indexes, names=[n for n, _ in indices])
else:
df.columns = pd.MultiIndex.from_tuples(indexes, names=[n for n, _ in indices])
return df
|
[
"def",
"build_index_from_labels",
"(",
"df",
",",
"indices",
",",
"remove_prefix",
"=",
"None",
",",
"types",
"=",
"None",
",",
"axis",
"=",
"1",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"if",
"remove_prefix",
"is",
"None",
":",
"remove_prefix",
"=",
"[",
"]",
"if",
"types",
"is",
"None",
":",
"types",
"=",
"{",
"}",
"idx",
"=",
"[",
"df",
".",
"index",
",",
"df",
".",
"columns",
"]",
"[",
"axis",
"]",
"indexes",
"=",
"[",
"]",
"for",
"l",
"in",
"idx",
".",
"get_level_values",
"(",
"0",
")",
":",
"for",
"s",
"in",
"remove_prefix",
":",
"l",
"=",
"l",
".",
"replace",
"(",
"s",
"+",
"\" \"",
",",
"''",
")",
"ixr",
"=",
"[",
"]",
"for",
"n",
",",
"m",
"in",
"indices",
":",
"m",
"=",
"re",
".",
"search",
"(",
"m",
",",
"l",
")",
"if",
"m",
":",
"r",
"=",
"m",
".",
"group",
"(",
"1",
")",
"if",
"n",
"in",
"types",
":",
"# Map this value to a new type",
"r",
"=",
"types",
"[",
"n",
"]",
"(",
"r",
")",
"else",
":",
"r",
"=",
"None",
"ixr",
".",
"append",
"(",
"r",
")",
"indexes",
".",
"append",
"(",
"tuple",
"(",
"ixr",
")",
")",
"if",
"axis",
"==",
"0",
":",
"df",
".",
"index",
"=",
"pd",
".",
"MultiIndex",
".",
"from_tuples",
"(",
"indexes",
",",
"names",
"=",
"[",
"n",
"for",
"n",
",",
"_",
"in",
"indices",
"]",
")",
"else",
":",
"df",
".",
"columns",
"=",
"pd",
".",
"MultiIndex",
".",
"from_tuples",
"(",
"indexes",
",",
"names",
"=",
"[",
"n",
"for",
"n",
",",
"_",
"in",
"indices",
"]",
")",
"return",
"df"
] |
Build a MultiIndex from a list of labels and matching regex
Supply a list of ('name', regex) tuples, one per hierarchy level; each
regex extracts that level from the sample label.
:param df:
:param indices: Tuples of ('label', 'regex') matches, one per index level
:param remove_prefix: Strip these strings from labels before matching (e.g. headers)
:param types: Optional dict mapping a level name to a callable applied to its matched value
:param axis: Axis (1 = columns, 0 = rows)
:return:
|
[
"Build",
"a",
"MultiIndex",
"from",
"a",
"list",
"of",
"labels",
"and",
"matching",
"regex"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/process.py#L114-L165
|
train
|
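A short usage-style sketch of the matching loop: each ('name', regex) tuple contributes one index level via its single capture group, with an optional type conversion (labels and patterns below are invented):

import re

import pandas as pd

df = pd.DataFrame([[1.0, 2.0]], columns=['ctrl_R1', 'drug_R2'])

indices = [('Condition', r'^([a-z]+)_'), ('Replicate', r'_R(\d+)$')]
types = {'Replicate': int}

tuples = []
for label in df.columns:
    row = []
    for name, pattern in indices:
        m = re.search(pattern, label)
        row.append(types.get(name, str)(m.group(1)) if m else None)
    tuples.append(tuple(row))

df.columns = pd.MultiIndex.from_tuples(tuples, names=[n for n, _ in indices])
print(df.columns.tolist())  # [('ctrl', 1), ('drug', 2)]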
mfitzp/padua
|
padua/process.py
|
combine_expression_columns
|
def combine_expression_columns(df, columns_to_combine, remove_combined=True):
"""
Combine expression columns, calculating the mean for 2 columns
:param df: Pandas dataframe
:param columns_to_combine: A list of tuples containing the column names to combine
:return:
"""
df = df.copy()
for ca, cb in columns_to_combine:
df["%s_(x+y)/2_%s" % (ca, cb)] = (df[ca] + df[cb]) / 2
if remove_combined:
for ca, cb in columns_to_combine:
df.drop([ca, cb], inplace=True, axis=1)
return df
|
python
|
def combine_expression_columns(df, columns_to_combine, remove_combined=True):
"""
Combine expression columns, calculating the mean for 2 columns
:param df: Pandas dataframe
:param columns_to_combine: A list of tuples containing the column names to combine
:return:
"""
df = df.copy()
for ca, cb in columns_to_combine:
df["%s_(x+y)/2_%s" % (ca, cb)] = (df[ca] + df[cb]) / 2
if remove_combined:
for ca, cb in columns_to_combine:
df.drop([ca, cb], inplace=True, axis=1)
return df
|
[
"def",
"combine_expression_columns",
"(",
"df",
",",
"columns_to_combine",
",",
"remove_combined",
"=",
"True",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"for",
"ca",
",",
"cb",
"in",
"columns_to_combine",
":",
"df",
"[",
"\"%s_(x+y)/2_%s\"",
"%",
"(",
"ca",
",",
"cb",
")",
"]",
"=",
"(",
"df",
"[",
"ca",
"]",
"+",
"df",
"[",
"cb",
"]",
")",
"/",
"2",
"if",
"remove_combined",
":",
"for",
"ca",
",",
"cb",
"in",
"columns_to_combine",
":",
"df",
".",
"drop",
"(",
"[",
"ca",
",",
"cb",
"]",
",",
"inplace",
"=",
"True",
",",
"axis",
"=",
"1",
")",
"return",
"df"
] |
Combine expression columns, calculating the mean for 2 columns
:param df: Pandas dataframe
:param columns_to_combine: A list of tuples containing the column names to combine
:return:
|
[
"Combine",
"expression",
"columns",
"calculating",
"the",
"mean",
"for",
"2",
"columns"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/process.py#L198-L218
|
train
|
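For a single pair of columns the operation reduces to an element-wise mean plus a drop; a toy equivalent (column names invented):

import pandas as pd

df = pd.DataFrame({'run_a': [2.0, 4.0], 'run_b': [4.0, 8.0]})

# Equivalent of combine_expression_columns(df, [('run_a', 'run_b')]):
df['run_a_(x+y)/2_run_b'] = (df['run_a'] + df['run_b']) / 2
df = df.drop(['run_a', 'run_b'], axis=1)
print(df)  # the combined column holds [3.0, 6.0]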
mfitzp/padua
|
padua/process.py
|
expand_side_table
|
def expand_side_table(df):
"""
Perform equivalent of 'expand side table' in Perseus by folding
Multiplicity columns down onto duplicate rows
The id is remapped to UID___Multiplicity, which
is different to Perseus behaviour, but prevents accidental matching of
non-matching rows later in analysis.
:param df:
:return:
"""
df = df.copy()
idx = df.index.names
df.reset_index(inplace=True)
def strip_multiplicity(df):
df.columns = [c[:-4] for c in df.columns]
return df
def strip_multiple(s):
for sr in ['___1','___2','___3']:
if s.endswith(sr):
s = s[:-4]
return s
base = df.filter(regex='.*(?<!___\d)$')
# Remove columns that will match ripped multiplicity columns
for c in df.columns.values:
if strip_multiple(c) != c and strip_multiple(c) in list(base.columns.values):
base.drop(strip_multiple(c), axis=1, inplace=True)
multi1 = df.filter(regex='^.*___1$')
multi1 = strip_multiplicity(multi1)
multi1['Multiplicity'] = '___1'
multi1 = pd.concat([multi1, base], axis=1)
multi2 = df.filter(regex='^.*___2$')
multi2 = strip_multiplicity(multi2)
multi2['Multiplicity'] = '___2'
multi2 = pd.concat([multi2, base], axis=1)
multi3 = df.filter(regex='^.*___3$')
multi3 = strip_multiplicity(multi3)
multi3['Multiplicity'] = '___3'
multi3 = pd.concat([multi3, base], axis=1)
df = pd.concat([multi1, multi2, multi3], axis=0)
df['id'] = ["%s%s" % (a, b) for a, b in zip(df['id'], df['Multiplicity'])]
if idx[0] is not None:
df.set_index(idx, inplace=True)
return df
|
python
|
def expand_side_table(df):
"""
Perform equivalent of 'expand side table' in Perseus by folding
Multiplicity columns down onto duplicate rows
The id is remapped to UID___Multiplicity, which
is different to Perseus behaviour, but prevents accidental matching of
non-matching rows later in analysis.
:param df:
:return:
"""
df = df.copy()
idx = df.index.names
df.reset_index(inplace=True)
def strip_multiplicity(df):
df.columns = [c[:-4] for c in df.columns]
return df
def strip_multiple(s):
for sr in ['___1','___2','___3']:
if s.endswith(sr):
s = s[:-4]
return s
base = df.filter(regex='.*(?<!___\d)$')
# Remove columns that will match ripped multiplicity columns
for c in df.columns.values:
if strip_multiple(c) != c and strip_multiple(c) in list(base.columns.values):
base.drop(strip_multiple(c), axis=1, inplace=True)
multi1 = df.filter(regex='^.*___1$')
multi1 = strip_multiplicity(multi1)
multi1['Multiplicity'] = '___1'
multi1 = pd.concat([multi1, base], axis=1)
multi2 = df.filter(regex='^.*___2$')
multi2 = strip_multiplicity(multi2)
multi2['Multiplicity'] = '___2'
multi2 = pd.concat([multi2, base], axis=1)
multi3 = df.filter(regex='^.*___3$')
multi3 = strip_multiplicity(multi3)
multi3['Multiplicity'] = '___3'
multi3 = pd.concat([multi3, base], axis=1)
df = pd.concat([multi1, multi2, multi3], axis=0)
df['id'] = ["%s%s" % (a, b) for a, b in zip(df['id'], df['Multiplicity'])]
if idx[0] is not None:
df.set_index(idx, inplace=True)
return df
|
[
"def",
"expand_side_table",
"(",
"df",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"idx",
"=",
"df",
".",
"index",
".",
"names",
"df",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
")",
"def",
"strip_multiplicity",
"(",
"df",
")",
":",
"df",
".",
"columns",
"=",
"[",
"c",
"[",
":",
"-",
"4",
"]",
"for",
"c",
"in",
"df",
".",
"columns",
"]",
"return",
"df",
"def",
"strip_multiple",
"(",
"s",
")",
":",
"for",
"sr",
"in",
"[",
"'___1'",
",",
"'___2'",
",",
"'___3'",
"]",
":",
"if",
"s",
".",
"endswith",
"(",
"sr",
")",
":",
"s",
"=",
"s",
"[",
":",
"-",
"4",
"]",
"return",
"s",
"base",
"=",
"df",
".",
"filter",
"(",
"regex",
"=",
"'.*(?<!___\\d)$'",
")",
"# Remove columns that will match ripped multiplicity columns",
"for",
"c",
"in",
"df",
".",
"columns",
".",
"values",
":",
"if",
"strip_multiple",
"(",
"c",
")",
"!=",
"c",
"and",
"strip_multiple",
"(",
"c",
")",
"in",
"list",
"(",
"base",
".",
"columns",
".",
"values",
")",
":",
"base",
".",
"drop",
"(",
"strip_multiple",
"(",
"c",
")",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"multi1",
"=",
"df",
".",
"filter",
"(",
"regex",
"=",
"'^.*___1$'",
")",
"multi1",
"=",
"strip_multiplicity",
"(",
"multi1",
")",
"multi1",
"[",
"'Multiplicity'",
"]",
"=",
"'___1'",
"multi1",
"=",
"pd",
".",
"concat",
"(",
"[",
"multi1",
",",
"base",
"]",
",",
"axis",
"=",
"1",
")",
"multi2",
"=",
"df",
".",
"filter",
"(",
"regex",
"=",
"'^.*___2$'",
")",
"multi2",
"=",
"strip_multiplicity",
"(",
"multi2",
")",
"multi2",
"[",
"'Multiplicity'",
"]",
"=",
"'___2'",
"multi2",
"=",
"pd",
".",
"concat",
"(",
"[",
"multi2",
",",
"base",
"]",
",",
"axis",
"=",
"1",
")",
"multi3",
"=",
"df",
".",
"filter",
"(",
"regex",
"=",
"'^.*___3$'",
")",
"multi3",
"=",
"strip_multiplicity",
"(",
"multi3",
")",
"multi3",
"[",
"'Multiplicity'",
"]",
"=",
"'___3'",
"multi3",
"=",
"pd",
".",
"concat",
"(",
"[",
"multi3",
",",
"base",
"]",
",",
"axis",
"=",
"1",
")",
"df",
"=",
"pd",
".",
"concat",
"(",
"[",
"multi1",
",",
"multi2",
",",
"multi3",
"]",
",",
"axis",
"=",
"0",
")",
"df",
"[",
"'id'",
"]",
"=",
"[",
"\"%s%s\"",
"%",
"(",
"a",
",",
"b",
")",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"df",
"[",
"'id'",
"]",
",",
"df",
"[",
"'Multiplicity'",
"]",
")",
"]",
"if",
"idx",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"df",
".",
"set_index",
"(",
"idx",
",",
"inplace",
"=",
"True",
")",
"return",
"df"
] |
Perform equivalent of 'expand side table' in Perseus by folding
Multiplicity columns down onto duplicate rows
The id is remapped to UID___Multiplicity, which
is different to Perseus behaviour, but prevents accidental matching of
non-matching rows later in analysis.
:param df:
:return:
|
[
"Perform",
"equivalent",
"of",
"expand",
"side",
"table",
"in",
"Perseus",
"by",
"folding",
"Multiplicity",
"columns",
"down",
"onto",
"duplicate",
"rows"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/process.py#L221-L277
|
train
|
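The column split above hinges on the negative-lookbehind regex .*(?<!___\d)$ (columns without a multiplicity suffix) versus anchored patterns like ^.*___1$. A tiny demonstration of that split:

import pandas as pd

df = pd.DataFrame(columns=['id', 'Ratio___1', 'Ratio___2', 'Ratio___3'])

base = df.filter(regex=r'.*(?<!___\d)$')  # columns with no ___n suffix
multi1 = df.filter(regex=r'^.*___1$')     # multiplicity-1 columns only
print(list(base.columns), list(multi1.columns))
# ['id'] ['Ratio___1']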
mfitzp/padua
|
padua/process.py
|
apply_experimental_design
|
def apply_experimental_design(df, f, prefix='Intensity '):
"""
Load the experimental design template from MaxQuant and use it to apply the label names to the data columns.
:param df:
:param f: File path for the experimental design template
:param prefix:
:return: df
"""
df = df.copy()
edt = pd.read_csv(f, sep='\t', header=0)
edt.set_index('Experiment', inplace=True)
new_column_labels = []
for l in df.columns.values:
try:
l = edt.loc[l.replace(prefix, '')]['Name']
except (IndexError, KeyError):
pass
new_column_labels.append(l)
df.columns = new_column_labels
return df
|
python
|
def apply_experimental_design(df, f, prefix='Intensity '):
"""
Load the experimental design template from MaxQuant and use it to apply the label names to the data columns.
:param df:
:param f: File path for the experimental design template
:param prefix:
:return: df
"""
df = df.copy()
edt = pd.read_csv(f, sep='\t', header=0)
edt.set_index('Experiment', inplace=True)
new_column_labels = []
for l in df.columns.values:
try:
l = edt.loc[l.replace(prefix, '')]['Name']
except (IndexError, KeyError):
pass
new_column_labels.append(l)
df.columns = new_column_labels
return df
|
[
"def",
"apply_experimental_design",
"(",
"df",
",",
"f",
",",
"prefix",
"=",
"'Intensity '",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"edt",
"=",
"pd",
".",
"read_csv",
"(",
"f",
",",
"sep",
"=",
"'\\t'",
",",
"header",
"=",
"0",
")",
"edt",
".",
"set_index",
"(",
"'Experiment'",
",",
"inplace",
"=",
"True",
")",
"new_column_labels",
"=",
"[",
"]",
"for",
"l",
"in",
"df",
".",
"columns",
".",
"values",
":",
"try",
":",
"l",
"=",
"edt",
".",
"loc",
"[",
"l",
".",
"replace",
"(",
"prefix",
",",
"''",
")",
"]",
"[",
"'Name'",
"]",
"except",
"(",
"IndexError",
",",
"KeyError",
")",
":",
"pass",
"new_column_labels",
".",
"append",
"(",
"l",
")",
"df",
".",
"columns",
"=",
"new_column_labels",
"return",
"df"
] |
Load the experimental design template from MaxQuant and use it to apply the label names to the data columns.
:param df:
:param f: File path for the experimental design template
:param prefix:
:return: df
|
[
"Load",
"the",
"experimental",
"design",
"template",
"from",
"MaxQuant",
"and",
"use",
"it",
"to",
"apply",
"the",
"label",
"names",
"to",
"the",
"data",
"columns",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/process.py#L280-L306
|
train
|
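A usage-style sketch with an in-memory stand-in for the tab-separated template (real templates come from MaxQuant; the column set here is illustrative):

import io

import pandas as pd

template = io.StringIO("Name\tFraction\tExperiment\nControl_1\t1\tC1\n")
edt = pd.read_csv(template, sep='\t', header=0).set_index('Experiment')

df = pd.DataFrame([[1.0, 2.0]], columns=['Intensity C1', 'Score'])

new_labels = []
for label in df.columns:
    try:
        label = edt.loc[label.replace('Intensity ', '')]['Name']
    except KeyError:  # columns without a template entry keep their name
        pass
    new_labels.append(label)
df.columns = new_labels
print(df.columns.tolist())  # ['Control_1', 'Score']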
mfitzp/padua
|
padua/process.py
|
transform_expression_columns
|
def transform_expression_columns(df, fn=np.log2, prefix='Intensity '):
"""
Apply transformation to expression columns.
The default is a log2 transform applied to expression columns beginning with 'Intensity'.
:param df:
:param prefix: The column prefix for expression columns
:return:
"""
df = df.copy()
mask = np.array([l.startswith(prefix) for l in df.columns.values])
df.iloc[:, mask] = fn(df.iloc[:, mask])
df.replace([np.inf, -np.inf], np.nan, inplace=True)
return df
|
python
|
def transform_expression_columns(df, fn=np.log2, prefix='Intensity '):
"""
Apply transformation to expression columns.
The default is a log2 transform applied to expression columns beginning with 'Intensity'.
:param df:
:param prefix: The column prefix for expression columns
:return:
"""
df = df.copy()
mask = np.array([l.startswith(prefix) for l in df.columns.values])
df.iloc[:, mask] = fn(df.iloc[:, mask])
df.replace([np.inf, -np.inf], np.nan, inplace=True)
return df
|
[
"def",
"transform_expression_columns",
"(",
"df",
",",
"fn",
"=",
"np",
".",
"log2",
",",
"prefix",
"=",
"'Intensity '",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"mask",
"=",
"np",
".",
"array",
"(",
"[",
"l",
".",
"startswith",
"(",
"prefix",
")",
"for",
"l",
"in",
"df",
".",
"columns",
".",
"values",
"]",
")",
"df",
".",
"iloc",
"[",
":",
",",
"mask",
"]",
"=",
"fn",
"(",
"df",
".",
"iloc",
"[",
":",
",",
"mask",
"]",
")",
"df",
".",
"replace",
"(",
"[",
"np",
".",
"inf",
",",
"-",
"np",
".",
"inf",
"]",
",",
"np",
".",
"nan",
",",
"inplace",
"=",
"True",
")",
"return",
"df"
] |
Apply transformation to expression columns.
The default is a log2 transform applied to expression columns beginning with 'Intensity'.
:param df:
:param prefix: The column prefix for expression columns
:return:
|
[
"Apply",
"transformation",
"to",
"expression",
"columns",
"."
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/process.py#L309-L327
|
train
|
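A small demonstration of the mask-and-transform pattern, including the -inf produced by log2 of a zero intensity being converted to NaN:

import numpy as np
import pandas as pd

df = pd.DataFrame({'Intensity A': [4.0, 0.0], 'Score': [1.0, 2.0]})

mask = np.array([c.startswith('Intensity ') for c in df.columns])
df.iloc[:, mask] = np.log2(df.iloc[:, mask])
df = df.replace([np.inf, -np.inf], np.nan)
print(df)  # 'Intensity A' becomes [2.0, NaN]; 'Score' is untouched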
mfitzp/padua
|
padua/process.py
|
fold_columns_to_rows
|
def fold_columns_to_rows(df, levels_from=2):
"""
Take levels from the columns and fold down into the row index.
This destroys the existing index; existing rows will appear as
columns under the new column index
:param df:
:param levels_from: The level (inclusive) from which column index will be folded
:return:
"""
df = df.copy()
df.reset_index(inplace=True, drop=True) # Wipe out the current index
df = df.T
# Build all index combinations
a = [list( set( df.index.get_level_values(i) ) ) for i in range(0, levels_from)]
combinations = list(itertools.product(*a))
names = df.index.names[:levels_from]
concats = []
for c in combinations:
try:
dfcc = df.loc[c]
except KeyError:
continue
else:
# Silly pandas
if len(dfcc.shape) == 1:
continue
dfcc.columns = pd.MultiIndex.from_tuples([c]*dfcc.shape[1], names=names)
concats.append(dfcc)
# Concatenate
dfc = pd.concat(concats, axis=1)
dfc.sort_index(axis=1, inplace=True)
# Fix name if collapsed
if dfc.index.name is None:
dfc.index.name = df.index.names[-1]
return dfc
|
python
|
def fold_columns_to_rows(df, levels_from=2):
"""
Take levels from the columns and fold down into the row index.
This destroys the existing index; existing rows will appear as
columns under the new column index
:param df:
:param levels_from: The level (inclusive) from which column index will be folded
:return:
"""
df = df.copy()
df.reset_index(inplace=True, drop=True) # Wipe out the current index
df = df.T
# Build all index combinations
a = [list( set( df.index.get_level_values(i) ) ) for i in range(0, levels_from)]
combinations = list(itertools.product(*a))
names = df.index.names[:levels_from]
concats = []
for c in combinations:
try:
dfcc = df.loc[c]
except KeyError:
continue
else:
# Silly pandas
if len(dfcc.shape) == 1:
continue
dfcc.columns = pd.MultiIndex.from_tuples([c]*dfcc.shape[1], names=names)
concats.append(dfcc)
# Concatenate
dfc = pd.concat(concats, axis=1)
dfc.sort_index(axis=1, inplace=True)
# Fix name if collapsed
if dfc.index.name is None:
dfc.index.name = df.index.names[-1]
return dfc
|
[
"def",
"fold_columns_to_rows",
"(",
"df",
",",
"levels_from",
"=",
"2",
")",
":",
"df",
"=",
"df",
".",
"copy",
"(",
")",
"df",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"True",
")",
"# Wipe out the current index",
"df",
"=",
"df",
".",
"T",
"# Build all index combinations",
"a",
"=",
"[",
"list",
"(",
"set",
"(",
"df",
".",
"index",
".",
"get_level_values",
"(",
"i",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"levels_from",
")",
"]",
"combinations",
"=",
"list",
"(",
"itertools",
".",
"product",
"(",
"*",
"a",
")",
")",
"names",
"=",
"df",
".",
"index",
".",
"names",
"[",
":",
"levels_from",
"]",
"concats",
"=",
"[",
"]",
"for",
"c",
"in",
"combinations",
":",
"try",
":",
"dfcc",
"=",
"df",
".",
"loc",
"[",
"c",
"]",
"except",
"KeyError",
":",
"continue",
"else",
":",
"# Silly pandas",
"if",
"len",
"(",
"dfcc",
".",
"shape",
")",
"==",
"1",
":",
"continue",
"dfcc",
".",
"columns",
"=",
"pd",
".",
"MultiIndex",
".",
"from_tuples",
"(",
"[",
"c",
"]",
"*",
"dfcc",
".",
"shape",
"[",
"1",
"]",
",",
"names",
"=",
"names",
")",
"concats",
".",
"append",
"(",
"dfcc",
")",
"# Concatenate",
"dfc",
"=",
"pd",
".",
"concat",
"(",
"concats",
",",
"axis",
"=",
"1",
")",
"dfc",
".",
"sort_index",
"(",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"# Fix name if collapsed",
"if",
"dfc",
".",
"index",
".",
"name",
"is",
"None",
":",
"dfc",
".",
"index",
".",
"name",
"=",
"df",
".",
"index",
".",
"names",
"[",
"-",
"1",
"]",
"return",
"dfc"
] |
Take levels from the columns and fold down into the row index.
This destroys the existing index; existing rows will appear as
columns under the new column index
:param df:
:param levels_from: The level (inclusive) from which column index will be folded
:return:
|
[
"Take",
"a",
"levels",
"from",
"the",
"columns",
"and",
"fold",
"down",
"into",
"the",
"row",
"index",
".",
"This",
"destroys",
"the",
"existing",
"index",
";",
"existing",
"rows",
"will",
"appear",
"as",
"columns",
"under",
"the",
"new",
"column",
"index"
] |
8b14bf4d2f895da6aea5d7885d409315bd303ec6
|
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/process.py#L330-L377
|
train
|
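The candidate label combinations above come from itertools.product over the chosen index levels; the product can contain combinations absent from the data, which is why the function guards with a KeyError. A small demonstration:

import itertools

import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [('Control', 1, 'x'), ('Control', 2, 'x'), ('Drug', 1, 'x')],
    names=['Condition', 'Replicate', 'Field'])

levels = [sorted(set(idx.get_level_values(i))) for i in range(2)]
print(list(itertools.product(*levels)))
# [('Control', 1), ('Control', 2), ('Drug', 1), ('Drug', 2)]
# ('Drug', 2) never occurs in the data, hence the KeyError guard above.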
ECRL/ecabc
|
ecabc/abc.py
|
ABC.args
|
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
|
python
|
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
|
[
"def",
"args",
"(",
"self",
",",
"args",
")",
":",
"self",
".",
"_args",
"=",
"args",
"self",
".",
"_logger",
".",
"log",
"(",
"'debug'",
",",
"'Args set to {}'",
".",
"format",
"(",
"args",
")",
")"
] |
Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
|
[
"Set",
"additional",
"arguments",
"to",
"be",
"passed",
"to",
"the",
"fitness",
"function"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L147-L154
|
train
|
ECRL/ecabc
|
ecabc/abc.py
|
ABC.minimize
|
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
|
python
|
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
|
[
"def",
"minimize",
"(",
"self",
",",
"minimize",
")",
":",
"self",
".",
"_minimize",
"=",
"minimize",
"self",
".",
"_logger",
".",
"log",
"(",
"'debug'",
",",
"'Minimize set to {}'",
".",
"format",
"(",
"minimize",
")",
")"
] |
Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
|
[
"Configures",
"the",
"ABC",
"to",
"minimize",
"fitness",
"function",
"return",
"value",
"or",
"derived",
"score"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L165-L175
|
train
|
ECRL/ecabc
|
ecabc/abc.py
|
ABC.num_employers
|
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
|
python
|
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
|
[
"def",
"num_employers",
"(",
"self",
",",
"num_employers",
")",
":",
"if",
"num_employers",
"<",
"2",
":",
"self",
".",
"_logger",
".",
"log",
"(",
"'warn'",
",",
"'Two employers are needed: setting to two'",
")",
"num_employers",
"=",
"2",
"self",
".",
"_num_employers",
"=",
"num_employers",
"self",
".",
"_logger",
".",
"log",
"(",
"'debug'",
",",
"'Number of employers set to {}'",
".",
"format",
"(",
"num_employers",
")",
")",
"self",
".",
"_limit",
"=",
"num_employers",
"*",
"len",
"(",
"self",
".",
"_value_ranges",
")",
"self",
".",
"_logger",
".",
"log",
"(",
"'debug'",
",",
"'Limit set to {}'",
".",
"format",
"(",
"self",
".",
"_limit",
")",
")"
] |
Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
|
[
"Sets",
"the",
"number",
"of",
"employer",
"bees",
";",
"at",
"least",
"two",
"are",
"required"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L184-L202
|
train
|
ECRL/ecabc
|
ecabc/abc.py
|
ABC.processes
|
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
|
python
|
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
|
[
"def",
"processes",
"(",
"self",
",",
"processes",
")",
":",
"if",
"self",
".",
"_processes",
">",
"1",
":",
"self",
".",
"_pool",
".",
"close",
"(",
")",
"self",
".",
"_pool",
".",
"join",
"(",
")",
"self",
".",
"_pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"processes",
")",
"else",
":",
"self",
".",
"_pool",
"=",
"None",
"self",
".",
"_logger",
".",
"log",
"(",
"'debug'",
",",
"'Number of processes set to {}'",
".",
"format",
"(",
"processes",
")",
")"
] |
Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
|
[
"Set",
"the",
"number",
"of",
"concurrent",
"processes",
"the",
"ABC",
"will",
"utilize",
"for",
"fitness",
"function",
"evaluation",
";",
"if",
"<",
"=",
"1",
"single",
"process",
"is",
"used"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L268-L284
|
train
|
ECRL/ecabc
|
ecabc/abc.py
|
ABC.infer_process_count
|
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could not infer CPU count, setting number of processes back to 4'
)
self.processes = 4
|
python
|
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could not infer CPU count, setting number of processes back to 4'
)
self.processes = 4
|
[
"def",
"infer_process_count",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"processes",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"except",
"NotImplementedError",
":",
"self",
".",
"_logger",
".",
"log",
"(",
"'error'",
",",
"'Could infer CPU count, setting number of processes back to 4'",
")",
"self",
".",
"processes",
"=",
"4"
] |
Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
|
[
"Infers",
"the",
"number",
"of",
"CPU",
"cores",
"in",
"the",
"current",
"system",
"sets",
"the",
"number",
"of",
"concurrent",
"processes",
"accordingly"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L286-L298
|
train
|
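The same fallback pattern, outside the class, for reference:

import multiprocessing

try:
    n_processes = multiprocessing.cpu_count()
except NotImplementedError:  # cpu_count can be unavailable on some platforms
    n_processes = 4
print(n_processes)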
ECRL/ecabc
|
ecabc/abc.py
|
ABC.create_employers
|
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
|
python
|
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
|
[
"def",
"create_employers",
"(",
"self",
")",
":",
"self",
".",
"__verify_ready",
"(",
"True",
")",
"employers",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"_num_employers",
")",
":",
"employer",
"=",
"EmployerBee",
"(",
"self",
".",
"__gen_random_values",
"(",
")",
")",
"if",
"self",
".",
"_processes",
"<=",
"1",
":",
"employer",
".",
"error",
"=",
"self",
".",
"_fitness_fxn",
"(",
"employer",
".",
"values",
",",
"*",
"*",
"self",
".",
"_args",
")",
"employer",
".",
"score",
"=",
"employer",
".",
"get_score",
"(",
")",
"if",
"np",
".",
"isnan",
"(",
"employer",
".",
"score",
")",
":",
"self",
".",
"_logger",
".",
"log",
"(",
"'warn'",
",",
"'NaN bee score: {}, {}'",
".",
"format",
"(",
"employer",
".",
"id",
",",
"employer",
".",
"score",
")",
")",
"self",
".",
"_logger",
".",
"log",
"(",
"'debug'",
",",
"'Bee number {} created'",
".",
"format",
"(",
"i",
"+",
"1",
")",
")",
"self",
".",
"__update",
"(",
"employer",
".",
"score",
",",
"employer",
".",
"values",
",",
"employer",
".",
"error",
")",
"else",
":",
"employer",
".",
"error",
"=",
"self",
".",
"_pool",
".",
"apply_async",
"(",
"self",
".",
"_fitness_fxn",
",",
"[",
"employer",
".",
"values",
"]",
",",
"self",
".",
"_args",
")",
"employers",
".",
"append",
"(",
"employer",
")",
"self",
".",
"_employers",
".",
"append",
"(",
"employer",
")",
"for",
"idx",
",",
"employer",
"in",
"enumerate",
"(",
"employers",
")",
":",
"try",
":",
"employer",
".",
"error",
"=",
"employer",
".",
"error",
".",
"get",
"(",
")",
"employer",
".",
"score",
"=",
"employer",
".",
"get_score",
"(",
")",
"if",
"np",
".",
"isnan",
"(",
"employer",
".",
"score",
")",
":",
"self",
".",
"_logger",
".",
"log",
"(",
"'warn'",
",",
"'NaN bee score: {}, {}'",
".",
"format",
"(",
"employer",
".",
"id",
",",
"employer",
".",
"score",
")",
")",
"self",
".",
"_logger",
".",
"log",
"(",
"'debug'",
",",
"'Bee number {} created'",
".",
"format",
"(",
"i",
"+",
"1",
")",
")",
"self",
".",
"__update",
"(",
"employer",
".",
"score",
",",
"employer",
".",
"values",
",",
"employer",
".",
"error",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"self",
".",
"_logger",
".",
"log",
"(",
"'debug'",
",",
"'Employer creation complete'",
")"
] |
Generate employer bees. This should be called directly after the
ABC is initialized.
|
[
"Generate",
"employer",
"bees",
".",
"This",
"should",
"be",
"called",
"directly",
"after",
"the",
"ABC",
"is",
"initialized",
"."
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L300-L344
|
train
|
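The parallel branch above follows the standard apply_async/get pattern: submit every evaluation first, then collect results. A self-contained sketch of that pattern with a toy fitness function (this is not the ecabc API itself, whose constructor is not shown in this record):

import multiprocessing


def fitness(values):
    # Toy fitness: sum of squares (lower is better).
    return sum(v * v for v in values)


if __name__ == '__main__':
    candidates = [[1, 2], [3, 4], [0, 1]]
    with multiprocessing.Pool(2) as pool:
        handles = [pool.apply_async(fitness, [c]) for c in candidates]
        errors = [h.get() for h in handles]  # .get() blocks until each is done
    print(errors)  # [5, 25, 1]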
ECRL/ecabc
|
ecabc/abc.py
|
ABC.run_iteration
|
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
|
python
|
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
|
[
"def",
"run_iteration",
"(",
"self",
")",
":",
"self",
".",
"_employer_phase",
"(",
")",
"self",
".",
"_calc_probability",
"(",
")",
"self",
".",
"_onlooker_phase",
"(",
")",
"self",
".",
"_check_positions",
"(",
")"
] |
Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
|
[
"Runs",
"a",
"single",
"iteration",
"of",
"the",
"ABC",
";",
"employer",
"phase",
"-",
">",
"probability",
"calculation",
"-",
">",
"onlooker",
"phase",
"-",
">",
"check",
"positions"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L346-L354
|
train
|
ECRL/ecabc
|
ecabc/abc.py
|
ABC._calc_probability
|
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
|
python
|
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
|
[
"def",
"_calc_probability",
"(",
"self",
")",
":",
"self",
".",
"_logger",
".",
"log",
"(",
"'debug'",
",",
"'Calculating bee probabilities'",
")",
"self",
".",
"__verify_ready",
"(",
")",
"self",
".",
"_total_score",
"=",
"0",
"for",
"employer",
"in",
"self",
".",
"_employers",
":",
"self",
".",
"_total_score",
"+=",
"employer",
".",
"score",
"if",
"self",
".",
"__update",
"(",
"employer",
".",
"score",
",",
"employer",
".",
"values",
",",
"employer",
".",
"error",
")",
":",
"self",
".",
"_logger",
".",
"log",
"(",
"'info'",
",",
"'Update to best performer -'",
"' error: {} | score: {} | values: {}'",
".",
"format",
"(",
"employer",
".",
"error",
",",
"employer",
".",
"score",
",",
"employer",
".",
"values",
")",
")",
"for",
"employer",
"in",
"self",
".",
"_employers",
":",
"employer",
".",
"calculate_probability",
"(",
"self",
".",
"_total_score",
")"
] |
Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
|
[
"Determines",
"the",
"probability",
"that",
"each",
"bee",
"will",
"be",
"chosen",
"during",
"the",
"onlooker",
"phase",
";",
"also",
"determines",
"if",
"a",
"new",
"best",
"-",
"performing",
"bee",
"is",
"found"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L377-L398
|
train
|
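The usual ABC rule is fitness-proportional (roulette-wheel) selection. The exact formula inside calculate_probability is not shown in this record, but under that standard assumption each bee's probability is simply its score over the total:

# Assumed roulette-wheel rule: p_i = score_i / sum(scores)
scores = [2.0, 3.0, 5.0]
total = sum(scores)
print([s / total for s in scores])  # [0.2, 0.3, 0.5]: fitter bees are picked more often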
ECRL/ecabc
|
ecabc/abc.py
|
ABC._merge_bee
|
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
|
python
|
def _merge_bee(self, bee):
    '''Shifts a random value for a supplied bee in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
|
[
"def",
"_merge_bee",
"(",
"self",
",",
"bee",
")",
":",
"random_dimension",
"=",
"randint",
"(",
"0",
",",
"len",
"(",
"self",
".",
"_value_ranges",
")",
"-",
"1",
")",
"second_bee",
"=",
"randint",
"(",
"0",
",",
"self",
".",
"_num_employers",
"-",
"1",
")",
"while",
"(",
"bee",
".",
"id",
"==",
"self",
".",
"_employers",
"[",
"second_bee",
"]",
".",
"id",
")",
":",
"second_bee",
"=",
"randint",
"(",
"0",
",",
"self",
".",
"_num_employers",
"-",
"1",
")",
"new_bee",
"=",
"deepcopy",
"(",
"bee",
")",
"new_bee",
".",
"values",
"[",
"random_dimension",
"]",
"=",
"self",
".",
"__onlooker",
".",
"calculate_positions",
"(",
"new_bee",
".",
"values",
"[",
"random_dimension",
"]",
",",
"self",
".",
"_employers",
"[",
"second_bee",
"]",
".",
"values",
"[",
"random_dimension",
"]",
",",
"self",
".",
"_value_ranges",
"[",
"random_dimension",
"]",
")",
"fitness_score",
"=",
"new_bee",
".",
"get_score",
"(",
"self",
".",
"_fitness_fxn",
"(",
"new_bee",
".",
"values",
",",
"*",
"*",
"self",
".",
"_args",
")",
")",
"return",
"(",
"fitness_score",
",",
"new_bee",
".",
"values",
",",
"new_bee",
".",
"error",
")"
] |
Shifts a random value for a supplied bee in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
|
[
"Shifts",
"a",
"random",
"value",
"for",
"a",
"supplied",
"bee",
"with",
"in",
"accordance",
"with",
"another",
"random",
"bee",
"s",
"value"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L452-L478
|
train
|
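A hedged sketch of the merge step. The real position formula lives in the library's private onlooker helper (calculate_positions); the classic ABC update v' = v + phi * (v - v_partner), with phi drawn from [-1, 1] and the result clamped to the dimension's range, is assumed here.

import random

def merge_values(values, partner_values, bounds):
    dim = random.randint(0, len(values) - 1)      # one dimension to perturb
    phi = random.uniform(-1, 1)
    shifted = values[dim] + phi * (values[dim] - partner_values[dim])
    lo, hi = bounds[dim]
    new = list(values)
    new[dim] = min(max(shifted, lo), hi)          # clamp to the allowed range
    return new

print(merge_values([1.0, 2.0], [3.0, 0.5], [(0.0, 5.0), (0.0, 5.0)]))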
ECRL/ecabc
|
ecabc/abc.py
|
ABC._move_bee
|
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
|
python
|
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
|
[
"def",
"_move_bee",
"(",
"self",
",",
"bee",
",",
"new_values",
")",
":",
"score",
"=",
"np",
".",
"nan_to_num",
"(",
"new_values",
"[",
"0",
"]",
")",
"if",
"bee",
".",
"score",
">",
"score",
":",
"bee",
".",
"failed_trials",
"+=",
"1",
"else",
":",
"bee",
".",
"values",
"=",
"new_values",
"[",
"1",
"]",
"bee",
".",
"score",
"=",
"score",
"bee",
".",
"error",
"=",
"new_values",
"[",
"2",
"]",
"bee",
".",
"failed_trials",
"=",
"0",
"self",
".",
"_logger",
".",
"log",
"(",
"'debug'",
",",
"'Bee assigned to new merged position'",
")"
] |
Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
|
[
"Moves",
"a",
"bee",
"to",
"a",
"new",
"position",
"if",
"new",
"fitness",
"score",
"is",
"better",
"than",
"the",
"bee",
"s",
"current",
"fitness",
"score"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L480-L498
|
train
|
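The greedy move in isolation, with a plain dict standing in for EmployerBee: ties count as moves and reset failed_trials, while strictly worse candidates only bump the counter (presumably so the bee can be abandoned once the configured limit is reached).

def move(bee, candidate_score, candidate_values):
    if bee['score'] > candidate_score:
        bee['failed_trials'] += 1      # stagnation: one step closer to abandonment
    else:
        bee['values'] = candidate_values
        bee['score'] = candidate_score
        bee['failed_trials'] = 0       # improvement (or tie) resets the counter
    return bee

bee = {'score': 0.4, 'values': [1, 2], 'failed_trials': 2}
print(move(bee, 0.6, [1, 3]))  # {'score': 0.6, 'values': [1, 3], 'failed_trials': 0}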
ECRL/ecabc
|
ecabc/abc.py
|
ABC.__update
|
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
|
python
|
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
|
[
"def",
"__update",
"(",
"self",
",",
"score",
",",
"values",
",",
"error",
")",
":",
"if",
"self",
".",
"_minimize",
":",
"if",
"self",
".",
"_best_score",
"is",
"None",
"or",
"score",
">",
"self",
".",
"_best_score",
":",
"self",
".",
"_best_score",
"=",
"score",
"self",
".",
"_best_values",
"=",
"values",
".",
"copy",
"(",
")",
"self",
".",
"_best_error",
"=",
"error",
"self",
".",
"_logger",
".",
"log",
"(",
"'debug'",
",",
"'New best food source memorized: {}'",
".",
"format",
"(",
"self",
".",
"_best_error",
")",
")",
"return",
"True",
"elif",
"not",
"self",
".",
"_minimize",
":",
"if",
"self",
".",
"_best_score",
"is",
"None",
"or",
"score",
"<",
"self",
".",
"_best_score",
":",
"self",
".",
"_best_score",
"=",
"score",
"self",
".",
"_best_values",
"=",
"values",
".",
"copy",
"(",
")",
"self",
".",
"_best_error",
"=",
"error",
"self",
".",
"_logger",
".",
"log",
"(",
"'debug'",
",",
"'New best food source memorized: {}'",
".",
"format",
"(",
"self",
".",
"_best_error",
")",
")",
"return",
"True",
"return",
"False"
] |
Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
|
[
"Update",
"the",
"best",
"score",
"and",
"values",
"if",
"the",
"given",
"score",
"is",
"better",
"than",
"the",
"current",
"best",
"score"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L500-L537
|
train
|
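The comparisons above look inverted (minimizing keeps the highest score), but they are consistent with EmployerBee.get_score further down, which maps error to a fitness where a higher score corresponds to a lower error. A minimal sketch of the bookkeeping:

def update_best(best, score, minimize=True):
    if best is None:
        return score, True
    better = score > best if minimize else score < best
    return (score, True) if better else (best, False)

best = None
for s in [0.3, 0.7, 0.5]:
    best, changed = update_best(best, s, minimize=True)
print(best)  # 0.7, i.e. the score corresponding to the smallest error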
ECRL/ecabc
|
ecabc/abc.py
|
ABC.__gen_random_values
|
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
|
python
|
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
|
[
"def",
"__gen_random_values",
"(",
"self",
")",
":",
"values",
"=",
"[",
"]",
"if",
"self",
".",
"_value_ranges",
"is",
"None",
":",
"self",
".",
"_logger",
".",
"log",
"(",
"'crit'",
",",
"'Must set the type/range of possible values'",
")",
"raise",
"RuntimeError",
"(",
"\"Must set the type/range of possible values\"",
")",
"else",
":",
"for",
"t",
"in",
"self",
".",
"_value_ranges",
":",
"if",
"t",
"[",
"0",
"]",
"==",
"'int'",
":",
"values",
".",
"append",
"(",
"randint",
"(",
"t",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"t",
"[",
"1",
"]",
"[",
"1",
"]",
")",
")",
"elif",
"t",
"[",
"0",
"]",
"==",
"'float'",
":",
"values",
".",
"append",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"t",
"[",
"1",
"]",
"[",
"0",
"]",
",",
"t",
"[",
"1",
"]",
"[",
"1",
"]",
")",
")",
"else",
":",
"self",
".",
"_logger",
".",
"log",
"(",
"'crit'",
",",
"'Value type must be either an `int` or a `float`'",
")",
"raise",
"RuntimeError",
"(",
"'Value type must be either an `int` or a `float`'",
")",
"return",
"values"
] |
Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
|
[
"Generate",
"random",
"values",
"based",
"on",
"supplied",
"value",
"ranges"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L539-L567
|
train
|
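A minimal sketch of the generation loop, with the value_ranges shape inferred from the branches above: a list of ('int' | 'float', (min, max)) pairs, one per tunable variable. Plain random.uniform stands in for the library's np.random.uniform.

import random

value_ranges = [('int', (0, 10)), ('float', (-1.0, 1.0))]

values = []
for kind, (lo, hi) in value_ranges:
    if kind == 'int':
        values.append(random.randint(lo, hi))
    elif kind == 'float':
        values.append(random.uniform(lo, hi))
    else:
        raise RuntimeError('Value type must be either an `int` or a `float`')
print(values)  # e.g. [7, -0.31]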
ECRL/ecabc
|
ecabc/abc.py
|
ABC.__verify_ready
|
def __verify_ready(self, creating=False):
    '''Some cleanup; ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
|
python
|
def __verify_ready(self, creating=False):
    '''Some cleanup; ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
|
[
"def",
"__verify_ready",
"(",
"self",
",",
"creating",
"=",
"False",
")",
":",
"if",
"len",
"(",
"self",
".",
"_value_ranges",
")",
"==",
"0",
":",
"self",
".",
"_logger",
".",
"log",
"(",
"'crit'",
",",
"'Attribute value_ranges must have at least one value'",
")",
"raise",
"RuntimeWarning",
"(",
"'Attribute value_ranges must have at least one value'",
")",
"if",
"len",
"(",
"self",
".",
"_employers",
")",
"==",
"0",
"and",
"creating",
"is",
"False",
":",
"self",
".",
"_logger",
".",
"log",
"(",
"'crit'",
",",
"'Need to create employers'",
")",
"raise",
"RuntimeWarning",
"(",
"'Need to create employers'",
")"
] |
Some cleanup; ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
|
[
"Some",
"cleanup",
"ensures",
"that",
"everything",
"is",
"set",
"up",
"properly",
"to",
"avoid",
"random",
"errors",
"during",
"execution"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L569-L588
|
train
|
ECRL/ecabc
|
ecabc/abc.py
|
ABC.import_settings
|
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
            self._best_values = []
            for index, value in enumerate(data['best_values']):
                if self._value_ranges[index][0] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
|
python
|
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
            self._best_values = []
            for index, value in enumerate(data['best_values']):
                if self._value_ranges[index][0] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
|
[
"def",
"import_settings",
"(",
"self",
",",
"filename",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"self",
".",
"_logger",
".",
"log",
"(",
"'error'",
",",
"'File: {} not found, continuing with default settings'",
".",
"format",
"(",
"filename",
")",
")",
"else",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"jsonFile",
":",
"data",
"=",
"json",
".",
"load",
"(",
"jsonFile",
")",
"self",
".",
"_value_ranges",
"=",
"data",
"[",
"'valueRanges'",
"]",
"self",
".",
"_best_values",
"=",
"data",
"[",
"'best_values'",
"]",
"self",
".",
"_best_values",
"=",
"[",
"]",
"for",
"index",
",",
"value",
"in",
"enumerate",
"(",
"data",
"[",
"'best_values'",
"]",
")",
":",
"if",
"self",
".",
"_value_ranges",
"[",
"index",
"]",
"==",
"'int'",
":",
"self",
".",
"_best_values",
".",
"append",
"(",
"int",
"(",
"value",
")",
")",
"else",
":",
"self",
".",
"_best_values",
".",
"append",
"(",
"float",
"(",
"value",
")",
")",
"self",
".",
"minimize",
"=",
"data",
"[",
"'minimize'",
"]",
"self",
".",
"num_employers",
"=",
"data",
"[",
"'num_employers'",
"]",
"self",
".",
"_best_score",
"=",
"float",
"(",
"data",
"[",
"'best_score'",
"]",
")",
"self",
".",
"limit",
"=",
"data",
"[",
"'limit'",
"]"
] |
Import settings from a JSON file
Args:
filename (string): name of the file to import from
|
[
"Import",
"settings",
"from",
"a",
"JSON",
"file"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L590-L618
|
train
|
ECRL/ecabc
|
ecabc/abc.py
|
ABC.save_settings
|
def save_settings(self, filename):
'''Save settings to a JSON file
    Args:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
|
python
|
def save_settings(self, filename):
'''Save settings to a JSON file
    Args:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
|
[
"def",
"save_settings",
"(",
"self",
",",
"filename",
")",
":",
"data",
"=",
"dict",
"(",
")",
"data",
"[",
"'valueRanges'",
"]",
"=",
"self",
".",
"_value_ranges",
"data",
"[",
"'best_values'",
"]",
"=",
"[",
"str",
"(",
"value",
")",
"for",
"value",
"in",
"self",
".",
"_best_values",
"]",
"data",
"[",
"'minimize'",
"]",
"=",
"self",
".",
"_minimize",
"data",
"[",
"'num_employers'",
"]",
"=",
"self",
".",
"_num_employers",
"data",
"[",
"'best_score'",
"]",
"=",
"str",
"(",
"self",
".",
"_best_score",
")",
"data",
"[",
"'limit'",
"]",
"=",
"self",
".",
"_limit",
"data",
"[",
"'best_error'",
"]",
"=",
"self",
".",
"_best_error",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"outfile",
":",
"json",
".",
"dump",
"(",
"data",
",",
"outfile",
",",
"indent",
"=",
"4",
",",
"sort_keys",
"=",
"True",
")"
] |
Save settings to a JSON file
Args:
filename (string): name of the file to save to
|
[
"Save",
"settings",
"to",
"a",
"JSON",
"file"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L620-L636
|
train
|
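A hedged sketch of the JSON document that save_settings writes and import_settings reads back; the keys are taken from the code above, the values are illustrative. best_score and best_values round-trip as strings, which is why the importer rebuilds their numeric types.

import json

settings = {
    'valueRanges': [['int', [0, 10]], ['float', [0.0, 1.0]]],
    'best_values': ['3', '0.25'],
    'minimize': True,
    'num_employers': 50,
    'best_score': '0.8',
    'limit': 20,
    'best_error': 0.25,
}
with open('abc_settings.json', 'w') as outfile:
    json.dump(settings, outfile, indent=4, sort_keys=True)
with open('abc_settings.json') as jsonFile:
    print(json.load(jsonFile)['best_score'])  # '0.8' (stored as a string)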
ECRL/ecabc
|
ecabc/bees.py
|
EmployerBee.get_score
|
def get_score(self, error=None):
'''Calculate bee's fitness score given a value returned by the fitness
function
Args:
error (float): value returned by the fitness function
Returns:
float: derived fitness score
'''
if error is not None:
self.error = error
if self.error >= 0:
return 1 / (self.error + 1)
else:
return 1 + abs(self.error)
|
python
|
def get_score(self, error=None):
'''Calculate bee's fitness score given a value returned by the fitness
function
Args:
error (float): value returned by the fitness function
Returns:
float: derived fitness score
'''
if error is not None:
self.error = error
if self.error >= 0:
return 1 / (self.error + 1)
else:
return 1 + abs(self.error)
|
[
"def",
"get_score",
"(",
"self",
",",
"error",
"=",
"None",
")",
":",
"if",
"error",
"is",
"not",
"None",
":",
"self",
".",
"error",
"=",
"error",
"if",
"self",
".",
"error",
">=",
"0",
":",
"return",
"1",
"/",
"(",
"self",
".",
"error",
"+",
"1",
")",
"else",
":",
"return",
"1",
"+",
"abs",
"(",
"self",
".",
"error",
")"
] |
Calculate bee's fitness score given a value returned by the fitness
function
Args:
error (float): value returned by the fitness function
Returns:
float: derived fitness score
|
[
"Calculate",
"bee",
"s",
"fitness",
"score",
"given",
"a",
"value",
"returned",
"by",
"the",
"fitness",
"function"
] |
4e73125ff90bfeeae359a5ab1badba8894d70eaa
|
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/bees.py#L40-L56
|
train
|
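A worked example of the fitness mapping above: non-negative errors map to 1 / (error + 1), so error 0 gives the best possible score of 1 on that branch, while negative errors map to 1 + |error| and therefore always outscore non-negative ones.

def score(error):
    return 1.0 / (error + 1) if error >= 0 else 1 + abs(error)

for e in [0, 1, 4, -0.5]:
    print(e, '->', score(e))
# 0 -> 1.0, 1 -> 0.5, 4 -> 0.2, -0.5 -> 1.5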
foremast/foremast
|
src/foremast/dns/create_dns.py
|
SpinnakerDns.create_elb_dns
|
def create_elb_dns(self, regionspecific=False):
"""Create dns entries in route53.
Args:
        regionspecific (bool): Whether the DNS entry should include the region
Returns:
str: Auto-generated DNS name for the Elastic Load Balancer.
"""
if regionspecific:
dns_elb = self.generated.dns()['elb_region']
else:
dns_elb = self.generated.dns()['elb']
dns_elb_aws = find_elb(name=self.app_name, env=self.env, region=self.region)
zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)
self.log.info('Updating Application URL: %s', dns_elb)
dns_kwargs = {
'dns_name': dns_elb,
'dns_name_aws': dns_elb_aws,
'dns_ttl': self.dns_ttl,
}
for zone_id in zone_ids:
self.log.debug('zone_id: %s', zone_id)
update_dns_zone_record(self.env, zone_id, **dns_kwargs)
return dns_elb
|
python
|
def create_elb_dns(self, regionspecific=False):
"""Create dns entries in route53.
Args:
        regionspecific (bool): Whether the DNS entry should include the region
Returns:
str: Auto-generated DNS name for the Elastic Load Balancer.
"""
if regionspecific:
dns_elb = self.generated.dns()['elb_region']
else:
dns_elb = self.generated.dns()['elb']
dns_elb_aws = find_elb(name=self.app_name, env=self.env, region=self.region)
zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)
self.log.info('Updating Application URL: %s', dns_elb)
dns_kwargs = {
'dns_name': dns_elb,
'dns_name_aws': dns_elb_aws,
'dns_ttl': self.dns_ttl,
}
for zone_id in zone_ids:
self.log.debug('zone_id: %s', zone_id)
update_dns_zone_record(self.env, zone_id, **dns_kwargs)
return dns_elb
|
[
"def",
"create_elb_dns",
"(",
"self",
",",
"regionspecific",
"=",
"False",
")",
":",
"if",
"regionspecific",
":",
"dns_elb",
"=",
"self",
".",
"generated",
".",
"dns",
"(",
")",
"[",
"'elb_region'",
"]",
"else",
":",
"dns_elb",
"=",
"self",
".",
"generated",
".",
"dns",
"(",
")",
"[",
"'elb'",
"]",
"dns_elb_aws",
"=",
"find_elb",
"(",
"name",
"=",
"self",
".",
"app_name",
",",
"env",
"=",
"self",
".",
"env",
",",
"region",
"=",
"self",
".",
"region",
")",
"zone_ids",
"=",
"get_dns_zone_ids",
"(",
"env",
"=",
"self",
".",
"env",
",",
"facing",
"=",
"self",
".",
"elb_subnet",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Updating Application URL: %s'",
",",
"dns_elb",
")",
"dns_kwargs",
"=",
"{",
"'dns_name'",
":",
"dns_elb",
",",
"'dns_name_aws'",
":",
"dns_elb_aws",
",",
"'dns_ttl'",
":",
"self",
".",
"dns_ttl",
",",
"}",
"for",
"zone_id",
"in",
"zone_ids",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'zone_id: %s'",
",",
"zone_id",
")",
"update_dns_zone_record",
"(",
"self",
".",
"env",
",",
"zone_id",
",",
"*",
"*",
"dns_kwargs",
")",
"return",
"dns_elb"
] |
Create dns entries in route53.
Args:
regionspecific (bool): Whether the DNS entry should include the region
Returns:
str: Auto-generated DNS name for the Elastic Load Balancer.
|
[
"Create",
"dns",
"entries",
"in",
"route53",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/dns/create_dns.py#L55-L85
|
train
|
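A hedged sketch of the fan-out at the end of create_elb_dns: one record payload shared across every matching hosted zone. The names and zone IDs below are hypothetical, and update_record stands in for Foremast's update_dns_zone_record.

def update_record(env, zone_id, **kwargs):  # stand-in for update_dns_zone_record
    print(env, zone_id, kwargs['dns_name'], '->', kwargs['dns_name_aws'])

dns_kwargs = {
    'dns_name': 'myapp.dev.example.com',
    'dns_name_aws': 'internal-myapp-123.us-east-1.elb.amazonaws.com',
    'dns_ttl': 60,
}
for zone_id in ['Z111', 'Z222']:
    update_record('dev', zone_id, **dns_kwargs)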
foremast/foremast
|
src/foremast/dns/create_dns.py
|
SpinnakerDns.create_failover_dns
|
def create_failover_dns(self, primary_region='us-east-1'):
"""Create dns entries in route53 for multiregion failover setups.
Args:
primary_region (str): primary AWS region for failover
Returns:
Auto-generated DNS name.
"""
dns_record = self.generated.dns()['global']
zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)
elb_dns_aws = find_elb(name=self.app_name, env=self.env, region=self.region)
elb_dns_zone_id = find_elb_dns_zone_id(name=self.app_name, env=self.env, region=self.region)
if primary_region in elb_dns_aws:
failover_state = 'PRIMARY'
else:
failover_state = 'SECONDARY'
self.log.info("%s set as %s record", elb_dns_aws, failover_state)
self.log.info('Updating Application Failover URL: %s', dns_record)
dns_kwargs = {
'dns_name': dns_record,
'elb_dns_zone_id': elb_dns_zone_id,
'elb_aws_dns': elb_dns_aws,
'dns_ttl': self.dns_ttl,
'failover_state': failover_state,
}
for zone_id in zone_ids:
self.log.debug('zone_id: %s', zone_id)
update_failover_dns_record(self.env, zone_id, **dns_kwargs)
return dns_record
|
python
|
def create_failover_dns(self, primary_region='us-east-1'):
"""Create dns entries in route53 for multiregion failover setups.
Args:
primary_region (str): primary AWS region for failover
Returns:
Auto-generated DNS name.
"""
dns_record = self.generated.dns()['global']
zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)
elb_dns_aws = find_elb(name=self.app_name, env=self.env, region=self.region)
elb_dns_zone_id = find_elb_dns_zone_id(name=self.app_name, env=self.env, region=self.region)
if primary_region in elb_dns_aws:
failover_state = 'PRIMARY'
else:
failover_state = 'SECONDARY'
self.log.info("%s set as %s record", elb_dns_aws, failover_state)
self.log.info('Updating Application Failover URL: %s', dns_record)
dns_kwargs = {
'dns_name': dns_record,
'elb_dns_zone_id': elb_dns_zone_id,
'elb_aws_dns': elb_dns_aws,
'dns_ttl': self.dns_ttl,
'failover_state': failover_state,
}
for zone_id in zone_ids:
self.log.debug('zone_id: %s', zone_id)
update_failover_dns_record(self.env, zone_id, **dns_kwargs)
return dns_record
|
[
"def",
"create_failover_dns",
"(",
"self",
",",
"primary_region",
"=",
"'us-east-1'",
")",
":",
"dns_record",
"=",
"self",
".",
"generated",
".",
"dns",
"(",
")",
"[",
"'global'",
"]",
"zone_ids",
"=",
"get_dns_zone_ids",
"(",
"env",
"=",
"self",
".",
"env",
",",
"facing",
"=",
"self",
".",
"elb_subnet",
")",
"elb_dns_aws",
"=",
"find_elb",
"(",
"name",
"=",
"self",
".",
"app_name",
",",
"env",
"=",
"self",
".",
"env",
",",
"region",
"=",
"self",
".",
"region",
")",
"elb_dns_zone_id",
"=",
"find_elb_dns_zone_id",
"(",
"name",
"=",
"self",
".",
"app_name",
",",
"env",
"=",
"self",
".",
"env",
",",
"region",
"=",
"self",
".",
"region",
")",
"if",
"primary_region",
"in",
"elb_dns_aws",
":",
"failover_state",
"=",
"'PRIMARY'",
"else",
":",
"failover_state",
"=",
"'SECONDARY'",
"self",
".",
"log",
".",
"info",
"(",
"\"%s set as %s record\"",
",",
"elb_dns_aws",
",",
"failover_state",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Updating Application Failover URL: %s'",
",",
"dns_record",
")",
"dns_kwargs",
"=",
"{",
"'dns_name'",
":",
"dns_record",
",",
"'elb_dns_zone_id'",
":",
"elb_dns_zone_id",
",",
"'elb_aws_dns'",
":",
"elb_dns_aws",
",",
"'dns_ttl'",
":",
"self",
".",
"dns_ttl",
",",
"'failover_state'",
":",
"failover_state",
",",
"}",
"for",
"zone_id",
"in",
"zone_ids",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'zone_id: %s'",
",",
"zone_id",
")",
"update_failover_dns_record",
"(",
"self",
".",
"env",
",",
"zone_id",
",",
"*",
"*",
"dns_kwargs",
")",
"return",
"dns_record"
] |
Create dns entries in route53 for multiregion failover setups.
Args:
primary_region (str): primary AWS region for failover
Returns:
Auto-generated DNS name.
|
[
"Create",
"dns",
"entries",
"in",
"route53",
"for",
"multiregion",
"failover",
"setups",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/dns/create_dns.py#L87-L121
|
train
|
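The PRIMARY/SECONDARY decision reduces to a substring check on the ELB's AWS DNS name, which embeds its region. A minimal sketch with hypothetical names:

def failover_state(elb_dns_aws, primary_region='us-east-1'):
    return 'PRIMARY' if primary_region in elb_dns_aws else 'SECONDARY'

print(failover_state('internal-app-1.us-east-1.elb.amazonaws.com'))  # PRIMARY
print(failover_state('internal-app-1.us-west-2.elb.amazonaws.com'))  # SECONDARY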
foremast/foremast
|
src/foremast/elb/format_listeners.py
|
format_listeners
|
def format_listeners(elb_settings=None, env='dev', region='us-east-1'):
"""Format ELB Listeners into standard list.
Args:
elb_settings (dict): ELB settings including ELB Listeners to add,
e.g.::
# old
{
"certificate": null,
"i_port": 8080,
"lb_port": 80,
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
# new
{
"ports": [
{
"instance": "HTTP:8080",
"loadbalancer": "HTTP:80"
},
{
"certificate": "cert_name",
"instance": "HTTP:8443",
"loadbalancer": "HTTPS:443"
}
],
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
env (str): Environment to find the Account Number for.
Returns:
list: ELB Listeners formatted into dicts for Spinnaker::
[
{
'externalPort': 80,
'externalProtocol': 'HTTP',
'internalPort': 8080,
'internalProtocol': 'HTTP',
'sslCertificateId': None,
'listenerPolicies': [],
'backendPolicies': []
},
...
]
"""
LOG.debug('ELB settings:\n%s', elb_settings)
credential = get_env_credential(env=env)
account = credential['accountId']
listeners = []
if 'ports' in elb_settings:
for listener in elb_settings['ports']:
cert_name = format_cert_name(
env=env, region=region, account=account, certificate=listener.get('certificate', None))
lb_proto, lb_port = listener['loadbalancer'].split(':')
i_proto, i_port = listener['instance'].split(':')
listener_policies = listener.get('policies', [])
listener_policies += listener.get('listener_policies', [])
backend_policies = listener.get('backend_policies', [])
elb_data = {
'externalPort': int(lb_port),
'externalProtocol': lb_proto.upper(),
'internalPort': int(i_port),
'internalProtocol': i_proto.upper(),
'sslCertificateId': cert_name,
'listenerPolicies': listener_policies,
'backendPolicies': backend_policies,
}
listeners.append(elb_data)
else:
listener_policies = elb_settings.get('policies', [])
listener_policies += elb_settings.get('listener_policies', [])
backend_policies = elb_settings.get('backend_policies', [])
listeners = [{
'externalPort': int(elb_settings['lb_port']),
'externalProtocol': elb_settings['lb_proto'],
'internalPort': int(elb_settings['i_port']),
'internalProtocol': elb_settings['i_proto'],
'sslCertificateId': elb_settings['certificate'],
'listenerPolicies': listener_policies,
'backendPolicies': backend_policies,
}]
for listener in listeners:
LOG.info('ELB Listener:\n'
'loadbalancer %(externalProtocol)s:%(externalPort)d\n'
'instance %(internalProtocol)s:%(internalPort)d\n'
'certificate: %(sslCertificateId)s\n'
'listener_policies: %(listenerPolicies)s\n'
'backend_policies: %(backendPolicies)s', listener)
return listeners
|
python
|
def format_listeners(elb_settings=None, env='dev', region='us-east-1'):
"""Format ELB Listeners into standard list.
Args:
elb_settings (dict): ELB settings including ELB Listeners to add,
e.g.::
# old
{
"certificate": null,
"i_port": 8080,
"lb_port": 80,
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
# new
{
"ports": [
{
"instance": "HTTP:8080",
"loadbalancer": "HTTP:80"
},
{
"certificate": "cert_name",
"instance": "HTTP:8443",
"loadbalancer": "HTTPS:443"
}
],
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
env (str): Environment to find the Account Number for.
Returns:
list: ELB Listeners formatted into dicts for Spinnaker::
[
{
'externalPort': 80,
'externalProtocol': 'HTTP',
'internalPort': 8080,
'internalProtocol': 'HTTP',
'sslCertificateId': None,
'listenerPolicies': [],
'backendPolicies': []
},
...
]
"""
LOG.debug('ELB settings:\n%s', elb_settings)
credential = get_env_credential(env=env)
account = credential['accountId']
listeners = []
if 'ports' in elb_settings:
for listener in elb_settings['ports']:
cert_name = format_cert_name(
env=env, region=region, account=account, certificate=listener.get('certificate', None))
lb_proto, lb_port = listener['loadbalancer'].split(':')
i_proto, i_port = listener['instance'].split(':')
listener_policies = listener.get('policies', [])
listener_policies += listener.get('listener_policies', [])
backend_policies = listener.get('backend_policies', [])
elb_data = {
'externalPort': int(lb_port),
'externalProtocol': lb_proto.upper(),
'internalPort': int(i_port),
'internalProtocol': i_proto.upper(),
'sslCertificateId': cert_name,
'listenerPolicies': listener_policies,
'backendPolicies': backend_policies,
}
listeners.append(elb_data)
else:
listener_policies = elb_settings.get('policies', [])
listener_policies += elb_settings.get('listener_policies', [])
backend_policies = elb_settings.get('backend_policies', [])
listeners = [{
'externalPort': int(elb_settings['lb_port']),
'externalProtocol': elb_settings['lb_proto'],
'internalPort': int(elb_settings['i_port']),
'internalProtocol': elb_settings['i_proto'],
'sslCertificateId': elb_settings['certificate'],
'listenerPolicies': listener_policies,
'backendPolicies': backend_policies,
}]
for listener in listeners:
LOG.info('ELB Listener:\n'
'loadbalancer %(externalProtocol)s:%(externalPort)d\n'
'instance %(internalProtocol)s:%(internalPort)d\n'
'certificate: %(sslCertificateId)s\n'
'listener_policies: %(listenerPolicies)s\n'
'backend_policies: %(backendPolicies)s', listener)
return listeners
|
[
"def",
"format_listeners",
"(",
"elb_settings",
"=",
"None",
",",
"env",
"=",
"'dev'",
",",
"region",
"=",
"'us-east-1'",
")",
":",
"LOG",
".",
"debug",
"(",
"'ELB settings:\\n%s'",
",",
"elb_settings",
")",
"credential",
"=",
"get_env_credential",
"(",
"env",
"=",
"env",
")",
"account",
"=",
"credential",
"[",
"'accountId'",
"]",
"listeners",
"=",
"[",
"]",
"if",
"'ports'",
"in",
"elb_settings",
":",
"for",
"listener",
"in",
"elb_settings",
"[",
"'ports'",
"]",
":",
"cert_name",
"=",
"format_cert_name",
"(",
"env",
"=",
"env",
",",
"region",
"=",
"region",
",",
"account",
"=",
"account",
",",
"certificate",
"=",
"listener",
".",
"get",
"(",
"'certificate'",
",",
"None",
")",
")",
"lb_proto",
",",
"lb_port",
"=",
"listener",
"[",
"'loadbalancer'",
"]",
".",
"split",
"(",
"':'",
")",
"i_proto",
",",
"i_port",
"=",
"listener",
"[",
"'instance'",
"]",
".",
"split",
"(",
"':'",
")",
"listener_policies",
"=",
"listener",
".",
"get",
"(",
"'policies'",
",",
"[",
"]",
")",
"listener_policies",
"+=",
"listener",
".",
"get",
"(",
"'listener_policies'",
",",
"[",
"]",
")",
"backend_policies",
"=",
"listener",
".",
"get",
"(",
"'backend_policies'",
",",
"[",
"]",
")",
"elb_data",
"=",
"{",
"'externalPort'",
":",
"int",
"(",
"lb_port",
")",
",",
"'externalProtocol'",
":",
"lb_proto",
".",
"upper",
"(",
")",
",",
"'internalPort'",
":",
"int",
"(",
"i_port",
")",
",",
"'internalProtocol'",
":",
"i_proto",
".",
"upper",
"(",
")",
",",
"'sslCertificateId'",
":",
"cert_name",
",",
"'listenerPolicies'",
":",
"listener_policies",
",",
"'backendPolicies'",
":",
"backend_policies",
",",
"}",
"listeners",
".",
"append",
"(",
"elb_data",
")",
"else",
":",
"listener_policies",
"=",
"elb_settings",
".",
"get",
"(",
"'policies'",
",",
"[",
"]",
")",
"listener_policies",
"+=",
"elb_settings",
".",
"get",
"(",
"'listener_policies'",
",",
"[",
"]",
")",
"backend_policies",
"=",
"elb_settings",
".",
"get",
"(",
"'backend_policies'",
",",
"[",
"]",
")",
"listeners",
"=",
"[",
"{",
"'externalPort'",
":",
"int",
"(",
"elb_settings",
"[",
"'lb_port'",
"]",
")",
",",
"'externalProtocol'",
":",
"elb_settings",
"[",
"'lb_proto'",
"]",
",",
"'internalPort'",
":",
"int",
"(",
"elb_settings",
"[",
"'i_port'",
"]",
")",
",",
"'internalProtocol'",
":",
"elb_settings",
"[",
"'i_proto'",
"]",
",",
"'sslCertificateId'",
":",
"elb_settings",
"[",
"'certificate'",
"]",
",",
"'listenerPolicies'",
":",
"listener_policies",
",",
"'backendPolicies'",
":",
"backend_policies",
",",
"}",
"]",
"for",
"listener",
"in",
"listeners",
":",
"LOG",
".",
"info",
"(",
"'ELB Listener:\\n'",
"'loadbalancer %(externalProtocol)s:%(externalPort)d\\n'",
"'instance %(internalProtocol)s:%(internalPort)d\\n'",
"'certificate: %(sslCertificateId)s\\n'",
"'listener_policies: %(listenerPolicies)s\\n'",
"'backend_policies: %(backendPolicies)s'",
",",
"listener",
")",
"return",
"listeners"
] |
Format ELB Listeners into standard list.
Args:
elb_settings (dict): ELB settings including ELB Listeners to add,
e.g.::
# old
{
"certificate": null,
"i_port": 8080,
"lb_port": 80,
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
# new
{
"ports": [
{
"instance": "HTTP:8080",
"loadbalancer": "HTTP:80"
},
{
"certificate": "cert_name",
"instance": "HTTP:8443",
"loadbalancer": "HTTPS:443"
}
],
"subnet_purpose": "internal",
"target": "HTTP:8080/health"
}
env (str): Environment to find the Account Number for.
Returns:
list: ELB Listeners formatted into dicts for Spinnaker::
[
{
'externalPort': 80,
'externalProtocol': 'HTTP',
'internalPort': 8080,
'internalProtocol': 'HTTP',
'sslCertificateId': None,
'listenerPolicies': [],
'backendPolicies': []
},
...
]
|
[
"Format",
"ELB",
"Listeners",
"into",
"standard",
"list",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/elb/format_listeners.py#L26-L128
|
train
|
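A minimal sketch of the 'ports' branch above: each "PROTO:port" pair is split and upper-cased into a Spinnaker listener dict (certificate lookup and policies omitted for brevity).

def to_listener(port_spec):
    lb_proto, lb_port = port_spec['loadbalancer'].split(':')
    i_proto, i_port = port_spec['instance'].split(':')
    return {
        'externalPort': int(lb_port),
        'externalProtocol': lb_proto.upper(),
        'internalPort': int(i_port),
        'internalProtocol': i_proto.upper(),
    }

print(to_listener({'instance': 'http:8080', 'loadbalancer': 'http:80'}))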
foremast/foremast
|
src/foremast/elb/format_listeners.py
|
format_cert_name
|
def format_cert_name(env='', account='', region='', certificate=None):
"""Format the SSL certificate name into ARN for ELB.
Args:
env (str): Account environment name
account (str): Account number for ARN
region (str): AWS Region.
certificate (str): Name of SSL certificate
Returns:
str: Fully qualified ARN for SSL certificate
None: Certificate is not desired
"""
cert_name = None
if certificate:
if certificate.startswith('arn'):
LOG.info("Full ARN provided...skipping lookup.")
cert_name = certificate
else:
generated_cert_name = generate_custom_cert_name(env, region, account, certificate)
if generated_cert_name:
LOG.info("Found generated certificate %s from template", generated_cert_name)
cert_name = generated_cert_name
else:
LOG.info("Using default certificate name logic")
cert_name = ('arn:aws:iam::{account}:server-certificate/{name}'.format(
account=account, name=certificate))
LOG.debug('Certificate name: %s', cert_name)
return cert_name
|
python
|
def format_cert_name(env='', account='', region='', certificate=None):
"""Format the SSL certificate name into ARN for ELB.
Args:
env (str): Account environment name
account (str): Account number for ARN
region (str): AWS Region.
certificate (str): Name of SSL certificate
Returns:
str: Fully qualified ARN for SSL certificate
None: Certificate is not desired
"""
cert_name = None
if certificate:
if certificate.startswith('arn'):
LOG.info("Full ARN provided...skipping lookup.")
cert_name = certificate
else:
generated_cert_name = generate_custom_cert_name(env, region, account, certificate)
if generated_cert_name:
LOG.info("Found generated certificate %s from template", generated_cert_name)
cert_name = generated_cert_name
else:
LOG.info("Using default certificate name logic")
cert_name = ('arn:aws:iam::{account}:server-certificate/{name}'.format(
account=account, name=certificate))
LOG.debug('Certificate name: %s', cert_name)
return cert_name
|
[
"def",
"format_cert_name",
"(",
"env",
"=",
"''",
",",
"account",
"=",
"''",
",",
"region",
"=",
"''",
",",
"certificate",
"=",
"None",
")",
":",
"cert_name",
"=",
"None",
"if",
"certificate",
":",
"if",
"certificate",
".",
"startswith",
"(",
"'arn'",
")",
":",
"LOG",
".",
"info",
"(",
"\"Full ARN provided...skipping lookup.\"",
")",
"cert_name",
"=",
"certificate",
"else",
":",
"generated_cert_name",
"=",
"generate_custom_cert_name",
"(",
"env",
",",
"region",
",",
"account",
",",
"certificate",
")",
"if",
"generated_cert_name",
":",
"LOG",
".",
"info",
"(",
"\"Found generated certificate %s from template\"",
",",
"generated_cert_name",
")",
"cert_name",
"=",
"generated_cert_name",
"else",
":",
"LOG",
".",
"info",
"(",
"\"Using default certificate name logic\"",
")",
"cert_name",
"=",
"(",
"'arn:aws:iam::{account}:server-certificate/{name}'",
".",
"format",
"(",
"account",
"=",
"account",
",",
"name",
"=",
"certificate",
")",
")",
"LOG",
".",
"debug",
"(",
"'Certificate name: %s'",
",",
"cert_name",
")",
"return",
"cert_name"
] |
Format the SSL certificate name into ARN for ELB.
Args:
env (str): Account environment name
account (str): Account number for ARN
region (str): AWS Region.
certificate (str): Name of SSL certificate
Returns:
str: Fully qualified ARN for SSL certificate
None: Certificate is not desired
|
[
"Format",
"the",
"SSL",
"certificate",
"name",
"into",
"ARN",
"for",
"ELB",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/elb/format_listeners.py#L131-L161
|
train
|
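The default (non-template) path builds an IAM server-certificate ARN by plain string formatting; a worked example with hypothetical account and certificate names:

account, certificate = '123456789012', 'my-cert'  # hypothetical values
arn = 'arn:aws:iam::{account}:server-certificate/{name}'.format(
    account=account, name=certificate)
print(arn)  # arn:aws:iam::123456789012:server-certificate/my-cert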
foremast/foremast
|
src/foremast/elb/format_listeners.py
|
generate_custom_cert_name
|
def generate_custom_cert_name(env='', region='', account='', certificate=None):
"""Generate a custom TLS Cert name based on a template.
Args:
env (str): Account environment name
region (str): AWS Region.
account (str): Account number for ARN.
certificate (str): Name of SSL certificate.
Returns:
str: Fully qualified ARN for SSL certificate.
None: Template doesn't exist.
"""
cert_name = None
template_kwargs = {'account': account, 'name': certificate}
# TODO: Investigate moving this to a remote API, then fallback to local file if unable to connect
try:
rendered_template = get_template(template_file='infrastructure/iam/tlscert_naming.json.j2', **template_kwargs)
tlscert_dict = json.loads(rendered_template)
except ForemastTemplateNotFound:
LOG.info('Unable to find TLS Cert Template...falling back to default logic...')
return cert_name
# TODO: Move to v1 method for check
try:
LOG.info("Attempting to find TLS Cert using TLS Cert Template v1 lookup...")
cert_name = tlscert_dict[env][certificate]
LOG.info("Found TLS certificate named %s under %s using TLS Cert Template v1", certificate, env)
except KeyError:
LOG.error("Unable to find TLS certificate named %s under %s using v1 TLS Cert Template.", certificate, env)
# TODO: Move variable to consts
# TODO: move to v2 method for check
tls_services = ['iam', 'acm']
if cert_name is None and all(service in tlscert_dict for service in tls_services):
LOG.info("Attempting to find TLS Cert using TLS Cert Template v2 lookup...")
if certificate in tlscert_dict['iam'][env]:
cert_name = tlscert_dict['iam'][env][certificate]
LOG.info("Found IAM TLS certificate named %s under %s using TLS Cert Template v2", certificate, env)
elif certificate in tlscert_dict['acm'][region][env]:
cert_name = tlscert_dict['acm'][region][env][certificate]
LOG.info("Found ACM TLS certificate named %s under %s in %s using TLS Cert Template v2", certificate, env,
region)
else:
LOG.error(
"Unable to find TLS certificate named %s under parent keys [ACM, IAM] %s in v2 TLS Cert Template.",
certificate, env)
return cert_name
|
python
|
def generate_custom_cert_name(env='', region='', account='', certificate=None):
"""Generate a custom TLS Cert name based on a template.
Args:
env (str): Account environment name
region (str): AWS Region.
account (str): Account number for ARN.
certificate (str): Name of SSL certificate.
Returns:
str: Fully qualified ARN for SSL certificate.
None: Template doesn't exist.
"""
cert_name = None
template_kwargs = {'account': account, 'name': certificate}
# TODO: Investigate moving this to a remote API, then fallback to local file if unable to connect
try:
rendered_template = get_template(template_file='infrastructure/iam/tlscert_naming.json.j2', **template_kwargs)
tlscert_dict = json.loads(rendered_template)
except ForemastTemplateNotFound:
LOG.info('Unable to find TLS Cert Template...falling back to default logic...')
return cert_name
# TODO: Move to v1 method for check
try:
LOG.info("Attempting to find TLS Cert using TLS Cert Template v1 lookup...")
cert_name = tlscert_dict[env][certificate]
LOG.info("Found TLS certificate named %s under %s using TLS Cert Template v1", certificate, env)
except KeyError:
LOG.error("Unable to find TLS certificate named %s under %s using v1 TLS Cert Template.", certificate, env)
# TODO: Move variable to consts
# TODO: move to v2 method for check
tls_services = ['iam', 'acm']
if cert_name is None and all(service in tlscert_dict for service in tls_services):
LOG.info("Attempting to find TLS Cert using TLS Cert Template v2 lookup...")
if certificate in tlscert_dict['iam'][env]:
cert_name = tlscert_dict['iam'][env][certificate]
LOG.info("Found IAM TLS certificate named %s under %s using TLS Cert Template v2", certificate, env)
elif certificate in tlscert_dict['acm'][region][env]:
cert_name = tlscert_dict['acm'][region][env][certificate]
LOG.info("Found ACM TLS certificate named %s under %s in %s using TLS Cert Template v2", certificate, env,
region)
else:
LOG.error(
"Unable to find TLS certificate named %s under parent keys [ACM, IAM] %s in v2 TLS Cert Template.",
certificate, env)
return cert_name
|
[
"def",
"generate_custom_cert_name",
"(",
"env",
"=",
"''",
",",
"region",
"=",
"''",
",",
"account",
"=",
"''",
",",
"certificate",
"=",
"None",
")",
":",
"cert_name",
"=",
"None",
"template_kwargs",
"=",
"{",
"'account'",
":",
"account",
",",
"'name'",
":",
"certificate",
"}",
"# TODO: Investigate moving this to a remote API, then fallback to local file if unable to connect",
"try",
":",
"rendered_template",
"=",
"get_template",
"(",
"template_file",
"=",
"'infrastructure/iam/tlscert_naming.json.j2'",
",",
"*",
"*",
"template_kwargs",
")",
"tlscert_dict",
"=",
"json",
".",
"loads",
"(",
"rendered_template",
")",
"except",
"ForemastTemplateNotFound",
":",
"LOG",
".",
"info",
"(",
"'Unable to find TLS Cert Template...falling back to default logic...'",
")",
"return",
"cert_name",
"# TODO: Move to v1 method for check",
"try",
":",
"LOG",
".",
"info",
"(",
"\"Attempting to find TLS Cert using TLS Cert Template v1 lookup...\"",
")",
"cert_name",
"=",
"tlscert_dict",
"[",
"env",
"]",
"[",
"certificate",
"]",
"LOG",
".",
"info",
"(",
"\"Found TLS certificate named %s under %s using TLS Cert Template v1\"",
",",
"certificate",
",",
"env",
")",
"except",
"KeyError",
":",
"LOG",
".",
"error",
"(",
"\"Unable to find TLS certificate named %s under %s using v1 TLS Cert Template.\"",
",",
"certificate",
",",
"env",
")",
"# TODO: Move variable to consts",
"# TODO: move to v2 method for check",
"tls_services",
"=",
"[",
"'iam'",
",",
"'acm'",
"]",
"if",
"cert_name",
"is",
"None",
"and",
"all",
"(",
"service",
"in",
"tlscert_dict",
"for",
"service",
"in",
"tls_services",
")",
":",
"LOG",
".",
"info",
"(",
"\"Attempting to find TLS Cert using TLS Cert Template v2 lookup...\"",
")",
"if",
"certificate",
"in",
"tlscert_dict",
"[",
"'iam'",
"]",
"[",
"env",
"]",
":",
"cert_name",
"=",
"tlscert_dict",
"[",
"'iam'",
"]",
"[",
"env",
"]",
"[",
"certificate",
"]",
"LOG",
".",
"info",
"(",
"\"Found IAM TLS certificate named %s under %s using TLS Cert Template v2\"",
",",
"certificate",
",",
"env",
")",
"elif",
"certificate",
"in",
"tlscert_dict",
"[",
"'acm'",
"]",
"[",
"region",
"]",
"[",
"env",
"]",
":",
"cert_name",
"=",
"tlscert_dict",
"[",
"'acm'",
"]",
"[",
"region",
"]",
"[",
"env",
"]",
"[",
"certificate",
"]",
"LOG",
".",
"info",
"(",
"\"Found ACM TLS certificate named %s under %s in %s using TLS Cert Template v2\"",
",",
"certificate",
",",
"env",
",",
"region",
")",
"else",
":",
"LOG",
".",
"error",
"(",
"\"Unable to find TLS certificate named %s under parent keys [ACM, IAM] %s in v2 TLS Cert Template.\"",
",",
"certificate",
",",
"env",
")",
"return",
"cert_name"
] |
Generate a custom TLS Cert name based on a template.
Args:
env (str): Account environment name
region (str): AWS Region.
account (str): Account number for ARN.
certificate (str): Name of SSL certificate.
Returns:
str: Fully qualified ARN for SSL certificate.
None: Template doesn't exist.
|
[
"Generate",
"a",
"custom",
"TLS",
"Cert",
"name",
"based",
"on",
"a",
"template",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/elb/format_listeners.py#L164-L213
|
train
|
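A hedged sketch of the two template shapes the lookup accepts, as read from the code above: v1 keys certificates by environment directly, while v2 nests IAM certs per environment and ACM certs per region and environment. The ARNs below are illustrative.

v1 = {'dev': {'my-cert': 'arn:aws:iam::123:server-certificate/my-cert'}}
v2 = {
    'iam': {'dev': {'my-cert': 'arn:aws:iam::123:server-certificate/my-cert'}},
    'acm': {'us-east-1': {'dev': {'acm-cert': 'arn-for-acm-cert'}}},
}

def lookup(tlscert, env, region, name):
    try:
        return tlscert[env][name]                  # v1 shape: keyed by env
    except KeyError:
        pass
    if 'iam' in tlscert and 'acm' in tlscert:      # v2 shape
        return (tlscert['iam'].get(env, {}).get(name)
                or tlscert['acm'].get(region, {}).get(env, {}).get(name))
    return None

print(lookup(v1, 'dev', 'us-east-1', 'my-cert'))
print(lookup(v2, 'dev', 'us-east-1', 'acm-cert'))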
foremast/foremast
|
src/foremast/slacknotify/__main__.py
|
main
|
def main():
"""Send Slack notification to a configured channel."""
logging.basicConfig(format=LOGGING_FORMAT)
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
add_debug(parser)
add_app(parser)
add_env(parser)
add_properties(parser)
args = parser.parse_args()
logging.getLogger(__package__.split(".")[0]).setLevel(args.debug)
    log.debug('Parsed arguments: %s', args)
if "prod" not in args.env:
log.info('No slack message sent, not a production environment')
else:
log.info("Sending slack message, production environment")
slacknotify = SlackNotification(app=args.app, env=args.env, prop_path=args.properties)
slacknotify.post_message()
|
python
|
def main():
"""Send Slack notification to a configured channel."""
logging.basicConfig(format=LOGGING_FORMAT)
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
add_debug(parser)
add_app(parser)
add_env(parser)
add_properties(parser)
args = parser.parse_args()
logging.getLogger(__package__.split(".")[0]).setLevel(args.debug)
    log.debug('Parsed arguments: %s', args)
if "prod" not in args.env:
log.info('No slack message sent, not a production environment')
else:
log.info("Sending slack message, production environment")
slacknotify = SlackNotification(app=args.app, env=args.env, prop_path=args.properties)
slacknotify.post_message()
|
[
"def",
"main",
"(",
")",
":",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"LOGGING_FORMAT",
")",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"add_debug",
"(",
"parser",
")",
"add_app",
"(",
"parser",
")",
"add_env",
"(",
"parser",
")",
"add_properties",
"(",
"parser",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"logging",
".",
"getLogger",
"(",
"__package__",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
")",
".",
"setLevel",
"(",
"args",
".",
"debug",
")",
"log",
".",
"debug",
"(",
"'Parsed arguements: %s'",
",",
"args",
")",
"if",
"\"prod\"",
"not",
"in",
"args",
".",
"env",
":",
"log",
".",
"info",
"(",
"'No slack message sent, not a production environment'",
")",
"else",
":",
"log",
".",
"info",
"(",
"\"Sending slack message, production environment\"",
")",
"slacknotify",
"=",
"SlackNotification",
"(",
"app",
"=",
"args",
".",
"app",
",",
"env",
"=",
"args",
".",
"env",
",",
"prop_path",
"=",
"args",
".",
"properties",
")",
"slacknotify",
".",
"post_message",
"(",
")"
] |
Send Slack notification to a configured channel.
|
[
"Send",
"Slack",
"notification",
"to",
"a",
"configured",
"channel",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/slacknotify/__main__.py#L28-L49
|
train
|
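The production gate is a plain substring check, so any environment name containing 'prod' triggers a notification. A minimal sketch:

def should_notify(env):
    return 'prod' in env

for env in ['dev', 'stage', 'prod', 'prodeu']:
    print(env, should_notify(env))  # any name containing 'prod' matches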
foremast/foremast
|
src/foremast/destroyer.py
|
main
|
def main(): # noqa
"""Attempt to fully destroy AWS Resources for a Spinnaker Application."""
logging.basicConfig(format=LOGGING_FORMAT)
parser = argparse.ArgumentParser(description=main.__doc__)
add_debug(parser)
add_app(parser)
args = parser.parse_args()
if args.debug == logging.DEBUG:
logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
else:
LOG.setLevel(args.debug)
for env in ENVS:
for region in REGIONS:
LOG.info('DESTROY %s:%s', env, region)
try:
destroy_dns(app=args.app, env=env)
except botocore.exceptions.ClientError as error:
LOG.warning('DNS issue for %s in %s: %s', env, region, error)
try:
destroy_elb(app=args.app, env=env, region=region)
except SpinnakerError:
pass
try:
destroy_iam(app=args.app, env=env)
except botocore.exceptions.ClientError as error:
LOG.warning('IAM issue for %s in %s: %s', env, region, error)
try:
destroy_s3(app=args.app, env=env)
except botocore.exceptions.ClientError as error:
LOG.warning('S3 issue for %s in %s: %s', env, region, error)
try:
destroy_sg(app=args.app, env=env, region=region)
except SpinnakerError:
pass
LOG.info('Destroyed %s:%s', env, region)
LOG.info('Destruction complete.')
|
python
|
def main(): # noqa
"""Attempt to fully destroy AWS Resources for a Spinnaker Application."""
logging.basicConfig(format=LOGGING_FORMAT)
parser = argparse.ArgumentParser(description=main.__doc__)
add_debug(parser)
add_app(parser)
args = parser.parse_args()
if args.debug == logging.DEBUG:
logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
else:
LOG.setLevel(args.debug)
for env in ENVS:
for region in REGIONS:
LOG.info('DESTROY %s:%s', env, region)
try:
destroy_dns(app=args.app, env=env)
except botocore.exceptions.ClientError as error:
LOG.warning('DNS issue for %s in %s: %s', env, region, error)
try:
destroy_elb(app=args.app, env=env, region=region)
except SpinnakerError:
pass
try:
destroy_iam(app=args.app, env=env)
except botocore.exceptions.ClientError as error:
LOG.warning('IAM issue for %s in %s: %s', env, region, error)
try:
destroy_s3(app=args.app, env=env)
except botocore.exceptions.ClientError as error:
LOG.warning('S3 issue for %s in %s: %s', env, region, error)
try:
destroy_sg(app=args.app, env=env, region=region)
except SpinnakerError:
pass
LOG.info('Destroyed %s:%s', env, region)
LOG.info('Destruction complete.')
|
[
"def",
"main",
"(",
")",
":",
"# noqa",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"LOGGING_FORMAT",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"main",
".",
"__doc__",
")",
"add_debug",
"(",
"parser",
")",
"add_app",
"(",
"parser",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"debug",
"==",
"logging",
".",
"DEBUG",
":",
"logging",
".",
"getLogger",
"(",
"__package__",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
".",
"setLevel",
"(",
"args",
".",
"debug",
")",
"else",
":",
"LOG",
".",
"setLevel",
"(",
"args",
".",
"debug",
")",
"for",
"env",
"in",
"ENVS",
":",
"for",
"region",
"in",
"REGIONS",
":",
"LOG",
".",
"info",
"(",
"'DESTROY %s:%s'",
",",
"env",
",",
"region",
")",
"try",
":",
"destroy_dns",
"(",
"app",
"=",
"args",
".",
"app",
",",
"env",
"=",
"env",
")",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
"as",
"error",
":",
"LOG",
".",
"warning",
"(",
"'DNS issue for %s in %s: %s'",
",",
"env",
",",
"region",
",",
"error",
")",
"try",
":",
"destroy_elb",
"(",
"app",
"=",
"args",
".",
"app",
",",
"env",
"=",
"env",
",",
"region",
"=",
"region",
")",
"except",
"SpinnakerError",
":",
"pass",
"try",
":",
"destroy_iam",
"(",
"app",
"=",
"args",
".",
"app",
",",
"env",
"=",
"env",
")",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
"as",
"error",
":",
"LOG",
".",
"warning",
"(",
"'IAM issue for %s in %s: %s'",
",",
"env",
",",
"region",
",",
"error",
")",
"try",
":",
"destroy_s3",
"(",
"app",
"=",
"args",
".",
"app",
",",
"env",
"=",
"env",
")",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
"as",
"error",
":",
"LOG",
".",
"warning",
"(",
"'S3 issue for %s in %s: %s'",
",",
"env",
",",
"region",
",",
"error",
")",
"try",
":",
"destroy_sg",
"(",
"app",
"=",
"args",
".",
"app",
",",
"env",
"=",
"env",
",",
"region",
"=",
"region",
")",
"except",
"SpinnakerError",
":",
"pass",
"LOG",
".",
"info",
"(",
"'Destroyed %s:%s'",
",",
"env",
",",
"region",
")",
"LOG",
".",
"info",
"(",
"'Destruction complete.'",
")"
] |
Attempt to fully destroy AWS Resources for a Spinnaker Application.
|
[
"Attempt",
"to",
"fully",
"destroy",
"AWS",
"Resources",
"for",
"a",
"Spinnaker",
"Application",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/destroyer.py#L34-L79
|
train
|
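A minimal sketch of the best-effort teardown pattern above: each resource type is destroyed independently, and a failure in one step is logged (or swallowed) instead of aborting the env/region sweep. RuntimeError stands in for the botocore/Spinnaker exceptions the real code catches.

def fail():
    raise RuntimeError('load balancer not found')

def destroy_all(steps):
    for name, step in steps:
        try:
            step()
        except RuntimeError as error:
            print('%s issue: %s' % (name, error))

destroy_all([('DNS', lambda: None), ('ELB', fail)])  # only the ELB issue prints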
foremast/foremast
|
src/foremast/pipeline/construct_pipeline_block.py
|
check_provider_healthcheck
|
def check_provider_healthcheck(settings, default_provider='Discovery'):
"""Set Provider Health Check when specified.
Returns:
collections.namedtuple: **ProviderHealthCheck** with attributes:
* providers (list): Providers set to use native Health Check.
* has_healthcheck (bool): If any native Health Checks requested.
"""
ProviderHealthCheck = collections.namedtuple('ProviderHealthCheck', ['providers', 'has_healthcheck'])
eureka_enabled = settings['app']['eureka_enabled']
providers = settings['asg']['provider_healthcheck']
LOG.debug('Template defined Health Check Providers: %s', providers)
health_check_providers = []
has_healthcheck = False
normalized_default_provider = default_provider.capitalize()
if eureka_enabled:
LOG.info('Eureka enabled, enabling default Provider Health Check: %s', normalized_default_provider)
for provider, active in providers.items():
if provider.lower() == normalized_default_provider.lower():
providers[provider] = True
LOG.debug('Override defined Provider Health Check: %s -> %s', active, providers[provider])
break
else:
LOG.debug('Adding default Provider Health Check: %s', normalized_default_provider)
providers[normalized_default_provider] = True
for provider, active in providers.items():
if active:
health_check_providers.append(provider.capitalize())
LOG.info('Provider healthchecks: %s', health_check_providers)
if health_check_providers:
has_healthcheck = True
return ProviderHealthCheck(providers=health_check_providers, has_healthcheck=has_healthcheck)
|
python
|
def check_provider_healthcheck(settings, default_provider='Discovery'):
"""Set Provider Health Check when specified.
Returns:
collections.namedtuple: **ProviderHealthCheck** with attributes:
* providers (list): Providers set to use native Health Check.
* has_healthcheck (bool): If any native Health Checks requested.
"""
ProviderHealthCheck = collections.namedtuple('ProviderHealthCheck', ['providers', 'has_healthcheck'])
eureka_enabled = settings['app']['eureka_enabled']
providers = settings['asg']['provider_healthcheck']
LOG.debug('Template defined Health Check Providers: %s', providers)
health_check_providers = []
has_healthcheck = False
normalized_default_provider = default_provider.capitalize()
if eureka_enabled:
LOG.info('Eureka enabled, enabling default Provider Health Check: %s', normalized_default_provider)
for provider, active in providers.items():
if provider.lower() == normalized_default_provider.lower():
providers[provider] = True
LOG.debug('Override defined Provider Health Check: %s -> %s', active, providers[provider])
break
else:
LOG.debug('Adding default Provider Health Check: %s', normalized_default_provider)
providers[normalized_default_provider] = True
for provider, active in providers.items():
if active:
health_check_providers.append(provider.capitalize())
LOG.info('Provider healthchecks: %s', health_check_providers)
if health_check_providers:
has_healthcheck = True
return ProviderHealthCheck(providers=health_check_providers, has_healthcheck=has_healthcheck)
|
[
"def",
"check_provider_healthcheck",
"(",
"settings",
",",
"default_provider",
"=",
"'Discovery'",
")",
":",
"ProviderHealthCheck",
"=",
"collections",
".",
"namedtuple",
"(",
"'ProviderHealthCheck'",
",",
"[",
"'providers'",
",",
"'has_healthcheck'",
"]",
")",
"eureka_enabled",
"=",
"settings",
"[",
"'app'",
"]",
"[",
"'eureka_enabled'",
"]",
"providers",
"=",
"settings",
"[",
"'asg'",
"]",
"[",
"'provider_healthcheck'",
"]",
"LOG",
".",
"debug",
"(",
"'Template defined Health Check Providers: %s'",
",",
"providers",
")",
"health_check_providers",
"=",
"[",
"]",
"has_healthcheck",
"=",
"False",
"normalized_default_provider",
"=",
"default_provider",
".",
"capitalize",
"(",
")",
"if",
"eureka_enabled",
":",
"LOG",
".",
"info",
"(",
"'Eureka enabled, enabling default Provider Health Check: %s'",
",",
"normalized_default_provider",
")",
"for",
"provider",
",",
"active",
"in",
"providers",
".",
"items",
"(",
")",
":",
"if",
"provider",
".",
"lower",
"(",
")",
"==",
"normalized_default_provider",
".",
"lower",
"(",
")",
":",
"providers",
"[",
"provider",
"]",
"=",
"True",
"LOG",
".",
"debug",
"(",
"'Override defined Provider Health Check: %s -> %s'",
",",
"active",
",",
"providers",
"[",
"provider",
"]",
")",
"break",
"else",
":",
"LOG",
".",
"debug",
"(",
"'Adding default Provider Health Check: %s'",
",",
"normalized_default_provider",
")",
"providers",
"[",
"normalized_default_provider",
"]",
"=",
"True",
"for",
"provider",
",",
"active",
"in",
"providers",
".",
"items",
"(",
")",
":",
"if",
"active",
":",
"health_check_providers",
".",
"append",
"(",
"provider",
".",
"capitalize",
"(",
")",
")",
"LOG",
".",
"info",
"(",
"'Provider healthchecks: %s'",
",",
"health_check_providers",
")",
"if",
"health_check_providers",
":",
"has_healthcheck",
"=",
"True",
"return",
"ProviderHealthCheck",
"(",
"providers",
"=",
"health_check_providers",
",",
"has_healthcheck",
"=",
"has_healthcheck",
")"
] |
Set Provider Health Check when specified.
Returns:
collections.namedtuple: **ProviderHealthCheck** with attributes:
* providers (list): Providers set to use native Health Check.
* has_healthcheck (bool): If any native Health Checks requested.
|
[
"Set",
"Provider",
"Health",
"Check",
"when",
"specified",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/pipeline/construct_pipeline_block.py#L29-L70
|
train
|
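A hedged usage sketch for check_provider_healthcheck above. The import path assumes the src/ layout shown in the path field maps directly onto the installed foremast package, and the settings dict carries only the keys the function actually reads.

from foremast.pipeline.construct_pipeline_block import check_provider_healthcheck

settings = {
    'app': {'eureka_enabled': True},
    'asg': {'provider_healthcheck': {'amazon': False}},
}

health = check_provider_healthcheck(settings, default_provider='Discovery')
# Eureka is enabled and no template-defined provider matched the default,
# so 'Discovery' is switched on by the for/else branch:
assert health.has_healthcheck is True
assert health.providers == ['Discovery']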
foremast/foremast
|
src/foremast/pipeline/construct_pipeline_block.py
|
get_template_name
|
def get_template_name(env, pipeline_type):
"""Generates the correct template name based on pipeline type
Args:
env (str): environment to generate templates for
pipeline_type (str): Type of pipeline like ec2 or lambda
Returns:
str: Name of template
"""
pipeline_base = 'pipeline/pipeline'
template_name_format = '{pipeline_base}'
if env.startswith('prod'):
template_name_format = template_name_format + '_{env}'
else:
template_name_format = template_name_format + '_stages'
if pipeline_type != 'ec2':
template_name_format = template_name_format + '_{pipeline_type}'
template_name_format = template_name_format + '.json.j2'
template_name = template_name_format.format(pipeline_base=pipeline_base, env=env, pipeline_type=pipeline_type)
return template_name
|
python
|
def get_template_name(env, pipeline_type):
"""Generates the correct template name based on pipeline type
Args:
env (str): environment to generate templates for
pipeline_type (str): Type of pipeline like ec2 or lambda
Returns:
str: Name of template
"""
pipeline_base = 'pipeline/pipeline'
template_name_format = '{pipeline_base}'
if env.startswith('prod'):
template_name_format = template_name_format + '_{env}'
else:
template_name_format = template_name_format + '_stages'
if pipeline_type != 'ec2':
template_name_format = template_name_format + '_{pipeline_type}'
template_name_format = template_name_format + '.json.j2'
template_name = template_name_format.format(pipeline_base=pipeline_base, env=env, pipeline_type=pipeline_type)
return template_name
|
[
"def",
"get_template_name",
"(",
"env",
",",
"pipeline_type",
")",
":",
"pipeline_base",
"=",
"'pipeline/pipeline'",
"template_name_format",
"=",
"'{pipeline_base}'",
"if",
"env",
".",
"startswith",
"(",
"'prod'",
")",
":",
"template_name_format",
"=",
"template_name_format",
"+",
"'_{env}'",
"else",
":",
"template_name_format",
"=",
"template_name_format",
"+",
"'_stages'",
"if",
"pipeline_type",
"!=",
"'ec2'",
":",
"template_name_format",
"=",
"template_name_format",
"+",
"'_{pipeline_type}'",
"template_name_format",
"=",
"template_name_format",
"+",
"'.json.j2'",
"template_name",
"=",
"template_name_format",
".",
"format",
"(",
"pipeline_base",
"=",
"pipeline_base",
",",
"env",
"=",
"env",
",",
"pipeline_type",
"=",
"pipeline_type",
")",
"return",
"template_name"
] |
Generates the correct template name based on pipeline type
Args:
env (str): environment to generate templates for
pipeline_type (str): Type of pipeline like ec2 or lambda
Returns:
str: Name of template
|
[
"Generates",
"the",
"correct",
"template",
"name",
"based",
"on",
"pipeline",
"type"
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/pipeline/construct_pipeline_block.py#L73-L96
|
train
|
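The branching in get_template_name above is easiest to see from a few concrete calls; a hedged sketch, again assuming the module path mirrors the src/ layout.

from foremast.pipeline.construct_pipeline_block import get_template_name

# Non-prod ec2 pipelines share the generic stages template:
assert get_template_name('dev', 'ec2') == 'pipeline/pipeline_stages.json.j2'
# Environments starting with 'prod' get an env-specific template:
assert get_template_name('prod', 'ec2') == 'pipeline/pipeline_prod.json.j2'
# Non-ec2 types additionally append the pipeline type:
assert get_template_name('stage', 'lambda') == 'pipeline/pipeline_stages_lambda.json.j2'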
foremast/foremast
|
src/foremast/pipeline/construct_pipeline_block.py
|
ec2_pipeline_setup
|
def ec2_pipeline_setup(
generated=None,
project='',
settings=None,
env='',
pipeline_type='',
region='',
region_subnets=None,
):
"""Handles ec2 pipeline data setup
Args:
generated (gogoutils.Generator): Generated naming formats.
project (str): Group name of application
settings (dict): Environment settings from configurations.
env (str): Deploy environment name, e.g. dev, stage, prod.
pipeline_type (str): Type of Foremast Pipeline to configure.
region (str): AWS Region to deploy to.
region_subnets (dict): Subnets for a Region, e.g.
{'us-west-2': ['us-west-2a', 'us-west-2b', 'us-west-2c']}.
Returns:
dict: Updated settings to pass to templates for EC2 info
"""
data = copy.deepcopy(settings)
user_data = generate_encoded_user_data(
env=env,
region=region,
generated=generated,
group_name=project,
pipeline_type=pipeline_type,
)
# Use different variable to keep template simple
instance_security_groups = sorted(DEFAULT_EC2_SECURITYGROUPS[env])
instance_security_groups.append(generated.security_group_app)
instance_security_groups.extend(settings['security_group']['instance_extras'])
instance_security_groups = remove_duplicate_sg(instance_security_groups)
LOG.info('Instance security groups to attach: %s', instance_security_groups)
# check if scaling policy exists
if settings['asg']['scaling_policy']:
scalingpolicy = True
LOG.info('Found scaling policy')
else:
scalingpolicy = False
LOG.info('No scaling policy found')
if settings['app']['eureka_enabled']:
elb = []
else:
elb = [generated.elb_app]
LOG.info('Attaching the following ELB: %s', elb)
health_checks = check_provider_healthcheck(settings)
# Use EC2 Health Check for DEV or Eureka enabled
if env == 'dev' or settings['app']['eureka_enabled']:
data['asg'].update({'hc_type': 'EC2'})
LOG.info('Switching health check type to: EC2')
# Aggregate the default grace period, plus the exposed app_grace_period
# to allow per repo extension of asg healthcheck grace period
hc_grace_period = data['asg'].get('hc_grace_period')
app_grace_period = data['asg'].get('app_grace_period')
grace_period = hc_grace_period + app_grace_period
# TODO: Migrate the naming logic to an external library to make it easier
# to update in the future. Gogo-Utils looks like a good candidate
ssh_keypair = data['asg'].get('ssh_keypair', None)
if not ssh_keypair:
ssh_keypair = '{0}_{1}_default'.format(env, region)
LOG.info('SSH keypair (%s) used', ssh_keypair)
if settings['app']['canary']:
canary_user_data = generate_encoded_user_data(
env=env,
region=region,
generated=generated,
group_name=project,
canary=True,
)
data['app'].update({
'canary_encoded_user_data': canary_user_data,
})
data['asg'].update({
'hc_type': data['asg'].get('hc_type').upper(),
'hc_grace_period': grace_period,
'ssh_keypair': ssh_keypair,
'provider_healthcheck': json.dumps(health_checks.providers),
'enable_public_ips': json.dumps(settings['asg']['enable_public_ips']),
'has_provider_healthcheck': health_checks.has_healthcheck,
'asg_whitelist': ASG_WHITELIST,
})
data['app'].update({
'az_dict': json.dumps(region_subnets),
'encoded_user_data': user_data,
'instance_security_groups': json.dumps(instance_security_groups),
'elb': json.dumps(elb),
'scalingpolicy': scalingpolicy,
})
return data
|
python
|
def ec2_pipeline_setup(
generated=None,
project='',
settings=None,
env='',
pipeline_type='',
region='',
region_subnets=None,
):
"""Handles ec2 pipeline data setup
Args:
generated (gogoutils.Generator): Generated naming formats.
project (str): Group name of application
settings (dict): Environment settings from configurations.
env (str): Deploy environment name, e.g. dev, stage, prod.
pipeline_type (str): Type of Foremast Pipeline to configure.
region (str): AWS Region to deploy to.
region_subnets (dict): Subnets for a Region, e.g.
{'us-west-2': ['us-west-2a', 'us-west-2b', 'us-west-2c']}.
Returns:
dict: Updated settings to pass to templates for EC2 info
"""
data = copy.deepcopy(settings)
user_data = generate_encoded_user_data(
env=env,
region=region,
generated=generated,
group_name=project,
pipeline_type=pipeline_type,
)
# Use different variable to keep template simple
instance_security_groups = sorted(DEFAULT_EC2_SECURITYGROUPS[env])
instance_security_groups.append(generated.security_group_app)
instance_security_groups.extend(settings['security_group']['instance_extras'])
instance_security_groups = remove_duplicate_sg(instance_security_groups)
LOG.info('Instance security groups to attach: %s', instance_security_groups)
# check if scaling policy exists
if settings['asg']['scaling_policy']:
scalingpolicy = True
LOG.info('Found scaling policy')
else:
scalingpolicy = False
LOG.info('No scaling policy found')
if settings['app']['eureka_enabled']:
elb = []
else:
elb = [generated.elb_app]
LOG.info('Attaching the following ELB: %s', elb)
health_checks = check_provider_healthcheck(settings)
# Use EC2 Health Check for DEV or Eureka enabled
if env == 'dev' or settings['app']['eureka_enabled']:
data['asg'].update({'hc_type': 'EC2'})
LOG.info('Switching health check type to: EC2')
# Aggregate the default grace period, plus the exposed app_grace_period
# to allow per repo extension of asg healthcheck grace period
hc_grace_period = data['asg'].get('hc_grace_period')
app_grace_period = data['asg'].get('app_grace_period')
grace_period = hc_grace_period + app_grace_period
# TODO: Migrate the naming logic to an external library to make it easier
# to update in the future. Gogo-Utils looks like a good candidate
ssh_keypair = data['asg'].get('ssh_keypair', None)
if not ssh_keypair:
ssh_keypair = '{0}_{1}_default'.format(env, region)
LOG.info('SSH keypair (%s) used', ssh_keypair)
if settings['app']['canary']:
canary_user_data = generate_encoded_user_data(
env=env,
region=region,
generated=generated,
group_name=project,
canary=True,
)
data['app'].update({
'canary_encoded_user_data': canary_user_data,
})
data['asg'].update({
'hc_type': data['asg'].get('hc_type').upper(),
'hc_grace_period': grace_period,
'ssh_keypair': ssh_keypair,
'provider_healthcheck': json.dumps(health_checks.providers),
'enable_public_ips': json.dumps(settings['asg']['enable_public_ips']),
'has_provider_healthcheck': health_checks.has_healthcheck,
'asg_whitelist': ASG_WHITELIST,
})
data['app'].update({
'az_dict': json.dumps(region_subnets),
'encoded_user_data': user_data,
'instance_security_groups': json.dumps(instance_security_groups),
'elb': json.dumps(elb),
'scalingpolicy': scalingpolicy,
})
return data
|
[
"def",
"ec2_pipeline_setup",
"(",
"generated",
"=",
"None",
",",
"project",
"=",
"''",
",",
"settings",
"=",
"None",
",",
"env",
"=",
"''",
",",
"pipeline_type",
"=",
"''",
",",
"region",
"=",
"''",
",",
"region_subnets",
"=",
"None",
",",
")",
":",
"data",
"=",
"copy",
".",
"deepcopy",
"(",
"settings",
")",
"user_data",
"=",
"generate_encoded_user_data",
"(",
"env",
"=",
"env",
",",
"region",
"=",
"region",
",",
"generated",
"=",
"generated",
",",
"group_name",
"=",
"project",
",",
"pipeline_type",
"=",
"pipeline_type",
",",
")",
"# Use different variable to keep template simple",
"instance_security_groups",
"=",
"sorted",
"(",
"DEFAULT_EC2_SECURITYGROUPS",
"[",
"env",
"]",
")",
"instance_security_groups",
".",
"append",
"(",
"generated",
".",
"security_group_app",
")",
"instance_security_groups",
".",
"extend",
"(",
"settings",
"[",
"'security_group'",
"]",
"[",
"'instance_extras'",
"]",
")",
"instance_security_groups",
"=",
"remove_duplicate_sg",
"(",
"instance_security_groups",
")",
"LOG",
".",
"info",
"(",
"'Instance security groups to attach: %s'",
",",
"instance_security_groups",
")",
"# check if scaling policy exists",
"if",
"settings",
"[",
"'asg'",
"]",
"[",
"'scaling_policy'",
"]",
":",
"scalingpolicy",
"=",
"True",
"LOG",
".",
"info",
"(",
"'Found scaling policy'",
")",
"else",
":",
"scalingpolicy",
"=",
"False",
"LOG",
".",
"info",
"(",
"'No scaling policy found'",
")",
"if",
"settings",
"[",
"'app'",
"]",
"[",
"'eureka_enabled'",
"]",
":",
"elb",
"=",
"[",
"]",
"else",
":",
"elb",
"=",
"[",
"generated",
".",
"elb_app",
"]",
"LOG",
".",
"info",
"(",
"'Attaching the following ELB: %s'",
",",
"elb",
")",
"health_checks",
"=",
"check_provider_healthcheck",
"(",
"settings",
")",
"# Use EC2 Health Check for DEV or Eureka enabled",
"if",
"env",
"==",
"'dev'",
"or",
"settings",
"[",
"'app'",
"]",
"[",
"'eureka_enabled'",
"]",
":",
"data",
"[",
"'asg'",
"]",
".",
"update",
"(",
"{",
"'hc_type'",
":",
"'EC2'",
"}",
")",
"LOG",
".",
"info",
"(",
"'Switching health check type to: EC2'",
")",
"# Aggregate the default grace period, plus the exposed app_grace_period",
"# to allow per repo extension of asg healthcheck grace period",
"hc_grace_period",
"=",
"data",
"[",
"'asg'",
"]",
".",
"get",
"(",
"'hc_grace_period'",
")",
"app_grace_period",
"=",
"data",
"[",
"'asg'",
"]",
".",
"get",
"(",
"'app_grace_period'",
")",
"grace_period",
"=",
"hc_grace_period",
"+",
"app_grace_period",
"# TODO: Migrate the naming logic to an external library to make it easier",
"# to update in the future. Gogo-Utils looks like a good candidate",
"ssh_keypair",
"=",
"data",
"[",
"'asg'",
"]",
".",
"get",
"(",
"'ssh_keypair'",
",",
"None",
")",
"if",
"not",
"ssh_keypair",
":",
"ssh_keypair",
"=",
"'{0}_{1}_default'",
".",
"format",
"(",
"env",
",",
"region",
")",
"LOG",
".",
"info",
"(",
"'SSH keypair (%s) used'",
",",
"ssh_keypair",
")",
"if",
"settings",
"[",
"'app'",
"]",
"[",
"'canary'",
"]",
":",
"canary_user_data",
"=",
"generate_encoded_user_data",
"(",
"env",
"=",
"env",
",",
"region",
"=",
"region",
",",
"generated",
"=",
"generated",
",",
"group_name",
"=",
"project",
",",
"canary",
"=",
"True",
",",
")",
"data",
"[",
"'app'",
"]",
".",
"update",
"(",
"{",
"'canary_encoded_user_data'",
":",
"canary_user_data",
",",
"}",
")",
"data",
"[",
"'asg'",
"]",
".",
"update",
"(",
"{",
"'hc_type'",
":",
"data",
"[",
"'asg'",
"]",
".",
"get",
"(",
"'hc_type'",
")",
".",
"upper",
"(",
")",
",",
"'hc_grace_period'",
":",
"grace_period",
",",
"'ssh_keypair'",
":",
"ssh_keypair",
",",
"'provider_healthcheck'",
":",
"json",
".",
"dumps",
"(",
"health_checks",
".",
"providers",
")",
",",
"'enable_public_ips'",
":",
"json",
".",
"dumps",
"(",
"settings",
"[",
"'asg'",
"]",
"[",
"'enable_public_ips'",
"]",
")",
",",
"'has_provider_healthcheck'",
":",
"health_checks",
".",
"has_healthcheck",
",",
"'asg_whitelist'",
":",
"ASG_WHITELIST",
",",
"}",
")",
"data",
"[",
"'app'",
"]",
".",
"update",
"(",
"{",
"'az_dict'",
":",
"json",
".",
"dumps",
"(",
"region_subnets",
")",
",",
"'encoded_user_data'",
":",
"user_data",
",",
"'instance_security_groups'",
":",
"json",
".",
"dumps",
"(",
"instance_security_groups",
")",
",",
"'elb'",
":",
"json",
".",
"dumps",
"(",
"elb",
")",
",",
"'scalingpolicy'",
":",
"scalingpolicy",
",",
"}",
")",
"return",
"data"
] |
Handles ec2 pipeline data setup
Args:
generated (gogoutils.Generator): Generated naming formats.
project (str): Group name of application
settings (dict): Environment settings from configurations.
env (str): Deploy environment name, e.g. dev, stage, prod.
pipeline_type (str): Type of Foremast Pipeline to configure.
region (str): AWS Region to deploy to.
region_subnets (dict): Subnets for a Region, e.g.
{'us-west-2': ['us-west-2a', 'us-west-2b', 'us-west-2c']}.
Returns:
dict: Updated settings to pass to templates for EC2 info
|
[
"Handles",
"ec2",
"pipeline",
"data",
"setup"
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/pipeline/construct_pipeline_block.py#L169-L275
|
train
|
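Two derivations inside ec2_pipeline_setup above are worth isolating: the ASG health-check grace period is the sum of hc_grace_period and app_grace_period, and the SSH keypair falls back to '{env}_{region}_default' when unset. A minimal sketch under the same dict shape (values are illustrative):

asg = {'hc_grace_period': 180, 'app_grace_period': 60, 'ssh_keypair': None}
env, region = 'dev', 'us-east-1'

# Aggregate default and per-repo grace periods, as in the function body:
grace_period = asg['hc_grace_period'] + asg['app_grace_period']
# Fall back to the conventional keypair name when none is configured:
ssh_keypair = asg.get('ssh_keypair') or '{0}_{1}_default'.format(env, region)

assert grace_period == 240
assert ssh_keypair == 'dev_us-east-1_default'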
foremast/foremast
|
src/foremast/pipeline/create_pipeline_manual.py
|
SpinnakerPipelineManual.create_pipeline
|
def create_pipeline(self):
"""Use JSON files to create Pipelines."""
pipelines = self.settings['pipeline']['pipeline_files']
self.log.info('Uploading manual Pipelines: %s', pipelines)
lookup = FileLookup(git_short=self.generated.gitlab()['main'], runway_dir=self.runway_dir)
for json_file in pipelines:
json_dict = lookup.json(filename=json_file)
json_dict.setdefault('application', self.app_name)
json_dict.setdefault('name', normalize_pipeline_name(name=json_file))
json_dict.setdefault('id', get_pipeline_id(app=json_dict['application'], name=json_dict['name']))
self.post_pipeline(json_dict)
return True
|
python
|
def create_pipeline(self):
"""Use JSON files to create Pipelines."""
pipelines = self.settings['pipeline']['pipeline_files']
self.log.info('Uploading manual Pipelines: %s', pipelines)
lookup = FileLookup(git_short=self.generated.gitlab()['main'], runway_dir=self.runway_dir)
for json_file in pipelines:
json_dict = lookup.json(filename=json_file)
json_dict.setdefault('application', self.app_name)
json_dict.setdefault('name', normalize_pipeline_name(name=json_file))
json_dict.setdefault('id', get_pipeline_id(app=json_dict['application'], name=json_dict['name']))
self.post_pipeline(json_dict)
return True
|
[
"def",
"create_pipeline",
"(",
"self",
")",
":",
"pipelines",
"=",
"self",
".",
"settings",
"[",
"'pipeline'",
"]",
"[",
"'pipeline_files'",
"]",
"self",
".",
"log",
".",
"info",
"(",
"'Uploading manual Pipelines: %s'",
",",
"pipelines",
")",
"lookup",
"=",
"FileLookup",
"(",
"git_short",
"=",
"self",
".",
"generated",
".",
"gitlab",
"(",
")",
"[",
"'main'",
"]",
",",
"runway_dir",
"=",
"self",
".",
"runway_dir",
")",
"for",
"json_file",
"in",
"pipelines",
":",
"json_dict",
"=",
"lookup",
".",
"json",
"(",
"filename",
"=",
"json_file",
")",
"json_dict",
".",
"setdefault",
"(",
"'application'",
",",
"self",
".",
"app_name",
")",
"json_dict",
".",
"setdefault",
"(",
"'name'",
",",
"normalize_pipeline_name",
"(",
"name",
"=",
"json_file",
")",
")",
"json_dict",
".",
"setdefault",
"(",
"'id'",
",",
"get_pipeline_id",
"(",
"app",
"=",
"json_dict",
"[",
"'application'",
"]",
",",
"name",
"=",
"json_dict",
"[",
"'name'",
"]",
")",
")",
"self",
".",
"post_pipeline",
"(",
"json_dict",
")",
"return",
"True"
] |
Use JSON files to create Pipelines.
|
[
"Use",
"JSON",
"files",
"to",
"create",
"Pipelines",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/pipeline/create_pipeline_manual.py#L25-L42
|
train
|
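create_pipeline above only fills in 'application', 'name', and 'id' when the pipeline JSON omits them; values supplied in the file always win. A standalone sketch of that setdefault pattern with illustrative values:

json_dict = {'stages': [], 'name': 'from-file'}  # as loaded from a pipeline file

json_dict.setdefault('application', 'exampleapp')  # absent key: filled in
json_dict.setdefault('name', 'normalized-name')    # present key: untouched

assert json_dict['application'] == 'exampleapp'
assert json_dict['name'] == 'from-file'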
foremast/foremast
|
src/foremast/pipeline/__main__.py
|
main
|
def main():
"""Creates a pipeline in Spinnaker"""
logging.basicConfig(format=LOGGING_FORMAT)
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
add_debug(parser)
add_app(parser)
add_properties(parser)
parser.add_argument('-b', '--base', help='Base AMI name to use, e.g. fedora, tomcat')
parser.add_argument("--triggerjob", help="The jenkins job to monitor for pipeline triggering", required=True)
parser.add_argument('--onetime', required=False, choices=ENVS, help='Onetime deployment environment')
parser.add_argument(
'-t', '--type', dest='type', required=False, default='ec2', help='Deployment type, e.g. ec2, lambda')
args = parser.parse_args()
if args.base and '"' in args.base:
args.base = args.base.strip('"')
logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
log.debug('Parsed arguments: %s', args)
if args.onetime:
spinnakerapps = SpinnakerPipelineOnetime(
app=args.app, onetime=args.onetime, trigger_job=args.triggerjob, prop_path=args.properties, base=args.base)
spinnakerapps.create_pipeline()
else:
if args.type == "ec2":
spinnakerapps = SpinnakerPipeline(
app=args.app, trigger_job=args.triggerjob, prop_path=args.properties, base=args.base)
spinnakerapps.create_pipeline()
elif args.type == "lambda":
spinnakerapps = SpinnakerPipelineLambda(
app=args.app, trigger_job=args.triggerjob, prop_path=args.properties, base=args.base)
spinnakerapps.create_pipeline()
elif args.type == "s3":
spinnakerapps = SpinnakerPipelineS3(
app=args.app, trigger_job=args.triggerjob, prop_path=args.properties, base=args.base)
spinnakerapps.create_pipeline()
|
python
|
def main():
"""Creates a pipeline in Spinnaker"""
logging.basicConfig(format=LOGGING_FORMAT)
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
add_debug(parser)
add_app(parser)
add_properties(parser)
parser.add_argument('-b', '--base', help='Base AMI name to use, e.g. fedora, tomcat')
parser.add_argument("--triggerjob", help="The jenkins job to monitor for pipeline triggering", required=True)
parser.add_argument('--onetime', required=False, choices=ENVS, help='Onetime deployment environment')
parser.add_argument(
'-t', '--type', dest='type', required=False, default='ec2', help='Deployment type, e.g. ec2, lambda')
args = parser.parse_args()
if args.base and '"' in args.base:
args.base = args.base.strip('"')
logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
log.debug('Parsed arguments: %s', args)
if args.onetime:
spinnakerapps = SpinnakerPipelineOnetime(
app=args.app, onetime=args.onetime, trigger_job=args.triggerjob, prop_path=args.properties, base=args.base)
spinnakerapps.create_pipeline()
else:
if args.type == "ec2":
spinnakerapps = SpinnakerPipeline(
app=args.app, trigger_job=args.triggerjob, prop_path=args.properties, base=args.base)
spinnakerapps.create_pipeline()
elif args.type == "lambda":
spinnakerapps = SpinnakerPipelineLambda(
app=args.app, trigger_job=args.triggerjob, prop_path=args.properties, base=args.base)
spinnakerapps.create_pipeline()
elif args.type == "s3":
spinnakerapps = SpinnakerPipelineS3(
app=args.app, trigger_job=args.triggerjob, prop_path=args.properties, base=args.base)
spinnakerapps.create_pipeline()
|
[
"def",
"main",
"(",
")",
":",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"LOGGING_FORMAT",
")",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"add_debug",
"(",
"parser",
")",
"add_app",
"(",
"parser",
")",
"add_properties",
"(",
"parser",
")",
"parser",
".",
"add_argument",
"(",
"'-b'",
",",
"'--base'",
",",
"help",
"=",
"'Base AMI name to use, e.g. fedora, tomcat'",
")",
"parser",
".",
"add_argument",
"(",
"\"--triggerjob\"",
",",
"help",
"=",
"\"The jenkins job to monitor for pipeline triggering\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"'--onetime'",
",",
"required",
"=",
"False",
",",
"choices",
"=",
"ENVS",
",",
"help",
"=",
"'Onetime deployment environment'",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--type'",
",",
"dest",
"=",
"'type'",
",",
"required",
"=",
"False",
",",
"default",
"=",
"'ec2'",
",",
"help",
"=",
"'Deployment type, e.g. ec2, lambda'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"base",
"and",
"'\"'",
"in",
"args",
".",
"base",
":",
"args",
".",
"base",
"=",
"args",
".",
"base",
".",
"strip",
"(",
"'\"'",
")",
"logging",
".",
"getLogger",
"(",
"__package__",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
".",
"setLevel",
"(",
"args",
".",
"debug",
")",
"log",
".",
"debug",
"(",
"'Parsed arguments: %s'",
",",
"args",
")",
"if",
"args",
".",
"onetime",
":",
"spinnakerapps",
"=",
"SpinnakerPipelineOnetime",
"(",
"app",
"=",
"args",
".",
"app",
",",
"onetime",
"=",
"args",
".",
"onetime",
",",
"trigger_job",
"=",
"args",
".",
"triggerjob",
",",
"prop_path",
"=",
"args",
".",
"properties",
",",
"base",
"=",
"args",
".",
"base",
")",
"spinnakerapps",
".",
"create_pipeline",
"(",
")",
"else",
":",
"if",
"args",
".",
"type",
"==",
"\"ec2\"",
":",
"spinnakerapps",
"=",
"SpinnakerPipeline",
"(",
"app",
"=",
"args",
".",
"app",
",",
"trigger_job",
"=",
"args",
".",
"triggerjob",
",",
"prop_path",
"=",
"args",
".",
"properties",
",",
"base",
"=",
"args",
".",
"base",
")",
"spinnakerapps",
".",
"create_pipeline",
"(",
")",
"elif",
"args",
".",
"type",
"==",
"\"lambda\"",
":",
"spinnakerapps",
"=",
"SpinnakerPipelineLambda",
"(",
"app",
"=",
"args",
".",
"app",
",",
"trigger_job",
"=",
"args",
".",
"triggerjob",
",",
"prop_path",
"=",
"args",
".",
"properties",
",",
"base",
"=",
"args",
".",
"base",
")",
"spinnakerapps",
".",
"create_pipeline",
"(",
")",
"elif",
"args",
".",
"type",
"==",
"\"s3\"",
":",
"spinnakerapps",
"=",
"SpinnakerPipelineS3",
"(",
"app",
"=",
"args",
".",
"app",
",",
"trigger_job",
"=",
"args",
".",
"triggerjob",
",",
"prop_path",
"=",
"args",
".",
"properties",
",",
"base",
"=",
"args",
".",
"base",
")",
"spinnakerapps",
".",
"create_pipeline",
"(",
")"
] |
Creates a pipeline in Spinnaker
|
[
"Creates",
"a",
"pipeline",
"in",
"Spinnaker"
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/pipeline/__main__.py#L31-L71
|
train
|
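A hedged sketch exercising the argument handling in main above. This is a trimmed mirror of the parser built there, not the foremast entry point itself, and the choices tuple stands in for whatever ENVS actually contains.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--triggerjob', required=True)
parser.add_argument('--onetime', required=False, choices=('dev', 'stage', 'prod'))
parser.add_argument('-t', '--type', dest='type', required=False, default='ec2')

args = parser.parse_args(['--triggerjob', 'example-master', '--type', 'lambda'])
assert args.type == 'lambda'   # would dispatch to SpinnakerPipelineLambda
assert args.onetime is None    # no onetime deployment requested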
foremast/foremast
|
src/foremast/configs/outputs.py
|
convert_ini
|
def convert_ini(config_dict):
"""Convert _config_dict_ into a list of INI formatted strings.
Args:
config_dict (dict): Configuration dictionary to be flattened.
Returns:
(list) Lines to be written to a file in the format of KEY1_KEY2=value.
"""
config_lines = []
for env, configs in sorted(config_dict.items()):
for resource, app_properties in sorted(configs.items()):
try:
for app_property, value in sorted(app_properties.items()):
variable = '{env}_{resource}_{app_property}'.format(
env=env, resource=resource, app_property=app_property).upper()
if isinstance(value, (dict, DeepChainMap)):
safe_value = "'{0}'".format(json.dumps(dict(value)))
else:
safe_value = json.dumps(value)
line = "{variable}={value}".format(variable=variable, value=safe_value)
LOG.debug('INI line: %s', line)
config_lines.append(line)
except AttributeError:
resource = resource.upper()
app_properties = "'{}'".format(json.dumps(app_properties))
line = '{0}={1}'.format(resource, app_properties)
LOG.debug('INI line: %s', line)
config_lines.append(line)
return config_lines
|
python
|
def convert_ini(config_dict):
"""Convert _config_dict_ into a list of INI formatted strings.
Args:
config_dict (dict): Configuration dictionary to be flattened.
Returns:
(list) Lines to be written to a file in the format of KEY1_KEY2=value.
"""
config_lines = []
for env, configs in sorted(config_dict.items()):
for resource, app_properties in sorted(configs.items()):
try:
for app_property, value in sorted(app_properties.items()):
variable = '{env}_{resource}_{app_property}'.format(
env=env, resource=resource, app_property=app_property).upper()
if isinstance(value, (dict, DeepChainMap)):
safe_value = "'{0}'".format(json.dumps(dict(value)))
else:
safe_value = json.dumps(value)
line = "{variable}={value}".format(variable=variable, value=safe_value)
LOG.debug('INI line: %s', line)
config_lines.append(line)
except AttributeError:
resource = resource.upper()
app_properties = "'{}'".format(json.dumps(app_properties))
line = '{0}={1}'.format(resource, app_properties)
LOG.debug('INI line: %s', line)
config_lines.append(line)
return config_lines
|
[
"def",
"convert_ini",
"(",
"config_dict",
")",
":",
"config_lines",
"=",
"[",
"]",
"for",
"env",
",",
"configs",
"in",
"sorted",
"(",
"config_dict",
".",
"items",
"(",
")",
")",
":",
"for",
"resource",
",",
"app_properties",
"in",
"sorted",
"(",
"configs",
".",
"items",
"(",
")",
")",
":",
"try",
":",
"for",
"app_property",
",",
"value",
"in",
"sorted",
"(",
"app_properties",
".",
"items",
"(",
")",
")",
":",
"variable",
"=",
"'{env}_{resource}_{app_property}'",
".",
"format",
"(",
"env",
"=",
"env",
",",
"resource",
"=",
"resource",
",",
"app_property",
"=",
"app_property",
")",
".",
"upper",
"(",
")",
"if",
"isinstance",
"(",
"value",
",",
"(",
"dict",
",",
"DeepChainMap",
")",
")",
":",
"safe_value",
"=",
"\"'{0}'\"",
".",
"format",
"(",
"json",
".",
"dumps",
"(",
"dict",
"(",
"value",
")",
")",
")",
"else",
":",
"safe_value",
"=",
"json",
".",
"dumps",
"(",
"value",
")",
"line",
"=",
"\"{variable}={value}\"",
".",
"format",
"(",
"variable",
"=",
"variable",
",",
"value",
"=",
"safe_value",
")",
"LOG",
".",
"debug",
"(",
"'INI line: %s'",
",",
"line",
")",
"config_lines",
".",
"append",
"(",
"line",
")",
"except",
"AttributeError",
":",
"resource",
"=",
"resource",
".",
"upper",
"(",
")",
"app_properties",
"=",
"\"'{}'\"",
".",
"format",
"(",
"json",
".",
"dumps",
"(",
"app_properties",
")",
")",
"line",
"=",
"'{0}={1}'",
".",
"format",
"(",
"resource",
",",
"app_properties",
")",
"LOG",
".",
"debug",
"(",
"'INI line: %s'",
",",
"line",
")",
"config_lines",
".",
"append",
"(",
"line",
")",
"return",
"config_lines"
] |
Convert _config_dict_ into a list of INI formatted strings.
Args:
config_dict (dict): Configuration dictionary to be flattened.
Returns:
(list) Lines to be written to a file in the format of KEY1_KEY2=value.
|
[
"Convert",
"_config_dict_",
"into",
"a",
"list",
"of",
"INI",
"formatted",
"strings",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/configs/outputs.py#L29-L63
|
train
|
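A hedged usage sketch for convert_ini above (import path assumed from the path field). Note the two output shapes: dict-valued resources are flattened with an ENV_ prefix, while non-dict resources hit the AttributeError branch and are emitted under the bare resource name only.

from foremast.configs.outputs import convert_ini

config_dict = {
    'dev': {
        'elb': {'subnet_purpose': 'internal'},
        'regions': ['us-east-1'],
    },
}

for line in convert_ini(config_dict):
    print(line)
# DEV_ELB_SUBNET_PURPOSE="internal"
# REGIONS='["us-east-1"]'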
foremast/foremast
|
src/foremast/configs/outputs.py
|
write_variables
|
def write_variables(app_configs=None, out_file='', git_short=''):
"""Append _application.json_ configs to _out_file_, .exports, and .json.
Variables are written in INI style, e.g. UPPER_CASE=value. The .exports file
contains 'export' prepended to each line for easy sourcing. The .json file
is a minified representation of the combined configurations.
Args:
app_configs (dict): Environment configurations from _application.json_
files, e.g. {'dev': {'elb': {'subnet_purpose': 'internal'}}}.
out_file (str): Name of INI file to append to.
git_short (str): Short name of Git repository, e.g. forrest/core.
Returns:
dict: Configuration equivalent to the JSON output.
"""
generated = gogoutils.Generator(*gogoutils.Parser(git_short).parse_url(), formats=APP_FORMATS)
json_configs = {}
for env, configs in app_configs.items():
if env != 'pipeline':
instance_profile = generated.iam()['profile']
rendered_configs = json.loads(
get_template(
'configs/configs.json.j2',
env=env,
app=generated.app_name(),
profile=instance_profile,
formats=generated))
json_configs[env] = dict(DeepChainMap(configs, rendered_configs))
region_list = configs.get('regions', rendered_configs['regions'])
json_configs[env]['regions'] = region_list # removes regions defined in templates but not configs.
for region in region_list:
region_config = json_configs[env][region]
json_configs[env][region] = dict(DeepChainMap(region_config, rendered_configs))
else:
default_pipeline_json = json.loads(get_template('configs/pipeline.json.j2', formats=generated))
json_configs['pipeline'] = dict(DeepChainMap(configs, default_pipeline_json))
LOG.debug('Compiled configs:\n%s', pformat(json_configs))
config_lines = convert_ini(json_configs)
with open(out_file, 'at') as jenkins_vars:
LOG.info('Appending variables to %s.', out_file)
jenkins_vars.write('\n'.join(config_lines))
with open(out_file + '.exports', 'wt') as export_vars:
LOG.info('Writing sourceable variables to %s.', export_vars.name)
export_vars.write('\n'.join('export {0}'.format(line) for line in config_lines))
with open(out_file + '.json', 'wt') as json_handle:
LOG.info('Writing JSON to %s.', json_handle.name)
LOG.debug('Total JSON dict:\n%s', json_configs)
json.dump(json_configs, json_handle)
return json_configs
|
python
|
def write_variables(app_configs=None, out_file='', git_short=''):
"""Append _application.json_ configs to _out_file_, .exports, and .json.
Variables are written in INI style, e.g. UPPER_CASE=value. The .exports file
contains 'export' prepended to each line for easy sourcing. The .json file
is a minified representation of the combined configurations.
Args:
app_configs (dict): Environment configurations from _application.json_
files, e.g. {'dev': {'elb': {'subnet_purpose': 'internal'}}}.
out_file (str): Name of INI file to append to.
git_short (str): Short name of Git repository, e.g. forrest/core.
Returns:
dict: Configuration equivalent to the JSON output.
"""
generated = gogoutils.Generator(*gogoutils.Parser(git_short).parse_url(), formats=APP_FORMATS)
json_configs = {}
for env, configs in app_configs.items():
if env != 'pipeline':
instance_profile = generated.iam()['profile']
rendered_configs = json.loads(
get_template(
'configs/configs.json.j2',
env=env,
app=generated.app_name(),
profile=instance_profile,
formats=generated))
json_configs[env] = dict(DeepChainMap(configs, rendered_configs))
region_list = configs.get('regions', rendered_configs['regions'])
json_configs[env]['regions'] = region_list # removes regions defined in templates but not configs.
for region in region_list:
region_config = json_configs[env][region]
json_configs[env][region] = dict(DeepChainMap(region_config, rendered_configs))
else:
default_pipeline_json = json.loads(get_template('configs/pipeline.json.j2', formats=generated))
json_configs['pipeline'] = dict(DeepChainMap(configs, default_pipeline_json))
LOG.debug('Compiled configs:\n%s', pformat(json_configs))
config_lines = convert_ini(json_configs)
with open(out_file, 'at') as jenkins_vars:
LOG.info('Appending variables to %s.', out_file)
jenkins_vars.write('\n'.join(config_lines))
with open(out_file + '.exports', 'wt') as export_vars:
LOG.info('Writing sourceable variables to %s.', export_vars.name)
export_vars.write('\n'.join('export {0}'.format(line) for line in config_lines))
with open(out_file + '.json', 'wt') as json_handle:
LOG.info('Writing JSON to %s.', json_handle.name)
LOG.debug('Total JSON dict:\n%s', json_configs)
json.dump(json_configs, json_handle)
return json_configs
|
[
"def",
"write_variables",
"(",
"app_configs",
"=",
"None",
",",
"out_file",
"=",
"''",
",",
"git_short",
"=",
"''",
")",
":",
"generated",
"=",
"gogoutils",
".",
"Generator",
"(",
"*",
"gogoutils",
".",
"Parser",
"(",
"git_short",
")",
".",
"parse_url",
"(",
")",
",",
"formats",
"=",
"APP_FORMATS",
")",
"json_configs",
"=",
"{",
"}",
"for",
"env",
",",
"configs",
"in",
"app_configs",
".",
"items",
"(",
")",
":",
"if",
"env",
"!=",
"'pipeline'",
":",
"instance_profile",
"=",
"generated",
".",
"iam",
"(",
")",
"[",
"'profile'",
"]",
"rendered_configs",
"=",
"json",
".",
"loads",
"(",
"get_template",
"(",
"'configs/configs.json.j2'",
",",
"env",
"=",
"env",
",",
"app",
"=",
"generated",
".",
"app_name",
"(",
")",
",",
"profile",
"=",
"instance_profile",
",",
"formats",
"=",
"generated",
")",
")",
"json_configs",
"[",
"env",
"]",
"=",
"dict",
"(",
"DeepChainMap",
"(",
"configs",
",",
"rendered_configs",
")",
")",
"region_list",
"=",
"configs",
".",
"get",
"(",
"'regions'",
",",
"rendered_configs",
"[",
"'regions'",
"]",
")",
"json_configs",
"[",
"env",
"]",
"[",
"'regions'",
"]",
"=",
"region_list",
"# removes regions defined in templates but not configs.",
"for",
"region",
"in",
"region_list",
":",
"region_config",
"=",
"json_configs",
"[",
"env",
"]",
"[",
"region",
"]",
"json_configs",
"[",
"env",
"]",
"[",
"region",
"]",
"=",
"dict",
"(",
"DeepChainMap",
"(",
"region_config",
",",
"rendered_configs",
")",
")",
"else",
":",
"default_pipeline_json",
"=",
"json",
".",
"loads",
"(",
"get_template",
"(",
"'configs/pipeline.json.j2'",
",",
"formats",
"=",
"generated",
")",
")",
"json_configs",
"[",
"'pipeline'",
"]",
"=",
"dict",
"(",
"DeepChainMap",
"(",
"configs",
",",
"default_pipeline_json",
")",
")",
"LOG",
".",
"debug",
"(",
"'Compiled configs:\\n%s'",
",",
"pformat",
"(",
"json_configs",
")",
")",
"config_lines",
"=",
"convert_ini",
"(",
"json_configs",
")",
"with",
"open",
"(",
"out_file",
",",
"'at'",
")",
"as",
"jenkins_vars",
":",
"LOG",
".",
"info",
"(",
"'Appending variables to %s.'",
",",
"out_file",
")",
"jenkins_vars",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"config_lines",
")",
")",
"with",
"open",
"(",
"out_file",
"+",
"'.exports'",
",",
"'wt'",
")",
"as",
"export_vars",
":",
"LOG",
".",
"info",
"(",
"'Writing sourceable variables to %s.'",
",",
"export_vars",
".",
"name",
")",
"export_vars",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"'export {0}'",
".",
"format",
"(",
"line",
")",
"for",
"line",
"in",
"config_lines",
")",
")",
"with",
"open",
"(",
"out_file",
"+",
"'.json'",
",",
"'wt'",
")",
"as",
"json_handle",
":",
"LOG",
".",
"info",
"(",
"'Writing JSON to %s.'",
",",
"json_handle",
".",
"name",
")",
"LOG",
".",
"debug",
"(",
"'Total JSON dict:\\n%s'",
",",
"json_configs",
")",
"json",
".",
"dump",
"(",
"json_configs",
",",
"json_handle",
")",
"return",
"json_configs"
] |
Append _application.json_ configs to _out_file_, .exports, and .json.
Variables are written in INI style, e.g. UPPER_CASE=value. The .exports file
contains 'export' prepended to each line for easy sourcing. The .json file
is a minified representation of the combined configurations.
Args:
app_configs (dict): Environment configurations from _application.json_
files, e.g. {'dev': {'elb': {'subnet_purpose': 'internal'}}}.
out_file (str): Name of INI file to append to.
git_short (str): Short name of Git repository, e.g. forrest/core.
Returns:
dict: Configuration equivalent to the JSON output.
|
[
"Append",
"_application",
".",
"json_",
"configs",
"to",
"_out_file_",
".",
"exports",
"and",
".",
"json",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/configs/outputs.py#L66-L122
|
train
|
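write_variables above fans a single compiled configuration out to three sibling files: an appended INI file, a sourceable .exports file, and a minified .json file. A minimal sketch of just that fan-out, independent of the gogoutils naming and template rendering:

import json

def fan_out(out_file, config_lines, json_configs):
    # Append KEY=value lines for Jenkins to the existing INI file.
    with open(out_file, 'at') as jenkins_vars:
        jenkins_vars.write('\n'.join(config_lines))
    # Same lines, 'export'-prefixed so a shell can source them.
    with open(out_file + '.exports', 'wt') as export_vars:
        export_vars.write('\n'.join('export {0}'.format(line) for line in config_lines))
    # Minified JSON view of the combined configuration.
    with open(out_file + '.json', 'wt') as json_handle:
        json.dump(json_configs, json_handle)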
foremast/foremast
|
src/foremast/utils/get_sns_subscriptions.py
|
get_sns_subscriptions
|
def get_sns_subscriptions(app_name, env, region):
"""List SNS lambda subscriptions.
Returns:
list: List of Lambda subscribed SNS ARNs.
"""
session = boto3.Session(profile_name=env, region_name=region)
sns_client = session.client('sns')
lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
lambda_subscriptions = []
subscriptions = sns_client.list_subscriptions()
for subscription in subscriptions['Subscriptions']:
if subscription['Protocol'] == "lambda" and subscription['Endpoint'] == lambda_alias_arn:
lambda_subscriptions.append(subscription['SubscriptionArn'])
if not lambda_subscriptions:
LOG.debug('SNS subscription for function %s not found', lambda_alias_arn)
return lambda_subscriptions
|
python
|
def get_sns_subscriptions(app_name, env, region):
"""List SNS lambda subscriptions.
Returns:
list: List of Lambda subscribed SNS ARNs.
"""
session = boto3.Session(profile_name=env, region_name=region)
sns_client = session.client('sns')
lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
lambda_subscriptions = []
subscriptions = sns_client.list_subscriptions()
for subscription in subscriptions['Subscriptions']:
if subscription['Protocol'] == "lambda" and subscription['Endpoint'] == lambda_alias_arn:
lambda_subscriptions.append(subscription['SubscriptionArn'])
if not lambda_subscriptions:
LOG.debug('SNS subscription for function %s not found', lambda_alias_arn)
return lambda_subscriptions
|
[
"def",
"get_sns_subscriptions",
"(",
"app_name",
",",
"env",
",",
"region",
")",
":",
"session",
"=",
"boto3",
".",
"Session",
"(",
"profile_name",
"=",
"env",
",",
"region_name",
"=",
"region",
")",
"sns_client",
"=",
"session",
".",
"client",
"(",
"'sns'",
")",
"lambda_alias_arn",
"=",
"get_lambda_alias_arn",
"(",
"app",
"=",
"app_name",
",",
"account",
"=",
"env",
",",
"region",
"=",
"region",
")",
"lambda_subscriptions",
"=",
"[",
"]",
"subscriptions",
"=",
"sns_client",
".",
"list_subscriptions",
"(",
")",
"for",
"subscription",
"in",
"subscriptions",
"[",
"'Subscriptions'",
"]",
":",
"if",
"subscription",
"[",
"'Protocol'",
"]",
"==",
"\"lambda\"",
"and",
"subscription",
"[",
"'Endpoint'",
"]",
"==",
"lambda_alias_arn",
":",
"lambda_subscriptions",
".",
"append",
"(",
"subscription",
"[",
"'SubscriptionArn'",
"]",
")",
"if",
"not",
"lambda_subscriptions",
":",
"LOG",
".",
"debug",
"(",
"'SNS subscription for function %s not found'",
",",
"lambda_alias_arn",
")",
"return",
"lambda_subscriptions"
] |
List SNS lambda subscriptions.
Returns:
list: List of Lambda subscribed SNS ARNs.
|
[
"List",
"SNS",
"lambda",
"subscriptions",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/utils/get_sns_subscriptions.py#L11-L33
|
train
|
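A hedged call sketch for get_sns_subscriptions above; the profile, app, and region values are illustrative. One caveat visible in the body: sns_client.list_subscriptions() is a paginated API (it returns a NextToken for further pages), so the function as written inspects only the first page of results.

from foremast.utils.get_sns_subscriptions import get_sns_subscriptions

arns = get_sns_subscriptions(app_name='exampleapp', env='dev', region='us-east-1')
for arn in arns:
    print('Lambda-protocol subscription:', arn)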
foremast/foremast
|
src/foremast/awslambda/cloudwatch_log_event/destroy_cloudwatch_log_event/destroy_cloudwatch_log_event.py
|
destroy_cloudwatch_log_event
|
def destroy_cloudwatch_log_event(app='', env='dev', region=''):
"""Destroy Cloudwatch log event.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment.
region (str): AWS region.
Returns:
bool: True upon successful completion.
"""
session = boto3.Session(profile_name=env, region_name=region)
cloudwatch_client = session.client('logs')
# FIXME: see below
# TODO: Log group name is required, where do we get it if it is not in application-master-env.json?
cloudwatch_client.delete_subscription_filter(logGroupName='/aws/lambda/awslimitchecker', filterName=app)
return True
|
python
|
def destroy_cloudwatch_log_event(app='', env='dev', region=''):
"""Destroy Cloudwatch log event.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment.
region (str): AWS region.
Returns:
bool: True upon successful completion.
"""
session = boto3.Session(profile_name=env, region_name=region)
cloudwatch_client = session.client('logs')
# FIXME: see below
# TODO: Log group name is required, where do we get it if it is not in application-master-env.json?
cloudwatch_client.delete_subscription_filter(logGroupName='/aws/lambda/awslimitchecker', filterName=app)
return True
|
[
"def",
"destroy_cloudwatch_log_event",
"(",
"app",
"=",
"''",
",",
"env",
"=",
"'dev'",
",",
"region",
"=",
"''",
")",
":",
"session",
"=",
"boto3",
".",
"Session",
"(",
"profile_name",
"=",
"env",
",",
"region_name",
"=",
"region",
")",
"cloudwatch_client",
"=",
"session",
".",
"client",
"(",
"'logs'",
")",
"# FIXME: see below",
"# TODO: Log group name is required, where do we get it if it is not in application-master-env.json?",
"cloudwatch_client",
".",
"delete_subscription_filter",
"(",
"logGroupName",
"=",
"'/aws/lambda/awslimitchecker'",
",",
"filterName",
"=",
"app",
")",
"return",
"True"
] |
Destroy Cloudwatch log event.
Args:
app (str): Spinnaker Application name.
env (str): Deployment environment.
region (str): AWS region.
Returns:
bool: True upon successful completion.
|
[
"Destroy",
"Cloudwatch",
"log",
"event",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/awslambda/cloudwatch_log_event/destroy_cloudwatch_log_event/destroy_cloudwatch_log_event.py#L24-L42
|
train
|
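The FIXME/TODO in destroy_cloudwatch_log_event above flags the hard-coded log group name. One possible resolution, sketched here as an assumption rather than foremast's actual behavior: derive the group from the standard '/aws/lambda/<function-name>' CloudWatch Logs convention, assuming the Lambda function is named after the app.

import boto3

def destroy_cloudwatch_log_event(app='', env='dev', region=''):
    session = boto3.Session(profile_name=env, region_name=region)
    cloudwatch_client = session.client('logs')
    # Assumed convention: the app's Lambda logs to /aws/lambda/<app>.
    log_group = '/aws/lambda/{0}'.format(app)
    cloudwatch_client.delete_subscription_filter(logGroupName=log_group, filterName=app)
    return True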
foremast/foremast
|
src/foremast/app/create_app.py
|
SpinnakerApp.get_accounts
|
def get_accounts(self, provider='aws'):
"""Get Accounts added to Spinnaker.
Args:
provider (str): What provider to find accounts for.
Returns:
list: list of dicts of Spinnaker credentials matching _provider_.
Raises:
AssertionError: Failure getting accounts from Spinnaker.
"""
url = '{gate}/credentials'.format(gate=API_URL)
response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert response.ok, 'Failed to get accounts: {0}'.format(response.text)
all_accounts = response.json()
self.log.debug('Accounts in Spinnaker:\n%s', all_accounts)
filtered_accounts = []
for account in all_accounts:
if account['type'] == provider:
filtered_accounts.append(account)
if not filtered_accounts:
raise ForemastError('No Accounts matching {0}.'.format(provider))
return filtered_accounts
|
python
|
def get_accounts(self, provider='aws'):
"""Get Accounts added to Spinnaker.
Args:
provider (str): What provider to find accounts for.
Returns:
list: list of dicts of Spinnaker credentials matching _provider_.
Raises:
AssertionError: Failure getting accounts from Spinnaker.
"""
url = '{gate}/credentials'.format(gate=API_URL)
response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert response.ok, 'Failed to get accounts: {0}'.format(response.text)
all_accounts = response.json()
self.log.debug('Accounts in Spinnaker:\n%s', all_accounts)
filtered_accounts = []
for account in all_accounts:
if account['type'] == provider:
filtered_accounts.append(account)
if not filtered_accounts:
raise ForemastError('No Accounts matching {0}.'.format(provider))
return filtered_accounts
|
[
"def",
"get_accounts",
"(",
"self",
",",
"provider",
"=",
"'aws'",
")",
":",
"url",
"=",
"'{gate}/credentials'",
".",
"format",
"(",
"gate",
"=",
"API_URL",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"verify",
"=",
"GATE_CA_BUNDLE",
",",
"cert",
"=",
"GATE_CLIENT_CERT",
")",
"assert",
"response",
".",
"ok",
",",
"'Failed to get accounts: {0}'",
".",
"format",
"(",
"response",
".",
"text",
")",
"all_accounts",
"=",
"response",
".",
"json",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Accounts in Spinnaker:\\n%s'",
",",
"all_accounts",
")",
"filtered_accounts",
"=",
"[",
"]",
"for",
"account",
"in",
"all_accounts",
":",
"if",
"account",
"[",
"'type'",
"]",
"==",
"provider",
":",
"filtered_accounts",
".",
"append",
"(",
"account",
")",
"if",
"not",
"filtered_accounts",
":",
"raise",
"ForemastError",
"(",
"'No Accounts matching {0}.'",
".",
"format",
"(",
"provider",
")",
")",
"return",
"filtered_accounts"
] |
Get Accounts added to Spinnaker.
Args:
provider (str): What provider to find accounts for.
Returns:
list: list of dicts of Spinnaker credentials matching _provider_.
Raises:
AssertionError: Failure getting accounts from Spinnaker.
|
[
"Get",
"Accounts",
"added",
"to",
"Spinnaker",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/app/create_app.py#L55-L82
|
train
|
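The account filtering in get_accounts above is a plain type match followed by an emptiness check. A self-contained sketch of that logic, with ValueError standing in for foremast's ForemastError:

def filter_accounts(all_accounts, provider='aws'):
    # Mirror of the filtering loop in get_accounts above.
    matched = [account for account in all_accounts if account['type'] == provider]
    if not matched:
        raise ValueError('No Accounts matching {0}.'.format(provider))
    return matched

assert filter_accounts([{'type': 'aws'}, {'type': 'gcp'}]) == [{'type': 'aws'}]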
foremast/foremast
|
src/foremast/app/create_app.py
|
SpinnakerApp.create_app
|
def create_app(self):
"""Send a POST to spinnaker to create a new application with class variables.
Raises:
AssertionError: Application creation failed.
"""
self.appinfo['accounts'] = self.get_accounts()
self.log.debug('Pipeline Config\n%s', pformat(self.pipeline_config))
self.log.debug('App info:\n%s', pformat(self.appinfo))
jsondata = self.retrieve_template()
wait_for_task(jsondata)
self.log.info("Successfully created %s application", self.appname)
return jsondata
|
python
|
def create_app(self):
"""Send a POST to spinnaker to create a new application with class variables.
Raises:
AssertionError: Application creation failed.
"""
self.appinfo['accounts'] = self.get_accounts()
self.log.debug('Pipeline Config\n%s', pformat(self.pipeline_config))
self.log.debug('App info:\n%s', pformat(self.appinfo))
jsondata = self.retrieve_template()
wait_for_task(jsondata)
self.log.info("Successfully created %s application", self.appname)
return jsondata
|
[
"def",
"create_app",
"(",
"self",
")",
":",
"self",
".",
"appinfo",
"[",
"'accounts'",
"]",
"=",
"self",
".",
"get_accounts",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Pipeline Config\\n%s'",
",",
"pformat",
"(",
"self",
".",
"pipeline_config",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'App info:\\n%s'",
",",
"pformat",
"(",
"self",
".",
"appinfo",
")",
")",
"jsondata",
"=",
"self",
".",
"retrieve_template",
"(",
")",
"wait_for_task",
"(",
"jsondata",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Successfully created %s application\"",
",",
"self",
".",
"appname",
")",
"return",
"jsondata"
] |
Send a POST to spinnaker to create a new application with class variables.
Raises:
AssertionError: Application creation failed.
|
[
"Send",
"a",
"POST",
"to",
"spinnaker",
"to",
"create",
"a",
"new",
"application",
"with",
"class",
"variables",
"."
] |
fb70f29b8ce532f061685a17d120486e47b215ba
|
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/app/create_app.py#L84-L97
|
train
|
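A hedged call sketch for create_app above; building a fully configured SpinnakerApp (appinfo, pipeline_config, template) is out of scope here, so the instance is assumed to exist.

# spinnaker_app is an already-configured SpinnakerApp instance (assumed).
task_json = spinnaker_app.create_app()
# create_app blocks in wait_for_task(...) until Spinnaker reports the task
# finished, then returns the JSON payload it submitted.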