repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
ahawker/ulid
|
ulid/base32.py
|
decode_randomness
|
python
|
def decode_randomness(randomness: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    The given :class:`~str` is expected to represent the last 16 characters of a ULID.

    :param randomness: String to decode
    :type randomness: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 16 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(randomness, 16)
    # Fold the sixteen 5-bit Base32 values into one 80-bit integer,
    # most significant character first.
    acc = 0
    for byte in encoded:
        acc = (acc << 5) | DECODING[byte]
    # 16 chars * 5 bits == exactly 80 bits == 10 bytes; mask is a no-op
    # kept for symmetry with the other decode_* helpers.
    return (acc & ((1 << 80) - 1)).to_bytes(10, byteorder='big')
|
Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.
The given :class:`~str` are expected to represent the last 16 characters of a ULID, which
are cryptographically secure random values.
.. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
strings specifically and is not meant for arbitrary decoding.
:param randomness: String to decode
:type randomness: :class:`~str`
:return: Value decoded from Base32 string
:rtype: :class:`~bytes`
:raises ValueError: when value is not 16 characters
:raises ValueError: when value cannot be encoded in ASCII
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/base32.py#L305-L337
|
[
"def str_to_bytes(value: str, expected_length: int) -> bytes:\n \"\"\"\n Convert the given string to bytes and validate it is within the Base32 character set.\n\n :param value: String to convert to bytes\n :type value: :class:`~str`\n :param expected_length: Expected length of the input string\n :type expected_length: :class:`~int`\n :return: Value converted to bytes.\n :rtype: :class:`~bytes`\n \"\"\"\n length = len(value)\n if length != expected_length:\n raise ValueError('Expects {} characters for decoding; got {}'.format(expected_length, length))\n\n try:\n encoded = value.encode('ascii')\n except UnicodeEncodeError as ex:\n raise ValueError('Expects value that can be encoded in ASCII charset: {}'.format(ex))\n\n decoding = DECODING\n\n # Confirm all bytes are valid Base32 decode characters.\n # Note: ASCII encoding handles the out of range checking for us.\n for byte in encoded:\n if decoding[byte] > 31:\n raise ValueError('Non-base32 character found: \"{}\"'.format(chr(byte)))\n\n return encoded\n"
] |
"""
ulid/base32
~~~~~~~~~~~
Functionality for encoding/decoding ULID strings/bytes using Base32 format.
.. note:: This module makes the trade-off of code duplication for inline
computations over multiple function calls for performance reasons. I'll
check metrics in the future to see how much it helps and if it's worth
it to maintain.
* `Base32 Documentation <http://www.crockford.com/wrmg/base32.html>`
* `NUlid Project <https://github.com/RobThree/NUlid>`
"""
import array
from . import hints
#: Base32 character set. Excludes characters "I L O U".
ENCODING = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
#: Array that maps encoded string char byte values to enable O(1) lookups.
DECODING = array.array(
'B',
(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14, 0x15, 0x00,
0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14,
0x15, 0x00, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)
)
def encode(value: hints.Buffer) -> str:
    """
    Encode the given :class:`~bytes` instance to a :class:`~str` using Base32 encoding.

    .. note:: You should only use this method if you've got a :class:`~bytes` instance
        and you are unsure of what it represents. If you know the _meaning_ of the
        :class:`~bytes` instance, you should call the `encode_*` method explicitly for
        better performance.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 6, 10, or 16 bytes long
    """
    # Dispatch on buffer length; 16 (full ULID) listed first as the hot path.
    handlers = {
        16: encode_ulid,
        6: encode_timestamp,
        10: encode_randomness,
    }
    handler = handlers.get(len(value))
    if handler is None:
        raise ValueError('Expects bytes in sizes of 6, 10, or 16; got {}'.format(len(value)))
    return handler(value)
def encode_ulid(value: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
        bytes specifically and is not meant for arbitrary encoding.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 16 bytes
    """
    length = len(value)
    if length != 16:
        raise ValueError('Expects 16 bytes for timestamp + randomness; got {}'.format(length))
    # Pack the 16 bytes into a single 128-bit integer and peel off 5-bit
    # groups from the most significant end. 26 chars * 5 bits = 130 bits,
    # so the first character only carries the top 3 bits of the value.
    number = int.from_bytes(value, byteorder='big')
    return ''.join(ENCODING[(number >> shift) & 0x1F] for shift in range(125, -1, -5))
def encode_timestamp(timestamp: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID,
    which are a timestamp in milliseconds.

    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
        bytes specifically and is not meant for arbitrary encoding.

    :param timestamp: Bytes to encode
    :type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the timestamp is not 6 bytes
    """
    length = len(timestamp)
    if length != 6:
        raise ValueError('Expects 6 bytes for timestamp; got {}'.format(length))
    # 6 bytes = 48 bits -> 10 Base32 chars (50 bits); the first char holds
    # only the top 3 bits.
    number = int.from_bytes(timestamp, byteorder='big')
    return ''.join(ENCODING[(number >> shift) & 0x1F] for shift in range(45, -1, -5))
def encode_randomness(randomness: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the last 10 bytes of a ULID,
    which are cryptographically secure random values.

    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
        bytes specifically and is not meant for arbitrary encoding.

    :param randomness: Bytes to encode
    :type randomness: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the randomness is not 10 bytes
    """
    length = len(randomness)
    if length != 10:
        raise ValueError('Expects 10 bytes for randomness; got {}'.format(length))
    # 10 bytes = 80 bits, which maps exactly onto 16 Base32 characters.
    number = int.from_bytes(randomness, byteorder='big')
    return ''.join(ENCODING[(number >> shift) & 0x1F] for shift in range(75, -1, -5))
def decode(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    .. note:: You should only use this method if you've got a :class:`~str` instance
        and you are unsure of what it represents. If you know the _meaning_ of the
        :class:`~str` instance, you should call the `decode_*` method explicitly for
        better performance.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 10, 16, or 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    # Dispatch on string length; 26 (full ULID) listed first as the hot path.
    handlers = {
        26: decode_ulid,
        10: decode_timestamp,
        16: decode_randomness,
    }
    handler = handlers.get(len(value))
    if handler is None:
        raise ValueError('Expects string in lengths of 10, 16, or 26; got {}'.format(len(value)))
    return handler(value)
def decode_ulid(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
        strings specifically and is not meant for arbitrary decoding.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(value, 26)
    decoding = DECODING
    # Fold the twenty-six 5-bit Base32 values into one big integer,
    # most significant character first.
    acc = 0
    for byte in encoded:
        acc = (acc << 5) | decoding[byte]
    # 26 chars carry 130 bits but a ULID only has 128; masking discards the
    # two excess high bits, matching the original implementation's `& 0xFF`
    # truncation of the first byte.
    return (acc & ((1 << 128) - 1)).to_bytes(16, byteorder='big')
def decode_timestamp(timestamp: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    The given :class:`~str` is expected to represent the first 10 characters of a ULID,
    which are the timestamp in milliseconds.

    .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
        strings specifically and is not meant for arbitrary decoding.

    :param timestamp: String to decode
    :type timestamp: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 10 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(timestamp, 10)
    decoding = DECODING
    # Fold the ten 5-bit Base32 values into a single integer.
    acc = 0
    for byte in encoded:
        acc = (acc << 5) | decoding[byte]
    # 10 chars carry 50 bits; mask down to the 48 bits of the timestamp,
    # matching the original implementation's `& 0xFF` truncation.
    return (acc & ((1 << 48) - 1)).to_bytes(6, byteorder='big')
def str_to_bytes(value: str, expected_length: int) -> bytes:
    """
    Convert the given string to bytes and validate it is within the Base32 character set.

    :param value: String to convert to bytes
    :type value: :class:`~str`
    :param expected_length: Expected length of the input string
    :type expected_length: :class:`~int`
    :return: Value converted to bytes.
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not of the expected length
    :raises ValueError: when value cannot be encoded in ASCII
    :raises ValueError: when value contains characters outside the Base32 set
    """
    length = len(value)
    if length != expected_length:
        raise ValueError('Expects {} characters for decoding; got {}'.format(expected_length, length))
    try:
        encoded = value.encode('ascii')
    except UnicodeEncodeError as ex:
        # Chain the original encoding error so the cause survives for debugging.
        raise ValueError('Expects value that can be encoded in ASCII charset: {}'.format(ex)) from ex
    decoding = DECODING
    # Confirm all bytes are valid Base32 decode characters.
    # Note: ASCII encoding handles the out of range checking for us.
    for byte in encoded:
        if decoding[byte] > 31:
            raise ValueError('Non-base32 character found: "{}"'.format(chr(byte)))
    return encoded
|
ahawker/ulid
|
ulid/base32.py
|
str_to_bytes
|
python
|
def str_to_bytes(value: str, expected_length: int) -> bytes:
    """
    Convert the given string to bytes and validate it is within the Base32 character set.

    :param value: String to convert to bytes
    :type value: :class:`~str`
    :param expected_length: Expected length of the input string
    :type expected_length: :class:`~int`
    :return: Value converted to bytes.
    :rtype: :class:`~bytes`
    """
    if len(value) != expected_length:
        raise ValueError('Expects {} characters for decoding; got {}'.format(expected_length, len(value)))
    try:
        encoded = value.encode('ascii')
    except UnicodeEncodeError as ex:
        raise ValueError('Expects value that can be encoded in ASCII charset: {}'.format(ex))
    # Reject any byte that does not map to a Base32 value (0..31); the ASCII
    # encode above already guarantees every byte indexes within the table.
    table = DECODING
    for byte in encoded:
        if table[byte] > 31:
            raise ValueError('Non-base32 character found: "{}"'.format(chr(byte)))
    return encoded
|
Convert the given string to bytes and validate it is within the Base32 character set.
:param value: String to convert to bytes
:type value: :class:`~str`
:param expected_length: Expected length of the input string
:type expected_length: :class:`~int`
:return: Value converted to bytes.
:rtype: :class:`~bytes`
|
train
|
https://github.com/ahawker/ulid/blob/f6459bafebbd1a1ffd71a8718bd5592c2e4dd59f/ulid/base32.py#L340-L368
| null |
"""
ulid/base32
~~~~~~~~~~~
Functionality for encoding/decoding ULID strings/bytes using Base32 format.
.. note:: This module makes the trade-off of code duplication for inline
computations over multiple function calls for performance reasons. I'll
check metrics in the future to see how much it helps and if it's worth
it to maintain.
* `Base32 Documentation <http://www.crockford.com/wrmg/base32.html>`
* `NUlid Project <https://github.com/RobThree/NUlid>`
"""
import array
from . import hints
#: Base32 character set. Excludes characters "I L O U".
ENCODING = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
#: Array that maps encoded string char byte values to enable O(1) lookups.
DECODING = array.array(
'B',
(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14, 0x15, 0x00,
0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x01, 0x12, 0x13, 0x01, 0x14,
0x15, 0x00, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)
)
def encode(value: hints.Buffer) -> str:
    """
    Encode the given :class:`~bytes` instance to a :class:`~str` using Base32 encoding.

    .. note:: You should only use this method if you've got a :class:`~bytes` instance
        and you are unsure of what it represents. If you know the _meaning_ of the
        :class:`~bytes` instance, you should call the `encode_*` method explicitly for
        better performance.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 6, 10, or 16 bytes long
    """
    length = len(value)
    # Probe sizes in assumed hot-path order: full ULID, timestamp, randomness.
    for size, encoder in ((16, encode_ulid), (6, encode_timestamp), (10, encode_randomness)):
        if length == size:
            return encoder(value)
    raise ValueError('Expects bytes in sizes of 6, 10, or 16; got {}'.format(length))
def encode_ulid(value: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
        bytes specifically and is not meant for arbitrary encoding.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 16 bytes
    """
    if len(value) != 16:
        raise ValueError('Expects 16 bytes for timestamp + randomness; got {}'.format(len(value)))
    # Treat the buffer as one 128-bit big-endian integer and emit 26
    # characters of 5 bits each (130 bits total; the leading character
    # carries only the top 3 bits).
    number = int.from_bytes(value, 'big')
    chars = []
    shift = 125
    while shift >= 0:
        chars.append(ENCODING[(number >> shift) & 31])
        shift -= 5
    return ''.join(chars)
def encode_timestamp(timestamp: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID,
    which are a timestamp in milliseconds.

    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
        bytes specifically and is not meant for arbitrary encoding.

    :param timestamp: Bytes to encode
    :type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the timestamp is not 6 bytes
    """
    if len(timestamp) != 6:
        raise ValueError('Expects 6 bytes for timestamp; got {}'.format(len(timestamp)))
    # 48-bit big-endian integer -> 10 characters of 5 bits (50 bits total).
    number = int.from_bytes(timestamp, 'big')
    chars = []
    shift = 45
    while shift >= 0:
        chars.append(ENCODING[(number >> shift) & 31])
        shift -= 5
    return ''.join(chars)
def encode_randomness(randomness: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    The given :class:`~bytes` are expected to represent the last 10 bytes of a ULID,
    which are cryptographically secure random values.

    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
        bytes specifically and is not meant for arbitrary encoding.

    :param randomness: Bytes to encode
    :type randomness: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the randomness is not 10 bytes
    """
    if len(randomness) != 10:
        raise ValueError('Expects 10 bytes for randomness; got {}'.format(len(randomness)))
    # 80-bit big-endian integer -> exactly 16 characters of 5 bits.
    number = int.from_bytes(randomness, 'big')
    chars = []
    shift = 75
    while shift >= 0:
        chars.append(ENCODING[(number >> shift) & 31])
        shift -= 5
    return ''.join(chars)
def decode(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    .. note:: You should only use this method if you've got a :class:`~str` instance
        and you are unsure of what it represents. If you know the _meaning_ of the
        :class:`~str` instance, you should call the `decode_*` method explicitly for
        better performance.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 10, 16, or 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    length = len(value)
    # Probe lengths in assumed hot-path order: full ULID, timestamp, randomness.
    for size, decoder in ((26, decode_ulid), (10, decode_timestamp), (16, decode_randomness)):
        if length == size:
            return decoder(value)
    raise ValueError('Expects string in lengths of 10, 16, or 26; got {}'.format(length))
def decode_ulid(value: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
        strings specifically and is not meant for arbitrary decoding.

    :param value: String to decode
    :type value: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 26 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(value, 26)
    # Accumulate all twenty-six 5-bit values into a single integer.
    number = 0
    for byte in encoded:
        number = (number << 5) | DECODING[byte]
    # Drop the two excess high bits (130 bits encoded vs 128 bits of ULID);
    # this mirrors the `& 0xFF` truncation of the unrolled original.
    number &= (1 << 128) - 1
    return number.to_bytes(16, 'big')
def decode_timestamp(timestamp: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    The given :class:`~str` is expected to represent the first 10 characters of a ULID,
    which are the timestamp in milliseconds.

    .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
        strings specifically and is not meant for arbitrary decoding.

    :param timestamp: String to decode
    :type timestamp: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 10 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(timestamp, 10)
    # Accumulate the ten 5-bit values into a single integer.
    number = 0
    for byte in encoded:
        number = (number << 5) | DECODING[byte]
    # Keep only the 48 timestamp bits (10 chars carry 50 bits), mirroring
    # the `& 0xFF` truncation of the unrolled original.
    number &= (1 << 48) - 1
    return number.to_bytes(6, 'big')
def decode_randomness(randomness: str) -> bytes:
    """
    Decode the given Base32 encoded :class:`~str` instance to :class:`~bytes`.

    The given :class:`~str` is expected to represent the last 16 characters of a ULID,
    which are cryptographically secure random values.

    .. note:: This uses an optimized strategy from the `NUlid` project for decoding ULID
        strings specifically and is not meant for arbitrary decoding.

    :param randomness: String to decode
    :type randomness: :class:`~str`
    :return: Value decoded from Base32 string
    :rtype: :class:`~bytes`
    :raises ValueError: when value is not 16 characters
    :raises ValueError: when value cannot be encoded in ASCII
    """
    encoded = str_to_bytes(randomness, 16)
    # Accumulate the sixteen 5-bit values into a single integer.
    number = 0
    for byte in encoded:
        number = (number << 5) | DECODING[byte]
    # 16 chars * 5 bits == exactly 80 bits == 10 bytes, so no truncation is
    # needed; the mask is retained for symmetry with the other decoders.
    number &= (1 << 80) - 1
    return number.to_bytes(10, 'big')
|
tommikaikkonen/prettyprinter
|
prettyprinter/prettyprinter.py
|
comment
|
python
|
def comment(value, comment_text):
    """
    Annotates a value or a Doc with a comment.

    When printed by prettyprinter, the comment will be
    rendered next to the value or Doc.
    """
    # Docs get the layout-level annotation; plain values get wrapped.
    handler = comment_doc if isinstance(value, Doc) else comment_value
    return handler(value, comment_text)
|
Annotates a value or a Doc with a comment.
When printed by prettyprinter, the comment will be
rendered next to the value or Doc.
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/prettyprinter.py#L156-L164
|
[
"def comment_doc(doc, comment_text):\n \"\"\"Annotates a Doc with a comment; used by the layout algorithm.\n\n You don't need to call this unless you're doing something low-level\n with Docs; use ``comment`` instead.\n\n ``prettyprinter`` will make sure the parent (or top-level) handler\n will render the comment in a proper way. E.g. if ``doc``\n represents an element in a list, then the ``list`` pretty\n printer will handle where to place the comment.\n \"\"\"\n return annotate(CommentAnnotation(comment_text), doc)\n",
"def comment_value(value, comment_text):\n \"\"\"Annotates a Python value with a comment text.\n\n prettyprinter will inspect and strip the annotation\n during the layout process, and handle rendering the comment\n next to the value in the output.\n\n It is highly unlikely you need to call this function. Use\n ``comment`` instead, which works in almost all cases.\n \"\"\"\n return _CommentedValue(value, comment_text)\n"
] |
import inspect
import math
import re
import sys
import warnings
import ast
from collections import OrderedDict
from functools import singledispatch, partial
from itertools import chain, cycle
from traceback import format_exception
from types import (
FunctionType,
BuiltinFunctionType,
BuiltinMethodType
)
from weakref import WeakKeyDictionary
from .doc import (
always_break,
annotate,
concat,
contextual,
flat_choice,
fill,
group,
nest,
NIL,
LINE,
SOFTLINE,
HARDLINE
)
from .doctypes import (
Annotated,
Doc
)
from .layout import layout_smart
from .syntax import Token
from .utils import identity, intersperse, take
# Feature detection: dict/kwargs key order only became guaranteed
# insertion order in Python 3.6.
PY_VERSION_INFO = sys.version_info
DICT_KEY_ORDER_SUPPORTED = PY_VERSION_INFO >= (3, 6)
# Sentinel distinct from None for "no value supplied".
UNSET_SENTINEL = object()
# Pre-annotated punctuation Docs shared by every printer.
COMMA = annotate(Token.PUNCTUATION, ',')
COLON = annotate(Token.PUNCTUATION, ':')
ELLIPSIS = annotate(Token.PUNCTUATION, '...')
LPAREN = annotate(Token.PUNCTUATION, '(')
RPAREN = annotate(Token.PUNCTUATION, ')')
LBRACKET = annotate(Token.PUNCTUATION, '[')
RBRACKET = annotate(Token.PUNCTUATION, ']')
LBRACE = annotate(Token.PUNCTUATION, '{')
RBRACE = annotate(Token.PUNCTUATION, '}')
# Pre-annotated operator Docs.
NEG_OP = annotate(Token.OPERATOR, '-')
MUL_OP = annotate(Token.OPERATOR, '*')
ADD_OP = annotate(Token.OPERATOR, '+')
ASSIGN_OP = annotate(Token.OPERATOR, '=')
# The capturing groups make re.split keep the separators in the result,
# so words and whitespace runs alternate.
WHITESPACE_PATTERN_TEXT = re.compile(r'(\s+)')
WHITESPACE_PATTERN_BYTES = re.compile(rb'(\s+)')
NONWORD_PATTERN_TEXT = re.compile(r'(\W+)')
NONWORD_PATTERN_BYTES = re.compile(rb'(\W+)')
# Strategies for laying out multiline strings; each bare string below
# sketches the intended output shape for its strategy.
# For dict keys
"""
(
'aaaaaaaaaa'
'aaaaaa'
)
"""
MULTILINE_STRATEGY_PARENS = 'MULTILINE_STRATEGY_PARENS'
# For dict values
"""
'aaaaaaaaaa'
'aaaaa'
"""
MULTILINE_STRATEGY_INDENTED = 'MULTILINE_STRATEGY_INDENTED'
# For sequence elements
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STRATEGY_HANG = 'MULTILINE_STRATEGY_HANG'
# For top level strs
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STRATEGY_PLAIN = 'MULTILINE_STRATEGY_PLAIN'
# Modules whose names are omitted when rendering qualified identifiers.
IMPLICIT_MODULES = {
    '__main__',
    'builtins',
}
class CommentAnnotation:
    """Doc annotation payload carrying a comment string for the layout
    stage to render."""

    def __init__(self, value):
        assert isinstance(value, str)
        self.value = value

    def __repr__(self):
        return 'ValueComment({!r})'.format(self.value)
class _CommentedValue:
    """Internal (value, comment) pair; stripped during layout."""

    def __init__(self, value, comment):
        self.value, self.comment = value, comment
class _TrailingCommentedValue:
    """Internal (value, comment) pair for trailing comments; stripped
    during layout."""

    def __init__(self, value, comment):
        self.value, self.comment = value, comment
def comment_value(value, comment_text):
    """Annotate a Python value with a comment text.

    prettyprinter inspects and strips the annotation during layout and
    renders the comment next to the value in the output. You almost
    certainly want ``comment`` instead, which handles Docs as well.
    """
    return _CommentedValue(value, comment_text)
def comment_doc(doc, comment_text):
    """Annotate a layout Doc with a comment; used by the layout algorithm.

    Low-level API - prefer ``comment`` unless you are working with Docs
    directly. The parent (or top-level) handler decides where the
    comment is rendered; e.g. the ``list`` pretty printer places the
    comments for its elements.
    """
    return annotate(CommentAnnotation(comment_text), doc)
def trailing_comment(value, comment_text):
    """Annotate *value* so the comment renders "trailing": in place of
    the last element of a list/set/tuple, or after the last argument of
    a function call.

    Forces multi-line rendering of ``value``, since Python has no
    inline comment syntax.

    >>> trailing_comment(['value'], '...and more')
    [
        'value',
        # ...and more
    ]
    """
    return _TrailingCommentedValue(value, comment_text)
def unwrap_comments(value):
    """Peel comment wrappers off *value*.

    Returns ``(bare_value, comment, trailing_comment)``; either comment
    may be ``None``, and with nested wrappers the innermost of each
    kind wins.
    """
    plain_comment = None
    trailing = None
    while True:
        if isinstance(value, _CommentedValue):
            plain_comment, value = value.comment, value.value
        elif isinstance(value, _TrailingCommentedValue):
            trailing, value = value.comment, value.value
        else:
            break
    return (value, plain_comment, trailing)
def is_commented(value):
    """True if *value* is a Doc annotated with a CommentAnnotation."""
    if not isinstance(value, Annotated):
        return False
    return isinstance(value.annotation, CommentAnnotation)
def builtin_identifier(s):
    """Highlight *s* as a builtin name."""
    return annotate(Token.NAME_BUILTIN, s)


def identifier(s):
    """Highlight *s* as an ordinary (function) identifier."""
    return annotate(Token.NAME_FUNCTION, s)


def keyword_arg(s):
    """Highlight *s* as a keyword-argument name."""
    return annotate(Token.NAME_VARIABLE, s)
def general_identifier(s):
    """Render *s* as an identifier Doc.

    Callables are shown by their qualified name; the module prefix is
    dropped for builtins and ``__main__`` names, and builtins get
    builtin highlighting.
    """
    if not callable(s):
        return identifier(s)
    module, qualname = s.__module__, s.__qualname__
    if module == 'builtins':
        return builtin_identifier(qualname)
    if module in IMPLICIT_MODULES:
        return identifier(qualname)
    return identifier('{}.{}'.format(module, qualname))
def classattr(cls, attrname):
    """Doc for a dotted class-attribute reference, e.g. ``Cls.attr``."""
    cls_doc = general_identifier(cls)
    attr_doc = identifier('.{}'.format(attrname))
    return concat([cls_doc, attr_doc])
class PrettyContext:
    """
    An immutable object used to track context during construction of
    layout primitives. An instance of PrettyContext is passed to every
    pretty printer definition.

    As a performance optimization, the ``visited`` set is implemented
    as mutable (it is shared between the copies _replace produces).
    """
    __slots__ = (
        'indent',
        'depth_left',
        'visited',
        'multiline_strategy',
        'max_seq_len',
        'sort_dict_keys',
        'user_ctx'
    )

    def __init__(
        self,
        indent,
        depth_left,
        visited=None,
        multiline_strategy=MULTILINE_STRATEGY_PLAIN,
        max_seq_len=1000,
        sort_dict_keys=False,
        user_ctx=None
    ):
        # indent: spaces added per nesting level.
        # depth_left: remaining recursion budget; printers render '...'
        #             when it reaches 0.
        # visited: ids of values on the current rendering path, used
        #          for cycle detection (shared, mutable).
        self.indent = indent
        self.depth_left = depth_left
        self.multiline_strategy = multiline_strategy
        self.max_seq_len = max_seq_len
        self.sort_dict_keys = sort_dict_keys
        if visited is None:
            visited = set()
        self.visited = visited
        self.user_ctx = user_ctx or {}

    def _replace(self, **kwargs):
        # namedtuple-style copy: unspecified slots carry over from self.
        passed_keys = set(kwargs.keys())
        fieldnames = type(self).__slots__
        assert passed_keys.issubset(set(fieldnames))
        return PrettyContext(
            **{
                k: (
                    kwargs[k]
                    if k in passed_keys
                    else getattr(self, k)
                )
                for k in fieldnames
            }
        )

    def use_multiline_strategy(self, strategy):
        """Copy of this context using *strategy* for multiline strings."""
        return self._replace(multiline_strategy=strategy)

    def assoc(self, key, value):
        """
        Return a modified PrettyContext with ``key`` set to ``value``
        """
        return self._replace(user_ctx={
            **self.user_ctx,
            key: value,
        })

    def set(self, key, value):
        # Deprecated alias of assoc().
        warnings.warn(
            "PrettyContext.set will be deprecated in the future in favor of "
            "renamed PrettyPrinter.assoc. You can fix this warning by "
            "changing .set method calls to .assoc",
            PendingDeprecationWarning
        )
        return self.assoc(key, value)

    def get(self, key, default=None):
        """Look up *key* in the user-supplied context mapping."""
        return self.user_ctx.get(key, default)

    def nested_call(self):
        """Copy of this context with one less level of recursion budget."""
        return self._replace(depth_left=self.depth_left - 1)

    def start_visit(self, value):
        # Mark value as being on the current rendering path.
        self.visited.add(id(value))

    def end_visit(self, value):
        self.visited.remove(id(value))

    def is_visited(self, value):
        return id(value) in self.visited
def _warn_about_bad_printer(pretty_fn, value, exc):
    """Emit a UserWarning naming the printer that raised *exc*, with the
    full traceback, before the caller falls back to repr()."""
    fnname = '{}.{}'.format(pretty_fn.__module__, pretty_fn.__qualname__)
    tb_text = ''.join(format_exception(type(exc), exc, exc.__traceback__))
    warnings.warn(
        "The pretty printer for {}, {}, raised an exception. "
        "Falling back to default repr.\n\n{}".format(
            type(value).__name__,
            fnname,
            tb_text
        ),
        UserWarning
    )
def _run_pretty(pretty_fn, value, ctx, trailing_comment=None):
    """Invoke *pretty_fn* on *value*, guarding against cycles and
    misbehaving printers.

    Self-referential values render via _pretty_recursion; a printer
    that raises gets downgraded to a UserWarning plus repr() fallback;
    printers that don't accept ``trailing_comment`` are detected via
    signature binding and called without it.
    """
    if ctx.is_visited(value):
        return _pretty_recursion(value)

    ctx.start_visit(value)

    if trailing_comment:
        try:
            doc = pretty_fn(
                value,
                ctx,
                trailing_comment=trailing_comment
            )
        except TypeError as e:
            # This is probably because pretty_fn does not support
            # trailing_comment, but let's make sure.
            sig = inspect.signature(pretty_fn)
            try:
                sig.bind(value, ctx, trailing_comment=trailing_comment)
            except TypeError:
                # The printer genuinely can't take trailing_comment:
                # warn and retry without it.
                fnname = '{}.{}'.format(
                    pretty_fn.__module__,
                    pretty_fn.__qualname__
                )
                warnings.warn(
                    "The pretty printer for {}, {}, does not support rendering "
                    "trailing comments. It will not show up in output.".format(
                        type(value).__name__, fnname
                    )
                )
                doc = pretty_fn(value, ctx)
            else:
                # Binding succeeded, so the TypeError came from inside
                # the printer itself: treat it as a printer bug.
                _warn_about_bad_printer(pretty_fn, value, exc=e)
                doc = repr(value)
    else:
        try:
            doc = pretty_fn(value, ctx)
        except Exception as e:
            _warn_about_bad_printer(pretty_fn, value, exc=e)
            doc = repr(value)

    if not (
        isinstance(doc, str) or
        isinstance(doc, Doc)
    ):
        fnname = '{}.{}'.format(
            pretty_fn.__module__,
            pretty_fn.__qualname__
        )
        raise ValueError(
            'Functions decorated with register_pretty must return '
            'an instance of str or Doc. {} returned '
            '{} instead.'.format(fnname, repr(doc))
        )

    ctx.end_visit(value)

    return doc
# Printers registered by dotted path ("module.QualName") before the
# class is importable; promoted to real registrations in is_registered().
_DEFERRED_DISPATCH_BY_NAME = {}
def get_deferred_key(type):
    """Return the dotted lookup key ("module.qualname") for *type*."""
    return '.'.join((type.__module__, type.__qualname__))
# (predicate, printer) pairs consulted, in registration order, for
# values whose type has no registered printer.
_PREDICATE_REGISTRY = []
def _repr_pretty(value, ctx):
    """Last-resort printer: first matching predicate-registered printer
    wins, otherwise fall back to repr()."""
    for predicate, printer in _PREDICATE_REGISTRY:
        if predicate(value):
            return printer(value, ctx)
    return repr(value)
# Base dispatcher falls through to repr()-based printing; singledispatch
# on the value's type selects type-registered printers.
_BASE_DISPATCH = partial(_run_pretty, _repr_pretty)
pretty_dispatch = singledispatch(_BASE_DISPATCH)
def pretty_python_value(value, ctx):
    """Strip comment wrappers from *value*, dispatch to the registered
    pretty printer, and re-attach any plain comment to the result Doc."""
    value, plain_comment, trailing = unwrap_comments(value)

    # Called purely for its side effect: promotes any deferred
    # (string-keyed) registration for this type before dispatching.
    is_registered(
        type(value),
        check_superclasses=True,
        check_deferred=True,
        register_deferred=True
    )

    if trailing:
        doc = pretty_dispatch(value, ctx, trailing_comment=trailing)
    else:
        doc = pretty_dispatch(value, ctx)

    if plain_comment:
        return comment_doc(doc, plain_comment)
    return doc
def register_pretty(type=None, predicate=None):
    """Returns a decorator that registers the decorated function
    as the pretty printer for instances of ``type``.

    :param type: the type to register the pretty printer for, or a ``str``
                 to indicate the module and name, e.g.:
                 ``'collections.Counter'``.
    :param predicate: a predicate function that takes one argument
                      and returns a boolean indicating if the value
                      should be handled by the registered pretty printer.

    Only one of ``type`` and ``predicate`` may be supplied. That means
    that ``predicate`` will be run on unregistered types only.

    The decorated function must accept exactly two positional arguments:

    - ``value`` to pretty print, and
    - ``ctx``, a context value.

    Here's an example of the pretty printer for OrderedDict:

    .. code:: python

        from collections import OrderedDict
        from prettyprinter import register_pretty, pretty_call

        @register_pretty(OrderedDict)
        def pretty_orderreddict(value, ctx):
            return pretty_call(ctx, OrderedDict, list(value.items()))

    :raises ValueError: if neither or both of ``type``/``predicate`` are
        given, if ``predicate`` is not callable, or if the decorated
        function's signature is incompatible.
    """
    if type is None and predicate is None:
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument."
        )

    if type is not None and predicate is not None:
        # BUGFIX: the two string literals previously concatenated
        # without a space ("argument,but not both").
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument, "
            "but not both"
        )

    if predicate is not None:
        if not callable(predicate):
            raise ValueError(
                "Expected a callable for 'predicate', got {}".format(
                    repr(predicate)
                )
            )

    def decorator(fn):
        # Validate the printer's signature at registration time so the
        # failure points at the registration site, not first use.
        sig = inspect.signature(fn)

        value = None
        ctx = None

        try:
            sig.bind(value, ctx)
        except TypeError:
            fnname = '{}.{}'.format(
                fn.__module__,
                fn.__qualname__
            )
            raise ValueError(
                "Functions decorated with register_pretty must accept "
                "exactly two positional parameters: 'value' and 'ctx'. "
                "The function signature for {} was not compatible.".format(
                    fnname
                )
            )

        if type:
            if isinstance(type, str):
                # We don't wrap this with _run_pretty,
                # so that when we register this printer with an actual
                # class, we can call register_pretty(cls)(fn)
                _DEFERRED_DISPATCH_BY_NAME[type] = fn
            else:
                pretty_dispatch.register(type, partial(_run_pretty, fn))
        else:
            assert callable(predicate)
            _PREDICATE_REGISTRY.append((predicate, fn))
        return fn
    return decorator
def is_registered(
    type,
    *,
    check_superclasses=False,
    check_deferred=True,
    register_deferred=True
):
    """Return True if a pretty printer is registered for *type*.

    With ``check_deferred``, string-keyed ("deferred") registrations
    are consulted too; when ``register_deferred`` is also set, a match
    is promoted into a real singledispatch registration as a side
    effect. With ``check_superclasses``, a registration on any base
    class (via the MRO) also counts.
    """
    if not check_deferred and register_deferred:
        raise ValueError(
            'register_deferred may not be True when check_deferred is False'
        )

    if type in pretty_dispatch.registry:
        return True

    if check_deferred:
        # Check deferred printers for the type exactly.
        deferred_key = get_deferred_key(type)
        if deferred_key in _DEFERRED_DISPATCH_BY_NAME:
            if register_deferred:
                deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(
                    deferred_key
                )
                register_pretty(type)(deferred_dispatch)
            return True

    if not check_superclasses:
        return False

    if check_deferred:
        # Check deferred printers for supertypes.
        for supertype in type.__mro__[1:]:
            deferred_key = get_deferred_key(supertype)
            if deferred_key in _DEFERRED_DISPATCH_BY_NAME:
                if register_deferred:
                    deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(
                        deferred_key
                    )
                    register_pretty(supertype)(deferred_dispatch)
                return True

    # singledispatch itself falls back along the MRO; anything other
    # than the base dispatch means some superclass is registered.
    return pretty_dispatch.dispatch(type) is not _BASE_DISPATCH
def bracket(ctx, left, child, right):
    """Wrap *child* between *left*/*right* Docs, indenting the child by
    ctx.indent and allowing line breaks just inside the brackets."""
    inner = nest(ctx.indent, concat([SOFTLINE, child]))
    return concat([left, inner, SOFTLINE, right])
def commentdoc(text):
    """Returns a Doc representing a comment `text`. `text` is
    treated as words, and any whitespace may be used to break
    the comment to multiple lines."""
    if not text:
        raise ValueError(
            'Expected non-empty comment str, got {}'.format(repr(text))
        )

    commentlines = []
    for line in text.splitlines():
        # The capturing group in the pattern makes re.split keep the
        # whitespace runs, so words and whitespace alternate.
        alternating_words_ws = list(filter(None, WHITESPACE_PATTERN_TEXT.split(line)))
        starts_with_whitespace = bool(
            WHITESPACE_PATTERN_TEXT.match(alternating_words_ws[0])
        )

        if starts_with_whitespace:
            prefix = alternating_words_ws[0]
            alternating_words_ws = alternating_words_ws[1:]
        else:
            prefix = NIL

        if len(alternating_words_ws) % 2 == 0:
            # The last part must be whitespace.
            alternating_words_ws = alternating_words_ws[:-1]

        # Each whitespace run becomes a break opportunity that, when
        # taken, re-opens the comment with '# ' on the next line.
        for idx, tup in enumerate(zip(alternating_words_ws, cycle([False, True]))):
            part, is_ws = tup
            if is_ws:
                alternating_words_ws[idx] = flat_choice(
                    when_flat=part,
                    when_broken=always_break(
                        concat([
                            HARDLINE,
                            '# ',
                        ])
                    )
                )

        commentlines.append(
            concat([
                '# ',
                prefix,
                fill(alternating_words_ws)
            ])
        )

    outer = identity
    if len(commentlines) > 1:
        # Multiple source lines force multi-line output.
        outer = always_break

    return annotate(
        Token.COMMENT_SINGLE,
        outer(concat(intersperse(HARDLINE, commentlines)))
    )
def sequence_of_docs(ctx, left, docs, right, dangle=False, force_break=False):
    """Lay out *docs* as a comma-separated sequence between the *left*
    and *right* bracket Docs, rendering per-element comments.

    ``dangle`` appends a trailing comma (used for 1-tuples);
    ``force_break`` forces multi-line output.
    """
    docs = list(docs)

    # Performance optimization:
    # in case of really long sequences,
    # the layout algorithm can be quite slow.
    # No branching here is needed if the sequence
    # is long enough that even with the shortest
    # element output, it does not fit the ribbon width.
    minimum_output_len = (
        2 +  # Assume left and right are one character each
        len(', ') * (len(docs) - 1) +
        len(docs)  # each element must take at least one character
    )

    MAX_PRACTICAL_RIBBON_WIDTH = 150

    will_break = force_break or minimum_output_len > MAX_PRACTICAL_RIBBON_WIDTH

    has_comment = any(is_commented(doc) for doc in docs)

    parts = []
    for idx, doc in enumerate(docs):
        last = idx == len(docs) - 1

        if is_commented(doc):
            comment_str = doc.annotation.value

            # Try to fit the comment at the end of the same line.
            flat_version = concat([
                doc,
                COMMA if not last else NIL,
                ' ',
                commentdoc(comment_str),
                HARDLINE if not last else NIL
            ])

            # If the value is broken to multiple lines, add
            # comment on the line above.
            broken_version = concat([
                commentdoc(comment_str),
                HARDLINE,
                doc,
                COMMA if not last else NIL,
                HARDLINE if not last else NIL
            ])
            parts.append(
                group(
                    flat_choice(
                        when_flat=flat_version,
                        when_broken=broken_version,
                    )
                )
            )
        else:
            parts.append(doc)

            # Commented elements carry their own separators above.
            if not last:
                parts.append(
                    concat([COMMA, LINE])
                )

    if dangle:
        parts.append(COMMA)

    outer = (
        always_break
        if will_break or has_comment
        else group
    )

    return outer(bracket(ctx, left, concat(parts), right))
def pretty_call(ctx, fn, *args, **kwargs):
    """Return a Doc rendering a call to ``fn`` with the remaining
    positional and keyword arguments.

    Thin wrapper over :func:`pretty_call_alt`. Because the keyword
    arguments are collected through ``**kwargs``, their order is only
    maintained on Python 3.6+; on 3.5, use
    :func:`~prettyprinter.pretty_call_alt` directly.

    Given an arbitrary context ``ctx``::

        pretty_call(ctx, sorted, [7, 4, 5], reverse=True)

    will result in output::

        sorted([7, 4, 5], reverse=True)

    broken over several lines by the layout algorithm when it doesn't
    fit. Syntax highlighting is handled automatically.

    :param ctx: a context value
    :type ctx: prettyprinter.prettyprinter.PrettyContext
    :param fn: a callable
    :param args: positional arguments to render to the call
    :param kwargs: keyword arguments to render to the call
    :returns: :class:`~prettyprinter.doc.Doc`
    """
    return pretty_call_alt(ctx, fn, args, kwargs)
def pretty_call_alt(ctx, fn, args=(), kwargs=()):
    """Returns a Doc that represents a function call to :keyword:`fn` with
    the ``args`` and ``kwargs``.

    Given an arbitrary context ``ctx``,::

        pretty_call_alt(ctx, sorted, args=([7, 4, 5], ), kwargs=[('reverse', True)])

    Will result in output::

        sorted([7, 4, 5], reverse=True)

    The layout algorithm will automatically break the call to multiple
    lines if needed::

        sorted(
            [7, 4, 5],
            reverse=True
        )

    ``pretty_call_alt`` automatically handles syntax highlighting.

    :param ctx: a context value
    :type ctx: prettyprinter.prettyprinter.PrettyContext
    :param fn: a callable
    :param args: a ``tuple`` of positional arguments to render to the call
    :param kwargs: keyword arguments to render to the call. Either an instance
                   of ``OrderedDict``, or an iterable of two-tuples, where the
                   first element is a `str` (key), and the second is the Python
                   value for that keyword argument.
    :returns: :class:`~prettyprinter.doc.Doc`
    """
    fndoc = general_identifier(fn)

    if ctx.depth_left <= 0:
        # Recursion budget exhausted: render fn(...).
        return concat([fndoc, LPAREN, ELLIPSIS, RPAREN])

    if not kwargs and len(args) == 1:
        # A sole bracketed argument hugs the call parens.
        sole_arg = args[0]
        unwrapped_sole_arg, _comment, _trailing_comment = unwrap_comments(args[0])
        if type(unwrapped_sole_arg) in (list, dict, tuple):
            return build_fncall(
                ctx,
                fndoc,
                argdocs=[pretty_python_value(sole_arg, ctx)],
                hug_sole_arg=True,
            )

    nested_ctx = (
        ctx
        .nested_call()
        .use_multiline_strategy(MULTILINE_STRATEGY_HANG)
    )

    if not DICT_KEY_ORDER_SUPPORTED and isinstance(kwargs, dict):
        warnings.warn(
            "A dict was passed to pretty_call_alt to represent kwargs, "
            "but Python 3.5 doesn't maintain key order for dicts. The order "
            "of keyword arguments will be undefined in the output. "
            "To fix this, pass a list of two-tuples or an instance of "
            "OrderedDict instead.",
            UserWarning
        )

    kwargitems = (
        kwargs.items()
        if isinstance(kwargs, (OrderedDict, dict))
        else kwargs
    )

    return build_fncall(
        ctx,
        fndoc,
        argdocs=(
            pretty_python_value(arg, nested_ctx)
            for arg in args
        ),
        kwargdocs=(
            (kwarg, pretty_python_value(v, nested_ctx))
            for kwarg, v in kwargitems
        ),
    )
def build_fncall(
    ctx,
    fndoc,
    argdocs=(),
    kwargdocs=(),
    hug_sole_arg=False,
    trailing_comment=None,
):
    """Builds a doc that looks like a function call,
    from docs that represent the function, arguments
    and keyword arguments.

    If ``hug_sole_arg`` is True, and the represented
    functional call is done with a single non-keyword
    argument, the function call parentheses will hug
    the sole argument doc without newlines and indentation
    in break mode. This makes a difference in calls
    like this::

        > hug_sole_arg = False
        frozenset(
            [
                1,
                2,
                3,
                4,
                5
            ]
        )
        > hug_sole_arg = True
        frozenset([
            1,
            2,
            3,
            4,
            5,
        ])

    If ``trailing_comment`` is provided, the text is
    rendered as a comment after the last argument and
    before the closing parenthesis. This will force
    the function call to be broken to multiple lines.
    """
    if callable(fndoc):
        # Convenience: accept the callable itself in place of a Doc.
        fndoc = general_identifier(fndoc)

    has_comment = bool(trailing_comment)

    argdocs = list(argdocs)
    kwargdocs = list(kwargdocs)

    kwargdocs = [
        # Propagate any comments to the kwarg doc.
        (
            comment_doc(
                concat([
                    keyword_arg(binding),
                    ASSIGN_OP,
                    doc.doc
                ]),
                doc.annotation.value
            )
            if is_commented(doc)
            else concat([
                keyword_arg(binding),
                ASSIGN_OP,
                doc
            ])
        )
        for binding, doc in kwargdocs
    ]

    if not (argdocs or kwargdocs):
        # Zero-argument call: fn().
        return concat([
            fndoc,
            LPAREN,
            RPAREN,
        ])

    if (
        hug_sole_arg and
        not kwargdocs and
        len(argdocs) == 1 and
        not is_commented(argdocs[0])
    ):
        return group(
            concat([
                fndoc,
                LPAREN,
                argdocs[0],
                RPAREN
            ])
        )

    allarg_docs = [*argdocs, *kwargdocs]

    if trailing_comment:
        allarg_docs.append(commentdoc(trailing_comment))

    parts = []

    for idx, doc in enumerate(allarg_docs):
        last = idx == len(allarg_docs) - 1

        if is_commented(doc):
            has_comment = True
            comment_str = doc.annotation.value
            doc = doc.doc
        else:
            comment_str = None

        part = concat([doc, NIL if last else COMMA])

        if comment_str:
            # Flat: comment trails the argument on the same line.
            # Broken: comment goes on its own line above the argument.
            part = group(
                flat_choice(
                    when_flat=concat([
                        part,
                        ' ',
                        commentdoc(comment_str)
                    ]),
                    when_broken=concat([
                        commentdoc(comment_str),
                        HARDLINE,
                        part,
                    ]),
                )
            )

        if not last:
            part = concat([part, HARDLINE if has_comment else LINE])

        parts.append(part)

    outer = (
        always_break
        if has_comment
        else group
    )

    return outer(
        concat([
            fndoc,
            LPAREN,
            nest(
                ctx.indent,
                concat([
                    SOFTLINE,
                    concat(parts),
                ])
            ),
            SOFTLINE,
            RPAREN
        ])
    )
@register_pretty(type)
def pretty_type(_type, ctx):
    """Pretty printer for classes themselves."""
    if _type is type(None):  # noqa
        # NoneType is not a global name; print an evaluable
        # type(None) call instead.
        return pretty_call_alt(ctx, type, args=(None, ))

    result = general_identifier(_type)

    # Builtin and __main__ names are self-explanatory (e.g. "int").
    # Anything else could be mistaken for an ordinary attribute access
    # (e.g. "functools.partial"), so mirror Python's <class '...'>
    # repr by tagging the output with a "# class" comment.
    if _type.__module__ in IMPLICIT_MODULES:
        return result
    return comment(result, 'class')
@register_pretty(FunctionType)
def pretty_function(fn, ctx):
    """Render a Python function as its identifier plus a '# function'
    comment."""
    return comment(general_identifier(fn), 'function')
@register_pretty(BuiltinMethodType)
def pretty_builtin_method(method, ctx):
    """Render a builtin method as its identifier plus a
    '# built-in method' comment."""
    return comment(general_identifier(method), 'built-in method')
@register_pretty(BuiltinFunctionType)
def pretty_builtin_function(fn, ctx):
    """Render a builtin function as its identifier plus a
    '# built-in function' comment."""
    return comment(general_identifier(fn), 'built-in function')
# Class attributes that pure-Python collections.namedtuple classes
# define; used for duck-typed detection in _is_namedtuple.
namedtuple_clsattrs = (
    '__slots__',
    '_make',
    '_replace',
    '_asdict'
)

# Integer class attributes that identify C structseq types
# ("cnamedtuples", e.g. time.struct_time); see _is_cnamedtuple.
c_namedtuple_identify_by_clsattrs = (
    'n_fields',
    'n_sequence_fields',
    'n_unnamed_fields'
)
def _is_namedtuple(value):
    """Duck-typed check: does value's class expose the namedtuple API
    (_make, _replace, _asdict, __slots__)?"""
    cls = type(value)
    return all(hasattr(cls, attrname) for attrname in namedtuple_clsattrs)
def _is_cnamedtuple(value):
    """Detect C structseq types ("cnamedtuples") by their integer
    n_fields-style class attributes."""
    cls = type(value)
    return all(
        isinstance(getattr(cls, attrname, None), int)
        for attrname in c_namedtuple_identify_by_clsattrs
    )
def pretty_namedtuple(value, ctx, trailing_comment=None):
    """Render a collections.namedtuple as a keyword-argument call.

    NOTE(review): ``trailing_comment`` is accepted for dispatch
    compatibility but is not rendered here.
    """
    cls = type(value)
    return pretty_call_alt(ctx, cls, kwargs=list(zip(cls._fields, value)))
# Given a cnamedtuple (C structseq) value, returns the tuple of its
# field names; the name at index i corresponds to element i.
def resolve_cnamedtuple_fieldnames(value):
    """Recover a C structseq's field names by parsing its repr.

    CPython's structseq repr lists every element as a keyword argument
    in element order (see Objects/structseq.c in the CPython sources),
    so parsing the repr as a call expression yields the field names.
    Raises if the repr is not in that shape.
    """
    call_node = ast.parse(repr(value), mode='eval').body
    return tuple(keyword_node.arg for keyword_node in call_node.keywords)
# Keys: classes/constructors (held weakly so caching doesn't keep
#       classes alive).
# Values: a tuple of fieldnames if resolving them was successful.
#         Otherwise, an exception that was raised when attempting
#         to resolve the fieldnames.
_cnamedtuple_fieldnames_by_class = WeakKeyDictionary()
# Examples of cnamedtuples:
# - return value of time.strptime()
# - return value of os.uname()
def pretty_cnamedtuple(value, ctx, trailing_comment=None):
    """Render a C structseq as a call, annotating each element with its
    field name as a comment. Field names are resolved once per class
    and cached (failures are cached too, then re-raised)."""
    cls = type(value)
    if cls not in _cnamedtuple_fieldnames_by_class:
        try:
            fieldnames = resolve_cnamedtuple_fieldnames(value)
        except Exception as exc:
            # Cache the failure so a bad repr isn't re-parsed for
            # every instance of this class.
            fieldnames = exc
        _cnamedtuple_fieldnames_by_class[cls] = fieldnames

    fieldnames = _cnamedtuple_fieldnames_by_class[cls]
    if isinstance(fieldnames, Exception):
        raise fieldnames

    return pretty_call_alt(
        ctx,
        cls,
        args=tuple([
            tuple(
                comment(val, fieldname)
                for val, fieldname in zip(value, fieldnames)
            )
        ])
    )
@register_pretty(tuple)
@register_pretty(list)
@register_pretty(set)
def pretty_bracketable_iterable(value, ctx, trailing_comment=None):
    """Shared printer for tuples, lists and sets (and their subclasses).

    Namedtuples and C structseqs are detected and routed to their own
    printers; sequences longer than ctx.max_seq_len are truncated with
    a '...and N more elements' trailing comment; subclasses are wrapped
    in a constructor call around the literal.
    """
    constructor = type(value)

    if isinstance(value, tuple):
        if _is_cnamedtuple(value):
            try:
                return pretty_cnamedtuple(
                    value,
                    ctx,
                    trailing_comment=trailing_comment
                )
            except Exception:
                pass  # render as a normal tuple
        elif _is_namedtuple(value):
            return pretty_namedtuple(value, ctx, trailing_comment=trailing_comment)

    is_native_type = constructor in (tuple, list, set)
    if len(value) > ctx.max_seq_len:
        truncation_comment = '...and {} more elements'.format(
            len(value) - ctx.max_seq_len
        )

        # Merge with any caller-provided trailing comment.
        trailing_comment = (
            truncation_comment + '. ' + trailing_comment
            if trailing_comment
            else truncation_comment
        )

    dangle = False

    if isinstance(value, list):
        left, right = LBRACKET, RBRACKET
    elif isinstance(value, tuple):
        left, right = LPAREN, RPAREN
        if len(value) == 1:
            # One-element tuples need the trailing comma: (x, )
            dangle = True
    elif isinstance(value, set):
        left, right = LBRACE, RBRACE

    if not value:
        if isinstance(value, (list, tuple)):
            if is_native_type:
                return concat([left, right])
            return pretty_call_alt(ctx, constructor)
        else:
            # E.g. set() or SubclassOfSet()
            return pretty_call_alt(ctx, constructor)

    if ctx.depth_left == 0:
        # Recursion budget exhausted: render [...] / (...) / set(...).
        if isinstance(value, (list, tuple)):
            literal = concat([left, ELLIPSIS, right])
            if is_native_type:
                return literal
            return build_fncall(
                ctx,
                general_identifier(constructor),
                argdocs=(literal, ),
                hug_sole_arg=True
            )
        else:
            return pretty_call_alt(ctx, constructor, args=(..., ))

    if len(value) == 1:
        sole_value = list(value)[0]
        els = [
            pretty_python_value(
                sole_value,
                ctx=(
                    ctx
                    .nested_call()
                    .use_multiline_strategy(MULTILINE_STRATEGY_PLAIN)
                )
            )
        ]
    else:
        els = (
            pretty_python_value(
                el,
                ctx=(
                    ctx
                    .nested_call()
                    .use_multiline_strategy(MULTILINE_STRATEGY_HANG)
                )
            )
            for el in take(ctx.max_seq_len, value)
        )

    if trailing_comment:
        els = chain(els, [commentdoc(trailing_comment)])
        # Output is forced multi-line below, so the 1-tuple dangling
        # comma is no longer needed.
        dangle = False

    literal = sequence_of_docs(
        ctx,
        left,
        els,
        right,
        dangle=dangle,
        force_break=bool(trailing_comment)
    )

    if is_native_type:
        return literal

    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(literal, ),
        hug_sole_arg=True
    )
@register_pretty(frozenset)
def pretty_frozenset(value, ctx):
    """Render a frozenset (or subclass) as a constructor call; empty
    sets get a bare call with no argument."""
    constructor = type(value)
    if not value:
        return pretty_call_alt(ctx, constructor)
    return pretty_call_alt(ctx, constructor, args=(list(value), ))
class _AlwaysSortable(object):
    """Sort-key wrapper that never raises: when wrapped values are not
    mutually orderable, ordering falls back to (type name, id)."""

    __slots__ = ('value', )

    def __init__(self, value):
        self.value = value

    def sortable_value(self):
        # Arbitrary but total ordering: type name first, object
        # identity as the tiebreaker.
        return (str(type(self)), id(self))

    def __lt__(self, other):
        try:
            return self.value < other.value
        except TypeError:
            return self.sortable_value() < other.sortable_value()
@register_pretty(dict)
def pretty_dict(d, ctx, trailing_comment=None):
    """Pretty printer for dicts and dict subclasses.

    Handles key and value comments, truncation past ctx.max_seq_len,
    optional key sorting (ctx.sort_dict_keys), and wraps subclasses in
    a constructor call around the dict literal.
    """
    constructor = type(d)
    is_native_type = constructor is dict
    if ctx.depth_left == 0:
        # Recursion budget exhausted: render {...}.
        literal = concat([LBRACE, ELLIPSIS, RBRACE])

        if is_native_type:
            return literal

        return build_fncall(
            ctx,
            general_identifier(constructor),
            argdocs=(literal, ),
            hug_sole_arg=True
        )

    if len(d) > ctx.max_seq_len:
        count_truncated = len(d) - ctx.max_seq_len
        truncation_comment = '...and {} more elements'.format(
            count_truncated
        )
        # Merge with any caller-provided trailing comment.
        trailing_comment = (
            truncation_comment + '. ' + trailing_comment
            if trailing_comment
            else truncation_comment
        )
    has_comment = bool(trailing_comment)

    sorted_keys = (
        sorted(d.keys(), key=_AlwaysSortable)
        if ctx.sort_dict_keys
        else d.keys()
    )

    # First pass: render each key/value pair, stripping any comment
    # annotations so the layout pass below can place them.
    pairs = []
    for k in take(ctx.max_seq_len, sorted_keys):
        v = d[k]

        if isinstance(k, (str, bytes)):
            kdoc = pretty_str(
                k,
                # not a nested call on purpose
                ctx=ctx.use_multiline_strategy(MULTILINE_STRATEGY_PARENS),
            )
        else:
            kdoc = pretty_python_value(
                k,
                ctx=ctx.nested_call()
            )

        vdoc = pretty_python_value(
            v,
            ctx=(
                ctx
                .nested_call()
                .use_multiline_strategy(MULTILINE_STRATEGY_INDENTED)
            ),
        )

        kcomment = None
        if is_commented(kdoc):
            has_comment = True
            kcomment = kdoc.annotation.value
            kdoc = kdoc.doc

        vcomment = None
        if is_commented(vdoc):
            has_comment = True
            vcomment = vdoc.annotation.value
            vdoc = vdoc.doc

        pairs.append((k, v, kdoc, vdoc, kcomment, vcomment))

    # Second pass: lay out the pairs, placing comments.
    parts = []
    for idx, tup in enumerate(pairs):
        last = idx == len(pairs) - 1

        k, v, kdoc, vdoc, kcomment, vcomment = tup

        if not (kcomment or vcomment):
            parts.append(
                concat([
                    kdoc,
                    concat([COLON, ' ']),
                    vdoc,
                    NIL if last else COMMA,
                    NIL if last else LINE,
                ]),
            )
            continue

        if kcomment:
            # Key comments always go on their own line above the key.
            kcommented = concat([
                commentdoc(kcomment),
                HARDLINE,
                kdoc,
            ])
        else:
            kcommented = kdoc

        if vcomment:
            vcommented = group(
                flat_choice(
                    # Add comment at the end of the line
                    when_flat=concat([
                        vdoc,
                        NIL if last else COMMA,
                        ' ',
                        commentdoc(vcomment),
                        NIL if last else HARDLINE,
                    ]),

                    # Put comment above the value
                    # on its own line
                    when_broken=concat([
                        nest(
                            ctx.indent,
                            concat([
                                HARDLINE,
                                commentdoc(vcomment),
                                HARDLINE,
                                # Rerender vdoc with plain multiline strategy,
                                # since we already have an indentation.
                                pretty_python_value(
                                    v,
                                    ctx=(
                                        ctx
                                        .nested_call()
                                        .use_multiline_strategy(MULTILINE_STRATEGY_PLAIN)
                                    ),
                                ),
                                COMMA if not last else NIL,
                                HARDLINE if not last else NIL
                            ])
                        ),
                    ])
                )
            )
        else:
            vcommented = concat([
                vdoc,
                COMMA if not last else NIL,
                LINE if not last else NIL
            ])

        parts.append(
            concat([
                kcommented,
                concat([COLON, ' ']),
                vcommented
            ])
        )

    if trailing_comment:
        parts.append(concat([
            HARDLINE,
            commentdoc(trailing_comment)
        ]))

    doc = bracket(
        ctx,
        LBRACE,
        concat(parts),
        RBRACE,
    )

    if len(pairs) > 2 or has_comment:
        doc = always_break(doc)
    else:
        doc = group(doc)

    if is_native_type:
        return doc

    if not parts:
        return pretty_call_alt(ctx, constructor)

    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(doc, ),
        hug_sole_arg=True
    )
INF_FLOAT = float('inf')
NEG_INF_FLOAT = float('-inf')


@register_pretty(float)
def pretty_float(value, ctx):
    """Render a float literal. inf, -inf and nan are shown as evaluable
    float('...') calls since their repr isn't a valid literal; float
    subclasses are wrapped in a constructor call."""
    constructor = type(value)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, constructor, args=(..., ))

    special = None
    if value == INF_FLOAT:
        special = 'inf'
    elif value == NEG_INF_FLOAT:
        special = '-inf'
    elif math.isnan(value):
        special = 'nan'

    if special is not None:
        return pretty_call_alt(ctx, constructor, args=(special, ))

    literal = annotate(Token.NUMBER_FLOAT, repr(value))
    if constructor is float:
        return literal
    return build_fncall(ctx, general_identifier(constructor), argdocs=(literal, ))
@register_pretty(int)
def pretty_int(value, ctx):
    """Render an int literal; int subclasses are wrapped in a
    constructor call around the literal."""
    constructor = type(value)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, constructor, args=(..., ))

    literal = annotate(Token.NUMBER_INT, repr(value))
    if constructor is int:
        return literal
    return build_fncall(ctx, general_identifier(constructor), argdocs=(literal, ))
@register_pretty(type(...))
def pretty_ellipsis(value, ctx):
    """The Ellipsis singleton prints as the '...' literal."""
    return ELLIPSIS
@register_pretty(bool)
def pretty_bool(value, ctx):
    """Render True/False; bool subclasses wrap the keyword in a
    constructor call."""
    constructor = type(value)
    keyword = annotate(Token.KEYWORD_CONSTANT, 'True' if value else 'False')
    if constructor is bool:
        return keyword
    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(keyword, )
    )
# Shared Doc for the None keyword; built once at import time.
NONE_DOC = annotate(Token.KEYWORD_CONSTANT, 'None')


@register_pretty(type(None))
def pretty_none(value, ctx):
    """None prints as the keyword constant."""
    return NONE_DOC
# Quote characters in str and bytes form. The strategy helpers below
# pick between single and double quotes to minimize escaping; note
# that determine_quote_strategy always returns a *str* quote, even for
# bytes input, to match the quote character repr() emits.
SINGLE_QUOTE_TEXT = "'"
SINGLE_QUOTE_BYTES = b"'"

DOUBLE_QUOTE_TEXT = '"'
DOUBLE_QUOTE_BYTES = b'"'
def determine_quote_strategy(s):
    """Pick ' or " for rendering *s* (str or bytes), preferring the
    quote that requires fewer escapes; ties go to single quotes.

    Always returns a *str* quote character, even for bytes input.
    """
    if isinstance(s, str):
        single, double = SINGLE_QUOTE_TEXT, DOUBLE_QUOTE_TEXT
    else:
        single, double = SINGLE_QUOTE_BYTES, DOUBLE_QUOTE_BYTES

    if single not in s:
        return SINGLE_QUOTE_TEXT
    if double not in s:
        return DOUBLE_QUOTE_TEXT

    # Both quote kinds occur: escape whichever appears less often.
    if s.count(single) <= s.count(double):
        return SINGLE_QUOTE_TEXT
    return DOUBLE_QUOTE_TEXT
def escape_str_for_quote(use_quote, s):
    """Escape *s* (str or bytes) for embedding between *use_quote*
    characters.

    Delegates escaping to repr() and then, when repr chose the other
    quote character, swaps the quote escaping to match *use_quote*.
    Returns the escaped text without the surrounding quotes or prefix.
    """
    escaped_with_quotes = repr(s)
    repr_used_quote = escaped_with_quotes[-1]

    # string may have a prefix (e.g. b'...'), so find the opening quote.
    first_quote_at_index = escaped_with_quotes.find(repr_used_quote)
    repr_escaped = escaped_with_quotes[first_quote_at_index + 1:-1]

    if repr_used_quote == use_quote:
        # repr produced the quotes we wanted -
        # escaping is correct.
        return repr_escaped

    # repr produced different quotes, which escapes
    # alternate quotes.
    if use_quote == SINGLE_QUOTE_TEXT:
        # repr used double quotes: unescape them, escape singles.
        return (
            repr_escaped
            .replace('\\"', DOUBLE_QUOTE_TEXT)
            .replace(SINGLE_QUOTE_TEXT, "\\'")
        )
    else:
        # repr used single quotes: unescape them, escape doubles.
        return (
            repr_escaped
            .replace("\\'", SINGLE_QUOTE_TEXT)
            .replace(DOUBLE_QUOTE_TEXT, '\\"')
        )
# Matches the escape sequences repr() can emit inside a string literal:
# simple backslash escapes, \N{...}, \uXXXX, \UXXXXXXXX, \xXX and octal
# escapes. The capturing group makes re.split keep the matches.
STR_LITERAL_ESCAPES = re.compile(
    r'''((?:\\[\\abfnrtv"'])|'''
    r'(?:\\N\{.*?\})|'
    r'(?:\\u[a-fA-F0-9]{4})|'
    r'(?:\\U[a-fA-F0-9]{8})|'
    r'(?:\\x[a-fA-F0-9]{2})|'
    r'(?:\\[0-7]{1,3}))'
)
def highlight_escapes(s):
    """Annotate *s* (an already repr-escaped ``str``) so that escape
    sequences are highlighted differently from ordinary literal text."""
    if not s:
        return NIL
    # re.split with a capturing group yields alternating runs of
    # plain text and escape sequences.
    matches = STR_LITERAL_ESCAPES.split(s)
    starts_with_match = bool(STR_LITERAL_ESCAPES.match(matches[0]))
    docs = []
    for part, is_escaped in zip(
        matches,
        cycle([starts_with_match, not starts_with_match])
    ):
        if not part:
            # Empty runs occur e.g. when s starts with an escape.
            continue
        docs.append(
            annotate(
                (
                    Token.STRING_ESCAPE
                    if is_escaped
                    else Token.LITERAL_STRING
                ),
                part
            )
        )
    return concat(docs)
def pretty_single_line_str(s, indent, use_quote=None):
    """Return a Doc for *s* rendered on one line, quotes included.

    NOTE(review): ``indent`` is not used in this body — presumably kept
    for interface symmetry with other helpers; confirm before removing.
    """
    # bytes get their 'b' prefix highlighted as a string affix.
    prefix = (
        annotate(Token.STRING_AFFIX, 'b')
        if isinstance(s, bytes)
        else ''
    )
    if use_quote is None:
        use_quote = determine_quote_strategy(s)
    escaped = escape_str_for_quote(use_quote, s)
    escapes_highlighted = highlight_escapes(escaped)
    return concat([
        prefix,
        annotate(
            Token.LITERAL_STRING,
            concat([
                use_quote,
                escapes_highlighted,
                use_quote
            ])
        )
    ])
def split_at(idx, sequence):
    """Split *sequence* into its first *idx* items and the remainder."""
    head = sequence[:idx]
    tail = sequence[idx:]
    return (head, tail)
def escaped_len(s, use_quote):
    """Length of *s* after repr-style escaping for ``use_quote``."""
    return len(escape_str_for_quote(use_quote, s))
def str_to_lines(max_len, use_quote, s, pattern=None):
    """Yield consecutive chunks of *s* (``str`` or ``bytes``) whose
    repr-escaped length is at most ``max_len``, preferring to break at
    whitespace (or at non-word characters when there is no whitespace).

    ``use_quote`` is the quote character the chunks will eventually be
    wrapped in; it affects the escaped length of each chunk.
    ``pattern`` optionally overrides the split regex; it must have a
    capturing group so splits alternate content/separator.
    """
    assert max_len > 0, "max_len must be positive"
    if len(s) <= max_len:
        if s:
            yield s
        return
    if pattern is None:
        if isinstance(s, str):
            whitespace_pattern = WHITESPACE_PATTERN_TEXT
            nonword_pattern = NONWORD_PATTERN_TEXT
        else:
            assert isinstance(s, bytes)
            whitespace_pattern = WHITESPACE_PATTERN_BYTES
            nonword_pattern = NONWORD_PATTERN_BYTES
        alternating_words_ws = whitespace_pattern.split(s)
        pattern = whitespace_pattern
        if len(alternating_words_ws) <= 1:
            # no whitespace: try splitting with nonword pattern.
            alternating_words_ws = nonword_pattern.split(s)
            pattern = nonword_pattern
    else:
        alternating_words_ws = pattern.split(s)
    if isinstance(s, str):
        empty = ''
    else:
        assert isinstance(s, bytes)
        empty = b''
    starts_with_whitespace = bool(pattern.match(alternating_words_ws[0]))
    # List[Tuple[str, bool]]
    # The boolean associated with each part indicates if it is a
    # whitespace/non-word part or not.
    tagged_alternating = iter(
        zip(
            alternating_words_ws,
            cycle([starts_with_whitespace, not starts_with_whitespace])
        )
    )
    next_part = None
    next_is_whitespace = None
    curr_line_parts = []
    curr_line_len = 0
    while True:
        if not next_part:
            try:
                next_part, next_is_whitespace = next(tagged_alternating)
            except StopIteration:
                break
            if not next_part:
                continue
        # We think of the current line as including next_part,
        # but as an optimization we don't append to curr_line_parts,
        # as we often would have to pop it back out.
        next_escaped_len = escaped_len(next_part, use_quote)
        curr_line_len += next_escaped_len
        if curr_line_len == max_len:
            if not next_is_whitespace and len(curr_line_parts) > 1:
                yield empty.join(curr_line_parts)
                curr_line_parts = []
                curr_line_len = 0
                # Leave next_part and next_is_whitespace as is
                # to be processed on next iteration
            else:
                yield empty.join(chain(curr_line_parts, [next_part]))
                curr_line_parts = []
                curr_line_len = 0
                next_part = None
                next_is_whitespace = None
        elif curr_line_len > max_len:
            if not next_is_whitespace and curr_line_parts:
                yield empty.join(curr_line_parts)
                curr_line_parts = []
                curr_line_len = 0
                # Leave next_part and next_is_whitespace as is
                # to be processed on next iteration
                continue
            # The part is too long to fit: hard-split it at the
            # remaining width on this line.
            remaining_len = max_len - (curr_line_len - next_escaped_len)
            this_line_part, next_line_part = split_at(max(remaining_len, 0), next_part)
            if this_line_part:
                curr_line_parts.append(this_line_part)
            if curr_line_parts:
                yield empty.join(curr_line_parts)
            curr_line_parts = []
            curr_line_len = 0
            if next_line_part:
                next_part = next_line_part
            else:
                next_part = None
        else:
            curr_line_parts.append(next_part)
            next_part = None
            next_is_whitespace = None
    if curr_line_parts:
        yield empty.join(curr_line_parts)
@register_pretty(str)
@register_pretty(bytes)
def pretty_str(s, ctx, split_pattern=None):
    """Render a str/bytes value, breaking it into adjacent string
    literals across multiple lines when it does not fit on one line.
    The layout depends on ``ctx.multiline_strategy``."""
    # Subclasses of str/bytes
    # will be printed as StrSubclass('the actual string')
    constructor = type(s)
    is_native_type = constructor in (str, bytes)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, constructor, args=(..., ))
    multiline_strategy = ctx.multiline_strategy
    prettyprinter_indent = ctx.indent
    def evaluator(indent, column, page_width, ribbon_width):
        # Runs at layout time, when the actual column position is known.
        nonlocal multiline_strategy
        columns_left_in_line = page_width - column
        columns_left_in_ribbon = indent + ribbon_width - column
        available_width = min(columns_left_in_line, columns_left_in_ribbon)
        # NOTE(review): counts two quote characters of overhead; the 'b'
        # prefix of bytes and escape expansion are not counted here.
        singleline_str_chars = len(s) + len('""')
        flat_version = pretty_single_line_str(s, prettyprinter_indent)
        if singleline_str_chars <= available_width:
            if is_native_type:
                return flat_version
            return build_fncall(ctx, constructor, argdocs=[flat_version])
        # multiline string
        each_line_starts_on_col = indent
        each_line_ends_on_col = min(page_width, each_line_starts_on_col + ribbon_width)
        each_line_max_str_len = max(
            each_line_ends_on_col - each_line_starts_on_col - 2,
            # If we're printing the string inside a highly nested data
            # structure, we may naturally run out of available width.
            # In these cases, we need to give some space for printing
            # such that we don't get stuck in an infinite loop when
            # str_to_lines is called.
            8 + len('""')
        )
        use_quote = determine_quote_strategy(s)
        lines = list(str_to_lines(
            max_len=each_line_max_str_len,
            use_quote=use_quote,
            s=s,
            pattern=split_pattern,
        ))
        if len(lines) == 1:
            return flat_version
        parts = intersperse(
            HARDLINE,
            (
                pretty_single_line_str(
                    line,
                    indent=prettyprinter_indent,
                    use_quote=use_quote,
                )
                for line in lines
            )
        )
        if not is_native_type:
            # Subclasses get wrapped in a constructor call, so the
            # literals themselves are rendered plainly inside it.
            multiline_strategy = MULTILINE_STRATEGY_PLAIN
        if multiline_strategy == MULTILINE_STRATEGY_PLAIN:
            res = always_break(concat(parts))
            if is_native_type:
                return res
            return build_fncall(ctx, constructor, argdocs=[res])
        elif multiline_strategy == MULTILINE_STRATEGY_HANG:
            return always_break(
                nest(
                    prettyprinter_indent,
                    concat(parts)
                )
            )
        else:
            if multiline_strategy == MULTILINE_STRATEGY_PARENS:
                left_paren, right_paren = LPAREN, RPAREN
            else:
                assert multiline_strategy == MULTILINE_STRATEGY_INDENTED
                left_paren, right_paren = '', ''
            return always_break(
                concat([
                    left_paren,
                    nest(
                        prettyprinter_indent,
                        concat([
                            HARDLINE,
                            *parts,
                        ])
                    ),
                    (
                        HARDLINE
                        if multiline_strategy == MULTILINE_STRATEGY_PARENS
                        else NIL
                    ),
                    right_paren
                ])
            )
    return contextual(evaluator)
def _pretty_recursion(value):
    """Placeholder repr used when *value* is already being printed
    higher up the stack (i.e. a self-referential structure)."""
    typename = type(value).__name__
    return '<Recursion on {} with id={}>'.format(typename, id(value))
def python_to_sdocs(
    value,
    indent,
    width,
    depth,
    ribbon_width,
    max_seq_len,
    sort_dict_keys
):
    """Pretty print *value* into a stream of simple documents (sdocs)
    laid out to fit ``width`` columns.

    ``depth=None`` means unlimited nesting depth.
    """
    if depth is None:
        depth = float('inf')
    doc = pretty_python_value(
        value,
        ctx=PrettyContext(
            indent=indent,
            depth_left=depth,
            visited=set(),
            max_seq_len=max_seq_len,
            sort_dict_keys=sort_dict_keys
        )
    )
    if is_commented(doc):
        # A top-level comment either trails the value on the same line
        # (flat) or precedes it on its own line (broken).
        doc = group(
            flat_choice(
                when_flat=concat([
                    doc,
                    ' ',
                    commentdoc(doc.annotation.value),
                ]),
                when_broken=concat([
                    commentdoc(doc.annotation.value),
                    HARDLINE,
                    doc
                ])
            )
        )
    # The ribbon can never be wider than the page.
    ribbon_frac = min(1.0, ribbon_width / width)
    return layout_smart(doc, width=width, ribbon_frac=ribbon_frac)
|
tommikaikkonen/prettyprinter
|
prettyprinter/prettyprinter.py
|
register_pretty
|
python
|
def register_pretty(type=None, predicate=None):
    """Return a decorator registering the decorated function as the
    pretty printer for ``type`` (a class, or a dotted-name string for
    deferred registration) or for values matching ``predicate``.
    Exactly one of the two arguments must be provided; the decorated
    function must accept exactly ``(value, ctx)``.
    """
    if type is None and predicate is None:
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument."
        )
    if type is not None and predicate is not None:
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument,"
            "but not both"
        )
    if predicate is not None:
        if not callable(predicate):
            raise ValueError(
                "Expected a callable for 'predicate', got {}".format(
                    repr(predicate)
                )
            )
    def decorator(fn):
        # Validate that fn accepts the (value, ctx) calling convention.
        sig = inspect.signature(fn)
        value = None
        ctx = None
        try:
            sig.bind(value, ctx)
        except TypeError:
            fnname = '{}.{}'.format(
                fn.__module__,
                fn.__qualname__
            )
            raise ValueError(
                "Functions decorated with register_pretty must accept "
                "exactly two positional parameters: 'value' and 'ctx'. "
                "The function signature for {} was not compatible.".format(
                    fnname
                )
            )
        if type:
            if isinstance(type, str):
                # We don't wrap this with _run_pretty,
                # so that when we register this printer with an actual
                # class, we can call register_pretty(cls)(fn)
                _DEFERRED_DISPATCH_BY_NAME[type] = fn
            else:
                pretty_dispatch.register(type, partial(_run_pretty, fn))
        else:
            assert callable(predicate)
            _PREDICATE_REGISTRY.append((predicate, fn))
        return fn
    return decorator
|
Returns a decorator that registers the decorated function
as the pretty printer for instances of ``type``.
:param type: the type to register the pretty printer for, or a ``str``
to indicate the module and name, e.g.: ``'collections.Counter'``.
:param predicate: a predicate function that takes one argument
and returns a boolean indicating if the value
should be handled by the registered pretty printer.
Only one of ``type`` and ``predicate`` may be supplied. That means
that ``predicate`` will be run on unregistered types only.
The decorated function must accept exactly two positional arguments:
- ``value`` to pretty print, and
- ``ctx``, a context value.
Here's an example of the pretty printer for OrderedDict:
.. code:: python
from collections import OrderedDict
from prettyprinter import register_pretty, pretty_call
@register_pretty(OrderedDict)
def pretty_ordereddict(value, ctx):
return pretty_call(ctx, OrderedDict, list(value.items()))
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/prettyprinter.py#L462-L544
| null |
import inspect
import math
import re
import sys
import warnings
import ast
from collections import OrderedDict
from functools import singledispatch, partial
from itertools import chain, cycle
from traceback import format_exception
from types import (
FunctionType,
BuiltinFunctionType,
BuiltinMethodType
)
from weakref import WeakKeyDictionary
from .doc import (
always_break,
annotate,
concat,
contextual,
flat_choice,
fill,
group,
nest,
NIL,
LINE,
SOFTLINE,
HARDLINE
)
from .doctypes import (
Annotated,
Doc
)
from .layout import layout_smart
from .syntax import Token
from .utils import identity, intersperse, take
PY_VERSION_INFO = sys.version_info
DICT_KEY_ORDER_SUPPORTED = PY_VERSION_INFO >= (3, 6)
UNSET_SENTINEL = object()
COMMA = annotate(Token.PUNCTUATION, ',')
COLON = annotate(Token.PUNCTUATION, ':')
ELLIPSIS = annotate(Token.PUNCTUATION, '...')
LPAREN = annotate(Token.PUNCTUATION, '(')
RPAREN = annotate(Token.PUNCTUATION, ')')
LBRACKET = annotate(Token.PUNCTUATION, '[')
RBRACKET = annotate(Token.PUNCTUATION, ']')
LBRACE = annotate(Token.PUNCTUATION, '{')
RBRACE = annotate(Token.PUNCTUATION, '}')
NEG_OP = annotate(Token.OPERATOR, '-')
MUL_OP = annotate(Token.OPERATOR, '*')
ADD_OP = annotate(Token.OPERATOR, '+')
ASSIGN_OP = annotate(Token.OPERATOR, '=')
WHITESPACE_PATTERN_TEXT = re.compile(r'(\s+)')
WHITESPACE_PATTERN_BYTES = re.compile(rb'(\s+)')
NONWORD_PATTERN_TEXT = re.compile(r'(\W+)')
NONWORD_PATTERN_BYTES = re.compile(rb'(\W+)')
# For dict keys
"""
(
'aaaaaaaaaa'
'aaaaaa'
)
"""
MULTILINE_STRATEGY_PARENS = 'MULTILINE_STRATEGY_PARENS'
# For dict values
"""
'aaaaaaaaaa'
'aaaaa'
"""
MULTILINE_STRATEGY_INDENTED = 'MULTILINE_STRATEGY_INDENTED'
# For sequence elements
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STRATEGY_HANG = 'MULTILINE_STRATEGY_HANG'
# For top level strs
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STRATEGY_PLAIN = 'MULTILINE_STRATEGY_PLAIN'
IMPLICIT_MODULES = {
'__main__',
'builtins',
}
class CommentAnnotation:
    """Doc annotation carrying comment text to render next to a value."""

    def __init__(self, value):
        assert isinstance(value, str)
        self.value = value

    def __repr__(self):
        return 'ValueComment({})'.format(repr(self.value))
class _CommentedValue:
    # Wrapper pairing a value with a comment rendered next to it.
    def __init__(self, value, comment):
        self.value = value
        self.comment = comment
class _TrailingCommentedValue:
    # Wrapper pairing a value with a comment rendered after its last
    # element (forces multi-line rendering of the value).
    def __init__(self, value, comment):
        self.value = value
        self.comment = comment
def comment_value(value, comment_text):
"""Annotates a Python value with a comment text.
prettyprinter will inspect and strip the annotation
during the layout process, and handle rendering the comment
next to the value in the output.
It is highly unlikely you need to call this function. Use
``comment`` instead, which works in almost all cases.
"""
return _CommentedValue(value, comment_text)
def comment_doc(doc, comment_text):
"""Annotates a Doc with a comment; used by the layout algorithm.
You don't need to call this unless you're doing something low-level
with Docs; use ``comment`` instead.
``prettyprinter`` will make sure the parent (or top-level) handler
will render the comment in a proper way. E.g. if ``doc``
represents an element in a list, then the ``list`` pretty
printer will handle where to place the comment.
"""
return annotate(CommentAnnotation(comment_text), doc)
def comment(value, comment_text):
"""Annotates a value or a Doc with a comment.
When printed by prettyprinter, the comment will be
rendered next to the value or Doc.
"""
if isinstance(value, Doc):
return comment_doc(value, comment_text)
return comment_value(value, comment_text)
def trailing_comment(value, comment_text):
"""Annotates a value with a comment text, so that
the comment will be rendered "trailing", e.g. in place
of the last element in a list, set or tuple, or after
the last argument in a function.
This will force the rendering of ``value`` to be broken
to multiple lines as Python does not have inline comments.
>>> trailing_comment(['value'], '...and more')
[
'value',
# ...and more
]
"""
return _TrailingCommentedValue(value, comment_text)
def unwrap_comments(value):
    """Strip comment wrappers from *value*.

    Returns ``(inner_value, comment, trailing_comment)``; either comment
    may be ``None``. With nested wrappers, the innermost of each kind
    wins.
    """
    inline = None
    trailing = None
    while True:
        if isinstance(value, _CommentedValue):
            inline = value.comment
            value = value.value
        elif isinstance(value, _TrailingCommentedValue):
            trailing = value.comment
            value = value.value
        else:
            break
    return (value, inline, trailing)
def is_commented(value):
    """True if *value* is a Doc annotated with a CommentAnnotation."""
    if not isinstance(value, Annotated):
        return False
    return isinstance(value.annotation, CommentAnnotation)
# Single-purpose wrappers mapping name strings to highlight tokens.
def builtin_identifier(s):
    return annotate(Token.NAME_BUILTIN, s)
def identifier(s):
    return annotate(Token.NAME_FUNCTION, s)
def keyword_arg(s):
    return annotate(Token.NAME_VARIABLE, s)
def general_identifier(s):
    """Return a syntax-highlighted identifier doc for *s*.

    Callables are rendered with their qualified name; modules in
    IMPLICIT_MODULES ('__main__', 'builtins') are omitted from the
    prefix, with builtins getting builtin-name highlighting.
    """
    if callable(s):
        module, qualname = s.__module__, s.__qualname__
        if module in IMPLICIT_MODULES:
            if module == 'builtins':
                return builtin_identifier(qualname)
            return identifier(qualname)
        return identifier('{}.{}'.format(module, qualname))
    return identifier(s)
def classattr(cls, attrname):
    """Doc for a dotted class-attribute reference, e.g. ``Cls.attr``."""
    return concat([
        general_identifier(cls),
        identifier('.{}'.format(attrname))
    ])
class PrettyContext:
"""
An immutable object used to track context during construction of
layout primitives. An instance of PrettyContext is passed to every
pretty printer definition.
As a performance optimization, the ``visited`` set is implemented
as mutable.
"""
__slots__ = (
'indent',
'depth_left',
'visited',
'multiline_strategy',
'max_seq_len',
'sort_dict_keys',
'user_ctx'
)
def __init__(
self,
indent,
depth_left,
visited=None,
multiline_strategy=MULTILINE_STRATEGY_PLAIN,
max_seq_len=1000,
sort_dict_keys=False,
user_ctx=None
):
self.indent = indent
self.depth_left = depth_left
self.multiline_strategy = multiline_strategy
self.max_seq_len = max_seq_len
self.sort_dict_keys = sort_dict_keys
if visited is None:
visited = set()
self.visited = visited
self.user_ctx = user_ctx or {}
def _replace(self, **kwargs):
passed_keys = set(kwargs.keys())
fieldnames = type(self).__slots__
assert passed_keys.issubset(set(fieldnames))
return PrettyContext(
**{
k: (
kwargs[k]
if k in passed_keys
else getattr(self, k)
)
for k in fieldnames
}
)
def use_multiline_strategy(self, strategy):
return self._replace(multiline_strategy=strategy)
def assoc(self, key, value):
"""
Return a modified PrettyContext with ``key`` set to ``value``
"""
return self._replace(user_ctx={
**self.user_ctx,
key: value,
})
def set(self, key, value):
warnings.warn(
"PrettyContext.set will be deprecated in the future in favor of "
"renamed PrettyPrinter.assoc. You can fix this warning by "
"changing .set method calls to .assoc",
PendingDeprecationWarning
)
return self.assoc(key, value)
def get(self, key, default=None):
return self.user_ctx.get(key, default)
def nested_call(self):
return self._replace(depth_left=self.depth_left - 1)
def start_visit(self, value):
self.visited.add(id(value))
def end_visit(self, value):
self.visited.remove(id(value))
def is_visited(self, value):
return id(value) in self.visited
def _warn_about_bad_printer(pretty_fn, value, exc):
    """Emit a UserWarning saying *pretty_fn* raised *exc* while printing
    *value*, including the formatted traceback."""
    fnname = '{}.{}'.format(pretty_fn.__module__, pretty_fn.__qualname__)
    traceback_text = ''.join(
        format_exception(type(exc), exc, exc.__traceback__)
    )
    warnings.warn(
        "The pretty printer for {}, {}, raised an exception. "
        "Falling back to default repr.\n\n{}".format(
            type(value).__name__,
            fnname,
            traceback_text
        ),
        UserWarning
    )
def _run_pretty(pretty_fn, value, ctx, trailing_comment=None):
if ctx.is_visited(value):
return _pretty_recursion(value)
ctx.start_visit(value)
if trailing_comment:
try:
doc = pretty_fn(
value,
ctx,
trailing_comment=trailing_comment
)
except TypeError as e:
# This is probably because pretty_fn does not support
# trailing_comment, but let's make sure.
sig = inspect.signature(pretty_fn)
try:
sig.bind(value, ctx, trailing_comment=trailing_comment)
except TypeError:
fnname = '{}.{}'.format(
pretty_fn.__module__,
pretty_fn.__qualname__
)
warnings.warn(
"The pretty printer for {}, {}, does not support rendering "
"trailing comments. It will not show up in output.".format(
type(value).__name__, fnname
)
)
doc = pretty_fn(value, ctx)
else:
_warn_about_bad_printer(pretty_fn, value, exc=e)
doc = repr(value)
else:
try:
doc = pretty_fn(value, ctx)
except Exception as e:
_warn_about_bad_printer(pretty_fn, value, exc=e)
doc = repr(value)
if not (
isinstance(doc, str) or
isinstance(doc, Doc)
):
fnname = '{}.{}'.format(
pretty_fn.__module__,
pretty_fn.__qualname__
)
raise ValueError(
'Functions decorated with register_pretty must return '
'an instance of str or Doc. {} returned '
'{} instead.'.format(fnname, repr(doc))
)
ctx.end_visit(value)
return doc
# Maps 'module.QualName' strings to printer functions registered
# before their target class was importable.
_DEFERRED_DISPATCH_BY_NAME = {}


def get_deferred_key(type):
    """Dotted-path key used for deferred (string-based) registration."""
    return '{}.{}'.format(type.__module__, type.__qualname__)
# (predicate, printer) pairs registered via register_pretty(predicate=...).
_PREDICATE_REGISTRY = []
# Fallback printer: try predicate-registered printers, then plain repr().
def _repr_pretty(value, ctx):
    for predicate, fn in _PREDICATE_REGISTRY:
        if predicate(value):
            return fn(value, ctx)
    return repr(value)
# Default singledispatch target; also the sentinel compared against in
# is_registered() to detect "no printer registered".
_BASE_DISPATCH = partial(_run_pretty, _repr_pretty)
pretty_dispatch = singledispatch(_BASE_DISPATCH)
def pretty_python_value(value, ctx):
comment = None
trailing_comment = None
value, comment, trailing_comment = unwrap_comments(value)
is_registered(
type(value),
check_superclasses=True,
check_deferred=True,
register_deferred=True
)
if trailing_comment:
doc = pretty_dispatch(
value,
ctx,
trailing_comment=trailing_comment
)
else:
doc = pretty_dispatch(
value,
ctx
)
if comment:
return comment_doc(
doc,
comment
)
return doc
def is_registered(
type,
*,
check_superclasses=False,
check_deferred=True,
register_deferred=True
):
if not check_deferred and register_deferred:
raise ValueError(
'register_deferred may not be True when check_deferred is False'
)
if type in pretty_dispatch.registry:
return True
if check_deferred:
# Check deferred printers for the type exactly.
deferred_key = get_deferred_key(type)
if deferred_key in _DEFERRED_DISPATCH_BY_NAME:
if register_deferred:
deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(
deferred_key
)
register_pretty(type)(deferred_dispatch)
return True
if not check_superclasses:
return False
if check_deferred:
# Check deferred printers for supertypes.
for supertype in type.__mro__[1:]:
deferred_key = get_deferred_key(supertype)
if deferred_key in _DEFERRED_DISPATCH_BY_NAME:
if register_deferred:
deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(
deferred_key
)
register_pretty(supertype)(deferred_dispatch)
return True
return pretty_dispatch.dispatch(type) is not _BASE_DISPATCH
def bracket(ctx, left, child, right):
    """Wrap *child* in *left*/*right* delimiters, indenting the child
    onto its own line(s) when the enclosing group is broken."""
    return concat([
        left,
        nest(ctx.indent, concat([SOFTLINE, child])),
        SOFTLINE,
        right
    ])
def commentdoc(text):
    """Returns a Doc representing a comment `text`. `text` is
    treated as words, and any whitespace may be used to break
    the comment to multiple lines.

    :raises ValueError: when ``text`` is empty.
    """
    if not text:
        raise ValueError(
            'Expected non-empty comment str, got {}'.format(repr(text))
        )
    commentlines = []
    for line in text.splitlines():
        alternating_words_ws = list(filter(None, WHITESPACE_PATTERN_TEXT.split(line)))
        if not alternating_words_ws:
            # Bug fix: a blank line inside the comment text used to
            # crash on the [0] subscript below; render it as a bare
            # comment marker instead.
            commentlines.append('#')
            continue
        starts_with_whitespace = bool(
            WHITESPACE_PATTERN_TEXT.match(alternating_words_ws[0])
        )
        if starts_with_whitespace:
            prefix = alternating_words_ws[0]
            alternating_words_ws = alternating_words_ws[1:]
        else:
            prefix = NIL
        if len(alternating_words_ws) % 2 == 0:
            # The last part must be whitespace.
            alternating_words_ws = alternating_words_ws[:-1]
        # Replace each whitespace run with a flat/broken choice so the
        # comment can wrap onto '# ' continuation lines when needed.
        for idx, tup in enumerate(zip(alternating_words_ws, cycle([False, True]))):
            part, is_ws = tup
            if is_ws:
                alternating_words_ws[idx] = flat_choice(
                    when_flat=part,
                    when_broken=always_break(
                        concat([
                            HARDLINE,
                            '# ',
                        ])
                    )
                )
        commentlines.append(
            concat([
                '# ',
                prefix,
                fill(alternating_words_ws)
            ])
        )
    outer = identity
    if len(commentlines) > 1:
        outer = always_break
    return annotate(
        Token.COMMENT_SINGLE,
        outer(concat(intersperse(HARDLINE, commentlines)))
    )
def sequence_of_docs(ctx, left, docs, right, dangle=False, force_break=False):
docs = list(docs)
# Performance optimization:
# in case of really long sequences,
# the layout algorithm can be quite slow.
# No branching here is needed if the sequence
# is long enough that even with the shortest
# element output, it does not fit the ribbon width.
minimum_output_len = (
2 + # Assume left and right are one character each
len(', ') * (len(docs) - 1) +
len(docs) # each element must take at least one character
)
MAX_PRACTICAL_RIBBON_WIDTH = 150
will_break = force_break or minimum_output_len > MAX_PRACTICAL_RIBBON_WIDTH
has_comment = any(is_commented(doc) for doc in docs)
parts = []
for idx, doc in enumerate(docs):
last = idx == len(docs) - 1
if is_commented(doc):
comment_str = doc.annotation.value
# Try to fit the comment at the end of the same line.
flat_version = concat([
doc,
COMMA if not last else NIL,
' ',
commentdoc(comment_str),
HARDLINE if not last else NIL
])
# If the value is broken to multiple lines, add
# comment on the line above.
broken_version = concat([
commentdoc(comment_str),
HARDLINE,
doc,
COMMA if not last else NIL,
HARDLINE if not last else NIL
])
parts.append(
group(
flat_choice(
when_flat=flat_version,
when_broken=broken_version,
)
)
)
else:
parts.append(doc)
if not last:
parts.append(
concat([COMMA, LINE])
)
if dangle:
parts.append(COMMA)
outer = (
always_break
if will_break or has_comment
else group
)
return outer(bracket(ctx, left, concat(parts), right))
def pretty_call(ctx, fn, *args, **kwargs):
"""Returns a Doc that represents a function call to :keyword:`fn` with
the remaining positional and keyword arguments.
You can only use this function on Python 3.6+. On Python 3.5, the order
of keyword arguments is not maintained, and you have to use
:func:`~prettyprinter.pretty_call_alt`.
Given an arbitrary context ``ctx``,::
pretty_call(ctx, sorted, [7, 4, 5], reverse=True)
Will result in output::
sorted([7, 4, 5], reverse=True)
The layout algorithm will automatically break the call to multiple
lines if needed::
sorted(
[7, 4, 5],
reverse=True
)
``pretty_call`` automatically handles syntax highlighting.
:param ctx: a context value
:type ctx: prettyprinter.prettyprinter.PrettyContext
:param fn: a callable
:param args: positional arguments to render to the call
:param kwargs: keyword arguments to render to the call
:returns: :class:`~prettyprinter.doc.Doc`
"""
return pretty_call_alt(ctx, fn, args, kwargs)
def pretty_call_alt(ctx, fn, args=(), kwargs=()):
"""Returns a Doc that represents a function call to :keyword:`fn` with
the ``args`` and ``kwargs``.
Given an arbitrary context ``ctx``,::
pretty_call_alt(ctx, sorted, args=([7, 4, 5], ), kwargs=[('reverse', True)])
Will result in output::
sorted([7, 4, 5], reverse=True)
The layout algorithm will automatically break the call to multiple
lines if needed::
sorted(
[7, 4, 5],
reverse=True
)
``pretty_call_alt`` automatically handles syntax highlighting.
:param ctx: a context value
:type ctx: prettyprinter.prettyprinter.PrettyContext
:param fn: a callable
:param args: a ``tuple`` of positional arguments to render to the call
:param kwargs: keyword arguments to render to the call. Either an instance
of ``OrderedDict``, or an iterable of two-tuples, where the
first element is a `str` (key), and the second is the Python
value for that keyword argument.
:returns: :class:`~prettyprinter.doc.Doc`
"""
fndoc = general_identifier(fn)
if ctx.depth_left <= 0:
return concat([fndoc, LPAREN, ELLIPSIS, RPAREN])
if not kwargs and len(args) == 1:
sole_arg = args[0]
unwrapped_sole_arg, _comment, _trailing_comment = unwrap_comments(args[0])
if type(unwrapped_sole_arg) in (list, dict, tuple):
return build_fncall(
ctx,
fndoc,
argdocs=[pretty_python_value(sole_arg, ctx)],
hug_sole_arg=True,
)
nested_ctx = (
ctx
.nested_call()
.use_multiline_strategy(MULTILINE_STRATEGY_HANG)
)
if not DICT_KEY_ORDER_SUPPORTED and isinstance(kwargs, dict):
warnings.warn(
"A dict was passed to pretty_call_alt to represent kwargs, "
"but Python 3.5 doesn't maintain key order for dicts. The order "
"of keyword arguments will be undefined in the output. "
"To fix this, pass a list of two-tuples or an instance of "
"OrderedDict instead.",
UserWarning
)
kwargitems = (
kwargs.items()
if isinstance(kwargs, (OrderedDict, dict))
else kwargs
)
return build_fncall(
ctx,
fndoc,
argdocs=(
pretty_python_value(arg, nested_ctx)
for arg in args
),
kwargdocs=(
(kwarg, pretty_python_value(v, nested_ctx))
for kwarg, v in kwargitems
),
)
def build_fncall(
ctx,
fndoc,
argdocs=(),
kwargdocs=(),
hug_sole_arg=False,
trailing_comment=None,
):
"""Builds a doc that looks like a function call,
from docs that represent the function, arguments
and keyword arguments.
If ``hug_sole_arg`` is True, and the represented
functional call is done with a single non-keyword
argument, the function call parentheses will hug
the sole argument doc without newlines and indentation
in break mode. This makes a difference in calls
like this::
> hug_sole_arg = False
frozenset(
[
1,
2,
3,
4,
5
]
)
> hug_sole_arg = True
frozenset([
1,
2,
3,
4,
5,
])
If ``trailing_comment`` is provided, the text is
rendered as a comment after the last argument and
before the closing parenthesis. This will force
the function call to be broken to multiple lines.
"""
if callable(fndoc):
fndoc = general_identifier(fndoc)
has_comment = bool(trailing_comment)
argdocs = list(argdocs)
kwargdocs = list(kwargdocs)
kwargdocs = [
# Propagate any comments to the kwarg doc.
(
comment_doc(
concat([
keyword_arg(binding),
ASSIGN_OP,
doc.doc
]),
doc.annotation.value
)
if is_commented(doc)
else concat([
keyword_arg(binding),
ASSIGN_OP,
doc
])
)
for binding, doc in kwargdocs
]
if not (argdocs or kwargdocs):
return concat([
fndoc,
LPAREN,
RPAREN,
])
if (
hug_sole_arg and
not kwargdocs and
len(argdocs) == 1 and
not is_commented(argdocs[0])
):
return group(
concat([
fndoc,
LPAREN,
argdocs[0],
RPAREN
])
)
allarg_docs = [*argdocs, *kwargdocs]
if trailing_comment:
allarg_docs.append(commentdoc(trailing_comment))
parts = []
for idx, doc in enumerate(allarg_docs):
last = idx == len(allarg_docs) - 1
if is_commented(doc):
has_comment = True
comment_str = doc.annotation.value
doc = doc.doc
else:
comment_str = None
part = concat([doc, NIL if last else COMMA])
if comment_str:
part = group(
flat_choice(
when_flat=concat([
part,
' ',
commentdoc(comment_str)
]),
when_broken=concat([
commentdoc(comment_str),
HARDLINE,
part,
]),
)
)
if not last:
part = concat([part, HARDLINE if has_comment else LINE])
parts.append(part)
outer = (
always_break
if has_comment
else group
)
return outer(
concat([
fndoc,
LPAREN,
nest(
ctx.indent,
concat([
SOFTLINE,
concat(parts),
])
),
SOFTLINE,
RPAREN
])
)
@register_pretty(type)
def pretty_type(_type, ctx):
    """Render a class object; classes from non-implicit modules get a
    '# class' comment to mimic the default <class '...'> repr."""
    if _type is type(None): # noqa
        # NoneType is not available in the global namespace,
        # clearer to print type(None)
        return pretty_call_alt(ctx, type, args=(None, ))
    result = general_identifier(_type)
    # For native types, we can print the class identifier, e.g.
    # >>> int
    # int
    #
    # But for others, such as:
    # >>> import functools; functools.partial
    # functools.partial
    #
    # It may be unclear what kind of value it is, unless the user already
    # knows it's a class. The default repr from Python is
    # <class 'functools.partial'>, so we'll imitate that by adding
    # a comment indicating that the value is a class.
    module = _type.__module__
    if module in IMPLICIT_MODULES:
        return result
    return comment(
        result,
        'class'
    )
# Callables are rendered as a qualified identifier plus a clarifying
# comment, in the same spirit as pretty_type above.
@register_pretty(FunctionType)
def pretty_function(fn, ctx):
    return comment(
        general_identifier(fn),
        'function'
    )
@register_pretty(BuiltinMethodType)
def pretty_builtin_method(method, ctx):
    return comment(
        general_identifier(method),
        'built-in method'
    )
@register_pretty(BuiltinFunctionType)
def pretty_builtin_function(fn, ctx):
    return comment(
        general_identifier(fn),
        'built-in function'
    )
# Class attributes generated by collections.namedtuple; their presence
# identifies a namedtuple class.
namedtuple_clsattrs = (
    '__slots__',
    '_make',
    '_replace',
    '_asdict'
)
# Integer class attributes present on C structseq types
# (e.g. time.struct_time); their presence identifies a "cnamedtuple".
c_namedtuple_identify_by_clsattrs = (
    'n_fields',
    'n_sequence_fields',
    'n_unnamed_fields'
)
def _is_namedtuple(value):
    """Duck-type check: does value's class expose the attributes that
    collections.namedtuple generates?"""
    cls = type(value)
    # Attribute names inlined from namedtuple_clsattrs.
    return all(
        hasattr(cls, attrname)
        for attrname in ('__slots__', '_make', '_replace', '_asdict')
    )
def _is_cnamedtuple(value):
    """Duck-type check for C structseq types (e.g. time.struct_time):
    they expose integer n_fields / n_sequence_fields / n_unnamed_fields
    class attributes."""
    cls = type(value)
    # Attribute names inlined from c_namedtuple_identify_by_clsattrs.
    for attrname in ('n_fields', 'n_sequence_fields', 'n_unnamed_fields'):
        val = getattr(cls, attrname, None)
        if not isinstance(val, int):
            return False
    return True
def pretty_namedtuple(value, ctx, trailing_comment=None):
    """Render a namedtuple as ``Constructor(field1=..., field2=...)``.

    NOTE(review): ``trailing_comment`` is accepted but not used in this
    body — confirm whether it should be forwarded to pretty_call_alt.
    """
    constructor = type(value)
    kwargs = zip(constructor._fields, value)
    return pretty_call_alt(ctx, constructor, kwargs=kwargs)
# Given a cnamedtuple value, returns a tuple
# of fieldnames. Each fieldname at ith index of
# the tuple corresponds to the ith element in the cnamedtuple.
def resolve_cnamedtuple_fieldnames(value):
    """Recover the field names of a C structseq value, in element order.

    Relies on the structseq repr being a parseable call expression with
    one keyword per element, e.g.
    ``time.struct_time(tm_year=..., tm_mon=..., ...)``; see
    https://github.com/python/cpython/blob/53b9e1a1c1d86187ad6fbee492b697ef8be74205/Objects/structseq.c#L168-L241
    """
    parsed = ast.parse(repr(value), mode='eval')
    call = parsed.body
    return tuple(kw.arg for kw in call.keywords)
# Keys: classes/constructors
# Values: a tuple of fieldnames is resolving them was successful.
# Otherwise, an exception that was raised when attempting
# to resolve the fieldnames.
_cnamedtuple_fieldnames_by_class = WeakKeyDictionary()
# Examples of cnamedtuples:
# - return value of time.strptime()
# - return value of os.uname()
def pretty_cnamedtuple(value, ctx, trailing_comment=None):
    """Render a C structseq value as a call with each element annotated
    by its field name as a comment.

    Field-name resolution (which parses the repr) is cached per class;
    a failure is cached as the exception and re-raised on later calls.
    """
    cls = type(value)
    if cls not in _cnamedtuple_fieldnames_by_class:
        try:
            fieldnames = resolve_cnamedtuple_fieldnames(value)
        except Exception as exc:
            fieldnames = exc
        _cnamedtuple_fieldnames_by_class[cls] = fieldnames
    fieldnames = _cnamedtuple_fieldnames_by_class[cls]
    if isinstance(fieldnames, Exception):
        raise fieldnames
    return pretty_call_alt(
        ctx,
        cls,
        args=tuple([
            tuple(
                comment(val, fieldname)
                for val, fieldname in zip(value, fieldnames)
            )
        ])
    )
@register_pretty(tuple)
@register_pretty(list)
@register_pretty(set)
def pretty_bracketable_iterable(value, ctx, trailing_comment=None):
    """Pretty print a tuple, list or set (or a subclass thereof).

    Named tuples (both Python- and C-level) are rendered as constructor
    calls; plain sequences between their brackets. Sequences longer
    than ``ctx.max_seq_len`` are truncated with a trailing comment.
    """
    constructor = type(value)
    if isinstance(value, tuple):
        if _is_cnamedtuple(value):
            try:
                return pretty_cnamedtuple(
                    value,
                    ctx,
                    trailing_comment=trailing_comment
                )
            except Exception:
                pass  # render as a normal tuple
        elif _is_namedtuple(value):
            return pretty_namedtuple(value, ctx, trailing_comment=trailing_comment)
    # Native types render bare (e.g. [1, 2]); subclasses render as a
    # constructor call around the literal (e.g. MyList([1, 2])).
    is_native_type = constructor in (tuple, list, set)
    if len(value) > ctx.max_seq_len:
        truncation_comment = '...and {} more elements'.format(
            len(value) - ctx.max_seq_len
        )
        trailing_comment = (
            truncation_comment + '. ' + trailing_comment
            if trailing_comment
            else truncation_comment
        )
    # ``dangle``: emit a trailing comma (needed for 1-tuples).
    dangle = False
    if isinstance(value, list):
        left, right = LBRACKET, RBRACKET
    elif isinstance(value, tuple):
        left, right = LPAREN, RPAREN
        if len(value) == 1:
            dangle = True
    elif isinstance(value, set):
        left, right = LBRACE, RBRACE
    if not value:
        if isinstance(value, (list, tuple)):
            if is_native_type:
                return concat([left, right])
            return pretty_call_alt(ctx, constructor)
        else:
            # E.g. set() or SubclassOfSet()
            return pretty_call_alt(ctx, constructor)
    if ctx.depth_left == 0:
        # Out of depth budget: elide the contents.
        if isinstance(value, (list, tuple)):
            literal = concat([left, ELLIPSIS, right])
            if is_native_type:
                return literal
            return build_fncall(
                ctx,
                general_identifier(constructor),
                argdocs=(literal, ),
                hug_sole_arg=True
            )
        else:
            return pretty_call_alt(ctx, constructor, args=(..., ))
    if len(value) == 1:
        sole_value = list(value)[0]
        els = [
            pretty_python_value(
                sole_value,
                ctx=(
                    ctx
                    .nested_call()
                    .use_multiline_strategy(MULTILINE_STRATEGY_PLAIN)
                )
            )
        ]
    else:
        els = (
            pretty_python_value(
                el,
                ctx=(
                    ctx
                    .nested_call()
                    .use_multiline_strategy(MULTILINE_STRATEGY_HANG)
                )
            )
            for el in take(ctx.max_seq_len, value)
        )
    if trailing_comment:
        els = chain(els, [commentdoc(trailing_comment)])
        # A dangling comma before a comment would be misplaced.
        dangle = False
    literal = sequence_of_docs(
        ctx,
        left,
        els,
        right,
        dangle=dangle,
        force_break=bool(trailing_comment)
    )
    if is_native_type:
        return literal
    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(literal, ),
        hug_sole_arg=True
    )
@register_pretty(frozenset)
def pretty_frozenset(value, ctx):
    """Render a frozenset as frozenset([...]) / frozenset()."""
    cls = type(value)
    args = (list(value), ) if value else ()
    return pretty_call_alt(ctx, cls, args=args)
class _AlwaysSortable(object):
    """Sort key wrapper that never raises on heterogeneous values.

    Compares the wrapped values directly when they are mutually
    orderable, and falls back to an arbitrary but consistent ordering
    otherwise. Used for ``sort_dict_keys``.
    """
    __slots__ = ('value', )

    def __init__(self, value):
        self.value = value

    def sortable_value(self):
        # Fallback key used when values are not mutually orderable.
        # NOTE(review): str(type(self)) is the wrapper's own type, which
        # is identical for every instance, so the fallback effectively
        # orders by id() alone — presumably str(type(self.value)) was
        # intended; confirm before changing, since it only affects the
        # tie-break order of unorderable keys.
        return (str(type(self)), id(self))

    def __lt__(self, other):
        try:
            return self.value < other.value
        except TypeError:
            return self.sortable_value() < other.sortable_value()
@register_pretty(dict)
def pretty_dict(d, ctx, trailing_comment=None):
    """Pretty print a dict (or subclass) as ``{k: v, ...}``.

    Subclasses render as ``SubClass({...})``. Dicts longer than
    ``ctx.max_seq_len`` are truncated with a trailing comment; key and
    value comments (from ``comment(...)``) force multi-line output.
    """
    constructor = type(d)
    is_native_type = constructor is dict
    if ctx.depth_left == 0:
        # Out of depth budget: elide the contents.
        literal = concat([LBRACE, ELLIPSIS, RBRACE])
        if is_native_type:
            return literal
        return build_fncall(
            ctx,
            general_identifier(constructor),
            argdocs=(literal, ),
            hug_sole_arg=True
        )
    if len(d) > ctx.max_seq_len:
        count_truncated = len(d) - ctx.max_seq_len
        truncation_comment = '...and {} more elements'.format(
            count_truncated
        )
        trailing_comment = (
            truncation_comment + '. ' + trailing_comment
            if trailing_comment
            else truncation_comment
        )
    has_comment = bool(trailing_comment)
    sorted_keys = (
        sorted(d.keys(), key=_AlwaysSortable)
        if ctx.sort_dict_keys
        else d.keys()
    )
    # First pass: render each key/value, peeling off any comment
    # annotations so they can be laid out around the pair.
    pairs = []
    for k in take(ctx.max_seq_len, sorted_keys):
        v = d[k]
        if isinstance(k, (str, bytes)):
            kdoc = pretty_str(
                k,
                # not a nested call on purpose
                ctx=ctx.use_multiline_strategy(MULTILINE_STRATEGY_PARENS),
            )
        else:
            kdoc = pretty_python_value(
                k,
                ctx=ctx.nested_call()
            )
        vdoc = pretty_python_value(
            v,
            ctx=(
                ctx
                .nested_call()
                .use_multiline_strategy(MULTILINE_STRATEGY_INDENTED)
            ),
        )
        kcomment = None
        if is_commented(kdoc):
            has_comment = True
            kcomment = kdoc.annotation.value
            kdoc = kdoc.doc
        vcomment = None
        if is_commented(vdoc):
            has_comment = True
            vcomment = vdoc.annotation.value
            vdoc = vdoc.doc
        pairs.append((k, v, kdoc, vdoc, kcomment, vcomment))
    # Second pass: assemble ``key: value,`` parts, weaving in comments.
    parts = []
    for idx, tup in enumerate(pairs):
        last = idx == len(pairs) - 1
        k, v, kdoc, vdoc, kcomment, vcomment = tup
        if not (kcomment or vcomment):
            parts.append(
                concat([
                    kdoc,
                    concat([COLON, ' ']),
                    vdoc,
                    NIL if last else COMMA,
                    NIL if last else LINE,
                ]),
            )
            continue
        if kcomment:
            kcommented = concat([
                commentdoc(kcomment),
                HARDLINE,
                kdoc,
            ])
        else:
            kcommented = kdoc
        if vcomment:
            vcommented = group(
                flat_choice(
                    # Add comment at the end of the line
                    when_flat=concat([
                        vdoc,
                        NIL if last else COMMA,
                        ' ',
                        commentdoc(vcomment),
                        NIL if last else HARDLINE,
                    ]),
                    # Put comment above the value
                    # on its own line
                    when_broken=concat([
                        nest(
                            ctx.indent,
                            concat([
                                HARDLINE,
                                commentdoc(vcomment),
                                HARDLINE,
                                # Rerender vdoc with plain multiline strategy,
                                # since we already have an indentation.
                                pretty_python_value(
                                    v,
                                    ctx=(
                                        ctx
                                        .nested_call()
                                        .use_multiline_strategy(MULTILINE_STRATEGY_PLAIN)
                                    ),
                                ),
                                COMMA if not last else NIL,
                                HARDLINE if not last else NIL
                            ])
                        ),
                    ])
                )
            )
        else:
            vcommented = concat([
                vdoc,
                COMMA if not last else NIL,
                LINE if not last else NIL
            ])
        parts.append(
            concat([
                kcommented,
                concat([COLON, ' ']),
                vcommented
            ])
        )
    if trailing_comment:
        parts.append(concat([
            HARDLINE,
            commentdoc(trailing_comment)
        ]))
    doc = bracket(
        ctx,
        LBRACE,
        concat(parts),
        RBRACE,
    )
    # More than two pairs, or any comment, always breaks to one pair
    # per line; otherwise let the layout algorithm decide.
    if len(pairs) > 2 or has_comment:
        doc = always_break(doc)
    else:
        doc = group(doc)
    if is_native_type:
        return doc
    if not parts:
        return pretty_call_alt(ctx, constructor)
    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(doc, ),
        hug_sole_arg=True
    )
# Sentinels for rendering non-finite floats as float('inf') / float('-inf').
INF_FLOAT = float('inf')
NEG_INF_FLOAT = float('-inf')
@register_pretty(float)
def pretty_float(value, ctx):
    """Render a float; inf/-inf/nan become float('...') calls since
    they have no literal syntax. Subclasses render as SubClass(1.5)."""
    cls = type(value)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, cls, args=(..., ))
    if math.isnan(value):
        return pretty_call_alt(ctx, cls, args=('nan', ))
    if value == INF_FLOAT:
        return pretty_call_alt(ctx, cls, args=('inf', ))
    if value == NEG_INF_FLOAT:
        return pretty_call_alt(ctx, cls, args=('-inf', ))
    doc = annotate(Token.NUMBER_FLOAT, repr(value))
    if cls is float:
        return doc
    return build_fncall(ctx, general_identifier(cls), argdocs=(doc, ))
@register_pretty(int)
def pretty_int(value, ctx):
    """Render an int; subclasses render as SubClass(42)."""
    cls = type(value)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, cls, args=(..., ))
    doc = annotate(Token.NUMBER_INT, repr(value))
    if cls is int:
        return doc
    return build_fncall(ctx, general_identifier(cls), argdocs=(doc, ))
@register_pretty(type(...))
def pretty_ellipsis(value, ctx):
    """Render the Ellipsis singleton as ``...``."""
    return ELLIPSIS
@register_pretty(bool)
def pretty_bool(value, ctx):
    """Render True/False as highlighted keyword constants."""
    cls = type(value)
    doc = annotate(Token.KEYWORD_CONSTANT, 'True' if value else 'False')
    if cls is bool:
        return doc
    # Defensive fallback; bool cannot be subclassed, so this branch is
    # unreachable in practice.
    return build_fncall(
        ctx,
        general_identifier(cls),
        argdocs=(doc, )
    )
# Shared Doc for the None singleton; it is immutable, so one doc suffices.
NONE_DOC = annotate(Token.KEYWORD_CONSTANT, 'None')


@register_pretty(type(None))
def pretty_none(value, ctx):
    """Render None as a highlighted keyword constant."""
    return NONE_DOC
SINGLE_QUOTE_TEXT = "'"
SINGLE_QUOTE_BYTES = b"'"

DOUBLE_QUOTE_TEXT = '"'
DOUBLE_QUOTE_BYTES = b'"'


def determine_quote_strategy(s):
    """Pick the quote character to wrap *s* (str or bytes) in.

    Prefers single quotes; uses double quotes when that avoids
    escaping. Always returns a *str* quote, even for bytes input,
    since the surrounding literal is rendered as text.
    """
    if isinstance(s, str):
        single, double = SINGLE_QUOTE_TEXT, DOUBLE_QUOTE_TEXT
    else:
        single, double = SINGLE_QUOTE_BYTES, DOUBLE_QUOTE_BYTES

    if single not in s:
        return SINGLE_QUOTE_TEXT
    if double not in s:
        return DOUBLE_QUOTE_TEXT

    # Both quote kinds occur: choose the one needing fewer escapes,
    # preferring single quotes on a tie.
    if s.count(single) <= s.count(double):
        return SINGLE_QUOTE_TEXT
    return DOUBLE_QUOTE_TEXT
def escape_str_for_quote(use_quote, s):
    """Escape str/bytes *s* for embedding between *use_quote* quotes.

    Lets repr() do the escaping, then swaps which quote character is
    escaped if repr() happened to choose the other one.
    """
    as_repr = repr(s)
    quote_used_by_repr = as_repr[-1]
    # repr() output may carry a prefix (e.g. b'...'); the payload
    # starts right after the first quote character.
    payload_start = as_repr.find(quote_used_by_repr) + 1
    payload = as_repr[payload_start:-1]

    if quote_used_by_repr == use_quote:
        # repr() already escaped the quote we want to use.
        return payload

    if use_quote == "'":
        # repr() quoted with ", so " is escaped and ' is not: swap.
        return payload.replace('\\"', '"').replace("'", "\\'")
    # repr() quoted with ', so ' is escaped and " is not: swap.
    return payload.replace("\\'", "'").replace('"', '\\"')
# Matches the escape sequences Python string literals support, so they
# can be highlighted separately from the surrounding literal text:
# short escapes (\n, \t, ...), \N{NAME}, \uXXXX, \UXXXXXXXX, \xXX and
# octal escapes. The whole alternation is one capture group so that
# re.split() keeps the escapes in its output.
STR_LITERAL_ESCAPES = re.compile(
    r'''((?:\\[\\abfnrtv"'])|'''
    r'(?:\\N\{.*?\})|'
    r'(?:\\u[a-fA-F0-9]{4})|'
    r'(?:\\U[a-fA-F0-9]{8})|'
    r'(?:\\x[a-fA-F0-9]{2})|'
    r'(?:\\[0-7]{1,3}))'
)
def highlight_escapes(s):
    """Annotate escape sequences in *s* with a distinct syntax token.

    Splitting on STR_LITERAL_ESCAPES (a capturing pattern) yields
    alternating literal/escape runs; each non-empty run is annotated
    accordingly.
    """
    if not s:
        return NIL
    runs = STR_LITERAL_ESCAPES.split(s)
    # re.split alternates non-matching and matching runs; check which
    # kind comes first.
    is_escape = bool(STR_LITERAL_ESCAPES.match(runs[0]))
    docs = []
    for run in runs:
        if run:
            docs.append(
                annotate(
                    Token.STRING_ESCAPE if is_escape else Token.LITERAL_STRING,
                    run
                )
            )
        is_escape = not is_escape
    return concat(docs)
def pretty_single_line_str(s, indent, use_quote=None):
    """Render *s* (str or bytes) as a one-line quoted literal Doc.

    ``indent`` is accepted for interface compatibility but is not used
    here. If *use_quote* is None, a quote character is chosen
    automatically.
    """
    if use_quote is None:
        use_quote = determine_quote_strategy(s)
    highlighted = highlight_escapes(escape_str_for_quote(use_quote, s))
    quoted = annotate(
        Token.LITERAL_STRING,
        concat([
            use_quote,
            highlighted,
            use_quote
        ])
    )
    if isinstance(s, bytes):
        prefix = annotate(Token.STRING_AFFIX, 'b')
    else:
        prefix = ''
    return concat([prefix, quoted])
def split_at(idx, sequence):
    """Return ``(sequence[:idx], sequence[idx:])``."""
    head = sequence[:idx]
    tail = sequence[idx:]
    return (head, tail)


def escaped_len(s, use_quote):
    """Length of *s* once escaped for quoting with *use_quote*."""
    return len(escape_str_for_quote(use_quote, s))
def str_to_lines(max_len, use_quote, s, pattern=None):
    """Yield chunks of *s* whose escaped length is at most *max_len*.

    Splits preferentially at whitespace boundaries; when the string
    contains no whitespace, falls back to non-word boundaries, and as a
    last resort splits a single over-long part mid-run. Lengths are
    measured on the text as it will appear escaped inside *use_quote*
    quotes.

    :param max_len: maximum escaped length per yielded line (must be > 0)
    :param use_quote: quote character used to compute escaped lengths
    :param s: str or bytes to split
    :param pattern: optional pre-chosen split regex; autodetected if None
    """
    assert max_len > 0, "max_len must be positive"
    if len(s) <= max_len:
        if s:
            yield s
        return
    if pattern is None:
        if isinstance(s, str):
            whitespace_pattern = WHITESPACE_PATTERN_TEXT
            nonword_pattern = NONWORD_PATTERN_TEXT
        else:
            assert isinstance(s, bytes)
            whitespace_pattern = WHITESPACE_PATTERN_BYTES
            nonword_pattern = NONWORD_PATTERN_BYTES
        alternating_words_ws = whitespace_pattern.split(s)
        pattern = whitespace_pattern
        if len(alternating_words_ws) <= 1:
            # no whitespace: try splitting with nonword pattern.
            alternating_words_ws = nonword_pattern.split(s)
            pattern = nonword_pattern
    else:
        alternating_words_ws = pattern.split(s)
    if isinstance(s, str):
        empty = ''
    else:
        assert isinstance(s, bytes)
        empty = b''
    starts_with_whitespace = bool(pattern.match(alternating_words_ws[0]))
    # List[Tuple[str, bool]]
    # The boolean associated with each part indicates if it is a
    # whitespce/non-word part or not.
    tagged_alternating = iter(
        zip(
            alternating_words_ws,
            cycle([starts_with_whitespace, not starts_with_whitespace])
        )
    )
    next_part = None
    next_is_whitespace = None
    curr_line_parts = []
    curr_line_len = 0
    while True:
        if not next_part:
            try:
                next_part, next_is_whitespace = next(tagged_alternating)
            except StopIteration:
                break
            if not next_part:
                continue
        # We think of the current line as including next_part,
        # but as an optimization we don't append to curr_line_parts,
        # as we often would have to pop it back out.
        next_escaped_len = escaped_len(next_part, use_quote)
        curr_line_len += next_escaped_len
        if curr_line_len == max_len:
            # Exact fit: flush without the trailing part when it is a
            # word that would read better at the start of the next line.
            if not next_is_whitespace and len(curr_line_parts) > 1:
                yield empty.join(curr_line_parts)
                curr_line_parts = []
                curr_line_len = 0
                # Leave next_part and next_is_whitespace as is
                # to be processed on next iteration
            else:
                yield empty.join(chain(curr_line_parts, [next_part]))
                curr_line_parts = []
                curr_line_len = 0
                next_part = None
                next_is_whitespace = None
        elif curr_line_len > max_len:
            # Overflow: flush what we have, or hard-split next_part if
            # it is alone too long for a line.
            if not next_is_whitespace and curr_line_parts:
                yield empty.join(curr_line_parts)
                curr_line_parts = []
                curr_line_len = 0
                # Leave next_part and next_is_whitespace as is
                # to be processed on next iteration
                continue
            remaining_len = max_len - (curr_line_len - next_escaped_len)
            this_line_part, next_line_part = split_at(max(remaining_len, 0), next_part)
            if this_line_part:
                curr_line_parts.append(this_line_part)
            if curr_line_parts:
                yield empty.join(curr_line_parts)
            curr_line_parts = []
            curr_line_len = 0
            if next_line_part:
                next_part = next_line_part
            else:
                next_part = None
        else:
            # Still fits: accept the part onto the current line.
            curr_line_parts.append(next_part)
            next_part = None
            next_is_whitespace = None
    if curr_line_parts:
        yield empty.join(curr_line_parts)
@register_pretty(str)
@register_pretty(bytes)
def pretty_str(s, ctx, split_pattern=None):
    """Pretty print a str or bytes value.

    Fits the value on one line when the available width allows,
    otherwise splits it into adjacent string literals, arranged per
    ``ctx.multiline_strategy``. The final layout depends on the column
    position, so a ``contextual`` Doc is returned and evaluated during
    layout.
    """
    # Subclasses of str/bytes
    # will be printed as StrSubclass('the actual string')
    constructor = type(s)
    is_native_type = constructor in (str, bytes)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, constructor, args=(..., ))
    multiline_strategy = ctx.multiline_strategy
    prettyprinter_indent = ctx.indent

    def evaluator(indent, column, page_width, ribbon_width):
        # Called by the layout engine with the actual rendering position.
        nonlocal multiline_strategy
        columns_left_in_line = page_width - column
        columns_left_in_ribbon = indent + ribbon_width - column
        available_width = min(columns_left_in_line, columns_left_in_ribbon)
        singleline_str_chars = len(s) + len('""')
        flat_version = pretty_single_line_str(s, prettyprinter_indent)
        if singleline_str_chars <= available_width:
            if is_native_type:
                return flat_version
            return build_fncall(ctx, constructor, argdocs=[flat_version])
        # multiline string
        each_line_starts_on_col = indent
        each_line_ends_on_col = min(page_width, each_line_starts_on_col + ribbon_width)
        each_line_max_str_len = max(
            each_line_ends_on_col - each_line_starts_on_col - 2,
            # If we're printing the string inside a highly nested data
            # structure, we may naturally run out of available width.
            # In these cases, we need to give some space for printing
            # such that we don't get stuck in an infinite loop when
            # str_to_lines is called.
            8 + len('""')
        )
        use_quote = determine_quote_strategy(s)
        lines = list(str_to_lines(
            max_len=each_line_max_str_len,
            use_quote=use_quote,
            s=s,
            pattern=split_pattern,
        ))
        if len(lines) == 1:
            return flat_version
        parts = intersperse(
            HARDLINE,
            (
                pretty_single_line_str(
                    line,
                    indent=prettyprinter_indent,
                    use_quote=use_quote,
                )
                for line in lines
            )
        )
        # Subclass values are wrapped in a call, which already supplies
        # parentheses/indentation, so render the fragments plainly.
        if not is_native_type:
            multiline_strategy = MULTILINE_STRATEGY_PLAIN
        if multiline_strategy == MULTILINE_STRATEGY_PLAIN:
            res = always_break(concat(parts))
            if is_native_type:
                return res
            return build_fncall(ctx, constructor, argdocs=[res])
        elif multiline_strategy == MULTILINE_STRATEGY_HANG:
            return always_break(
                nest(
                    prettyprinter_indent,
                    concat(parts)
                )
            )
        else:
            if multiline_strategy == MULTILINE_STRATEGY_PARENS:
                left_paren, right_paren = LPAREN, RPAREN
            else:
                assert multiline_strategy == MULTILINE_STRATEGY_INDENTED
                left_paren, right_paren = '', ''
            return always_break(
                concat([
                    left_paren,
                    nest(
                        prettyprinter_indent,
                        concat([
                            HARDLINE,
                            *parts,
                        ])
                    ),
                    (
                        HARDLINE
                        if multiline_strategy == MULTILINE_STRATEGY_PARENS
                        else NIL
                    ),
                    right_paren
                ])
            )

    return contextual(evaluator)
def _pretty_recursion(value):
return '<Recursion on {} with id={}>'.format(
type(value).__name__,
id(value)
)
def python_to_sdocs(
    value,
    indent,
    width,
    depth,
    ribbon_width,
    max_seq_len,
    sort_dict_keys
):
    """Pretty print *value* and lay the result out into SDocs.

    :param value: the Python value to render
    :param indent: spaces per indentation level
    :param width: target page width in columns
    :param depth: maximum nesting depth, or None for unlimited
    :param ribbon_width: maximum content width per line
    :param max_seq_len: truncate sequences longer than this
    :param sort_dict_keys: render dict keys in sorted order
    """
    if depth is None:
        depth = float('inf')
    doc = pretty_python_value(
        value,
        ctx=PrettyContext(
            indent=indent,
            depth_left=depth,
            visited=set(),
            max_seq_len=max_seq_len,
            sort_dict_keys=sort_dict_keys
        )
    )
    # A comment annotation on the top-level value has no parent printer
    # to place it, so handle it here: inline when flat, above when broken.
    if is_commented(doc):
        doc = group(
            flat_choice(
                when_flat=concat([
                    doc,
                    ' ',
                    commentdoc(doc.annotation.value),
                ]),
                when_broken=concat([
                    commentdoc(doc.annotation.value),
                    HARDLINE,
                    doc
                ])
            )
        )
    # The ribbon may not exceed the page width.
    ribbon_frac = min(1.0, ribbon_width / width)
    return layout_smart(doc, width=width, ribbon_frac=ribbon_frac)
|
tommikaikkonen/prettyprinter
|
prettyprinter/prettyprinter.py
|
commentdoc
|
python
|
def commentdoc(text):
    """Return a Doc rendering *text* as a ``#`` comment.

    *text* is treated as whitespace-separated words; the layout
    algorithm may break the comment over several lines, each prefixed
    with ``# ``.

    :raises ValueError: if *text* is empty.
    """
    if not text:
        raise ValueError(
            'Expected non-empty comment str, got {}'.format(repr(text))
        )

    commentlines = []
    for line in text.splitlines():
        alternating_words_ws = list(filter(None, WHITESPACE_PATTERN_TEXT.split(line)))
        if not alternating_words_ws:
            # Fix: a blank line inside the comment text previously
            # raised IndexError on the [0] access below; render it as
            # a bare comment marker instead.
            commentlines.append('#')
            continue
        starts_with_whitespace = bool(
            WHITESPACE_PATTERN_TEXT.match(alternating_words_ws[0])
        )

        if starts_with_whitespace:
            # Preserve the leading indentation of the comment line.
            prefix = alternating_words_ws[0]
            alternating_words_ws = alternating_words_ws[1:]
        else:
            prefix = NIL

        if len(alternating_words_ws) % 2 == 0:
            # The last part must be whitespace.
            alternating_words_ws = alternating_words_ws[:-1]

        # Replace each whitespace run with a break point that, when
        # broken, starts a new '# '-prefixed line.
        for idx, tup in enumerate(zip(alternating_words_ws, cycle([False, True]))):
            part, is_ws = tup
            if is_ws:
                alternating_words_ws[idx] = flat_choice(
                    when_flat=part,
                    when_broken=always_break(
                        concat([
                            HARDLINE,
                            '# ',
                        ])
                    )
                )

        commentlines.append(
            concat([
                '# ',
                prefix,
                fill(alternating_words_ws)
            ])
        )

    outer = identity
    if len(commentlines) > 1:
        outer = always_break

    return annotate(
        Token.COMMENT_SINGLE,
        outer(concat(intersperse(HARDLINE, commentlines)))
    )
|
Returns a Doc representing a comment `text`. `text` is
treated as words, and any whitespace may be used to break
the comment to multiple lines.
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/prettyprinter.py#L599-L654
|
[
"def concat(docs):\n \"\"\"Returns a concatenation of the documents in the iterable argument\"\"\"\n return Concat(map(validate_doc, docs))\n",
"def fill(docs):\n return Fill(map(validate_doc, docs))\n",
"def identity(x):\n return x\n",
"def intersperse(x, ys):\n \"\"\"\n Returns an iterable where ``x`` is inserted between\n each element of ``ys``\n\n :type ys: Iterable\n \"\"\"\n it = iter(ys)\n\n try:\n y = next(it)\n except StopIteration:\n return\n\n yield y\n\n for y in it:\n yield x\n yield y\n",
"def annotate(annotation, doc):\n \"\"\"Annotates ``doc`` with the arbitrary value ``annotation``\"\"\"\n return Annotated(doc, annotation)\n",
"def always_break(doc):\n \"\"\"Instructs the layout algorithm that ``doc`` must be\n broken to multiple lines. This instruction propagates\n to all higher levels in the layout, but nested Docs\n may still be laid out flat.\"\"\"\n return AlwaysBreak(validate_doc(doc))\n",
"def flat_choice(when_broken, when_flat):\n \"\"\"Gives the layout algorithm two options. ``when_flat`` Doc will be\n used when the document fit onto a single line, and ``when_broken`` is used\n when the Doc had to be broken into multiple lines.\"\"\"\n return FlatChoice(\n validate_doc(when_broken),\n validate_doc(when_flat)\n )\n"
] |
import inspect
import math
import re
import sys
import warnings
import ast
from collections import OrderedDict
from functools import singledispatch, partial
from itertools import chain, cycle
from traceback import format_exception
from types import (
FunctionType,
BuiltinFunctionType,
BuiltinMethodType
)
from weakref import WeakKeyDictionary
from .doc import (
always_break,
annotate,
concat,
contextual,
flat_choice,
fill,
group,
nest,
NIL,
LINE,
SOFTLINE,
HARDLINE
)
from .doctypes import (
Annotated,
Doc
)
from .layout import layout_smart
from .syntax import Token
from .utils import identity, intersperse, take
PY_VERSION_INFO = sys.version_info
DICT_KEY_ORDER_SUPPORTED = PY_VERSION_INFO >= (3, 6)
UNSET_SENTINEL = object()
COMMA = annotate(Token.PUNCTUATION, ',')
COLON = annotate(Token.PUNCTUATION, ':')
ELLIPSIS = annotate(Token.PUNCTUATION, '...')
LPAREN = annotate(Token.PUNCTUATION, '(')
RPAREN = annotate(Token.PUNCTUATION, ')')
LBRACKET = annotate(Token.PUNCTUATION, '[')
RBRACKET = annotate(Token.PUNCTUATION, ']')
LBRACE = annotate(Token.PUNCTUATION, '{')
RBRACE = annotate(Token.PUNCTUATION, '}')
NEG_OP = annotate(Token.OPERATOR, '-')
MUL_OP = annotate(Token.OPERATOR, '*')
ADD_OP = annotate(Token.OPERATOR, '+')
ASSIGN_OP = annotate(Token.OPERATOR, '=')
WHITESPACE_PATTERN_TEXT = re.compile(r'(\s+)')
WHITESPACE_PATTERN_BYTES = re.compile(rb'(\s+)')
NONWORD_PATTERN_TEXT = re.compile(r'(\W+)')
NONWORD_PATTERN_BYTES = re.compile(rb'(\W+)')
# For dict keys
"""
(
'aaaaaaaaaa'
'aaaaaa'
)
"""
MULTILINE_STRATEGY_PARENS = 'MULTILINE_STRATEGY_PARENS'
# For dict values
"""
'aaaaaaaaaa'
'aaaaa'
"""
MULTILINE_STRATEGY_INDENTED = 'MULTILINE_STRATEGY_INDENTED'
# For sequence elements
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STRATEGY_HANG = 'MULTILINE_STRATEGY_HANG'
# For top level strs
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STRATEGY_PLAIN = 'MULTILINE_STRATEGY_PLAIN'
IMPLICIT_MODULES = {
'__main__',
'builtins',
}
class CommentAnnotation:
def __init__(self, value):
assert isinstance(value, str)
self.value = value
def __repr__(self):
return 'ValueComment({})'.format(repr(self.value))
class _CommentedValue:
def __init__(self, value, comment):
self.value = value
self.comment = comment
class _TrailingCommentedValue:
def __init__(self, value, comment):
self.value = value
self.comment = comment
def comment_value(value, comment_text):
    """Attach *comment_text* to a plain Python *value*.

    prettyprinter inspects and strips the wrapper during layout and
    renders the comment next to the value in the output. You rarely
    need this directly — use ``comment``, which handles Docs too.
    """
    return _CommentedValue(value, comment_text)


def comment_doc(doc, comment_text):
    """Attach *comment_text* to a Doc; low-level counterpart of ``comment``.

    The parent (or top-level) handler decides how the comment is
    rendered — e.g. the ``list`` printer positions comments for its
    elements.
    """
    return annotate(CommentAnnotation(comment_text), doc)


def comment(value, comment_text):
    """Annotate a value or a Doc with a comment.

    When printed by prettyprinter, the comment is rendered next to the
    value or Doc.
    """
    if isinstance(value, Doc):
        return comment_doc(value, comment_text)
    return comment_value(value, comment_text)


def trailing_comment(value, comment_text):
    """Annotate *value* so the comment renders "trailing": in place of
    the last element of a list/set/tuple, or after a call's last
    argument.

    Forces multi-line rendering, since Python has no inline comments.

    >>> trailing_comment(['value'], '...and more')
    [
        'value',
        # ...and more
    ]
    """
    return _TrailingCommentedValue(value, comment_text)
def unwrap_comments(value):
    """Peel comment wrappers off *value*.

    Returns ``(bare_value, comment, trailing_comment)``; either comment
    may be None. Multiple nested wrappers are unwrapped, keeping the
    innermost comment of each kind.
    """
    found_comment = None
    found_trailing = None
    while isinstance(value, (_CommentedValue, _TrailingCommentedValue)):
        if isinstance(value, _CommentedValue):
            found_comment = value.comment
        else:
            found_trailing = value.comment
        value = value.value
    return (value, found_comment, found_trailing)


def is_commented(value):
    """True if *value* is a Doc annotated with a CommentAnnotation."""
    if not isinstance(value, Annotated):
        return False
    return isinstance(value.annotation, CommentAnnotation)
def builtin_identifier(s):
    """Highlight *s* as a builtin name."""
    return annotate(Token.NAME_BUILTIN, s)


def identifier(s):
    """Highlight *s* as a function/class name."""
    return annotate(Token.NAME_FUNCTION, s)


def keyword_arg(s):
    """Highlight *s* as a keyword-argument name."""
    return annotate(Token.NAME_VARIABLE, s)


def general_identifier(s):
    """Render a callable (or a plain string) as a qualified identifier.

    Callables from IMPLICIT_MODULES render without a module prefix.
    """
    if not callable(s):
        return identifier(s)
    module, qualname = s.__module__, s.__qualname__
    if module not in IMPLICIT_MODULES:
        return identifier('{}.{}'.format(module, qualname))
    if module == 'builtins':
        return builtin_identifier(qualname)
    return identifier(qualname)


def classattr(cls, attrname):
    """Render ``Cls.attr``-style access to *attrname* on *cls*."""
    return concat([
        general_identifier(cls),
        identifier('.' + attrname)
    ])
class PrettyContext:
    """
    An immutable object used to track context during construction of
    layout primitives. An instance of PrettyContext is passed to every
    pretty printer definition.

    As a performance optimization, the ``visited`` set is implemented
    as mutable.
    """
    __slots__ = (
        'indent',
        'depth_left',
        'visited',
        'multiline_strategy',
        'max_seq_len',
        'sort_dict_keys',
        'user_ctx'
    )

    def __init__(
        self,
        indent,
        depth_left,
        visited=None,
        multiline_strategy=MULTILINE_STRATEGY_PLAIN,
        max_seq_len=1000,
        sort_dict_keys=False,
        user_ctx=None
    ):
        # indent: spaces per indentation level
        # depth_left: remaining nesting depth budget (may be float('inf'))
        # visited: ids of values currently being rendered (cycle guard)
        # multiline_strategy: one of the MULTILINE_STRATEGY_* constants
        # max_seq_len: sequences longer than this are truncated
        # sort_dict_keys: whether dict keys are rendered sorted
        # user_ctx: arbitrary user key/value data (see assoc/get)
        self.indent = indent
        self.depth_left = depth_left
        self.multiline_strategy = multiline_strategy
        self.max_seq_len = max_seq_len
        self.sort_dict_keys = sort_dict_keys
        if visited is None:
            visited = set()
        self.visited = visited
        self.user_ctx = user_ctx or {}

    def _replace(self, **kwargs):
        # Copy-with-overrides; the ``visited`` set is shared, not copied.
        passed_keys = set(kwargs.keys())
        fieldnames = type(self).__slots__
        assert passed_keys.issubset(set(fieldnames))
        return PrettyContext(
            **{
                k: (
                    kwargs[k]
                    if k in passed_keys
                    else getattr(self, k)
                )
                for k in fieldnames
            }
        )

    def use_multiline_strategy(self, strategy):
        """Return a copy with ``multiline_strategy`` set to *strategy*."""
        return self._replace(multiline_strategy=strategy)

    def assoc(self, key, value):
        """
        Return a modified PrettyContext with ``key`` set to ``value``
        """
        return self._replace(user_ctx={
            **self.user_ctx,
            key: value,
        })

    def set(self, key, value):
        # Deprecated alias for ``assoc``.
        warnings.warn(
            "PrettyContext.set will be deprecated in the future in favor of "
            "renamed PrettyPrinter.assoc. You can fix this warning by "
            "changing .set method calls to .assoc",
            PendingDeprecationWarning
        )
        return self.assoc(key, value)

    def get(self, key, default=None):
        """Look up *key* in the user context, like ``dict.get``."""
        return self.user_ctx.get(key, default)

    def nested_call(self):
        """Return a copy with one less unit of depth budget."""
        return self._replace(depth_left=self.depth_left - 1)

    # start_visit/end_visit/is_visited implement the recursion guard
    # used by _run_pretty; they mutate the shared ``visited`` set.
    def start_visit(self, value):
        self.visited.add(id(value))

    def end_visit(self, value):
        self.visited.remove(id(value))

    def is_visited(self, value):
        return id(value) in self.visited
def _warn_about_bad_printer(pretty_fn, value, exc):
fnname = '{}.{}'.format(
pretty_fn.__module__,
pretty_fn.__qualname__
)
warnings.warn(
"The pretty printer for {}, {}, raised an exception. "
"Falling back to default repr.\n\n{}".format(
type(value).__name__,
fnname,
''.join(format_exception(type(exc), exc, exc.__traceback__))
),
UserWarning
)
def _run_pretty(pretty_fn, value, ctx, trailing_comment=None):
    """Invoke a registered printer with recursion and error guards.

    Cycles are rendered via ``_pretty_recursion``; a printer that
    raises falls back to ``repr(value)`` with a warning. A
    ``trailing_comment`` is forwarded only to printers whose signature
    accepts it.
    """
    if ctx.is_visited(value):
        return _pretty_recursion(value)
    ctx.start_visit(value)
    if trailing_comment:
        try:
            doc = pretty_fn(
                value,
                ctx,
                trailing_comment=trailing_comment
            )
        except TypeError as e:
            # This is probably because pretty_fn does not support
            # trailing_comment, but let's make sure.
            sig = inspect.signature(pretty_fn)
            try:
                sig.bind(value, ctx, trailing_comment=trailing_comment)
            except TypeError:
                # The signature rejects trailing_comment: retry without
                # it and warn that the comment is dropped.
                fnname = '{}.{}'.format(
                    pretty_fn.__module__,
                    pretty_fn.__qualname__
                )
                warnings.warn(
                    "The pretty printer for {}, {}, does not support rendering "
                    "trailing comments. It will not show up in output.".format(
                        type(value).__name__, fnname
                    )
                )
                doc = pretty_fn(value, ctx)
            else:
                # The signature was fine, so the TypeError came from
                # inside the printer: fall back to repr.
                _warn_about_bad_printer(pretty_fn, value, exc=e)
                doc = repr(value)
    else:
        try:
            doc = pretty_fn(value, ctx)
        except Exception as e:
            _warn_about_bad_printer(pretty_fn, value, exc=e)
            doc = repr(value)
    # Printers must return a str or a Doc; anything else is a usage error.
    if not (
        isinstance(doc, str) or
        isinstance(doc, Doc)
    ):
        fnname = '{}.{}'.format(
            pretty_fn.__module__,
            pretty_fn.__qualname__
        )
        raise ValueError(
            'Functions decorated with register_pretty must return '
            'an instance of str or Doc. {} returned '
            '{} instead.'.format(fnname, repr(doc))
        )
    ctx.end_visit(value)
    return doc
_DEFERRED_DISPATCH_BY_NAME = {}
def get_deferred_key(type):
return type.__module__ + '.' + type.__qualname__
_PREDICATE_REGISTRY = []
def _repr_pretty(value, ctx):
    """Fallback printer: try predicate-registered printers in
    registration order, else fall back to repr()."""
    for matches, printer in _PREDICATE_REGISTRY:
        if matches(value):
            return printer(value, ctx)
    return repr(value)


# Default singledispatch target for types with no registered printer.
_BASE_DISPATCH = partial(_run_pretty, _repr_pretty)

pretty_dispatch = singledispatch(_BASE_DISPATCH)
def pretty_python_value(value, ctx):
    """Pretty print *value* into a Doc, honoring comment annotations.

    Comment wrappers are stripped before dispatch; a plain comment is
    re-attached to the resulting Doc, while a trailing comment is
    forwarded to the printer.
    """
    value, comment, trailing_comment = unwrap_comments(value)
    # Called for its side effect: binds any deferred (string-registered)
    # printer for this type before dispatching. The return value is
    # deliberately ignored.
    is_registered(
        type(value),
        check_superclasses=True,
        check_deferred=True,
        register_deferred=True
    )
    if trailing_comment:
        doc = pretty_dispatch(
            value,
            ctx,
            trailing_comment=trailing_comment
        )
    else:
        doc = pretty_dispatch(value, ctx)
    if comment:
        return comment_doc(doc, comment)
    return doc
def register_pretty(type=None, predicate=None):
    """Returns a decorator that registers the decorated function
    as the pretty printer for instances of ``type``.

    :param type: the type to register the pretty printer for, or a ``str``
                 to indicate the module and name, e.g.: ``'collections.Counter'``.
    :param predicate: a predicate function that takes one argument
                      and returns a boolean indicating if the value
                      should be handled by the registered pretty printer.

    Only one of ``type`` and ``predicate`` may be supplied. That means
    that ``predicate`` will be run on unregistered types only.

    The decorated function must accept exactly two positional arguments:

    - ``value`` to pretty print, and
    - ``ctx``, a context value.

    Here's an example of the pretty printer for OrderedDict:

    .. code:: python

        from collections import OrderedDict
        from prettyprinter import register_pretty, pretty_call

        @register_pretty(OrderedDict)
        def pretty_orderreddict(value, ctx):
            return pretty_call(ctx, OrderedDict, list(value.items()))
    """
    if type is None and predicate is None:
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument."
        )
    if type is not None and predicate is not None:
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument,"
            "but not both"
        )
    if predicate is not None:
        if not callable(predicate):
            raise ValueError(
                "Expected a callable for 'predicate', got {}".format(
                    repr(predicate)
                )
            )

    def decorator(fn):
        # Validate the printer's signature eagerly so registration
        # fails fast instead of at first use.
        sig = inspect.signature(fn)
        value = None
        ctx = None
        try:
            sig.bind(value, ctx)
        except TypeError:
            fnname = '{}.{}'.format(
                fn.__module__,
                fn.__qualname__
            )
            raise ValueError(
                "Functions decorated with register_pretty must accept "
                "exactly two positional parameters: 'value' and 'ctx'. "
                "The function signature for {} was not compatible.".format(
                    fnname
                )
            )
        if type:
            if isinstance(type, str):
                # We don't wrap this with _run_pretty,
                # so that when we register this printer with an actual
                # class, we can call register_pretty(cls)(fn)
                _DEFERRED_DISPATCH_BY_NAME[type] = fn
            else:
                pretty_dispatch.register(type, partial(_run_pretty, fn))
        else:
            assert callable(predicate)
            _PREDICATE_REGISTRY.append((predicate, fn))
        return fn
    return decorator
def is_registered(
    type,
    *,
    check_superclasses=False,
    check_deferred=True,
    register_deferred=True
):
    """Return True if a pretty printer is registered for *type*.

    :param check_superclasses: also consider printers registered for
        superclasses of *type*
    :param check_deferred: also consider printers registered by
        'module.QualName' string
    :param register_deferred: bind a matching deferred printer to the
        concrete class as a side effect (requires ``check_deferred``)
    """
    if not check_deferred and register_deferred:
        raise ValueError(
            'register_deferred may not be True when check_deferred is False'
        )
    if type in pretty_dispatch.registry:
        return True
    if check_deferred:
        # Check deferred printers for the type exactly.
        deferred_key = get_deferred_key(type)
        if deferred_key in _DEFERRED_DISPATCH_BY_NAME:
            if register_deferred:
                deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(
                    deferred_key
                )
                register_pretty(type)(deferred_dispatch)
            return True
    if not check_superclasses:
        return False
    if check_deferred:
        # Check deferred printers for supertypes.
        for supertype in type.__mro__[1:]:
            deferred_key = get_deferred_key(supertype)
            if deferred_key in _DEFERRED_DISPATCH_BY_NAME:
                if register_deferred:
                    deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(
                        deferred_key
                    )
                    register_pretty(supertype)(deferred_dispatch)
                return True
    # Finally, consult singledispatch's own MRO-based resolution.
    return pretty_dispatch.dispatch(type) is not _BASE_DISPATCH
def bracket(ctx, left, child, right):
    """Wrap *child* between *left*/*right* docs, indenting the content
    by ``ctx.indent`` when the layout breaks it over lines."""
    inner = nest(ctx.indent, concat([SOFTLINE, child]))
    return concat([left, inner, SOFTLINE, right])
def sequence_of_docs(ctx, left, docs, right, dangle=False, force_break=False):
    """Join *docs* with commas between *left*/*right* brackets.

    Commented docs are laid out with their comment inline when flat,
    or above the value when broken. ``dangle`` appends a trailing comma
    (for 1-tuples); ``force_break`` (or any comment, or a provably
    too-long sequence) forces multi-line output.
    """
    docs = list(docs)

    # Performance optimization:
    # in case of really long sequences,
    # the layout algorithm can be quite slow.
    # No branching here is needed if the sequence
    # is long enough that even with the shortest
    # element output, it does not fit the ribbon width.
    minimum_output_len = (
        2 +  # Assume left and right are one character each
        len(', ') * (len(docs) - 1) +
        len(docs)  # each element must take at least one character
    )
    MAX_PRACTICAL_RIBBON_WIDTH = 150
    will_break = force_break or minimum_output_len > MAX_PRACTICAL_RIBBON_WIDTH

    has_comment = any(is_commented(doc) for doc in docs)

    parts = []
    for idx, doc in enumerate(docs):
        last = idx == len(docs) - 1

        if is_commented(doc):
            comment_str = doc.annotation.value

            # Try to fit the comment at the end of the same line.
            flat_version = concat([
                doc,
                COMMA if not last else NIL,
                ' ',
                commentdoc(comment_str),
                HARDLINE if not last else NIL
            ])

            # If the value is broken to multiple lines, add
            # comment on the line above.
            broken_version = concat([
                commentdoc(comment_str),
                HARDLINE,
                doc,
                COMMA if not last else NIL,
                HARDLINE if not last else NIL
            ])
            parts.append(
                group(
                    flat_choice(
                        when_flat=flat_version,
                        when_broken=broken_version,
                    )
                )
            )
        else:
            parts.append(doc)
            if not last:
                parts.append(
                    concat([COMMA, LINE])
                )

    if dangle:
        parts.append(COMMA)

    outer = (
        always_break
        if will_break or has_comment
        else group
    )

    return outer(bracket(ctx, left, concat(parts), right))
def pretty_call(ctx, fn, *args, **kwargs):
    """Build a Doc that represents the call ``fn(*args, **kwargs)``.

    Requires Python 3.6+ so that ``**kwargs`` preserves keyword
    order; on Python 3.5 use :func:`~prettyprinter.pretty_call_alt`
    instead.

    Given an arbitrary context ``ctx``,::

        pretty_call(ctx, sorted, [7, 4, 5], reverse=True)

    renders as::

        sorted([7, 4, 5], reverse=True)

    and is automatically broken onto multiple lines by the layout
    algorithm when it doesn't fit::

        sorted(
            [7, 4, 5],
            reverse=True
        )

    Syntax highlighting is handled automatically.

    :param ctx: a context value
    :type ctx: prettyprinter.prettyprinter.PrettyContext
    :param fn: a callable
    :param args: positional arguments to render to the call
    :param kwargs: keyword arguments to render to the call
    :returns: :class:`~prettyprinter.doc.Doc`
    """
    return pretty_call_alt(ctx, fn, args, kwargs)
def pretty_call_alt(ctx, fn, args=(), kwargs=()):
    """Returns a Doc that represents a function call to :keyword:`fn` with
    the ``args`` and ``kwargs``.
    Given an arbitrary context ``ctx``,::
        pretty_call_alt(ctx, sorted, args=([7, 4, 5], ), kwargs=[('reverse', True)])
    Will result in output::
        sorted([7, 4, 5], reverse=True)
    The layout algorithm will automatically break the call to multiple
    lines if needed::
        sorted(
            [7, 4, 5],
            reverse=True
        )
    ``pretty_call_alt`` automatically handles syntax highlighting.
    :param ctx: a context value
    :type ctx: prettyprinter.prettyprinter.PrettyContext
    :param fn: a callable
    :param args: a ``tuple`` of positional arguments to render to the call
    :param kwargs: keyword arguments to render to the call. Either an instance
                   of ``OrderedDict``, or an iterable of two-tuples, where the
                   first element is a `str` (key), and the second is the Python
                   value for that keyword argument.
    :returns: :class:`~prettyprinter.doc.Doc`
    """
    fndoc = general_identifier(fn)
    # Out of depth budget: render fn(...) with elided arguments.
    if ctx.depth_left <= 0:
        return concat([fndoc, LPAREN, ELLIPSIS, RPAREN])
    # A sole list/dict/tuple argument "hugs" the parentheses, e.g.
    # frozenset([1, 2]) instead of breaking the bracket to its own line.
    if not kwargs and len(args) == 1:
        sole_arg = args[0]
        unwrapped_sole_arg, _comment, _trailing_comment = unwrap_comments(args[0])
        if type(unwrapped_sole_arg) in (list, dict, tuple):
            return build_fncall(
                ctx,
                fndoc,
                argdocs=[pretty_python_value(sole_arg, ctx)],
                hug_sole_arg=True,
            )
    nested_ctx = (
        ctx
        .nested_call()
        .use_multiline_strategy(MULTILINE_STRATEGY_HANG)
    )
    # On Python 3.5 a plain dict loses keyword order; warn the caller.
    if not DICT_KEY_ORDER_SUPPORTED and isinstance(kwargs, dict):
        warnings.warn(
            "A dict was passed to pretty_call_alt to represent kwargs, "
            "but Python 3.5 doesn't maintain key order for dicts. The order "
            "of keyword arguments will be undefined in the output. "
            "To fix this, pass a list of two-tuples or an instance of "
            "OrderedDict instead.",
            UserWarning
        )
    kwargitems = (
        kwargs.items()
        if isinstance(kwargs, (OrderedDict, dict))
        else kwargs
    )
    return build_fncall(
        ctx,
        fndoc,
        argdocs=(
            pretty_python_value(arg, nested_ctx)
            for arg in args
        ),
        kwargdocs=(
            (kwarg, pretty_python_value(v, nested_ctx))
            for kwarg, v in kwargitems
        ),
    )
def build_fncall(
    ctx,
    fndoc,
    argdocs=(),
    kwargdocs=(),
    hug_sole_arg=False,
    trailing_comment=None,
):
    """Builds a doc that looks like a function call,
    from docs that represent the function, arguments
    and keyword arguments.

    If ``hug_sole_arg`` is True, and the represented
    functional call is done with a single non-keyword
    argument, the function call parentheses will hug
    the sole argument doc without newlines and indentation
    in break mode. This makes a difference in calls
    like this::

        > hug_sole_arg = False
        frozenset(
            [
                1,
                2,
                3,
                4,
                5
            ]
        )
        > hug_sole_arg = True
        frozenset([
            1,
            2,
            3,
            4,
            5,
        ])

    If ``trailing_comment`` is provided, the text is
    rendered as a comment after the last argument and
    before the closing parenthesis. This will force
    the function call to be broken to multiple lines.
    """
    # Accept a raw callable in place of a pre-built doc.
    if callable(fndoc):
        fndoc = general_identifier(fndoc)
    has_comment = bool(trailing_comment)
    argdocs = list(argdocs)
    kwargdocs = list(kwargdocs)
    kwargdocs = [
        # Render each (name, doc) pair as name=doc, and
        # propagate any comments to the kwarg doc.
        (
            comment_doc(
                concat([
                    keyword_arg(binding),
                    ASSIGN_OP,
                    doc.doc
                ]),
                doc.annotation.value
            )
            if is_commented(doc)
            else concat([
                keyword_arg(binding),
                ASSIGN_OP,
                doc
            ])
        )
        for binding, doc in kwargdocs
    ]
    # Zero-argument call: fn()
    if not (argdocs or kwargdocs):
        return concat([
            fndoc,
            LPAREN,
            RPAREN,
        ])
    # Sole positional argument hugs the parentheses (see docstring).
    if (
        hug_sole_arg and
        not kwargdocs and
        len(argdocs) == 1 and
        not is_commented(argdocs[0])
    ):
        return group(
            concat([
                fndoc,
                LPAREN,
                argdocs[0],
                RPAREN
            ])
        )
    allarg_docs = [*argdocs, *kwargdocs]
    if trailing_comment:
        allarg_docs.append(commentdoc(trailing_comment))
    parts = []
    for idx, doc in enumerate(allarg_docs):
        last = idx == len(allarg_docs) - 1
        if is_commented(doc):
            has_comment = True
            comment_str = doc.annotation.value
            doc = doc.doc
        else:
            comment_str = None
        part = concat([doc, NIL if last else COMMA])
        if comment_str:
            # Comment fits inline after the argument when flat;
            # otherwise it goes on the line above the argument.
            part = group(
                flat_choice(
                    when_flat=concat([
                        part,
                        ' ',
                        commentdoc(comment_str)
                    ]),
                    when_broken=concat([
                        commentdoc(comment_str),
                        HARDLINE,
                        part,
                    ]),
                )
            )
        if not last:
            part = concat([part, HARDLINE if has_comment else LINE])
        parts.append(part)
    # Any comment anywhere forces the whole call onto multiple lines.
    outer = (
        always_break
        if has_comment
        else group
    )
    return outer(
        concat([
            fndoc,
            LPAREN,
            nest(
                ctx.indent,
                concat([
                    SOFTLINE,
                    concat(parts),
                ])
            ),
            SOFTLINE,
            RPAREN
        ])
    )
@register_pretty(type)
def pretty_type(_type, ctx):
    """Render a class object, adding a 'class' comment when its module
    qualification alone wouldn't make clear that it is a class."""
    # NoneType has no name in the global namespace; ``type(None)``
    # is the clearest way to spell it.
    if _type is type(None):  # noqa
        return pretty_call_alt(ctx, type, args=(None, ))
    ident = general_identifier(_type)
    # Builtins such as ``int`` are unambiguous on their own.
    # For anything else (e.g. ``functools.partial``) imitate Python's
    # <class 'functools.partial'> repr with a trailing comment so the
    # reader knows the value is a class.
    if _type.__module__ in IMPLICIT_MODULES:
        return ident
    return comment(ident, 'class')
@register_pretty(FunctionType)
def pretty_function(fn, ctx):
    """Render a plain Python function as its (qualified) name plus a
    'function' comment."""
    ident = general_identifier(fn)
    return comment(ident, 'function')
@register_pretty(BuiltinMethodType)
def pretty_builtin_method(method, ctx):
    """Render a built-in method as its name plus a 'built-in method'
    comment."""
    ident = general_identifier(method)
    return comment(ident, 'built-in method')
@register_pretty(BuiltinFunctionType)
def pretty_builtin_function(fn, ctx):
    """Render a built-in function as its name plus a 'built-in
    function' comment."""
    ident = general_identifier(fn)
    return comment(ident, 'built-in function')
# Class attributes that every collections.namedtuple-generated class
# defines; used by _is_namedtuple for duck-typed detection.
namedtuple_clsattrs = (
    '__slots__',
    '_make',
    '_replace',
    '_asdict'
)
# Integer class attributes characteristic of C struct sequence types
# ("cnamedtuples", e.g. time.struct_time); used by _is_cnamedtuple.
c_namedtuple_identify_by_clsattrs = (
    'n_fields',
    'n_sequence_fields',
    'n_unnamed_fields'
)
def _is_namedtuple(value):
    """Duck-typed check: does ``value``'s class define all the
    attributes a collections.namedtuple class has?"""
    cls = type(value)
    return all(hasattr(cls, attrname) for attrname in namedtuple_clsattrs)
def _is_cnamedtuple(value):
    """Check whether ``value`` is a C struct sequence ("cnamedtuple"),
    identified by the integer n_fields/n_sequence_fields/
    n_unnamed_fields class attributes."""
    cls = type(value)
    for attrname in c_namedtuple_identify_by_clsattrs:
        # Missing attribute (None default) or a non-int value both
        # disqualify the class.
        if not isinstance(getattr(cls, attrname, None), int):
            return False
    return True
def pretty_namedtuple(value, ctx, trailing_comment=None):
    """Render a collections.namedtuple instance as a constructor call
    with one keyword argument per field.

    :param trailing_comment: optional comment text rendered before the
        closing parenthesis (forces multi-line layout).
    """
    constructor = type(value)
    if trailing_comment is None:
        kwargs = zip(constructor._fields, value)
        return pretty_call_alt(ctx, constructor, kwargs=kwargs)
    # BUG FIX: ``trailing_comment`` used to be accepted but silently
    # dropped (pretty_call_alt has no such parameter), so truncation
    # notices like "...and N more elements" never rendered for
    # namedtuples. Mirror pretty_call_alt's kwargs path through
    # build_fncall, which supports trailing comments.
    if ctx.depth_left <= 0:
        return concat([
            general_identifier(constructor), LPAREN, ELLIPSIS, RPAREN
        ])
    nested_ctx = (
        ctx
        .nested_call()
        .use_multiline_strategy(MULTILINE_STRATEGY_HANG)
    )
    return build_fncall(
        ctx,
        general_identifier(constructor),
        kwargdocs=(
            (field, pretty_python_value(v, nested_ctx))
            for field, v in zip(constructor._fields, value)
        ),
        trailing_comment=trailing_comment,
    )
# Given a cnamedtuple value, returns a tuple of fieldnames, where the
# ith fieldname corresponds to the ith element of the cnamedtuple.
def resolve_cnamedtuple_fieldnames(value):
    """Recover a C struct sequence's field names by parsing its repr.

    CPython renders struct sequences as an evaluable constructor call
    with every field passed as a keyword argument, in order:
    https://github.com/python/cpython/blob/53b9e1a1c1d86187ad6fbee492b697ef8be74205/Objects/structseq.c#L168-L241
    As long as the repr keeps that shape, parsing it with ``ast`` and
    reading the keyword names yields the fieldnames.
    """
    call_node = ast.parse(repr(value), mode='eval').body
    return tuple(keyword_node.arg for keyword_node in call_node.keywords)
# Keys: classes/constructors.
# Values: a tuple of fieldnames if resolving them was successful.
#         Otherwise, the exception that was raised when attempting
#         to resolve the fieldnames (memoized so we don't retry).
# WeakKeyDictionary so cached classes can still be garbage collected.
_cnamedtuple_fieldnames_by_class = WeakKeyDictionary()
# Examples of cnamedtuples:
# - return value of time.strptime()
# - return value of os.uname()
def pretty_cnamedtuple(value, ctx, trailing_comment=None):
    """Render a C struct sequence as a constructor call over a tuple,
    with each element annotated by its field name as a comment.

    Field names are resolved once per class and memoized in
    ``_cnamedtuple_fieldnames_by_class``; a resolution failure is
    memoized too (as the exception) and re-raised so the caller can
    fall back to plain tuple rendering.
    """
    cls = type(value)
    if cls not in _cnamedtuple_fieldnames_by_class:
        try:
            fieldnames = resolve_cnamedtuple_fieldnames(value)
        except Exception as exc:
            # Cache the failure so we don't re-parse on every call.
            fieldnames = exc
        _cnamedtuple_fieldnames_by_class[cls] = fieldnames
    fieldnames = _cnamedtuple_fieldnames_by_class[cls]
    if isinstance(fieldnames, Exception):
        raise fieldnames
    return pretty_call_alt(
        ctx,
        cls,
        args=tuple([
            tuple(
                comment(val, fieldname)
                for val, fieldname in zip(value, fieldnames)
            )
        ])
    )
@register_pretty(tuple)
@register_pretty(list)
@register_pretty(set)
def pretty_bracketable_iterable(value, ctx, trailing_comment=None):
    """Render a tuple, list or set (or subclass) with bracket syntax.

    Tuples that are really namedtuples or C struct sequences are
    dispatched to the specialized printers. Subclasses are wrapped in
    a constructor call so the output stays evaluable. Sequences longer
    than ``ctx.max_seq_len`` are truncated with an explanatory
    trailing comment.
    """
    constructor = type(value)
    if isinstance(value, tuple):
        if _is_cnamedtuple(value):
            try:
                return pretty_cnamedtuple(
                    value,
                    ctx,
                    trailing_comment=trailing_comment
                )
            except Exception:
                pass  # render as a normal tuple
        elif _is_namedtuple(value):
            return pretty_namedtuple(value, ctx, trailing_comment=trailing_comment)
    is_native_type = constructor in (tuple, list, set)
    if len(value) > ctx.max_seq_len:
        # Truncated: tell the reader how many elements were dropped.
        truncation_comment = '...and {} more elements'.format(
            len(value) - ctx.max_seq_len
        )
        trailing_comment = (
            truncation_comment + '. ' + trailing_comment
            if trailing_comment
            else truncation_comment
        )
    dangle = False
    if isinstance(value, list):
        left, right = LBRACKET, RBRACKET
    elif isinstance(value, tuple):
        left, right = LPAREN, RPAREN
        if len(value) == 1:
            # One-element tuples need the trailing comma: (x, )
            dangle = True
    elif isinstance(value, set):
        left, right = LBRACE, RBRACE
    if not value:
        if isinstance(value, (list, tuple)):
            if is_native_type:
                return concat([left, right])
            return pretty_call_alt(ctx, constructor)
        else:
            # E.g. set() or SubclassOfSet() — no empty-set literal.
            return pretty_call_alt(ctx, constructor)
    if ctx.depth_left == 0:
        # Out of depth budget: elide contents.
        if isinstance(value, (list, tuple)):
            literal = concat([left, ELLIPSIS, right])
            if is_native_type:
                return literal
            return build_fncall(
                ctx,
                general_identifier(constructor),
                argdocs=(literal, ),
                hug_sole_arg=True
            )
        else:
            return pretty_call_alt(ctx, constructor, args=(..., ))
    if len(value) == 1:
        sole_value = list(value)[0]
        els = [
            pretty_python_value(
                sole_value,
                ctx=(
                    ctx
                    .nested_call()
                    .use_multiline_strategy(MULTILINE_STRATEGY_PLAIN)
                )
            )
        ]
    else:
        els = (
            pretty_python_value(
                el,
                ctx=(
                    ctx
                    .nested_call()
                    .use_multiline_strategy(MULTILINE_STRATEGY_HANG)
                )
            )
            for el in take(ctx.max_seq_len, value)
        )
    if trailing_comment:
        els = chain(els, [commentdoc(trailing_comment)])
        # A dangling comma would collide with the comment.
        dangle = False
    literal = sequence_of_docs(
        ctx,
        left,
        els,
        right,
        dangle=dangle,
        force_break=bool(trailing_comment)
    )
    if is_native_type:
        return literal
    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(literal, ),
        hug_sole_arg=True
    )
@register_pretty(frozenset)
def pretty_frozenset(value, ctx):
    """Render a frozenset (or subclass) as a constructor call, since
    frozensets have no literal syntax."""
    constructor = type(value)
    if not value:
        return pretty_call_alt(ctx, constructor)
    return pretty_call_alt(ctx, constructor, args=(list(value), ))
class _AlwaysSortable(object):
    # Sort-key wrapper used by pretty_dict when sorting keys: makes
    # heterogeneous keys comparable instead of raising TypeError.
    __slots__ = ('value', )
    def __init__(self, value):
        self.value = value
    def sortable_value(self):
        # Fallback key based on the wrapper's type name and identity.
        return (str(type(self)), id(self))
    def __lt__(self, other):
        try:
            return self.value < other.value
        except TypeError:
            # Values aren't mutually orderable (e.g. int vs str):
            # compare the fallback keys instead.
            return self.sortable_value() < other.sortable_value()
@register_pretty(dict)
def pretty_dict(d, ctx, trailing_comment=None):
    """Render a dict (or dict subclass) as a ``{key: value, ...}``
    literal, wrapped in a constructor call for subclasses.

    str/bytes keys are rendered with the PARENS multiline strategy at
    the current nesting depth; values use the INDENTED strategy one
    call deeper. Comments attached to keys or values are hoisted and
    laid out here. Dicts longer than ``ctx.max_seq_len`` are truncated
    with an explanatory trailing comment.
    """
    constructor = type(d)
    is_native_type = constructor is dict
    if ctx.depth_left == 0:
        # Out of depth budget: elide contents.
        literal = concat([LBRACE, ELLIPSIS, RBRACE])
        if is_native_type:
            return literal
        return build_fncall(
            ctx,
            general_identifier(constructor),
            argdocs=(literal, ),
            hug_sole_arg=True
        )
    if len(d) > ctx.max_seq_len:
        count_truncated = len(d) - ctx.max_seq_len
        truncation_comment = '...and {} more elements'.format(
            count_truncated
        )
        trailing_comment = (
            truncation_comment + '. ' + trailing_comment
            if trailing_comment
            else truncation_comment
        )
    has_comment = bool(trailing_comment)
    sorted_keys = (
        sorted(d.keys(), key=_AlwaysSortable)
        if ctx.sort_dict_keys
        else d.keys()
    )
    # First pass: render every key and value, stripping any comment
    # annotations so they can be laid out alongside the pair below.
    pairs = []
    for k in take(ctx.max_seq_len, sorted_keys):
        v = d[k]
        if isinstance(k, (str, bytes)):
            kdoc = pretty_str(
                k,
                # not a nested call on purpose
                ctx=ctx.use_multiline_strategy(MULTILINE_STRATEGY_PARENS),
            )
        else:
            kdoc = pretty_python_value(
                k,
                ctx=ctx.nested_call()
            )
        vdoc = pretty_python_value(
            v,
            ctx=(
                ctx
                .nested_call()
                .use_multiline_strategy(MULTILINE_STRATEGY_INDENTED)
            ),
        )
        kcomment = None
        if is_commented(kdoc):
            has_comment = True
            kcomment = kdoc.annotation.value
            kdoc = kdoc.doc
        vcomment = None
        if is_commented(vdoc):
            has_comment = True
            vcomment = vdoc.annotation.value
            vdoc = vdoc.doc
        pairs.append((k, v, kdoc, vdoc, kcomment, vcomment))
    # Second pass: assemble ``key: value`` parts with separators and
    # comment placement.
    parts = []
    for idx, tup in enumerate(pairs):
        last = idx == len(pairs) - 1
        k, v, kdoc, vdoc, kcomment, vcomment = tup
        if not (kcomment or vcomment):
            parts.append(
                concat([
                    kdoc,
                    concat([COLON, ' ']),
                    vdoc,
                    NIL if last else COMMA,
                    NIL if last else LINE,
                ]),
            )
            continue
        if kcomment:
            # Key comments always go on the line above the key.
            kcommented = concat([
                commentdoc(kcomment),
                HARDLINE,
                kdoc,
            ])
        else:
            kcommented = kdoc
        if vcomment:
            vcommented = group(
                flat_choice(
                    # Add comment at the end of the line
                    when_flat=concat([
                        vdoc,
                        NIL if last else COMMA,
                        ' ',
                        commentdoc(vcomment),
                        NIL if last else HARDLINE,
                    ]),
                    # Put comment above the value
                    # on its own line
                    when_broken=concat([
                        nest(
                            ctx.indent,
                            concat([
                                HARDLINE,
                                commentdoc(vcomment),
                                HARDLINE,
                                # Rerender vdoc with plain multiline strategy,
                                # since we already have an indentation.
                                pretty_python_value(
                                    v,
                                    ctx=(
                                        ctx
                                        .nested_call()
                                        .use_multiline_strategy(MULTILINE_STRATEGY_PLAIN)
                                    ),
                                ),
                                COMMA if not last else NIL,
                                HARDLINE if not last else NIL
                            ])
                        ),
                    ])
                )
            )
        else:
            vcommented = concat([
                vdoc,
                COMMA if not last else NIL,
                LINE if not last else NIL
            ])
        parts.append(
            concat([
                kcommented,
                concat([COLON, ' ']),
                vcommented
            ])
        )
    if trailing_comment:
        parts.append(concat([
            HARDLINE,
            commentdoc(trailing_comment)
        ]))
    doc = bracket(
        ctx,
        LBRACE,
        concat(parts),
        RBRACE,
    )
    # More than two pairs (or any comment) forces multi-line layout.
    if len(pairs) > 2 or has_comment:
        doc = always_break(doc)
    else:
        doc = group(doc)
    if is_native_type:
        return doc
    if not parts:
        return pretty_call_alt(ctx, constructor)
    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(doc, ),
        hug_sole_arg=True
    )
# Sentinels for the two infinities, which (like nan) have no literal
# syntax and must be rendered as float('...') calls.
INF_FLOAT = float('inf')
NEG_INF_FLOAT = float('-inf')
@register_pretty(float)
def pretty_float(value, ctx):
    """Render a float. Non-finite values are shown as constructor
    calls; float subclasses are wrapped in their constructor."""
    constructor = type(value)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, constructor, args=(..., ))
    if value == INF_FLOAT:
        special = 'inf'
    elif value == NEG_INF_FLOAT:
        special = '-inf'
    elif math.isnan(value):
        special = 'nan'
    else:
        special = None
    if special is not None:
        return pretty_call_alt(ctx, constructor, args=(special, ))
    literal = annotate(Token.NUMBER_FLOAT, repr(value))
    if constructor is float:
        return literal
    # Subclass: render as SubclassName(1.5) so output stays evaluable.
    return build_fncall(ctx, general_identifier(constructor), argdocs=(literal, ))
@register_pretty(int)
def pretty_int(value, ctx):
    """Render an int literal; int subclasses are wrapped in their
    constructor so the output stays evaluable."""
    constructor = type(value)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, constructor, args=(..., ))
    literal = annotate(Token.NUMBER_INT, repr(value))
    if constructor is not int:
        return build_fncall(
            ctx,
            general_identifier(constructor),
            argdocs=(literal, )
        )
    return literal
@register_pretty(type(...))
def pretty_ellipsis(value, ctx):
    # The Ellipsis singleton renders as its literal form ``...``.
    return ELLIPSIS
@register_pretty(bool)
def pretty_bool(value, ctx):
    """Render True/False; bool subclasses are wrapped in their
    constructor so the output stays evaluable."""
    constructor = type(value)
    literal = annotate(Token.KEYWORD_CONSTANT, 'True' if value else 'False')
    if constructor is not bool:
        return build_fncall(
            ctx,
            general_identifier(constructor),
            argdocs=(literal, )
        )
    return literal
# None always renders the same way, so the doc is built once and shared.
NONE_DOC = annotate(Token.KEYWORD_CONSTANT, 'None')
@register_pretty(type(None))
def pretty_none(value, ctx):
    return NONE_DOC
SINGLE_QUOTE_TEXT = "'"
SINGLE_QUOTE_BYTES = b"'"
DOUBLE_QUOTE_TEXT = '"'
DOUBLE_QUOTE_BYTES = b'"'
def determine_quote_strategy(s):
    """Pick the quote character for rendering ``s`` (str or bytes).

    Prefers single quotes; switches to double quotes when that avoids
    escaping, and when both quote kinds occur, picks the one needing
    fewer escapes (single quotes win ties). The returned quote is
    always a ``str``, even for bytes input.
    """
    if isinstance(s, str):
        single_quote, double_quote = SINGLE_QUOTE_TEXT, DOUBLE_QUOTE_TEXT
    else:
        single_quote, double_quote = SINGLE_QUOTE_BYTES, DOUBLE_QUOTE_BYTES
    if single_quote not in s:
        return SINGLE_QUOTE_TEXT
    if double_quote not in s:
        return DOUBLE_QUOTE_TEXT
    # Both kinds occur: minimize escapes, preferring single quotes.
    if s.count(single_quote) <= s.count(double_quote):
        return SINGLE_QUOTE_TEXT
    return DOUBLE_QUOTE_TEXT
def escape_str_for_quote(use_quote, s):
    """Return the repr-style escaped body of ``s`` (str or bytes),
    adjusted so it can be wrapped in ``use_quote`` quotes."""
    quoted = repr(s)
    repr_quote = quoted[-1]
    # The repr may carry a prefix (e.g. the ``b`` of a bytes repr);
    # keep only what lies between the opening and closing quotes.
    body = quoted[quoted.index(repr_quote) + 1:-1]
    if repr_quote == use_quote:
        # repr already escaped with respect to the desired quote.
        return body
    if use_quote == "'":
        # repr chose double quotes: unescape those, escape singles.
        return body.replace('\\"', '"').replace("'", "\\'")
    # repr chose single quotes: unescape those, escape doubles.
    return body.replace("\\'", "'").replace('"', '\\"')
# Matches the escape sequences Python string literals understand
# (short escapes, \N{NAME}, \uXXXX, \UXXXXXXXX, \xhh, octal). The
# outer capturing group makes re.split keep the escapes in the result.
STR_LITERAL_ESCAPES = re.compile(
    r'''((?:\\[\\abfnrtv"'])|'''
    r'(?:\\N\{.*?\})|'
    r'(?:\\u[a-fA-F0-9]{4})|'
    r'(?:\\U[a-fA-F0-9]{8})|'
    r'(?:\\x[a-fA-F0-9]{2})|'
    r'(?:\\[0-7]{1,3}))'
)
def highlight_escapes(s):
    """Annotate the escaped string body ``s`` so escape sequences and
    plain text get separate syntax-highlighting tokens."""
    if not s:
        return NIL
    # Splitting on a capturing group alternates non-matching and
    # matching segments; detect which kind comes first.
    segments = STR_LITERAL_ESCAPES.split(s)
    first_is_escape = bool(STR_LITERAL_ESCAPES.match(segments[0]))
    flags = cycle([first_is_escape, not first_is_escape])
    docs = [
        annotate(
            Token.STRING_ESCAPE if is_escape else Token.LITERAL_STRING,
            segment
        )
        for segment, is_escape in zip(segments, flags)
        if segment
    ]
    return concat(docs)
def pretty_single_line_str(s, indent, use_quote=None):
    """Render ``s`` as one quoted literal on a single line, with
    escape sequences highlighted and a ``b`` prefix for bytes."""
    if use_quote is None:
        use_quote = determine_quote_strategy(s)
    body = highlight_escapes(escape_str_for_quote(use_quote, s))
    prefix = (
        annotate(Token.STRING_AFFIX, 'b')
        if isinstance(s, bytes)
        else ''
    )
    return concat([
        prefix,
        annotate(
            Token.LITERAL_STRING,
            concat([use_quote, body, use_quote])
        )
    ])
def split_at(idx, sequence):
    """Split ``sequence`` into a ``(head, tail)`` pair at index ``idx``."""
    head = sequence[:idx]
    tail = sequence[idx:]
    return (head, tail)
def escaped_len(s, use_quote):
    # How many columns ``s`` occupies once escaped for ``use_quote``
    # quoting; used when measuring line budgets in str_to_lines.
    return len(escape_str_for_quote(use_quote, s))
def str_to_lines(max_len, use_quote, s, pattern=None):
    """Yield chunks of ``s`` (str or bytes) whose escaped length is at
    most ``max_len``, breaking preferentially at whitespace (or, when
    there is no whitespace, at non-word runs) and hard-splitting
    fragments longer than a whole line.

    :param pattern: optional compiled regex (with one capturing group)
        to split on instead of the default whitespace/non-word choice.
    """
    assert max_len > 0, "max_len must be positive"
    # Short enough to fit on one line; empty strings yield nothing.
    if len(s) <= max_len:
        if s:
            yield s
        return
    if pattern is None:
        if isinstance(s, str):
            whitespace_pattern = WHITESPACE_PATTERN_TEXT
            nonword_pattern = NONWORD_PATTERN_TEXT
        else:
            assert isinstance(s, bytes)
            whitespace_pattern = WHITESPACE_PATTERN_BYTES
            nonword_pattern = NONWORD_PATTERN_BYTES
        alternating_words_ws = whitespace_pattern.split(s)
        pattern = whitespace_pattern
        if len(alternating_words_ws) <= 1:
            # no whitespace: try splitting with nonword pattern.
            alternating_words_ws = nonword_pattern.split(s)
            pattern = nonword_pattern
    else:
        alternating_words_ws = pattern.split(s)
    if isinstance(s, str):
        empty = ''
    else:
        assert isinstance(s, bytes)
        empty = b''
    starts_with_whitespace = bool(pattern.match(alternating_words_ws[0]))
    # List[Tuple[str, bool]]
    # The boolean associated with each part indicates if it is a
    # whitespace/non-word part or not.
    tagged_alternating = iter(
        zip(
            alternating_words_ws,
            cycle([starts_with_whitespace, not starts_with_whitespace])
        )
    )
    next_part = None
    next_is_whitespace = None
    curr_line_parts = []
    curr_line_len = 0
    while True:
        if not next_part:
            try:
                next_part, next_is_whitespace = next(tagged_alternating)
            except StopIteration:
                break
            if not next_part:
                # re.split can produce empty leading/trailing parts.
                continue
        # We think of the current line as including next_part,
        # but as an optimization we don't append to curr_line_parts,
        # as we often would have to pop it back out.
        next_escaped_len = escaped_len(next_part, use_quote)
        curr_line_len += next_escaped_len
        if curr_line_len == max_len:
            if not next_is_whitespace and len(curr_line_parts) > 1:
                # Exact fit, but prefer breaking before a word so it
                # isn't separated from what follows it.
                yield empty.join(curr_line_parts)
                curr_line_parts = []
                curr_line_len = 0
                # Leave next_part and next_is_whitespace as is
                # to be processed on next iteration
            else:
                yield empty.join(chain(curr_line_parts, [next_part]))
                curr_line_parts = []
                curr_line_len = 0
                next_part = None
                next_is_whitespace = None
        elif curr_line_len > max_len:
            if not next_is_whitespace and curr_line_parts:
                # Flush what we have and retry the word on a fresh line.
                yield empty.join(curr_line_parts)
                curr_line_parts = []
                curr_line_len = 0
                # Leave next_part and next_is_whitespace as is
                # to be processed on next iteration
                continue
            # The fragment alone exceeds the line: hard-split it.
            remaining_len = max_len - (curr_line_len - next_escaped_len)
            this_line_part, next_line_part = split_at(max(remaining_len, 0), next_part)
            if this_line_part:
                curr_line_parts.append(this_line_part)
            if curr_line_parts:
                yield empty.join(curr_line_parts)
            curr_line_parts = []
            curr_line_len = 0
            if next_line_part:
                next_part = next_line_part
            else:
                next_part = None
        else:
            # Still room on this line: commit the part and continue.
            curr_line_parts.append(next_part)
            next_part = None
            next_is_whitespace = None
    if curr_line_parts:
        yield empty.join(curr_line_parts)
@register_pretty(str)
@register_pretty(bytes)
def pretty_str(s, ctx, split_pattern=None):
    """Render a str/bytes value; long strings are broken into multiple
    implicitly-concatenated literals. The single-line/multi-line
    decision is deferred to layout time via a ``contextual`` doc.
    """
    # Subclasses of str/bytes
    # will be printed as StrSubclass('the actual string')
    constructor = type(s)
    is_native_type = constructor in (str, bytes)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, constructor, args=(..., ))
    multiline_strategy = ctx.multiline_strategy
    prettyprinter_indent = ctx.indent
    def evaluator(indent, column, page_width, ribbon_width):
        # Called by the layout algorithm with the actual position,
        # so we can decide between single- and multi-line rendering.
        nonlocal multiline_strategy
        columns_left_in_line = page_width - column
        columns_left_in_ribbon = indent + ribbon_width - column
        available_width = min(columns_left_in_line, columns_left_in_ribbon)
        singleline_str_chars = len(s) + len('""')
        flat_version = pretty_single_line_str(s, prettyprinter_indent)
        if singleline_str_chars <= available_width:
            if is_native_type:
                return flat_version
            return build_fncall(ctx, constructor, argdocs=[flat_version])
        # multiline string
        each_line_starts_on_col = indent
        each_line_ends_on_col = min(page_width, each_line_starts_on_col + ribbon_width)
        each_line_max_str_len = max(
            each_line_ends_on_col - each_line_starts_on_col - 2,
            # If we're printing the string inside a highly nested data
            # structure, we may naturally run out of available width.
            # In these cases, we need to give some space for printing
            # such that we don't get stuck in an infinite loop when
            # str_to_lines is called.
            8 + len('""')
        )
        use_quote = determine_quote_strategy(s)
        lines = list(str_to_lines(
            max_len=each_line_max_str_len,
            use_quote=use_quote,
            s=s,
            pattern=split_pattern,
        ))
        if len(lines) == 1:
            return flat_version
        parts = intersperse(
            HARDLINE,
            (
                pretty_single_line_str(
                    line,
                    indent=prettyprinter_indent,
                    use_quote=use_quote,
                )
                for line in lines
            )
        )
        # Subclass constructor calls supply their own parentheses,
        # so no extra wrapping strategy is needed.
        if not is_native_type:
            multiline_strategy = MULTILINE_STRATEGY_PLAIN
        if multiline_strategy == MULTILINE_STRATEGY_PLAIN:
            res = always_break(concat(parts))
            if is_native_type:
                return res
            return build_fncall(ctx, constructor, argdocs=[res])
        elif multiline_strategy == MULTILINE_STRATEGY_HANG:
            return always_break(
                nest(
                    prettyprinter_indent,
                    concat(parts)
                )
            )
        else:
            if multiline_strategy == MULTILINE_STRATEGY_PARENS:
                left_paren, right_paren = LPAREN, RPAREN
            else:
                assert multiline_strategy == MULTILINE_STRATEGY_INDENTED
                left_paren, right_paren = '', ''
            return always_break(
                concat([
                    left_paren,
                    nest(
                        prettyprinter_indent,
                        concat([
                            HARDLINE,
                            *parts,
                        ])
                    ),
                    (
                        HARDLINE
                        if multiline_strategy == MULTILINE_STRATEGY_PARENS
                        else NIL
                    ),
                    right_paren
                ])
            )
    return contextual(evaluator)
def _pretty_recursion(value):
    """Placeholder text rendered in place of a value that (directly or
    indirectly) contains itself."""
    typename = type(value).__name__
    return '<Recursion on {} with id={}>'.format(typename, id(value))
def python_to_sdocs(
    value,
    indent,
    width,
    depth,
    ribbon_width,
    max_seq_len,
    sort_dict_keys
):
    """Pretty print ``value`` into a Doc tree and lay it out as SDocs.

    A comment attached to the outermost value has no parent printer to
    render it, so it is attached here: inline when it fits, on its own
    line above the value otherwise.

    :param depth: maximum nesting depth to render, or None for
        unlimited.
    """
    doc = pretty_python_value(
        value,
        ctx=PrettyContext(
            indent=indent,
            depth_left=float('inf') if depth is None else depth,
            visited=set(),
            max_seq_len=max_seq_len,
            sort_dict_keys=sort_dict_keys
        )
    )
    if is_commented(doc):
        annotation_text = doc.annotation.value
        doc = group(
            flat_choice(
                when_flat=concat([
                    doc,
                    ' ',
                    commentdoc(annotation_text),
                ]),
                when_broken=concat([
                    commentdoc(annotation_text),
                    HARDLINE,
                    doc
                ])
            )
        )
    # Clamp the ribbon to the page width.
    ribbon_frac = min(1.0, ribbon_width / width)
    return layout_smart(doc, width=width, ribbon_frac=ribbon_frac)
|
tommikaikkonen/prettyprinter
|
prettyprinter/prettyprinter.py
|
pretty_call
|
python
|
def pretty_call(ctx, fn, *args, **kwargs):
return pretty_call_alt(ctx, fn, args, kwargs)
|
Returns a Doc that represents a function call to :keyword:`fn` with
the remaining positional and keyword arguments.
You can only use this function on Python 3.6+. On Python 3.5, the order
of keyword arguments is not maintained, and you have to use
:func:`~prettyprinter.pretty_call_alt`.
Given an arbitrary context ``ctx``,::
pretty_call(ctx, sorted, [7, 4, 5], reverse=True)
Will result in output::
sorted([7, 4, 5], reverse=True)
The layout algorithm will automatically break the call to multiple
lines if needed::
sorted(
[7, 4, 5],
reverse=True
)
``pretty_call`` automatically handles syntax highlighting.
:param ctx: a context value
:type ctx: prettyprinter.prettyprinter.PrettyContext
:param fn: a callable
:param args: positional arguments to render to the call
:param kwargs: keyword arguments to render to the call
:returns: :class:`~prettyprinter.doc.Doc`
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/prettyprinter.py#L728-L761
|
[
"def pretty_call_alt(ctx, fn, args=(), kwargs=()):\n \"\"\"Returns a Doc that represents a function call to :keyword:`fn` with\n the ``args`` and ``kwargs``.\n\n Given an arbitrary context ``ctx``,::\n\n pretty_call_alt(ctx, sorted, args=([7, 4, 5], ), kwargs=[('reverse', True)])\n\n Will result in output::\n\n sorted([7, 4, 5], reverse=True)\n\n The layout algorithm will automatically break the call to multiple\n lines if needed::\n\n sorted(\n [7, 4, 5],\n reverse=True\n )\n\n ``pretty_call_alt`` automatically handles syntax highlighting.\n\n :param ctx: a context value\n :type ctx: prettyprinter.prettyprinter.PrettyContext\n :param fn: a callable\n :param args: a ``tuple`` of positional arguments to render to the call\n :param kwargs: keyword arguments to render to the call. Either an instance\n of ``OrderedDict``, or an iterable of two-tuples, where the\n first element is a `str` (key), and the second is the Python\n value for that keyword argument.\n :returns: :class:`~prettyprinter.doc.Doc`\n \"\"\"\n\n fndoc = general_identifier(fn)\n\n if ctx.depth_left <= 0:\n return concat([fndoc, LPAREN, ELLIPSIS, RPAREN])\n\n if not kwargs and len(args) == 1:\n sole_arg = args[0]\n unwrapped_sole_arg, _comment, _trailing_comment = unwrap_comments(args[0])\n if type(unwrapped_sole_arg) in (list, dict, tuple):\n return build_fncall(\n ctx,\n fndoc,\n argdocs=[pretty_python_value(sole_arg, ctx)],\n hug_sole_arg=True,\n )\n\n nested_ctx = (\n ctx\n .nested_call()\n .use_multiline_strategy(MULTILINE_STRATEGY_HANG)\n )\n\n if not DICT_KEY_ORDER_SUPPORTED and isinstance(kwargs, dict):\n warnings.warn(\n \"A dict was passed to pretty_call_alt to represent kwargs, \"\n \"but Python 3.5 doesn't maintain key order for dicts. The order \"\n \"of keyword arguments will be undefined in the output. 
\"\n \"To fix this, pass a list of two-tuples or an instance of \"\n \"OrderedDict instead.\",\n UserWarning\n )\n\n kwargitems = (\n kwargs.items()\n if isinstance(kwargs, (OrderedDict, dict))\n else kwargs\n )\n\n return build_fncall(\n ctx,\n fndoc,\n argdocs=(\n pretty_python_value(arg, nested_ctx)\n for arg in args\n ),\n kwargdocs=(\n (kwarg, pretty_python_value(v, nested_ctx))\n for kwarg, v in kwargitems\n ),\n )\n"
] |
import inspect
import math
import re
import sys
import warnings
import ast
from collections import OrderedDict
from functools import singledispatch, partial
from itertools import chain, cycle
from traceback import format_exception
from types import (
FunctionType,
BuiltinFunctionType,
BuiltinMethodType
)
from weakref import WeakKeyDictionary
from .doc import (
always_break,
annotate,
concat,
contextual,
flat_choice,
fill,
group,
nest,
NIL,
LINE,
SOFTLINE,
HARDLINE
)
from .doctypes import (
Annotated,
Doc
)
from .layout import layout_smart
from .syntax import Token
from .utils import identity, intersperse, take
PY_VERSION_INFO = sys.version_info
DICT_KEY_ORDER_SUPPORTED = PY_VERSION_INFO >= (3, 6)
UNSET_SENTINEL = object()
COMMA = annotate(Token.PUNCTUATION, ',')
COLON = annotate(Token.PUNCTUATION, ':')
ELLIPSIS = annotate(Token.PUNCTUATION, '...')
LPAREN = annotate(Token.PUNCTUATION, '(')
RPAREN = annotate(Token.PUNCTUATION, ')')
LBRACKET = annotate(Token.PUNCTUATION, '[')
RBRACKET = annotate(Token.PUNCTUATION, ']')
LBRACE = annotate(Token.PUNCTUATION, '{')
RBRACE = annotate(Token.PUNCTUATION, '}')
NEG_OP = annotate(Token.OPERATOR, '-')
MUL_OP = annotate(Token.OPERATOR, '*')
ADD_OP = annotate(Token.OPERATOR, '+')
ASSIGN_OP = annotate(Token.OPERATOR, '=')
WHITESPACE_PATTERN_TEXT = re.compile(r'(\s+)')
WHITESPACE_PATTERN_BYTES = re.compile(rb'(\s+)')
NONWORD_PATTERN_TEXT = re.compile(r'(\W+)')
NONWORD_PATTERN_BYTES = re.compile(rb'(\W+)')
# For dict keys
"""
(
'aaaaaaaaaa'
'aaaaaa'
)
"""
MULTILINE_STRATEGY_PARENS = 'MULTILINE_STRATEGY_PARENS'
# For dict values
"""
'aaaaaaaaaa'
'aaaaa'
"""
MULTILINE_STRATEGY_INDENTED = 'MULTILINE_STRATEGY_INDENTED'
# For sequence elements
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STRATEGY_HANG = 'MULTILINE_STRATEGY_HANG'
# For top level strs
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STRATEGY_PLAIN = 'MULTILINE_STRATEGY_PLAIN'
IMPLICIT_MODULES = {
'__main__',
'builtins',
}
class CommentAnnotation:
    """Doc annotation carrying comment text to be rendered next to a
    value during layout."""
    def __init__(self, value):
        assert isinstance(value, str)
        self.value = value
    def __repr__(self):
        return 'ValueComment({!r})'.format(self.value)
class _CommentedValue:
    # Wrapper pairing a Python value with comment text rendered
    # next to it; stripped by unwrap_comments during layout.
    def __init__(self, value, comment):
        self.value = value
        self.comment = comment
class _TrailingCommentedValue:
    # Wrapper pairing a value with comment text rendered after its
    # last element; forces multi-line layout.
    def __init__(self, value, comment):
        self.value = value
        self.comment = comment
def comment_value(value, comment_text):
    """Annotates a Python value with a comment text.
    prettyprinter will inspect and strip the annotation
    during the layout process, and handle rendering the comment
    next to the value in the output.
    It is highly unlikely you need to call this function. Use
    ``comment`` instead, which works in almost all cases.

    :returns: a ``_CommentedValue`` wrapping ``value``
    """
    return _CommentedValue(value, comment_text)
def comment_doc(doc, comment_text):
    """Annotates a Doc with a comment; used by the layout algorithm.
    You don't need to call this unless you're doing something low-level
    with Docs; use ``comment`` instead.
    ``prettyprinter`` will make sure the parent (or top-level) handler
    will render the comment in a proper way. E.g. if ``doc``
    represents an element in a list, then the ``list`` pretty
    printer will handle where to place the comment.

    :returns: ``doc`` annotated with a ``CommentAnnotation``
    """
    return annotate(CommentAnnotation(comment_text), doc)
def comment(value, comment_text):
    """Annotate a value or a Doc with ``comment_text``; prettyprinter
    renders the comment next to it in the output."""
    if not isinstance(value, Doc):
        return comment_value(value, comment_text)
    return comment_doc(value, comment_text)
def trailing_comment(value, comment_text):
    """Annotates a value with a comment text, so that
    the comment will be rendered "trailing", e.g. in place
    of the last element in a list, set or tuple, or after
    the last argument in a function.
    This will force the rendering of ``value`` to be broken
    to multiple lines as Python does not have inline comments.
    >>> trailing_comment(['value'], '...and more')
    [
        'value',
        # ...and more
    ]

    :returns: a ``_TrailingCommentedValue`` wrapping ``value``
    """
    return _TrailingCommentedValue(value, comment_text)
def unwrap_comments(value):
    """Strip any comment wrappers off ``value``, returning
    ``(bare_value, comment, trailing_comment)``; either comment slot
    is None when absent."""
    plain_comment = None
    end_comment = None
    while True:
        if isinstance(value, _CommentedValue):
            plain_comment = value.comment
            value = value.value
        elif isinstance(value, _TrailingCommentedValue):
            end_comment = value.comment
            value = value.value
        else:
            break
    return (value, plain_comment, end_comment)
def is_commented(value):
    """True when ``value`` is an Annotated doc whose annotation is a
    CommentAnnotation."""
    if not isinstance(value, Annotated):
        return False
    return isinstance(value.annotation, CommentAnnotation)
def builtin_identifier(s):
    # Highlight ``s`` as a builtin name (e.g. ``list``).
    return annotate(Token.NAME_BUILTIN, s)
def identifier(s):
    # Highlight ``s`` as a regular function/class name.
    return annotate(Token.NAME_FUNCTION, s)
def keyword_arg(s):
    # Highlight ``s`` as a keyword-argument name.
    return annotate(Token.NAME_VARIABLE, s)
def general_identifier(s):
    """Render ``s`` as an identifier Doc.

    Callables are shown by their qualified name; names from implicitly
    available modules omit the module prefix, and builtins get builtin
    highlighting. Non-callables are rendered as plain identifiers.
    """
    if not callable(s):
        return identifier(s)
    module = s.__module__
    qualname = s.__qualname__
    if module not in IMPLICIT_MODULES:
        return identifier('{}.{}'.format(module, qualname))
    if module == 'builtins':
        return builtin_identifier(qualname)
    return identifier(qualname)
def classattr(cls, attrname):
    """Doc for an attribute access on a class, e.g. ``SomeClass.attr``."""
    clsdoc = general_identifier(cls)
    attrdoc = identifier('.{}'.format(attrname))
    return concat([clsdoc, attrdoc])
class PrettyContext:
    """
    An immutable object used to track context during construction of
    layout primitives. An instance of PrettyContext is passed to every
    pretty printer definition.
    As a performance optimization, the ``visited`` set is implemented
    as mutable.
    """
    __slots__ = (
        'indent',              # indentation width (spaces) per nesting level
        'depth_left',          # remaining recursion budget; 0 renders '...'
        'visited',             # ids of values currently being printed (cycle guard)
        'multiline_strategy',  # how multi-line strings are laid out
        'max_seq_len',         # max elements rendered before truncation comment
        'sort_dict_keys',      # whether dict keys are sorted before rendering
        'user_ctx'             # free-form dict for user data (see assoc/get)
    )
    def __init__(
        self,
        indent,
        depth_left,
        visited=None,
        multiline_strategy=MULTILINE_STRATEGY_PLAIN,
        max_seq_len=1000,
        sort_dict_keys=False,
        user_ctx=None
    ):
        self.indent = indent
        self.depth_left = depth_left
        self.multiline_strategy = multiline_strategy
        self.max_seq_len = max_seq_len
        self.sort_dict_keys = sort_dict_keys
        # The visited set is shared (not copied) across derived contexts;
        # see start_visit/end_visit.
        if visited is None:
            visited = set()
        self.visited = visited
        self.user_ctx = user_ctx or {}
    def _replace(self, **kwargs):
        """Return a new PrettyContext with the given fields replaced,
        copying every other field from ``self`` (namedtuple-style)."""
        passed_keys = set(kwargs.keys())
        fieldnames = type(self).__slots__
        assert passed_keys.issubset(set(fieldnames))
        return PrettyContext(
            **{
                k: (
                    kwargs[k]
                    if k in passed_keys
                    else getattr(self, k)
                )
                for k in fieldnames
            }
        )
    def use_multiline_strategy(self, strategy):
        """Return a derived context using ``strategy`` for multi-line strings."""
        return self._replace(multiline_strategy=strategy)
    def assoc(self, key, value):
        """
        Return a modified PrettyContext with ``key`` set to ``value``
        """
        return self._replace(user_ctx={
            **self.user_ctx,
            key: value,
        })
    def set(self, key, value):
        # Deprecated alias of assoc; kept for backwards compatibility.
        warnings.warn(
            "PrettyContext.set will be deprecated in the future in favor of "
            "renamed PrettyPrinter.assoc. You can fix this warning by "
            "changing .set method calls to .assoc",
            PendingDeprecationWarning
        )
        return self.assoc(key, value)
    def get(self, key, default=None):
        """Look up ``key`` in the user context dict."""
        return self.user_ctx.get(key, default)
    def nested_call(self):
        """Return a derived context with one less level of depth budget."""
        return self._replace(depth_left=self.depth_left - 1)
    def start_visit(self, value):
        # Mutates the shared visited set: marks ``value`` as in-progress.
        self.visited.add(id(value))
    def end_visit(self, value):
        # Mutates the shared visited set: unmarks ``value``.
        self.visited.remove(id(value))
    def is_visited(self, value):
        """True if ``value`` is currently being printed higher in the stack."""
        return id(value) in self.visited
def _warn_about_bad_printer(pretty_fn, value, exc):
fnname = '{}.{}'.format(
pretty_fn.__module__,
pretty_fn.__qualname__
)
warnings.warn(
"The pretty printer for {}, {}, raised an exception. "
"Falling back to default repr.\n\n{}".format(
type(value).__name__,
fnname,
''.join(format_exception(type(exc), exc, exc.__traceback__))
),
UserWarning
)
def _run_pretty(pretty_fn, value, ctx, trailing_comment=None):
    """Invoke ``pretty_fn`` on ``value``, guarding against cyclic data,
    printers that don't accept ``trailing_comment``, printers that
    raise, and printers that return a non-str/non-Doc value."""
    # Cycle guard: if this exact object is already being printed higher
    # in the stack, emit a recursion placeholder instead of recursing.
    if ctx.is_visited(value):
        return _pretty_recursion(value)
    ctx.start_visit(value)
    if trailing_comment:
        try:
            doc = pretty_fn(
                value,
                ctx,
                trailing_comment=trailing_comment
            )
        except TypeError as e:
            # This is probably because pretty_fn does not support
            # trailing_comment, but let's make sure.
            sig = inspect.signature(pretty_fn)
            try:
                sig.bind(value, ctx, trailing_comment=trailing_comment)
            except TypeError:
                # Signature really doesn't accept trailing_comment:
                # warn and retry without it.
                fnname = '{}.{}'.format(
                    pretty_fn.__module__,
                    pretty_fn.__qualname__
                )
                warnings.warn(
                    "The pretty printer for {}, {}, does not support rendering "
                    "trailing comments. It will not show up in output.".format(
                        type(value).__name__, fnname
                    )
                )
                doc = pretty_fn(value, ctx)
            else:
                # Signature was fine, so the TypeError came from inside
                # the printer itself; fall back to repr.
                _warn_about_bad_printer(pretty_fn, value, exc=e)
                doc = repr(value)
    else:
        try:
            doc = pretty_fn(value, ctx)
        except Exception as e:
            # Any failing printer degrades to the default repr.
            _warn_about_bad_printer(pretty_fn, value, exc=e)
            doc = repr(value)
    if not (
        isinstance(doc, str) or
        isinstance(doc, Doc)
    ):
        fnname = '{}.{}'.format(
            pretty_fn.__module__,
            pretty_fn.__qualname__
        )
        raise ValueError(
            'Functions decorated with register_pretty must return '
            'an instance of str or Doc. {} returned '
            '{} instead.'.format(fnname, repr(doc))
        )
    ctx.end_visit(value)
    return doc
# Pretty printers registered by dotted name ('module.QualName') before
# the class itself is imported; resolved lazily by is_registered().
_DEFERRED_DISPATCH_BY_NAME = {}
def get_deferred_key(type):
    """Return the dotted lookup key ('module.QualName') for ``type``."""
    return '{}.{}'.format(type.__module__, type.__qualname__)
# (predicate, printer) pairs, tried in registration order for values
# whose type has no directly registered printer.
_PREDICATE_REGISTRY = []
def _repr_pretty(value, ctx):
    """Fallback printer: try predicate-registered printers in order,
    otherwise use the plain repr of ``value``."""
    for predicate, printer in _PREDICATE_REGISTRY:
        if predicate(value):
            return printer(value, ctx)
    return repr(value)
# Default dispatch target: run the repr-based fallback printer.
_BASE_DISPATCH = partial(_run_pretty, _repr_pretty)
# Single-dispatch table mapping value types to their pretty printers.
pretty_dispatch = singledispatch(_BASE_DISPATCH)
def pretty_python_value(value, ctx):
    """Pretty print ``value`` to a Doc, rendering any comment or
    trailing-comment annotations attached to it."""
    value, comment, trailing_comment = unwrap_comments(value)
    # Resolve any deferred (string-named) printer for this type so that
    # dispatch below can find it.
    is_registered(
        type(value),
        check_superclasses=True,
        check_deferred=True,
        register_deferred=True
    )
    if trailing_comment:
        doc = pretty_dispatch(value, ctx, trailing_comment=trailing_comment)
    else:
        doc = pretty_dispatch(value, ctx)
    if comment:
        doc = comment_doc(doc, comment)
    return doc
def register_pretty(type=None, predicate=None):
    """Returns a decorator that registers the decorated function
    as the pretty printer for instances of ``type``.

    :param type: the type to register the pretty printer for, or a ``str``
        to indicate the module and name, e.g.: ``'collections.Counter'``.
    :param predicate: a predicate function that takes one argument
        and returns a boolean indicating if the value
        should be handled by the registered pretty printer.

    Only one of ``type`` and ``predicate`` may be supplied. That means
    that ``predicate`` will be run on unregistered types only.

    The decorated function must accept exactly two positional arguments:

    - ``value`` to pretty print, and
    - ``ctx``, a context value.

    Here's an example of the pretty printer for OrderedDict:

    .. code:: python

        from collections import OrderedDict
        from prettyprinter import register_pretty, pretty_call

        @register_pretty(OrderedDict)
        def pretty_orderreddict(value, ctx):
            return pretty_call(ctx, OrderedDict, list(value.items()))
    """
    # Exactly one of type/predicate must be given.
    if type is None and predicate is None:
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument."
        )
    if type is not None and predicate is not None:
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument,"
            "but not both"
        )
    if predicate is not None:
        if not callable(predicate):
            raise ValueError(
                "Expected a callable for 'predicate', got {}".format(
                    repr(predicate)
                )
            )
    def decorator(fn):
        # Validate the printer's signature up front by attempting a
        # two-positional-argument bind.
        sig = inspect.signature(fn)
        value = None
        ctx = None
        try:
            sig.bind(value, ctx)
        except TypeError:
            fnname = '{}.{}'.format(
                fn.__module__,
                fn.__qualname__
            )
            raise ValueError(
                "Functions decorated with register_pretty must accept "
                "exactly two positional parameters: 'value' and 'ctx'. "
                "The function signature for {} was not compatible.".format(
                    fnname
                )
            )
        if type:
            if isinstance(type, str):
                # We don't wrap this with _run_pretty,
                # so that when we register this printer with an actual
                # class, we can call register_pretty(cls)(fn)
                _DEFERRED_DISPATCH_BY_NAME[type] = fn
            else:
                pretty_dispatch.register(type, partial(_run_pretty, fn))
        else:
            assert callable(predicate)
            _PREDICATE_REGISTRY.append((predicate, fn))
        return fn
    return decorator
def is_registered(
    type,
    *,
    check_superclasses=False,
    check_deferred=True,
    register_deferred=True
):
    """Return True if a pretty printer is registered for ``type``.

    With ``check_deferred``, printers registered by dotted name are
    considered; with ``register_deferred`` they are additionally moved
    into the real dispatch table as a side effect. With
    ``check_superclasses``, the type's MRO is consulted as well.
    """
    if not check_deferred and register_deferred:
        raise ValueError(
            'register_deferred may not be True when check_deferred is False'
        )
    if type in pretty_dispatch.registry:
        return True
    if check_deferred:
        # Check deferred printers for the type exactly.
        deferred_key = get_deferred_key(type)
        if deferred_key in _DEFERRED_DISPATCH_BY_NAME:
            if register_deferred:
                deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(
                    deferred_key
                )
                register_pretty(type)(deferred_dispatch)
            return True
    if not check_superclasses:
        return False
    if check_deferred:
        # Check deferred printers for supertypes.
        for supertype in type.__mro__[1:]:
            deferred_key = get_deferred_key(supertype)
            if deferred_key in _DEFERRED_DISPATCH_BY_NAME:
                if register_deferred:
                    deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(
                        deferred_key
                    )
                    register_pretty(supertype)(deferred_dispatch)
                return True
    # singledispatch falls back along the MRO; anything other than the
    # base fallback means some superclass has a printer.
    return pretty_dispatch.dispatch(type) is not _BASE_DISPATCH
def bracket(ctx, left, child, right):
    """Wrap ``child`` between ``left``/``right`` delimiters, nesting the
    child by the context's indent when the layout breaks."""
    inner = nest(ctx.indent, concat([SOFTLINE, child]))
    return concat([left, inner, SOFTLINE, right])
def commentdoc(text):
    """Returns a Doc representing a comment ``text``.

    ``text`` is treated as words, and any whitespace may be used to
    break the comment to multiple lines. Each input line of ``text``
    produces its own ``#``-prefixed comment line.

    :raises ValueError: when ``text`` is empty.
    """
    if not text:
        raise ValueError(
            'Expected non-empty comment str, got {}'.format(repr(text))
        )
    commentlines = []
    for line in text.splitlines():
        alternating_words_ws = list(filter(None, WHITESPACE_PATTERN_TEXT.split(line)))
        if not alternating_words_ws:
            # Bug fix: a blank line inside multi-line comment text used
            # to raise IndexError below; render it as a bare marker.
            commentlines.append('#')
            continue
        starts_with_whitespace = bool(
            WHITESPACE_PATTERN_TEXT.match(alternating_words_ws[0])
        )
        if starts_with_whitespace:
            # Preserve leading whitespace as a fixed prefix.
            prefix = alternating_words_ws[0]
            alternating_words_ws = alternating_words_ws[1:]
        else:
            prefix = NIL
        if len(alternating_words_ws) % 2 == 0:
            # The last part must be whitespace.
            alternating_words_ws = alternating_words_ws[:-1]
        for idx, tup in enumerate(zip(alternating_words_ws, cycle([False, True]))):
            part, is_ws = tup
            if is_ws:
                # Whitespace between words either stays as-is (flat) or
                # becomes a line break continued with '# '.
                alternating_words_ws[idx] = flat_choice(
                    when_flat=part,
                    when_broken=always_break(
                        concat([
                            HARDLINE,
                            '# ',
                        ])
                    )
                )
        commentlines.append(
            concat([
                '# ',
                prefix,
                fill(alternating_words_ws)
            ])
        )
    outer = identity
    if len(commentlines) > 1:
        outer = always_break
    return annotate(
        Token.COMMENT_SINGLE,
        outer(concat(intersperse(HARDLINE, commentlines)))
    )
def sequence_of_docs(ctx, left, docs, right, dangle=False, force_break=False):
    """Lay out ``docs`` as a comma-separated, bracketed sequence,
    breaking to multiple lines when needed or when any element
    carries a comment. ``dangle`` appends a trailing comma."""
    docs = list(docs)
    # Performance optimization:
    # in case of really long sequences,
    # the layout algorithm can be quite slow.
    # No branching here is needed if the sequence
    # is long enough that even with the shortest
    # element output, it does not fit the ribbon width.
    minimum_output_len = (
        2 +  # Assume left and right are one character each
        len(', ') * (len(docs) - 1) +
        len(docs)  # each element must take at least one character
    )
    MAX_PRACTICAL_RIBBON_WIDTH = 150
    will_break = force_break or minimum_output_len > MAX_PRACTICAL_RIBBON_WIDTH
    has_comment = any(is_commented(doc) for doc in docs)
    parts = []
    for idx, doc in enumerate(docs):
        last = idx == len(docs) - 1
        if is_commented(doc):
            comment_str = doc.annotation.value
            # Try to fit the comment at the end of the same line.
            flat_version = concat([
                doc,
                COMMA if not last else NIL,
                ' ',
                commentdoc(comment_str),
                HARDLINE if not last else NIL
            ])
            # If the value is broken to multiple lines, add
            # comment on the line above.
            broken_version = concat([
                commentdoc(comment_str),
                HARDLINE,
                doc,
                COMMA if not last else NIL,
                HARDLINE if not last else NIL
            ])
            parts.append(
                group(
                    flat_choice(
                        when_flat=flat_version,
                        when_broken=broken_version,
                    )
                )
            )
        else:
            parts.append(doc)
            if not last:
                parts.append(
                    concat([COMMA, LINE])
                )
    if dangle:
        parts.append(COMMA)
    # Comments force multi-line output since Python comments run to
    # end of line.
    outer = (
        always_break
        if will_break or has_comment
        else group
    )
    return outer(bracket(ctx, left, concat(parts), right))
def pretty_call_alt(ctx, fn, args=(), kwargs=()):
    """Returns a Doc that represents a function call to :keyword:`fn` with
    the ``args`` and ``kwargs``.

    Given an arbitrary context ``ctx``,::

        pretty_call_alt(ctx, sorted, args=([7, 4, 5], ), kwargs=[('reverse', True)])

    Will result in output::

        sorted([7, 4, 5], reverse=True)

    The layout algorithm will automatically break the call to multiple
    lines if needed::

        sorted(
            [7, 4, 5],
            reverse=True
        )

    ``pretty_call_alt`` automatically handles syntax highlighting.

    :param ctx: a context value
    :type ctx: prettyprinter.prettyprinter.PrettyContext
    :param fn: a callable
    :param args: a ``tuple`` of positional arguments to render to the call
    :param kwargs: keyword arguments to render to the call. Either an instance
        of ``OrderedDict``, or an iterable of two-tuples, where the
        first element is a `str` (key), and the second is the Python
        value for that keyword argument.
    :returns: :class:`~prettyprinter.doc.Doc`
    """
    fndoc = general_identifier(fn)
    # Out of depth budget: render fn(...) without recursing into args.
    if ctx.depth_left <= 0:
        return concat([fndoc, LPAREN, ELLIPSIS, RPAREN])
    # A single list/dict/tuple argument gets "hugged" by the call
    # parentheses (no extra indentation level).
    if not kwargs and len(args) == 1:
        sole_arg = args[0]
        unwrapped_sole_arg, _comment, _trailing_comment = unwrap_comments(args[0])
        if type(unwrapped_sole_arg) in (list, dict, tuple):
            return build_fncall(
                ctx,
                fndoc,
                argdocs=[pretty_python_value(sole_arg, ctx)],
                hug_sole_arg=True,
            )
    nested_ctx = (
        ctx
        .nested_call()
        .use_multiline_strategy(MULTILINE_STRATEGY_HANG)
    )
    # Plain dicts don't preserve insertion order on Python 3.5, so the
    # rendered keyword argument order would be undefined there.
    if not DICT_KEY_ORDER_SUPPORTED and isinstance(kwargs, dict):
        warnings.warn(
            "A dict was passed to pretty_call_alt to represent kwargs, "
            "but Python 3.5 doesn't maintain key order for dicts. The order "
            "of keyword arguments will be undefined in the output. "
            "To fix this, pass a list of two-tuples or an instance of "
            "OrderedDict instead.",
            UserWarning
        )
    kwargitems = (
        kwargs.items()
        if isinstance(kwargs, (OrderedDict, dict))
        else kwargs
    )
    return build_fncall(
        ctx,
        fndoc,
        argdocs=(
            pretty_python_value(arg, nested_ctx)
            for arg in args
        ),
        kwargdocs=(
            (kwarg, pretty_python_value(v, nested_ctx))
            for kwarg, v in kwargitems
        ),
    )
def build_fncall(
    ctx,
    fndoc,
    argdocs=(),
    kwargdocs=(),
    hug_sole_arg=False,
    trailing_comment=None,
):
    """Builds a doc that looks like a function call,
    from docs that represent the function, arguments
    and keyword arguments.

    If ``hug_sole_arg`` is True, and the represented
    functional call is done with a single non-keyword
    argument, the function call parentheses will hug
    the sole argument doc without newlines and indentation
    in break mode. This makes a difference in calls
    like this::

        > hug_sole_arg = False
        frozenset(
            [
                1,
                2,
                3,
                4,
                5
            ]
        )
        > hug_sole_arg = True
        frozenset([
            1,
            2,
            3,
            4,
            5,
        ])

    If ``trailing_comment`` is provided, the text is
    rendered as a comment after the last argument and
    before the closing parenthesis. This will force
    the function call to be broken to multiple lines.
    """
    # Convenience: allow passing a callable in place of its Doc.
    if callable(fndoc):
        fndoc = general_identifier(fndoc)
    has_comment = bool(trailing_comment)
    argdocs = list(argdocs)
    kwargdocs = list(kwargdocs)
    # Convert (name, valuedoc) pairs to 'name=value' docs, propagating
    # any comment annotation from the value doc onto the whole pair.
    kwargdocs = [
        # Propagate any comments to the kwarg doc.
        (
            comment_doc(
                concat([
                    keyword_arg(binding),
                    ASSIGN_OP,
                    doc.doc
                ]),
                doc.annotation.value
            )
            if is_commented(doc)
            else concat([
                keyword_arg(binding),
                ASSIGN_OP,
                doc
            ])
        )
        for binding, doc in kwargdocs
    ]
    # No arguments at all: render 'fn()'.
    if not (argdocs or kwargdocs):
        return concat([
            fndoc,
            LPAREN,
            RPAREN,
        ])
    if (
        hug_sole_arg and
        not kwargdocs and
        len(argdocs) == 1 and
        not is_commented(argdocs[0])
    ):
        return group(
            concat([
                fndoc,
                LPAREN,
                argdocs[0],
                RPAREN
            ])
        )
    allarg_docs = [*argdocs, *kwargdocs]
    if trailing_comment:
        allarg_docs.append(commentdoc(trailing_comment))
    parts = []
    for idx, doc in enumerate(allarg_docs):
        last = idx == len(allarg_docs) - 1
        if is_commented(doc):
            has_comment = True
            comment_str = doc.annotation.value
            doc = doc.doc
        else:
            comment_str = None
        part = concat([doc, NIL if last else COMMA])
        if comment_str:
            # Fit the comment on the same line if possible, otherwise
            # place it on its own line above the argument.
            part = group(
                flat_choice(
                    when_flat=concat([
                        part,
                        ' ',
                        commentdoc(comment_str)
                    ]),
                    when_broken=concat([
                        commentdoc(comment_str),
                        HARDLINE,
                        part,
                    ]),
                )
            )
        if not last:
            part = concat([part, HARDLINE if has_comment else LINE])
        parts.append(part)
    # Comments force multi-line output.
    outer = (
        always_break
        if has_comment
        else group
    )
    return outer(
        concat([
            fndoc,
            LPAREN,
            nest(
                ctx.indent,
                concat([
                    SOFTLINE,
                    concat(parts),
                ])
            ),
            SOFTLINE,
            RPAREN
        ])
    )
@register_pretty(type)
def pretty_type(_type, ctx):
    """Pretty printer for class objects."""
    if _type is type(None):  # noqa
        # NoneType is not available in the global namespace,
        # clearer to print type(None)
        return pretty_call_alt(ctx, type, args=(None, ))
    result = general_identifier(_type)
    # Native types read unambiguously on their own:
    # >>> int
    # int
    #
    # Others, e.g.:
    # >>> import functools; functools.partial
    # functools.partial
    #
    # may be unclear unless the user already knows it's a class. The
    # default Python repr is <class 'functools.partial'>, so imitate
    # that with a 'class' comment.
    if _type.__module__ in IMPLICIT_MODULES:
        return result
    return comment(result, 'class')
@register_pretty(FunctionType)
def pretty_function(fn, ctx):
    """Render a plain Python function as its qualified name with a
    'function' comment."""
    doc = general_identifier(fn)
    return comment(doc, 'function')
@register_pretty(BuiltinMethodType)
def pretty_builtin_method(method, ctx):
    """Render a built-in method as its qualified name with a
    'built-in method' comment."""
    doc = general_identifier(method)
    return comment(doc, 'built-in method')
@register_pretty(BuiltinFunctionType)
def pretty_builtin_function(fn, ctx):
    """Render a built-in function as its qualified name with a
    'built-in function' comment."""
    doc = general_identifier(fn)
    return comment(doc, 'built-in function')
# Class attributes that every collections.namedtuple class defines;
# used as a duck-typing fingerprint in _is_namedtuple.
namedtuple_clsattrs = (
    '__slots__',
    '_make',
    '_replace',
    '_asdict'
)
# Integer class attributes that identify a C structseq type
# (e.g. time.struct_time); used in _is_cnamedtuple.
c_namedtuple_identify_by_clsattrs = (
    'n_fields',
    'n_sequence_fields',
    'n_unnamed_fields'
)
def _is_namedtuple(value):
    """Duck-typing check: does ``value``'s class carry the attributes
    that a collections.namedtuple class defines?"""
    cls = type(value)
    return all(
        hasattr(cls, attrname)
        for attrname in namedtuple_clsattrs
    )
def _is_cnamedtuple(value):
    """Duck-typing check: is ``value`` a C structseq instance
    (e.g. time.struct_time), identified by the integer
    n_fields/n_sequence_fields/n_unnamed_fields class attributes?"""
    cls = type(value)
    for attrname in c_namedtuple_identify_by_clsattrs:
        attrvalue = getattr(cls, attrname, None)
        if not isinstance(attrvalue, int):
            return False
    return True
def pretty_namedtuple(value, ctx, trailing_comment=None):
    """Render a collections.namedtuple as a constructor call with one
    keyword argument per field."""
    cls = type(value)
    field_bindings = zip(cls._fields, value)
    return pretty_call_alt(ctx, cls, kwargs=field_bindings)
# Given a cnamedtuple value, returns a tuple of fieldnames ordered to
# match the elements of the cnamedtuple.
def resolve_cnamedtuple_fieldnames(value):
    """Recover the field names of a C structseq by parsing its repr.

    The structseq repr is a keyword-argument call expression with the
    fields in element order (see CPython's Objects/structseq.c):
    https://github.com/python/cpython/blob/53b9e1a1c1d86187ad6fbee492b697ef8be74205/Objects/structseq.c#L168-L241
    As long as the repr is implemented like that, parsing it with
    ``ast`` yields the field names reliably.
    """
    parsed = ast.parse(repr(value), mode='eval')
    call_node = parsed.body
    return tuple(kw.arg for kw in call_node.keywords)
# Keys: classes/constructors
# Values: a tuple of fieldnames if resolving them was successful.
#         Otherwise, the exception that was raised when attempting
#         to resolve the fieldnames.
# Weak keys so cached classes can still be garbage collected.
_cnamedtuple_fieldnames_by_class = WeakKeyDictionary()
# Examples of cnamedtuples:
# - return value of time.strptime()
# - return value of os.uname()
def pretty_cnamedtuple(value, ctx, trailing_comment=None):
    """Render a C structseq as a constructor call on a tuple whose
    elements are commented with their field names.

    Field-name resolution is cached per class; a failed resolution is
    cached as the exception and re-raised on subsequent calls.
    """
    cls = type(value)
    if cls not in _cnamedtuple_fieldnames_by_class:
        try:
            resolved = resolve_cnamedtuple_fieldnames(value)
        except Exception as exc:
            resolved = exc
        _cnamedtuple_fieldnames_by_class[cls] = resolved
    fieldnames = _cnamedtuple_fieldnames_by_class[cls]
    if isinstance(fieldnames, Exception):
        raise fieldnames
    commented_elements = tuple(
        comment(element, fieldname)
        for element, fieldname in zip(value, fieldnames)
    )
    return pretty_call_alt(ctx, cls, args=(commented_elements, ))
@register_pretty(tuple)
@register_pretty(list)
@register_pretty(set)
def pretty_bracketable_iterable(value, ctx, trailing_comment=None):
    """Pretty printer for tuples, lists and sets (and their
    subclasses). Namedtuples and C structseqs are detected and
    dispatched to their specialized printers; subclasses are rendered
    as constructor calls around the bracketed literal."""
    constructor = type(value)
    if isinstance(value, tuple):
        if _is_cnamedtuple(value):
            try:
                return pretty_cnamedtuple(
                    value,
                    ctx,
                    trailing_comment=trailing_comment
                )
            except Exception:
                pass  # render as a normal tuple
        elif _is_namedtuple(value):
            return pretty_namedtuple(value, ctx, trailing_comment=trailing_comment)
    is_native_type = constructor in (tuple, list, set)
    # Truncate long sequences, noting the truncation in a trailing
    # comment.
    if len(value) > ctx.max_seq_len:
        truncation_comment = '...and {} more elements'.format(
            len(value) - ctx.max_seq_len
        )
        trailing_comment = (
            truncation_comment + '. ' + trailing_comment
            if trailing_comment
            else truncation_comment
        )
    dangle = False
    if isinstance(value, list):
        left, right = LBRACKET, RBRACKET
    elif isinstance(value, tuple):
        left, right = LPAREN, RPAREN
        # One-element tuples need the dangling comma: (x, )
        if len(value) == 1:
            dangle = True
    elif isinstance(value, set):
        left, right = LBRACE, RBRACE
    if not value:
        if isinstance(value, (list, tuple)):
            if is_native_type:
                return concat([left, right])
            return pretty_call_alt(ctx, constructor)
        else:
            # E.g. set() or SubclassOfSet()
            return pretty_call_alt(ctx, constructor)
    # Out of depth budget: render '[...]' / '(...)' etc. without
    # recursing into elements.
    if ctx.depth_left == 0:
        if isinstance(value, (list, tuple)):
            literal = concat([left, ELLIPSIS, right])
            if is_native_type:
                return literal
            return build_fncall(
                ctx,
                general_identifier(constructor),
                argdocs=(literal, ),
                hug_sole_arg=True
            )
        else:
            return pretty_call_alt(ctx, constructor, args=(..., ))
    if len(value) == 1:
        sole_value = list(value)[0]
        els = [
            pretty_python_value(
                sole_value,
                ctx=(
                    ctx
                    .nested_call()
                    .use_multiline_strategy(MULTILINE_STRATEGY_PLAIN)
                )
            )
        ]
    else:
        els = (
            pretty_python_value(
                el,
                ctx=(
                    ctx
                    .nested_call()
                    .use_multiline_strategy(MULTILINE_STRATEGY_HANG)
                )
            )
            for el in take(ctx.max_seq_len, value)
        )
    if trailing_comment:
        els = chain(els, [commentdoc(trailing_comment)])
        dangle = False
    literal = sequence_of_docs(
        ctx,
        left,
        els,
        right,
        dangle=dangle,
        force_break=bool(trailing_comment)
    )
    if is_native_type:
        return literal
    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(literal, ),
        hug_sole_arg=True
    )
@register_pretty(frozenset)
def pretty_frozenset(value, ctx):
    """Render a frozenset (or subclass) as a constructor call on a
    list of its elements."""
    cls = type(value)
    if not value:
        return pretty_call_alt(ctx, cls)
    return pretty_call_alt(ctx, cls, args=(list(value), ))
class _AlwaysSortable(object):
__slots__ = ('value', )
def __init__(self, value):
self.value = value
def sortable_value(self):
return (str(type(self)), id(self))
def __lt__(self, other):
try:
return self.value < other.value
except TypeError:
return self.sortable_value() < other.sortable_value()
@register_pretty(dict)
def pretty_dict(d, ctx, trailing_comment=None):
    """Pretty printer for dicts and dict subclasses. Handles key
    sorting, truncation of long dicts, comment annotations on keys
    and values, and rendering subclasses as constructor calls."""
    constructor = type(d)
    is_native_type = constructor is dict
    # Out of depth budget: render '{...}' without recursing.
    if ctx.depth_left == 0:
        literal = concat([LBRACE, ELLIPSIS, RBRACE])
        if is_native_type:
            return literal
        return build_fncall(
            ctx,
            general_identifier(constructor),
            argdocs=(literal, ),
            hug_sole_arg=True
        )
    # Truncate long dicts, noting the truncation in a trailing comment.
    if len(d) > ctx.max_seq_len:
        count_truncated = len(d) - ctx.max_seq_len
        truncation_comment = '...and {} more elements'.format(
            count_truncated
        )
        trailing_comment = (
            truncation_comment + '. ' + trailing_comment
            if trailing_comment
            else truncation_comment
        )
    has_comment = bool(trailing_comment)
    sorted_keys = (
        sorted(d.keys(), key=_AlwaysSortable)
        if ctx.sort_dict_keys
        else d.keys()
    )
    # First pass: render each key/value pair, unwrapping any comment
    # annotations that the printers attached.
    pairs = []
    for k in take(ctx.max_seq_len, sorted_keys):
        v = d[k]
        if isinstance(k, (str, bytes)):
            kdoc = pretty_str(
                k,
                # not a nested call on purpose
                ctx=ctx.use_multiline_strategy(MULTILINE_STRATEGY_PARENS),
            )
        else:
            kdoc = pretty_python_value(
                k,
                ctx=ctx.nested_call()
            )
        vdoc = pretty_python_value(
            v,
            ctx=(
                ctx
                .nested_call()
                .use_multiline_strategy(MULTILINE_STRATEGY_INDENTED)
            ),
        )
        kcomment = None
        if is_commented(kdoc):
            has_comment = True
            kcomment = kdoc.annotation.value
            kdoc = kdoc.doc
        vcomment = None
        if is_commented(vdoc):
            has_comment = True
            vcomment = vdoc.annotation.value
            vdoc = vdoc.doc
        pairs.append((k, v, kdoc, vdoc, kcomment, vcomment))
    # Second pass: lay out 'key: value' parts, placing key/value
    # comments appropriately.
    parts = []
    for idx, tup in enumerate(pairs):
        last = idx == len(pairs) - 1
        k, v, kdoc, vdoc, kcomment, vcomment = tup
        if not (kcomment or vcomment):
            parts.append(
                concat([
                    kdoc,
                    concat([COLON, ' ']),
                    vdoc,
                    NIL if last else COMMA,
                    NIL if last else LINE,
                ]),
            )
            continue
        # Key comments always go on their own line above the key.
        if kcomment:
            kcommented = concat([
                commentdoc(kcomment),
                HARDLINE,
                kdoc,
            ])
        else:
            kcommented = kdoc
        if vcomment:
            vcommented = group(
                flat_choice(
                    # Add comment at the end of the line
                    when_flat=concat([
                        vdoc,
                        NIL if last else COMMA,
                        ' ',
                        commentdoc(vcomment),
                        NIL if last else HARDLINE,
                    ]),
                    # Put comment above the value
                    # on its own line
                    when_broken=concat([
                        nest(
                            ctx.indent,
                            concat([
                                HARDLINE,
                                commentdoc(vcomment),
                                HARDLINE,
                                # Rerender vdoc with plain multiline strategy,
                                # since we already have an indentation.
                                pretty_python_value(
                                    v,
                                    ctx=(
                                        ctx
                                        .nested_call()
                                        .use_multiline_strategy(MULTILINE_STRATEGY_PLAIN)
                                    ),
                                ),
                                COMMA if not last else NIL,
                                HARDLINE if not last else NIL
                            ])
                        ),
                    ])
                )
            )
        else:
            vcommented = concat([
                vdoc,
                COMMA if not last else NIL,
                LINE if not last else NIL
            ])
        parts.append(
            concat([
                kcommented,
                concat([COLON, ' ']),
                vcommented
            ])
        )
    if trailing_comment:
        parts.append(concat([
            HARDLINE,
            commentdoc(trailing_comment)
        ]))
    doc = bracket(
        ctx,
        LBRACE,
        concat(parts),
        RBRACE,
    )
    # More than two pairs, or any comment, forces multi-line output.
    if len(pairs) > 2 or has_comment:
        doc = always_break(doc)
    else:
        doc = group(doc)
    if is_native_type:
        return doc
    if not parts:
        return pretty_call_alt(ctx, constructor)
    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(doc, ),
        hug_sole_arg=True
    )
# Sentinels used to render infinities as float('inf') / float('-inf')
# calls, since their reprs are not valid Python literals.
INF_FLOAT = float('inf')
NEG_INF_FLOAT = float('-inf')
@register_pretty(float)
def pretty_float(value, ctx):
    """Pretty printer for floats; inf, -inf and nan are printed as
    float(...) calls because their reprs are not valid literals."""
    cls = type(value)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, cls, args=(..., ))
    if value == INF_FLOAT:
        return pretty_call_alt(ctx, cls, args=('inf', ))
    if value == NEG_INF_FLOAT:
        return pretty_call_alt(ctx, cls, args=('-inf', ))
    if math.isnan(value):
        return pretty_call_alt(ctx, cls, args=('nan', ))
    doc = annotate(Token.NUMBER_FLOAT, repr(value))
    if cls is float:
        return doc
    # Float subclasses render as constructor calls.
    return build_fncall(ctx, general_identifier(cls), argdocs=(doc, ))
@register_pretty(int)
def pretty_int(value, ctx):
    """Pretty printer for ints; subclasses render as constructor calls."""
    cls = type(value)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, cls, args=(..., ))
    doc = annotate(Token.NUMBER_INT, repr(value))
    if cls is int:
        return doc
    return build_fncall(ctx, general_identifier(cls), argdocs=(doc, ))
@register_pretty(type(...))
def pretty_ellipsis(value, ctx):
    """Render the Ellipsis singleton as '...'."""
    return ELLIPSIS
@register_pretty(bool)
def pretty_bool(value, ctx):
    """Pretty printer for bools; subclasses render as constructor calls."""
    cls = type(value)
    doc = annotate(Token.KEYWORD_CONSTANT, 'True' if value else 'False')
    if cls is bool:
        return doc
    return build_fncall(ctx, general_identifier(cls), argdocs=(doc, ))
# Shared, immutable Doc for the None singleton.
NONE_DOC = annotate(Token.KEYWORD_CONSTANT, 'None')
@register_pretty(type(None))
def pretty_none(value, ctx):
    """Render None as the keyword constant 'None'."""
    return NONE_DOC
# Quote characters in both str and bytes form, used when choosing and
# applying a quoting strategy for string literals.
SINGLE_QUOTE_TEXT = "'"
SINGLE_QUOTE_BYTES = b"'"
DOUBLE_QUOTE_TEXT = '"'
DOUBLE_QUOTE_BYTES = b'"'
def determine_quote_strategy(s):
    """Pick the quote character for rendering ``s`` (str or bytes).

    Prefers single quotes, switching to double quotes when that
    requires fewer escapes. The returned quote is always a one
    character ``str``, regardless of the input type.
    """
    if isinstance(s, str):
        single_quote, double_quote = "'", '"'
    else:
        single_quote, double_quote = b"'", b'"'
    if single_quote not in s:
        return "'"
    if double_quote not in s:
        return '"'
    # Contains both kinds: escape whichever appears at most as often.
    if s.count(single_quote) <= s.count(double_quote):
        return "'"
    return '"'
def escape_str_for_quote(use_quote, s):
    """Escape ``s`` (str or bytes) for embedding between ``use_quote``
    quotes, reusing ``repr()`` for the actual escaping.

    When repr happens to pick a different quote character than
    ``use_quote``, swap which quote kind is escaped in the result.
    """
    quoted = repr(s)
    repr_quote = quoted[-1]
    # A bytes repr carries a prefix (e.g. b'...'), so locate the
    # opening quote rather than assuming it is the first character.
    body_start = quoted.find(repr_quote) + 1
    body = quoted[body_start:-1]
    if repr_quote == use_quote:
        # repr already escaped the quote kind we want to use.
        return body
    if use_quote == "'":
        # repr used double quotes: unescape those, escape singles.
        return body.replace('\\"', '"').replace("'", "\\'")
    # repr used single quotes: unescape those, escape doubles.
    return body.replace("\\'", "'").replace('"', '\\"')
# Matches Python string-literal escape sequences (simple escapes,
# \N{...}, \uXXXX, \UXXXXXXXX, \xXX and octal escapes); used with
# re.split (capturing group) to alternate literal and escape runs.
STR_LITERAL_ESCAPES = re.compile(
    r'''((?:\\[\\abfnrtv"'])|'''
    r'(?:\\N\{.*?\})|'
    r'(?:\\u[a-fA-F0-9]{4})|'
    r'(?:\\U[a-fA-F0-9]{8})|'
    r'(?:\\x[a-fA-F0-9]{2})|'
    r'(?:\\[0-7]{1,3}))'
)
def highlight_escapes(s):
    """Split the already-escaped string ``s`` into a Doc where escape
    sequences carry STRING_ESCAPE highlighting and everything else
    LITERAL_STRING."""
    if not s:
        return NIL
    pieces = STR_LITERAL_ESCAPES.split(s)
    # re.split with a capturing group alternates non-matching and
    # matching runs; determine which kind comes first.
    is_escape = bool(STR_LITERAL_ESCAPES.match(pieces[0]))
    docs = []
    for piece in pieces:
        if piece:
            token = (
                Token.STRING_ESCAPE
                if is_escape
                else Token.LITERAL_STRING
            )
            docs.append(annotate(token, piece))
        # The alternation advances even across empty pieces.
        is_escape = not is_escape
    return concat(docs)
def pretty_single_line_str(s, indent, use_quote=None):
    """Render ``s`` (str or bytes) as a one-line quoted literal Doc
    with escape sequences highlighted; bytes get a 'b' prefix."""
    if use_quote is None:
        use_quote = determine_quote_strategy(s)
    prefix = (
        annotate(Token.STRING_AFFIX, 'b')
        if isinstance(s, bytes)
        else ''
    )
    body = highlight_escapes(escape_str_for_quote(use_quote, s))
    literal = annotate(
        Token.LITERAL_STRING,
        concat([use_quote, body, use_quote])
    )
    return concat([prefix, literal])
def split_at(idx, sequence):
    """Split ``sequence`` at ``idx``: returns (head, tail)."""
    head = sequence[:idx]
    tail = sequence[idx:]
    return (head, tail)
def escaped_len(s, use_quote):
    """Length of ``s`` after escaping it for ``use_quote`` quoting."""
    escaped = escape_str_for_quote(use_quote, s)
    return len(escaped)
def str_to_lines(max_len, use_quote, s, pattern=None):
    """Yield chunks of ``s`` (str or bytes) whose *escaped* length
    (under ``use_quote`` quoting) fits within ``max_len`` columns.

    Splits preferentially at whitespace boundaries, falling back to
    non-word boundaries, and hard-splits words that are longer than
    ``max_len`` on their own. ``pattern`` overrides the split regex.
    """
    assert max_len > 0, "max_len must be positive"
    # Fast path: short enough to emit as a single line.
    if len(s) <= max_len:
        if s:
            yield s
        return
    if pattern is None:
        if isinstance(s, str):
            whitespace_pattern = WHITESPACE_PATTERN_TEXT
            nonword_pattern = NONWORD_PATTERN_TEXT
        else:
            assert isinstance(s, bytes)
            whitespace_pattern = WHITESPACE_PATTERN_BYTES
            nonword_pattern = NONWORD_PATTERN_BYTES
        alternating_words_ws = whitespace_pattern.split(s)
        pattern = whitespace_pattern
        if len(alternating_words_ws) <= 1:
            # no whitespace: try splitting with nonword pattern.
            alternating_words_ws = nonword_pattern.split(s)
            pattern = nonword_pattern
    else:
        alternating_words_ws = pattern.split(s)
    # Empty value of the same type as s, used for joining chunks.
    if isinstance(s, str):
        empty = ''
    else:
        assert isinstance(s, bytes)
        empty = b''
    starts_with_whitespace = bool(pattern.match(alternating_words_ws[0]))
    # List[Tuple[str, bool]]
    # The boolean associated with each part indicates if it is a
    # whitespce/non-word part or not.
    tagged_alternating = iter(
        zip(
            alternating_words_ws,
            cycle([starts_with_whitespace, not starts_with_whitespace])
        )
    )
    next_part = None
    next_is_whitespace = None
    curr_line_parts = []
    curr_line_len = 0
    while True:
        if not next_part:
            try:
                next_part, next_is_whitespace = next(tagged_alternating)
            except StopIteration:
                break
            if not next_part:
                continue
        # We think of the current line as including next_part,
        # but as an optimization we don't append to curr_line_parts,
        # as we often would have to pop it back out.
        next_escaped_len = escaped_len(next_part, use_quote)
        curr_line_len += next_escaped_len
        if curr_line_len == max_len:
            if not next_is_whitespace and len(curr_line_parts) > 1:
                # Exact fit but mid-word: flush without next_part and
                # reconsider it on the next line.
                yield empty.join(curr_line_parts)
                curr_line_parts = []
                curr_line_len = 0
                # Leave next_part and next_is_whitespace as is
                # to be processed on next iteration
            else:
                yield empty.join(chain(curr_line_parts, [next_part]))
                curr_line_parts = []
                curr_line_len = 0
                next_part = None
                next_is_whitespace = None
        elif curr_line_len > max_len:
            if not next_is_whitespace and curr_line_parts:
                yield empty.join(curr_line_parts)
                curr_line_parts = []
                curr_line_len = 0
                # Leave next_part and next_is_whitespace as is
                # to be processed on next iteration
                continue
            # Overlong part (or leading whitespace): hard-split it at
            # whatever room remains on this line.
            remaining_len = max_len - (curr_line_len - next_escaped_len)
            this_line_part, next_line_part = split_at(max(remaining_len, 0), next_part)
            if this_line_part:
                curr_line_parts.append(this_line_part)
            if curr_line_parts:
                yield empty.join(curr_line_parts)
            curr_line_parts = []
            curr_line_len = 0
            if next_line_part:
                next_part = next_line_part
            else:
                next_part = None
        else:
            # Still room on this line: accept next_part and continue.
            curr_line_parts.append(next_part)
            next_part = None
            next_is_whitespace = None
    if curr_line_parts:
        yield empty.join(curr_line_parts)
@register_pretty(str)
@register_pretty(bytes)
def pretty_str(s, ctx, split_pattern=None):
    """Pretty printer for ``str`` and ``bytes`` (and their subclasses).

    Renders the value on one line when it fits the available width,
    otherwise splits it into several adjacent string literals (implicit
    concatenation), laid out according to ``ctx.multiline_strategy``.

    Returns a contextual Doc: the actual layout decision is deferred
    until rendering, when the real column/width values are known.
    """
    # Subclasses of str/bytes
    # will be printed as StrSubclass('the actual string')
    constructor = type(s)
    is_native_type = constructor in (str, bytes)
    if ctx.depth_left == 0:
        # Out of depth budget: show only StrSubclass(...) / '...'
        return pretty_call_alt(ctx, constructor, args=(..., ))
    multiline_strategy = ctx.multiline_strategy
    prettyprinter_indent = ctx.indent

    def evaluator(indent, column, page_width, ribbon_width):
        # Called by the layout engine with concrete geometry.
        nonlocal multiline_strategy
        columns_left_in_line = page_width - column
        columns_left_in_ribbon = indent + ribbon_width - column
        available_width = min(columns_left_in_line, columns_left_in_ribbon)
        singleline_str_chars = len(s) + len('""')
        flat_version = pretty_single_line_str(s, prettyprinter_indent)
        if singleline_str_chars <= available_width:
            if is_native_type:
                return flat_version
            return build_fncall(ctx, constructor, argdocs=[flat_version])
        # multiline string
        each_line_starts_on_col = indent
        each_line_ends_on_col = min(page_width, each_line_starts_on_col + ribbon_width)
        each_line_max_str_len = max(
            each_line_ends_on_col - each_line_starts_on_col - 2,
            # If we're printing the string inside a highly nested data
            # structure, we may naturally run out of available width.
            # In these cases, we need to give some space for printing
            # such that we don't get stuck in an infinite loop when
            # str_to_lines is called.
            8 + len('""')
        )
        use_quote = determine_quote_strategy(s)
        lines = list(str_to_lines(
            max_len=each_line_max_str_len,
            use_quote=use_quote,
            s=s,
            pattern=split_pattern,
        ))
        if len(lines) == 1:
            # Splitting produced a single chunk after all; flat is fine.
            return flat_version
        parts = intersperse(
            HARDLINE,
            (
                pretty_single_line_str(
                    line,
                    indent=prettyprinter_indent,
                    use_quote=use_quote,
                )
                for line in lines
            )
        )
        if not is_native_type:
            # Subclasses are wrapped in a constructor call, which already
            # provides parentheses/indentation — render the literals plain.
            multiline_strategy = MULTILINE_STRATEGY_PLAIN
        if multiline_strategy == MULTILINE_STRATEGY_PLAIN:
            res = always_break(concat(parts))
            if is_native_type:
                return res
            return build_fncall(ctx, constructor, argdocs=[res])
        elif multiline_strategy == MULTILINE_STRATEGY_HANG:
            return always_break(
                nest(
                    prettyprinter_indent,
                    concat(parts)
                )
            )
        else:
            # PARENS wraps the literals in (...); INDENTED only indents.
            if multiline_strategy == MULTILINE_STRATEGY_PARENS:
                left_paren, right_paren = LPAREN, RPAREN
            else:
                assert multiline_strategy == MULTILINE_STRATEGY_INDENTED
                left_paren, right_paren = '', ''
            return always_break(
                concat([
                    left_paren,
                    nest(
                        prettyprinter_indent,
                        concat([
                            HARDLINE,
                            *parts,
                        ])
                    ),
                    (
                        HARDLINE
                        if multiline_strategy == MULTILINE_STRATEGY_PARENS
                        else NIL
                    ),
                    right_paren
                ])
            )
    return contextual(evaluator)
def _pretty_recursion(value):
    """Return placeholder text for a value that contains itself."""
    return '<Recursion on {typename} with id={obj_id}>'.format(
        typename=type(value).__name__,
        obj_id=id(value),
    )
def python_to_sdocs(
    value,
    indent,
    width,
    depth,
    ribbon_width,
    max_seq_len,
    sort_dict_keys
):
    """Convert a Python value into a stream of sdocs ready for rendering.

    ``depth=None`` means unlimited nesting depth.
    """
    effective_depth = float('inf') if depth is None else depth
    doc = pretty_python_value(
        value,
        ctx=PrettyContext(
            indent=indent,
            depth_left=effective_depth,
            visited=set(),
            max_seq_len=max_seq_len,
            sort_dict_keys=sort_dict_keys
        )
    )
    if is_commented(doc):
        # A top-level comment has no parent printer to render it; attach it
        # here — inline when the value fits on one line, above it otherwise.
        rendered_comment = commentdoc(doc.annotation.value)
        doc = group(
            flat_choice(
                when_flat=concat([doc, ' ', rendered_comment]),
                when_broken=concat([rendered_comment, HARDLINE, doc])
            )
        )
    return layout_smart(
        doc,
        width=width,
        ribbon_frac=min(1.0, ribbon_width / width)
    )
|
tommikaikkonen/prettyprinter
|
prettyprinter/prettyprinter.py
|
pretty_call_alt
|
python
|
def pretty_call_alt(ctx, fn, args=(), kwargs=()):
fndoc = general_identifier(fn)
if ctx.depth_left <= 0:
return concat([fndoc, LPAREN, ELLIPSIS, RPAREN])
if not kwargs and len(args) == 1:
sole_arg = args[0]
unwrapped_sole_arg, _comment, _trailing_comment = unwrap_comments(args[0])
if type(unwrapped_sole_arg) in (list, dict, tuple):
return build_fncall(
ctx,
fndoc,
argdocs=[pretty_python_value(sole_arg, ctx)],
hug_sole_arg=True,
)
nested_ctx = (
ctx
.nested_call()
.use_multiline_strategy(MULTILINE_STRATEGY_HANG)
)
if not DICT_KEY_ORDER_SUPPORTED and isinstance(kwargs, dict):
warnings.warn(
"A dict was passed to pretty_call_alt to represent kwargs, "
"but Python 3.5 doesn't maintain key order for dicts. The order "
"of keyword arguments will be undefined in the output. "
"To fix this, pass a list of two-tuples or an instance of "
"OrderedDict instead.",
UserWarning
)
kwargitems = (
kwargs.items()
if isinstance(kwargs, (OrderedDict, dict))
else kwargs
)
return build_fncall(
ctx,
fndoc,
argdocs=(
pretty_python_value(arg, nested_ctx)
for arg in args
),
kwargdocs=(
(kwarg, pretty_python_value(v, nested_ctx))
for kwarg, v in kwargitems
),
)
|
Returns a Doc that represents a function call to :keyword:`fn` with
the ``args`` and ``kwargs``.
Given an arbitrary context ``ctx``,::
pretty_call_alt(ctx, sorted, args=([7, 4, 5], ), kwargs=[('reverse', True)])
Will result in output::
sorted([7, 4, 5], reverse=True)
The layout algorithm will automatically break the call to multiple
lines if needed::
sorted(
[7, 4, 5],
reverse=True
)
``pretty_call_alt`` automatically handles syntax highlighting.
:param ctx: a context value
:type ctx: prettyprinter.prettyprinter.PrettyContext
:param fn: a callable
:param args: a ``tuple`` of positional arguments to render to the call
:param kwargs: keyword arguments to render to the call. Either an instance
of ``OrderedDict``, or an iterable of two-tuples, where the
first element is a `str` (key), and the second is the Python
value for that keyword argument.
:returns: :class:`~prettyprinter.doc.Doc`
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/prettyprinter.py#L764-L846
|
[
"def concat(docs):\n \"\"\"Returns a concatenation of the documents in the iterable argument\"\"\"\n return Concat(map(validate_doc, docs))\n",
"def build_fncall(\n ctx,\n fndoc,\n argdocs=(),\n kwargdocs=(),\n hug_sole_arg=False,\n trailing_comment=None,\n):\n \"\"\"Builds a doc that looks like a function call,\n from docs that represent the function, arguments\n and keyword arguments.\n\n If ``hug_sole_arg`` is True, and the represented\n functional call is done with a single non-keyword\n argument, the function call parentheses will hug\n the sole argument doc without newlines and indentation\n in break mode. This makes a difference in calls\n like this::\n\n > hug_sole_arg = False\n frozenset(\n [\n 1,\n 2,\n 3,\n 4,\n 5\n ]\n )\n > hug_sole_arg = True\n frozenset([\n 1,\n 2,\n 3,\n 4,\n 5,\n ])\n\n If ``trailing_comment`` is provided, the text is\n rendered as a comment after the last argument and\n before the closing parenthesis. This will force\n the function call to be broken to multiple lines.\n \"\"\"\n if callable(fndoc):\n fndoc = general_identifier(fndoc)\n\n has_comment = bool(trailing_comment)\n\n argdocs = list(argdocs)\n kwargdocs = list(kwargdocs)\n\n kwargdocs = [\n # Propagate any comments to the kwarg doc.\n (\n comment_doc(\n concat([\n keyword_arg(binding),\n ASSIGN_OP,\n doc.doc\n ]),\n doc.annotation.value\n )\n if is_commented(doc)\n else concat([\n keyword_arg(binding),\n ASSIGN_OP,\n doc\n ])\n )\n for binding, doc in kwargdocs\n ]\n\n if not (argdocs or kwargdocs):\n return concat([\n fndoc,\n LPAREN,\n RPAREN,\n ])\n\n if (\n hug_sole_arg and\n not kwargdocs and\n len(argdocs) == 1 and\n not is_commented(argdocs[0])\n ):\n return group(\n concat([\n fndoc,\n LPAREN,\n argdocs[0],\n RPAREN\n ])\n )\n\n allarg_docs = [*argdocs, *kwargdocs]\n\n if trailing_comment:\n allarg_docs.append(commentdoc(trailing_comment))\n\n parts = []\n\n for idx, doc in enumerate(allarg_docs):\n last = idx == len(allarg_docs) - 1\n\n if is_commented(doc):\n has_comment = True\n comment_str = doc.annotation.value\n doc = doc.doc\n else:\n comment_str = None\n\n part = concat([doc, NIL if last else 
COMMA])\n\n if comment_str:\n part = group(\n flat_choice(\n when_flat=concat([\n part,\n ' ',\n commentdoc(comment_str)\n ]),\n when_broken=concat([\n commentdoc(comment_str),\n HARDLINE,\n part,\n ]),\n )\n )\n\n if not last:\n part = concat([part, HARDLINE if has_comment else LINE])\n\n parts.append(part)\n\n outer = (\n always_break\n if has_comment\n else group\n )\n\n return outer(\n concat([\n fndoc,\n LPAREN,\n nest(\n ctx.indent,\n concat([\n SOFTLINE,\n concat(parts),\n ])\n ),\n SOFTLINE,\n RPAREN\n ])\n )\n",
"def pretty_python_value(value, ctx):\n comment = None\n trailing_comment = None\n\n value, comment, trailing_comment = unwrap_comments(value)\n\n is_registered(\n type(value),\n check_superclasses=True,\n check_deferred=True,\n register_deferred=True\n )\n\n if trailing_comment:\n doc = pretty_dispatch(\n value,\n ctx,\n trailing_comment=trailing_comment\n )\n else:\n doc = pretty_dispatch(\n value,\n ctx\n )\n\n if comment:\n return comment_doc(\n doc,\n comment\n )\n return doc\n",
"def unwrap_comments(value):\n comment = None\n trailing_comment = None\n\n while isinstance(value, (_CommentedValue, _TrailingCommentedValue)):\n if isinstance(value, _CommentedValue):\n comment = value.comment\n value = value.value\n elif isinstance(value, _TrailingCommentedValue):\n trailing_comment = value.comment\n value = value.value\n\n return (value, comment, trailing_comment)\n",
"def general_identifier(s):\n if callable(s):\n module, qualname = s.__module__, s.__qualname__\n\n if module in IMPLICIT_MODULES:\n if module == 'builtins':\n return builtin_identifier(qualname)\n return identifier(qualname)\n return identifier('{}.{}'.format(module, qualname))\n return identifier(s)\n"
] |
import inspect
import math
import re
import sys
import warnings
import ast
from collections import OrderedDict
from functools import singledispatch, partial
from itertools import chain, cycle
from traceback import format_exception
from types import (
FunctionType,
BuiltinFunctionType,
BuiltinMethodType
)
from weakref import WeakKeyDictionary
from .doc import (
always_break,
annotate,
concat,
contextual,
flat_choice,
fill,
group,
nest,
NIL,
LINE,
SOFTLINE,
HARDLINE
)
from .doctypes import (
Annotated,
Doc
)
from .layout import layout_smart
from .syntax import Token
from .utils import identity, intersperse, take
# Feature detection: dicts preserve insertion order from CPython 3.6 on,
# which matters when rendering keyword arguments in their given order.
PY_VERSION_INFO = sys.version_info
DICT_KEY_ORDER_SUPPORTED = PY_VERSION_INFO >= (3, 6)

# Unique sentinel distinct from every user value (including None).
UNSET_SENTINEL = object()

# Reusable punctuation/operator Docs, pre-annotated with highlight tokens.
COMMA = annotate(Token.PUNCTUATION, ',')
COLON = annotate(Token.PUNCTUATION, ':')
ELLIPSIS = annotate(Token.PUNCTUATION, '...')
LPAREN = annotate(Token.PUNCTUATION, '(')
RPAREN = annotate(Token.PUNCTUATION, ')')
LBRACKET = annotate(Token.PUNCTUATION, '[')
RBRACKET = annotate(Token.PUNCTUATION, ']')
LBRACE = annotate(Token.PUNCTUATION, '{')
RBRACE = annotate(Token.PUNCTUATION, '}')
NEG_OP = annotate(Token.OPERATOR, '-')
MUL_OP = annotate(Token.OPERATOR, '*')
ADD_OP = annotate(Token.OPERATOR, '+')
ASSIGN_OP = annotate(Token.OPERATOR, '=')

# Capturing groups so re.split keeps the separators in the result list.
WHITESPACE_PATTERN_TEXT = re.compile(r'(\s+)')
WHITESPACE_PATTERN_BYTES = re.compile(rb'(\s+)')
NONWORD_PATTERN_TEXT = re.compile(r'(\W+)')
NONWORD_PATTERN_BYTES = re.compile(rb'(\W+)')

# Multiline-string layout strategies. The bare string literals below are
# no-op expression statements illustrating each layout.
# For dict keys
"""
(
'aaaaaaaaaa'
'aaaaaa'
)
"""
MULTILINE_STRATEGY_PARENS = 'MULTILINE_STRATEGY_PARENS'
# For dict values
"""
'aaaaaaaaaa'
'aaaaa'
"""
MULTILINE_STRATEGY_INDENTED = 'MULTILINE_STRATEGY_INDENTED'
# For sequence elements
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STRATEGY_HANG = 'MULTILINE_STRATEGY_HANG'
# For top level strs
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STRATEGY_PLAIN = 'MULTILINE_STRATEGY_PLAIN'

# Modules whose members are printed without a module prefix.
IMPLICIT_MODULES = {
    '__main__',
    'builtins',
}
class CommentAnnotation:
    """Annotation payload marking a Doc as carrying a comment string."""

    def __init__(self, value):
        # Comments are always plain strings.
        assert isinstance(value, str)
        self.value = value

    def __repr__(self):
        return 'ValueComment(%r)' % (self.value,)
class _CommentedValue:
    """Internal wrapper pairing a value with a comment shown next to it."""

    def __init__(self, value, comment):
        self.value, self.comment = value, comment
class _TrailingCommentedValue:
    """Internal wrapper pairing a value with a comment rendered after it."""

    def __init__(self, value, comment):
        self.value, self.comment = value, comment
def comment_value(value, comment_text):
    """Attach ``comment_text`` to a plain Python value.

    prettyprinter inspects and strips the wrapper during layout and
    renders the comment next to the value in the output.  You will
    rarely need this directly — prefer ``comment``, which also handles
    Doc instances.
    """
    wrapped = _CommentedValue(value, comment_text)
    return wrapped
def comment_doc(doc, comment_text):
    """Annotate a Doc with a comment (low-level; used by the layout code).

    The parent (or top-level) handler decides where the comment is
    placed — e.g. the ``list`` printer positions comments for its
    elements.  Prefer ``comment`` unless you are working with Docs
    directly.
    """
    annotation = CommentAnnotation(comment_text)
    return annotate(annotation, doc)
def comment(value, comment_text):
    """Annotate a value or a Doc with a comment.

    When printed by prettyprinter, the comment is rendered next to the
    value or Doc.  Dispatches to ``comment_doc`` for Docs and
    ``comment_value`` for everything else.
    """
    wrap = comment_doc if isinstance(value, Doc) else comment_value
    return wrap(value, comment_text)
def trailing_comment(value, comment_text):
    """Annotate ``value`` so the comment renders "trailing": in place of
    the last element of a list/set/tuple, or after the final argument
    of a function call.

    Forces the rendering of ``value`` onto multiple lines, since Python
    has no inline comment syntax.  For example,
    ``trailing_comment(['value'], '...and more')`` renders as::

        [
            'value',
            # ...and more
        ]
    """
    return _TrailingCommentedValue(value, comment_text)
def unwrap_comments(value):
    """Peel comment wrappers off ``value``.

    Returns ``(bare_value, comment, trailing_comment)`` where either
    comment may be ``None``.  Handles arbitrarily nested wrappers; the
    innermost comment of each kind wins.
    """
    leading = None
    trailing = None
    while True:
        if isinstance(value, _CommentedValue):
            leading, value = value.comment, value.value
        elif isinstance(value, _TrailingCommentedValue):
            trailing, value = value.comment, value.value
        else:
            break
    return (value, leading, trailing)
def is_commented(value):
    """True if ``value`` is a Doc annotated with a CommentAnnotation."""
    if not isinstance(value, Annotated):
        return False
    return isinstance(value.annotation, CommentAnnotation)
# Helpers wrapping text in syntax-highlighting token annotations.
def builtin_identifier(s):
    # Built-in names (e.g. ``int``) get their own highlight token.
    return annotate(Token.NAME_BUILTIN, s)
def identifier(s):
    # Generic (function/class) identifier highlighting.
    return annotate(Token.NAME_FUNCTION, s)
def keyword_arg(s):
    # Keyword-argument names in rendered calls.
    return annotate(Token.NAME_VARIABLE, s)
def general_identifier(s):
    """Doc for a name: callables render as their (possibly dotted)
    qualified name; anything else is highlighted as an identifier."""
    if not callable(s):
        return identifier(s)
    module = s.__module__
    qualname = s.__qualname__
    if module not in IMPLICIT_MODULES:
        return identifier('{}.{}'.format(module, qualname))
    # Names from __main__/builtins are shown without a module prefix;
    # builtins additionally get builtin highlighting.
    if module == 'builtins':
        return builtin_identifier(qualname)
    return identifier(qualname)
def classattr(cls, attrname):
    """Doc for ``SomeClass.attrname``-style attribute access."""
    dotted_attr = identifier('.{}'.format(attrname))
    return concat([general_identifier(cls), dotted_attr])
class PrettyContext:
    """
    An immutable object used to track context during construction of
    layout primitives. An instance of PrettyContext is passed to every
    pretty printer definition.

    As a performance optimization, the ``visited`` set is implemented
    as mutable (it is shared across copies made with ``_replace``).
    """
    __slots__ = (
        'indent',
        'depth_left',
        'visited',
        'multiline_strategy',
        'max_seq_len',
        'sort_dict_keys',
        'user_ctx'
    )

    def __init__(
        self,
        indent,
        depth_left,
        visited=None,
        multiline_strategy=MULTILINE_STRATEGY_PLAIN,
        max_seq_len=1000,
        sort_dict_keys=False,
        user_ctx=None
    ):
        self.indent = indent
        self.depth_left = depth_left
        self.multiline_strategy = multiline_strategy
        self.max_seq_len = max_seq_len
        self.sort_dict_keys = sort_dict_keys
        if visited is None:
            visited = set()
        self.visited = visited
        # Free-form dict for printer-specific state; see assoc()/get().
        self.user_ctx = user_ctx or {}

    def _replace(self, **kwargs):
        """Return a copy with the given slots replaced (namedtuple-style).

        Field names are taken from ``__slots__``; unknown keys are a
        programming error.
        """
        passed_keys = set(kwargs.keys())
        fieldnames = type(self).__slots__
        assert passed_keys.issubset(set(fieldnames))
        return PrettyContext(
            **{
                k: (
                    kwargs[k]
                    if k in passed_keys
                    else getattr(self, k)
                )
                for k in fieldnames
            }
        )

    def use_multiline_strategy(self, strategy):
        """Return a copy using ``strategy`` for multiline strings."""
        return self._replace(multiline_strategy=strategy)

    def assoc(self, key, value):
        """
        Return a modified PrettyContext with ``key`` set to ``value``
        """
        return self._replace(user_ctx={
            **self.user_ctx,
            key: value,
        })

    def set(self, key, value):
        # Deprecated alias of assoc().
        warnings.warn(
            "PrettyContext.set will be deprecated in the future in favor of "
            "renamed PrettyPrinter.assoc. You can fix this warning by "
            "changing .set method calls to .assoc",
            PendingDeprecationWarning
        )
        return self.assoc(key, value)

    def get(self, key, default=None):
        """Look up printer-specific state stored with assoc()."""
        return self.user_ctx.get(key, default)

    def nested_call(self):
        """Return a copy with one less level of depth budget."""
        return self._replace(depth_left=self.depth_left - 1)

    def start_visit(self, value):
        # Mark value as being printed, for recursion detection (by id).
        self.visited.add(id(value))

    def end_visit(self, value):
        self.visited.remove(id(value))

    def is_visited(self, value):
        return id(value) in self.visited
def _warn_about_bad_printer(pretty_fn, value, exc):
    """Emit a UserWarning naming the printer that raised on ``value``,
    including the full traceback of ``exc``."""
    fnname = '{}.{}'.format(
        pretty_fn.__module__,
        pretty_fn.__qualname__
    )
    traceback_text = ''.join(
        format_exception(type(exc), exc, exc.__traceback__)
    )
    message = (
        "The pretty printer for {}, {}, raised an exception. "
        "Falling back to default repr.\n\n{}"
    ).format(type(value).__name__, fnname, traceback_text)
    warnings.warn(message, UserWarning)
def _run_pretty(pretty_fn, value, ctx, trailing_comment=None):
    """Invoke ``pretty_fn`` on ``value`` with recursion tracking and
    error fallbacks.

    Self-referential values short-circuit to a recursion placeholder.
    A printer that raises falls back to ``repr(value)`` with a warning;
    a printer that does not accept ``trailing_comment`` is called
    without it (also with a warning).  Raises ValueError if the printer
    returns something other than a str or Doc.
    """
    if ctx.is_visited(value):
        return _pretty_recursion(value)
    ctx.start_visit(value)
    if trailing_comment:
        try:
            doc = pretty_fn(
                value,
                ctx,
                trailing_comment=trailing_comment
            )
        except TypeError as e:
            # This is probably because pretty_fn does not support
            # trailing_comment, but let's make sure.
            sig = inspect.signature(pretty_fn)
            try:
                sig.bind(value, ctx, trailing_comment=trailing_comment)
            except TypeError:
                # Signature really doesn't accept trailing_comment:
                # warn and retry without it.
                fnname = '{}.{}'.format(
                    pretty_fn.__module__,
                    pretty_fn.__qualname__
                )
                warnings.warn(
                    "The pretty printer for {}, {}, does not support rendering "
                    "trailing comments. It will not show up in output.".format(
                        type(value).__name__, fnname
                    )
                )
                doc = pretty_fn(value, ctx)
            else:
                # Signature was fine, so the TypeError came from inside
                # the printer itself — treat as a broken printer.
                _warn_about_bad_printer(pretty_fn, value, exc=e)
                doc = repr(value)
    else:
        try:
            doc = pretty_fn(value, ctx)
        except Exception as e:
            _warn_about_bad_printer(pretty_fn, value, exc=e)
            doc = repr(value)
    if not (
        isinstance(doc, str) or
        isinstance(doc, Doc)
    ):
        fnname = '{}.{}'.format(
            pretty_fn.__module__,
            pretty_fn.__qualname__
        )
        raise ValueError(
            'Functions decorated with register_pretty must return '
            'an instance of str or Doc. {} returned '
            '{} instead.'.format(fnname, repr(doc))
        )
    ctx.end_visit(value)
    return doc
# Printers registered by dotted-name string ("module.QualName"), waiting for
# the actual class object to appear before being bound to the dispatcher.
_DEFERRED_DISPATCH_BY_NAME = {}
def get_deferred_key(type):
    """Return the "module.qualname" key used for deferred printer lookup."""
    return '{}.{}'.format(type.__module__, type.__qualname__)
# (predicate, printer) pairs, tried in registration order for values whose
# type has no directly registered printer.
_PREDICATE_REGISTRY = []
def _repr_pretty(value, ctx):
    """Fallback printer: try predicate-registered printers in order,
    falling back to repr() when none match."""
    for matches, printer in _PREDICATE_REGISTRY:
        if matches(value):
            return printer(value, ctx)
    return repr(value)
# Default dispatch target: run the predicate/repr fallback printer.
_BASE_DISPATCH = partial(_run_pretty, _repr_pretty)
# Single-dispatch registry mapping value types to their pretty printers.
pretty_dispatch = singledispatch(_BASE_DISPATCH)
def pretty_python_value(value, ctx):
    """Return the Doc for ``value``.

    Strips comment wrappers, triggers registration of any deferred
    printer for the value's type, dispatches to the registered printer,
    then re-attaches a leading comment annotation if one was present.
    """
    value, leading_comment, trailing = unwrap_comments(value)

    # Side effect only: bind a matching deferred (string-registered)
    # printer to this type before dispatching.
    is_registered(
        type(value),
        check_superclasses=True,
        check_deferred=True,
        register_deferred=True
    )

    if trailing:
        doc = pretty_dispatch(value, ctx, trailing_comment=trailing)
    else:
        doc = pretty_dispatch(value, ctx)

    if leading_comment:
        doc = comment_doc(doc, leading_comment)
    return doc
def register_pretty(type=None, predicate=None):
    """Returns a decorator that registers the decorated function
    as the pretty printer for instances of ``type``.

    :param type: the type to register the pretty printer for, or a ``str``
                 to indicate the module and name, e.g.:
                 ``'collections.Counter'``.
    :param predicate: a predicate function that takes one argument
                      and returns a boolean indicating if the value
                      should be handled by the registered pretty printer.

    Only one of ``type`` and ``predicate`` may be supplied. That means
    that ``predicate`` will be run on unregistered types only.

    The decorated function must accept exactly two positional arguments:

    - ``value`` to pretty print, and
    - ``ctx``, a context value.

    Here's an example of the pretty printer for OrderedDict:

    .. code:: python

        from collections import OrderedDict
        from prettyprinter import register_pretty, pretty_call

        @register_pretty(OrderedDict)
        def pretty_ordereddict(value, ctx):
            return pretty_call(ctx, OrderedDict, list(value.items()))

    :raises ValueError: if neither or both of ``type``/``predicate`` are
        given, if ``predicate`` is not callable, or (at decoration time)
        if the decorated function's signature is incompatible.
    """
    if type is None and predicate is None:
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument."
        )
    if type is not None and predicate is not None:
        # Fixed: these fragments used to concatenate to
        # "...argument,but not both" (missing space).
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument, "
            "but not both"
        )
    if predicate is not None:
        if not callable(predicate):
            raise ValueError(
                "Expected a callable for 'predicate', got {}".format(
                    repr(predicate)
                )
            )

    def decorator(fn):
        # Validate up front that the printer accepts (value, ctx).
        sig = inspect.signature(fn)
        value = None
        ctx = None
        try:
            sig.bind(value, ctx)
        except TypeError:
            fnname = '{}.{}'.format(
                fn.__module__,
                fn.__qualname__
            )
            raise ValueError(
                "Functions decorated with register_pretty must accept "
                "exactly two positional parameters: 'value' and 'ctx'. "
                "The function signature for {} was not compatible.".format(
                    fnname
                )
            )

        if type:
            if isinstance(type, str):
                # Deferred registration: we don't wrap with _run_pretty,
                # so that when we register this printer with an actual
                # class, we can call register_pretty(cls)(fn)
                _DEFERRED_DISPATCH_BY_NAME[type] = fn
            else:
                pretty_dispatch.register(type, partial(_run_pretty, fn))
        else:
            assert callable(predicate)
            _PREDICATE_REGISTRY.append((predicate, fn))
        return fn
    return decorator
def is_registered(
    type,
    *,
    check_superclasses=False,
    check_deferred=True,
    register_deferred=True
):
    """Return True if a pretty printer is registered for ``type``.

    :param check_superclasses: also accept a printer registered for a
        superclass of ``type``.
    :param check_deferred: consult printers registered under dotted-name
        strings that have not yet been bound to a class.
    :param register_deferred: as a side effect, bind a matching deferred
        printer to the class; requires ``check_deferred``.
    :raises ValueError: if ``register_deferred`` is True while
        ``check_deferred`` is False.
    """
    if not check_deferred and register_deferred:
        raise ValueError(
            'register_deferred may not be True when check_deferred is False'
        )

    if type in pretty_dispatch.registry:
        return True

    if check_deferred:
        # Check deferred printers for the type exactly.
        deferred_key = get_deferred_key(type)
        if deferred_key in _DEFERRED_DISPATCH_BY_NAME:
            if register_deferred:
                deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(
                    deferred_key
                )
                register_pretty(type)(deferred_dispatch)
            return True

    if not check_superclasses:
        return False

    if check_deferred:
        # Check deferred printers for supertypes.
        for supertype in type.__mro__[1:]:
            deferred_key = get_deferred_key(supertype)
            if deferred_key in _DEFERRED_DISPATCH_BY_NAME:
                if register_deferred:
                    deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(
                        deferred_key
                    )
                    register_pretty(supertype)(deferred_dispatch)
                return True

    # Fall back to singledispatch MRO resolution: any result other than
    # the base handler means some superclass has a registered printer.
    return pretty_dispatch.dispatch(type) is not _BASE_DISPATCH
def bracket(ctx, left, child, right):
    """Wrap ``child`` in left/right delimiters, indenting the child by
    ``ctx.indent`` when the layout breaks onto multiple lines."""
    indented_child = nest(ctx.indent, concat([SOFTLINE, child]))
    return concat([left, indented_child, SOFTLINE, right])
def commentdoc(text):
    """Returns a Doc representing a comment `text`. `text` is
    treated as words, and any whitespace may be used to break
    the comment to multiple lines.

    :raises ValueError: if ``text`` is empty.
    """
    if not text:
        raise ValueError(
            'Expected non-empty comment str, got {}'.format(repr(text))
        )
    commentlines = []
    for line in text.splitlines():
        # Split into alternating word/whitespace chunks; filter drops
        # empty strings at the edges of the split.
        alternating_words_ws = list(filter(None, WHITESPACE_PATTERN_TEXT.split(line)))
        starts_with_whitespace = bool(
            WHITESPACE_PATTERN_TEXT.match(alternating_words_ws[0])
        )
        if starts_with_whitespace:
            # Keep leading indentation as a fixed prefix after '# '.
            prefix = alternating_words_ws[0]
            alternating_words_ws = alternating_words_ws[1:]
        else:
            prefix = NIL
        if len(alternating_words_ws) % 2 == 0:
            # The last part must be whitespace.
            alternating_words_ws = alternating_words_ws[:-1]
        for idx, tup in enumerate(zip(alternating_words_ws, cycle([False, True]))):
            part, is_ws = tup
            if is_ws:
                # When the line breaks at this gap, emit a newline plus a
                # fresh '# ' marker so the continuation is still a comment.
                # (Safe mutation: zip has already consumed index idx.)
                alternating_words_ws[idx] = flat_choice(
                    when_flat=part,
                    when_broken=always_break(
                        concat([
                            HARDLINE,
                            '# ',
                        ])
                    )
                )
        commentlines.append(
            concat([
                '# ',
                prefix,
                fill(alternating_words_ws)
            ])
        )
    outer = identity
    if len(commentlines) > 1:
        # Multiple source lines must never collapse onto one line.
        outer = always_break
    return annotate(
        Token.COMMENT_SINGLE,
        outer(concat(intersperse(HARDLINE, commentlines)))
    )
def sequence_of_docs(ctx, left, docs, right, dangle=False, force_break=False):
    """Lay out ``docs`` as a comma-separated sequence inside the
    ``left``/``right`` delimiters.

    :param dangle: append a trailing comma after the last element
        (used e.g. for one-element tuples).
    :param force_break: always render one element per line.
    """
    docs = list(docs)

    # Performance optimization:
    # in case of really long sequences,
    # the layout algorithm can be quite slow.
    # No branching here is needed if the sequence
    # is long enough that even with the shortest
    # element output, it does not fit the ribbon width.
    minimum_output_len = (
        2 +  # Assume left and right are one character each
        len(', ') * (len(docs) - 1) +
        len(docs)  # each element must take at least one character
    )
    MAX_PRACTICAL_RIBBON_WIDTH = 150
    will_break = force_break or minimum_output_len > MAX_PRACTICAL_RIBBON_WIDTH

    has_comment = any(is_commented(doc) for doc in docs)

    parts = []
    for idx, doc in enumerate(docs):
        last = idx == len(docs) - 1
        if is_commented(doc):
            comment_str = doc.annotation.value
            # Try to fit the comment at the end of the same line.
            flat_version = concat([
                doc,
                COMMA if not last else NIL,
                ' ',
                commentdoc(comment_str),
                HARDLINE if not last else NIL
            ])
            # If the value is broken to multiple lines, add
            # comment on the line above.
            broken_version = concat([
                commentdoc(comment_str),
                HARDLINE,
                doc,
                COMMA if not last else NIL,
                HARDLINE if not last else NIL
            ])
            parts.append(
                group(
                    flat_choice(
                        when_flat=flat_version,
                        when_broken=broken_version,
                    )
                )
            )
        else:
            parts.append(doc)
            if not last:
                parts.append(
                    concat([COMMA, LINE])
                )

    if dangle:
        parts.append(COMMA)

    # Any comment forces multi-line output (no inline comments in Python).
    outer = (
        always_break
        if will_break or has_comment
        else group
    )

    return outer(bracket(ctx, left, concat(parts), right))
def pretty_call(ctx, fn, *args, **kwargs):
    """Return a Doc representing a call to ``fn`` with the given
    positional and keyword arguments.

    Requires Python 3.6+, where keyword argument order is maintained;
    on Python 3.5 use :func:`~prettyprinter.pretty_call_alt` instead.

    Given an arbitrary context ``ctx``,::

        pretty_call(ctx, sorted, [7, 4, 5], reverse=True)

    results in the output::

        sorted([7, 4, 5], reverse=True)

    broken across multiple lines automatically when needed.  Syntax
    highlighting is handled for you.

    :param ctx: a context value
    :type ctx: prettyprinter.prettyprinter.PrettyContext
    :param fn: a callable
    :param args: positional arguments to render to the call
    :param kwargs: keyword arguments to render to the call
    :returns: :class:`~prettyprinter.doc.Doc`
    """
    return pretty_call_alt(ctx, fn, args=args, kwargs=kwargs)
def build_fncall(
    ctx,
    fndoc,
    argdocs=(),
    kwargdocs=(),
    hug_sole_arg=False,
    trailing_comment=None,
):
    """Builds a doc that looks like a function call,
    from docs that represent the function, arguments
    and keyword arguments.

    If ``hug_sole_arg`` is True, and the represented
    functional call is done with a single non-keyword
    argument, the function call parentheses will hug
    the sole argument doc without newlines and indentation
    in break mode. This makes a difference in calls
    like this::

        > hug_sole_arg = False
        frozenset(
            [
                1,
                2,
                3,
                4,
                5
            ]
        )
        > hug_sole_arg = True
        frozenset([
            1,
            2,
            3,
            4,
            5,
        ])

    If ``trailing_comment`` is provided, the text is
    rendered as a comment after the last argument and
    before the closing parenthesis. This will force
    the function call to be broken to multiple lines.
    """
    if callable(fndoc):
        # Convenience: accept the callable itself instead of a Doc.
        fndoc = general_identifier(fndoc)

    has_comment = bool(trailing_comment)

    argdocs = list(argdocs)
    kwargdocs = list(kwargdocs)

    kwargdocs = [
        # Propagate any comments to the kwarg doc.
        (
            comment_doc(
                concat([
                    keyword_arg(binding),
                    ASSIGN_OP,
                    doc.doc
                ]),
                doc.annotation.value
            )
            if is_commented(doc)
            else concat([
                keyword_arg(binding),
                ASSIGN_OP,
                doc
            ])
        )
        for binding, doc in kwargdocs
    ]

    if not (argdocs or kwargdocs):
        # Zero-argument call: no line-breaking machinery needed.
        return concat([
            fndoc,
            LPAREN,
            RPAREN,
        ])

    if (
        hug_sole_arg and
        not kwargdocs and
        len(argdocs) == 1 and
        not is_commented(argdocs[0])
    ):
        return group(
            concat([
                fndoc,
                LPAREN,
                argdocs[0],
                RPAREN
            ])
        )

    allarg_docs = [*argdocs, *kwargdocs]

    if trailing_comment:
        allarg_docs.append(commentdoc(trailing_comment))

    parts = []

    for idx, doc in enumerate(allarg_docs):
        last = idx == len(allarg_docs) - 1

        if is_commented(doc):
            has_comment = True
            comment_str = doc.annotation.value
            doc = doc.doc
        else:
            comment_str = None

        part = concat([doc, NIL if last else COMMA])

        if comment_str:
            # Same-line comment when the argument fits flat; otherwise the
            # comment goes on its own line above the argument.
            part = group(
                flat_choice(
                    when_flat=concat([
                        part,
                        ' ',
                        commentdoc(comment_str)
                    ]),
                    when_broken=concat([
                        commentdoc(comment_str),
                        HARDLINE,
                        part,
                    ]),
                )
            )

        if not last:
            # Comments force hard breaks between arguments.
            part = concat([part, HARDLINE if has_comment else LINE])

        parts.append(part)

    outer = (
        always_break
        if has_comment
        else group
    )

    return outer(
        concat([
            fndoc,
            LPAREN,
            nest(
                ctx.indent,
                concat([
                    SOFTLINE,
                    concat(parts),
                ])
            ),
            SOFTLINE,
            RPAREN
        ])
    )
@register_pretty(type)
def pretty_type(_type, ctx):
    """Render a class object as its (dotted) name, with a '# class' hint
    for classes outside the implicit modules."""
    # NoneType is not reachable by name in the global namespace, so it is
    # clearer to print it as type(None).
    if _type is type(None):  # noqa
        return pretty_call_alt(ctx, type, args=(None, ))

    result = general_identifier(_type)

    if _type.__module__ in IMPLICIT_MODULES:
        # e.g. ``int`` — clearly a class without extra hints.
        return result

    # For values like functools.partial the dotted path alone may not read
    # as a class, so mimic repr's <class '...'> hint with a comment.
    return comment(result, 'class')
@register_pretty(FunctionType)
def pretty_function(fn, ctx):
    """Render a plain Python function as its name plus a type hint."""
    name_doc = general_identifier(fn)
    return comment(name_doc, 'function')
@register_pretty(BuiltinMethodType)
def pretty_builtin_method(method, ctx):
    """Render a built-in method as its name plus a type hint."""
    name_doc = general_identifier(method)
    return comment(name_doc, 'built-in method')
@register_pretty(BuiltinFunctionType)
def pretty_builtin_function(fn, ctx):
    """Render a built-in function as its name plus a type hint."""
    name_doc = general_identifier(fn)
    return comment(name_doc, 'built-in function')
# Class attributes that collections.namedtuple-generated classes define;
# used by _is_namedtuple for duck-typed detection.
namedtuple_clsattrs = (
    '__slots__',
    '_make',
    '_replace',
    '_asdict'
)

# Class attributes that identify C structseq ("C namedtuple") types such as
# time.struct_time; _is_cnamedtuple additionally requires them to be ints.
c_namedtuple_identify_by_clsattrs = (
    'n_fields',
    'n_sequence_fields',
    'n_unnamed_fields'
)
def _is_namedtuple(value):
    """Duck-typed check: does value's class look like a
    collections.namedtuple product?"""
    cls = type(value)
    # hasattr catches exactly AttributeError, matching a try/getattr probe.
    return all(hasattr(cls, attrname) for attrname in namedtuple_clsattrs)
def _is_cnamedtuple(value):
    """Duck-typed check for C structseq types (e.g. time.struct_time):
    all marker attributes must exist on the class and be ints."""
    cls = type(value)
    for attrname in c_namedtuple_identify_by_clsattrs:
        # Missing attribute -> default None -> fails the int check below.
        marker = getattr(cls, attrname, None)
        if not isinstance(marker, int):
            return False
    return True
def pretty_namedtuple(value, ctx, trailing_comment=None):
    """Render a namedtuple as a keyword-argument constructor call.

    NOTE(review): trailing_comment is accepted for interface parity but
    not rendered here — matches the original behavior; confirm intended.
    """
    constructor = type(value)
    field_bindings = zip(constructor._fields, value)
    return pretty_call_alt(ctx, constructor, kwargs=field_bindings)
# Given a cnamedtuple value, returns a tuple
# of fieldnames. Each fieldname at ith index of
# the tuple corresponds to the ith element in the cnamedtuple.
def resolve_cnamedtuple_fieldnames(value):
    """Recover the ordered field names of a C structseq from its repr.

    The structseq repr is a non-evaluable call expression like
    ``time.struct_time(tm_year=..., tm_mon=..., ...)`` with one keyword
    per element, in positional order; parsing it with ``ast`` yields the
    names.  This holds as long as CPython keeps that repr format — see:
    https://github.com/python/cpython/blob/53b9e1a1c1d86187ad6fbee492b697ef8be74205/Objects/structseq.c#L168-L241
    """
    call_node = ast.parse(repr(value), mode='eval').body
    return tuple(kw.arg for kw in call_node.keywords)
# Keys: classes/constructors
# Values: a tuple of fieldnames if resolving them was successful.
#         Otherwise, an exception that was raised when attempting
#         to resolve the fieldnames.
# Weak keys so cached classes can still be garbage collected.
_cnamedtuple_fieldnames_by_class = WeakKeyDictionary()
# Examples of cnamedtuples:
# - return value of time.strptime()
# - return value of os.uname()
def pretty_cnamedtuple(value, ctx, trailing_comment=None):
    """Render a C structseq as a constructor call over a single tuple
    argument, commenting each element with its field name.

    Field-name resolution is cached per class; a failed resolution is
    cached as the exception and re-raised on every subsequent call.
    """
    cls = type(value)
    try:
        cached = _cnamedtuple_fieldnames_by_class[cls]
    except KeyError:
        try:
            cached = resolve_cnamedtuple_fieldnames(value)
        except Exception as exc:
            cached = exc
        _cnamedtuple_fieldnames_by_class[cls] = cached

    if isinstance(cached, Exception):
        raise cached

    annotated_elements = tuple(
        comment(element, fieldname)
        for element, fieldname in zip(value, cached)
    )
    return pretty_call_alt(ctx, cls, args=(annotated_elements, ))
@register_pretty(tuple)
@register_pretty(list)
@register_pretty(set)
def pretty_bracketable_iterable(value, ctx, trailing_comment=None):
    """Render a tuple/list/set (or subclass) between its bracket pair.

    namedtuples and C structseqs get keyword-style rendering; sequences
    longer than ``ctx.max_seq_len`` are truncated with a trailing
    comment; non-native subclasses wrap the literal in a constructor
    call such as ``SubclassOfList([...])``.
    """
    constructor = type(value)
    if isinstance(value, tuple):
        if _is_cnamedtuple(value):
            try:
                return pretty_cnamedtuple(
                    value,
                    ctx,
                    trailing_comment=trailing_comment
                )
            except Exception:
                pass # render as a normal tuple
        elif _is_namedtuple(value):
            return pretty_namedtuple(value, ctx, trailing_comment=trailing_comment)
    is_native_type = constructor in (tuple, list, set)
    if len(value) > ctx.max_seq_len:
        truncation_comment = '...and {} more elements'.format(
            len(value) - ctx.max_seq_len
        )
        trailing_comment = (
            truncation_comment + '. ' + trailing_comment
            if trailing_comment
            else truncation_comment
        )
    dangle = False
    if isinstance(value, list):
        left, right = LBRACKET, RBRACKET
    elif isinstance(value, tuple):
        left, right = LPAREN, RPAREN
        if len(value) == 1:
            # One-element tuples need the dangling comma: (x, )
            dangle = True
    elif isinstance(value, set):
        left, right = LBRACE, RBRACE
    if not value:
        if isinstance(value, (list, tuple)):
            if is_native_type:
                return concat([left, right])
            return pretty_call_alt(ctx, constructor)
        else:
            # E.g. set() or SubclassOfSet()
            return pretty_call_alt(ctx, constructor)
    if ctx.depth_left == 0:
        # Depth budget exhausted: elide the elements as '...'.
        if isinstance(value, (list, tuple)):
            literal = concat([left, ELLIPSIS, right])
            if is_native_type:
                return literal
            return build_fncall(
                ctx,
                general_identifier(constructor),
                argdocs=(literal, ),
                hug_sole_arg=True
            )
        else:
            return pretty_call_alt(ctx, constructor, args=(..., ))
    if len(value) == 1:
        sole_value = list(value)[0]
        els = [
            pretty_python_value(
                sole_value,
                ctx=(
                    ctx
                    .nested_call()
                    .use_multiline_strategy(MULTILINE_STRATEGY_PLAIN)
                )
            )
        ]
    else:
        els = (
            pretty_python_value(
                el,
                ctx=(
                    ctx
                    .nested_call()
                    .use_multiline_strategy(MULTILINE_STRATEGY_HANG)
                )
            )
            for el in take(ctx.max_seq_len, value)
        )
    if trailing_comment:
        els = chain(els, [commentdoc(trailing_comment)])
        # A trailing comment forces multi-line output; no dangling comma.
        dangle = False
    literal = sequence_of_docs(
        ctx,
        left,
        els,
        right,
        dangle=dangle,
        force_break=bool(trailing_comment)
    )
    if is_native_type:
        return literal
    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(literal, ),
        hug_sole_arg=True
    )
@register_pretty(frozenset)
def pretty_frozenset(value, ctx):
    """Render a frozenset as ``frozenset([...])``; empty renders as
    ``frozenset()`` (or the subclass constructor)."""
    cls = type(value)
    if not value:
        return pretty_call_alt(ctx, cls)
    return pretty_call_alt(ctx, cls, args=(list(value), ))
class _AlwaysSortable(object):
    """Sort key wrapper that never raises on comparison.

    Comparable wrapped values compare normally; when ``<`` raises
    TypeError (mixed types), ordering falls back to an arbitrary but
    total (type-string, id) ordering so ``sorted`` always succeeds.
    """
    __slots__ = ('value', )

    def __init__(self, value):
        self.value = value

    def sortable_value(self):
        return (str(type(self)), id(self))

    def __lt__(self, other):
        try:
            return self.value < other.value
        except TypeError:
            pass
        return self.sortable_value() < other.sortable_value()
@register_pretty(dict)
def pretty_dict(d, ctx, trailing_comment=None):
    """Render a dict (or subclass) as a brace literal.

    String/bytes keys are rendered with the parenthesized multiline
    strategy; comments attached to keys or values are placed above or
    beside them; entries beyond ``ctx.max_seq_len`` are truncated with
    a trailing comment. Dict subclasses wrap the literal in a
    constructor call. More than two pairs, or any comment, forces
    multi-line output.
    """
    constructor = type(d)
    is_native_type = constructor is dict
    if ctx.depth_left == 0:
        # Depth budget exhausted: render as {...}.
        literal = concat([LBRACE, ELLIPSIS, RBRACE])
        if is_native_type:
            return literal
        return build_fncall(
            ctx,
            general_identifier(constructor),
            argdocs=(literal, ),
            hug_sole_arg=True
        )
    if len(d) > ctx.max_seq_len:
        count_truncated = len(d) - ctx.max_seq_len
        truncation_comment = '...and {} more elements'.format(
            count_truncated
        )
        trailing_comment = (
            truncation_comment + '. ' + trailing_comment
            if trailing_comment
            else truncation_comment
        )
    has_comment = bool(trailing_comment)
    sorted_keys = (
        sorted(d.keys(), key=_AlwaysSortable)
        if ctx.sort_dict_keys
        else d.keys()
    )
    pairs = []
    for k in take(ctx.max_seq_len, sorted_keys):
        v = d[k]
        if isinstance(k, (str, bytes)):
            kdoc = pretty_str(
                k,
                # not a nested call on purpose
                ctx=ctx.use_multiline_strategy(MULTILINE_STRATEGY_PARENS),
            )
        else:
            kdoc = pretty_python_value(
                k,
                ctx=ctx.nested_call()
            )
        vdoc = pretty_python_value(
            v,
            ctx=(
                ctx
                .nested_call()
                .use_multiline_strategy(MULTILINE_STRATEGY_INDENTED)
            ),
        )
        # Strip comment annotations off the key/value docs; they are
        # re-attached below with dict-specific placement.
        kcomment = None
        if is_commented(kdoc):
            has_comment = True
            kcomment = kdoc.annotation.value
            kdoc = kdoc.doc
        vcomment = None
        if is_commented(vdoc):
            has_comment = True
            vcomment = vdoc.annotation.value
            vdoc = vdoc.doc
        pairs.append((k, v, kdoc, vdoc, kcomment, vcomment))
    parts = []
    for idx, tup in enumerate(pairs):
        last = idx == len(pairs) - 1
        k, v, kdoc, vdoc, kcomment, vcomment = tup
        if not (kcomment or vcomment):
            # Plain key: value pair, comma-separated unless last.
            parts.append(
                concat([
                    kdoc,
                    concat([COLON, ' ']),
                    vdoc,
                    NIL if last else COMMA,
                    NIL if last else LINE,
                ]),
            )
            continue
        if kcomment:
            kcommented = concat([
                commentdoc(kcomment),
                HARDLINE,
                kdoc,
            ])
        else:
            kcommented = kdoc
        if vcomment:
            vcommented = group(
                flat_choice(
                    # Add comment at the end of the line
                    when_flat=concat([
                        vdoc,
                        NIL if last else COMMA,
                        ' ',
                        commentdoc(vcomment),
                        NIL if last else HARDLINE,
                    ]),
                    # Put comment above the value
                    # on its own line
                    when_broken=concat([
                        nest(
                            ctx.indent,
                            concat([
                                HARDLINE,
                                commentdoc(vcomment),
                                HARDLINE,
                                # Rerender vdoc with plain multiline strategy,
                                # since we already have an indentation.
                                pretty_python_value(
                                    v,
                                    ctx=(
                                        ctx
                                        .nested_call()
                                        .use_multiline_strategy(MULTILINE_STRATEGY_PLAIN)
                                    ),
                                ),
                                COMMA if not last else NIL,
                                HARDLINE if not last else NIL
                            ])
                        ),
                    ])
                )
            )
        else:
            vcommented = concat([
                vdoc,
                COMMA if not last else NIL,
                LINE if not last else NIL
            ])
        parts.append(
            concat([
                kcommented,
                concat([COLON, ' ']),
                vcommented
            ])
        )
    if trailing_comment:
        parts.append(concat([
            HARDLINE,
            commentdoc(trailing_comment)
        ]))
    doc = bracket(
        ctx,
        LBRACE,
        concat(parts),
        RBRACE,
    )
    if len(pairs) > 2 or has_comment:
        doc = always_break(doc)
    else:
        doc = group(doc)
    if is_native_type:
        return doc
    if not parts:
        return pretty_call_alt(ctx, constructor)
    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(doc, ),
        hug_sole_arg=True
    )
INF_FLOAT = float('inf')
NEG_INF_FLOAT = float('-inf')


@register_pretty(float)
def pretty_float(value, ctx):
    """Render a float; inf/-inf/nan become ``float('inf')``-style
    calls since they have no literal form. Float subclasses render as
    ``Subclass(<literal>)``."""
    cls = type(value)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, cls, args=(..., ))
    if value == INF_FLOAT:
        return pretty_call_alt(ctx, cls, args=('inf', ))
    if value == NEG_INF_FLOAT:
        return pretty_call_alt(ctx, cls, args=('-inf', ))
    if math.isnan(value):
        return pretty_call_alt(ctx, cls, args=('nan', ))
    literal = annotate(Token.NUMBER_FLOAT, repr(value))
    if cls is float:
        return literal
    return build_fncall(ctx, general_identifier(cls), argdocs=(literal, ))
@register_pretty(int)
def pretty_int(value, ctx):
    """Render an int literal; int subclasses render as
    ``Subclass(<literal>)``."""
    cls = type(value)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, cls, args=(..., ))
    literal = annotate(Token.NUMBER_INT, repr(value))
    if cls is int:
        return literal
    return build_fncall(ctx, general_identifier(cls), argdocs=(literal, ))
@register_pretty(type(...))
def pretty_ellipsis(value, ctx):
    # The Ellipsis singleton is rendered as the literal '...'.
    return ELLIPSIS
@register_pretty(bool)
def pretty_bool(value, ctx):
    """Render True/False; bool subclasses render as
    ``Subclass(True)``."""
    cls = type(value)
    keyword = annotate(Token.KEYWORD_CONSTANT, 'True' if value else 'False')
    if cls is bool:
        return keyword
    return build_fncall(
        ctx,
        general_identifier(cls),
        argdocs=(keyword, )
    )
# Shared Doc for the None singleton.
NONE_DOC = annotate(Token.KEYWORD_CONSTANT, 'None')
@register_pretty(type(None))
def pretty_none(value, ctx):
    # None always renders as the bare keyword.
    return NONE_DOC
SINGLE_QUOTE_TEXT = "'"
SINGLE_QUOTE_BYTES = b"'"
DOUBLE_QUOTE_TEXT = '"'
DOUBLE_QUOTE_BYTES = b'"'


def determine_quote_strategy(s):
    """Choose the quote character for rendering *s* (str or bytes).

    Prefers single quotes; uses double quotes when that avoids
    escaping, or when the string contains fewer doubles than singles.
    The returned quote is always a text (str) character, even for
    bytes input, since it is emitted into a text Doc.
    """
    if isinstance(s, str):
        single, double = SINGLE_QUOTE_TEXT, DOUBLE_QUOTE_TEXT
    else:
        single, double = SINGLE_QUOTE_BYTES, DOUBLE_QUOTE_BYTES
    if single not in s:
        return SINGLE_QUOTE_TEXT
    if double not in s:
        return DOUBLE_QUOTE_TEXT
    # Both quote kinds present: escape whichever occurs no more often.
    if s.count(single) <= s.count(double):
        return SINGLE_QUOTE_TEXT
    return DOUBLE_QUOTE_TEXT
def escape_str_for_quote(use_quote, s):
    """Escape *s* (str or bytes) for embedding between *use_quote*
    quote characters, reusing repr() for the heavy lifting.

    When repr() happened to pick a different quote than *use_quote*,
    the escaping of single/double quotes is swapped accordingly.
    """
    raw = repr(s)
    quote_used = raw[-1]
    # repr may emit a prefix (e.g. b'...'); the payload sits between
    # the first occurrence of the quote and the trailing one.
    body = raw[raw.find(quote_used) + 1:-1]
    if quote_used == use_quote:
        # repr already escaped for the quote we want.
        return body
    if use_quote == "'":
        # repr used double quotes: unescape doubles, escape singles.
        return body.replace('\\"', '"').replace("'", "\\'")
    # repr used single quotes: unescape singles, escape doubles.
    return body.replace("\\'", "'").replace('"', '\\"')
# Matches the escape sequences repr() can emit inside a string literal
# (simple backslash escapes, \N{...}, \uXXXX, \UXXXXXXXX, \xXX and
# octal escapes). The capturing group makes re.split keep the matches.
STR_LITERAL_ESCAPES = re.compile(
    r'''((?:\\[\\abfnrtv"'])|'''
    r'(?:\\N\{.*?\})|'
    r'(?:\\u[a-fA-F0-9]{4})|'
    r'(?:\\U[a-fA-F0-9]{8})|'
    r'(?:\\x[a-fA-F0-9]{2})|'
    r'(?:\\[0-7]{1,3}))'
)
def highlight_escapes(s):
    """Split an escaped string into alternating literal/escape chunks
    and annotate each with the matching syntax-highlighting token."""
    if not s:
        return NIL
    matches = STR_LITERAL_ESCAPES.split(s)
    # split() with a capturing group alternates non-matching and
    # matching chunks; detect which kind the first chunk is.
    starts_with_match = bool(STR_LITERAL_ESCAPES.match(matches[0]))
    docs = []
    for part, is_escaped in zip(
        matches,
        cycle([starts_with_match, not starts_with_match])
    ):
        if not part:
            continue
        docs.append(
            annotate(
                (
                    Token.STRING_ESCAPE
                    if is_escaped
                    else Token.LITERAL_STRING
                ),
                part
            )
        )
    return concat(docs)
def pretty_single_line_str(s, indent, use_quote=None):
    """Render str/bytes *s* as a one-line quoted literal Doc, with a
    ``b`` affix for bytes and escape sequences highlighted.

    ``indent`` is accepted for signature parity with callers but is
    not used in this function. ``use_quote`` defaults to whatever
    determine_quote_strategy picks.
    """
    prefix = (
        annotate(Token.STRING_AFFIX, 'b')
        if isinstance(s, bytes)
        else ''
    )
    if use_quote is None:
        use_quote = determine_quote_strategy(s)
    escaped = escape_str_for_quote(use_quote, s)
    escapes_highlighted = highlight_escapes(escaped)
    return concat([
        prefix,
        annotate(
            Token.LITERAL_STRING,
            concat([
                use_quote,
                escapes_highlighted,
                use_quote
            ])
        )
    ])
def split_at(idx, sequence):
    """Split *sequence* at index *idx* into a (head, tail) pair."""
    head, tail = sequence[:idx], sequence[idx:]
    return (head, tail)
def escaped_len(s, use_quote):
    # Length of s as it will appear once escaped for use_quote quoting.
    return len(escape_str_for_quote(use_quote, s))
def str_to_lines(max_len, use_quote, s, pattern=None):
    """Yield chunks of str/bytes *s* whose escaped length is at most
    *max_len*, preferring to break at whitespace (or at non-word
    characters when the string contains no whitespace), and splitting
    mid-part only when a single part exceeds the limit.

    ``pattern`` may override the splitting regex; it must use a
    capturing group so re.split keeps the separators.
    """
    assert max_len > 0, "max_len must be positive"
    if len(s) <= max_len:
        if s:
            yield s
        return
    if pattern is None:
        if isinstance(s, str):
            whitespace_pattern = WHITESPACE_PATTERN_TEXT
            nonword_pattern = NONWORD_PATTERN_TEXT
        else:
            assert isinstance(s, bytes)
            whitespace_pattern = WHITESPACE_PATTERN_BYTES
            nonword_pattern = NONWORD_PATTERN_BYTES
        alternating_words_ws = whitespace_pattern.split(s)
        pattern = whitespace_pattern
        if len(alternating_words_ws) <= 1:
            # no whitespace: try splitting with nonword pattern.
            alternating_words_ws = nonword_pattern.split(s)
            pattern = nonword_pattern
    else:
        alternating_words_ws = pattern.split(s)
    if isinstance(s, str):
        empty = ''
    else:
        assert isinstance(s, bytes)
        empty = b''
    starts_with_whitespace = bool(pattern.match(alternating_words_ws[0]))
    # List[Tuple[str, bool]]
    # The boolean associated with each part indicates if it is a
    # whitespace/non-word part or not.
    tagged_alternating = iter(
        zip(
            alternating_words_ws,
            cycle([starts_with_whitespace, not starts_with_whitespace])
        )
    )
    next_part = None
    next_is_whitespace = None
    curr_line_parts = []
    curr_line_len = 0
    while True:
        if not next_part:
            try:
                next_part, next_is_whitespace = next(tagged_alternating)
            except StopIteration:
                break
            if not next_part:
                continue
        # We think of the current line as including next_part,
        # but as an optimization we don't append to curr_line_parts,
        # as we often would have to pop it back out.
        next_escaped_len = escaped_len(next_part, use_quote)
        curr_line_len += next_escaped_len
        if curr_line_len == max_len:
            if not next_is_whitespace and len(curr_line_parts) > 1:
                yield empty.join(curr_line_parts)
                curr_line_parts = []
                curr_line_len = 0
                # Leave next_part and next_is_whitespace as is
                # to be processed on next iteration
            else:
                yield empty.join(chain(curr_line_parts, [next_part]))
                curr_line_parts = []
                curr_line_len = 0
                next_part = None
                next_is_whitespace = None
        elif curr_line_len > max_len:
            if not next_is_whitespace and curr_line_parts:
                yield empty.join(curr_line_parts)
                curr_line_parts = []
                curr_line_len = 0
                # Leave next_part and next_is_whitespace as is
                # to be processed on next iteration
                continue
            # A single part longer than max_len: hard-split it at the
            # remaining width and carry the rest to the next line.
            remaining_len = max_len - (curr_line_len - next_escaped_len)
            this_line_part, next_line_part = split_at(max(remaining_len, 0), next_part)
            if this_line_part:
                curr_line_parts.append(this_line_part)
            if curr_line_parts:
                yield empty.join(curr_line_parts)
            curr_line_parts = []
            curr_line_len = 0
            if next_line_part:
                next_part = next_line_part
            else:
                next_part = None
        else:
            curr_line_parts.append(next_part)
            next_part = None
            next_is_whitespace = None
    if curr_line_parts:
        yield empty.join(curr_line_parts)
@register_pretty(str)
@register_pretty(bytes)
def pretty_str(s, ctx, split_pattern=None):
    """Render str/bytes, breaking long strings into adjacent literals
    laid out according to ``ctx.multiline_strategy``.

    The actual layout decision depends on the column position at
    render time, so the work is wrapped in a ``contextual`` evaluator.
    """
    # Subclasses of str/bytes
    # will be printed as StrSubclass('the actual string')
    constructor = type(s)
    is_native_type = constructor in (str, bytes)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, constructor, args=(..., ))
    multiline_strategy = ctx.multiline_strategy
    prettyprinter_indent = ctx.indent
    def evaluator(indent, column, page_width, ribbon_width):
        nonlocal multiline_strategy
        columns_left_in_line = page_width - column
        columns_left_in_ribbon = indent + ribbon_width - column
        available_width = min(columns_left_in_line, columns_left_in_ribbon)
        # NOTE(review): this width estimate ignores escape sequences
        # and the b'' prefix, so a string near the limit may slightly
        # overflow the line — presumably acceptable; confirm upstream.
        singleline_str_chars = len(s) + len('""')
        flat_version = pretty_single_line_str(s, prettyprinter_indent)
        if singleline_str_chars <= available_width:
            if is_native_type:
                return flat_version
            return build_fncall(ctx, constructor, argdocs=[flat_version])
        # multiline string
        each_line_starts_on_col = indent
        each_line_ends_on_col = min(page_width, each_line_starts_on_col + ribbon_width)
        each_line_max_str_len = max(
            each_line_ends_on_col - each_line_starts_on_col - 2,
            # If we're printing the string inside a highly nested data
            # structure, we may naturally run out of available width.
            # In these cases, we need to give some space for printing
            # such that we don't get stuck in an infinite loop when
            # str_to_lines is called.
            8 + len('""')
        )
        use_quote = determine_quote_strategy(s)
        lines = list(str_to_lines(
            max_len=each_line_max_str_len,
            use_quote=use_quote,
            s=s,
            pattern=split_pattern,
        ))
        if len(lines) == 1:
            return flat_version
        parts = intersperse(
            HARDLINE,
            (
                pretty_single_line_str(
                    line,
                    indent=prettyprinter_indent,
                    use_quote=use_quote,
                )
                for line in lines
            )
        )
        if not is_native_type:
            # Inside a constructor call the parens/indent come from the
            # call itself, so fall back to plain stacked literals.
            multiline_strategy = MULTILINE_STRATEGY_PLAIN
        if multiline_strategy == MULTILINE_STRATEGY_PLAIN:
            res = always_break(concat(parts))
            if is_native_type:
                return res
            return build_fncall(ctx, constructor, argdocs=[res])
        elif multiline_strategy == MULTILINE_STRATEGY_HANG:
            return always_break(
                nest(
                    prettyprinter_indent,
                    concat(parts)
                )
            )
        else:
            if multiline_strategy == MULTILINE_STRATEGY_PARENS:
                left_paren, right_paren = LPAREN, RPAREN
            else:
                assert multiline_strategy == MULTILINE_STRATEGY_INDENTED
                left_paren, right_paren = '', ''
            return always_break(
                concat([
                    left_paren,
                    nest(
                        prettyprinter_indent,
                        concat([
                            HARDLINE,
                            *parts,
                        ])
                    ),
                    (
                        HARDLINE
                        if multiline_strategy == MULTILINE_STRATEGY_PARENS
                        else NIL
                    ),
                    right_paren
                ])
            )
    return contextual(evaluator)
def _pretty_recursion(value):
    """Placeholder text emitted when *value* is already being printed
    higher up the stack (i.e. a self-referential structure)."""
    typename = type(value).__name__
    return '<Recursion on {} with id={}>'.format(typename, id(value))
def python_to_sdocs(
    value,
    indent,
    width,
    depth,
    ribbon_width,
    max_seq_len,
    sort_dict_keys
):
    """Pretty print *value* and lay the resulting Doc out into SDocs.

    ``depth=None`` means unlimited nesting depth. The ribbon fraction
    is clamped so it never exceeds the full page width.
    """
    if depth is None:
        depth = float('inf')
    doc = pretty_python_value(
        value,
        ctx=PrettyContext(
            indent=indent,
            depth_left=depth,
            visited=set(),
            max_seq_len=max_seq_len,
            sort_dict_keys=sort_dict_keys
        )
    )
    if is_commented(doc):
        # Top-level comment: beside the value when flat, above it when
        # the layout breaks.
        doc = group(
            flat_choice(
                when_flat=concat([
                    doc,
                    ' ',
                    commentdoc(doc.annotation.value),
                ]),
                when_broken=concat([
                    commentdoc(doc.annotation.value),
                    HARDLINE,
                    doc
                ])
            )
        )
    ribbon_frac = min(1.0, ribbon_width / width)
    return layout_smart(doc, width=width, ribbon_frac=ribbon_frac)
|
tommikaikkonen/prettyprinter
|
prettyprinter/prettyprinter.py
|
build_fncall
|
python
|
def build_fncall(
ctx,
fndoc,
argdocs=(),
kwargdocs=(),
hug_sole_arg=False,
trailing_comment=None,
):
if callable(fndoc):
fndoc = general_identifier(fndoc)
has_comment = bool(trailing_comment)
argdocs = list(argdocs)
kwargdocs = list(kwargdocs)
kwargdocs = [
# Propagate any comments to the kwarg doc.
(
comment_doc(
concat([
keyword_arg(binding),
ASSIGN_OP,
doc.doc
]),
doc.annotation.value
)
if is_commented(doc)
else concat([
keyword_arg(binding),
ASSIGN_OP,
doc
])
)
for binding, doc in kwargdocs
]
if not (argdocs or kwargdocs):
return concat([
fndoc,
LPAREN,
RPAREN,
])
if (
hug_sole_arg and
not kwargdocs and
len(argdocs) == 1 and
not is_commented(argdocs[0])
):
return group(
concat([
fndoc,
LPAREN,
argdocs[0],
RPAREN
])
)
allarg_docs = [*argdocs, *kwargdocs]
if trailing_comment:
allarg_docs.append(commentdoc(trailing_comment))
parts = []
for idx, doc in enumerate(allarg_docs):
last = idx == len(allarg_docs) - 1
if is_commented(doc):
has_comment = True
comment_str = doc.annotation.value
doc = doc.doc
else:
comment_str = None
part = concat([doc, NIL if last else COMMA])
if comment_str:
part = group(
flat_choice(
when_flat=concat([
part,
' ',
commentdoc(comment_str)
]),
when_broken=concat([
commentdoc(comment_str),
HARDLINE,
part,
]),
)
)
if not last:
part = concat([part, HARDLINE if has_comment else LINE])
parts.append(part)
outer = (
always_break
if has_comment
else group
)
return outer(
concat([
fndoc,
LPAREN,
nest(
ctx.indent,
concat([
SOFTLINE,
concat(parts),
])
),
SOFTLINE,
RPAREN
])
)
|
Builds a doc that looks like a function call,
from docs that represent the function, arguments
and keyword arguments.
If ``hug_sole_arg`` is True, and the represented
functional call is done with a single non-keyword
argument, the function call parentheses will hug
the sole argument doc without newlines and indentation
in break mode. This makes a difference in calls
like this::
> hug_sole_arg = False
frozenset(
[
1,
2,
3,
4,
5
]
)
> hug_sole_arg = True
frozenset([
1,
2,
3,
4,
5,
])
If ``trailing_comment`` is provided, the text is
rendered as a comment after the last argument and
before the closing parenthesis. This will force
the function call to be broken to multiple lines.
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/prettyprinter.py#L849-L1003
|
[
"def group(doc):\n \"\"\"Annotates doc with special meaning to the layout algorithm, so that the\n document is attempted to output on a single line if it is possible within\n the layout constraints. To lay out the doc on a single line, the `when_flat`\n branch of ``FlatChoice`` is used.\"\"\"\n return Group(validate_doc(doc))\n",
"def concat(docs):\n \"\"\"Returns a concatenation of the documents in the iterable argument\"\"\"\n return Concat(map(validate_doc, docs))\n",
"def nest(i, doc):\n return Nest(i, validate_doc(doc))\n",
"def always_break(doc):\n \"\"\"Instructs the layout algorithm that ``doc`` must be\n broken to multiple lines. This instruction propagates\n to all higher levels in the layout, but nested Docs\n may still be laid out flat.\"\"\"\n return AlwaysBreak(validate_doc(doc))\n",
"def is_commented(value):\n return (\n isinstance(value, Annotated) and\n isinstance(value.annotation, CommentAnnotation)\n )\n",
"def general_identifier(s):\n if callable(s):\n module, qualname = s.__module__, s.__qualname__\n\n if module in IMPLICIT_MODULES:\n if module == 'builtins':\n return builtin_identifier(qualname)\n return identifier(qualname)\n return identifier('{}.{}'.format(module, qualname))\n return identifier(s)\n",
"def commentdoc(text):\n \"\"\"Returns a Doc representing a comment `text`. `text` is\n treated as words, and any whitespace may be used to break\n the comment to multiple lines.\"\"\"\n if not text:\n raise ValueError(\n 'Expected non-empty comment str, got {}'.format(repr(text))\n )\n\n commentlines = []\n for line in text.splitlines():\n alternating_words_ws = list(filter(None, WHITESPACE_PATTERN_TEXT.split(line)))\n starts_with_whitespace = bool(\n WHITESPACE_PATTERN_TEXT.match(alternating_words_ws[0])\n )\n\n if starts_with_whitespace:\n prefix = alternating_words_ws[0]\n alternating_words_ws = alternating_words_ws[1:]\n else:\n prefix = NIL\n\n if len(alternating_words_ws) % 2 == 0:\n # The last part must be whitespace.\n alternating_words_ws = alternating_words_ws[:-1]\n\n for idx, tup in enumerate(zip(alternating_words_ws, cycle([False, True]))):\n part, is_ws = tup\n if is_ws:\n alternating_words_ws[idx] = flat_choice(\n when_flat=part,\n when_broken=always_break(\n concat([\n HARDLINE,\n '# ',\n ])\n )\n )\n\n commentlines.append(\n concat([\n '# ',\n prefix,\n fill(alternating_words_ws)\n ])\n )\n\n outer = identity\n\n if len(commentlines) > 1:\n outer = always_break\n\n return annotate(\n Token.COMMENT_SINGLE,\n outer(concat(intersperse(HARDLINE, commentlines)))\n )\n"
] |
import inspect
import math
import re
import sys
import warnings
import ast
from collections import OrderedDict
from functools import singledispatch, partial
from itertools import chain, cycle
from traceback import format_exception
from types import (
FunctionType,
BuiltinFunctionType,
BuiltinMethodType
)
from weakref import WeakKeyDictionary
from .doc import (
always_break,
annotate,
concat,
contextual,
flat_choice,
fill,
group,
nest,
NIL,
LINE,
SOFTLINE,
HARDLINE
)
from .doctypes import (
Annotated,
Doc
)
from .layout import layout_smart
from .syntax import Token
from .utils import identity, intersperse, take
PY_VERSION_INFO = sys.version_info
DICT_KEY_ORDER_SUPPORTED = PY_VERSION_INFO >= (3, 6)
UNSET_SENTINEL = object()
# Reusable punctuation and operator Docs.
COMMA = annotate(Token.PUNCTUATION, ',')
COLON = annotate(Token.PUNCTUATION, ':')
ELLIPSIS = annotate(Token.PUNCTUATION, '...')
LPAREN = annotate(Token.PUNCTUATION, '(')
RPAREN = annotate(Token.PUNCTUATION, ')')
LBRACKET = annotate(Token.PUNCTUATION, '[')
RBRACKET = annotate(Token.PUNCTUATION, ']')
LBRACE = annotate(Token.PUNCTUATION, '{')
RBRACE = annotate(Token.PUNCTUATION, '}')
NEG_OP = annotate(Token.OPERATOR, '-')
MUL_OP = annotate(Token.OPERATOR, '*')
ADD_OP = annotate(Token.OPERATOR, '+')
ASSIGN_OP = annotate(Token.OPERATOR, '=')
# Split patterns keep their separators via the capturing group.
WHITESPACE_PATTERN_TEXT = re.compile(r'(\s+)')
WHITESPACE_PATTERN_BYTES = re.compile(rb'(\s+)')
NONWORD_PATTERN_TEXT = re.compile(r'(\W+)')
NONWORD_PATTERN_BYTES = re.compile(rb'(\W+)')
# The MULTILINE_STRATEGY_* constants below select how a long string is
# laid out across lines; the bare string literal above each one is a
# no-op illustration of the resulting shape.
# For dict keys
"""
(
'aaaaaaaaaa'
'aaaaaa'
)
"""
MULTILINE_STRATEGY_PARENS = 'MULTILINE_STRATEGY_PARENS'
# For dict values
"""
'aaaaaaaaaa'
'aaaaa'
"""
MULTILINE_STRATEGY_INDENTED = 'MULTILINE_STRATEGY_INDENTED'
# For sequence elements
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STRATEGY_HANG = 'MULTILINE_STRATEGY_HANG'
# For top level strs
"""
'aaaaaaaaa'
'aaaaaa'
"""
MULTILINE_STRATEGY_PLAIN = 'MULTILINE_STRATEGY_PLAIN'
# Module prefixes omitted when rendering qualified names.
IMPLICIT_MODULES = {
    '__main__',
    'builtins',
}
class CommentAnnotation:
    """Doc annotation that carries comment text for the layout
    algorithm to render next to the annotated Doc."""

    def __init__(self, value):
        assert isinstance(value, str)
        self.value = value

    def __repr__(self):
        return 'ValueComment({!r})'.format(self.value)
class _CommentedValue:
    """Pairs a value with comment text; unwrapped during layout."""

    def __init__(self, value, comment):
        self.value, self.comment = value, comment
class _TrailingCommentedValue:
    """Pairs a value with comment text rendered after its last
    element; unwrapped during layout."""

    def __init__(self, value, comment):
        self.value, self.comment = value, comment
def comment_value(value, comment_text):
    """Annotates a Python value with a comment text.
    prettyprinter will inspect and strip the annotation
    during the layout process, and handle rendering the comment
    next to the value in the output.
    It is highly unlikely you need to call this function. Use
    ``comment`` instead, which works in almost all cases.
    """
    # Wrapper is unwrapped again by unwrap_comments() during layout.
    return _CommentedValue(value, comment_text)
def comment_doc(doc, comment_text):
    """Annotates a Doc with a comment; used by the layout algorithm.
    You don't need to call this unless you're doing something low-level
    with Docs; use ``comment`` instead.
    ``prettyprinter`` will make sure the parent (or top-level) handler
    will render the comment in a proper way. E.g. if ``doc``
    represents an element in a list, then the ``list`` pretty
    printer will handle where to place the comment.
    """
    # The CommentAnnotation is detected via is_commented() by parents.
    return annotate(CommentAnnotation(comment_text), doc)
def comment(value, comment_text):
    """Attach *comment_text* to *value*, which may be a plain Python
    value or a layout Doc; dispatches to the matching wrapper.

    When printed by prettyprinter, the comment is rendered next to
    the value or Doc.
    """
    wrap = comment_doc if isinstance(value, Doc) else comment_value
    return wrap(value, comment_text)
def trailing_comment(value, comment_text):
    """Annotates a value with a comment text, so that
    the comment will be rendered "trailing", e.g. in place
    of the last element in a list, set or tuple, or after
    the last argument in a function.
    This will force the rendering of ``value`` to be broken
    to multiple lines as Python does not have inline comments.
    >>> trailing_comment(['value'], '...and more')
    [
        'value',
        # ...and more
    ]
    """
    # Wrapper is unwrapped again by unwrap_comments() during layout.
    return _TrailingCommentedValue(value, comment_text)
def unwrap_comments(value):
    """Strip any stacked comment wrappers from *value*.

    Returns ``(inner_value, comment, trailing_comment)``; either
    comment slot is None when the corresponding wrapper was absent.
    With nested wrappers of the same kind, the innermost one wins.
    """
    plain = None
    trailing = None
    while True:
        if isinstance(value, _CommentedValue):
            plain, value = value.comment, value.value
        elif isinstance(value, _TrailingCommentedValue):
            trailing, value = value.comment, value.value
        else:
            break
    return (value, plain, trailing)
def is_commented(value):
    # True when value is a Doc carrying a CommentAnnotation.
    return (
        isinstance(value, Annotated) and
        isinstance(value.annotation, CommentAnnotation)
    )
def builtin_identifier(s):
    # Identifier Doc highlighted as a builtin name.
    return annotate(Token.NAME_BUILTIN, s)
def identifier(s):
    # Identifier Doc highlighted as a function/class name.
    return annotate(Token.NAME_FUNCTION, s)
def keyword_arg(s):
    # Identifier Doc highlighted as a keyword-argument name.
    return annotate(Token.NAME_VARIABLE, s)
def general_identifier(s):
    """Render *s* as an identifier Doc. Callables are shown by their
    qualified name, with the 'builtins' and '__main__' module
    prefixes dropped; builtins get builtin highlighting."""
    if not callable(s):
        return identifier(s)
    module, qualname = s.__module__, s.__qualname__
    if module not in IMPLICIT_MODULES:
        return identifier('{}.{}'.format(module, qualname))
    if module == 'builtins':
        return builtin_identifier(qualname)
    return identifier(qualname)
def classattr(cls, attrname):
    # Render a dotted class-attribute reference like SomeClass.attrname.
    return concat([
        general_identifier(cls),
        identifier('.{}'.format(attrname))
    ])
class PrettyContext:
    """
    An immutable object used to track context during construction of
    layout primitives. An instance of PrettyContext is passed to every
    pretty printer definition.

    As a performance optimization, the ``visited`` set is implemented
    as mutable (it is shared across the _replace copies below).
    """
    __slots__ = (
        'indent',
        'depth_left',
        'visited',
        'multiline_strategy',
        'max_seq_len',
        'sort_dict_keys',
        'user_ctx'
    )
    def __init__(
        self,
        indent,
        depth_left,
        visited=None,
        multiline_strategy=MULTILINE_STRATEGY_PLAIN,
        max_seq_len=1000,
        sort_dict_keys=False,
        user_ctx=None
    ):
        self.indent = indent
        self.depth_left = depth_left
        self.multiline_strategy = multiline_strategy
        self.max_seq_len = max_seq_len
        self.sort_dict_keys = sort_dict_keys
        if visited is None:
            visited = set()
        self.visited = visited
        self.user_ctx = user_ctx or {}
    def _replace(self, **kwargs):
        # namedtuple-style copy-with-overrides, driven by __slots__.
        # Note the visited set is carried over by reference, on purpose.
        passed_keys = set(kwargs.keys())
        fieldnames = type(self).__slots__
        assert passed_keys.issubset(set(fieldnames))
        return PrettyContext(
            **{
                k: (
                    kwargs[k]
                    if k in passed_keys
                    else getattr(self, k)
                )
                for k in fieldnames
            }
        )
    def use_multiline_strategy(self, strategy):
        # Copy with a different string-layout strategy.
        return self._replace(multiline_strategy=strategy)
    def assoc(self, key, value):
        """
        Return a modified PrettyContext with ``key`` set to ``value``
        """
        return self._replace(user_ctx={
            **self.user_ctx,
            key: value,
        })
    def set(self, key, value):
        # Deprecated alias of assoc().
        warnings.warn(
            "PrettyContext.set will be deprecated in the future in favor of "
            "renamed PrettyPrinter.assoc. You can fix this warning by "
            "changing .set method calls to .assoc",
            PendingDeprecationWarning
        )
        return self.assoc(key, value)
    def get(self, key, default=None):
        # Look up a user-defined context value set via assoc().
        return self.user_ctx.get(key, default)
    def nested_call(self):
        # Copy with one less level of depth budget remaining.
        return self._replace(depth_left=self.depth_left - 1)
    def start_visit(self, value):
        # Mark value as in-progress for recursion detection (by id).
        self.visited.add(id(value))
    def end_visit(self, value):
        self.visited.remove(id(value))
    def is_visited(self, value):
        return id(value) in self.visited
def _warn_about_bad_printer(pretty_fn, value, exc):
    """Emit a UserWarning reporting that the registered printer
    *pretty_fn* raised *exc* while printing *value*; the caller then
    falls back to repr()."""
    printer_name = '{}.{}'.format(
        pretty_fn.__module__,
        pretty_fn.__qualname__
    )
    traceback_text = ''.join(
        format_exception(type(exc), exc, exc.__traceback__)
    )
    warnings.warn(
        "The pretty printer for {}, {}, raised an exception. "
        "Falling back to default repr.\n\n{}".format(
            type(value).__name__,
            printer_name,
            traceback_text
        ),
        UserWarning
    )
def _run_pretty(pretty_fn, value, ctx, trailing_comment=None):
    """Invoke a registered printer with recursion tracking and error
    fallbacks; returns the Doc (or str) it produced.

    Recursive values short-circuit to a recursion placeholder; a
    printer that raises is reported via a warning and replaced with
    repr(); a non-str/Doc return value raises ValueError.
    """
    if ctx.is_visited(value):
        return _pretty_recursion(value)
    ctx.start_visit(value)
    if trailing_comment:
        try:
            doc = pretty_fn(
                value,
                ctx,
                trailing_comment=trailing_comment
            )
        except TypeError as e:
            # This is probably because pretty_fn does not support
            # trailing_comment, but let's make sure.
            sig = inspect.signature(pretty_fn)
            try:
                sig.bind(value, ctx, trailing_comment=trailing_comment)
            except TypeError:
                # Signature really doesn't accept trailing_comment:
                # warn and retry without it.
                fnname = '{}.{}'.format(
                    pretty_fn.__module__,
                    pretty_fn.__qualname__
                )
                warnings.warn(
                    "The pretty printer for {}, {}, does not support rendering "
                    "trailing comments. It will not show up in output.".format(
                        type(value).__name__, fnname
                    )
                )
                doc = pretty_fn(value, ctx)
            else:
                # Signature was fine; the TypeError came from inside
                # the printer itself. Fall back to repr().
                _warn_about_bad_printer(pretty_fn, value, exc=e)
                doc = repr(value)
    else:
        try:
            doc = pretty_fn(value, ctx)
        except Exception as e:
            _warn_about_bad_printer(pretty_fn, value, exc=e)
            doc = repr(value)
    if not (
        isinstance(doc, str) or
        isinstance(doc, Doc)
    ):
        fnname = '{}.{}'.format(
            pretty_fn.__module__,
            pretty_fn.__qualname__
        )
        raise ValueError(
            'Functions decorated with register_pretty must return '
            'an instance of str or Doc. {} returned '
            '{} instead.'.format(fnname, repr(doc))
        )
    ctx.end_visit(value)
    return doc
# Printers registered by dotted name ('module.QualName'), waiting for
# the real class to show up before actual registration.
_DEFERRED_DISPATCH_BY_NAME = {}


def get_deferred_key(type):
    """Dotted-path key used to look up deferred printers for *type*."""
    return '{}.{}'.format(type.__module__, type.__qualname__)


# (predicate, printer) pairs tried in order by the fallback printer.
_PREDICATE_REGISTRY = []
def _repr_pretty(value, ctx):
    # Fallback printer: try predicate-registered printers in
    # registration order, otherwise fall back to repr().
    for predicate, fn in _PREDICATE_REGISTRY:
        if predicate(value):
            return fn(value, ctx)
    return repr(value)
# Default singledispatch target; everything unregistered goes through
# the fallback printer wrapped with _run_pretty's safety net.
_BASE_DISPATCH = partial(_run_pretty, _repr_pretty)
pretty_dispatch = singledispatch(_BASE_DISPATCH)
def pretty_python_value(value, ctx):
    """Core entry point: turn *value* into a Doc.

    Unwraps any comment wrappers, dispatches to the registered
    printer, and re-attaches the (non-trailing) comment to the
    resulting Doc.
    """
    comment = None
    trailing_comment = None
    value, comment, trailing_comment = unwrap_comments(value)
    # Called for its side effect: promotes any deferred (string-named)
    # printer for this type into the real dispatch table before use.
    is_registered(
        type(value),
        check_superclasses=True,
        check_deferred=True,
        register_deferred=True
    )
    if trailing_comment:
        doc = pretty_dispatch(
            value,
            ctx,
            trailing_comment=trailing_comment
        )
    else:
        doc = pretty_dispatch(
            value,
            ctx
        )
    if comment:
        return comment_doc(
            doc,
            comment
        )
    return doc
def register_pretty(type=None, predicate=None):
    """Returns a decorator that registers the decorated function
    as the pretty printer for instances of ``type``.
    :param type: the type to register the pretty printer for, or a ``str``
        to indicate the module and name, e.g.: ``'collections.Counter'``.
    :param predicate: a predicate function that takes one argument
        and returns a boolean indicating if the value
        should be handled by the registered pretty printer.
    Only one of ``type`` and ``predicate`` may be supplied. That means
    that ``predicate`` will be run on unregistered types only.
    The decorated function must accept exactly two positional arguments:
    - ``value`` to pretty print, and
    - ``ctx``, a context value.
    Here's an example of the pretty printer for OrderedDict:
    .. code:: python
        from collections import OrderedDict
        from prettyprinter import register_pretty, pretty_call
        @register_pretty(OrderedDict)
        def pretty_ordereddict(value, ctx):
            return pretty_call(ctx, OrderedDict, list(value.items()))
    """
    if type is None and predicate is None:
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument."
        )
    if type is not None and predicate is not None:
        raise ValueError(
            "You must provide either the 'type' or 'predicate' argument,"
            "but not both"
        )
    if predicate is not None:
        if not callable(predicate):
            raise ValueError(
                "Expected a callable for 'predicate', got {}".format(
                    repr(predicate)
                )
            )
    def decorator(fn):
        # Validate the (value, ctx) signature up front with a dry bind.
        sig = inspect.signature(fn)
        value = None
        ctx = None
        try:
            sig.bind(value, ctx)
        except TypeError:
            fnname = '{}.{}'.format(
                fn.__module__,
                fn.__qualname__
            )
            raise ValueError(
                "Functions decorated with register_pretty must accept "
                "exactly two positional parameters: 'value' and 'ctx'. "
                "The function signature for {} was not compatible.".format(
                    fnname
                )
            )
        if type:
            if isinstance(type, str):
                # We don't wrap this with _run_pretty,
                # so that when we register this printer with an actual
                # class, we can call register_pretty(cls)(fn)
                _DEFERRED_DISPATCH_BY_NAME[type] = fn
            else:
                pretty_dispatch.register(type, partial(_run_pretty, fn))
        else:
            assert callable(predicate)
            _PREDICATE_REGISTRY.append((predicate, fn))
        return fn
    return decorator
def is_registered(
    type,
    *,
    check_superclasses=False,
    check_deferred=True,
    register_deferred=True
):
    """Return True if a pretty printer has been registered for ``type``.

    :param type: the class to check registration for.
    :param check_superclasses: if True, also consider printers registered
        for superclasses of ``type`` (including deferred ones).
    :param check_deferred: if True, consider printers that were registered
        under a ``'module.Name'`` string and not yet bound to a class.
    :param register_deferred: if True, any deferred printer found during
        the check is permanently registered for the class as a side effect.
    :raises ValueError: if ``register_deferred`` is True while
        ``check_deferred`` is False.
    """
    if not check_deferred and register_deferred:
        raise ValueError(
            'register_deferred may not be True when check_deferred is False'
        )
    # Exact, non-deferred registration.
    if type in pretty_dispatch.registry:
        return True
    if check_deferred:
        # Check deferred printers for the type exactly.
        deferred_key = get_deferred_key(type)
        if deferred_key in _DEFERRED_DISPATCH_BY_NAME:
            if register_deferred:
                # Promote: move the printer out of the deferred registry
                # and register it for the now-available class.
                deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(
                    deferred_key
                )
                register_pretty(type)(deferred_dispatch)
            return True
    if not check_superclasses:
        return False
    if check_deferred:
        # Check deferred printers for supertypes.
        for supertype in type.__mro__[1:]:
            deferred_key = get_deferred_key(supertype)
            if deferred_key in _DEFERRED_DISPATCH_BY_NAME:
                if register_deferred:
                    deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(
                        deferred_key
                    )
                    register_pretty(supertype)(deferred_dispatch)
                return True
    # Fall back to dispatch resolution along the MRO; anything other than
    # the base (default) implementation counts as registered.
    return pretty_dispatch.dispatch(type) is not _BASE_DISPATCH
def bracket(ctx, left, child, right):
    """Wrap ``child`` between the ``left`` and ``right`` delimiter docs,
    indenting the child by ``ctx.indent`` when it breaks onto its own
    lines (SOFTLINE collapses to nothing in flat layout)."""
    inner = nest(ctx.indent, concat([SOFTLINE, child]))
    return concat([left, inner, SOFTLINE, right])
def commentdoc(text):
    """Returns a Doc representing a comment `text`. `text` is
    treated as words, and any whitespace may be used to break
    the comment to multiple lines."""
    if not text:
        raise ValueError(
            'Expected non-empty comment str, got {}'.format(repr(text))
        )
    commentlines = []
    for line in text.splitlines():
        # Split into alternating word/whitespace parts, dropping empty
        # strings produced by the regex split at the ends.
        alternating_words_ws = list(filter(None, WHITESPACE_PATTERN_TEXT.split(line)))
        starts_with_whitespace = bool(
            WHITESPACE_PATTERN_TEXT.match(alternating_words_ws[0])
        )
        if starts_with_whitespace:
            # Preserve leading indentation verbatim after the '# '.
            prefix = alternating_words_ws[0]
            alternating_words_ws = alternating_words_ws[1:]
        else:
            prefix = NIL
        if len(alternating_words_ws) % 2 == 0:
            # The last part must be whitespace.
            alternating_words_ws = alternating_words_ws[:-1]
        # Replace each whitespace part with a choice: keep the spacing
        # when the line fits flat, otherwise break and start a fresh
        # '# ' comment line. (Mutating the list at the index just
        # consumed by the zip iterator is safe.)
        for idx, tup in enumerate(zip(alternating_words_ws, cycle([False, True]))):
            part, is_ws = tup
            if is_ws:
                alternating_words_ws[idx] = flat_choice(
                    when_flat=part,
                    when_broken=always_break(
                        concat([
                            HARDLINE,
                            '# ',
                        ])
                    )
                )
        commentlines.append(
            concat([
                '# ',
                prefix,
                fill(alternating_words_ws)
            ])
        )
    outer = identity
    if len(commentlines) > 1:
        # Multiple source lines: never collapse them onto one line.
        outer = always_break
    return annotate(
        Token.COMMENT_SINGLE,
        outer(concat(intersperse(HARDLINE, commentlines)))
    )
def sequence_of_docs(ctx, left, docs, right, dangle=False, force_break=False):
    """Lay out ``docs`` as a comma-separated sequence between the
    ``left`` and ``right`` delimiter docs.

    :param ctx: a PrettyContext value
    :param left: opening delimiter doc (e.g. a bracket)
    :param docs: iterable of element docs; commented elements get their
        comment either at the end of the same line or on the line above
    :param right: closing delimiter doc
    :param dangle: if True, emit a trailing comma after the last element
    :param force_break: if True, always break onto multiple lines
    """
    docs = list(docs)
    # Performance optimization:
    # in case of really long sequences,
    # the layout algorithm can be quite slow.
    # No branching here is needed if the sequence
    # is long enough that even with the shortest
    # element output, it does not fit the ribbon width.
    minimum_output_len = (
        2 +  # Assume left and right are one character each
        len(', ') * (len(docs) - 1) +
        len(docs)  # each element must take at least one character
    )
    MAX_PRACTICAL_RIBBON_WIDTH = 150
    will_break = force_break or minimum_output_len > MAX_PRACTICAL_RIBBON_WIDTH
    has_comment = any(is_commented(doc) for doc in docs)
    parts = []
    for idx, doc in enumerate(docs):
        last = idx == len(docs) - 1
        if is_commented(doc):
            comment_str = doc.annotation.value
            # Try to fit the comment at the end of the same line.
            flat_version = concat([
                doc,
                COMMA if not last else NIL,
                ' ',
                commentdoc(comment_str),
                HARDLINE if not last else NIL
            ])
            # If the value is broken to multiple lines, add
            # comment on the line above.
            broken_version = concat([
                commentdoc(comment_str),
                HARDLINE,
                doc,
                COMMA if not last else NIL,
                HARDLINE if not last else NIL
            ])
            parts.append(
                group(
                    flat_choice(
                        when_flat=flat_version,
                        when_broken=broken_version,
                    )
                )
            )
        else:
            parts.append(doc)
            # Separators for commented elements are already baked into
            # flat_version/broken_version above; only add them here.
            if not last:
                parts.append(
                    concat([COMMA, LINE])
                )
    if dangle:
        parts.append(COMMA)
    # Comments force a multi-line layout so a comment cannot swallow
    # the rest of a flat line.
    outer = (
        always_break
        if will_break or has_comment
        else group
    )
    return outer(bracket(ctx, left, concat(parts), right))
def pretty_call(ctx, fn, *args, **kwargs):
    """Build a Doc for a function call to ``fn`` with the given
    positional and keyword arguments.

    Convenience wrapper around :func:`pretty_call_alt` that collects the
    arguments from the call site itself. Keyword argument order is only
    preserved on Python 3.6+; on Python 3.5 use
    :func:`~prettyprinter.pretty_call_alt` directly.

    Example::

        pretty_call(ctx, sorted, [7, 4, 5], reverse=True)

    renders as ``sorted([7, 4, 5], reverse=True)``, broken across
    multiple lines by the layout algorithm when necessary. Syntax
    highlighting is handled automatically.

    :param ctx: a context value
    :type ctx: prettyprinter.prettyprinter.PrettyContext
    :param fn: a callable
    :param args: positional arguments to render in the call
    :param kwargs: keyword arguments to render in the call
    :returns: :class:`~prettyprinter.doc.Doc`
    """
    return pretty_call_alt(ctx, fn, args, kwargs)
def pretty_call_alt(ctx, fn, args=(), kwargs=()):
    """Returns a Doc that represents a function call to :keyword:`fn` with
    the ``args`` and ``kwargs``.

    Given an arbitrary context ``ctx``,::

        pretty_call_alt(ctx, sorted, args=([7, 4, 5], ), kwargs=[('reverse', True)])

    Will result in output::

        sorted([7, 4, 5], reverse=True)

    The layout algorithm will automatically break the call to multiple
    lines if needed::

        sorted(
            [7, 4, 5],
            reverse=True
        )

    ``pretty_call_alt`` automatically handles syntax highlighting.

    :param ctx: a context value
    :type ctx: prettyprinter.prettyprinter.PrettyContext
    :param fn: a callable
    :param args: a ``tuple`` of positional arguments to render to the call
    :param kwargs: keyword arguments to render to the call. Either an instance
                   of ``OrderedDict``, or an iterable of two-tuples, where the
                   first element is a `str` (key), and the second is the Python
                   value for that keyword argument.
    :returns: :class:`~prettyprinter.doc.Doc`
    """
    fndoc = general_identifier(fn)
    # Depth budget exhausted: render fn(...) without recursing into args.
    if ctx.depth_left <= 0:
        return concat([fndoc, LPAREN, ELLIPSIS, RPAREN])
    # A sole list/dict/tuple positional argument is "hugged" against the
    # parentheses, e.g. f([...]) instead of indenting it on its own line.
    if not kwargs and len(args) == 1:
        sole_arg = args[0]
        unwrapped_sole_arg, _comment, _trailing_comment = unwrap_comments(args[0])
        if type(unwrapped_sole_arg) in (list, dict, tuple):
            return build_fncall(
                ctx,
                fndoc,
                argdocs=[pretty_python_value(sole_arg, ctx)],
                hug_sole_arg=True,
            )
    nested_ctx = (
        ctx
        .nested_call()
        .use_multiline_strategy(MULTILINE_STRATEGY_HANG)
    )
    if not DICT_KEY_ORDER_SUPPORTED and isinstance(kwargs, dict):
        warnings.warn(
            "A dict was passed to pretty_call_alt to represent kwargs, "
            "but Python 3.5 doesn't maintain key order for dicts. The order "
            "of keyword arguments will be undefined in the output. "
            "To fix this, pass a list of two-tuples or an instance of "
            "OrderedDict instead.",
            UserWarning
        )
    # Normalize kwargs into an iterable of (name, value) pairs.
    kwargitems = (
        kwargs.items()
        if isinstance(kwargs, (OrderedDict, dict))
        else kwargs
    )
    return build_fncall(
        ctx,
        fndoc,
        argdocs=(
            pretty_python_value(arg, nested_ctx)
            for arg in args
        ),
        kwargdocs=(
            (kwarg, pretty_python_value(v, nested_ctx))
            for kwarg, v in kwargitems
        ),
    )
@register_pretty(type)
def pretty_type(_type, ctx):
    """Pretty printer for class objects."""
    if _type is type(None):  # noqa
        # There is no NoneType name available in builtins; render an
        # evaluable call to type(None) instead.
        return pretty_call_alt(ctx, type, args=(None, ))
    result = general_identifier(_type)
    # For native types, printing just the identifier is unambiguous:
    # >>> int
    # int
    # A dotted identifier such as functools.partial could be any kind of
    # value, so mimic Python's <class 'functools.partial'> repr by
    # attaching a 'class' comment.
    if _type.__module__ in IMPLICIT_MODULES:
        return result
    return comment(result, 'class')
@register_pretty(FunctionType)
def pretty_function(fn, ctx):
    """Render a plain Python function as its dotted identifier, tagged
    with a 'function' comment."""
    identifier = general_identifier(fn)
    return comment(identifier, 'function')
@register_pretty(BuiltinMethodType)
def pretty_builtin_method(method, ctx):
    """Render a built-in method as its dotted identifier, tagged with a
    'built-in method' comment."""
    identifier = general_identifier(method)
    return comment(identifier, 'built-in method')
@register_pretty(BuiltinFunctionType)
def pretty_builtin_function(fn, ctx):
    """Render a built-in function as its dotted identifier, tagged with
    a 'built-in function' comment."""
    identifier = general_identifier(fn)
    return comment(identifier, 'built-in function')
namedtuple_clsattrs = (
'__slots__',
'_make',
'_replace',
'_asdict'
)
c_namedtuple_identify_by_clsattrs = (
'n_fields',
'n_sequence_fields',
'n_unnamed_fields'
)
def _is_namedtuple(value):
cls = type(value)
for attrname in namedtuple_clsattrs:
try:
getattr(cls, attrname)
except AttributeError:
return False
return True
def _is_cnamedtuple(value):
cls = type(value)
for attrname in c_namedtuple_identify_by_clsattrs:
try:
val = getattr(cls, attrname)
except AttributeError:
return False
else:
if not isinstance(val, int):
return False
return True
def pretty_namedtuple(value, ctx, trailing_comment=None):
    """Render a namedtuple as a keyword-argument call to its class,
    pairing each field name with its element."""
    constructor = type(value)
    fields_and_values = zip(constructor._fields, value)
    return pretty_call_alt(ctx, constructor, kwargs=fields_and_values)
# Given a cnamedtuple value, returns a tuple of fieldnames; the ith
# fieldname corresponds to the ith element of the cnamedtuple.
def resolve_cnamedtuple_fieldnames(value):
    """Recover the field names of a C structseq ("cnamedtuple") value.

    structseq reprs look like ``time.struct_time(tm_year=..., ...)``
    with one keyword per element, in element order — see CPython's
    Objects/structseq.c. Parsing that repr as a Python expression and
    reading the keyword names therefore yields the field names. If the
    repr format ever changes, this raises and the caller must cope.
    """
    parsed = ast.parse(repr(value), mode='eval')
    call_node = parsed.body
    return tuple(kw.arg for kw in call_node.keywords)
# Cache of fieldname-resolution results for cnamedtuple classes.
# Keys: classes/constructors.
# Values: a tuple of fieldnames if resolving them was successful.
# Otherwise, the exception that was raised when attempting
# to resolve the fieldnames (cached so the failure is not retried).
# Weak keys let the classes be garbage collected normally.
_cnamedtuple_fieldnames_by_class = WeakKeyDictionary()
def pretty_cnamedtuple(value, ctx, trailing_comment=None):
cls = type(value)
if cls not in _cnamedtuple_fieldnames_by_class:
try:
fieldnames = resolve_cnamedtuple_fieldnames(value)
except Exception as exc:
fieldnames = exc
_cnamedtuple_fieldnames_by_class[cls] = fieldnames
fieldnames = _cnamedtuple_fieldnames_by_class[cls]
if isinstance(fieldnames, Exception):
raise fieldnames
return pretty_call_alt(
ctx,
cls,
args=tuple([
tuple(
comment(val, fieldname)
for val, fieldname in zip(value, fieldnames)
)
])
)
@register_pretty(tuple)
@register_pretty(list)
@register_pretty(set)
def pretty_bracketable_iterable(value, ctx, trailing_comment=None):
    """Pretty printer for tuples, lists and sets (and their subclasses).

    Namedtuples and C structseq tuples are delegated to their dedicated
    printers. Sequences longer than ``ctx.max_seq_len`` are truncated
    and annotated with an explanatory trailing comment. Subclasses are
    rendered as a call, e.g. ``Subclass([...])``.

    :param trailing_comment: optional comment text rendered before the
        closing bracket; forces a multi-line layout.
    """
    constructor = type(value)
    if isinstance(value, tuple):
        if _is_cnamedtuple(value):
            try:
                return pretty_cnamedtuple(
                    value,
                    ctx,
                    trailing_comment=trailing_comment
                )
            except Exception:
                pass  # render as a normal tuple
        elif _is_namedtuple(value):
            return pretty_namedtuple(value, ctx, trailing_comment=trailing_comment)
    is_native_type = constructor in (tuple, list, set)
    if len(value) > ctx.max_seq_len:
        truncation_comment = '...and {} more elements'.format(
            len(value) - ctx.max_seq_len
        )
        trailing_comment = (
            truncation_comment + '. ' + trailing_comment
            if trailing_comment
            else truncation_comment
        )
    dangle = False
    if isinstance(value, list):
        left, right = LBRACKET, RBRACKET
    elif isinstance(value, tuple):
        left, right = LPAREN, RPAREN
        if len(value) == 1:
            # A one-element tuple needs a trailing comma: (x, )
            dangle = True
    elif isinstance(value, set):
        left, right = LBRACE, RBRACE
    if not value:
        if isinstance(value, (list, tuple)):
            if is_native_type:
                return concat([left, right])
            return pretty_call_alt(ctx, constructor)
        else:
            # E.g. set() or SubclassOfSet()
            return pretty_call_alt(ctx, constructor)
    if ctx.depth_left == 0:
        # Depth budget exhausted: render [...] / (...) without recursing.
        if isinstance(value, (list, tuple)):
            literal = concat([left, ELLIPSIS, right])
            if is_native_type:
                return literal
            return build_fncall(
                ctx,
                general_identifier(constructor),
                argdocs=(literal, ),
                hug_sole_arg=True
            )
        else:
            return pretty_call_alt(ctx, constructor, args=(..., ))
    if len(value) == 1:
        sole_value = list(value)[0]
        els = [
            pretty_python_value(
                sole_value,
                ctx=(
                    ctx
                    .nested_call()
                    .use_multiline_strategy(MULTILINE_STRATEGY_PLAIN)
                )
            )
        ]
    else:
        els = (
            pretty_python_value(
                el,
                ctx=(
                    ctx
                    .nested_call()
                    .use_multiline_strategy(MULTILINE_STRATEGY_HANG)
                )
            )
            for el in take(ctx.max_seq_len, value)
        )
    if trailing_comment:
        els = chain(els, [commentdoc(trailing_comment)])
        # A trailing comma after a comment would be invalid output.
        dangle = False
    literal = sequence_of_docs(
        ctx,
        left,
        els,
        right,
        dangle=dangle,
        force_break=bool(trailing_comment)
    )
    if is_native_type:
        return literal
    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(literal, ),
        hug_sole_arg=True
    )
@register_pretty(frozenset)
def pretty_frozenset(value, ctx):
    """Render a frozenset (or subclass) as a call such as
    ``frozenset([1, 2])``; an empty one renders as ``frozenset()``."""
    constructor = type(value)
    if not value:
        return pretty_call_alt(ctx, constructor)
    return pretty_call_alt(ctx, constructor, args=(list(value), ))
class _AlwaysSortable(object):
__slots__ = ('value', )
def __init__(self, value):
self.value = value
def sortable_value(self):
return (str(type(self)), id(self))
def __lt__(self, other):
try:
return self.value < other.value
except TypeError:
return self.sortable_value() < other.sortable_value()
@register_pretty(dict)
def pretty_dict(d, ctx, trailing_comment=None):
    """Pretty printer for dicts and dict subclasses.

    Keys may be sorted (``ctx.sort_dict_keys``), long dicts are
    truncated to ``ctx.max_seq_len`` entries with an explanatory
    comment, and commented key/value docs are laid out so the comment
    lands on the same line or the line above. Subclasses render as
    ``Subclass({...})``.

    :param trailing_comment: optional comment text rendered before the
        closing brace; forces a multi-line layout.
    """
    constructor = type(d)
    is_native_type = constructor is dict
    if ctx.depth_left == 0:
        # Depth budget exhausted: render {...} without recursing.
        literal = concat([LBRACE, ELLIPSIS, RBRACE])
        if is_native_type:
            return literal
        return build_fncall(
            ctx,
            general_identifier(constructor),
            argdocs=(literal, ),
            hug_sole_arg=True
        )
    if len(d) > ctx.max_seq_len:
        count_truncated = len(d) - ctx.max_seq_len
        truncation_comment = '...and {} more elements'.format(
            count_truncated
        )
        trailing_comment = (
            truncation_comment + '. ' + trailing_comment
            if trailing_comment
            else truncation_comment
        )
    has_comment = bool(trailing_comment)
    sorted_keys = (
        sorted(d.keys(), key=_AlwaysSortable)
        if ctx.sort_dict_keys
        else d.keys()
    )
    # First pass: render each key and value, unwrapping any comment
    # annotations so they can be positioned explicitly below.
    pairs = []
    for k in take(ctx.max_seq_len, sorted_keys):
        v = d[k]
        if isinstance(k, (str, bytes)):
            kdoc = pretty_str(
                k,
                # not a nested call on purpose
                ctx=ctx.use_multiline_strategy(MULTILINE_STRATEGY_PARENS),
            )
        else:
            kdoc = pretty_python_value(
                k,
                ctx=ctx.nested_call()
            )
        vdoc = pretty_python_value(
            v,
            ctx=(
                ctx
                .nested_call()
                .use_multiline_strategy(MULTILINE_STRATEGY_INDENTED)
            ),
        )
        kcomment = None
        if is_commented(kdoc):
            has_comment = True
            kcomment = kdoc.annotation.value
            kdoc = kdoc.doc
        vcomment = None
        if is_commented(vdoc):
            has_comment = True
            vcomment = vdoc.annotation.value
            vdoc = vdoc.doc
        pairs.append((k, v, kdoc, vdoc, kcomment, vcomment))
    # Second pass: assemble `key: value` parts with separators and
    # comment placement.
    parts = []
    for idx, tup in enumerate(pairs):
        last = idx == len(pairs) - 1
        k, v, kdoc, vdoc, kcomment, vcomment = tup
        if not (kcomment or vcomment):
            parts.append(
                concat([
                    kdoc,
                    concat([COLON, ' ']),
                    vdoc,
                    NIL if last else COMMA,
                    NIL if last else LINE,
                ]),
            )
            continue
        if kcomment:
            # Key comments always go on the line above the key.
            kcommented = concat([
                commentdoc(kcomment),
                HARDLINE,
                kdoc,
            ])
        else:
            kcommented = kdoc
        if vcomment:
            vcommented = group(
                flat_choice(
                    # Add comment at the end of the line
                    when_flat=concat([
                        vdoc,
                        NIL if last else COMMA,
                        ' ',
                        commentdoc(vcomment),
                        NIL if last else HARDLINE,
                    ]),
                    # Put comment above the value
                    # on its own line
                    when_broken=concat([
                        nest(
                            ctx.indent,
                            concat([
                                HARDLINE,
                                commentdoc(vcomment),
                                HARDLINE,
                                # Rerender vdoc with plain multiline strategy,
                                # since we already have an indentation.
                                pretty_python_value(
                                    v,
                                    ctx=(
                                        ctx
                                        .nested_call()
                                        .use_multiline_strategy(MULTILINE_STRATEGY_PLAIN)
                                    ),
                                ),
                                COMMA if not last else NIL,
                                HARDLINE if not last else NIL
                            ])
                        ),
                    ])
                )
            )
        else:
            vcommented = concat([
                vdoc,
                COMMA if not last else NIL,
                LINE if not last else NIL
            ])
        parts.append(
            concat([
                kcommented,
                concat([COLON, ' ']),
                vcommented
            ])
        )
    if trailing_comment:
        parts.append(concat([
            HARDLINE,
            commentdoc(trailing_comment)
        ]))
    doc = bracket(
        ctx,
        LBRACE,
        concat(parts),
        RBRACE,
    )
    # More than two pairs, or any comment, forces multi-line output.
    if len(pairs) > 2 or has_comment:
        doc = always_break(doc)
    else:
        doc = group(doc)
    if is_native_type:
        return doc
    if not parts:
        return pretty_call_alt(ctx, constructor)
    return build_fncall(
        ctx,
        general_identifier(constructor),
        argdocs=(doc, ),
        hug_sole_arg=True
    )
# Sentinels for rendering non-finite floats as evaluable calls
# (e.g. float('inf')) in pretty_float below.
INF_FLOAT = float('inf')
NEG_INF_FLOAT = float('-inf')
@register_pretty(float)
def pretty_float(value, ctx):
    """Pretty printer for floats and float subclasses. Non-finite
    values render as evaluable calls such as ``float('inf')``."""
    constructor = type(value)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, constructor, args=(..., ))
    if value == INF_FLOAT:
        return pretty_call_alt(ctx, constructor, args=('inf', ))
    if value == NEG_INF_FLOAT:
        return pretty_call_alt(ctx, constructor, args=('-inf', ))
    if math.isnan(value):
        return pretty_call_alt(ctx, constructor, args=('nan', ))
    literal = annotate(Token.NUMBER_FLOAT, repr(value))
    if constructor is not float:
        # Subclass: render as SubclassName(1.5)
        return build_fncall(ctx, general_identifier(constructor), argdocs=(literal, ))
    return literal
@register_pretty(int)
def pretty_int(value, ctx):
    """Pretty printer for ints and int subclasses."""
    constructor = type(value)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, constructor, args=(..., ))
    literal = annotate(Token.NUMBER_INT, repr(value))
    if constructor is not int:
        # Subclass: render as SubclassName(42)
        return build_fncall(ctx, general_identifier(constructor), argdocs=(literal, ))
    return literal
@register_pretty(type(...))
def pretty_ellipsis(value, ctx):
    """Pretty printer for the Ellipsis singleton (rendered as ``...``)."""
    return ELLIPSIS
@register_pretty(bool)
def pretty_bool(value, ctx):
    """Pretty printer for bools and bool subclasses."""
    constructor = type(value)
    keyword = 'True' if value else 'False'
    doc = annotate(Token.KEYWORD_CONSTANT, keyword)
    if constructor is not bool:
        # Subclass: render as SubclassName(True)
        return build_fncall(
            ctx,
            general_identifier(constructor),
            argdocs=(doc, )
        )
    return doc
# Module-level constant Doc for rendering None; built once and reused.
NONE_DOC = annotate(Token.KEYWORD_CONSTANT, 'None')
@register_pretty(type(None))
def pretty_none(value, ctx):
    """Pretty printer for None."""
    return NONE_DOC
# Quote characters in both text and bytes form, used when deciding how
# to quote and escape string literals.
SINGLE_QUOTE_TEXT = "'"
SINGLE_QUOTE_BYTES = b"'"
DOUBLE_QUOTE_TEXT = '"'
DOUBLE_QUOTE_BYTES = b'"'


def determine_quote_strategy(s):
    """Choose the quote character for rendering the str/bytes ``s``.

    Prefers single quotes; switches to double quotes only when that
    strictly reduces the number of quote characters needing escapes.
    Always returns a ``str`` quote, even for ``bytes`` input.
    """
    if isinstance(s, str):
        single, double = SINGLE_QUOTE_TEXT, DOUBLE_QUOTE_TEXT
    else:
        single, double = SINGLE_QUOTE_BYTES, DOUBLE_QUOTE_BYTES
    has_single = single in s
    has_double = double in s
    if not has_single:
        return SINGLE_QUOTE_TEXT
    if not has_double:
        return DOUBLE_QUOTE_TEXT
    # Both kinds occur: escape whichever there are fewer of, preferring
    # single quotes on a tie.
    if s.count(single) <= s.count(double):
        return SINGLE_QUOTE_TEXT
    return DOUBLE_QUOTE_TEXT


def escape_str_for_quote(use_quote, s):
    """Escape ``s`` for embedding between ``use_quote`` characters.

    Delegates the escaping to ``repr`` and then rewrites quote escapes
    if ``repr`` chose a different quote character than requested.
    """
    quoted = repr(s)
    repr_quote = quoted[-1]
    # repr output may carry a prefix (e.g. b'...'); locate the opening
    # quote rather than assuming it is the first character.
    opening = quoted.find(repr_quote)
    body = quoted[opening + 1:-1]
    if repr_quote == use_quote:
        # repr produced the quotes we wanted - escaping is correct.
        return body
    if use_quote == SINGLE_QUOTE_TEXT:
        # repr used double quotes: unescape them, escape single quotes.
        return (
            body
            .replace('\\"', DOUBLE_QUOTE_TEXT)
            .replace(SINGLE_QUOTE_TEXT, "\\'")
        )
    # repr used single quotes: unescape them, escape double quotes.
    return (
        body
        .replace("\\'", SINGLE_QUOTE_TEXT)
        .replace(DOUBLE_QUOTE_TEXT, '\\"')
    )
# Matches any Python string-literal escape sequence (as produced by
# repr) so escapes can be highlighted separately from literal text:
# simple escapes (\\ \a \b \f \n \r \t \v \" \'), named (\N{...}),
# unicode (\uXXXX, \UXXXXXXXX), hex (\xXX) and octal (\1..\777).
# The single capturing group makes re.split keep the matches.
STR_LITERAL_ESCAPES = re.compile(
    r'''((?:\\[\\abfnrtv"'])|'''
    r'(?:\\N\{.*?\})|'
    r'(?:\\u[a-fA-F0-9]{4})|'
    r'(?:\\U[a-fA-F0-9]{8})|'
    r'(?:\\x[a-fA-F0-9]{2})|'
    r'(?:\\[0-7]{1,3}))'
)
def highlight_escapes(s):
    """Annotate escape sequences inside the already-escaped string ``s``
    so they can be highlighted distinctly from the surrounding literal
    text."""
    if not s:
        return NIL
    parts = STR_LITERAL_ESCAPES.split(s)
    # Splitting on a capturing group alternates plain and matching
    # substrings; determine which kind comes first.
    is_escape = bool(STR_LITERAL_ESCAPES.match(parts[0]))
    docs = []
    for part in parts:
        if part:
            token = (
                Token.STRING_ESCAPE
                if is_escape
                else Token.LITERAL_STRING
            )
            docs.append(annotate(token, part))
        is_escape = not is_escape
    return concat(docs)
def pretty_single_line_str(s, indent, use_quote=None):
    """Render ``s`` (str or bytes) as a quoted literal on a single line,
    with escape sequences highlighted. A quote character is chosen
    automatically unless ``use_quote`` is given; bytes get a ``b``
    prefix."""
    if use_quote is None:
        use_quote = determine_quote_strategy(s)
    body = highlight_escapes(escape_str_for_quote(use_quote, s))
    literal = annotate(
        Token.LITERAL_STRING,
        concat([
            use_quote,
            body,
            use_quote
        ])
    )
    if isinstance(s, bytes):
        prefix = annotate(Token.STRING_AFFIX, 'b')
    else:
        prefix = ''
    return concat([prefix, literal])
def split_at(idx, sequence):
    """Split ``sequence`` into its first ``idx`` items and the rest."""
    head = sequence[:idx]
    tail = sequence[idx:]
    return (head, tail)
def escaped_len(s, use_quote):
    """Length of ``s`` once escaped for ``use_quote`` (quotes excluded)."""
    return len(escape_str_for_quote(use_quote, s))
def str_to_lines(max_len, use_quote, s, pattern=None):
    """Generator: break the str/bytes ``s`` into chunks whose *escaped*
    length is at most ``max_len``, preferring to break at whitespace
    (or, failing that, at non-word characters).

    :param max_len: maximum escaped length of each yielded chunk
    :param use_quote: quote character the string will be rendered with;
        affects escaped lengths
    :param s: the string or bytes to split
    :param pattern: optional compiled regex (with a capturing group) to
        split on, instead of the default whitespace/non-word patterns
    """
    assert max_len > 0, "max_len must be positive"
    # Fast path: short enough to fit on one line (empty yields nothing).
    if len(s) <= max_len:
        if s:
            yield s
        return
    if pattern is None:
        if isinstance(s, str):
            whitespace_pattern = WHITESPACE_PATTERN_TEXT
            nonword_pattern = NONWORD_PATTERN_TEXT
        else:
            assert isinstance(s, bytes)
            whitespace_pattern = WHITESPACE_PATTERN_BYTES
            nonword_pattern = NONWORD_PATTERN_BYTES
        alternating_words_ws = whitespace_pattern.split(s)
        pattern = whitespace_pattern
        if len(alternating_words_ws) <= 1:
            # no whitespace: try splitting with nonword pattern.
            alternating_words_ws = nonword_pattern.split(s)
            pattern = nonword_pattern
    else:
        alternating_words_ws = pattern.split(s)
    if isinstance(s, str):
        empty = ''
    else:
        assert isinstance(s, bytes)
        empty = b''
    starts_with_whitespace = bool(pattern.match(alternating_words_ws[0]))
    # List[Tuple[str, bool]]
    # The boolean associated with each part indicates if it is a
    # whitespace/non-word part or not.
    tagged_alternating = iter(
        zip(
            alternating_words_ws,
            cycle([starts_with_whitespace, not starts_with_whitespace])
        )
    )
    # State machine: next_part is the part currently being placed;
    # curr_line_parts/curr_line_len accumulate the line in progress.
    next_part = None
    next_is_whitespace = None
    curr_line_parts = []
    curr_line_len = 0
    while True:
        if not next_part:
            try:
                next_part, next_is_whitespace = next(tagged_alternating)
            except StopIteration:
                break
            if not next_part:
                # Skip empty split artifacts.
                continue
        # We think of the current line as including next_part,
        # but as an optimization we don't append to curr_line_parts,
        # as we often would have to pop it back out.
        next_escaped_len = escaped_len(next_part, use_quote)
        curr_line_len += next_escaped_len
        if curr_line_len == max_len:
            # Exact fit: either close the line before this word, or
            # include it and close.
            if not next_is_whitespace and len(curr_line_parts) > 1:
                yield empty.join(curr_line_parts)
                curr_line_parts = []
                curr_line_len = 0
                # Leave next_part and next_is_whitespace as is
                # to be processed on next iteration
            else:
                yield empty.join(chain(curr_line_parts, [next_part]))
                curr_line_parts = []
                curr_line_len = 0
                next_part = None
                next_is_whitespace = None
        elif curr_line_len > max_len:
            # Overflow: close the line before this word if possible,
            # otherwise hard-split the part itself at the remaining width.
            if not next_is_whitespace and curr_line_parts:
                yield empty.join(curr_line_parts)
                curr_line_parts = []
                curr_line_len = 0
                # Leave next_part and next_is_whitespace as is
                # to be processed on next iteration
                continue
            remaining_len = max_len - (curr_line_len - next_escaped_len)
            this_line_part, next_line_part = split_at(max(remaining_len, 0), next_part)
            if this_line_part:
                curr_line_parts.append(this_line_part)
            if curr_line_parts:
                yield empty.join(curr_line_parts)
            curr_line_parts = []
            curr_line_len = 0
            if next_line_part:
                next_part = next_line_part
            else:
                next_part = None
        else:
            # Fits with room to spare: keep accumulating.
            curr_line_parts.append(next_part)
            next_part = None
            next_is_whitespace = None
    if curr_line_parts:
        yield empty.join(curr_line_parts)
@register_pretty(str)
@register_pretty(bytes)
def pretty_str(s, ctx, split_pattern=None):
    """Pretty printer for str and bytes (and their subclasses).

    Returns a contextual Doc: the choice between a single-line literal
    and a multi-line concatenation of adjacent literals is made at
    layout time, when the available width is known. How a multi-line
    string is wrapped depends on ``ctx.multiline_strategy``.

    :param split_pattern: optional compiled regex used to pick the line
        break points (passed through to ``str_to_lines``).
    """
    # Subclasses of str/bytes
    # will be printed as StrSubclass('the actual string')
    constructor = type(s)
    is_native_type = constructor in (str, bytes)
    if ctx.depth_left == 0:
        return pretty_call_alt(ctx, constructor, args=(..., ))
    multiline_strategy = ctx.multiline_strategy
    prettyprinter_indent = ctx.indent
    def evaluator(indent, column, page_width, ribbon_width):
        # Called by the layout algorithm with the actual position.
        nonlocal multiline_strategy
        columns_left_in_line = page_width - column
        columns_left_in_ribbon = indent + ribbon_width - column
        available_width = min(columns_left_in_line, columns_left_in_ribbon)
        singleline_str_chars = len(s) + len('""')
        flat_version = pretty_single_line_str(s, prettyprinter_indent)
        if singleline_str_chars <= available_width:
            if is_native_type:
                return flat_version
            return build_fncall(ctx, constructor, argdocs=[flat_version])
        # multiline string
        each_line_starts_on_col = indent
        each_line_ends_on_col = min(page_width, each_line_starts_on_col + ribbon_width)
        each_line_max_str_len = max(
            each_line_ends_on_col - each_line_starts_on_col - 2,
            # If we're printing the string inside a highly nested data
            # structure, we may naturally run out of available width.
            # In these cases, we need to give some space for printing
            # such that we don't get stuck in an infinite loop when
            # str_to_lines is called.
            8 + len('""')
        )
        use_quote = determine_quote_strategy(s)
        lines = list(str_to_lines(
            max_len=each_line_max_str_len,
            use_quote=use_quote,
            s=s,
            pattern=split_pattern,
        ))
        if len(lines) == 1:
            return flat_version
        # One quoted literal per line; adjacent literals concatenate.
        parts = intersperse(
            HARDLINE,
            (
                pretty_single_line_str(
                    line,
                    indent=prettyprinter_indent,
                    use_quote=use_quote,
                )
                for line in lines
            )
        )
        if not is_native_type:
            # Inside Subclass(...) parentheses no extra wrapping is needed.
            multiline_strategy = MULTILINE_STRATEGY_PLAIN
        if multiline_strategy == MULTILINE_STRATEGY_PLAIN:
            res = always_break(concat(parts))
            if is_native_type:
                return res
            return build_fncall(ctx, constructor, argdocs=[res])
        elif multiline_strategy == MULTILINE_STRATEGY_HANG:
            return always_break(
                nest(
                    prettyprinter_indent,
                    concat(parts)
                )
            )
        else:
            if multiline_strategy == MULTILINE_STRATEGY_PARENS:
                left_paren, right_paren = LPAREN, RPAREN
            else:
                assert multiline_strategy == MULTILINE_STRATEGY_INDENTED
                left_paren, right_paren = '', ''
            return always_break(
                concat([
                    left_paren,
                    nest(
                        prettyprinter_indent,
                        concat([
                            HARDLINE,
                            *parts,
                        ])
                    ),
                    (
                        HARDLINE
                        if multiline_strategy == MULTILINE_STRATEGY_PARENS
                        else NIL
                    ),
                    right_paren
                ])
            )
    return contextual(evaluator)
def _pretty_recursion(value):
return '<Recursion on {} with id={}>'.format(
type(value).__name__,
id(value)
)
def python_to_sdocs(
    value,
    indent,
    width,
    depth,
    ribbon_width,
    max_seq_len,
    sort_dict_keys
):
    """Convert a Python ``value`` into laid-out SDocs ready for rendering.

    :param value: the Python value to pretty print
    :param indent: indentation width in characters
    :param width: target page width in characters
    :param depth: maximum recursion depth into nested data, or ``None``
        for unlimited
    :param ribbon_width: target width of the non-indent part of a line
    :param max_seq_len: maximum number of sequence elements rendered
    :param sort_dict_keys: whether dict keys are sorted in the output
    """
    if depth is None:
        depth = float('inf')
    doc = pretty_python_value(
        value,
        ctx=PrettyContext(
            indent=indent,
            depth_left=depth,
            visited=set(),
            max_seq_len=max_seq_len,
            sort_dict_keys=sort_dict_keys
        )
    )
    # A top-level comment annotation: place it after the value when the
    # whole thing fits on one line, otherwise on the line above.
    if is_commented(doc):
        doc = group(
            flat_choice(
                when_flat=concat([
                    doc,
                    ' ',
                    commentdoc(doc.annotation.value),
                ]),
                when_broken=concat([
                    commentdoc(doc.annotation.value),
                    HARDLINE,
                    doc
                ])
            )
        )
    # The ribbon can never be wider than the page.
    ribbon_frac = min(1.0, ribbon_width / width)
    return layout_smart(doc, width=width, ribbon_frac=ribbon_frac)
|
tommikaikkonen/prettyprinter
|
prettyprinter/prettyprinter.py
|
PrettyContext.assoc
|
python
|
def assoc(self, key, value):
return self._replace(user_ctx={
**self.user_ctx,
key: value,
})
|
Return a modified PrettyContext with ``key`` set to ``value``
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/prettyprinter.py#L297-L304
|
[
"def _replace(self, **kwargs):\n passed_keys = set(kwargs.keys())\n fieldnames = type(self).__slots__\n assert passed_keys.issubset(set(fieldnames))\n return PrettyContext(\n **{\n k: (\n kwargs[k]\n if k in passed_keys\n else getattr(self, k)\n )\n for k in fieldnames\n }\n )\n"
] |
class PrettyContext:
    """
    An immutable object used to track context during construction of
    layout primitives. An instance of PrettyContext is passed to every
    pretty printer definition.
    As a performance optimization, the ``visited`` set is implemented
    as mutable.
    """
    __slots__ = (
        'indent',
        'depth_left',
        'visited',
        'multiline_strategy',
        'max_seq_len',
        'sort_dict_keys',
        'user_ctx'
    )
    def __init__(
        self,
        indent,
        depth_left,
        visited=None,
        multiline_strategy=MULTILINE_STRATEGY_PLAIN,
        max_seq_len=1000,
        sort_dict_keys=False,
        user_ctx=None
    ):
        # Indentation width in characters.
        self.indent = indent
        # Remaining recursion budget (may be float('inf')).
        self.depth_left = depth_left
        # Strategy for wrapping multi-line strings.
        self.multiline_strategy = multiline_strategy
        self.max_seq_len = max_seq_len
        self.sort_dict_keys = sort_dict_keys
        if visited is None:
            visited = set()
        # ids of values currently being rendered (recursion detection);
        # deliberately shared (mutable) across derived contexts.
        self.visited = visited
        # Arbitrary user-supplied key/value data.
        self.user_ctx = user_ctx or {}
    def _replace(self, **kwargs):
        """Return a new PrettyContext with the given fields replaced and
        all others copied from ``self``."""
        passed_keys = set(kwargs.keys())
        fieldnames = type(self).__slots__
        assert passed_keys.issubset(set(fieldnames))
        return PrettyContext(
            **{
                k: (
                    kwargs[k]
                    if k in passed_keys
                    else getattr(self, k)
                )
                for k in fieldnames
            }
        )
    def use_multiline_strategy(self, strategy):
        """Return a copy with ``multiline_strategy`` set to ``strategy``."""
        return self._replace(multiline_strategy=strategy)
    def set(self, key, value):
        """Deprecated alias for ``assoc``.

        NOTE(review): ``assoc`` is not visible in this chunk of the file;
        it is expected to be defined on this class elsewhere.
        """
        warnings.warn(
            "PrettyContext.set will be deprecated in the future in favor of "
            "renamed PrettyPrinter.assoc. You can fix this warning by "
            "changing .set method calls to .assoc",
            PendingDeprecationWarning
        )
        return self.assoc(key, value)
    def get(self, key, default=None):
        """Look up ``key`` in the user-supplied context data."""
        return self.user_ctx.get(key, default)
    def nested_call(self):
        """Return a copy with one less unit of recursion depth budget."""
        return self._replace(depth_left=self.depth_left - 1)
    def start_visit(self, value):
        # Mark value as currently being rendered (recursion detection).
        self.visited.add(id(value))
    def end_visit(self, value):
        # Unmark value once its rendering is complete.
        self.visited.remove(id(value))
    def is_visited(self, value):
        """Return True if ``value`` is currently being rendered."""
        return id(value) in self.visited
|
tommikaikkonen/prettyprinter
|
prettyprinter/doc.py
|
align
|
python
|
def align(doc):
    """Aligns each new line in ``doc`` with the first new line, i.e.
    with the output column where ``doc`` starts."""
    validate_doc(doc)
    def evaluator(indent, column, page_width, ribbon_width):
        # Nest by the distance from the current indent to the current
        # column so continuation lines line up under the first line.
        return Nest(column - indent, doc)
    return contextual(evaluator)
|
Aligns each new line in ``doc`` with the first new line.
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/doc.py#L57-L64
|
[
"def validate_doc(doc):\n if not isinstance(doc, Doc) and not isinstance(doc, str):\n raise ValueError('Invalid doc: {}'.format(repr(doc)))\n\n return doc\n",
"def contextual(fn):\n \"\"\"Returns a Doc that is lazily evaluated when deciding the layout.\n\n ``fn`` must be a function that accepts four arguments:\n\n - ``indent`` (``int``): the current indentation level, 0 or more\n - ``column`` (``int``) the current output column in the output line\n - ``page_width`` (``int``) the requested page width (character count)\n - ``ribbon_width`` (``int``) the requested ribbon width (character count)\n \"\"\"\n return Contextual(fn)\n"
] |
from .doctypes import ( # noqa
AlwaysBreak,
Concat,
Contextual,
Doc,
FlatChoice,
Fill,
Group,
Nest,
Annotated,
NIL,
LINE,
SOFTLINE,
HARDLINE,
)
from .utils import intersperse # noqa
def validate_doc(doc):
    """Return ``doc`` unchanged when it is a Doc or a plain string;
    raise ValueError for anything else."""
    if isinstance(doc, Doc) or isinstance(doc, str):
        return doc
    raise ValueError('Invalid doc: {}'.format(repr(doc)))
def group(doc):
    """Mark ``doc`` so the layout algorithm first attempts to fit it on
    a single line (using the ``when_flat`` branch of any ``FlatChoice``
    inside), falling back to a broken layout when the constraints don't
    allow it."""
    checked = validate_doc(doc)
    return Group(checked)
def concat(docs):
    """Concatenate an iterable of documents into a single Doc."""
    validated = map(validate_doc, docs)
    return Concat(validated)
def annotate(annotation, doc):
    """Annotates ``doc`` with the arbitrary value ``annotation``.

    The annotation is carried alongside the doc (e.g. highlighting
    tokens elsewhere in this package are attached this way).
    """
    return Annotated(doc, annotation)
def contextual(fn):
    """Returns a Doc that is lazily evaluated when deciding the layout.

    ``fn`` must be a function that accepts four arguments:

    - ``indent`` (``int``): the current indentation level, 0 or more
    - ``column`` (``int``) the current output column in the output line
    - ``page_width`` (``int``) the requested page width (character count)
    - ``ribbon_width`` (``int``) the requested ribbon width (character count)

    and returns the Doc to lay out under those conditions.
    """
    return Contextual(fn)
def hang(i, doc):
    """Nest ``doc`` by ``i`` and align its continuation lines with the
    current output column (a "hanging" indent)."""
    nested = Nest(i, validate_doc(doc))
    return align(nested)
def nest(i, doc):
    """Wrap the validated ``doc`` in a Nest with indent ``i``."""
    return Nest(i, validate_doc(doc))
def fill(docs):
    """Wrap an iterable of documents in a Fill, validating each one."""
    return Fill(map(validate_doc, docs))
def always_break(doc):
    """Instructs the layout algorithm that ``doc`` must be
    broken to multiple lines. This instruction propagates
    to all higher levels in the layout, but nested Docs
    may still be laid out flat.

    :returns: an ``AlwaysBreak`` wrapper around the validated doc.
    """
    return AlwaysBreak(validate_doc(doc))
def flat_choice(when_broken, when_flat):
    """Offer the layout algorithm two renderings: ``when_flat`` is used
    if the document fits onto a single line, ``when_broken`` when it
    had to be broken into multiple lines."""
    broken = validate_doc(when_broken)
    flat = validate_doc(when_flat)
    return FlatChoice(broken, flat)
|
tommikaikkonen/prettyprinter
|
prettyprinter/layout.py
|
smart_fitting_predicate
|
python
|
def smart_fitting_predicate(
    page_width,
    ribbon_frac,
    min_nesting_level,
    max_width,
    triplestack
):
    """Lookahead until the last doc at the current indentation level.
    Pretty, but not as fast.

    Returns ``True`` if the docs on ``triplestack`` fit within
    ``max_width`` characters. Unlike ``fast_fitting_predicate``, the
    scan continues past hard line breaks as long as the next line is
    indented deeper than ``min_nesting_level``.

    :param page_width: requested page width (character count)
    :param ribbon_frac: fraction of the page width usable after indentation
    :param min_nesting_level: indentation level at which a hard line
        break terminates the lookahead
    :param max_width: character budget for the current line
    :param triplestack: stack of ``(indent, mode, doc)`` triples,
        topmost element last; consumed destructively
    """
    chars_left = max_width

    while chars_left >= 0:
        if not triplestack:
            return True

        indent, mode, doc = triplestack.pop()

        if doc is NIL:
            continue
        elif isinstance(doc, str):
            chars_left -= len(doc)
        elif isinstance(doc, Concat):
            # Recursive call in Strictly Pretty: docs within Concat
            # are processed in order, with keeping the current
            # indentation and mode.
            # We want the leftmost element at the top of the stack,
            # so we append the concatenated documents in reverse order.
            triplestack.extend(
                (indent, mode, doc)
                for doc in reversed(doc.docs)
            )
        elif isinstance(doc, Annotated):
            triplestack.append((indent, mode, doc.doc))
        elif isinstance(doc, Fill):
            # Same as the Concat case.
            triplestack.extend(
                (indent, mode, doc)
                for doc in reversed(doc.docs)
            )
        elif isinstance(doc, Nest):
            # Nest is a combination of an indent and a doc.
            # Increase indentation, then add the doc for processing.
            triplestack.append((indent + doc.indent, mode, doc.doc))
        elif isinstance(doc, AlwaysBreak):
            return False
        elif doc is HARDLINE:
            # In the fast algorithm, when we see a line,
            # we return True. Here, as long as the minimum indentation
            # level is satisfied, we continue processing the next line.
            # This causes the longer runtime.
            if indent > min_nesting_level:
                chars_left = page_width - indent
            else:
                return True
        elif isinstance(doc, FlatChoice):
            if mode is FLAT_MODE:
                triplestack.append((indent, mode, doc.when_flat))
            elif mode is BREAK_MODE:
                triplestack.append((indent, mode, doc.when_broken))
            else:
                raise ValueError
        elif isinstance(doc, Group):
            # Group just changes the mode.
            triplestack.append((indent, FLAT_MODE, doc.doc))
        elif isinstance(doc, Contextual):
            # Evaluate the lazy doc with the current layout context and
            # push the normalized result back for processing.
            ribbon_width = max(0, min(page_width, round(ribbon_frac * page_width)))
            evaluated_doc = doc.fn(
                indent=indent,
                column=max_width - chars_left,
                page_width=page_width,
                ribbon_width=ribbon_width,
            )
            normalized = normalize_doc(evaluated_doc)
            triplestack.append((indent, mode, normalized))
        elif isinstance(doc, SAnnotationPush):
            # Annotation markers occupy no horizontal space.
            continue
        elif isinstance(doc, SAnnotationPop):
            continue
        else:
            raise ValueError((indent, mode, doc))
    return False
|
Lookahead until the last doc at the current indentation level.
Pretty, but not as fast.
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/layout.py#L124-L208
|
[
"def normalize_doc(doc):\n if isinstance(doc, str):\n if doc == '':\n return NIL\n return doc\n return doc.normalize()\n"
] |
"""
The layout algorithm here was inspired by the following
papers and libraries:
- Wadler, P. (1998). A prettier printer
https://homepages.inf.ed.ac.uk/wadler/papers/prettier/prettier.pdf
- Lindig, C. (2000) Strictly Pretty
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.34.2200
- Extensions to the Wadler pretty printer by Daniel Leijen in the
Haskell package 'wl-pprint'
https://hackage.haskell.org/package/wl-pprint-1.2/docs/Text-PrettyPrint-Leijen.html
- The Haskell 'prettyprinter' package, which builds on top of the
'ansi-wl-pprint' package.
https://hackage.haskell.org/package/prettyprinter
- The JavaScript Prettier library
https://github.com/prettier/prettier
"""
from copy import copy
from .doctypes import (
NIL,
HARDLINE,
AlwaysBreak,
Annotated,
Concat,
Contextual,
FlatChoice,
Fill,
Group,
Nest,
normalize_doc,
)
from .sdoctypes import (
SLine,
SAnnotationPop,
SAnnotationPush,
)
# Layout modes threaded through the (indent, mode, doc) triples:
# BREAK_MODE renders Groups over multiple lines, FLAT_MODE on one line.
BREAK_MODE = 0
FLAT_MODE = 1
def fast_fitting_predicate(
    page_width,
    ribbon_frac,
    min_nesting_level,  # Ignored.
    max_width,
    triplestack
):
    """
    One element lookahead. Fast, but not the prettiest.

    Returns ``True`` if the docs on ``triplestack`` fit within
    ``max_width`` characters, stopping the scan at the first hard line
    break.

    :param page_width: requested page width (character count); used to
        evaluate ``Contextual`` docs
    :param ribbon_frac: fraction of the page width usable after
        indentation; used to evaluate ``Contextual`` docs
    :param min_nesting_level: unused; kept for signature compatibility
        with ``smart_fitting_predicate``
    :param max_width: character budget for the current line
    :param triplestack: stack of ``(indent, mode, doc)`` triples,
        topmost element last; consumed destructively
    """
    chars_left = max_width

    while chars_left >= 0:
        if not triplestack:
            return True

        indent, mode, doc = triplestack.pop()

        if doc is NIL:
            continue
        elif isinstance(doc, str):
            chars_left -= len(doc)
        elif isinstance(doc, Concat):
            # Recursive call in Strictly Pretty: docs within Concat
            # are processed in order, with keeping the current
            # indentation and mode.
            # We want the leftmost element at the top of the stack,
            # so we append the concatenated documents in reverse order.
            triplestack.extend(
                (indent, mode, doc)
                for doc in reversed(doc.docs)
            )
        elif isinstance(doc, Annotated):
            triplestack.append((indent, mode, doc.doc))
        elif isinstance(doc, Fill):
            # Same as the Concat case.
            triplestack.extend(
                (indent, mode, doc)
                for doc in reversed(doc.docs)
            )
        elif isinstance(doc, Nest):
            # Nest is a combination of an indent and a doc.
            # Increase indentation, then add the doc for processing.
            triplestack.append((indent + doc.indent, mode, doc.doc))
        elif isinstance(doc, AlwaysBreak):
            # An AlwaysBreak doc can never fit on one line, by definition.
            # (A dead statement that re-pushed the doc after this return
            # was removed; it was unreachable.)
            return False
        elif doc is HARDLINE:
            return True
        elif isinstance(doc, FlatChoice):
            if mode is FLAT_MODE:
                triplestack.append((indent, mode, doc.when_flat))
            elif mode is BREAK_MODE:
                triplestack.append((indent, mode, doc.when_broken))
            else:
                raise ValueError
        elif isinstance(doc, Group):
            # Group just changes the mode.
            triplestack.append((indent, FLAT_MODE, doc.doc))
        elif isinstance(doc, Contextual):
            ribbon_width = max(0, min(page_width, round(ribbon_frac * page_width)))
            evaluated_doc = doc.fn(
                indent=indent,
                column=max_width - chars_left,
                page_width=page_width,
                ribbon_width=ribbon_width,
            )
            normalized = normalize_doc(evaluated_doc)
            triplestack.append((indent, mode, normalized))
        elif isinstance(doc, SAnnotationPush):
            continue
        elif isinstance(doc, SAnnotationPop):
            continue
        else:
            raise ValueError((indent, mode, doc))
    return False
def best_layout(
    doc,
    width,
    ribbon_frac,
    fitting_predicate,
    outcol=0,
    mode=BREAK_MODE
):
    """Lazily lay out ``doc`` into a stream of SDocs (``str`` fragments,
    ``SLine`` breaks, and ``SAnnotationPush``/``SAnnotationPop`` markers).

    :param doc: the Doc to lay out
    :param width: soft maximum number of output columns
    :param ribbon_frac: fraction of ``width`` usable after indentation
    :param fitting_predicate: callable deciding whether a Group fits on
        a single line (``fast_fitting_predicate`` or
        ``smart_fitting_predicate``)
    :param outcol: output column at which rendering starts
    :param mode: initial layout mode (``BREAK_MODE`` or ``FLAT_MODE``)
    """
    normalized = normalize_doc(doc)
    ribbon_width = max(0, min(width, round(ribbon_frac * width)))

    # The Strictly Pretty paper shows a recursive algorithm.
    # This is the stack-and-loop version of it.
    triplestack = [(outcol, mode, normalized)]

    while triplestack:
        indent, mode, doc = triplestack.pop()

        if doc is NIL:
            # Nothing to do here.
            continue

        if doc is HARDLINE:
            yield SLine(indent)
            outcol = indent
        elif isinstance(doc, str):
            yield doc
            outcol += len(doc)
        elif isinstance(doc, Concat):
            # Add the docs to the stack and process them.
            # The first doc in the concatenation must
            # end up at the top of the stack, hence the reversing.
            triplestack.extend(
                (indent, mode, child)
                for child in reversed(doc.docs)
            )
        elif isinstance(doc, Contextual):
            # Evaluate the lazy doc against the current layout state.
            evaluated_doc = doc.fn(
                indent=indent,
                column=outcol,
                page_width=width,
                ribbon_width=ribbon_width,
            )
            normalized = normalize_doc(evaluated_doc)
            triplestack.append((indent, mode, normalized))
        elif isinstance(doc, Annotated):
            yield SAnnotationPush(doc.annotation)
            # Usually, the triplestack is solely a stack of docs.
            # SAnnotationPop is a special case: when we find an annotated doc,
            # we output the SAnnotationPush SDoc directly. The equivalent
            # SAnnotationPop must be output after all the nested docs have been
            # processed. An easy way to do this is to add the SAnnotationPop
            # directly to the stack and output it when we see it.
            triplestack.append((indent, mode, SAnnotationPop(doc.annotation)))
            triplestack.append((indent, mode, doc.doc))
        elif isinstance(doc, FlatChoice):
            if mode is BREAK_MODE:
                triplestack.append((indent, mode, doc.when_broken))
            elif mode is FLAT_MODE:
                triplestack.append((indent, mode, doc.when_flat))
            else:
                raise ValueError
        elif isinstance(doc, Nest):
            # Increase indentation and process the nested doc.
            triplestack.append((indent + doc.indent, mode, doc.doc))
        elif isinstance(doc, Group):
            new_triplestack = copy(triplestack)
            # The line will consist of the Grouped doc, as well as the rest
            # of the docs in the stack.
            new_triplestack.append((indent, FLAT_MODE, doc.doc))

            min_nesting_level = min(outcol, indent)
            columns_left_in_line = width - outcol
            columns_left_in_ribbon = indent + ribbon_width - outcol
            available_width = min(columns_left_in_line, columns_left_in_ribbon)

            if fitting_predicate(
                page_width=width,
                ribbon_frac=ribbon_frac,
                min_nesting_level=min_nesting_level,
                max_width=available_width,
                triplestack=new_triplestack
            ):
                # This group will fit on a single line. Continue processing
                # the grouped doc in flat mode.
                triplestack.append((indent, FLAT_MODE, doc.doc))
            else:
                triplestack.append((indent, BREAK_MODE, doc.doc))
        elif isinstance(doc, Fill):
            # docs must be alternating whitespace
            docs = doc.docs

            if not docs:
                continue

            first_doc = docs[0]
            flat_content_triple = (indent, FLAT_MODE, first_doc)
            broken_content_triple = (indent, BREAK_MODE, first_doc)

            # this is just copy pasted from the group case...
            min_nesting_level = min(outcol, indent)
            columns_left_in_line = width - outcol
            columns_left_in_ribbon = indent + ribbon_width - outcol
            available_width = min(columns_left_in_line, columns_left_in_ribbon)

            does_fit = fast_fitting_predicate(
                page_width=width,
                ribbon_frac=ribbon_frac,
                min_nesting_level=min_nesting_level,
                max_width=available_width,
                triplestack=[flat_content_triple]
            )

            if len(docs) == 1:
                if does_fit:
                    triplestack.append(flat_content_triple)
                else:
                    triplestack.append(broken_content_triple)
                continue

            whitespace = docs[1]
            flat_whitespace_triple = (indent, FLAT_MODE, whitespace)
            broken_whitespace_triple = (indent, BREAK_MODE, whitespace)

            if len(docs) == 2:
                if does_fit:
                    triplestack.append(flat_whitespace_triple)
                    triplestack.append(flat_content_triple)
                else:
                    triplestack.append(broken_whitespace_triple)
                    triplestack.append(broken_content_triple)
                continue

            remaining = docs[2:]
            remaining_triple = (indent, mode, Fill(remaining))

            fst_and_snd_content_flat_triple = (indent, FLAT_MODE, Concat(docs[:2]))
            fst_and_snd_content_does_fit = fast_fitting_predicate(
                page_width=width,
                ribbon_frac=ribbon_frac,
                min_nesting_level=min_nesting_level,
                max_width=available_width,
                triplestack=[fst_and_snd_content_flat_triple]
            )

            if fst_and_snd_content_does_fit:
                triplestack.append(remaining_triple)
                triplestack.append(flat_whitespace_triple)
                triplestack.append(flat_content_triple)
            elif does_fit:
                triplestack.append(remaining_triple)
                triplestack.append(broken_whitespace_triple)
                triplestack.append(flat_content_triple)
            else:
                triplestack.append(remaining_triple)
                triplestack.append(broken_whitespace_triple)
                triplestack.append(broken_content_triple)
        elif isinstance(doc, AlwaysBreak):
            triplestack.append((indent, BREAK_MODE, doc.doc))
        elif isinstance(doc, SAnnotationPop):
            yield doc
        else:
            raise ValueError((indent, mode, doc))
def layout_smart(doc, width=79, ribbon_frac=0.9):
    """Lay out ``doc`` with the slower, prettier fitting predicate."""
    return best_layout(
        doc, width, ribbon_frac, fitting_predicate=smart_fitting_predicate
    )
def layout_fast(doc, width=79, ribbon_frac=0.9):
    """Lay out ``doc`` with the fast one-element-lookahead predicate."""
    return best_layout(
        doc, width, ribbon_frac, fitting_predicate=fast_fitting_predicate
    )
|
tommikaikkonen/prettyprinter
|
prettyprinter/color.py
|
set_default_style
|
python
|
def set_default_style(style):
    """Sets default global style to be used by ``prettyprinter.cpprint``.

    :param style: the style to set, either subclass of
        ``pygments.styles.Style`` or one of ``'dark'``, ``'light'``
    """
    global default_style

    if style == 'light':
        style = default_light_style
    elif style == 'dark':
        style = default_dark_style

    if not issubclass(style, Style):
        raise TypeError(
            "style must be a subclass of pygments.styles.Style or "
            "one of 'dark', 'light'. Got {}".format(repr(style))
        )

    default_style = style
|
Sets default global style to be used by ``prettyprinter.cpprint``.
:param style: the style to set, either subclass of
``pygments.styles.Style`` or one of ``'dark'``, ``'light'``
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/color.py#L134-L151
| null |
import os
import colorful
from pygments import token, styles
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
from .sdoctypes import (
SLine,
SAnnotationPush,
SAnnotationPop,
)
from .syntax import Token
from .render import as_lines
from .utils import rfind_idx
# Maps prettyprinter syntax tokens to their Pygments counterparts so the
# active Pygments style can colorize SDoc annotations.
# Note: a duplicate Token.NUMBER_INT entry (mapped to token.Number) was
# removed; it was dead code, always overwritten by the later
# token.Number.Integer entry, so the effective mapping is unchanged.
_SYNTAX_TOKEN_TO_PYGMENTS_TOKEN = {
    Token.KEYWORD_CONSTANT: token.Keyword.Constant,
    Token.NAME_BUILTIN: token.Name.Builtin,
    Token.NAME_ENTITY: token.Name.Entity,
    Token.NAME_FUNCTION: token.Name.Function,
    Token.NAME_VARIABLE: token.Name.Variable,
    Token.LITERAL_STRING: token.String,
    Token.STRING_AFFIX: token.String.Affix,
    Token.STRING_ESCAPE: token.String.Escape,
    Token.NUMBER_INT: token.Number.Integer,
    Token.NUMBER_BINARY: token.Number.Bin,
    Token.NUMBER_FLOAT: token.Number.Float,
    Token.OPERATOR: token.Operator,
    Token.PUNCTUATION: token.Punctuation,
    Token.COMMENT_SINGLE: token.Comment.Single,
}
# From https://github.com/primer/github-syntax-theme-generator/blob/master/lib/themes/light.json # noqa
# GitHub has MIT licenesed the theme, see
# https://github.com/primer/github-syntax-theme-generator/blob/master/LICENSE
class GitHubLightStyle(Style):
background_color = "#ffffff" # done
highlight_color = "#fafbfc" # done
styles = {
# No corresponding class for the following:
Text: "#24292e",
Whitespace: "",
Error: "bold #b31d28",
Other: "",
Comment: "#6a737d", # done
Comment.Multiline: "",
Comment.Preproc: "",
Comment.Single: "",
Comment.Special: "",
Keyword: "#d73a49", # class: 'k'
Keyword.Constant: "#005cc5", # done
Keyword.Declaration: "#d73a49",
Keyword.Namespace: "#d73a49",
Keyword.Pseudo: "",
Keyword.Reserved: "",
Keyword.Type: "",
Operator: "#d73a49", # class: 'o'
Operator.Word: "", # class: 'ow' - like keywords
Punctuation: "", # class: 'p'
Name: "#6f42c1", # class: 'n'
Name.Attribute: "#24292e", # class: 'na' - to be revised
Name.Builtin: "#005cc5", # class: 'nb'
Name.Builtin.Pseudo: "#005cc5", # class: 'bp'
Name.Class: "#6f42c1", # class: 'nc' - to be revised
Name.Constant: "#005cc5", # class: 'no' - to be revised
Name.Decorator: "#6f42c1", # done
Name.Entity: "#6f42c1", # done
Name.Exception: "#005cc5", # done
Name.Function: "#6f42c1", # done
Name.Function.Magic: "#005cc5", # done
Name.Property: "", # class: 'py'
Name.Label: "", # class: 'nl'
Name.Namespace: "", # class: 'nn' - to be revised
Name.Other: "#005cc5", # class: 'nx'
Name.Tag: "#22863a", # done
Name.Variable: "#e36209", # class: 'nv' - to be revised
Name.Variable.Class: "", # class: 'vc' - to be revised
Name.Variable.Global: "", # class: 'vg' - to be revised
Name.Variable.Instance: "", # class: 'vi' - to be revised
Number: "#005cc5", # class: 'm'
Number.Float: "", # class: 'mf'
Number.Hex: "", # class: 'mh'
Number.Integer: "", # class: 'mi'
Number.Integer.Long: "", # class: 'il'
Number.Oct: "", # class: 'mo'
Literal: "#005cc5", # class: 'l'
Literal.Date: "#005cc5", # class: 'ld'
String: "#032f62", # done
String.Backtick: "", # class: 'sb'
String.Char: "", # class: 'sc'
String.Doc: "", # class: 'sd' - like a comment
String.Double: "", # class: 's2'
String.Escape: "#22863a", # done
String.Heredoc: "", # class: 'sh'
String.Interpol: "#005cc5", # done
String.Other: "", # class: 'sx'
String.Regex: "", # class: 'sr'
String.Single: "", # class: 's1'
String.Symbol: "", # class: 'ss'
Generic: "", # class: 'g'
Generic.Deleted: "#f92672", # class: 'gd',
Generic.Emph: "italic", # class: 'ge'
Generic.Error: "", # class: 'gr'
Generic.Heading: "", # class: 'gh'
Generic.Inserted: "#22863a bg: #f0fff4", # class: 'gi'
Generic.Output: "", # class: 'go'
Generic.Prompt: "", # class: 'gp'
Generic.Strong: "bold", # class: 'gs'
Generic.Subheading: "bold #005cc5", # class: 'gu'
Generic.Traceback: "", # class: 'gt'
}
# Built-in style choices: Pygments' monokai for dark terminals and the
# GitHub-derived light theme defined above for light terminals.
default_dark_style = styles.get_style_by_name('monokai')
default_light_style = GitHubLightStyle
# Users can opt into the light theme via an environment variable.
is_light_bg = bool(os.environ.get('PYPRETTYPRINTER_LIGHT_BACKGROUND', False))
default_style = default_light_style if is_light_bg else default_dark_style
def styleattrs_to_colorful(attrs):
    """Convert a Pygments token-style dict (as returned by
    ``Style.style_for_token``) into a ``colorful`` style object.

    :param attrs: mapping with 'color', 'bgcolor', 'bold', 'italic'
        and 'underline' entries
    """
    c = colorful.reset
    if attrs['color'] or attrs['bgcolor']:
        # Colorful doesn't have a way to directly set Hex/RGB
        # colors- until I find a better way, we do it like this :)
        accessor = ''
        if attrs['color']:
            colorful.update_palette({'prettyprinterCurrFg': attrs['color']})
            accessor = 'prettyprinterCurrFg'
        if attrs['bgcolor']:
            colorful.update_palette({'prettyprinterCurrBg': attrs['bgcolor']})
            # NOTE(review): when only bgcolor is set, accessor becomes
            # '_on_prettyprinterCurrBg' with a leading underscore —
            # confirm colorful resolves that attribute name.
            accessor += '_on_prettyprinterCurrBg'
        c &= getattr(colorful, accessor)
    if attrs['bold']:
        c &= colorful.bold
    if attrs['italic']:
        c &= colorful.italic
    if attrs['underline']:
        c &= colorful.underline
    return c
def colored_render_to_stream(
    stream,
    sdocs,
    style,
    newline='\n',
    separator=' '
):
    """Write the SDoc stream ``sdocs`` to ``stream`` with ANSI colors.

    :param stream: writable text stream
    :param sdocs: iterable of SDocs (str fragments, SLines, annotation
        push/pop markers)
    :param style: a Pygments style, or ``None`` to use the module default
    :param newline: string emitted for each line break
    :param separator: string repeated ``indent`` times after a newline
    """
    if style is None:
        style = default_style

    evald = list(sdocs)

    if not evald:
        return

    # Cache the colorful style computed per syntax token; maintain a
    # stack of active colors so pops can restore the enclosing color.
    color_cache = {}
    colorstack = []

    sdoc_lines = as_lines(evald)

    for sdoc_line in sdoc_lines:
        last_text_sdoc_idx = rfind_idx(
            lambda sdoc: isinstance(sdoc, str),
            sdoc_line
        )

        # Edge case: trailing whitespace on a line.
        # Currently happens on multiline str value in a dict:
        # there's a trailing whitespace after the colon that's
        # hard to eliminate at the doc level.
        if last_text_sdoc_idx != -1:
            last_text_sdoc = sdoc_line[last_text_sdoc_idx]
            sdoc_line[last_text_sdoc_idx] = last_text_sdoc.rstrip()

        for sdoc in sdoc_line:
            if isinstance(sdoc, str):
                stream.write(sdoc)
            elif isinstance(sdoc, SLine):
                stream.write(newline + separator * sdoc.indent)
            elif isinstance(sdoc, SAnnotationPush):
                if isinstance(sdoc.value, Token):
                    try:
                        color = color_cache[sdoc.value]
                    except KeyError:
                        pygments_token = _SYNTAX_TOKEN_TO_PYGMENTS_TOKEN[
                            sdoc.value
                        ]
                        tokenattrs = style.style_for_token(pygments_token)
                        color = styleattrs_to_colorful(tokenattrs)
                        color_cache[sdoc.value] = color

                    colorstack.append(color)
                    stream.write(str(color))
            elif isinstance(sdoc, SAnnotationPop):
                try:
                    colorstack.pop()
                except IndexError:
                    # Unbalanced pop; ignore rather than crash rendering.
                    continue

                if colorstack:
                    stream.write(str(colorstack[-1]))
                else:
                    stream.write(str(colorful.reset))

    # Make sure the terminal is reset even if pushes were unbalanced.
    if colorstack:
        stream.write(str(colorful.reset))
|
tommikaikkonen/prettyprinter
|
prettyprinter/extras/ipython_repr_pretty.py
|
CompatRepresentationPrinter.indent
|
python
|
def indent(self, indent):
    """with statement support for indenting/dedenting.

    Buffers the doc parts produced inside the ``with`` block and, on
    exit, appends them back to the outer buffer as a single ``nest``-ed
    Concat at ``indent`` extra spaces. ``self.indentation`` is mirrored
    so the superclass bookkeeping stays consistent.
    """
    curr_docparts = self._docparts
    self._docparts = []
    self.indentation += indent
    try:
        yield
    finally:
        self.indentation -= indent
        indented_docparts = self._docparts
        self._docparts = curr_docparts
        self._docparts.append(nest(indent, concat(indented_docparts)))
|
with statement support for indenting/dedenting.
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/extras/ipython_repr_pretty.py#L81-L92
|
[
"def concat(docs):\n \"\"\"Returns a concatenation of the documents in the iterable argument\"\"\"\n return Concat(map(validate_doc, docs))\n",
"def nest(i, doc):\n return Nest(i, validate_doc(doc))\n"
] |
class CompatRepresentationPrinter(OriginalRepresentationPrinter):
    """Adapter that records IPython ``_repr_pretty_`` printer calls as
    prettyprinter Doc parts instead of writing text directly.
    """

    def __init__(self, *args, **kwargs):
        self._prettyprinter_ctx = kwargs.pop('prettyprinter_ctx')
        super().__init__(*args, **kwargs)
        # self.output should be assigned by the superclass
        assert isinstance(self.output, NoopStream)
        self._pending_wrapper = identity
        self._docparts = []

    def text(self, obj):
        super().text(obj)
        self._docparts.append(obj)

    def breakable(self, sep=' '):
        super().breakable(sep)
        # A breakable renders as ``sep`` when flat, a hard newline when broken.
        self._docparts.append(
            flat_choice(when_flat=sep, when_broken=HARDLINE)
        )

    def begin_group(self, indent=0, open=''):
        super().begin_group(indent, open)

        def wrapper(doc):
            if indent:
                doc = nest(indent, doc)
            return group(doc)

        self._pending_wrapper = compose(wrapper, self._pending_wrapper)

    def end_group(self, dedent=0, close=''):
        super().end_group(dedent, close)
        # dedent is ignored; it is not supported to
        # have different indentation when starting and
        # ending the group.
        doc = self._pending_wrapper(concat(self._docparts))
        self._docparts = [doc]
        self._pending_wrapper = identity

    # NOTE(review): this decorator looks orphaned — ``pretty`` does not
    # yield, so wrapping it in @contextmanager appears to be damage from
    # the extraction that removed the ``indent`` context-manager method
    # which likely sat here originally. Verify against the upstream file.
    @contextmanager
    def pretty(self, obj):
        self._docparts.append(
            pretty_python_value(obj, self._prettyprinter_ctx)
        )
|
tommikaikkonen/prettyprinter
|
prettyprinter/utils.py
|
intersperse
|
python
|
def intersperse(x, ys):
    """Returns an iterable where ``x`` is inserted between
    each element of ``ys``

    :type ys: Iterable
    """
    first = True
    for y in ys:
        if not first:
            yield x
        first = False
        yield y
|
Returns an iterable where ``x`` is inserted between
each element of ``ys``
:type ys: Iterable
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/utils.py#L5-L23
| null |
from itertools import islice
import shutil
def find(predicate, iterable, default=None):
    """Return the first element of ``iterable`` satisfying ``predicate``,
    or ``default`` if no element matches.

    :param predicate: one-argument callable returning a truthy value on a match
    :param iterable: elements to search, consumed lazily
    :param default: value returned when nothing matches
    """
    # A generator expression is already an iterator; the previous
    # redundant iter() wrapper was removed.
    return next((x for x in iterable if predicate(x)), default)
def rfind_idx(predicate, seq):
    """Return the index of the last element of ``seq`` satisfying
    ``predicate``, or ``-1`` if none does.
    """
    for idx in range(len(seq) - 1, -1, -1):
        if predicate(seq[idx]):
            return idx
    return -1
def identity(x):
    """Return ``x`` unchanged; serves as the neutral element that
    ``compose`` short-circuits on."""
    return x
def get_terminal_width(default=79):
    """Return the terminal width in columns, falling back to ``default``
    when the size cannot be determined.

    :param default: columns to report when no real size is available
    """
    # shutil.get_terminal_size expects an (columns, lines) pair of ints
    # for its fallback; the previous code passed None for lines, which
    # produced a non-int 'lines' value whenever the fallback was used.
    return shutil.get_terminal_size((default, 24)).columns
def take(n, iterable):
    """Return an iterator over the first ``n`` elements of ``iterable``."""
    return islice(iterable, n)
def compose(f, g):
    """Return a function that applies ``g`` first and then ``f``.

    ``identity`` arguments are short-circuited away so chains of
    composition stay flat.
    """
    if g is identity:
        return f
    if f is identity:
        return g

    def composed(value):
        return f(g(value))

    composed.__name__ = 'composed_{}_then_{}'.format(
        g.__name__,
        f.__name__
    )
    return composed
|
tommikaikkonen/prettyprinter
|
prettyprinter/__init__.py
|
pformat
|
python
|
def pformat(
    object,
    indent=_UNSET_SENTINEL,
    width=_UNSET_SENTINEL,
    depth=_UNSET_SENTINEL,
    *,
    ribbon_width=_UNSET_SENTINEL,
    max_seq_len=_UNSET_SENTINEL,
    compact=_UNSET_SENTINEL,
    sort_dict_keys=_UNSET_SENTINEL
):
    """Returns a pretty printed representation of the object as a ``str``.

    Accepts the same parameters as :func:`~prettyprinter.pprint`.
    The output is not colored.
    """
    settings = _merge_defaults(
        indent=indent,
        width=width,
        depth=depth,
        ribbon_width=ribbon_width,
        max_seq_len=max_seq_len,
        sort_dict_keys=sort_dict_keys,
    )
    buf = StringIO()
    default_render_to_stream(buf, python_to_sdocs(object, **settings))
    return buf.getvalue()
|
Returns a pretty printed representation of the object as a ``str``.
Accepts the same parameters as :func:`~prettyprinter.pprint`.
The output is not colored.
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/__init__.py#L110-L139
|
[
"def python_to_sdocs(\n value,\n indent,\n width,\n depth,\n ribbon_width,\n max_seq_len,\n sort_dict_keys\n):\n if depth is None:\n depth = float('inf')\n\n doc = pretty_python_value(\n value,\n ctx=PrettyContext(\n indent=indent,\n depth_left=depth,\n visited=set(),\n max_seq_len=max_seq_len,\n sort_dict_keys=sort_dict_keys\n )\n )\n\n if is_commented(doc):\n doc = group(\n flat_choice(\n when_flat=concat([\n doc,\n ' ',\n commentdoc(doc.annotation.value),\n ]),\n when_broken=concat([\n commentdoc(doc.annotation.value),\n HARDLINE,\n doc\n ])\n )\n )\n\n ribbon_frac = min(1.0, ribbon_width / width)\n\n return layout_smart(doc, width=width, ribbon_frac=ribbon_frac)\n",
"def default_render_to_stream(stream, sdocs, newline='\\n', separator=' '):\n evald = list(sdocs)\n\n if not evald:\n return\n\n for sdoc_line in as_lines(evald):\n last_text_sdoc_idx = rfind_idx(\n lambda sdoc: isinstance(sdoc, str),\n sdoc_line\n )\n\n # Edge case: trailing whitespace on a line.\n # Currently happens on multiline str value in a dict:\n # there's a trailing whitespace after the colon that's\n # hard to eliminate at the doc level.\n if last_text_sdoc_idx != -1:\n last_text_sdoc = sdoc_line[last_text_sdoc_idx]\n sdoc_line[last_text_sdoc_idx] = last_text_sdoc.rstrip()\n\n for sdoc in sdoc_line:\n if isinstance(sdoc, str):\n stream.write(sdoc)\n elif isinstance(sdoc, SLine):\n stream.write(newline + separator * sdoc.indent)\n",
"def _merge_defaults(\n *, indent, width, depth, ribbon_width, max_seq_len, sort_dict_keys\n):\n kwargs = locals()\n return {key: kwargs[key] if kwargs[key] is not _UNSET_SENTINEL else default\n for key, default in _default_config.items()}\n"
] |
# -*- coding: utf-8 -*-
"""Top-level package for prettyprinter."""
__author__ = """Tommi Kaikkonen"""
__email__ = 'kaikkonentommi@gmail.com'
__version__ = '0.17.0'
from io import StringIO
from importlib import import_module
from types import MappingProxyType
import sys
import warnings
from pprint import isrecursive, isreadable, saferepr
from .color import colored_render_to_stream, set_default_style
from .prettyprinter import (
is_registered,
python_to_sdocs,
register_pretty,
pretty_call,
pretty_call_alt,
comment,
trailing_comment,
)
from .render import default_render_to_stream
# Registers standard library types
# as a side effect
import prettyprinter.pretty_stdlib # noqa
__all__ = [
'cpprint',
'pprint',
'pformat',
'pretty_repr',
'install_extras',
'set_default_style',
'set_default_config',
'get_default_config',
'register_pretty',
'pretty_call',
'pretty_call_alt',
'trailing_comment',
'comment',
'python_to_sdocs',
'default_render_to_stream',
'PrettyPrinter',
'saferepr',
'isreadable',
'isrecursive',
]
class UnsetSentinel:
def __repr__(self):
return 'UNSET'
__str__ = __repr__
_UNSET_SENTINEL = UnsetSentinel()
# Module-wide defaults consumed by _merge_defaults; replaced (not
# mutated) by set_default_config.
_default_config = {
    'indent': 4,
    'width': 79,
    'ribbon_width': 71,
    'depth': None,
    'max_seq_len': 1000,
    'sort_dict_keys': False,
}
def _merge_defaults(
    *, indent, width, depth, ribbon_width, max_seq_len, sort_dict_keys
):
    """Fill any ``_UNSET_SENTINEL`` argument with its value from
    ``_default_config`` and return the merged settings dict."""
    overrides = {
        'indent': indent,
        'width': width,
        'depth': depth,
        'ribbon_width': ribbon_width,
        'max_seq_len': max_seq_len,
        'sort_dict_keys': sort_dict_keys,
    }
    return {
        key: default if overrides[key] is _UNSET_SENTINEL else overrides[key]
        for key, default in _default_config.items()
    }
def get_default_config():
    """Returns a read-only view of the current configuration"""
    return MappingProxyType(_default_config)
class PrettyPrinter:
    """Stand-in for ``pprint.PrettyPrinter``.

    Stores the configuration arguments given at construction time and
    forwards them, together with the object to print, to the
    module-level :func:`pprint` / :func:`pformat` functions.
    """

    def __init__(self, *args, **kwargs):
        self._args = args
        self._kwargs = kwargs

    def pprint(self, object):
        # Bug fix: ``object`` was previously dropped, so the stored
        # configuration was printed without the value itself.
        pprint(object, *self._args, **self._kwargs)

    def pformat(self, object):
        # Bug fix: same as pprint — forward the object being formatted.
        return pformat(object, *self._args, **self._kwargs)

    def isrecursive(self, object):
        return isrecursive(object)

    def isreadable(self, object):
        return isreadable(object)

    def format(self, object):
        raise NotImplementedError
def pprint(
    object,
    stream=_UNSET_SENTINEL,
    indent=_UNSET_SENTINEL,
    width=_UNSET_SENTINEL,
    depth=_UNSET_SENTINEL,
    *,
    compact=False,
    ribbon_width=_UNSET_SENTINEL,
    max_seq_len=_UNSET_SENTINEL,
    sort_dict_keys=_UNSET_SENTINEL,
    end='\n'
):
    """Pretty print a Python value ``object`` to ``stream``,
    which defaults to ``sys.stdout``. The output will not be colored.

    :param indent: number of spaces to add for each level of nesting.
    :param stream: the output stream, defaults to ``sys.stdout``
    :param width: a soft maximum allowed number of columns in the output,
        which the layout algorithm attempts to stay under.
    :param depth: maximum depth to print nested structures
    :param ribbon_width: a soft maximum allowed number of columns in the
        output, after indenting the line
    :param max_seq_len: a maximum sequence length that applies to subclasses
        of lists, sets, frozensets, tuples and dicts. A trailing comment
        indicates the number of truncated elements. Setting max_seq_len to
        ``None`` disables truncation.
    :param sort_dict_keys: a ``bool`` value indicating if dict keys should
        be sorted in the output. Defaults to ``False``, in which case the
        default order is used, which is the insertion order in CPython 3.6+.
    """
    # The stream default is resolved at call time (not stored in
    # _default_config) so that reassignments of sys.stdout are honored.
    out = sys.stdout if stream is _UNSET_SENTINEL else stream

    settings = _merge_defaults(
        indent=indent,
        width=width,
        depth=depth,
        ribbon_width=ribbon_width,
        max_seq_len=max_seq_len,
        sort_dict_keys=sort_dict_keys,
    )
    default_render_to_stream(out, python_to_sdocs(object, **settings))

    if end:
        out.write(end)
def cpprint(
    object,
    stream=_UNSET_SENTINEL,
    indent=_UNSET_SENTINEL,
    width=_UNSET_SENTINEL,
    depth=_UNSET_SENTINEL,
    *,
    compact=False,
    ribbon_width=_UNSET_SENTINEL,
    max_seq_len=_UNSET_SENTINEL,
    sort_dict_keys=_UNSET_SENTINEL,
    style=None,
    end='\n'
):
    """Pretty print a Python value ``object`` to ``stream``,
    which defaults to sys.stdout. The output will be colored and
    syntax highlighted.

    :param indent: number of spaces to add for each level of nesting.
    :param stream: the output stream, defaults to sys.stdout
    :param width: a soft maximum allowed number of columns in the output,
        which the layout algorithm attempts to stay under.
    :param depth: maximum depth to print nested structures
    :param ribbon_width: a soft maximum allowed number of columns in the
        output, after indenting the line
    :param max_seq_len: a maximum sequence length that applies to subclasses
        of lists, sets, frozensets, tuples and dicts. A trailing comment
        indicates the number of truncated elements. Setting max_seq_len to
        ``None`` disables truncation.
    :param sort_dict_keys: a ``bool`` value indicating if dict keys should
        be sorted in the output. Defaults to ``False``, in which case the
        default order is used, which is the insertion order in CPython 3.6+.
    :param style: one of ``'light'``, ``'dark'`` or a subclass
        of ``pygments.styles.Style``. If omitted,
        will use the default style. If the default style
        is not changed by the user with :func:`~prettyprinter.set_default_style`,
        the default is ``'dark'``.
    """
    # The stream default is resolved at call time (not stored in
    # _default_config) so that reassignments of sys.stdout are honored.
    out = sys.stdout if stream is _UNSET_SENTINEL else stream

    settings = _merge_defaults(
        indent=indent,
        width=width,
        depth=depth,
        ribbon_width=ribbon_width,
        max_seq_len=max_seq_len,
        sort_dict_keys=sort_dict_keys,
    )
    colored_render_to_stream(
        out, python_to_sdocs(object, **settings), style=style
    )

    if end:
        out.write(end)
# Names of every extra integration install_extras knows how to load;
# each maps to a prettyprinter.extras.<name> module.
ALL_EXTRAS = frozenset([
    'attrs',
    'django',
    'ipython',
    'ipython_repr_pretty',
    'numpy',
    'python',
    'requests',
    'dataclasses',
])

# Default "exclude" argument for install_extras.
EMPTY_SET = frozenset()
def install_extras(
    include=ALL_EXTRAS,
    *,
    exclude=EMPTY_SET,
    raise_on_error=False,
    warn_on_error=True
):
    """Installs extras.

    Installing an extra means registering pretty printers for objects from third
    party libraries and/or enabling integrations with other python programs.

    - ``'attrs'`` - automatically pretty prints classes created using the ``attrs`` package.
    - ``'dataclasses'`` - automatically pretty prints classes created using the ``dataclasses``
      module.
    - ``'django'`` - automatically pretty prints Model and QuerySet subclasses defined in your
      Django apps.
    - ``numpy`` - automatically pretty prints numpy scalars with explicit types, and,
      for numpy>=1.14, numpy arrays.
    - ``'requests'`` - automatically pretty prints Requests, Responses, Sessions, etc.
    - ``'ipython'`` - makes prettyprinter the default printer in the IPython shell.
    - ``'python'`` - makes prettyprinter the default printer in the default Python shell.
    - ``'ipython_repr_pretty'`` - automatically prints objects that define a ``_repr_pretty_``
      method to integrate with `IPython.lib.pretty
      <http://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html#extending>`_.

    :param include: an iterable of strs representing the extras to include.
        All extras are included by default.
    :param exclude: an iterable of strs representing the extras to exclude.
    :param raise_on_error: re-raise import/installation failures instead of
        continuing with the remaining extras.
    :param warn_on_error: emit a warning on import/installation failures.
    """  # noqa
    include = set(include)
    exclude = set(exclude)

    # Reject typos early instead of silently skipping unknown names.
    unexisting_extras = (include | exclude) - ALL_EXTRAS

    if unexisting_extras:
        raise ValueError(
            "The following extras don't exist: {}".format(
                ', '.join(unexisting_extras)
            )
        )

    extras_to_install = (ALL_EXTRAS & include) - exclude

    for extra in extras_to_install:
        module_name = 'prettyprinter.extras.' + extra
        try:
            extra_module = import_module(module_name)
        except ImportError as e:
            if raise_on_error:
                raise e
            if warn_on_error:
                warnings.warn(
                    "Failed to import '{0}' PrettyPrinter extra. "
                    "If you don't need it, call install_extras with "
                    "exclude=['{0}']".format(extra)
                )
        else:
            try:
                extra_module.install()
            except Exception as exc:
                if raise_on_error:
                    raise exc
                elif warn_on_error:
                    warnings.warn(
                        "Failed to install '{0}' PrettyPrinter extra. "
                        "If you don't need it, call install_extras with "
                        "exclude=['{0}']".format(extra)
                    )
def set_default_config(
    *,
    style=_UNSET_SENTINEL,
    max_seq_len=_UNSET_SENTINEL,
    width=_UNSET_SENTINEL,
    ribbon_width=_UNSET_SENTINEL,
    depth=_UNSET_SENTINEL,
    sort_dict_keys=_UNSET_SENTINEL
):
    """
    Sets the default configuration values used when calling
    `pprint`, `cpprint`, or `pformat`, if those values weren't
    explicitly provided. Only overrides the values provided in
    the keyword arguments.
    """
    global _default_config

    if style is not _UNSET_SENTINEL:
        set_default_style(style)

    overrides = {
        'max_seq_len': max_seq_len,
        'width': width,
        'ribbon_width': ribbon_width,
        'depth': depth,
        'sort_dict_keys': sort_dict_keys,
    }

    new_defaults = {**_default_config}
    for key, value in overrides.items():
        if value is not _UNSET_SENTINEL:
            new_defaults[key] = value

    _default_config = new_defaults
    return new_defaults
def pretty_repr(instance):
    """
    A function assignable to the ``__repr__`` dunder method, so that
    the ``prettyprinter`` definition for the type is used to provide
    repr output. Usage:

    .. code:: python

        from prettyprinter import pretty_repr

        class MyClass:
            __repr__ = pretty_repr
    """
    instance_type = type(instance)

    registered = is_registered(
        instance_type,
        check_superclasses=True,
        check_deferred=True,
        register_deferred=True
    )
    if registered:
        return pformat(instance)

    # No printer anywhere in the type's hierarchy: warn and fall back to
    # the stock object repr.
    warnings.warn(
        "pretty_repr is assigned as the __repr__ method of "
        "'{}'. However, no pretty printer is registered for that type, "
        "its superclasses or its subclasses. Falling back to the default "
        "repr implementation. To fix this warning, register a pretty "
        "printer using prettyprinter.register_pretty.".format(
            instance_type.__qualname__
        ),
        UserWarning
    )
    return object.__repr__(instance)
|
tommikaikkonen/prettyprinter
|
prettyprinter/__init__.py
|
pprint
|
python
|
def pprint(
object,
stream=_UNSET_SENTINEL,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
compact=False,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL,
end='\n'
):
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = (
# This is not in _default_config in case
# sys.stdout changes.
sys.stdout
if stream is _UNSET_SENTINEL
else stream
)
default_render_to_stream(stream, sdocs)
if end:
stream.write(end)
|
Pretty print a Python value ``object`` to ``stream``,
which defaults to ``sys.stdout``. The output will not be colored.
:param indent: number of spaces to add for each level of nesting.
:param stream: the output stream, defaults to ``sys.stdout``
:param width: a soft maximum allowed number of columns in the output,
which the layout algorithm attempts to stay under.
:param depth: maximum depth to print nested structures
:param ribbon_width: a soft maximum allowed number of columns in the output,
after indenting the line
:param max_seq_len: a maximum sequence length that applies to subclasses of
lists, sets, frozensets, tuples and dicts. A trailing
comment that indicates the number of truncated elements.
Setting max_seq_len to ``None`` disables truncation.
:param sort_dict_keys: a ``bool`` value indicating if dict keys should be
sorted in the output. Defaults to ``False``, in
which case the default order is used, which is the
insertion order in CPython 3.6+.
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/__init__.py#L142-L195
|
[
"def python_to_sdocs(\n value,\n indent,\n width,\n depth,\n ribbon_width,\n max_seq_len,\n sort_dict_keys\n):\n if depth is None:\n depth = float('inf')\n\n doc = pretty_python_value(\n value,\n ctx=PrettyContext(\n indent=indent,\n depth_left=depth,\n visited=set(),\n max_seq_len=max_seq_len,\n sort_dict_keys=sort_dict_keys\n )\n )\n\n if is_commented(doc):\n doc = group(\n flat_choice(\n when_flat=concat([\n doc,\n ' ',\n commentdoc(doc.annotation.value),\n ]),\n when_broken=concat([\n commentdoc(doc.annotation.value),\n HARDLINE,\n doc\n ])\n )\n )\n\n ribbon_frac = min(1.0, ribbon_width / width)\n\n return layout_smart(doc, width=width, ribbon_frac=ribbon_frac)\n",
"def default_render_to_stream(stream, sdocs, newline='\\n', separator=' '):\n evald = list(sdocs)\n\n if not evald:\n return\n\n for sdoc_line in as_lines(evald):\n last_text_sdoc_idx = rfind_idx(\n lambda sdoc: isinstance(sdoc, str),\n sdoc_line\n )\n\n # Edge case: trailing whitespace on a line.\n # Currently happens on multiline str value in a dict:\n # there's a trailing whitespace after the colon that's\n # hard to eliminate at the doc level.\n if last_text_sdoc_idx != -1:\n last_text_sdoc = sdoc_line[last_text_sdoc_idx]\n sdoc_line[last_text_sdoc_idx] = last_text_sdoc.rstrip()\n\n for sdoc in sdoc_line:\n if isinstance(sdoc, str):\n stream.write(sdoc)\n elif isinstance(sdoc, SLine):\n stream.write(newline + separator * sdoc.indent)\n",
"def _merge_defaults(\n *, indent, width, depth, ribbon_width, max_seq_len, sort_dict_keys\n):\n kwargs = locals()\n return {key: kwargs[key] if kwargs[key] is not _UNSET_SENTINEL else default\n for key, default in _default_config.items()}\n"
] |
# -*- coding: utf-8 -*-
"""Top-level package for prettyprinter."""
__author__ = """Tommi Kaikkonen"""
__email__ = 'kaikkonentommi@gmail.com'
__version__ = '0.17.0'
from io import StringIO
from importlib import import_module
from types import MappingProxyType
import sys
import warnings
from pprint import isrecursive, isreadable, saferepr
from .color import colored_render_to_stream, set_default_style
from .prettyprinter import (
is_registered,
python_to_sdocs,
register_pretty,
pretty_call,
pretty_call_alt,
comment,
trailing_comment,
)
from .render import default_render_to_stream
# Registers standard library types
# as a side effect
import prettyprinter.pretty_stdlib # noqa
__all__ = [
'cpprint',
'pprint',
'pformat',
'pretty_repr',
'install_extras',
'set_default_style',
'set_default_config',
'get_default_config',
'register_pretty',
'pretty_call',
'pretty_call_alt',
'trailing_comment',
'comment',
'python_to_sdocs',
'default_render_to_stream',
'PrettyPrinter',
'saferepr',
'isreadable',
'isrecursive',
]
class UnsetSentinel:
def __repr__(self):
return 'UNSET'
__str__ = __repr__
_UNSET_SENTINEL = UnsetSentinel()
_default_config = {
'indent': 4,
'width': 79,
'ribbon_width': 71,
'depth': None,
'max_seq_len': 1000,
'sort_dict_keys': False,
}
def _merge_defaults(
*, indent, width, depth, ribbon_width, max_seq_len, sort_dict_keys
):
kwargs = locals()
return {key: kwargs[key] if kwargs[key] is not _UNSET_SENTINEL else default
for key, default in _default_config.items()}
def get_default_config():
"""Returns a read-only view of the current configuration"""
return MappingProxyType(_default_config)
class PrettyPrinter:
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def pprint(self, object):
pprint(*self._args, **self._kwargs)
def pformat(self, object):
return pformat(*self._args, **self._kwargs)
def isrecursive(self, object):
return isrecursive(object)
def isreadable(self, object):
return isreadable(object)
def format(self, object):
raise NotImplementedError
def pformat(
object,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
compact=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL
):
"""
Returns a pretty printed representation of the object as a ``str``.
Accepts the same parameters as :func:`~prettyprinter.pprint`.
The output is not colored.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = StringIO()
default_render_to_stream(stream, sdocs)
return stream.getvalue()
def cpprint(
object,
stream=_UNSET_SENTINEL,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
compact=False,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL,
style=None,
end='\n'
):
"""Pretty print a Python value ``object`` to ``stream``,
which defaults to sys.stdout. The output will be colored and
syntax highlighted.
:param indent: number of spaces to add for each level of nesting.
:param stream: the output stream, defaults to sys.stdout
:param width: a soft maximum allowed number of columns in the output,
which the layout algorithm attempts to stay under.
:param depth: maximum depth to print nested structures
:param ribbon_width: a soft maximum allowed number of columns in the output,
after indenting the line
:param max_seq_len: a maximum sequence length that applies to subclasses of
lists, sets, frozensets, tuples and dicts. A trailing
comment that indicates the number of truncated elements.
Setting max_seq_len to ``None`` disables truncation.
:param sort_dict_keys: a ``bool`` value indicating if dict keys should be
sorted in the output. Defaults to ``False``, in
which case the default order is used, which is the
insertion order in CPython 3.6+.
:param style: one of ``'light'``, ``'dark'`` or a subclass
of ``pygments.styles.Style``. If omitted,
will use the default style. If the default style
is not changed by the user with :func:`~prettyprinter.set_default_style`,
the default is ``'dark'``.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = (
# This is not in _default_config in case
# sys.stdout changes.
sys.stdout
if stream is _UNSET_SENTINEL
else stream
)
colored_render_to_stream(stream, sdocs, style=style)
if end:
stream.write(end)
ALL_EXTRAS = frozenset([
'attrs',
'django',
'ipython',
'ipython_repr_pretty',
'numpy',
'python',
'requests',
'dataclasses',
])
EMPTY_SET = frozenset()
def install_extras(
include=ALL_EXTRAS,
*,
exclude=EMPTY_SET,
raise_on_error=False,
warn_on_error=True
):
"""Installs extras.
Installing an extra means registering pretty printers for objects from third
party libraries and/or enabling integrations with other python programs.
- ``'attrs'`` - automatically pretty prints classes created using the ``attrs`` package.
- ``'dataclasses'`` - automatically pretty prints classes created using the ``dataclasses``
module.
- ``'django'`` - automatically pretty prints Model and QuerySet subclasses defined in your
Django apps.
- ``numpy`` - automatically pretty prints numpy scalars with explicit types, and,
for numpy>=1.14, numpy arrays.
- ``'requests'`` - automatically pretty prints Requests, Responses, Sessions, etc.
- ``'ipython'`` - makes prettyprinter the default printer in the IPython shell.
- ``'python'`` - makes prettyprinter the default printer in the default Python shell.
- ``'ipython_repr_pretty'`` - automatically prints objects that define a ``_repr_pretty_``
method to integrate with `IPython.lib.pretty
<http://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html#extending>`_.
:param include: an iterable of strs representing the extras to include.
All extras are included by default.
:param exclude: an iterable of strs representing the extras to exclude.
""" # noqa
include = set(include)
exclude = set(exclude)
unexisting_extras = (include | exclude) - ALL_EXTRAS
if unexisting_extras:
raise ValueError(
"The following extras don't exist: {}".format(
', '.join(unexisting_extras)
)
)
extras_to_install = (ALL_EXTRAS & include) - exclude
for extra in extras_to_install:
module_name = 'prettyprinter.extras.' + extra
try:
extra_module = import_module(module_name)
except ImportError as e:
if raise_on_error:
raise e
if warn_on_error:
warnings.warn(
"Failed to import '{0}' PrettyPrinter extra. "
"If you don't need it, call install_extras with "
"exclude=['{0}']".format(extra)
)
else:
try:
extra_module.install()
except Exception as exc:
if raise_on_error:
raise exc
elif warn_on_error:
warnings.warn(
"Failed to install '{0}' PrettyPrinter extra. "
"If you don't need it, call install_extras with "
"exclude=['{0}']".format(extra)
)
def set_default_config(
*,
style=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
ribbon_width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL
):
"""
Sets the default configuration values used when calling
`pprint`, `cpprint`, or `pformat`, if those values weren't
explicitly provided. Only overrides the values provided in
the keyword arguments.
"""
global _default_config
if style is not _UNSET_SENTINEL:
set_default_style(style)
new_defaults = {**_default_config}
if max_seq_len is not _UNSET_SENTINEL:
new_defaults['max_seq_len'] = max_seq_len
if width is not _UNSET_SENTINEL:
new_defaults['width'] = width
if ribbon_width is not _UNSET_SENTINEL:
new_defaults['ribbon_width'] = ribbon_width
if depth is not _UNSET_SENTINEL:
new_defaults['depth'] = depth
if sort_dict_keys is not _UNSET_SENTINEL:
new_defaults['sort_dict_keys'] = sort_dict_keys
_default_config = new_defaults
return new_defaults
def pretty_repr(instance):
"""
A function assignable to the ``__repr__`` dunder method, so that
the ``prettyprinter`` definition for the type is used to provide
repr output. Usage:
.. code:: python
from prettyprinter import pretty_repr
class MyClass:
__repr__ = pretty_repr
"""
instance_type = type(instance)
if not is_registered(
instance_type,
check_superclasses=True,
check_deferred=True,
register_deferred=True
):
warnings.warn(
"pretty_repr is assigned as the __repr__ method of "
"'{}'. However, no pretty printer is registered for that type, "
"its superclasses or its subclasses. Falling back to the default "
"repr implementation. To fix this warning, register a pretty "
"printer using prettyprinter.register_pretty.".format(
instance_type.__qualname__
),
UserWarning
)
return object.__repr__(instance)
return pformat(instance)
|
tommikaikkonen/prettyprinter
|
prettyprinter/__init__.py
|
cpprint
|
python
|
def cpprint(
object,
stream=_UNSET_SENTINEL,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
compact=False,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL,
style=None,
end='\n'
):
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = (
# This is not in _default_config in case
# sys.stdout changes.
sys.stdout
if stream is _UNSET_SENTINEL
else stream
)
colored_render_to_stream(stream, sdocs, style=style)
if end:
stream.write(end)
|
Pretty print a Python value ``object`` to ``stream``,
which defaults to sys.stdout. The output will be colored and
syntax highlighted.
:param indent: number of spaces to add for each level of nesting.
:param stream: the output stream, defaults to sys.stdout
:param width: a soft maximum allowed number of columns in the output,
which the layout algorithm attempts to stay under.
:param depth: maximum depth to print nested structures
:param ribbon_width: a soft maximum allowed number of columns in the output,
after indenting the line
:param max_seq_len: a maximum sequence length that applies to subclasses of
lists, sets, frozensets, tuples and dicts. A trailing
comment that indicates the number of truncated elements.
Setting max_seq_len to ``None`` disables truncation.
:param sort_dict_keys: a ``bool`` value indicating if dict keys should be
sorted in the output. Defaults to ``False``, in
which case the default order is used, which is the
insertion order in CPython 3.6+.
:param style: one of ``'light'``, ``'dark'`` or a subclass
of ``pygments.styles.Style``. If omitted,
will use the default style. If the default style
is not changed by the user with :func:`~prettyprinter.set_default_style`,
the default is ``'dark'``.
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/__init__.py#L198-L257
|
[
"def colored_render_to_stream(\n stream,\n sdocs,\n style,\n newline='\\n',\n separator=' '\n):\n if style is None:\n style = default_style\n\n evald = list(sdocs)\n\n if not evald:\n return\n\n color_cache = {}\n\n colorstack = []\n\n sdoc_lines = as_lines(evald)\n\n for sdoc_line in sdoc_lines:\n last_text_sdoc_idx = rfind_idx(\n lambda sdoc: isinstance(sdoc, str),\n sdoc_line\n )\n\n # Edge case: trailing whitespace on a line.\n # Currently happens on multiline str value in a dict:\n # there's a trailing whitespace after the colon that's\n # hard to eliminate at the doc level.\n if last_text_sdoc_idx != -1:\n last_text_sdoc = sdoc_line[last_text_sdoc_idx]\n sdoc_line[last_text_sdoc_idx] = last_text_sdoc.rstrip()\n\n for sdoc in sdoc_line:\n if isinstance(sdoc, str):\n stream.write(sdoc)\n elif isinstance(sdoc, SLine):\n stream.write(newline + separator * sdoc.indent)\n elif isinstance(sdoc, SAnnotationPush):\n if isinstance(sdoc.value, Token):\n try:\n color = color_cache[sdoc.value]\n except KeyError:\n pygments_token = _SYNTAX_TOKEN_TO_PYGMENTS_TOKEN[\n sdoc.value\n ]\n tokenattrs = style.style_for_token(pygments_token)\n color = styleattrs_to_colorful(tokenattrs)\n color_cache[sdoc.value] = color\n\n colorstack.append(color)\n stream.write(str(color))\n\n elif isinstance(sdoc, SAnnotationPop):\n try:\n colorstack.pop()\n except IndexError:\n continue\n\n if colorstack:\n stream.write(str(colorstack[-1]))\n else:\n stream.write(str(colorful.reset))\n\n if colorstack:\n stream.write(str(colorful.reset))\n",
"def python_to_sdocs(\n value,\n indent,\n width,\n depth,\n ribbon_width,\n max_seq_len,\n sort_dict_keys\n):\n if depth is None:\n depth = float('inf')\n\n doc = pretty_python_value(\n value,\n ctx=PrettyContext(\n indent=indent,\n depth_left=depth,\n visited=set(),\n max_seq_len=max_seq_len,\n sort_dict_keys=sort_dict_keys\n )\n )\n\n if is_commented(doc):\n doc = group(\n flat_choice(\n when_flat=concat([\n doc,\n ' ',\n commentdoc(doc.annotation.value),\n ]),\n when_broken=concat([\n commentdoc(doc.annotation.value),\n HARDLINE,\n doc\n ])\n )\n )\n\n ribbon_frac = min(1.0, ribbon_width / width)\n\n return layout_smart(doc, width=width, ribbon_frac=ribbon_frac)\n",
"def _merge_defaults(\n *, indent, width, depth, ribbon_width, max_seq_len, sort_dict_keys\n):\n kwargs = locals()\n return {key: kwargs[key] if kwargs[key] is not _UNSET_SENTINEL else default\n for key, default in _default_config.items()}\n"
] |
# -*- coding: utf-8 -*-
"""Top-level package for prettyprinter."""
__author__ = """Tommi Kaikkonen"""
__email__ = 'kaikkonentommi@gmail.com'
__version__ = '0.17.0'
from io import StringIO
from importlib import import_module
from types import MappingProxyType
import sys
import warnings
from pprint import isrecursive, isreadable, saferepr
from .color import colored_render_to_stream, set_default_style
from .prettyprinter import (
is_registered,
python_to_sdocs,
register_pretty,
pretty_call,
pretty_call_alt,
comment,
trailing_comment,
)
from .render import default_render_to_stream
# Registers standard library types
# as a side effect
import prettyprinter.pretty_stdlib # noqa
__all__ = [
'cpprint',
'pprint',
'pformat',
'pretty_repr',
'install_extras',
'set_default_style',
'set_default_config',
'get_default_config',
'register_pretty',
'pretty_call',
'pretty_call_alt',
'trailing_comment',
'comment',
'python_to_sdocs',
'default_render_to_stream',
'PrettyPrinter',
'saferepr',
'isreadable',
'isrecursive',
]
class UnsetSentinel:
def __repr__(self):
return 'UNSET'
__str__ = __repr__
_UNSET_SENTINEL = UnsetSentinel()
_default_config = {
'indent': 4,
'width': 79,
'ribbon_width': 71,
'depth': None,
'max_seq_len': 1000,
'sort_dict_keys': False,
}
def _merge_defaults(
*, indent, width, depth, ribbon_width, max_seq_len, sort_dict_keys
):
kwargs = locals()
return {key: kwargs[key] if kwargs[key] is not _UNSET_SENTINEL else default
for key, default in _default_config.items()}
def get_default_config():
"""Returns a read-only view of the current configuration"""
return MappingProxyType(_default_config)
class PrettyPrinter:
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def pprint(self, object):
pprint(*self._args, **self._kwargs)
def pformat(self, object):
return pformat(*self._args, **self._kwargs)
def isrecursive(self, object):
return isrecursive(object)
def isreadable(self, object):
return isreadable(object)
def format(self, object):
raise NotImplementedError
def pformat(
object,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
compact=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL
):
"""
Returns a pretty printed representation of the object as a ``str``.
Accepts the same parameters as :func:`~prettyprinter.pprint`.
The output is not colored.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = StringIO()
default_render_to_stream(stream, sdocs)
return stream.getvalue()
def pprint(
object,
stream=_UNSET_SENTINEL,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
compact=False,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL,
end='\n'
):
"""Pretty print a Python value ``object`` to ``stream``,
which defaults to ``sys.stdout``. The output will not be colored.
:param indent: number of spaces to add for each level of nesting.
:param stream: the output stream, defaults to ``sys.stdout``
:param width: a soft maximum allowed number of columns in the output,
which the layout algorithm attempts to stay under.
:param depth: maximum depth to print nested structures
:param ribbon_width: a soft maximum allowed number of columns in the output,
after indenting the line
:param max_seq_len: a maximum sequence length that applies to subclasses of
lists, sets, frozensets, tuples and dicts. A trailing
comment that indicates the number of truncated elements.
Setting max_seq_len to ``None`` disables truncation.
:param sort_dict_keys: a ``bool`` value indicating if dict keys should be
sorted in the output. Defaults to ``False``, in
which case the default order is used, which is the
insertion order in CPython 3.6+.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = (
# This is not in _default_config in case
# sys.stdout changes.
sys.stdout
if stream is _UNSET_SENTINEL
else stream
)
default_render_to_stream(stream, sdocs)
if end:
stream.write(end)
ALL_EXTRAS = frozenset([
'attrs',
'django',
'ipython',
'ipython_repr_pretty',
'numpy',
'python',
'requests',
'dataclasses',
])
EMPTY_SET = frozenset()
def install_extras(
include=ALL_EXTRAS,
*,
exclude=EMPTY_SET,
raise_on_error=False,
warn_on_error=True
):
"""Installs extras.
Installing an extra means registering pretty printers for objects from third
party libraries and/or enabling integrations with other python programs.
- ``'attrs'`` - automatically pretty prints classes created using the ``attrs`` package.
- ``'dataclasses'`` - automatically pretty prints classes created using the ``dataclasses``
module.
- ``'django'`` - automatically pretty prints Model and QuerySet subclasses defined in your
Django apps.
- ``numpy`` - automatically pretty prints numpy scalars with explicit types, and,
for numpy>=1.14, numpy arrays.
- ``'requests'`` - automatically pretty prints Requests, Responses, Sessions, etc.
- ``'ipython'`` - makes prettyprinter the default printer in the IPython shell.
- ``'python'`` - makes prettyprinter the default printer in the default Python shell.
- ``'ipython_repr_pretty'`` - automatically prints objects that define a ``_repr_pretty_``
method to integrate with `IPython.lib.pretty
<http://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html#extending>`_.
:param include: an iterable of strs representing the extras to include.
All extras are included by default.
:param exclude: an iterable of strs representing the extras to exclude.
""" # noqa
include = set(include)
exclude = set(exclude)
unexisting_extras = (include | exclude) - ALL_EXTRAS
if unexisting_extras:
raise ValueError(
"The following extras don't exist: {}".format(
', '.join(unexisting_extras)
)
)
extras_to_install = (ALL_EXTRAS & include) - exclude
for extra in extras_to_install:
module_name = 'prettyprinter.extras.' + extra
try:
extra_module = import_module(module_name)
except ImportError as e:
if raise_on_error:
raise e
if warn_on_error:
warnings.warn(
"Failed to import '{0}' PrettyPrinter extra. "
"If you don't need it, call install_extras with "
"exclude=['{0}']".format(extra)
)
else:
try:
extra_module.install()
except Exception as exc:
if raise_on_error:
raise exc
elif warn_on_error:
warnings.warn(
"Failed to install '{0}' PrettyPrinter extra. "
"If you don't need it, call install_extras with "
"exclude=['{0}']".format(extra)
)
def set_default_config(
*,
style=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
ribbon_width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL
):
"""
Sets the default configuration values used when calling
`pprint`, `cpprint`, or `pformat`, if those values weren't
explicitly provided. Only overrides the values provided in
the keyword arguments.
"""
global _default_config
if style is not _UNSET_SENTINEL:
set_default_style(style)
new_defaults = {**_default_config}
if max_seq_len is not _UNSET_SENTINEL:
new_defaults['max_seq_len'] = max_seq_len
if width is not _UNSET_SENTINEL:
new_defaults['width'] = width
if ribbon_width is not _UNSET_SENTINEL:
new_defaults['ribbon_width'] = ribbon_width
if depth is not _UNSET_SENTINEL:
new_defaults['depth'] = depth
if sort_dict_keys is not _UNSET_SENTINEL:
new_defaults['sort_dict_keys'] = sort_dict_keys
_default_config = new_defaults
return new_defaults
def pretty_repr(instance):
"""
A function assignable to the ``__repr__`` dunder method, so that
the ``prettyprinter`` definition for the type is used to provide
repr output. Usage:
.. code:: python
from prettyprinter import pretty_repr
class MyClass:
__repr__ = pretty_repr
"""
instance_type = type(instance)
if not is_registered(
instance_type,
check_superclasses=True,
check_deferred=True,
register_deferred=True
):
warnings.warn(
"pretty_repr is assigned as the __repr__ method of "
"'{}'. However, no pretty printer is registered for that type, "
"its superclasses or its subclasses. Falling back to the default "
"repr implementation. To fix this warning, register a pretty "
"printer using prettyprinter.register_pretty.".format(
instance_type.__qualname__
),
UserWarning
)
return object.__repr__(instance)
return pformat(instance)
|
tommikaikkonen/prettyprinter
|
prettyprinter/__init__.py
|
install_extras
|
python
|
def install_extras(
include=ALL_EXTRAS,
*,
exclude=EMPTY_SET,
raise_on_error=False,
warn_on_error=True
):
"""Installs extras.
Installing an extra means registering pretty printers for objects from third
party libraries and/or enabling integrations with other python programs.
- ``'attrs'`` - automatically pretty prints classes created using the ``attrs`` package.
- ``'dataclasses'`` - automatically pretty prints classes created using the ``dataclasses``
module.
- ``'django'`` - automatically pretty prints Model and QuerySet subclasses defined in your
Django apps.
- ``numpy`` - automatically pretty prints numpy scalars with explicit types, and,
for numpy>=1.14, numpy arrays.
- ``'requests'`` - automatically pretty prints Requests, Responses, Sessions, etc.
- ``'ipython'`` - makes prettyprinter the default printer in the IPython shell.
- ``'python'`` - makes prettyprinter the default printer in the default Python shell.
- ``'ipython_repr_pretty'`` - automatically prints objects that define a ``_repr_pretty_``
method to integrate with `IPython.lib.pretty
<http://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html#extending>`_.
:param include: an iterable of strs representing the extras to include.
All extras are included by default.
:param exclude: an iterable of strs representing the extras to exclude.
""" # noqa
include = set(include)
exclude = set(exclude)
unexisting_extras = (include | exclude) - ALL_EXTRAS
if unexisting_extras:
raise ValueError(
"The following extras don't exist: {}".format(
', '.join(unexisting_extras)
)
)
extras_to_install = (ALL_EXTRAS & include) - exclude
for extra in extras_to_install:
module_name = 'prettyprinter.extras.' + extra
try:
extra_module = import_module(module_name)
except ImportError as e:
if raise_on_error:
raise e
if warn_on_error:
warnings.warn(
"Failed to import '{0}' PrettyPrinter extra. "
"If you don't need it, call install_extras with "
"exclude=['{0}']".format(extra)
)
else:
try:
extra_module.install()
except Exception as exc:
if raise_on_error:
raise exc
elif warn_on_error:
warnings.warn(
"Failed to install '{0}' PrettyPrinter extra. "
"If you don't need it, call install_extras with "
"exclude=['{0}']".format(extra)
)
|
Installs extras.
Installing an extra means registering pretty printers for objects from third
party libraries and/or enabling integrations with other python programs.
- ``'attrs'`` - automatically pretty prints classes created using the ``attrs`` package.
- ``'dataclasses'`` - automatically pretty prints classes created using the ``dataclasses``
module.
- ``'django'`` - automatically pretty prints Model and QuerySet subclasses defined in your
Django apps.
- ``numpy`` - automatically pretty prints numpy scalars with explicit types, and,
for numpy>=1.14, numpy arrays.
- ``'requests'`` - automatically pretty prints Requests, Responses, Sessions, etc.
- ``'ipython'`` - makes prettyprinter the default printer in the IPython shell.
- ``'python'`` - makes prettyprinter the default printer in the default Python shell.
- ``'ipython_repr_pretty'`` - automatically prints objects that define a ``_repr_pretty_``
method to integrate with `IPython.lib.pretty
<http://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html#extending>`_.
:param include: an iterable of strs representing the extras to include.
All extras are included by default.
:param exclude: an iterable of strs representing the extras to exclude.
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/__init__.py#L273-L341
| null |
# -*- coding: utf-8 -*-
"""Top-level package for prettyprinter."""
__author__ = """Tommi Kaikkonen"""
__email__ = 'kaikkonentommi@gmail.com'
__version__ = '0.17.0'
from io import StringIO
from importlib import import_module
from types import MappingProxyType
import sys
import warnings
from pprint import isrecursive, isreadable, saferepr
from .color import colored_render_to_stream, set_default_style
from .prettyprinter import (
is_registered,
python_to_sdocs,
register_pretty,
pretty_call,
pretty_call_alt,
comment,
trailing_comment,
)
from .render import default_render_to_stream
# Registers standard library types
# as a side effect
import prettyprinter.pretty_stdlib # noqa
__all__ = [
'cpprint',
'pprint',
'pformat',
'pretty_repr',
'install_extras',
'set_default_style',
'set_default_config',
'get_default_config',
'register_pretty',
'pretty_call',
'pretty_call_alt',
'trailing_comment',
'comment',
'python_to_sdocs',
'default_render_to_stream',
'PrettyPrinter',
'saferepr',
'isreadable',
'isrecursive',
]
class UnsetSentinel:
def __repr__(self):
return 'UNSET'
__str__ = __repr__
_UNSET_SENTINEL = UnsetSentinel()
_default_config = {
'indent': 4,
'width': 79,
'ribbon_width': 71,
'depth': None,
'max_seq_len': 1000,
'sort_dict_keys': False,
}
def _merge_defaults(
*, indent, width, depth, ribbon_width, max_seq_len, sort_dict_keys
):
kwargs = locals()
return {key: kwargs[key] if kwargs[key] is not _UNSET_SENTINEL else default
for key, default in _default_config.items()}
def get_default_config():
"""Returns a read-only view of the current configuration"""
return MappingProxyType(_default_config)
class PrettyPrinter:
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def pprint(self, object):
pprint(*self._args, **self._kwargs)
def pformat(self, object):
return pformat(*self._args, **self._kwargs)
def isrecursive(self, object):
return isrecursive(object)
def isreadable(self, object):
return isreadable(object)
def format(self, object):
raise NotImplementedError
def pformat(
object,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
compact=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL
):
"""
Returns a pretty printed representation of the object as a ``str``.
Accepts the same parameters as :func:`~prettyprinter.pprint`.
The output is not colored.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = StringIO()
default_render_to_stream(stream, sdocs)
return stream.getvalue()
def pprint(
object,
stream=_UNSET_SENTINEL,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
compact=False,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL,
end='\n'
):
"""Pretty print a Python value ``object`` to ``stream``,
which defaults to ``sys.stdout``. The output will not be colored.
:param indent: number of spaces to add for each level of nesting.
:param stream: the output stream, defaults to ``sys.stdout``
:param width: a soft maximum allowed number of columns in the output,
which the layout algorithm attempts to stay under.
:param depth: maximum depth to print nested structures
:param ribbon_width: a soft maximum allowed number of columns in the output,
after indenting the line
:param max_seq_len: a maximum sequence length that applies to subclasses of
lists, sets, frozensets, tuples and dicts. A trailing
comment that indicates the number of truncated elements.
Setting max_seq_len to ``None`` disables truncation.
:param sort_dict_keys: a ``bool`` value indicating if dict keys should be
sorted in the output. Defaults to ``False``, in
which case the default order is used, which is the
insertion order in CPython 3.6+.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = (
# This is not in _default_config in case
# sys.stdout changes.
sys.stdout
if stream is _UNSET_SENTINEL
else stream
)
default_render_to_stream(stream, sdocs)
if end:
stream.write(end)
def cpprint(
object,
stream=_UNSET_SENTINEL,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
compact=False,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL,
style=None,
end='\n'
):
"""Pretty print a Python value ``object`` to ``stream``,
which defaults to sys.stdout. The output will be colored and
syntax highlighted.
:param indent: number of spaces to add for each level of nesting.
:param stream: the output stream, defaults to sys.stdout
:param width: a soft maximum allowed number of columns in the output,
which the layout algorithm attempts to stay under.
:param depth: maximum depth to print nested structures
:param ribbon_width: a soft maximum allowed number of columns in the output,
after indenting the line
:param max_seq_len: a maximum sequence length that applies to subclasses of
lists, sets, frozensets, tuples and dicts. A trailing
comment that indicates the number of truncated elements.
Setting max_seq_len to ``None`` disables truncation.
:param sort_dict_keys: a ``bool`` value indicating if dict keys should be
sorted in the output. Defaults to ``False``, in
which case the default order is used, which is the
insertion order in CPython 3.6+.
:param style: one of ``'light'``, ``'dark'`` or a subclass
of ``pygments.styles.Style``. If omitted,
will use the default style. If the default style
is not changed by the user with :func:`~prettyprinter.set_default_style`,
the default is ``'dark'``.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = (
# This is not in _default_config in case
# sys.stdout changes.
sys.stdout
if stream is _UNSET_SENTINEL
else stream
)
colored_render_to_stream(stream, sdocs, style=style)
if end:
stream.write(end)
ALL_EXTRAS = frozenset([
'attrs',
'django',
'ipython',
'ipython_repr_pretty',
'numpy',
'python',
'requests',
'dataclasses',
])
EMPTY_SET = frozenset()
def set_default_config(
*,
style=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
ribbon_width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL
):
"""
Sets the default configuration values used when calling
`pprint`, `cpprint`, or `pformat`, if those values weren't
explicitly provided. Only overrides the values provided in
the keyword arguments.
"""
global _default_config
if style is not _UNSET_SENTINEL:
set_default_style(style)
new_defaults = {**_default_config}
if max_seq_len is not _UNSET_SENTINEL:
new_defaults['max_seq_len'] = max_seq_len
if width is not _UNSET_SENTINEL:
new_defaults['width'] = width
if ribbon_width is not _UNSET_SENTINEL:
new_defaults['ribbon_width'] = ribbon_width
if depth is not _UNSET_SENTINEL:
new_defaults['depth'] = depth
if sort_dict_keys is not _UNSET_SENTINEL:
new_defaults['sort_dict_keys'] = sort_dict_keys
_default_config = new_defaults
return new_defaults
def pretty_repr(instance):
"""
A function assignable to the ``__repr__`` dunder method, so that
the ``prettyprinter`` definition for the type is used to provide
repr output. Usage:
.. code:: python
from prettyprinter import pretty_repr
class MyClass:
__repr__ = pretty_repr
"""
instance_type = type(instance)
if not is_registered(
instance_type,
check_superclasses=True,
check_deferred=True,
register_deferred=True
):
warnings.warn(
"pretty_repr is assigned as the __repr__ method of "
"'{}'. However, no pretty printer is registered for that type, "
"its superclasses or its subclasses. Falling back to the default "
"repr implementation. To fix this warning, register a pretty "
"printer using prettyprinter.register_pretty.".format(
instance_type.__qualname__
),
UserWarning
)
return object.__repr__(instance)
return pformat(instance)
|
tommikaikkonen/prettyprinter
|
prettyprinter/__init__.py
|
set_default_config
|
python
|
def set_default_config(
*,
style=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
ribbon_width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL
):
global _default_config
if style is not _UNSET_SENTINEL:
set_default_style(style)
new_defaults = {**_default_config}
if max_seq_len is not _UNSET_SENTINEL:
new_defaults['max_seq_len'] = max_seq_len
if width is not _UNSET_SENTINEL:
new_defaults['width'] = width
if ribbon_width is not _UNSET_SENTINEL:
new_defaults['ribbon_width'] = ribbon_width
if depth is not _UNSET_SENTINEL:
new_defaults['depth'] = depth
if sort_dict_keys is not _UNSET_SENTINEL:
new_defaults['sort_dict_keys'] = sort_dict_keys
_default_config = new_defaults
return new_defaults
|
Sets the default configuration values used when calling
`pprint`, `cpprint`, or `pformat`, if those values weren't
explicitly provided. Only overrides the values provided in
the keyword arguments.
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/__init__.py#L344-L382
|
[
"def set_default_style(style):\n \"\"\"Sets default global style to be used by ``prettyprinter.cpprint``.\n\n :param style: the style to set, either subclass of\n ``pygments.styles.Style`` or one of ``'dark'``, ``'light'``\n \"\"\"\n global default_style\n if style == 'dark':\n style = default_dark_style\n elif style == 'light':\n style = default_light_style\n\n if not issubclass(style, Style):\n raise TypeError(\n \"style must be a subclass of pygments.styles.Style or \"\n \"one of 'dark', 'light'. Got {}\".format(repr(style))\n )\n default_style = style\n"
] |
# -*- coding: utf-8 -*-
"""Top-level package for prettyprinter."""
__author__ = """Tommi Kaikkonen"""
__email__ = 'kaikkonentommi@gmail.com'
__version__ = '0.17.0'
from io import StringIO
from importlib import import_module
from types import MappingProxyType
import sys
import warnings
from pprint import isrecursive, isreadable, saferepr
from .color import colored_render_to_stream, set_default_style
from .prettyprinter import (
is_registered,
python_to_sdocs,
register_pretty,
pretty_call,
pretty_call_alt,
comment,
trailing_comment,
)
from .render import default_render_to_stream
# Registers standard library types
# as a side effect
import prettyprinter.pretty_stdlib # noqa
__all__ = [
'cpprint',
'pprint',
'pformat',
'pretty_repr',
'install_extras',
'set_default_style',
'set_default_config',
'get_default_config',
'register_pretty',
'pretty_call',
'pretty_call_alt',
'trailing_comment',
'comment',
'python_to_sdocs',
'default_render_to_stream',
'PrettyPrinter',
'saferepr',
'isreadable',
'isrecursive',
]
class UnsetSentinel:
def __repr__(self):
return 'UNSET'
__str__ = __repr__
_UNSET_SENTINEL = UnsetSentinel()
_default_config = {
'indent': 4,
'width': 79,
'ribbon_width': 71,
'depth': None,
'max_seq_len': 1000,
'sort_dict_keys': False,
}
def _merge_defaults(
*, indent, width, depth, ribbon_width, max_seq_len, sort_dict_keys
):
kwargs = locals()
return {key: kwargs[key] if kwargs[key] is not _UNSET_SENTINEL else default
for key, default in _default_config.items()}
def get_default_config():
"""Returns a read-only view of the current configuration"""
return MappingProxyType(_default_config)
class PrettyPrinter:
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def pprint(self, object):
pprint(*self._args, **self._kwargs)
def pformat(self, object):
return pformat(*self._args, **self._kwargs)
def isrecursive(self, object):
return isrecursive(object)
def isreadable(self, object):
return isreadable(object)
def format(self, object):
raise NotImplementedError
def pformat(
object,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
compact=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL
):
"""
Returns a pretty printed representation of the object as a ``str``.
Accepts the same parameters as :func:`~prettyprinter.pprint`.
The output is not colored.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = StringIO()
default_render_to_stream(stream, sdocs)
return stream.getvalue()
def pprint(
object,
stream=_UNSET_SENTINEL,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
compact=False,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL,
end='\n'
):
"""Pretty print a Python value ``object`` to ``stream``,
which defaults to ``sys.stdout``. The output will not be colored.
:param indent: number of spaces to add for each level of nesting.
:param stream: the output stream, defaults to ``sys.stdout``
:param width: a soft maximum allowed number of columns in the output,
which the layout algorithm attempts to stay under.
:param depth: maximum depth to print nested structures
:param ribbon_width: a soft maximum allowed number of columns in the output,
after indenting the line
:param max_seq_len: a maximum sequence length that applies to subclasses of
lists, sets, frozensets, tuples and dicts. A trailing
comment that indicates the number of truncated elements.
Setting max_seq_len to ``None`` disables truncation.
:param sort_dict_keys: a ``bool`` value indicating if dict keys should be
sorted in the output. Defaults to ``False``, in
which case the default order is used, which is the
insertion order in CPython 3.6+.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = (
# This is not in _default_config in case
# sys.stdout changes.
sys.stdout
if stream is _UNSET_SENTINEL
else stream
)
default_render_to_stream(stream, sdocs)
if end:
stream.write(end)
def cpprint(
object,
stream=_UNSET_SENTINEL,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
compact=False,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL,
style=None,
end='\n'
):
"""Pretty print a Python value ``object`` to ``stream``,
which defaults to sys.stdout. The output will be colored and
syntax highlighted.
:param indent: number of spaces to add for each level of nesting.
:param stream: the output stream, defaults to sys.stdout
:param width: a soft maximum allowed number of columns in the output,
which the layout algorithm attempts to stay under.
:param depth: maximum depth to print nested structures
:param ribbon_width: a soft maximum allowed number of columns in the output,
after indenting the line
:param max_seq_len: a maximum sequence length that applies to subclasses of
lists, sets, frozensets, tuples and dicts. A trailing
comment that indicates the number of truncated elements.
Setting max_seq_len to ``None`` disables truncation.
:param sort_dict_keys: a ``bool`` value indicating if dict keys should be
sorted in the output. Defaults to ``False``, in
which case the default order is used, which is the
insertion order in CPython 3.6+.
:param style: one of ``'light'``, ``'dark'`` or a subclass
of ``pygments.styles.Style``. If omitted,
will use the default style. If the default style
is not changed by the user with :func:`~prettyprinter.set_default_style`,
the default is ``'dark'``.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = (
# This is not in _default_config in case
# sys.stdout changes.
sys.stdout
if stream is _UNSET_SENTINEL
else stream
)
colored_render_to_stream(stream, sdocs, style=style)
if end:
stream.write(end)
ALL_EXTRAS = frozenset([
'attrs',
'django',
'ipython',
'ipython_repr_pretty',
'numpy',
'python',
'requests',
'dataclasses',
])
EMPTY_SET = frozenset()
def install_extras(
include=ALL_EXTRAS,
*,
exclude=EMPTY_SET,
raise_on_error=False,
warn_on_error=True
):
"""Installs extras.
Installing an extra means registering pretty printers for objects from third
party libraries and/or enabling integrations with other python programs.
- ``'attrs'`` - automatically pretty prints classes created using the ``attrs`` package.
- ``'dataclasses'`` - automatically pretty prints classes created using the ``dataclasses``
module.
- ``'django'`` - automatically pretty prints Model and QuerySet subclasses defined in your
Django apps.
- ``numpy`` - automatically pretty prints numpy scalars with explicit types, and,
for numpy>=1.14, numpy arrays.
- ``'requests'`` - automatically pretty prints Requests, Responses, Sessions, etc.
- ``'ipython'`` - makes prettyprinter the default printer in the IPython shell.
- ``'python'`` - makes prettyprinter the default printer in the default Python shell.
- ``'ipython_repr_pretty'`` - automatically prints objects that define a ``_repr_pretty_``
method to integrate with `IPython.lib.pretty
<http://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html#extending>`_.
:param include: an iterable of strs representing the extras to include.
All extras are included by default.
:param exclude: an iterable of strs representing the extras to exclude.
""" # noqa
include = set(include)
exclude = set(exclude)
unexisting_extras = (include | exclude) - ALL_EXTRAS
if unexisting_extras:
raise ValueError(
"The following extras don't exist: {}".format(
', '.join(unexisting_extras)
)
)
extras_to_install = (ALL_EXTRAS & include) - exclude
for extra in extras_to_install:
module_name = 'prettyprinter.extras.' + extra
try:
extra_module = import_module(module_name)
except ImportError as e:
if raise_on_error:
raise e
if warn_on_error:
warnings.warn(
"Failed to import '{0}' PrettyPrinter extra. "
"If you don't need it, call install_extras with "
"exclude=['{0}']".format(extra)
)
else:
try:
extra_module.install()
except Exception as exc:
if raise_on_error:
raise exc
elif warn_on_error:
warnings.warn(
"Failed to install '{0}' PrettyPrinter extra. "
"If you don't need it, call install_extras with "
"exclude=['{0}']".format(extra)
)
def pretty_repr(instance):
"""
A function assignable to the ``__repr__`` dunder method, so that
the ``prettyprinter`` definition for the type is used to provide
repr output. Usage:
.. code:: python
from prettyprinter import pretty_repr
class MyClass:
__repr__ = pretty_repr
"""
instance_type = type(instance)
if not is_registered(
instance_type,
check_superclasses=True,
check_deferred=True,
register_deferred=True
):
warnings.warn(
"pretty_repr is assigned as the __repr__ method of "
"'{}'. However, no pretty printer is registered for that type, "
"its superclasses or its subclasses. Falling back to the default "
"repr implementation. To fix this warning, register a pretty "
"printer using prettyprinter.register_pretty.".format(
instance_type.__qualname__
),
UserWarning
)
return object.__repr__(instance)
return pformat(instance)
|
tommikaikkonen/prettyprinter
|
prettyprinter/__init__.py
|
pretty_repr
|
python
|
def pretty_repr(instance):
instance_type = type(instance)
if not is_registered(
instance_type,
check_superclasses=True,
check_deferred=True,
register_deferred=True
):
warnings.warn(
"pretty_repr is assigned as the __repr__ method of "
"'{}'. However, no pretty printer is registered for that type, "
"its superclasses or its subclasses. Falling back to the default "
"repr implementation. To fix this warning, register a pretty "
"printer using prettyprinter.register_pretty.".format(
instance_type.__qualname__
),
UserWarning
)
return object.__repr__(instance)
return pformat(instance)
|
A function assignable to the ``__repr__`` dunder method, so that
the ``prettyprinter`` definition for the type is used to provide
repr output. Usage:
.. code:: python
from prettyprinter import pretty_repr
class MyClass:
__repr__ = pretty_repr
|
train
|
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/__init__.py#L385-L419
|
[
"def is_registered(\n type,\n *,\n check_superclasses=False,\n check_deferred=True,\n register_deferred=True\n):\n if not check_deferred and register_deferred:\n raise ValueError(\n 'register_deferred may not be True when check_deferred is False'\n )\n\n if type in pretty_dispatch.registry:\n return True\n\n if check_deferred:\n # Check deferred printers for the type exactly.\n deferred_key = get_deferred_key(type)\n if deferred_key in _DEFERRED_DISPATCH_BY_NAME:\n if register_deferred:\n deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(\n deferred_key\n )\n register_pretty(type)(deferred_dispatch)\n return True\n\n if not check_superclasses:\n return False\n\n if check_deferred:\n # Check deferred printers for supertypes.\n for supertype in type.__mro__[1:]:\n deferred_key = get_deferred_key(supertype)\n if deferred_key in _DEFERRED_DISPATCH_BY_NAME:\n if register_deferred:\n deferred_dispatch = _DEFERRED_DISPATCH_BY_NAME.pop(\n deferred_key\n )\n register_pretty(supertype)(deferred_dispatch)\n return True\n return pretty_dispatch.dispatch(type) is not _BASE_DISPATCH\n",
"def pformat(\n object,\n indent=_UNSET_SENTINEL,\n width=_UNSET_SENTINEL,\n depth=_UNSET_SENTINEL,\n *,\n ribbon_width=_UNSET_SENTINEL,\n max_seq_len=_UNSET_SENTINEL,\n compact=_UNSET_SENTINEL,\n sort_dict_keys=_UNSET_SENTINEL\n):\n \"\"\"\n Returns a pretty printed representation of the object as a ``str``.\n Accepts the same parameters as :func:`~prettyprinter.pprint`.\n The output is not colored.\n \"\"\"\n sdocs = python_to_sdocs(\n object,\n **_merge_defaults(\n indent=indent,\n width=width,\n depth=depth,\n ribbon_width=ribbon_width,\n max_seq_len=max_seq_len,\n sort_dict_keys=sort_dict_keys,\n )\n )\n stream = StringIO()\n default_render_to_stream(stream, sdocs)\n return stream.getvalue()\n"
] |
# -*- coding: utf-8 -*-
"""Top-level package for prettyprinter."""
__author__ = """Tommi Kaikkonen"""
__email__ = 'kaikkonentommi@gmail.com'
__version__ = '0.17.0'
from io import StringIO
from importlib import import_module
from types import MappingProxyType
import sys
import warnings
from pprint import isrecursive, isreadable, saferepr
from .color import colored_render_to_stream, set_default_style
from .prettyprinter import (
is_registered,
python_to_sdocs,
register_pretty,
pretty_call,
pretty_call_alt,
comment,
trailing_comment,
)
from .render import default_render_to_stream
# Registers standard library types
# as a side effect
import prettyprinter.pretty_stdlib # noqa
__all__ = [
'cpprint',
'pprint',
'pformat',
'pretty_repr',
'install_extras',
'set_default_style',
'set_default_config',
'get_default_config',
'register_pretty',
'pretty_call',
'pretty_call_alt',
'trailing_comment',
'comment',
'python_to_sdocs',
'default_render_to_stream',
'PrettyPrinter',
'saferepr',
'isreadable',
'isrecursive',
]
class UnsetSentinel:
def __repr__(self):
return 'UNSET'
__str__ = __repr__
_UNSET_SENTINEL = UnsetSentinel()
_default_config = {
'indent': 4,
'width': 79,
'ribbon_width': 71,
'depth': None,
'max_seq_len': 1000,
'sort_dict_keys': False,
}
def _merge_defaults(
*, indent, width, depth, ribbon_width, max_seq_len, sort_dict_keys
):
kwargs = locals()
return {key: kwargs[key] if kwargs[key] is not _UNSET_SENTINEL else default
for key, default in _default_config.items()}
def get_default_config():
"""Returns a read-only view of the current configuration"""
return MappingProxyType(_default_config)
class PrettyPrinter:
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def pprint(self, object):
pprint(*self._args, **self._kwargs)
def pformat(self, object):
return pformat(*self._args, **self._kwargs)
def isrecursive(self, object):
return isrecursive(object)
def isreadable(self, object):
return isreadable(object)
def format(self, object):
raise NotImplementedError
def pformat(
object,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
compact=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL
):
"""
Returns a pretty printed representation of the object as a ``str``.
Accepts the same parameters as :func:`~prettyprinter.pprint`.
The output is not colored.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = StringIO()
default_render_to_stream(stream, sdocs)
return stream.getvalue()
def pprint(
object,
stream=_UNSET_SENTINEL,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
compact=False,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL,
end='\n'
):
"""Pretty print a Python value ``object`` to ``stream``,
which defaults to ``sys.stdout``. The output will not be colored.
:param indent: number of spaces to add for each level of nesting.
:param stream: the output stream, defaults to ``sys.stdout``
:param width: a soft maximum allowed number of columns in the output,
which the layout algorithm attempts to stay under.
:param depth: maximum depth to print nested structures
:param ribbon_width: a soft maximum allowed number of columns in the output,
after indenting the line
:param max_seq_len: a maximum sequence length that applies to subclasses of
lists, sets, frozensets, tuples and dicts. A trailing
comment that indicates the number of truncated elements.
Setting max_seq_len to ``None`` disables truncation.
:param sort_dict_keys: a ``bool`` value indicating if dict keys should be
sorted in the output. Defaults to ``False``, in
which case the default order is used, which is the
insertion order in CPython 3.6+.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = (
# This is not in _default_config in case
# sys.stdout changes.
sys.stdout
if stream is _UNSET_SENTINEL
else stream
)
default_render_to_stream(stream, sdocs)
if end:
stream.write(end)
def cpprint(
object,
stream=_UNSET_SENTINEL,
indent=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
*,
compact=False,
ribbon_width=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL,
style=None,
end='\n'
):
"""Pretty print a Python value ``object`` to ``stream``,
which defaults to sys.stdout. The output will be colored and
syntax highlighted.
:param indent: number of spaces to add for each level of nesting.
:param stream: the output stream, defaults to sys.stdout
:param width: a soft maximum allowed number of columns in the output,
which the layout algorithm attempts to stay under.
:param depth: maximum depth to print nested structures
:param ribbon_width: a soft maximum allowed number of columns in the output,
after indenting the line
:param max_seq_len: a maximum sequence length that applies to subclasses of
lists, sets, frozensets, tuples and dicts. A trailing
comment that indicates the number of truncated elements.
Setting max_seq_len to ``None`` disables truncation.
:param sort_dict_keys: a ``bool`` value indicating if dict keys should be
sorted in the output. Defaults to ``False``, in
which case the default order is used, which is the
insertion order in CPython 3.6+.
:param style: one of ``'light'``, ``'dark'`` or a subclass
of ``pygments.styles.Style``. If omitted,
will use the default style. If the default style
is not changed by the user with :func:`~prettyprinter.set_default_style`,
the default is ``'dark'``.
"""
sdocs = python_to_sdocs(
object,
**_merge_defaults(
indent=indent,
width=width,
depth=depth,
ribbon_width=ribbon_width,
max_seq_len=max_seq_len,
sort_dict_keys=sort_dict_keys,
)
)
stream = (
# This is not in _default_config in case
# sys.stdout changes.
sys.stdout
if stream is _UNSET_SENTINEL
else stream
)
colored_render_to_stream(stream, sdocs, style=style)
if end:
stream.write(end)
ALL_EXTRAS = frozenset([
'attrs',
'django',
'ipython',
'ipython_repr_pretty',
'numpy',
'python',
'requests',
'dataclasses',
])
EMPTY_SET = frozenset()
def install_extras(
include=ALL_EXTRAS,
*,
exclude=EMPTY_SET,
raise_on_error=False,
warn_on_error=True
):
"""Installs extras.
Installing an extra means registering pretty printers for objects from third
party libraries and/or enabling integrations with other python programs.
- ``'attrs'`` - automatically pretty prints classes created using the ``attrs`` package.
- ``'dataclasses'`` - automatically pretty prints classes created using the ``dataclasses``
module.
- ``'django'`` - automatically pretty prints Model and QuerySet subclasses defined in your
Django apps.
- ``numpy`` - automatically pretty prints numpy scalars with explicit types, and,
for numpy>=1.14, numpy arrays.
- ``'requests'`` - automatically pretty prints Requests, Responses, Sessions, etc.
- ``'ipython'`` - makes prettyprinter the default printer in the IPython shell.
- ``'python'`` - makes prettyprinter the default printer in the default Python shell.
- ``'ipython_repr_pretty'`` - automatically prints objects that define a ``_repr_pretty_``
method to integrate with `IPython.lib.pretty
<http://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html#extending>`_.
:param include: an iterable of strs representing the extras to include.
All extras are included by default.
:param exclude: an iterable of strs representing the extras to exclude.
""" # noqa
include = set(include)
exclude = set(exclude)
unexisting_extras = (include | exclude) - ALL_EXTRAS
if unexisting_extras:
raise ValueError(
"The following extras don't exist: {}".format(
', '.join(unexisting_extras)
)
)
extras_to_install = (ALL_EXTRAS & include) - exclude
for extra in extras_to_install:
module_name = 'prettyprinter.extras.' + extra
try:
extra_module = import_module(module_name)
except ImportError as e:
if raise_on_error:
raise e
if warn_on_error:
warnings.warn(
"Failed to import '{0}' PrettyPrinter extra. "
"If you don't need it, call install_extras with "
"exclude=['{0}']".format(extra)
)
else:
try:
extra_module.install()
except Exception as exc:
if raise_on_error:
raise exc
elif warn_on_error:
warnings.warn(
"Failed to install '{0}' PrettyPrinter extra. "
"If you don't need it, call install_extras with "
"exclude=['{0}']".format(extra)
)
def set_default_config(
*,
style=_UNSET_SENTINEL,
max_seq_len=_UNSET_SENTINEL,
width=_UNSET_SENTINEL,
ribbon_width=_UNSET_SENTINEL,
depth=_UNSET_SENTINEL,
sort_dict_keys=_UNSET_SENTINEL
):
"""
Sets the default configuration values used when calling
`pprint`, `cpprint`, or `pformat`, if those values weren't
explicitly provided. Only overrides the values provided in
the keyword arguments.
"""
global _default_config
if style is not _UNSET_SENTINEL:
set_default_style(style)
new_defaults = {**_default_config}
if max_seq_len is not _UNSET_SENTINEL:
new_defaults['max_seq_len'] = max_seq_len
if width is not _UNSET_SENTINEL:
new_defaults['width'] = width
if ribbon_width is not _UNSET_SENTINEL:
new_defaults['ribbon_width'] = ribbon_width
if depth is not _UNSET_SENTINEL:
new_defaults['depth'] = depth
if sort_dict_keys is not _UNSET_SENTINEL:
new_defaults['sort_dict_keys'] = sort_dict_keys
_default_config = new_defaults
return new_defaults
|
kstateome/django-cas
|
cas/models.py
|
get_tgt_for
|
python
|
def get_tgt_for(user):
if not settings.CAS_PROXY_CALLBACK:
raise CasConfigException("No proxy callback set in settings")
try:
return Tgt.objects.get(username=user.username)
except ObjectDoesNotExist:
logger.warning('No ticket found for user {user}'.format(
user=user.username
))
raise CasTicketException("no ticket found for user " + user.username)
|
Fetch a ticket granting ticket for a given user.
:param user: UserObj
:return: TGT or Exepction
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/models.py#L77-L94
| null |
import logging
from datetime import datetime
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_save
from cas.exceptions import CasTicketException, CasConfigException
logger = logging.getLogger(__name__)
class Tgt(models.Model):
username = models.CharField(max_length=255, unique=True)
tgt = models.CharField(max_length=255)
def get_proxy_ticket_for(self, service):
"""
Verifies CAS 2.0+ XML-based authentication ticket.
:param: service
Returns username on success and None on failure.
"""
if not settings.CAS_PROXY_CALLBACK:
raise CasConfigException("No proxy callback set in settings")
params = {'pgt': self.tgt, 'targetService': service}
url = (urljoin(settings.CAS_SERVER_URL, 'proxy') + '?' +
urlencode(params))
page = urlopen(url)
try:
response = page.read()
tree = ElementTree.fromstring(response)
if tree[0].tag.endswith('proxySuccess'):
return tree[0][0].text
else:
logger.warning('Failed to get proxy ticket')
raise CasTicketException('Failed to get proxy ticket: %s' % \
tree[0].text.strip())
finally:
page.close()
class PgtIOU(models.Model):
"""
Proxy granting ticket and IOU
"""
pgtIou = models.CharField(max_length = 255, unique = True)
tgt = models.CharField(max_length = 255)
created = models.DateTimeField(auto_now = True)
def delete_old_tickets(**kwargs):
"""
Delete tickets if they are over 2 days old
kwargs = ['raw', 'signal', 'instance', 'sender', 'created']
"""
sender = kwargs.get('sender', None)
now = datetime.now()
expire = datetime(now.year, now.month, now.day - 2)
sender.objects.filter(created__lt=expire).delete()
post_save.connect(delete_old_tickets, sender=PgtIOU)
|
kstateome/django-cas
|
cas/models.py
|
delete_old_tickets
|
python
|
def delete_old_tickets(**kwargs):
sender = kwargs.get('sender', None)
now = datetime.now()
expire = datetime(now.year, now.month, now.day - 2)
sender.objects.filter(created__lt=expire).delete()
|
Delete tickets if they are over 2 days old
kwargs = ['raw', 'signal', 'instance', 'sender', 'created']
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/models.py#L97-L106
| null |
import logging
from datetime import datetime
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_save
from cas.exceptions import CasTicketException, CasConfigException
logger = logging.getLogger(__name__)
class Tgt(models.Model):
username = models.CharField(max_length=255, unique=True)
tgt = models.CharField(max_length=255)
def get_proxy_ticket_for(self, service):
"""
Verifies CAS 2.0+ XML-based authentication ticket.
:param: service
Returns username on success and None on failure.
"""
if not settings.CAS_PROXY_CALLBACK:
raise CasConfigException("No proxy callback set in settings")
params = {'pgt': self.tgt, 'targetService': service}
url = (urljoin(settings.CAS_SERVER_URL, 'proxy') + '?' +
urlencode(params))
page = urlopen(url)
try:
response = page.read()
tree = ElementTree.fromstring(response)
if tree[0].tag.endswith('proxySuccess'):
return tree[0][0].text
else:
logger.warning('Failed to get proxy ticket')
raise CasTicketException('Failed to get proxy ticket: %s' % \
tree[0].text.strip())
finally:
page.close()
class PgtIOU(models.Model):
"""
Proxy granting ticket and IOU
"""
pgtIou = models.CharField(max_length = 255, unique = True)
tgt = models.CharField(max_length = 255)
created = models.DateTimeField(auto_now = True)
def get_tgt_for(user):
    """
    Fetch a ticket granting ticket for a given user.

    :param user: user object with a ``username`` attribute
    :return: Tgt instance for the user
    :raises CasConfigException: if CAS_PROXY_CALLBACK is not set
    :raises CasTicketException: if no Tgt row exists for the user
    """
    # Proxy tickets are only meaningful when a proxy callback is configured.
    if not settings.CAS_PROXY_CALLBACK:
        raise CasConfigException("No proxy callback set in settings")
    try:
        return Tgt.objects.get(username=user.username)
    except ObjectDoesNotExist:
        logger.warning('No ticket found for user {user}'.format(
            user=user.username
        ))
        raise CasTicketException("no ticket found for user " + user.username)
post_save.connect(delete_old_tickets, sender=PgtIOU)
|
kstateome/django-cas
|
cas/models.py
|
Tgt.get_proxy_ticket_for
|
python
|
def get_proxy_ticket_for(self, service):
if not settings.CAS_PROXY_CALLBACK:
raise CasConfigException("No proxy callback set in settings")
params = {'pgt': self.tgt, 'targetService': service}
url = (urljoin(settings.CAS_SERVER_URL, 'proxy') + '?' +
urlencode(params))
page = urlopen(url)
try:
response = page.read()
tree = ElementTree.fromstring(response)
if tree[0].tag.endswith('proxySuccess'):
return tree[0][0].text
else:
logger.warning('Failed to get proxy ticket')
raise CasTicketException('Failed to get proxy ticket: %s' % \
tree[0].text.strip())
finally:
page.close()
|
Verifies CAS 2.0+ XML-based authentication ticket.
:param: service
Returns username on success and None on failure.
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/models.py#L36-L65
| null |
class Tgt(models.Model):
username = models.CharField(max_length=255, unique=True)
tgt = models.CharField(max_length=255)
|
kstateome/django-cas
|
cas/backends.py
|
_verify_cas1
|
python
|
def _verify_cas1(ticket, service):
params = {'ticket': ticket, 'service': service}
url = (urljoin(settings.CAS_SERVER_URL, 'validate') + '?' +
urlencode(params))
page = urlopen(url)
try:
verified = page.readline().strip()
if verified == 'yes':
return page.readline().strip()
else:
return None
finally:
page.close()
|
Verifies CAS 1.0 authentication ticket.
:param: ticket
:param: service
Returns username on success and None on failure.
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/backends.py#L36-L58
| null |
import logging
from xml.dom import minidom
import time
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from django.conf import settings
from django.contrib.auth import get_user_model
from cas.exceptions import CasTicketException
from cas.models import Tgt, PgtIOU
from cas.utils import cas_response_callbacks
__all__ = ['CASBackend']
logger = logging.getLogger(__name__)
def _verify_cas2(ticket, service):
"""
Verifies CAS 2.0+ XML-based authentication ticket.
:param: ticket
:param: service
"""
return _internal_verify_cas(ticket, service, 'proxyValidate')
def _verify_cas3(ticket, service):
return _internal_verify_cas(ticket, service, 'p3/proxyValidate')
def _internal_verify_cas(ticket, service, suffix):
    """Verifies CAS 2.0 and 3.0 XML-based authentication ticket.

    :param ticket: service ticket to validate
    :param service: service URL the ticket was issued for
    :param suffix: CAS endpoint path ('proxyValidate' or 'p3/proxyValidate')
    Returns username on success and None on failure.
    """
    params = {'ticket': ticket, 'service': service}
    if settings.CAS_PROXY_CALLBACK:
        # Ask CAS to deliver a proxy granting ticket to our callback URL.
        params['pgtUrl'] = settings.CAS_PROXY_CALLBACK
    url = (urljoin(settings.CAS_SERVER_URL, suffix) + '?' +
           urlencode(params))
    page = urlopen(url)
    username = None
    try:
        response = page.read()
        # Response is parsed twice: ElementTree for the success check and
        # callbacks, minidom for the namespaced cas:* tag lookups.
        tree = ElementTree.fromstring(response)
        document = minidom.parseString(response)
        if tree[0].tag.endswith('authenticationSuccess'):
            if settings.CAS_RESPONSE_CALLBACKS:
                cas_response_callbacks(tree)
            username = tree[0][0].text
            pgt_el = document.getElementsByTagName('cas:proxyGrantingTicket')
            if pgt_el:
                pgt = pgt_el[0].firstChild.nodeValue
                try:
                    # Exchange the value for the PGT row stored earlier by
                    # the proxy-callback request, and attach it to the user.
                    pgtIou = _get_pgtiou(pgt)
                    tgt = Tgt.objects.get(username=username)
                    tgt.tgt = pgtIou.tgt
                    tgt.save()
                    pgtIou.delete()
                except Tgt.DoesNotExist:
                    # First proxy login for this user: create the Tgt row.
                    Tgt.objects.create(username=username, tgt=pgtIou.tgt)
                    logger.info('Creating TGT ticket for {user}'.format(
                        user=username
                    ))
                    pgtIou.delete()
                except Exception as e:
                    # Best effort: proxy setup failure does not fail login.
                    logger.warning('Failed to do proxy authentication. {message}'.format(
                        message=e
                    ))
        else:
            failure = document.getElementsByTagName('cas:authenticationFailure')
            if failure:
                logger.warn('Authentication failed from CAS server: %s',
                            failure[0].firstChild.nodeValue)
    except Exception as e:
        # Any parse/network error results in a failed (None) authentication.
        logger.error('Failed to verify CAS authentication: {message}'.format(
            message=e
        ))
    finally:
        page.close()
    return username
def verify_proxy_ticket(ticket, service):
    """
    Verifies CAS 2.0+ XML-based proxy ticket.

    :param: ticket
    :param: service
    Returns a dict with ``username`` and the ``proxies`` chain on success
    and None on failure.
    """
    params = {'ticket': ticket, 'service': service}
    url = (urljoin(settings.CAS_SERVER_URL, 'proxyValidate') + '?' +
           urlencode(params))
    page = urlopen(url)
    try:
        response = page.read()
        tree = ElementTree.fromstring(response)
        # tree[0] is the success/failure element inside the CAS envelope.
        if tree[0].tag.endswith('authenticationSuccess'):
            username = tree[0][0].text
            proxies = []
            # A second child, when present, lists the proxy chain entries.
            if len(tree[0]) > 1:
                for element in tree[0][1]:
                    proxies.append(element.text)
            return {"username": username, "proxies": proxies}
        else:
            return None
    finally:
        page.close()
_PROTOCOLS = {'1': _verify_cas1, '2': _verify_cas2, '3': _verify_cas3}
if settings.CAS_VERSION not in _PROTOCOLS:
raise ValueError('Unsupported CAS_VERSION %r' % settings.CAS_VERSION)
_verify = _PROTOCOLS[settings.CAS_VERSION]
def _get_pgtiou(pgt):
    """
    Returns a PgtIOU object given a pgt.
    The PgtIOU (tgt) is set by the CAS server in a different request
    that has completed before this call, however, it may not be found in
    the database by this calling thread, hence the attempt to get the
    ticket is retried for up to 5 seconds. This should be handled some
    better way.
    Users can opt out of this waiting period by setting CAS_PGT_FETCH_WAIT = False

    :param: pgt
    :raises CasTicketException: if no PgtIOU row appears before the
        retries run out
    """
    pgtIou = None
    retries_left = 5
    # With CAS_PGT_FETCH_WAIT disabled, make a single immediate attempt.
    if not settings.CAS_PGT_FETCH_WAIT:
        retries_left = 1
    # pgtIou is never reassigned, so the loop runs until a row is found
    # (early return inside the try) or retries_left reaches zero.
    while not pgtIou and retries_left:
        try:
            return PgtIOU.objects.get(tgt=pgt)
        except PgtIOU.DoesNotExist:
            if settings.CAS_PGT_FETCH_WAIT:
                # Give the CAS callback request time to commit the row.
                time.sleep(1)
            retries_left -= 1
            logger.info('Did not fetch ticket, trying again. {tries} tries left.'.format(
                tries=retries_left
            ))
    raise CasTicketException("Could not find pgtIou for pgt %s" % pgt)
class CASBackend(object):
    """
    CAS authentication backend
    """
    # Legacy Django auth-backend capability flags.
    supports_object_permissions = False
    supports_inactive_user = False
    def authenticate(self, request, ticket, service):
        """
        Verifies CAS ticket and gets or creates User object
        NB: Use of PT to identify proxy

        :param request: current request (unused; part of the backend API)
        :param ticket: CAS service ticket to validate
        :param service: service URL the ticket was issued for
        :returns: User instance, or None when validation or lookup fails
        """
        User = get_user_model()
        # _verify is the protocol-specific validator selected from
        # settings.CAS_VERSION at import time.
        username = _verify(ticket, service)
        if not username:
            return None
        try:
            user = User.objects.get(username__iexact=username)
        except User.DoesNotExist:
            # user will have an "unusable" password
            if settings.CAS_AUTO_CREATE_USER:
                user = User.objects.create_user(username, '')
                user.save()
            else:
                user = None
        return user
    def get_user(self, user_id):
        """
        Retrieve the user's entry in the User model if it exists

        :param user_id: primary key of the user
        :returns: User instance or None
        """
        User = get_user_model()
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
|
kstateome/django-cas
|
cas/backends.py
|
_internal_verify_cas
|
python
|
def _internal_verify_cas(ticket, service, suffix):
params = {'ticket': ticket, 'service': service}
if settings.CAS_PROXY_CALLBACK:
params['pgtUrl'] = settings.CAS_PROXY_CALLBACK
url = (urljoin(settings.CAS_SERVER_URL, suffix) + '?' +
urlencode(params))
page = urlopen(url)
username = None
try:
response = page.read()
tree = ElementTree.fromstring(response)
document = minidom.parseString(response)
if tree[0].tag.endswith('authenticationSuccess'):
if settings.CAS_RESPONSE_CALLBACKS:
cas_response_callbacks(tree)
username = tree[0][0].text
pgt_el = document.getElementsByTagName('cas:proxyGrantingTicket')
if pgt_el:
pgt = pgt_el[0].firstChild.nodeValue
try:
pgtIou = _get_pgtiou(pgt)
tgt = Tgt.objects.get(username=username)
tgt.tgt = pgtIou.tgt
tgt.save()
pgtIou.delete()
except Tgt.DoesNotExist:
Tgt.objects.create(username=username, tgt=pgtIou.tgt)
logger.info('Creating TGT ticket for {user}'.format(
user=username
))
pgtIou.delete()
except Exception as e:
logger.warning('Failed to do proxy authentication. {message}'.format(
message=e
))
else:
failure = document.getElementsByTagName('cas:authenticationFailure')
if failure:
logger.warn('Authentication failed from CAS server: %s',
failure[0].firstChild.nodeValue)
except Exception as e:
logger.error('Failed to verify CAS authentication: {message}'.format(
message=e
))
finally:
page.close()
return username
|
Verifies CAS 2.0 and 3.0 XML-based authentication ticket.
Returns username on success and None on failure.
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/backends.py#L75-L138
| null |
import logging
from xml.dom import minidom
import time
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from django.conf import settings
from django.contrib.auth import get_user_model
from cas.exceptions import CasTicketException
from cas.models import Tgt, PgtIOU
from cas.utils import cas_response_callbacks
__all__ = ['CASBackend']
logger = logging.getLogger(__name__)
def _verify_cas1(ticket, service):
    """
    Verifies CAS 1.0 authentication ticket.

    :param: ticket
    :param: service
    Returns username on success and None on failure.
    """
    params = {'ticket': ticket, 'service': service}
    url = (urljoin(settings.CAS_SERVER_URL, 'validate') + '?' +
           urlencode(params))
    page = urlopen(url)
    try:
        verified = page.readline().strip()
        # urlopen yields bytes on Python 3, so the old comparison
        # (bytes == 'yes') could never match there; normalize to text.
        if isinstance(verified, bytes):
            verified = verified.decode('utf-8')
        if verified == 'yes':
            username = page.readline().strip()
            if isinstance(username, bytes):
                username = username.decode('utf-8')
            return username
        else:
            return None
    finally:
        page.close()
def _verify_cas2(ticket, service):
"""
Verifies CAS 2.0+ XML-based authentication ticket.
:param: ticket
:param: service
"""
return _internal_verify_cas(ticket, service, 'proxyValidate')
def _verify_cas3(ticket, service):
return _internal_verify_cas(ticket, service, 'p3/proxyValidate')
def verify_proxy_ticket(ticket, service):
"""
Verifies CAS 2.0+ XML-based proxy ticket.
:param: ticket
:param: service
Returns username on success and None on failure.
"""
params = {'ticket': ticket, 'service': service}
url = (urljoin(settings.CAS_SERVER_URL, 'proxyValidate') + '?' +
urlencode(params))
page = urlopen(url)
try:
response = page.read()
tree = ElementTree.fromstring(response)
if tree[0].tag.endswith('authenticationSuccess'):
username = tree[0][0].text
proxies = []
if len(tree[0]) > 1:
for element in tree[0][1]:
proxies.append(element.text)
return {"username": username, "proxies": proxies}
else:
return None
finally:
page.close()
_PROTOCOLS = {'1': _verify_cas1, '2': _verify_cas2, '3': _verify_cas3}
if settings.CAS_VERSION not in _PROTOCOLS:
raise ValueError('Unsupported CAS_VERSION %r' % settings.CAS_VERSION)
_verify = _PROTOCOLS[settings.CAS_VERSION]
def _get_pgtiou(pgt):
"""
Returns a PgtIOU object given a pgt.
The PgtIOU (tgt) is set by the CAS server in a different request
that has completed before this call, however, it may not be found in
the database by this calling thread, hence the attempt to get the
ticket is retried for up to 5 seconds. This should be handled some
better way.
Users can opt out of this waiting period by setting CAS_PGT_FETCH_WAIT = False
:param: pgt
"""
pgtIou = None
retries_left = 5
if not settings.CAS_PGT_FETCH_WAIT:
retries_left = 1
while not pgtIou and retries_left:
try:
return PgtIOU.objects.get(tgt=pgt)
except PgtIOU.DoesNotExist:
if settings.CAS_PGT_FETCH_WAIT:
time.sleep(1)
retries_left -= 1
logger.info('Did not fetch ticket, trying again. {tries} tries left.'.format(
tries=retries_left
))
raise CasTicketException("Could not find pgtIou for pgt %s" % pgt)
class CASBackend(object):
"""
CAS authentication backend
"""
supports_object_permissions = False
supports_inactive_user = False
def authenticate(self, request, ticket, service):
"""
Verifies CAS ticket and gets or creates User object
NB: Use of PT to identify proxy
"""
User = get_user_model()
username = _verify(ticket, service)
if not username:
return None
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
# user will have an "unusable" password
if settings.CAS_AUTO_CREATE_USER:
user = User.objects.create_user(username, '')
user.save()
else:
user = None
return user
def get_user(self, user_id):
"""
Retrieve the user's entry in the User model if it exists
"""
User = get_user_model()
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
|
kstateome/django-cas
|
cas/backends.py
|
verify_proxy_ticket
|
python
|
def verify_proxy_ticket(ticket, service):
params = {'ticket': ticket, 'service': service}
url = (urljoin(settings.CAS_SERVER_URL, 'proxyValidate') + '?' +
urlencode(params))
page = urlopen(url)
try:
response = page.read()
tree = ElementTree.fromstring(response)
if tree[0].tag.endswith('authenticationSuccess'):
username = tree[0][0].text
proxies = []
if len(tree[0]) > 1:
for element in tree[0][1]:
proxies.append(element.text)
return {"username": username, "proxies": proxies}
else:
return None
finally:
page.close()
|
Verifies CAS 2.0+ XML-based proxy ticket.
:param: ticket
:param: service
Returns username on success and None on failure.
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/backends.py#L141-L171
| null |
import logging
from xml.dom import minidom
import time
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from django.conf import settings
from django.contrib.auth import get_user_model
from cas.exceptions import CasTicketException
from cas.models import Tgt, PgtIOU
from cas.utils import cas_response_callbacks
__all__ = ['CASBackend']
logger = logging.getLogger(__name__)
def _verify_cas1(ticket, service):
"""
Verifies CAS 1.0 authentication ticket.
:param: ticket
:param: service
Returns username on success and None on failure.
"""
params = {'ticket': ticket, 'service': service}
url = (urljoin(settings.CAS_SERVER_URL, 'validate') + '?' +
urlencode(params))
page = urlopen(url)
try:
verified = page.readline().strip()
if verified == 'yes':
return page.readline().strip()
else:
return None
finally:
page.close()
def _verify_cas2(ticket, service):
"""
Verifies CAS 2.0+ XML-based authentication ticket.
:param: ticket
:param: service
"""
return _internal_verify_cas(ticket, service, 'proxyValidate')
def _verify_cas3(ticket, service):
return _internal_verify_cas(ticket, service, 'p3/proxyValidate')
def _internal_verify_cas(ticket, service, suffix):
"""Verifies CAS 2.0 and 3.0 XML-based authentication ticket.
Returns username on success and None on failure.
"""
params = {'ticket': ticket, 'service': service}
if settings.CAS_PROXY_CALLBACK:
params['pgtUrl'] = settings.CAS_PROXY_CALLBACK
url = (urljoin(settings.CAS_SERVER_URL, suffix) + '?' +
urlencode(params))
page = urlopen(url)
username = None
try:
response = page.read()
tree = ElementTree.fromstring(response)
document = minidom.parseString(response)
if tree[0].tag.endswith('authenticationSuccess'):
if settings.CAS_RESPONSE_CALLBACKS:
cas_response_callbacks(tree)
username = tree[0][0].text
pgt_el = document.getElementsByTagName('cas:proxyGrantingTicket')
if pgt_el:
pgt = pgt_el[0].firstChild.nodeValue
try:
pgtIou = _get_pgtiou(pgt)
tgt = Tgt.objects.get(username=username)
tgt.tgt = pgtIou.tgt
tgt.save()
pgtIou.delete()
except Tgt.DoesNotExist:
Tgt.objects.create(username=username, tgt=pgtIou.tgt)
logger.info('Creating TGT ticket for {user}'.format(
user=username
))
pgtIou.delete()
except Exception as e:
logger.warning('Failed to do proxy authentication. {message}'.format(
message=e
))
else:
failure = document.getElementsByTagName('cas:authenticationFailure')
if failure:
logger.warn('Authentication failed from CAS server: %s',
failure[0].firstChild.nodeValue)
except Exception as e:
logger.error('Failed to verify CAS authentication: {message}'.format(
message=e
))
finally:
page.close()
return username
_PROTOCOLS = {'1': _verify_cas1, '2': _verify_cas2, '3': _verify_cas3}
if settings.CAS_VERSION not in _PROTOCOLS:
raise ValueError('Unsupported CAS_VERSION %r' % settings.CAS_VERSION)
_verify = _PROTOCOLS[settings.CAS_VERSION]
def _get_pgtiou(pgt):
"""
Returns a PgtIOU object given a pgt.
The PgtIOU (tgt) is set by the CAS server in a different request
that has completed before this call, however, it may not be found in
the database by this calling thread, hence the attempt to get the
ticket is retried for up to 5 seconds. This should be handled some
better way.
Users can opt out of this waiting period by setting CAS_PGT_FETCH_WAIT = False
:param: pgt
"""
pgtIou = None
retries_left = 5
if not settings.CAS_PGT_FETCH_WAIT:
retries_left = 1
while not pgtIou and retries_left:
try:
return PgtIOU.objects.get(tgt=pgt)
except PgtIOU.DoesNotExist:
if settings.CAS_PGT_FETCH_WAIT:
time.sleep(1)
retries_left -= 1
logger.info('Did not fetch ticket, trying again. {tries} tries left.'.format(
tries=retries_left
))
raise CasTicketException("Could not find pgtIou for pgt %s" % pgt)
class CASBackend(object):
"""
CAS authentication backend
"""
supports_object_permissions = False
supports_inactive_user = False
def authenticate(self, request, ticket, service):
"""
Verifies CAS ticket and gets or creates User object
NB: Use of PT to identify proxy
"""
User = get_user_model()
username = _verify(ticket, service)
if not username:
return None
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
# user will have an "unusable" password
if settings.CAS_AUTO_CREATE_USER:
user = User.objects.create_user(username, '')
user.save()
else:
user = None
return user
def get_user(self, user_id):
"""
Retrieve the user's entry in the User model if it exists
"""
User = get_user_model()
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
|
kstateome/django-cas
|
cas/backends.py
|
_get_pgtiou
|
python
|
def _get_pgtiou(pgt):
pgtIou = None
retries_left = 5
if not settings.CAS_PGT_FETCH_WAIT:
retries_left = 1
while not pgtIou and retries_left:
try:
return PgtIOU.objects.get(tgt=pgt)
except PgtIOU.DoesNotExist:
if settings.CAS_PGT_FETCH_WAIT:
time.sleep(1)
retries_left -= 1
logger.info('Did not fetch ticket, trying again. {tries} tries left.'.format(
tries=retries_left
))
raise CasTicketException("Could not find pgtIou for pgt %s" % pgt)
|
Returns a PgtIOU object given a pgt.
The PgtIOU (tgt) is set by the CAS server in a different request
that has completed before this call, however, it may not be found in
the database by this calling thread, hence the attempt to get the
ticket is retried for up to 5 seconds. This should be handled some
better way.
Users can opt out of this waiting period by setting CAS_PGT_FETCH_WAIT = False
:param: pgt
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/backends.py#L181-L213
| null |
import logging
from xml.dom import minidom
import time
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from django.conf import settings
from django.contrib.auth import get_user_model
from cas.exceptions import CasTicketException
from cas.models import Tgt, PgtIOU
from cas.utils import cas_response_callbacks
__all__ = ['CASBackend']
logger = logging.getLogger(__name__)
def _verify_cas1(ticket, service):
"""
Verifies CAS 1.0 authentication ticket.
:param: ticket
:param: service
Returns username on success and None on failure.
"""
params = {'ticket': ticket, 'service': service}
url = (urljoin(settings.CAS_SERVER_URL, 'validate') + '?' +
urlencode(params))
page = urlopen(url)
try:
verified = page.readline().strip()
if verified == 'yes':
return page.readline().strip()
else:
return None
finally:
page.close()
def _verify_cas2(ticket, service):
"""
Verifies CAS 2.0+ XML-based authentication ticket.
:param: ticket
:param: service
"""
return _internal_verify_cas(ticket, service, 'proxyValidate')
def _verify_cas3(ticket, service):
return _internal_verify_cas(ticket, service, 'p3/proxyValidate')
def _internal_verify_cas(ticket, service, suffix):
"""Verifies CAS 2.0 and 3.0 XML-based authentication ticket.
Returns username on success and None on failure.
"""
params = {'ticket': ticket, 'service': service}
if settings.CAS_PROXY_CALLBACK:
params['pgtUrl'] = settings.CAS_PROXY_CALLBACK
url = (urljoin(settings.CAS_SERVER_URL, suffix) + '?' +
urlencode(params))
page = urlopen(url)
username = None
try:
response = page.read()
tree = ElementTree.fromstring(response)
document = minidom.parseString(response)
if tree[0].tag.endswith('authenticationSuccess'):
if settings.CAS_RESPONSE_CALLBACKS:
cas_response_callbacks(tree)
username = tree[0][0].text
pgt_el = document.getElementsByTagName('cas:proxyGrantingTicket')
if pgt_el:
pgt = pgt_el[0].firstChild.nodeValue
try:
pgtIou = _get_pgtiou(pgt)
tgt = Tgt.objects.get(username=username)
tgt.tgt = pgtIou.tgt
tgt.save()
pgtIou.delete()
except Tgt.DoesNotExist:
Tgt.objects.create(username=username, tgt=pgtIou.tgt)
logger.info('Creating TGT ticket for {user}'.format(
user=username
))
pgtIou.delete()
except Exception as e:
logger.warning('Failed to do proxy authentication. {message}'.format(
message=e
))
else:
failure = document.getElementsByTagName('cas:authenticationFailure')
if failure:
logger.warn('Authentication failed from CAS server: %s',
failure[0].firstChild.nodeValue)
except Exception as e:
logger.error('Failed to verify CAS authentication: {message}'.format(
message=e
))
finally:
page.close()
return username
def verify_proxy_ticket(ticket, service):
"""
Verifies CAS 2.0+ XML-based proxy ticket.
:param: ticket
:param: service
Returns username on success and None on failure.
"""
params = {'ticket': ticket, 'service': service}
url = (urljoin(settings.CAS_SERVER_URL, 'proxyValidate') + '?' +
urlencode(params))
page = urlopen(url)
try:
response = page.read()
tree = ElementTree.fromstring(response)
if tree[0].tag.endswith('authenticationSuccess'):
username = tree[0][0].text
proxies = []
if len(tree[0]) > 1:
for element in tree[0][1]:
proxies.append(element.text)
return {"username": username, "proxies": proxies}
else:
return None
finally:
page.close()
_PROTOCOLS = {'1': _verify_cas1, '2': _verify_cas2, '3': _verify_cas3}
if settings.CAS_VERSION not in _PROTOCOLS:
raise ValueError('Unsupported CAS_VERSION %r' % settings.CAS_VERSION)
_verify = _PROTOCOLS[settings.CAS_VERSION]
class CASBackend(object):
"""
CAS authentication backend
"""
supports_object_permissions = False
supports_inactive_user = False
def authenticate(self, request, ticket, service):
"""
Verifies CAS ticket and gets or creates User object
NB: Use of PT to identify proxy
"""
User = get_user_model()
username = _verify(ticket, service)
if not username:
return None
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
# user will have an "unusable" password
if settings.CAS_AUTO_CREATE_USER:
user = User.objects.create_user(username, '')
user.save()
else:
user = None
return user
def get_user(self, user_id):
"""
Retrieve the user's entry in the User model if it exists
"""
User = get_user_model()
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
|
kstateome/django-cas
|
cas/backends.py
|
CASBackend.authenticate
|
python
|
def authenticate(self, request, ticket, service):
User = get_user_model()
username = _verify(ticket, service)
if not username:
return None
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
# user will have an "unusable" password
if settings.CAS_AUTO_CREATE_USER:
user = User.objects.create_user(username, '')
user.save()
else:
user = None
return user
|
Verifies CAS ticket and gets or creates User object
NB: Use of PT to identify proxy
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/backends.py#L224-L245
| null |
class CASBackend(object):
"""
CAS authentication backend
"""
supports_object_permissions = False
supports_inactive_user = False
def get_user(self, user_id):
"""
Retrieve the user's entry in the User model if it exists
"""
User = get_user_model()
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
|
kstateome/django-cas
|
cas/decorators.py
|
gateway
|
python
|
def gateway():
if settings.CAS_GATEWAY == False:
raise ImproperlyConfigured('CAS_GATEWAY must be set to True')
def wrap(func):
def wrapped_f(*args):
from cas.views import login
request = args[0]
try:
# use callable for pre-django 2.0
is_authenticated = request.user.is_authenticated()
except TypeError:
is_authenticated = request.user.is_authenticated
if is_authenticated:
# Is Authed, fine
pass
else:
path_with_params = request.path + '?' + urlencode(request.GET.copy())
if request.GET.get('ticket'):
# Not Authed, but have a ticket!
# Try to authenticate
response = login(request, path_with_params, False, True)
if isinstance(response, HttpResponseRedirect):
# For certain instances where a forbidden occurs, we need to pass instead of return a response.
return response
else:
#Not Authed, but no ticket
gatewayed = request.GET.get('gatewayed')
if gatewayed == 'true':
pass
else:
# Not Authed, try to authenticate
response = login(request, path_with_params, False, True)
if isinstance(response, HttpResponseRedirect):
return response
return func(*args)
return wrapped_f
return wrap
|
Authenticates single sign on session if ticket is available,
but doesn't redirect to sign in url otherwise.
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/decorators.py#L60-L106
| null |
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.utils.http import urlquote
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
__all__ = ['permission_required', 'user_passes_test']
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Replacement for django.contrib.auth.decorators.user_passes_test that
returns 403 Forbidden if the user is already logged in.
"""
if not login_url:
login_url = settings.LOGIN_URL
def decorator(view_func):
@wraps(view_func)
def wrapper(request, *args, **kwargs):
try:
# use callable for pre-django 2.0
is_authenticated = request.user.is_authenticated()
except TypeError:
is_authenticated = request.user.is_authenticated
if test_func(request.user):
return view_func(request, *args, **kwargs)
elif is_authenticated:
return HttpResponseForbidden('<h1>Permission denied</h1>')
else:
path = '%s?%s=%s' % (login_url, redirect_field_name,
urlquote(request.get_full_path()))
return HttpResponseRedirect(path)
return wrapper
return decorator
def permission_required(perm, login_url=None):
"""
Replacement for django.contrib.auth.decorators.permission_required that
returns 403 Forbidden if the user is already logged in.
"""
return user_passes_test(lambda u: u.has_perm(perm), login_url=login_url)
|
kstateome/django-cas
|
cas/views.py
|
_service_url
|
python
|
def _service_url(request, redirect_to=None, gateway=False):
if settings.CAS_FORCE_SSL_SERVICE_URL:
protocol = 'https://'
else:
protocol = ('http://', 'https://')[request.is_secure()]
host = request.get_host()
service = protocol + host + request.path
if redirect_to:
if '?' in service:
service += '&'
else:
service += '?'
if gateway:
""" If gateway, capture params and reencode them before returning a url """
gateway_params = [(REDIRECT_FIELD_NAME, redirect_to), ('gatewayed', 'true')]
query_dict = request.GET.copy()
try:
del query_dict['ticket']
except:
pass
query_list = query_dict.items()
# remove duplicate params
for item in query_list:
for index, item2 in enumerate(gateway_params):
if item[0] == item2[0]:
gateway_params.pop(index)
extra_params = gateway_params + query_list
#Sort params by key name so they are always in the same order.
sorted_params = sorted(extra_params, key=itemgetter(0))
service += urlencode(sorted_params)
else:
service += urlencode({REDIRECT_FIELD_NAME: redirect_to})
return service
|
Generates application service URL for CAS
:param: request Request Object
:param: redirect_to URL to redriect to
:param: gateway Should this be a gatewayed pass through
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/views.py#L32-L79
| null |
import logging
import datetime
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from operator import itemgetter
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib import auth
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from cas.models import PgtIOU
__all__ = ['login', 'logout']
logger = logging.getLogger(__name__)
def _redirect_url(request):
    """
    Redirects to referring page, or CAS_REDIRECT_URL if no referrer is
    set.

    :param: request RequestObj
    :returns: redirect target, made site-relative when it points at this host
    """
    next = request.GET.get(REDIRECT_FIELD_NAME)
    if not next:
        if settings.CAS_IGNORE_REFERER:
            next = settings.CAS_REDIRECT_URL
        else:
            # Fall back to the referrer, then to the configured default.
            next = request.META.get('HTTP_REFERER', settings.CAS_REDIRECT_URL)
    host = request.get_host()
    prefix = (('http://', 'https://')[request.is_secure()] + host)
    # Strip our own scheme+host so the redirect stays site-relative.
    if next.startswith(prefix):
        next = next[len(prefix):]
    return next
def _login_url(service, ticket='ST', gateway=False):
"""
Generates CAS login URL
:param: service Service URL
:param: ticket Ticket
:param: gateway Gatewayed
"""
LOGINS = {'ST': 'login',
'PT': 'proxyValidate'}
if gateway:
params = {'service': service, 'gateway': 'true'}
else:
params = {'service': service}
if settings.CAS_EXTRA_LOGIN_PARAMS:
params.update(settings.CAS_EXTRA_LOGIN_PARAMS)
if not ticket:
ticket = 'ST'
login_type = LOGINS.get(ticket[:2], 'login')
return urlparse.urljoin(settings.CAS_SERVER_URL, login_type) + '?' + urlencode(params)
def _logout_url(request, next_page=None):
"""
Generates CAS logout URL
:param: request RequestObj
:param: next_page Page to redirect after logout.
"""
url = urlparse.urljoin(settings.CAS_SERVER_URL, 'logout')
if next_page and getattr(settings, 'CAS_PROVIDE_URL_TO_LOGOUT', True):
parsed_url = urlparse.urlparse(next_page)
if parsed_url.scheme: #If next_page is a protocol-rooted url, skip redirect url construction
url += '?' + urlencode({'service': next_page})
else:
protocol = ('http://', 'https://')[request.is_secure()]
host = request.get_host()
url += '?' + urlencode({'service': protocol + host + next_page})
return url
def login(request, next_page=None, required=False, gateway=False):
"""
Forwards to CAS login URL or verifies CAS ticket
:param: request RequestObj
:param: next_page Next page to redirect after login
:param: required
:param: gateway Gatewayed response
"""
if not next_page:
next_page = _redirect_url(request)
try:
# use callable for pre-django 2.0
is_authenticated = request.user.is_authenticated()
except TypeError:
is_authenticated = request.user.is_authenticated
if is_authenticated:
return HttpResponseRedirect(next_page)
ticket = request.GET.get('ticket')
if gateway:
service = _service_url(request, next_page, True)
else:
service = _service_url(request, next_page, False)
if ticket:
user = auth.authenticate(ticket=ticket, service=service)
if user is not None:
auth.login(request, user)
if settings.CAS_PROXY_CALLBACK:
proxy_callback(request)
return HttpResponseRedirect(next_page)
elif settings.CAS_RETRY_LOGIN or required:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False))
else:
logger.warning('User has a valid ticket but not a valid session')
# Has ticket, not session
if gateway:
# Gatewayed responses should nto redirect.
return False
if getattr(settings, 'CAS_CUSTOM_FORBIDDEN'):
return HttpResponseRedirect(reverse(settings.CAS_CUSTOM_FORBIDDEN) + "?" + request.META['QUERY_STRING'])
else:
error = "<h1>Forbidden</h1><p>Login failed.</p>"
return HttpResponseForbidden(error)
else:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False))
def logout(request, next_page=None):
"""
Redirects to CAS logout page
:param: request RequestObj
:param: next_page Page to redirect to
"""
auth.logout(request)
if not next_page:
next_page = _redirect_url(request)
if settings.CAS_LOGOUT_COMPLETELY:
return HttpResponseRedirect(_logout_url(request, next_page))
else:
return HttpResponseRedirect(next_page)
def proxy_callback(request):
"""Handles CAS 2.0+ XML-based proxy callback call.
Stores the proxy granting ticket in the database for
future use.
NB: Use created and set it in python in case database
has issues with setting up the default timestamp value
"""
pgtIou = request.GET.get('pgtIou')
tgt = request.GET.get('pgtId')
if not (pgtIou and tgt):
logger.info('No pgtIou or tgt found in request.GET')
return HttpResponse('No pgtIOO', content_type="text/plain")
try:
PgtIOU.objects.create(tgt=tgt, pgtIou=pgtIou, created=datetime.datetime.now())
request.session['pgt-TICKET'] = pgtIou
return HttpResponse('PGT ticket is: {ticket}'.format(ticket=pgtIou), content_type="text/plain")
except Exception as e:
logger.warning('PGT storage failed. {message}'.format(
message=e
))
return HttpResponse('PGT storage failed for {request}'.format(request=str(request.GET)),
content_type="text/plain")
|
kstateome/django-cas
|
cas/views.py
|
_redirect_url
|
python
|
def _redirect_url(request):
next = request.GET.get(REDIRECT_FIELD_NAME)
if not next:
if settings.CAS_IGNORE_REFERER:
next = settings.CAS_REDIRECT_URL
else:
next = request.META.get('HTTP_REFERER', settings.CAS_REDIRECT_URL)
host = request.get_host()
prefix = (('http://', 'https://')[request.is_secure()] + host)
if next.startswith(prefix):
next = next[len(prefix):]
return next
|
Redirects to referring page, or CAS_REDIRECT_URL if no referrer is
set.
:param: request RequestObj
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/views.py#L82-L105
| null |
import logging
import datetime
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from operator import itemgetter
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib import auth
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from cas.models import PgtIOU
__all__ = ['login', 'logout']
logger = logging.getLogger(__name__)
def _service_url(request, redirect_to=None, gateway=False):
"""
Generates application service URL for CAS
:param: request Request Object
:param: redirect_to URL to redriect to
:param: gateway Should this be a gatewayed pass through
"""
if settings.CAS_FORCE_SSL_SERVICE_URL:
protocol = 'https://'
else:
protocol = ('http://', 'https://')[request.is_secure()]
host = request.get_host()
service = protocol + host + request.path
if redirect_to:
if '?' in service:
service += '&'
else:
service += '?'
if gateway:
""" If gateway, capture params and reencode them before returning a url """
gateway_params = [(REDIRECT_FIELD_NAME, redirect_to), ('gatewayed', 'true')]
query_dict = request.GET.copy()
try:
del query_dict['ticket']
except:
pass
query_list = query_dict.items()
# remove duplicate params
for item in query_list:
for index, item2 in enumerate(gateway_params):
if item[0] == item2[0]:
gateway_params.pop(index)
extra_params = gateway_params + query_list
#Sort params by key name so they are always in the same order.
sorted_params = sorted(extra_params, key=itemgetter(0))
service += urlencode(sorted_params)
else:
service += urlencode({REDIRECT_FIELD_NAME: redirect_to})
return service
def _login_url(service, ticket='ST', gateway=False):
"""
Generates CAS login URL
:param: service Service URL
:param: ticket Ticket
:param: gateway Gatewayed
"""
LOGINS = {'ST': 'login',
'PT': 'proxyValidate'}
if gateway:
params = {'service': service, 'gateway': 'true'}
else:
params = {'service': service}
if settings.CAS_EXTRA_LOGIN_PARAMS:
params.update(settings.CAS_EXTRA_LOGIN_PARAMS)
if not ticket:
ticket = 'ST'
login_type = LOGINS.get(ticket[:2], 'login')
return urlparse.urljoin(settings.CAS_SERVER_URL, login_type) + '?' + urlencode(params)
def _logout_url(request, next_page=None):
"""
Generates CAS logout URL
:param: request RequestObj
:param: next_page Page to redirect after logout.
"""
url = urlparse.urljoin(settings.CAS_SERVER_URL, 'logout')
if next_page and getattr(settings, 'CAS_PROVIDE_URL_TO_LOGOUT', True):
parsed_url = urlparse.urlparse(next_page)
if parsed_url.scheme: #If next_page is a protocol-rooted url, skip redirect url construction
url += '?' + urlencode({'service': next_page})
else:
protocol = ('http://', 'https://')[request.is_secure()]
host = request.get_host()
url += '?' + urlencode({'service': protocol + host + next_page})
return url
def login(request, next_page=None, required=False, gateway=False):
"""
Forwards to CAS login URL or verifies CAS ticket
:param: request RequestObj
:param: next_page Next page to redirect after login
:param: required
:param: gateway Gatewayed response
"""
if not next_page:
next_page = _redirect_url(request)
try:
# use callable for pre-django 2.0
is_authenticated = request.user.is_authenticated()
except TypeError:
is_authenticated = request.user.is_authenticated
if is_authenticated:
return HttpResponseRedirect(next_page)
ticket = request.GET.get('ticket')
if gateway:
service = _service_url(request, next_page, True)
else:
service = _service_url(request, next_page, False)
if ticket:
user = auth.authenticate(ticket=ticket, service=service)
if user is not None:
auth.login(request, user)
if settings.CAS_PROXY_CALLBACK:
proxy_callback(request)
return HttpResponseRedirect(next_page)
elif settings.CAS_RETRY_LOGIN or required:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False))
else:
logger.warning('User has a valid ticket but not a valid session')
# Has ticket, not session
if gateway:
# Gatewayed responses should nto redirect.
return False
if getattr(settings, 'CAS_CUSTOM_FORBIDDEN'):
return HttpResponseRedirect(reverse(settings.CAS_CUSTOM_FORBIDDEN) + "?" + request.META['QUERY_STRING'])
else:
error = "<h1>Forbidden</h1><p>Login failed.</p>"
return HttpResponseForbidden(error)
else:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False))
def logout(request, next_page=None):
"""
Redirects to CAS logout page
:param: request RequestObj
:param: next_page Page to redirect to
"""
auth.logout(request)
if not next_page:
next_page = _redirect_url(request)
if settings.CAS_LOGOUT_COMPLETELY:
return HttpResponseRedirect(_logout_url(request, next_page))
else:
return HttpResponseRedirect(next_page)
def proxy_callback(request):
"""Handles CAS 2.0+ XML-based proxy callback call.
Stores the proxy granting ticket in the database for
future use.
NB: Use created and set it in python in case database
has issues with setting up the default timestamp value
"""
pgtIou = request.GET.get('pgtIou')
tgt = request.GET.get('pgtId')
if not (pgtIou and tgt):
logger.info('No pgtIou or tgt found in request.GET')
return HttpResponse('No pgtIOO', content_type="text/plain")
try:
PgtIOU.objects.create(tgt=tgt, pgtIou=pgtIou, created=datetime.datetime.now())
request.session['pgt-TICKET'] = pgtIou
return HttpResponse('PGT ticket is: {ticket}'.format(ticket=pgtIou), content_type="text/plain")
except Exception as e:
logger.warning('PGT storage failed. {message}'.format(
message=e
))
return HttpResponse('PGT storage failed for {request}'.format(request=str(request.GET)),
content_type="text/plain")
|
kstateome/django-cas
|
cas/views.py
|
_login_url
|
python
|
def _login_url(service, ticket='ST', gateway=False):
LOGINS = {'ST': 'login',
'PT': 'proxyValidate'}
if gateway:
params = {'service': service, 'gateway': 'true'}
else:
params = {'service': service}
if settings.CAS_EXTRA_LOGIN_PARAMS:
params.update(settings.CAS_EXTRA_LOGIN_PARAMS)
if not ticket:
ticket = 'ST'
login_type = LOGINS.get(ticket[:2], 'login')
return urlparse.urljoin(settings.CAS_SERVER_URL, login_type) + '?' + urlencode(params)
|
Generates CAS login URL
:param: service Service URL
:param: ticket Ticket
:param: gateway Gatewayed
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/views.py#L108-L133
| null |
import logging
import datetime
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from operator import itemgetter
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib import auth
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from cas.models import PgtIOU
__all__ = ['login', 'logout']
logger = logging.getLogger(__name__)
def _service_url(request, redirect_to=None, gateway=False):
"""
Generates application service URL for CAS
:param: request Request Object
:param: redirect_to URL to redriect to
:param: gateway Should this be a gatewayed pass through
"""
if settings.CAS_FORCE_SSL_SERVICE_URL:
protocol = 'https://'
else:
protocol = ('http://', 'https://')[request.is_secure()]
host = request.get_host()
service = protocol + host + request.path
if redirect_to:
if '?' in service:
service += '&'
else:
service += '?'
if gateway:
""" If gateway, capture params and reencode them before returning a url """
gateway_params = [(REDIRECT_FIELD_NAME, redirect_to), ('gatewayed', 'true')]
query_dict = request.GET.copy()
try:
del query_dict['ticket']
except:
pass
query_list = query_dict.items()
# remove duplicate params
for item in query_list:
for index, item2 in enumerate(gateway_params):
if item[0] == item2[0]:
gateway_params.pop(index)
extra_params = gateway_params + query_list
#Sort params by key name so they are always in the same order.
sorted_params = sorted(extra_params, key=itemgetter(0))
service += urlencode(sorted_params)
else:
service += urlencode({REDIRECT_FIELD_NAME: redirect_to})
return service
def _redirect_url(request):
"""
Redirects to referring page, or CAS_REDIRECT_URL if no referrer is
set.
:param: request RequestObj
"""
next = request.GET.get(REDIRECT_FIELD_NAME)
if not next:
if settings.CAS_IGNORE_REFERER:
next = settings.CAS_REDIRECT_URL
else:
next = request.META.get('HTTP_REFERER', settings.CAS_REDIRECT_URL)
host = request.get_host()
prefix = (('http://', 'https://')[request.is_secure()] + host)
if next.startswith(prefix):
next = next[len(prefix):]
return next
def _logout_url(request, next_page=None):
"""
Generates CAS logout URL
:param: request RequestObj
:param: next_page Page to redirect after logout.
"""
url = urlparse.urljoin(settings.CAS_SERVER_URL, 'logout')
if next_page and getattr(settings, 'CAS_PROVIDE_URL_TO_LOGOUT', True):
parsed_url = urlparse.urlparse(next_page)
if parsed_url.scheme: #If next_page is a protocol-rooted url, skip redirect url construction
url += '?' + urlencode({'service': next_page})
else:
protocol = ('http://', 'https://')[request.is_secure()]
host = request.get_host()
url += '?' + urlencode({'service': protocol + host + next_page})
return url
def login(request, next_page=None, required=False, gateway=False):
"""
Forwards to CAS login URL or verifies CAS ticket
:param: request RequestObj
:param: next_page Next page to redirect after login
:param: required
:param: gateway Gatewayed response
"""
if not next_page:
next_page = _redirect_url(request)
try:
# use callable for pre-django 2.0
is_authenticated = request.user.is_authenticated()
except TypeError:
is_authenticated = request.user.is_authenticated
if is_authenticated:
return HttpResponseRedirect(next_page)
ticket = request.GET.get('ticket')
if gateway:
service = _service_url(request, next_page, True)
else:
service = _service_url(request, next_page, False)
if ticket:
user = auth.authenticate(ticket=ticket, service=service)
if user is not None:
auth.login(request, user)
if settings.CAS_PROXY_CALLBACK:
proxy_callback(request)
return HttpResponseRedirect(next_page)
elif settings.CAS_RETRY_LOGIN or required:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False))
else:
logger.warning('User has a valid ticket but not a valid session')
# Has ticket, not session
if gateway:
# Gatewayed responses should nto redirect.
return False
if getattr(settings, 'CAS_CUSTOM_FORBIDDEN'):
return HttpResponseRedirect(reverse(settings.CAS_CUSTOM_FORBIDDEN) + "?" + request.META['QUERY_STRING'])
else:
error = "<h1>Forbidden</h1><p>Login failed.</p>"
return HttpResponseForbidden(error)
else:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False))
def logout(request, next_page=None):
"""
Redirects to CAS logout page
:param: request RequestObj
:param: next_page Page to redirect to
"""
auth.logout(request)
if not next_page:
next_page = _redirect_url(request)
if settings.CAS_LOGOUT_COMPLETELY:
return HttpResponseRedirect(_logout_url(request, next_page))
else:
return HttpResponseRedirect(next_page)
def proxy_callback(request):
"""Handles CAS 2.0+ XML-based proxy callback call.
Stores the proxy granting ticket in the database for
future use.
NB: Use created and set it in python in case database
has issues with setting up the default timestamp value
"""
pgtIou = request.GET.get('pgtIou')
tgt = request.GET.get('pgtId')
if not (pgtIou and tgt):
logger.info('No pgtIou or tgt found in request.GET')
return HttpResponse('No pgtIOO', content_type="text/plain")
try:
PgtIOU.objects.create(tgt=tgt, pgtIou=pgtIou, created=datetime.datetime.now())
request.session['pgt-TICKET'] = pgtIou
return HttpResponse('PGT ticket is: {ticket}'.format(ticket=pgtIou), content_type="text/plain")
except Exception as e:
logger.warning('PGT storage failed. {message}'.format(
message=e
))
return HttpResponse('PGT storage failed for {request}'.format(request=str(request.GET)),
content_type="text/plain")
|
kstateome/django-cas
|
cas/views.py
|
_logout_url
|
python
|
def _logout_url(request, next_page=None):
url = urlparse.urljoin(settings.CAS_SERVER_URL, 'logout')
if next_page and getattr(settings, 'CAS_PROVIDE_URL_TO_LOGOUT', True):
parsed_url = urlparse.urlparse(next_page)
if parsed_url.scheme: #If next_page is a protocol-rooted url, skip redirect url construction
url += '?' + urlencode({'service': next_page})
else:
protocol = ('http://', 'https://')[request.is_secure()]
host = request.get_host()
url += '?' + urlencode({'service': protocol + host + next_page})
return url
|
Generates CAS logout URL
:param: request RequestObj
:param: next_page Page to redirect after logout.
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/views.py#L136-L156
| null |
import logging
import datetime
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from operator import itemgetter
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib import auth
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from cas.models import PgtIOU
__all__ = ['login', 'logout']
logger = logging.getLogger(__name__)
def _service_url(request, redirect_to=None, gateway=False):
"""
Generates application service URL for CAS
:param: request Request Object
:param: redirect_to URL to redriect to
:param: gateway Should this be a gatewayed pass through
"""
if settings.CAS_FORCE_SSL_SERVICE_URL:
protocol = 'https://'
else:
protocol = ('http://', 'https://')[request.is_secure()]
host = request.get_host()
service = protocol + host + request.path
if redirect_to:
if '?' in service:
service += '&'
else:
service += '?'
if gateway:
""" If gateway, capture params and reencode them before returning a url """
gateway_params = [(REDIRECT_FIELD_NAME, redirect_to), ('gatewayed', 'true')]
query_dict = request.GET.copy()
try:
del query_dict['ticket']
except:
pass
query_list = query_dict.items()
# remove duplicate params
for item in query_list:
for index, item2 in enumerate(gateway_params):
if item[0] == item2[0]:
gateway_params.pop(index)
extra_params = gateway_params + query_list
#Sort params by key name so they are always in the same order.
sorted_params = sorted(extra_params, key=itemgetter(0))
service += urlencode(sorted_params)
else:
service += urlencode({REDIRECT_FIELD_NAME: redirect_to})
return service
def _redirect_url(request):
"""
Redirects to referring page, or CAS_REDIRECT_URL if no referrer is
set.
:param: request RequestObj
"""
next = request.GET.get(REDIRECT_FIELD_NAME)
if not next:
if settings.CAS_IGNORE_REFERER:
next = settings.CAS_REDIRECT_URL
else:
next = request.META.get('HTTP_REFERER', settings.CAS_REDIRECT_URL)
host = request.get_host()
prefix = (('http://', 'https://')[request.is_secure()] + host)
if next.startswith(prefix):
next = next[len(prefix):]
return next
def _login_url(service, ticket='ST', gateway=False):
"""
Generates CAS login URL
:param: service Service URL
:param: ticket Ticket
:param: gateway Gatewayed
"""
LOGINS = {'ST': 'login',
'PT': 'proxyValidate'}
if gateway:
params = {'service': service, 'gateway': 'true'}
else:
params = {'service': service}
if settings.CAS_EXTRA_LOGIN_PARAMS:
params.update(settings.CAS_EXTRA_LOGIN_PARAMS)
if not ticket:
ticket = 'ST'
login_type = LOGINS.get(ticket[:2], 'login')
return urlparse.urljoin(settings.CAS_SERVER_URL, login_type) + '?' + urlencode(params)
def login(request, next_page=None, required=False, gateway=False):
"""
Forwards to CAS login URL or verifies CAS ticket
:param: request RequestObj
:param: next_page Next page to redirect after login
:param: required
:param: gateway Gatewayed response
"""
if not next_page:
next_page = _redirect_url(request)
try:
# use callable for pre-django 2.0
is_authenticated = request.user.is_authenticated()
except TypeError:
is_authenticated = request.user.is_authenticated
if is_authenticated:
return HttpResponseRedirect(next_page)
ticket = request.GET.get('ticket')
if gateway:
service = _service_url(request, next_page, True)
else:
service = _service_url(request, next_page, False)
if ticket:
user = auth.authenticate(ticket=ticket, service=service)
if user is not None:
auth.login(request, user)
if settings.CAS_PROXY_CALLBACK:
proxy_callback(request)
return HttpResponseRedirect(next_page)
elif settings.CAS_RETRY_LOGIN or required:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False))
else:
logger.warning('User has a valid ticket but not a valid session')
# Has ticket, not session
if gateway:
# Gatewayed responses should nto redirect.
return False
if getattr(settings, 'CAS_CUSTOM_FORBIDDEN'):
return HttpResponseRedirect(reverse(settings.CAS_CUSTOM_FORBIDDEN) + "?" + request.META['QUERY_STRING'])
else:
error = "<h1>Forbidden</h1><p>Login failed.</p>"
return HttpResponseForbidden(error)
else:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False))
def logout(request, next_page=None):
"""
Redirects to CAS logout page
:param: request RequestObj
:param: next_page Page to redirect to
"""
auth.logout(request)
if not next_page:
next_page = _redirect_url(request)
if settings.CAS_LOGOUT_COMPLETELY:
return HttpResponseRedirect(_logout_url(request, next_page))
else:
return HttpResponseRedirect(next_page)
def proxy_callback(request):
"""Handles CAS 2.0+ XML-based proxy callback call.
Stores the proxy granting ticket in the database for
future use.
NB: Use created and set it in python in case database
has issues with setting up the default timestamp value
"""
pgtIou = request.GET.get('pgtIou')
tgt = request.GET.get('pgtId')
if not (pgtIou and tgt):
logger.info('No pgtIou or tgt found in request.GET')
return HttpResponse('No pgtIOO', content_type="text/plain")
try:
PgtIOU.objects.create(tgt=tgt, pgtIou=pgtIou, created=datetime.datetime.now())
request.session['pgt-TICKET'] = pgtIou
return HttpResponse('PGT ticket is: {ticket}'.format(ticket=pgtIou), content_type="text/plain")
except Exception as e:
logger.warning('PGT storage failed. {message}'.format(
message=e
))
return HttpResponse('PGT storage failed for {request}'.format(request=str(request.GET)),
content_type="text/plain")
|
kstateome/django-cas
|
cas/views.py
|
login
|
python
|
def login(request, next_page=None, required=False, gateway=False):
if not next_page:
next_page = _redirect_url(request)
try:
# use callable for pre-django 2.0
is_authenticated = request.user.is_authenticated()
except TypeError:
is_authenticated = request.user.is_authenticated
if is_authenticated:
return HttpResponseRedirect(next_page)
ticket = request.GET.get('ticket')
if gateway:
service = _service_url(request, next_page, True)
else:
service = _service_url(request, next_page, False)
if ticket:
user = auth.authenticate(ticket=ticket, service=service)
if user is not None:
auth.login(request, user)
if settings.CAS_PROXY_CALLBACK:
proxy_callback(request)
return HttpResponseRedirect(next_page)
elif settings.CAS_RETRY_LOGIN or required:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False))
else:
logger.warning('User has a valid ticket but not a valid session')
# Has ticket, not session
if gateway:
# Gatewayed responses should nto redirect.
return False
if getattr(settings, 'CAS_CUSTOM_FORBIDDEN'):
return HttpResponseRedirect(reverse(settings.CAS_CUSTOM_FORBIDDEN) + "?" + request.META['QUERY_STRING'])
else:
error = "<h1>Forbidden</h1><p>Login failed.</p>"
return HttpResponseForbidden(error)
else:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False))
|
Forwards to CAS login URL or verifies CAS ticket
:param: request RequestObj
:param: next_page Next page to redirect after login
:param: required
:param: gateway Gatewayed response
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/views.py#L159-L222
|
[
"def _redirect_url(request):\n \"\"\"\n Redirects to referring page, or CAS_REDIRECT_URL if no referrer is\n set.\n\n :param: request RequestObj\n\n \"\"\"\n\n next = request.GET.get(REDIRECT_FIELD_NAME)\n\n if not next:\n if settings.CAS_IGNORE_REFERER:\n next = settings.CAS_REDIRECT_URL\n else:\n next = request.META.get('HTTP_REFERER', settings.CAS_REDIRECT_URL)\n\n host = request.get_host()\n prefix = (('http://', 'https://')[request.is_secure()] + host)\n\n if next.startswith(prefix):\n next = next[len(prefix):]\n\n return next\n",
"def _login_url(service, ticket='ST', gateway=False):\n \"\"\"\n Generates CAS login URL\n\n :param: service Service URL\n :param: ticket Ticket\n :param: gateway Gatewayed\n\n \"\"\"\n\n LOGINS = {'ST': 'login',\n 'PT': 'proxyValidate'}\n if gateway:\n params = {'service': service, 'gateway': 'true'}\n else:\n params = {'service': service}\n\n if settings.CAS_EXTRA_LOGIN_PARAMS:\n params.update(settings.CAS_EXTRA_LOGIN_PARAMS)\n\n if not ticket:\n ticket = 'ST'\n\n login_type = LOGINS.get(ticket[:2], 'login')\n\n return urlparse.urljoin(settings.CAS_SERVER_URL, login_type) + '?' + urlencode(params)\n",
"def _service_url(request, redirect_to=None, gateway=False):\n \"\"\"\n Generates application service URL for CAS\n\n :param: request Request Object\n :param: redirect_to URL to redriect to\n :param: gateway Should this be a gatewayed pass through\n\n \"\"\"\n\n if settings.CAS_FORCE_SSL_SERVICE_URL:\n protocol = 'https://'\n else:\n protocol = ('http://', 'https://')[request.is_secure()]\n host = request.get_host()\n service = protocol + host + request.path\n if redirect_to:\n if '?' in service:\n service += '&'\n else:\n service += '?'\n\n if gateway:\n \"\"\" If gateway, capture params and reencode them before returning a url \"\"\"\n gateway_params = [(REDIRECT_FIELD_NAME, redirect_to), ('gatewayed', 'true')]\n query_dict = request.GET.copy()\n\n try:\n del query_dict['ticket']\n except:\n pass\n query_list = query_dict.items()\n\n # remove duplicate params\n for item in query_list:\n for index, item2 in enumerate(gateway_params):\n if item[0] == item2[0]:\n gateway_params.pop(index)\n extra_params = gateway_params + query_list\n\n #Sort params by key name so they are always in the same order.\n sorted_params = sorted(extra_params, key=itemgetter(0))\n\n service += urlencode(sorted_params)\n else:\n service += urlencode({REDIRECT_FIELD_NAME: redirect_to})\n\n return service\n",
"def proxy_callback(request):\n \"\"\"Handles CAS 2.0+ XML-based proxy callback call.\n Stores the proxy granting ticket in the database for\n future use.\n\n NB: Use created and set it in python in case database\n has issues with setting up the default timestamp value\n \"\"\"\n\n pgtIou = request.GET.get('pgtIou')\n tgt = request.GET.get('pgtId')\n\n if not (pgtIou and tgt):\n logger.info('No pgtIou or tgt found in request.GET')\n return HttpResponse('No pgtIOO', content_type=\"text/plain\")\n\n try:\n PgtIOU.objects.create(tgt=tgt, pgtIou=pgtIou, created=datetime.datetime.now())\n request.session['pgt-TICKET'] = pgtIou\n return HttpResponse('PGT ticket is: {ticket}'.format(ticket=pgtIou), content_type=\"text/plain\")\n except Exception as e:\n logger.warning('PGT storage failed. {message}'.format(\n message=e\n ))\n return HttpResponse('PGT storage failed for {request}'.format(request=str(request.GET)),\n content_type=\"text/plain\")\n"
] |
import logging
import datetime
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from operator import itemgetter
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib import auth
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from cas.models import PgtIOU
__all__ = ['login', 'logout']
logger = logging.getLogger(__name__)
def _service_url(request, redirect_to=None, gateway=False):
"""
Generates application service URL for CAS
:param: request Request Object
:param: redirect_to URL to redriect to
:param: gateway Should this be a gatewayed pass through
"""
if settings.CAS_FORCE_SSL_SERVICE_URL:
protocol = 'https://'
else:
protocol = ('http://', 'https://')[request.is_secure()]
host = request.get_host()
service = protocol + host + request.path
if redirect_to:
if '?' in service:
service += '&'
else:
service += '?'
if gateway:
""" If gateway, capture params and reencode them before returning a url """
gateway_params = [(REDIRECT_FIELD_NAME, redirect_to), ('gatewayed', 'true')]
query_dict = request.GET.copy()
try:
del query_dict['ticket']
except:
pass
query_list = query_dict.items()
# remove duplicate params
for item in query_list:
for index, item2 in enumerate(gateway_params):
if item[0] == item2[0]:
gateway_params.pop(index)
extra_params = gateway_params + query_list
#Sort params by key name so they are always in the same order.
sorted_params = sorted(extra_params, key=itemgetter(0))
service += urlencode(sorted_params)
else:
service += urlencode({REDIRECT_FIELD_NAME: redirect_to})
return service
def _redirect_url(request):
"""
Redirects to referring page, or CAS_REDIRECT_URL if no referrer is
set.
:param: request RequestObj
"""
next = request.GET.get(REDIRECT_FIELD_NAME)
if not next:
if settings.CAS_IGNORE_REFERER:
next = settings.CAS_REDIRECT_URL
else:
next = request.META.get('HTTP_REFERER', settings.CAS_REDIRECT_URL)
host = request.get_host()
prefix = (('http://', 'https://')[request.is_secure()] + host)
if next.startswith(prefix):
next = next[len(prefix):]
return next
def _login_url(service, ticket='ST', gateway=False):
"""
Generates CAS login URL
:param: service Service URL
:param: ticket Ticket
:param: gateway Gatewayed
"""
LOGINS = {'ST': 'login',
'PT': 'proxyValidate'}
if gateway:
params = {'service': service, 'gateway': 'true'}
else:
params = {'service': service}
if settings.CAS_EXTRA_LOGIN_PARAMS:
params.update(settings.CAS_EXTRA_LOGIN_PARAMS)
if not ticket:
ticket = 'ST'
login_type = LOGINS.get(ticket[:2], 'login')
return urlparse.urljoin(settings.CAS_SERVER_URL, login_type) + '?' + urlencode(params)
def _logout_url(request, next_page=None):
    """
    Build the CAS server logout URL.

    :param: request RequestObj
    :param: next_page Page to redirect after logout.
    """
    url = urlparse.urljoin(settings.CAS_SERVER_URL, 'logout')
    if not (next_page and getattr(settings, 'CAS_PROVIDE_URL_TO_LOGOUT', True)):
        return url
    if urlparse.urlparse(next_page).scheme:
        # next_page is already protocol-rooted; pass it through untouched.
        service = next_page
    else:
        scheme = 'https://' if request.is_secure() else 'http://'
        service = scheme + request.get_host() + next_page
    return url + '?' + urlencode({'service': service})
def logout(request, next_page=None):
    """
    End the local Django session, then redirect — either straight to
    *next_page* or via the CAS server's logout endpoint when
    ``CAS_LOGOUT_COMPLETELY`` is enabled.

    :param: request RequestObj
    :param: next_page Page to redirect to
    """
    auth.logout(request)
    next_page = next_page or _redirect_url(request)
    if settings.CAS_LOGOUT_COMPLETELY:
        return HttpResponseRedirect(_logout_url(request, next_page))
    return HttpResponseRedirect(next_page)
def proxy_callback(request):
    """Handles CAS 2.0+ XML-based proxy callback call.
    Stores the proxy granting ticket in the database for
    future use.

    NB: Use created and set it in python in case database
    has issues with setting up the default timestamp value
    """
    pgt_iou = request.GET.get('pgtIou')
    pgt_id = request.GET.get('pgtId')
    if not pgt_iou or not pgt_id:
        logger.info('No pgtIou or tgt found in request.GET')
        return HttpResponse('No pgtIOO', content_type="text/plain")
    try:
        PgtIOU.objects.create(tgt=pgt_id, pgtIou=pgt_iou,
                              created=datetime.datetime.now())
        request.session['pgt-TICKET'] = pgt_iou
        return HttpResponse('PGT ticket is: {ticket}'.format(ticket=pgt_iou),
                            content_type="text/plain")
    except Exception as exc:
        # Best effort: log and answer with an error body rather than
        # propagate a storage failure back to the CAS server.
        logger.warning('PGT storage failed. {message}'.format(message=exc))
        return HttpResponse('PGT storage failed for {request}'.format(request=str(request.GET)),
                            content_type="text/plain")
|
kstateome/django-cas
|
cas/views.py
|
logout
|
python
|
def logout(request, next_page=None):
auth.logout(request)
if not next_page:
next_page = _redirect_url(request)
if settings.CAS_LOGOUT_COMPLETELY:
return HttpResponseRedirect(_logout_url(request, next_page))
else:
return HttpResponseRedirect(next_page)
|
Redirects to CAS logout page
:param: request RequestObj
:param: next_page Page to redirect to
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/views.py#L225-L242
|
[
"def _redirect_url(request):\n \"\"\"\n Redirects to referring page, or CAS_REDIRECT_URL if no referrer is\n set.\n\n :param: request RequestObj\n\n \"\"\"\n\n next = request.GET.get(REDIRECT_FIELD_NAME)\n\n if not next:\n if settings.CAS_IGNORE_REFERER:\n next = settings.CAS_REDIRECT_URL\n else:\n next = request.META.get('HTTP_REFERER', settings.CAS_REDIRECT_URL)\n\n host = request.get_host()\n prefix = (('http://', 'https://')[request.is_secure()] + host)\n\n if next.startswith(prefix):\n next = next[len(prefix):]\n\n return next\n",
"def _logout_url(request, next_page=None):\n \"\"\"\n Generates CAS logout URL\n\n :param: request RequestObj\n :param: next_page Page to redirect after logout.\n\n \"\"\"\n\n url = urlparse.urljoin(settings.CAS_SERVER_URL, 'logout')\n\n if next_page and getattr(settings, 'CAS_PROVIDE_URL_TO_LOGOUT', True):\n parsed_url = urlparse.urlparse(next_page)\n if parsed_url.scheme: #If next_page is a protocol-rooted url, skip redirect url construction\n url += '?' + urlencode({'service': next_page})\n else:\n protocol = ('http://', 'https://')[request.is_secure()]\n host = request.get_host()\n url += '?' + urlencode({'service': protocol + host + next_page})\n\n return url\n"
] |
import logging
import datetime
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from operator import itemgetter
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib import auth
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from cas.models import PgtIOU
__all__ = ['login', 'logout']
logger = logging.getLogger(__name__)
def _service_url(request, redirect_to=None, gateway=False):
    """
    Generates application service URL for CAS

    :param: request Request Object
    :param: redirect_to URL to redirect to
    :param: gateway Should this be a gatewayed pass through
    """
    if settings.CAS_FORCE_SSL_SERVICE_URL:
        protocol = 'https://'
    else:
        protocol = ('http://', 'https://')[request.is_secure()]
    host = request.get_host()
    service = protocol + host + request.path
    if redirect_to:
        if '?' in service:
            service += '&'
        else:
            service += '?'
        if gateway:
            # If gateway, capture params and re-encode them before returning a url
            gateway_params = [(REDIRECT_FIELD_NAME, redirect_to), ('gatewayed', 'true')]
            query_dict = request.GET.copy()
            try:
                del query_dict['ticket']
            except KeyError:
                pass
            # Materialize as a list: on Python 3 .items() is a view and
            # cannot be concatenated with a list below.
            query_list = list(query_dict.items())
            # Drop gateway params whose key already appears in the query
            # string, so each key is emitted only once.
            present_keys = set(key for key, _ in query_list)
            gateway_params = [pair for pair in gateway_params
                              if pair[0] not in present_keys]
            extra_params = gateway_params + query_list
            # Sort params by key name so they are always in the same order.
            sorted_params = sorted(extra_params, key=itemgetter(0))
            service += urlencode(sorted_params)
        else:
            service += urlencode({REDIRECT_FIELD_NAME: redirect_to})
    return service
def _redirect_url(request):
    """
    Redirects to referring page, or CAS_REDIRECT_URL if no referrer is
    set.

    :param: request RequestObj
    """
    # Explicit ?next= parameter wins over the referrer.
    next = request.GET.get(REDIRECT_FIELD_NAME)
    if not next:
        if settings.CAS_IGNORE_REFERER:
            next = settings.CAS_REDIRECT_URL
        else:
            next = request.META.get('HTTP_REFERER', settings.CAS_REDIRECT_URL)
    # Strip this site's own scheme+host so the redirect is a relative path.
    host = request.get_host()
    prefix = (('http://', 'https://')[request.is_secure()] + host)
    if next.startswith(prefix):
        next = next[len(prefix):]
    return next
def _login_url(service, ticket='ST', gateway=False):
    """
    Generates CAS login URL

    :param: service Service URL
    :param: ticket Ticket
    :param: gateway Gatewayed
    """
    # Map a ticket prefix to the CAS server endpoint that handles it.
    LOGINS = {'ST': 'login',
              'PT': 'proxyValidate'}
    if gateway:
        params = {'service': service, 'gateway': 'true'}
    else:
        params = {'service': service}
    if settings.CAS_EXTRA_LOGIN_PARAMS:
        params.update(settings.CAS_EXTRA_LOGIN_PARAMS)
    if not ticket:
        ticket = 'ST'
    # Service tickets ('ST') use login; proxy tickets ('PT') use proxyValidate.
    login_type = LOGINS.get(ticket[:2], 'login')
    return urlparse.urljoin(settings.CAS_SERVER_URL, login_type) + '?' + urlencode(params)
def _logout_url(request, next_page=None):
    """
    Generates CAS logout URL

    :param: request RequestObj
    :param: next_page Page to redirect after logout.
    """
    url = urlparse.urljoin(settings.CAS_SERVER_URL, 'logout')
    if next_page and getattr(settings, 'CAS_PROVIDE_URL_TO_LOGOUT', True):
        parsed_url = urlparse.urlparse(next_page)
        if parsed_url.scheme:  # If next_page is a protocol-rooted url, skip redirect url construction
            url += '?' + urlencode({'service': next_page})
        else:
            # Relative next_page: rebuild an absolute URL from this request.
            protocol = ('http://', 'https://')[request.is_secure()]
            host = request.get_host()
            url += '?' + urlencode({'service': protocol + host + next_page})
    return url
def login(request, next_page=None, required=False, gateway=False):
    """
    Forwards to CAS login URL or verifies CAS ticket

    :param: request RequestObj
    :param: next_page Next page to redirect after login
    :param: required
    :param: gateway Gatewayed response
    """
    if not next_page:
        next_page = _redirect_url(request)
    try:
        # use callable for pre-django 2.0
        is_authenticated = request.user.is_authenticated()
    except TypeError:
        is_authenticated = request.user.is_authenticated
    if is_authenticated:
        return HttpResponseRedirect(next_page)
    ticket = request.GET.get('ticket')
    if gateway:
        service = _service_url(request, next_page, True)
    else:
        service = _service_url(request, next_page, False)
    if ticket:
        user = auth.authenticate(ticket=ticket, service=service)
        if user is not None:
            auth.login(request, user)
            if settings.CAS_PROXY_CALLBACK:
                proxy_callback(request)
            return HttpResponseRedirect(next_page)
        elif settings.CAS_RETRY_LOGIN or required:
            if gateway:
                return HttpResponseRedirect(_login_url(service, ticket, True))
            else:
                return HttpResponseRedirect(_login_url(service, ticket, False))
        else:
            logger.warning('User has a valid ticket but not a valid session')
            # Has ticket, not session
            if gateway:
                # Gatewayed responses should not redirect.
                return False
            # Use an explicit default: without it, a missing
            # CAS_CUSTOM_FORBIDDEN setting raised AttributeError here.
            if getattr(settings, 'CAS_CUSTOM_FORBIDDEN', None):
                return HttpResponseRedirect(reverse(settings.CAS_CUSTOM_FORBIDDEN) + "?" + request.META['QUERY_STRING'])
            else:
                error = "<h1>Forbidden</h1><p>Login failed.</p>"
                return HttpResponseForbidden(error)
    else:
        if gateway:
            return HttpResponseRedirect(_login_url(service, ticket, True))
        else:
            return HttpResponseRedirect(_login_url(service, ticket, False))
def proxy_callback(request):
    """Handles CAS 2.0+ XML-based proxy callback call.
    Stores the proxy granting ticket in the database for
    future use.

    NB: Use created and set it in python in case database
    has issues with setting up the default timestamp value
    """
    pgtIou = request.GET.get('pgtIou')
    tgt = request.GET.get('pgtId')
    if not (pgtIou and tgt):
        logger.info('No pgtIou or tgt found in request.GET')
        return HttpResponse('No pgtIOO', content_type="text/plain")
    try:
        PgtIOU.objects.create(tgt=tgt, pgtIou=pgtIou, created=datetime.datetime.now())
        request.session['pgt-TICKET'] = pgtIou
        return HttpResponse('PGT ticket is: {ticket}'.format(ticket=pgtIou), content_type="text/plain")
    except Exception as e:
        # Best effort: log and answer with an error body rather than
        # propagate a storage failure back to the CAS server.
        logger.warning('PGT storage failed. {message}'.format(
            message=e
        ))
        return HttpResponse('PGT storage failed for {request}'.format(request=str(request.GET)),
                            content_type="text/plain")
|
kstateome/django-cas
|
cas/views.py
|
proxy_callback
|
python
|
def proxy_callback(request):
pgtIou = request.GET.get('pgtIou')
tgt = request.GET.get('pgtId')
if not (pgtIou and tgt):
logger.info('No pgtIou or tgt found in request.GET')
return HttpResponse('No pgtIOO', content_type="text/plain")
try:
PgtIOU.objects.create(tgt=tgt, pgtIou=pgtIou, created=datetime.datetime.now())
request.session['pgt-TICKET'] = pgtIou
return HttpResponse('PGT ticket is: {ticket}'.format(ticket=pgtIou), content_type="text/plain")
except Exception as e:
logger.warning('PGT storage failed. {message}'.format(
message=e
))
return HttpResponse('PGT storage failed for {request}'.format(request=str(request.GET)),
content_type="text/plain")
|
Handles CAS 2.0+ XML-based proxy callback call.
Stores the proxy granting ticket in the database for
future use.
NB: Use created and set it in python in case database
has issues with setting up the default timestamp value
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/views.py#L245-L270
| null |
import logging
import datetime
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from operator import itemgetter
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib import auth
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from cas.models import PgtIOU
__all__ = ['login', 'logout']
logger = logging.getLogger(__name__)
def _service_url(request, redirect_to=None, gateway=False):
    """
    Generates application service URL for CAS

    :param: request Request Object
    :param: redirect_to URL to redirect to
    :param: gateway Should this be a gatewayed pass through
    """
    if settings.CAS_FORCE_SSL_SERVICE_URL:
        protocol = 'https://'
    else:
        protocol = ('http://', 'https://')[request.is_secure()]
    host = request.get_host()
    service = protocol + host + request.path
    if redirect_to:
        if '?' in service:
            service += '&'
        else:
            service += '?'
        if gateway:
            # If gateway, capture params and re-encode them before returning a url
            gateway_params = [(REDIRECT_FIELD_NAME, redirect_to), ('gatewayed', 'true')]
            query_dict = request.GET.copy()
            try:
                del query_dict['ticket']
            except KeyError:
                pass
            # Materialize as a list: on Python 3 .items() is a view and
            # cannot be concatenated with a list below.
            query_list = list(query_dict.items())
            # Drop gateway params whose key already appears in the query
            # string, so each key is emitted only once.
            present_keys = set(key for key, _ in query_list)
            gateway_params = [pair for pair in gateway_params
                              if pair[0] not in present_keys]
            extra_params = gateway_params + query_list
            # Sort params by key name so they are always in the same order.
            sorted_params = sorted(extra_params, key=itemgetter(0))
            service += urlencode(sorted_params)
        else:
            service += urlencode({REDIRECT_FIELD_NAME: redirect_to})
    return service
def _redirect_url(request):
    """
    Redirects to referring page, or CAS_REDIRECT_URL if no referrer is
    set.

    :param: request RequestObj
    """
    # Explicit ?next= parameter wins over the referrer.
    next = request.GET.get(REDIRECT_FIELD_NAME)
    if not next:
        if settings.CAS_IGNORE_REFERER:
            next = settings.CAS_REDIRECT_URL
        else:
            next = request.META.get('HTTP_REFERER', settings.CAS_REDIRECT_URL)
    # Strip this site's own scheme+host so the redirect is a relative path.
    host = request.get_host()
    prefix = (('http://', 'https://')[request.is_secure()] + host)
    if next.startswith(prefix):
        next = next[len(prefix):]
    return next
def _login_url(service, ticket='ST', gateway=False):
    """
    Generates CAS login URL

    :param: service Service URL
    :param: ticket Ticket
    :param: gateway Gatewayed
    """
    # Map a ticket prefix to the CAS server endpoint that handles it.
    LOGINS = {'ST': 'login',
              'PT': 'proxyValidate'}
    if gateway:
        params = {'service': service, 'gateway': 'true'}
    else:
        params = {'service': service}
    if settings.CAS_EXTRA_LOGIN_PARAMS:
        params.update(settings.CAS_EXTRA_LOGIN_PARAMS)
    if not ticket:
        ticket = 'ST'
    # Service tickets ('ST') use login; proxy tickets ('PT') use proxyValidate.
    login_type = LOGINS.get(ticket[:2], 'login')
    return urlparse.urljoin(settings.CAS_SERVER_URL, login_type) + '?' + urlencode(params)
def _logout_url(request, next_page=None):
    """
    Generates CAS logout URL

    :param: request RequestObj
    :param: next_page Page to redirect after logout.
    """
    url = urlparse.urljoin(settings.CAS_SERVER_URL, 'logout')
    if next_page and getattr(settings, 'CAS_PROVIDE_URL_TO_LOGOUT', True):
        parsed_url = urlparse.urlparse(next_page)
        if parsed_url.scheme:  # If next_page is a protocol-rooted url, skip redirect url construction
            url += '?' + urlencode({'service': next_page})
        else:
            # Relative next_page: rebuild an absolute URL from this request.
            protocol = ('http://', 'https://')[request.is_secure()]
            host = request.get_host()
            url += '?' + urlencode({'service': protocol + host + next_page})
    return url
def login(request, next_page=None, required=False, gateway=False):
    """
    Forwards to CAS login URL or verifies CAS ticket

    :param: request RequestObj
    :param: next_page Next page to redirect after login
    :param: required
    :param: gateway Gatewayed response
    """
    if not next_page:
        next_page = _redirect_url(request)
    try:
        # use callable for pre-django 2.0
        is_authenticated = request.user.is_authenticated()
    except TypeError:
        is_authenticated = request.user.is_authenticated
    if is_authenticated:
        return HttpResponseRedirect(next_page)
    ticket = request.GET.get('ticket')
    if gateway:
        service = _service_url(request, next_page, True)
    else:
        service = _service_url(request, next_page, False)
    if ticket:
        user = auth.authenticate(ticket=ticket, service=service)
        if user is not None:
            auth.login(request, user)
            if settings.CAS_PROXY_CALLBACK:
                proxy_callback(request)
            return HttpResponseRedirect(next_page)
        elif settings.CAS_RETRY_LOGIN or required:
            if gateway:
                return HttpResponseRedirect(_login_url(service, ticket, True))
            else:
                return HttpResponseRedirect(_login_url(service, ticket, False))
        else:
            logger.warning('User has a valid ticket but not a valid session')
            # Has ticket, not session
            if gateway:
                # Gatewayed responses should not redirect.
                return False
            # Use an explicit default: without it, a missing
            # CAS_CUSTOM_FORBIDDEN setting raised AttributeError here.
            if getattr(settings, 'CAS_CUSTOM_FORBIDDEN', None):
                return HttpResponseRedirect(reverse(settings.CAS_CUSTOM_FORBIDDEN) + "?" + request.META['QUERY_STRING'])
            else:
                error = "<h1>Forbidden</h1><p>Login failed.</p>"
                return HttpResponseForbidden(error)
    else:
        if gateway:
            return HttpResponseRedirect(_login_url(service, ticket, True))
        else:
            return HttpResponseRedirect(_login_url(service, ticket, False))
def logout(request, next_page=None):
    """
    Redirects to CAS logout page

    :param: request RequestObj
    :param: next_page Page to redirect to
    """
    # End the local Django session first.
    auth.logout(request)
    if not next_page:
        next_page = _redirect_url(request)
    if settings.CAS_LOGOUT_COMPLETELY:
        # Also terminate the single-sign-on session on the CAS server.
        return HttpResponseRedirect(_logout_url(request, next_page))
    else:
        return HttpResponseRedirect(next_page)
|
kstateome/django-cas
|
cas/middleware.py
|
CASMiddleware.process_view
|
python
|
def process_view(self, request, view_func, view_args, view_kwargs):
if view_func == login:
return cas_login(request, *view_args, **view_kwargs)
elif view_func == logout:
return cas_logout(request, *view_args, **view_kwargs)
if settings.CAS_ADMIN_PREFIX:
if not request.path.startswith(settings.CAS_ADMIN_PREFIX):
return None
elif not view_func.__module__.startswith('django.contrib.admin.'):
return None
try:
# use callable for pre-django 2.0
is_authenticated = request.user.is_authenticated()
except TypeError:
is_authenticated = request.user.is_authenticated
if is_authenticated:
if request.user.is_staff:
return None
else:
error = ('<h1>Forbidden</h1><p>You do not have staff '
'privileges.</p>')
return HttpResponseForbidden(error)
params = urlencode({REDIRECT_FIELD_NAME: request.get_full_path()})
return HttpResponseRedirect(reverse(cas_login) + '?' + params)
|
Forwards unauthenticated requests to the admin page to the CAS
login URL, as well as calls to django.contrib.auth.views.login and
logout.
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/middleware.py#L53-L86
|
[
"def login(request, next_page=None, required=False, gateway=False):\n \"\"\"\n Forwards to CAS login URL or verifies CAS ticket\n\n :param: request RequestObj\n :param: next_page Next page to redirect after login\n :param: required\n :param: gateway Gatewayed response\n\n \"\"\"\n\n if not next_page:\n next_page = _redirect_url(request)\n\n try:\n # use callable for pre-django 2.0\n is_authenticated = request.user.is_authenticated()\n except TypeError:\n is_authenticated = request.user.is_authenticated\n\n if is_authenticated:\n return HttpResponseRedirect(next_page)\n\n ticket = request.GET.get('ticket')\n\n if gateway:\n service = _service_url(request, next_page, True)\n else:\n service = _service_url(request, next_page, False)\n\n if ticket:\n user = auth.authenticate(ticket=ticket, service=service)\n\n if user is not None:\n\n auth.login(request, user)\n\n if settings.CAS_PROXY_CALLBACK:\n proxy_callback(request)\n\n return HttpResponseRedirect(next_page)\n elif settings.CAS_RETRY_LOGIN or required:\n if gateway:\n return HttpResponseRedirect(_login_url(service, ticket, True))\n else:\n return HttpResponseRedirect(_login_url(service, ticket, False))\n else:\n logger.warning('User has a valid ticket but not a valid session')\n # Has ticket, not session\n\n if gateway:\n # Gatewayed responses should nto redirect.\n return False\n\n if getattr(settings, 'CAS_CUSTOM_FORBIDDEN'):\n return HttpResponseRedirect(reverse(settings.CAS_CUSTOM_FORBIDDEN) + \"?\" + request.META['QUERY_STRING'])\n else:\n error = \"<h1>Forbidden</h1><p>Login failed.</p>\"\n return HttpResponseForbidden(error)\n else:\n if gateway:\n return HttpResponseRedirect(_login_url(service, ticket, True))\n else:\n return HttpResponseRedirect(_login_url(service, ticket, False))\n",
"def logout(request, next_page=None):\n \"\"\"\n Redirects to CAS logout page\n\n :param: request RequestObj\n :param: next_page Page to redirect to\n\n \"\"\"\n\n auth.logout(request)\n\n if not next_page:\n next_page = _redirect_url(request)\n\n if settings.CAS_LOGOUT_COMPLETELY:\n return HttpResponseRedirect(_logout_url(request, next_page))\n else:\n return HttpResponseRedirect(next_page)\n"
] |
class CASMiddleware(MiddlewareMixin):
    """
    Middleware that allows CAS authentication on admin pages
    """

    def process_request(self, request):
        """
        Checks that the authentication middleware is installed

        :param: request
        """
        error = ("The Django CAS middleware requires authentication "
                 "middleware to be installed. Edit your MIDDLEWARE_CLASSES "
                 "setting to insert 'django.contrib.auth.middleware."
                 "AuthenticationMiddleware'.")
        assert hasattr(request, 'user'), error

    def process_exception(self, request, exception):
        """
        When we get a CasTicketException, that is probably caused by the ticket timing out.
        So logout/login and get the same page again.
        """
        if not isinstance(exception, CasTicketException):
            return None
        do_logout(request)
        # This assumes that request.path requires authentication.
        return HttpResponseRedirect(request.path)
|
kstateome/django-cas
|
cas/middleware.py
|
CASMiddleware.process_exception
|
python
|
def process_exception(self, request, exception):
if isinstance(exception, CasTicketException):
do_logout(request)
# This assumes that request.path requires authentication.
return HttpResponseRedirect(request.path)
else:
return None
|
When we get a CasTicketException, that is probably caused by the ticket timing out.
So logout/login and get the same page again.
|
train
|
https://github.com/kstateome/django-cas/blob/8a871093966f001b4dadf7d097ac326169f3c066/cas/middleware.py#L88-L99
| null |
class CASMiddleware(MiddlewareMixin):
    """
    Middleware that allows CAS authentication on admin pages
    """

    def process_request(self, request):
        """
        Checks that the authentication middleware is installed

        :param: request
        """
        # Fail loudly if AuthenticationMiddleware has not populated
        # request.user; every other hook here depends on it.
        error = ("The Django CAS middleware requires authentication "
                 "middleware to be installed. Edit your MIDDLEWARE_CLASSES "
                 "setting to insert 'django.contrib.auth.middleware."
                 "AuthenticationMiddleware'.")
        assert hasattr(request, 'user'), error

    def process_view(self, request, view_func, view_args, view_kwargs):
        """
        Forwards unauthenticated requests to the admin page to the CAS
        login URL, as well as calls to django.contrib.auth.views.login and
        logout.
        """
        # Replace Django's stock auth views with their CAS counterparts.
        if view_func == login:
            return cas_login(request, *view_args, **view_kwargs)
        elif view_func == logout:
            return cas_logout(request, *view_args, **view_kwargs)
        # Only guard admin URLs: by configured prefix if set, otherwise by
        # the view's defining module.
        if settings.CAS_ADMIN_PREFIX:
            if not request.path.startswith(settings.CAS_ADMIN_PREFIX):
                return None
        elif not view_func.__module__.startswith('django.contrib.admin.'):
            return None
        try:
            # use callable for pre-django 2.0
            is_authenticated = request.user.is_authenticated()
        except TypeError:
            is_authenticated = request.user.is_authenticated
        if is_authenticated:
            if request.user.is_staff:
                return None
            else:
                error = ('<h1>Forbidden</h1><p>You do not have staff '
                         'privileges.</p>')
                return HttpResponseForbidden(error)
        # Anonymous user on an admin URL: bounce to CAS login with a
        # ``next`` parameter pointing back at the requested page.
        params = urlencode({REDIRECT_FIELD_NAME: request.get_full_path()})
        return HttpResponseRedirect(reverse(cas_login) + '?' + params)
|
tox-dev/tox-travis
|
src/tox_travis/after.py
|
travis_after
|
python
|
def travis_after(ini, envlist):
# after-all disabled for pull requests
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return
if not after_config_matches(ini, envlist):
return # This is not the one that needs to wait
github_token = os.environ.get('GITHUB_TOKEN')
if not github_token:
print('No GitHub token given.', file=sys.stderr)
sys.exit(NO_GITHUB_TOKEN)
api_url = os.environ.get('TRAVIS_API_URL', 'https://api.travis-ci.org')
build_id = os.environ.get('TRAVIS_BUILD_ID')
job_number = os.environ.get('TRAVIS_JOB_NUMBER')
try:
polling_interval = int(os.environ.get('TRAVIS_POLLING_INTERVAL', 5))
except ValueError:
print('Invalid polling interval given: {0}'.format(
repr(os.environ.get('TRAVIS_POLLING_INTERVAL'))), file=sys.stderr)
sys.exit(INVALID_POLLING_INTERVAL)
if not all([api_url, build_id, job_number]):
print('Required Travis environment not given.', file=sys.stderr)
sys.exit(INCOMPLETE_TRAVIS_ENVIRONMENT)
# This may raise an Exception, and it should be printed
job_statuses = get_job_statuses(
github_token, api_url, build_id, polling_interval, job_number)
if not all(job_statuses):
print('Some jobs were not successful.')
sys.exit(JOBS_FAILED)
print('All required jobs were successful.')
|
Wait for all jobs to finish, then exit successfully.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/after.py#L25-L62
|
[
"def after_config_matches(ini, envlist):\n \"\"\"Determine if this job should wait for the others.\"\"\"\n section = ini.sections.get('travis:after', {})\n\n if not section:\n return False # Never wait if it's not configured\n\n if 'envlist' in section or 'toxenv' in section:\n if 'toxenv' in section:\n print('The \"toxenv\" key of the [travis:after] section is '\n 'deprecated in favor of the \"envlist\" key.', file=sys.stderr)\n\n toxenv = section.get('toxenv')\n required = set(split_env(section.get('envlist', toxenv) or ''))\n actual = set(envlist)\n if required - actual:\n return False\n\n # Translate travis requirements to env requirements\n env_requirements = [\n (TRAVIS_FACTORS[factor], value) for factor, value\n in parse_dict(section.get('travis', '')).items()\n if factor in TRAVIS_FACTORS\n ] + [\n (name, value) for name, value\n in parse_dict(section.get('env', '')).items()\n ]\n\n return all([\n os.environ.get(name) == value\n for name, value in env_requirements\n ])\n",
"def get_job_statuses(github_token, api_url, build_id,\n polling_interval, job_number):\n \"\"\"Wait for all the travis jobs to complete.\n\n Once the other jobs are complete, return a list of booleans,\n indicating whether or not the job was successful. Ignore jobs\n marked \"allow_failure\".\n \"\"\"\n auth = get_json('{api_url}/auth/github'.format(api_url=api_url),\n data={'github_token': github_token})['access_token']\n\n while True:\n build = get_json('{api_url}/builds/{build_id}'.format(\n api_url=api_url, build_id=build_id), auth=auth)\n jobs = [job for job in build['jobs']\n if job['number'] != job_number and\n not job['allow_failure']] # Ignore allowed failures\n if all(job['finished_at'] for job in jobs):\n break # All the jobs have completed\n elif any(job['state'] != 'passed'\n for job in jobs if job['finished_at']):\n break # Some required job that finished did not pass\n\n print('Waiting for jobs to complete: {job_numbers}'.format(\n job_numbers=[job['number'] for job in jobs\n if not job['finished_at']]))\n time.sleep(polling_interval)\n\n return [job['state'] == 'passed' for job in jobs]\n"
] |
"""Add a flag to pause and wait for all Travis jobs to complete."""
from __future__ import print_function
import os
import sys
import json
import time
from tox.config import _split_env as split_env
try:
import urllib.request as urllib2
except ImportError:
import urllib2 # Python 2
from .utils import TRAVIS_FACTORS, parse_dict
# Exit code constants. They are purposely undocumented.
# Please do not depend on their values.
NO_GITHUB_TOKEN = 32
INVALID_POLLING_INTERVAL = 33
INCOMPLETE_TRAVIS_ENVIRONMENT = 34
JOBS_FAILED = 35
def after_config_matches(ini, envlist):
    """Determine if this job should wait for the others."""
    section = ini.sections.get('travis:after', {})
    if not section:
        return False  # Never wait if it's not configured
    if 'envlist' in section or 'toxenv' in section:
        if 'toxenv' in section:
            print('The "toxenv" key of the [travis:after] section is '
                  'deprecated in favor of the "envlist" key.', file=sys.stderr)
        toxenv = section.get('toxenv')
        required = set(split_env(section.get('envlist', toxenv) or ''))
        # Every required env must be present in the actual envlist.
        if required - set(envlist):
            return False
    # Translate travis requirements to env requirements
    travis_requirements = [
        (TRAVIS_FACTORS[factor], value)
        for factor, value in parse_dict(section.get('travis', '')).items()
        if factor in TRAVIS_FACTORS
    ]
    env_requirements = travis_requirements + [
        (name, value)
        for name, value in parse_dict(section.get('env', '')).items()
    ]
    # Every named environment variable must hold exactly the required value.
    return all(os.environ.get(name) == value
               for name, value in env_requirements)
def get_job_statuses(github_token, api_url, build_id,
                     polling_interval, job_number):
    """Wait for all the travis jobs to complete.

    Once the other jobs are complete, return a list of booleans,
    indicating whether or not the job was successful. Ignore jobs
    marked "allow_failure".
    """
    # Exchange the GitHub token for a Travis API access token.
    auth = get_json('{api_url}/auth/github'.format(api_url=api_url),
                    data={'github_token': github_token})['access_token']
    while True:
        build = get_json('{api_url}/builds/{build_id}'.format(
            api_url=api_url, build_id=build_id), auth=auth)
        # Watch every job except this one and any allowed failures.
        jobs = [job for job in build['jobs']
                if job['number'] != job_number and
                not job['allow_failure']]  # Ignore allowed failures
        if all(job['finished_at'] for job in jobs):
            break  # All the jobs have completed
        elif any(job['state'] != 'passed'
                 for job in jobs if job['finished_at']):
            break  # Some required job that finished did not pass
        print('Waiting for jobs to complete: {job_numbers}'.format(
            job_numbers=[job['number'] for job in jobs
                         if not job['finished_at']]))
        time.sleep(polling_interval)
    return [job['state'] == 'passed' for job in jobs]
def get_json(url, auth=None, data=None):
    """Make a GET request, and return the response as parsed JSON.

    :param url: URL to request.
    :param auth: Optional Travis API token, sent as an Authorization header.
    :param data: Optional dict; when given, it is JSON-encoded and sent as
        the request body.
    """
    headers = {
        'Accept': 'application/vnd.travis-ci.2+json',
        'User-Agent': 'Travis/Tox-Travis-1.0a',
        # User-Agent must start with "Travis/" in order to work
    }
    if auth:
        headers['Authorization'] = 'token {auth}'.format(auth=auth)
    params = {}
    if data:
        headers['Content-Type'] = 'application/json'
        params['data'] = json.dumps(data).encode('utf-8')
    request = urllib2.Request(url, headers=headers, **params)
    # Close the response explicitly; the original left the socket open
    # until garbage collection.
    response = urllib2.urlopen(request)
    try:
        body = response.read()
    finally:
        response.close()
    return json.loads(body.decode('utf-8'))
|
tox-dev/tox-travis
|
src/tox_travis/after.py
|
after_config_matches
|
python
|
def after_config_matches(ini, envlist):
section = ini.sections.get('travis:after', {})
if not section:
return False # Never wait if it's not configured
if 'envlist' in section or 'toxenv' in section:
if 'toxenv' in section:
print('The "toxenv" key of the [travis:after] section is '
'deprecated in favor of the "envlist" key.', file=sys.stderr)
toxenv = section.get('toxenv')
required = set(split_env(section.get('envlist', toxenv) or ''))
actual = set(envlist)
if required - actual:
return False
# Translate travis requirements to env requirements
env_requirements = [
(TRAVIS_FACTORS[factor], value) for factor, value
in parse_dict(section.get('travis', '')).items()
if factor in TRAVIS_FACTORS
] + [
(name, value) for name, value
in parse_dict(section.get('env', '')).items()
]
return all([
os.environ.get(name) == value
for name, value in env_requirements
])
|
Determine if this job should wait for the others.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/after.py#L65-L96
|
[
"def parse_dict(value):\n \"\"\"Parse a dict value from the tox config.\n\n .. code-block: ini\n\n [travis]\n python =\n 2.7: py27, docs\n 3.5: py{35,36}\n\n With this config, the value of ``python`` would be parsed\n by this function, and would return::\n\n {\n '2.7': 'py27, docs',\n '3.5': 'py{35,36}',\n }\n\n \"\"\"\n lines = [line.strip() for line in value.strip().splitlines()]\n pairs = [line.split(':', 1) for line in lines if line]\n return dict((k.strip(), v.strip()) for k, v in pairs)\n"
] |
"""Add a flag to pause and wait for all Travis jobs to complete."""
from __future__ import print_function
import os
import sys
import json
import time
from tox.config import _split_env as split_env
try:
import urllib.request as urllib2
except ImportError:
import urllib2 # Python 2
from .utils import TRAVIS_FACTORS, parse_dict
# Exit code constants. They are purposely undocumented.
# Please do not depend on their values.
NO_GITHUB_TOKEN = 32
INVALID_POLLING_INTERVAL = 33
INCOMPLETE_TRAVIS_ENVIRONMENT = 34
JOBS_FAILED = 35
def travis_after(ini, envlist):
    """Wait for all jobs to finish, then exit successfully."""
    # after-all disabled for pull requests
    if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
        return
    if not after_config_matches(ini, envlist):
        return  # This is not the one that needs to wait
    github_token = os.environ.get('GITHUB_TOKEN')
    if not github_token:
        print('No GitHub token given.', file=sys.stderr)
        sys.exit(NO_GITHUB_TOKEN)
    api_url = os.environ.get('TRAVIS_API_URL', 'https://api.travis-ci.org')
    build_id = os.environ.get('TRAVIS_BUILD_ID')
    job_number = os.environ.get('TRAVIS_JOB_NUMBER')
    try:
        polling_interval = int(os.environ.get('TRAVIS_POLLING_INTERVAL', 5))
    except ValueError:
        print('Invalid polling interval given: {0}'.format(
            repr(os.environ.get('TRAVIS_POLLING_INTERVAL'))), file=sys.stderr)
        sys.exit(INVALID_POLLING_INTERVAL)
    if not (api_url and build_id and job_number):
        print('Required Travis environment not given.', file=sys.stderr)
        sys.exit(INCOMPLETE_TRAVIS_ENVIRONMENT)
    # This may raise an Exception, and it should be printed
    statuses = get_job_statuses(
        github_token, api_url, build_id, polling_interval, job_number)
    if not all(statuses):
        print('Some jobs were not successful.')
        sys.exit(JOBS_FAILED)
    print('All required jobs were successful.')
def get_job_statuses(github_token, api_url, build_id,
                     polling_interval, job_number):
    """Wait for all the travis jobs to complete.

    Poll the Travis API every ``polling_interval`` seconds until every
    watched job in the build has finished, or until one of the watched
    jobs that already finished did not pass. Jobs marked
    ``allow_failure`` and this job itself are excluded from the watch.
    Return a list of booleans, one per watched job, True when that job
    passed.

    :param github_token: GitHub token exchanged for a Travis API token.
    :param api_url: Base URL of the Travis API.
    :param build_id: Identifier of the current Travis build.
    :param polling_interval: Seconds to sleep between polls.
    :param job_number: This job's number; excluded from the wait.
    """
    # Exchange the GitHub token for a Travis access token.
    auth = get_json('{api_url}/auth/github'.format(api_url=api_url),
                    data={'github_token': github_token})['access_token']
    while True:
        build = get_json('{api_url}/builds/{build_id}'.format(
            api_url=api_url, build_id=build_id), auth=auth)
        jobs = [job for job in build['jobs']
                if job['number'] != job_number and
                not job['allow_failure']]  # Ignore allowed failures
        if all(job['finished_at'] for job in jobs):
            break  # All the jobs have completed
        elif any(job['state'] != 'passed'
                 for job in jobs if job['finished_at']):
            break  # Some required job that finished did not pass
        print('Waiting for jobs to complete: {job_numbers}'.format(
            job_numbers=[job['number'] for job in jobs
                         if not job['finished_at']]))
        time.sleep(polling_interval)
    # ``jobs`` intentionally leaks out of the loop: it holds the
    # snapshot taken on the final poll before breaking.
    return [job['state'] == 'passed' for job in jobs]
def get_json(url, auth=None, data=None):
    """Request *url* from the Travis API and return the parsed JSON body.

    When ``auth`` is given it is sent as a token Authorization header;
    when ``data`` is given it is JSON-encoded and sent as the request
    body.
    """
    # User-Agent must start with "Travis/" in order to work
    headers = {
        'Accept': 'application/vnd.travis-ci.2+json',
        'User-Agent': 'Travis/Tox-Travis-1.0a',
    }
    if auth:
        headers['Authorization'] = 'token {auth}'.format(auth=auth)

    extra = {}
    if data:
        headers['Content-Type'] = 'application/json'
        extra['data'] = json.dumps(data).encode('utf-8')

    request = urllib2.Request(url, headers=headers, **extra)
    raw = urllib2.urlopen(request).read()
    return json.loads(raw.decode('utf-8'))
|
tox-dev/tox-travis
|
src/tox_travis/after.py
|
get_job_statuses
|
python
|
def get_job_statuses(github_token, api_url, build_id,
polling_interval, job_number):
auth = get_json('{api_url}/auth/github'.format(api_url=api_url),
data={'github_token': github_token})['access_token']
while True:
build = get_json('{api_url}/builds/{build_id}'.format(
api_url=api_url, build_id=build_id), auth=auth)
jobs = [job for job in build['jobs']
if job['number'] != job_number and
not job['allow_failure']] # Ignore allowed failures
if all(job['finished_at'] for job in jobs):
break # All the jobs have completed
elif any(job['state'] != 'passed'
for job in jobs if job['finished_at']):
break # Some required job that finished did not pass
print('Waiting for jobs to complete: {job_numbers}'.format(
job_numbers=[job['number'] for job in jobs
if not job['finished_at']]))
time.sleep(polling_interval)
return [job['state'] == 'passed' for job in jobs]
|
Wait for all the travis jobs to complete.
Once the other jobs are complete, return a list of booleans,
indicating whether or not the job was successful. Ignore jobs
marked "allow_failure".
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/after.py#L99-L127
|
[
"def get_json(url, auth=None, data=None):\n \"\"\"Make a GET request, and return the response as parsed JSON.\"\"\"\n headers = {\n 'Accept': 'application/vnd.travis-ci.2+json',\n 'User-Agent': 'Travis/Tox-Travis-1.0a',\n # User-Agent must start with \"Travis/\" in order to work\n }\n if auth:\n headers['Authorization'] = 'token {auth}'.format(auth=auth)\n\n params = {}\n if data:\n headers['Content-Type'] = 'application/json'\n params['data'] = json.dumps(data).encode('utf-8')\n\n request = urllib2.Request(url, headers=headers, **params)\n response = urllib2.urlopen(request).read()\n return json.loads(response.decode('utf-8'))\n"
] |
"""Add a flag to pause and wait for all Travis jobs to complete."""
from __future__ import print_function
import os
import sys
import json
import time
from tox.config import _split_env as split_env
try:
import urllib.request as urllib2
except ImportError:
import urllib2 # Python 2
from .utils import TRAVIS_FACTORS, parse_dict
# Exit code constants. They are purposely undocumented.
# Please do not depend on their values.
NO_GITHUB_TOKEN = 32
INVALID_POLLING_INTERVAL = 33
INCOMPLETE_TRAVIS_ENVIRONMENT = 34
JOBS_FAILED = 35
def travis_after(ini, envlist):
"""Wait for all jobs to finish, then exit successfully."""
# after-all disabled for pull requests
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return
if not after_config_matches(ini, envlist):
return # This is not the one that needs to wait
github_token = os.environ.get('GITHUB_TOKEN')
if not github_token:
print('No GitHub token given.', file=sys.stderr)
sys.exit(NO_GITHUB_TOKEN)
api_url = os.environ.get('TRAVIS_API_URL', 'https://api.travis-ci.org')
build_id = os.environ.get('TRAVIS_BUILD_ID')
job_number = os.environ.get('TRAVIS_JOB_NUMBER')
try:
polling_interval = int(os.environ.get('TRAVIS_POLLING_INTERVAL', 5))
except ValueError:
print('Invalid polling interval given: {0}'.format(
repr(os.environ.get('TRAVIS_POLLING_INTERVAL'))), file=sys.stderr)
sys.exit(INVALID_POLLING_INTERVAL)
if not all([api_url, build_id, job_number]):
print('Required Travis environment not given.', file=sys.stderr)
sys.exit(INCOMPLETE_TRAVIS_ENVIRONMENT)
# This may raise an Exception, and it should be printed
job_statuses = get_job_statuses(
github_token, api_url, build_id, polling_interval, job_number)
if not all(job_statuses):
print('Some jobs were not successful.')
sys.exit(JOBS_FAILED)
print('All required jobs were successful.')
def after_config_matches(ini, envlist):
    """Determine if this job should wait for the others."""
    cfg = ini.sections.get('travis:after', {})
    if not cfg:
        # No [travis:after] section means nothing ever waits.
        return False

    if 'envlist' in cfg or 'toxenv' in cfg:
        if 'toxenv' in cfg:
            print('The "toxenv" key of the [travis:after] section is '
                  'deprecated in favor of the "envlist" key.', file=sys.stderr)
        legacy = cfg.get('toxenv')
        wanted = set(split_env(cfg.get('envlist', legacy) or ''))
        # Every configured env must be present in the current envlist.
        if wanted - set(envlist):
            return False

    # Translate travis requirements to env requirements
    requirements = []
    for factor, value in parse_dict(cfg.get('travis', '')).items():
        if factor in TRAVIS_FACTORS:
            requirements.append((TRAVIS_FACTORS[factor], value))
    for name, value in parse_dict(cfg.get('env', '')).items():
        requirements.append((name, value))

    return all(os.environ.get(name) == value
               for name, value in requirements)
def get_json(url, auth=None, data=None):
"""Make a GET request, and return the response as parsed JSON."""
headers = {
'Accept': 'application/vnd.travis-ci.2+json',
'User-Agent': 'Travis/Tox-Travis-1.0a',
# User-Agent must start with "Travis/" in order to work
}
if auth:
headers['Authorization'] = 'token {auth}'.format(auth=auth)
params = {}
if data:
headers['Content-Type'] = 'application/json'
params['data'] = json.dumps(data).encode('utf-8')
request = urllib2.Request(url, headers=headers, **params)
response = urllib2.urlopen(request).read()
return json.loads(response.decode('utf-8'))
|
tox-dev/tox-travis
|
src/tox_travis/after.py
|
get_json
|
python
|
def get_json(url, auth=None, data=None):
headers = {
'Accept': 'application/vnd.travis-ci.2+json',
'User-Agent': 'Travis/Tox-Travis-1.0a',
# User-Agent must start with "Travis/" in order to work
}
if auth:
headers['Authorization'] = 'token {auth}'.format(auth=auth)
params = {}
if data:
headers['Content-Type'] = 'application/json'
params['data'] = json.dumps(data).encode('utf-8')
request = urllib2.Request(url, headers=headers, **params)
response = urllib2.urlopen(request).read()
return json.loads(response.decode('utf-8'))
|
Make a GET request, and return the response as parsed JSON.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/after.py#L130-L147
| null |
"""Add a flag to pause and wait for all Travis jobs to complete."""
from __future__ import print_function
import os
import sys
import json
import time
from tox.config import _split_env as split_env
try:
import urllib.request as urllib2
except ImportError:
import urllib2 # Python 2
from .utils import TRAVIS_FACTORS, parse_dict
# Exit code constants. They are purposely undocumented.
# Please do not depend on their values.
NO_GITHUB_TOKEN = 32
INVALID_POLLING_INTERVAL = 33
INCOMPLETE_TRAVIS_ENVIRONMENT = 34
JOBS_FAILED = 35
def travis_after(ini, envlist):
"""Wait for all jobs to finish, then exit successfully."""
# after-all disabled for pull requests
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return
if not after_config_matches(ini, envlist):
return # This is not the one that needs to wait
github_token = os.environ.get('GITHUB_TOKEN')
if not github_token:
print('No GitHub token given.', file=sys.stderr)
sys.exit(NO_GITHUB_TOKEN)
api_url = os.environ.get('TRAVIS_API_URL', 'https://api.travis-ci.org')
build_id = os.environ.get('TRAVIS_BUILD_ID')
job_number = os.environ.get('TRAVIS_JOB_NUMBER')
try:
polling_interval = int(os.environ.get('TRAVIS_POLLING_INTERVAL', 5))
except ValueError:
print('Invalid polling interval given: {0}'.format(
repr(os.environ.get('TRAVIS_POLLING_INTERVAL'))), file=sys.stderr)
sys.exit(INVALID_POLLING_INTERVAL)
if not all([api_url, build_id, job_number]):
print('Required Travis environment not given.', file=sys.stderr)
sys.exit(INCOMPLETE_TRAVIS_ENVIRONMENT)
# This may raise an Exception, and it should be printed
job_statuses = get_job_statuses(
github_token, api_url, build_id, polling_interval, job_number)
if not all(job_statuses):
print('Some jobs were not successful.')
sys.exit(JOBS_FAILED)
print('All required jobs were successful.')
def after_config_matches(ini, envlist):
"""Determine if this job should wait for the others."""
section = ini.sections.get('travis:after', {})
if not section:
return False # Never wait if it's not configured
if 'envlist' in section or 'toxenv' in section:
if 'toxenv' in section:
print('The "toxenv" key of the [travis:after] section is '
'deprecated in favor of the "envlist" key.', file=sys.stderr)
toxenv = section.get('toxenv')
required = set(split_env(section.get('envlist', toxenv) or ''))
actual = set(envlist)
if required - actual:
return False
# Translate travis requirements to env requirements
env_requirements = [
(TRAVIS_FACTORS[factor], value) for factor, value
in parse_dict(section.get('travis', '')).items()
if factor in TRAVIS_FACTORS
] + [
(name, value) for name, value
in parse_dict(section.get('env', '')).items()
]
return all([
os.environ.get(name) == value
for name, value in env_requirements
])
def get_job_statuses(github_token, api_url, build_id,
polling_interval, job_number):
"""Wait for all the travis jobs to complete.
Once the other jobs are complete, return a list of booleans,
indicating whether or not the job was successful. Ignore jobs
marked "allow_failure".
"""
auth = get_json('{api_url}/auth/github'.format(api_url=api_url),
data={'github_token': github_token})['access_token']
while True:
build = get_json('{api_url}/builds/{build_id}'.format(
api_url=api_url, build_id=build_id), auth=auth)
jobs = [job for job in build['jobs']
if job['number'] != job_number and
not job['allow_failure']] # Ignore allowed failures
if all(job['finished_at'] for job in jobs):
break # All the jobs have completed
elif any(job['state'] != 'passed'
for job in jobs if job['finished_at']):
break # Some required job that finished did not pass
print('Waiting for jobs to complete: {job_numbers}'.format(
job_numbers=[job['number'] for job in jobs
if not job['finished_at']]))
time.sleep(polling_interval)
return [job['state'] == 'passed' for job in jobs]
|
tox-dev/tox-travis
|
src/tox_travis/envlist.py
|
detect_envlist
|
python
|
def detect_envlist(ini):
# Find the envs that tox knows about
declared_envs = get_declared_envs(ini)
# Find all the envs for all the desired factors given
desired_factors = get_desired_factors(ini)
# Reduce desired factors
desired_envs = ['-'.join(env) for env in product(*desired_factors)]
# Find matching envs
return match_envs(declared_envs, desired_envs,
passthru=len(desired_factors) == 1)
|
Default envlist automatically based on the Travis environment.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/envlist.py#L14-L27
|
[
"def get_declared_envs(ini):\n \"\"\"Get the full list of envs from the tox ini.\n\n This notably also includes envs that aren't in the envlist,\n but are declared by having their own testenv:envname section.\n\n The envs are expected in a particular order. First the ones\n declared in the envlist, then the other testenvs in order.\n \"\"\"\n tox_section_name = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox'\n tox_section = ini.sections.get(tox_section_name, {})\n envlist = split_env(tox_section.get('envlist', []))\n\n # Add additional envs that are declared as sections in the ini\n section_envs = [\n section[8:] for section in sorted(ini.sections, key=ini.lineof)\n if section.startswith('testenv:')\n ]\n\n return envlist + [env for env in section_envs if env not in envlist]\n",
"def get_desired_factors(ini):\n \"\"\"Get the list of desired envs per declared factor.\n\n Look at all the accepted configuration locations, and give a list\n of envlists, one for each Travis factor found.\n\n Look in the ``[travis]`` section for the known Travis factors,\n which are backed by environment variable checking behind the\n scenes, but provide a cleaner interface.\n\n Also look for the ``[tox:travis]`` section, which is deprecated,\n and treat it as an additional ``python`` key from the ``[travis]``\n section.\n\n Finally, look for factors based directly on environment variables,\n listed in the ``[travis:env]`` section. Configuration found in the\n ``[travis]`` and ``[tox:travis]`` sections are converted to this\n form under the hood, and are considered in the same way.\n\n Special consideration is given to the ``python`` factor. If this\n factor is set in the environment, then an appropriate configuration\n will be provided automatically if no manual configuration is\n provided.\n\n To allow for the most flexible processing, the envlists provided\n by each factor are not combined after they are selected, but\n instead returned as a list of envlists, and expected to be\n combined as and when appropriate by the caller. 
This allows for\n special handling based on the number of factors that were found\n to apply to this environment.\n \"\"\"\n # Find configuration based on known travis factors\n travis_section = ini.sections.get('travis', {})\n found_factors = [\n (factor, parse_dict(travis_section[factor]))\n for factor in TRAVIS_FACTORS\n if factor in travis_section\n ]\n\n # Backward compatibility with the old tox:travis section\n if 'tox:travis' in ini.sections:\n print('The [tox:travis] section is deprecated in favor of'\n ' the \"python\" key of the [travis] section.', file=sys.stderr)\n found_factors.append(('python', ini.sections['tox:travis']))\n\n # Inject any needed autoenv\n version = os.environ.get('TRAVIS_PYTHON_VERSION')\n if version:\n default_envlist = get_default_envlist(version)\n if not any(factor == 'python' for factor, _ in found_factors):\n found_factors.insert(0, ('python', {version: default_envlist}))\n python_factors = [(factor, mapping)\n for factor, mapping in found_factors\n if version and factor == 'python']\n for _, mapping in python_factors:\n mapping.setdefault(version, default_envlist)\n\n # Convert known travis factors to env factors,\n # and combine with declared env factors.\n env_factors = [\n (TRAVIS_FACTORS[factor], mapping)\n for factor, mapping in found_factors\n ] + [\n (name, parse_dict(value))\n for name, value in ini.sections.get('travis:env', {}).items()\n ]\n\n # Choose the correct envlists based on the factor values\n return [\n split_env(mapping[os.environ[name]])\n for name, mapping in env_factors\n if name in os.environ and os.environ[name] in mapping\n ]\n",
"def match_envs(declared_envs, desired_envs, passthru):\n \"\"\"Determine the envs that match the desired_envs.\n\n If ``passthru` is True, and none of the declared envs match the\n desired envs, then the desired envs will be used verbatim.\n\n :param declared_envs: The envs that are declared in the tox config.\n :param desired_envs: The envs desired from the tox-travis config.\n :param bool passthru: Whether to used the ``desired_envs`` as a\n fallback if no declared envs match.\n \"\"\"\n matched = [\n declared for declared in declared_envs\n if any(env_matches(declared, desired) for desired in desired_envs)\n ]\n return desired_envs if not matched and passthru else matched\n"
] |
"""Default Tox envlist based on the Travis environment."""
from __future__ import print_function
import os
import re
import sys
from itertools import product
import tox.config
from tox.config import _split_env as split_env
from .utils import TRAVIS_FACTORS, parse_dict
def autogen_envconfigs(config, envs):
    """Make the envconfigs for undeclared envs.

    This is a stripped-down version of parseini.__init__ made for making
    an envconfig.

    :param config: tox config object; its ``envconfigs`` dict (and the
        ``distdir``/``distshare`` attributes) are updated in place.
    :param envs: Iterable of env names to create configs for.
    """
    # setup.cfg embeds tox configuration under "tox:"-prefixed sections.
    prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None
    reader = tox.config.SectionReader("tox", config._cfg, prefix=prefix)
    distshare_default = "{homedir}/.tox/distshare"
    # Register the substitutions that make_envconfig expects to resolve.
    reader.addsubstitutions(toxinidir=config.toxinidir,
                            homedir=config.homedir)
    reader.addsubstitutions(toxworkdir=config.toxworkdir)
    config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
    reader.addsubstitutions(distdir=config.distdir)
    config.distshare = reader.getpath("distshare", distshare_default)
    reader.addsubstitutions(distshare=config.distshare)
    try:
        make_envconfig = tox.config.ParseIni.make_envconfig  # tox 3.4.0+
    except AttributeError:
        make_envconfig = tox.config.parseini.make_envconfig
    # Dig past the unbound method in Python 2
    make_envconfig = getattr(make_envconfig, '__func__', make_envconfig)
    # Create the undeclared envs
    for env in envs:
        section = tox.config.testenvprefix + env
        config.envconfigs[env] = make_envconfig(
            config, env, section, reader._subs, config)
def get_declared_envs(ini):
    """Get the full list of envs from the tox ini.

    This notably also includes envs that aren't in the envlist,
    but are declared by having their own testenv:envname section.

    The envs are expected in a particular order. First the ones
    declared in the envlist, then the other testenvs in order.
    """
    main_section = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox'
    envlist = split_env(ini.sections.get(main_section, {}).get('envlist', []))

    # Envs declared only through their own [testenv:<name>] section,
    # ordered by where the section appears in the file.
    prefix = 'testenv:'
    section_envs = [
        name[len(prefix):]
        for name in sorted(ini.sections, key=ini.lineof)
        if name.startswith(prefix)
    ]

    return envlist + [env for env in section_envs if env not in envlist]
def get_version_info():
    """Return ``(version_string, major, minor)`` for the running Python.

    The ``__TOX_TRAVIS_SYS_VERSION`` environment variable, when set,
    overrides the real interpreter info for testing; its format is
    ``"<version>,<major>,<minor>"``.
    """
    forced = os.environ.get('__TOX_TRAVIS_SYS_VERSION')
    if not forced:
        major, minor = sys.version_info[:2]
        return sys.version, major, minor
    version, major, minor = forced.split(',')[:3]
    return version, int(major), int(minor)
def guess_python_env():
    """Derive the default tox env name from the running interpreter."""
    version, major, minor = get_version_info()
    if 'PyPy' not in version:
        return 'py{major}{minor}'.format(major=major, minor=minor)
    # PyPy builds are named by implementation, not by version number.
    return 'pypy' if major != 3 else 'pypy3'
def get_default_envlist(version):
    """Parse a default tox env based on the version.

    The version comes from the ``TRAVIS_PYTHON_VERSION`` environment
    variable. If that isn't set or is invalid, then use
    sys.version_info to come up with a reasonable default.

    :param version: Version string such as ``"2.7"``, ``"3.6.3"``,
        ``"3.10"``, ``"pypy"`` or ``"pypy3"``; may be ``None``.
    :return: A tox env name such as ``"py36"`` or ``"pypy3"``.
    """
    if version in ['pypy', 'pypy3']:
        return version

    # Accept multi-digit version components so that e.g. "3.10" maps to
    # "py310" instead of silently falling back to the version of the
    # interpreter that happens to be running this code.
    match = re.match(r'^(\d+)\.(\d+)(?:\.\d+)?$', version or '')
    if match:
        major, minor = match.groups()
        return 'py{major}{minor}'.format(major=major, minor=minor)

    # Unrecognized or missing version: guess from the interpreter.
    return guess_python_env()
def get_desired_factors(ini):
    """Get the list of desired envs per declared factor.

    Look at all the accepted configuration locations, and give a list
    of envlists, one for each Travis factor found.

    Look in the ``[travis]`` section for the known Travis factors,
    which are backed by environment variable checking behind the
    scenes, but provide a cleaner interface.

    Also look for the ``[tox:travis]`` section, which is deprecated,
    and treat it as an additional ``python`` key from the ``[travis]``
    section.

    Finally, look for factors based directly on environment variables,
    listed in the ``[travis:env]`` section. Configuration found in the
    ``[travis]`` and ``[tox:travis]`` sections are converted to this
    form under the hood, and are considered in the same way.

    Special consideration is given to the ``python`` factor. If this
    factor is set in the environment, then an appropriate configuration
    will be provided automatically if no manual configuration is
    provided.

    To allow for the most flexible processing, the envlists provided
    by each factor are not combined after they are selected, but
    instead returned as a list of envlists, and expected to be
    combined as and when appropriate by the caller. This allows for
    special handling based on the number of factors that were found
    to apply to this environment.

    :param ini: Parsed tox ini object with a ``sections`` mapping.
    :return: List of envlists, one per factor that applies here.
    """
    # Find configuration based on known travis factors
    travis_section = ini.sections.get('travis', {})
    found_factors = [
        (factor, parse_dict(travis_section[factor]))
        for factor in TRAVIS_FACTORS
        if factor in travis_section
    ]
    # Backward compatibility with the old tox:travis section
    if 'tox:travis' in ini.sections:
        print('The [tox:travis] section is deprecated in favor of'
              ' the "python" key of the [travis] section.', file=sys.stderr)
        found_factors.append(('python', ini.sections['tox:travis']))
    # Inject any needed autoenv
    version = os.environ.get('TRAVIS_PYTHON_VERSION')
    if version:
        default_envlist = get_default_envlist(version)
        # Add an implicit python factor when none was configured.
        if not any(factor == 'python' for factor, _ in found_factors):
            found_factors.insert(0, ('python', {version: default_envlist}))
        python_factors = [(factor, mapping)
                          for factor, mapping in found_factors
                          if version and factor == 'python']
        # Ensure every python factor can resolve the current version.
        # NOTE: this mutates the mapping objects collected above.
        for _, mapping in python_factors:
            mapping.setdefault(version, default_envlist)
    # Convert known travis factors to env factors,
    # and combine with declared env factors.
    env_factors = [
        (TRAVIS_FACTORS[factor], mapping)
        for factor, mapping in found_factors
    ] + [
        (name, parse_dict(value))
        for name, value in ini.sections.get('travis:env', {}).items()
    ]
    # Choose the correct envlists based on the factor values
    return [
        split_env(mapping[os.environ[name]])
        for name, mapping in env_factors
        if name in os.environ and os.environ[name] in mapping
    ]
def match_envs(declared_envs, desired_envs, passthru):
    """Determine which declared envs satisfy the desired envs.

    If ``passthru`` is True and none of the declared envs match the
    desired envs, then the desired envs will be used verbatim.

    :param declared_envs: The envs that are declared in the tox config.
    :param desired_envs: The envs desired from the tox-travis config.
    :param bool passthru: Whether to use the ``desired_envs`` as a
        fallback if no declared envs match.
    """
    matched = []
    for declared in declared_envs:
        if any(env_matches(declared, want) for want in desired_envs):
            matched.append(declared)

    if matched:
        return matched
    # Nothing matched: either fall back to the desired envs verbatim,
    # or return the empty list.
    return desired_envs if passthru else matched
def env_matches(declared, desired):
    """Check whether a declared env fulfills a desired env.

    Rather than comparing names verbatim, a declared env matches when
    it contains every factor of the desired env; extra factors on the
    declared side do not prevent a match.
    """
    declared_factors = set(declared.split('-'))
    return all(factor in declared_factors
               for factor in desired.split('-'))
def override_ignore_outcome(ini):
    """Decide whether to override ignore_outcomes.

    Returns the boolean ``unignore_outcomes`` key of the ``[travis]``
    section, defaulting to False when unset.
    """
    travis_reader = tox.config.SectionReader("travis", ini)
    return travis_reader.getbool('unignore_outcomes', False)
|
tox-dev/tox-travis
|
src/tox_travis/envlist.py
|
autogen_envconfigs
|
python
|
def autogen_envconfigs(config, envs):
prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None
reader = tox.config.SectionReader("tox", config._cfg, prefix=prefix)
distshare_default = "{homedir}/.tox/distshare"
reader.addsubstitutions(toxinidir=config.toxinidir,
homedir=config.homedir)
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", distshare_default)
reader.addsubstitutions(distshare=config.distshare)
try:
make_envconfig = tox.config.ParseIni.make_envconfig # tox 3.4.0+
except AttributeError:
make_envconfig = tox.config.parseini.make_envconfig
# Dig past the unbound method in Python 2
make_envconfig = getattr(make_envconfig, '__func__', make_envconfig)
# Create the undeclared envs
for env in envs:
section = tox.config.testenvprefix + env
config.envconfigs[env] = make_envconfig(
config, env, section, reader._subs, config)
|
Make the envconfigs for undeclared envs.
This is a stripped-down version of parseini.__init__ made for making
an envconfig.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/envlist.py#L30-L59
| null |
"""Default Tox envlist based on the Travis environment."""
from __future__ import print_function
import os
import re
import sys
from itertools import product
import tox.config
from tox.config import _split_env as split_env
from .utils import TRAVIS_FACTORS, parse_dict
def detect_envlist(ini):
    """Default envlist automatically based on the Travis environment."""
    # Envs tox already knows about, and the per-factor envlists wanted.
    known = get_declared_envs(ini)
    factors = get_desired_factors(ini)

    # Every combination of one env per factor, joined into env names.
    wanted = ['-'.join(combo) for combo in product(*factors)]

    # With a single factor, unmatched desired envs may pass through.
    return match_envs(known, wanted, passthru=len(factors) == 1)
def get_declared_envs(ini):
"""Get the full list of envs from the tox ini.
This notably also includes envs that aren't in the envlist,
but are declared by having their own testenv:envname section.
The envs are expected in a particular order. First the ones
declared in the envlist, then the other testenvs in order.
"""
tox_section_name = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox'
tox_section = ini.sections.get(tox_section_name, {})
envlist = split_env(tox_section.get('envlist', []))
# Add additional envs that are declared as sections in the ini
section_envs = [
section[8:] for section in sorted(ini.sections, key=ini.lineof)
if section.startswith('testenv:')
]
return envlist + [env for env in section_envs if env not in envlist]
def get_version_info():
"""Get version info from the sys module.
Override from environment for testing.
"""
overrides = os.environ.get('__TOX_TRAVIS_SYS_VERSION')
if overrides:
version, major, minor = overrides.split(',')[:3]
major, minor = int(major), int(minor)
else:
version, (major, minor) = sys.version, sys.version_info[:2]
return version, major, minor
def guess_python_env():
"""Guess the default python env to use."""
version, major, minor = get_version_info()
if 'PyPy' in version:
return 'pypy3' if major == 3 else 'pypy'
return 'py{major}{minor}'.format(major=major, minor=minor)
def get_default_envlist(version):
"""Parse a default tox env based on the version.
The version comes from the ``TRAVIS_PYTHON_VERSION`` environment
variable. If that isn't set or is invalid, then use
sys.version_info to come up with a reasonable default.
"""
if version in ['pypy', 'pypy3']:
return version
# Assume single digit major and minor versions
match = re.match(r'^(\d)\.(\d)(?:\.\d+)?$', version or '')
if match:
major, minor = match.groups()
return 'py{major}{minor}'.format(major=major, minor=minor)
return guess_python_env()
def get_desired_factors(ini):
"""Get the list of desired envs per declared factor.
Look at all the accepted configuration locations, and give a list
of envlists, one for each Travis factor found.
Look in the ``[travis]`` section for the known Travis factors,
which are backed by environment variable checking behind the
scenes, but provide a cleaner interface.
Also look for the ``[tox:travis]`` section, which is deprecated,
and treat it as an additional ``python`` key from the ``[travis]``
section.
Finally, look for factors based directly on environment variables,
listed in the ``[travis:env]`` section. Configuration found in the
``[travis]`` and ``[tox:travis]`` sections are converted to this
form under the hood, and are considered in the same way.
Special consideration is given to the ``python`` factor. If this
factor is set in the environment, then an appropriate configuration
will be provided automatically if no manual configuration is
provided.
To allow for the most flexible processing, the envlists provided
by each factor are not combined after they are selected, but
instead returned as a list of envlists, and expected to be
combined as and when appropriate by the caller. This allows for
special handling based on the number of factors that were found
to apply to this environment.
"""
# Find configuration based on known travis factors
travis_section = ini.sections.get('travis', {})
found_factors = [
(factor, parse_dict(travis_section[factor]))
for factor in TRAVIS_FACTORS
if factor in travis_section
]
# Backward compatibility with the old tox:travis section
if 'tox:travis' in ini.sections:
print('The [tox:travis] section is deprecated in favor of'
' the "python" key of the [travis] section.', file=sys.stderr)
found_factors.append(('python', ini.sections['tox:travis']))
# Inject any needed autoenv
version = os.environ.get('TRAVIS_PYTHON_VERSION')
if version:
default_envlist = get_default_envlist(version)
if not any(factor == 'python' for factor, _ in found_factors):
found_factors.insert(0, ('python', {version: default_envlist}))
python_factors = [(factor, mapping)
for factor, mapping in found_factors
if version and factor == 'python']
for _, mapping in python_factors:
mapping.setdefault(version, default_envlist)
# Convert known travis factors to env factors,
# and combine with declared env factors.
env_factors = [
(TRAVIS_FACTORS[factor], mapping)
for factor, mapping in found_factors
] + [
(name, parse_dict(value))
for name, value in ini.sections.get('travis:env', {}).items()
]
# Choose the correct envlists based on the factor values
return [
split_env(mapping[os.environ[name]])
for name, mapping in env_factors
if name in os.environ and os.environ[name] in mapping
]
def match_envs(declared_envs, desired_envs, passthru):
"""Determine the envs that match the desired_envs.
If ``passthru` is True, and none of the declared envs match the
desired envs, then the desired envs will be used verbatim.
:param declared_envs: The envs that are declared in the tox config.
:param desired_envs: The envs desired from the tox-travis config.
:param bool passthru: Whether to used the ``desired_envs`` as a
fallback if no declared envs match.
"""
matched = [
declared for declared in declared_envs
if any(env_matches(declared, desired) for desired in desired_envs)
]
return desired_envs if not matched and passthru else matched
def env_matches(declared, desired):
"""Determine if a declared env matches a desired env.
Rather than simply using the name of the env verbatim, take a
closer look to see if all the desired factors are fulfilled. If
the desired factors are fulfilled, but there are other factors,
it should still match the env.
"""
desired_factors = desired.split('-')
declared_factors = declared.split('-')
return all(factor in declared_factors for factor in desired_factors)
def override_ignore_outcome(ini):
"""Decide whether to override ignore_outcomes."""
travis_reader = tox.config.SectionReader("travis", ini)
return travis_reader.getbool('unignore_outcomes', False)
|
tox-dev/tox-travis
|
src/tox_travis/envlist.py
|
get_declared_envs
|
python
|
def get_declared_envs(ini):
tox_section_name = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox'
tox_section = ini.sections.get(tox_section_name, {})
envlist = split_env(tox_section.get('envlist', []))
# Add additional envs that are declared as sections in the ini
section_envs = [
section[8:] for section in sorted(ini.sections, key=ini.lineof)
if section.startswith('testenv:')
]
return envlist + [env for env in section_envs if env not in envlist]
|
Get the full list of envs from the tox ini.
This notably also includes envs that aren't in the envlist,
but are declared by having their own testenv:envname section.
The envs are expected in a particular order. First the ones
declared in the envlist, then the other testenvs in order.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/envlist.py#L62-L81
| null |
"""Default Tox envlist based on the Travis environment."""
from __future__ import print_function
import os
import re
import sys
from itertools import product
import tox.config
from tox.config import _split_env as split_env
from .utils import TRAVIS_FACTORS, parse_dict
def detect_envlist(ini):
"""Default envlist automatically based on the Travis environment."""
# Find the envs that tox knows about
declared_envs = get_declared_envs(ini)
# Find all the envs for all the desired factors given
desired_factors = get_desired_factors(ini)
# Reduce desired factors
desired_envs = ['-'.join(env) for env in product(*desired_factors)]
# Find matching envs
return match_envs(declared_envs, desired_envs,
passthru=len(desired_factors) == 1)
def autogen_envconfigs(config, envs):
"""Make the envconfigs for undeclared envs.
This is a stripped-down version of parseini.__init__ made for making
an envconfig.
"""
prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None
reader = tox.config.SectionReader("tox", config._cfg, prefix=prefix)
distshare_default = "{homedir}/.tox/distshare"
reader.addsubstitutions(toxinidir=config.toxinidir,
homedir=config.homedir)
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", distshare_default)
reader.addsubstitutions(distshare=config.distshare)
try:
make_envconfig = tox.config.ParseIni.make_envconfig # tox 3.4.0+
except AttributeError:
make_envconfig = tox.config.parseini.make_envconfig
# Dig past the unbound method in Python 2
make_envconfig = getattr(make_envconfig, '__func__', make_envconfig)
# Create the undeclared envs
for env in envs:
section = tox.config.testenvprefix + env
config.envconfigs[env] = make_envconfig(
config, env, section, reader._subs, config)
def get_version_info():
"""Get version info from the sys module.
Override from environment for testing.
"""
overrides = os.environ.get('__TOX_TRAVIS_SYS_VERSION')
if overrides:
version, major, minor = overrides.split(',')[:3]
major, minor = int(major), int(minor)
else:
version, (major, minor) = sys.version, sys.version_info[:2]
return version, major, minor
def guess_python_env():
"""Guess the default python env to use."""
version, major, minor = get_version_info()
if 'PyPy' in version:
return 'pypy3' if major == 3 else 'pypy'
return 'py{major}{minor}'.format(major=major, minor=minor)
def get_default_envlist(version):
"""Parse a default tox env based on the version.
The version comes from the ``TRAVIS_PYTHON_VERSION`` environment
variable. If that isn't set or is invalid, then use
sys.version_info to come up with a reasonable default.
"""
if version in ['pypy', 'pypy3']:
return version
# Assume single digit major and minor versions
match = re.match(r'^(\d)\.(\d)(?:\.\d+)?$', version or '')
if match:
major, minor = match.groups()
return 'py{major}{minor}'.format(major=major, minor=minor)
return guess_python_env()
def get_desired_factors(ini):
"""Get the list of desired envs per declared factor.
Look at all the accepted configuration locations, and give a list
of envlists, one for each Travis factor found.
Look in the ``[travis]`` section for the known Travis factors,
which are backed by environment variable checking behind the
scenes, but provide a cleaner interface.
Also look for the ``[tox:travis]`` section, which is deprecated,
and treat it as an additional ``python`` key from the ``[travis]``
section.
Finally, look for factors based directly on environment variables,
listed in the ``[travis:env]`` section. Configuration found in the
``[travis]`` and ``[tox:travis]`` sections are converted to this
form under the hood, and are considered in the same way.
Special consideration is given to the ``python`` factor. If this
factor is set in the environment, then an appropriate configuration
will be provided automatically if no manual configuration is
provided.
To allow for the most flexible processing, the envlists provided
by each factor are not combined after they are selected, but
instead returned as a list of envlists, and expected to be
combined as and when appropriate by the caller. This allows for
special handling based on the number of factors that were found
to apply to this environment.
"""
# Find configuration based on known travis factors
travis_section = ini.sections.get('travis', {})
found_factors = [
(factor, parse_dict(travis_section[factor]))
for factor in TRAVIS_FACTORS
if factor in travis_section
]
# Backward compatibility with the old tox:travis section
if 'tox:travis' in ini.sections:
print('The [tox:travis] section is deprecated in favor of'
' the "python" key of the [travis] section.', file=sys.stderr)
found_factors.append(('python', ini.sections['tox:travis']))
# Inject any needed autoenv
version = os.environ.get('TRAVIS_PYTHON_VERSION')
if version:
default_envlist = get_default_envlist(version)
if not any(factor == 'python' for factor, _ in found_factors):
found_factors.insert(0, ('python', {version: default_envlist}))
python_factors = [(factor, mapping)
for factor, mapping in found_factors
if version and factor == 'python']
for _, mapping in python_factors:
mapping.setdefault(version, default_envlist)
# Convert known travis factors to env factors,
# and combine with declared env factors.
env_factors = [
(TRAVIS_FACTORS[factor], mapping)
for factor, mapping in found_factors
] + [
(name, parse_dict(value))
for name, value in ini.sections.get('travis:env', {}).items()
]
# Choose the correct envlists based on the factor values
return [
split_env(mapping[os.environ[name]])
for name, mapping in env_factors
if name in os.environ and os.environ[name] in mapping
]
def match_envs(declared_envs, desired_envs, passthru):
"""Determine the envs that match the desired_envs.
If ``passthru` is True, and none of the declared envs match the
desired envs, then the desired envs will be used verbatim.
:param declared_envs: The envs that are declared in the tox config.
:param desired_envs: The envs desired from the tox-travis config.
:param bool passthru: Whether to used the ``desired_envs`` as a
fallback if no declared envs match.
"""
matched = [
declared for declared in declared_envs
if any(env_matches(declared, desired) for desired in desired_envs)
]
return desired_envs if not matched and passthru else matched
def env_matches(declared, desired):
"""Determine if a declared env matches a desired env.
Rather than simply using the name of the env verbatim, take a
closer look to see if all the desired factors are fulfilled. If
the desired factors are fulfilled, but there are other factors,
it should still match the env.
"""
desired_factors = desired.split('-')
declared_factors = declared.split('-')
return all(factor in declared_factors for factor in desired_factors)
def override_ignore_outcome(ini):
"""Decide whether to override ignore_outcomes."""
travis_reader = tox.config.SectionReader("travis", ini)
return travis_reader.getbool('unignore_outcomes', False)
|
tox-dev/tox-travis
|
src/tox_travis/envlist.py
|
get_version_info
|
python
|
def get_version_info():
overrides = os.environ.get('__TOX_TRAVIS_SYS_VERSION')
if overrides:
version, major, minor = overrides.split(',')[:3]
major, minor = int(major), int(minor)
else:
version, (major, minor) = sys.version, sys.version_info[:2]
return version, major, minor
|
Get version info from the sys module.
Override from environment for testing.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/envlist.py#L84-L95
| null |
"""Default Tox envlist based on the Travis environment."""
from __future__ import print_function
import os
import re
import sys
from itertools import product
import tox.config
from tox.config import _split_env as split_env
from .utils import TRAVIS_FACTORS, parse_dict
def detect_envlist(ini):
"""Default envlist automatically based on the Travis environment."""
# Find the envs that tox knows about
declared_envs = get_declared_envs(ini)
# Find all the envs for all the desired factors given
desired_factors = get_desired_factors(ini)
# Reduce desired factors
desired_envs = ['-'.join(env) for env in product(*desired_factors)]
# Find matching envs
return match_envs(declared_envs, desired_envs,
passthru=len(desired_factors) == 1)
def autogen_envconfigs(config, envs):
"""Make the envconfigs for undeclared envs.
This is a stripped-down version of parseini.__init__ made for making
an envconfig.
"""
prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None
reader = tox.config.SectionReader("tox", config._cfg, prefix=prefix)
distshare_default = "{homedir}/.tox/distshare"
reader.addsubstitutions(toxinidir=config.toxinidir,
homedir=config.homedir)
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", distshare_default)
reader.addsubstitutions(distshare=config.distshare)
try:
make_envconfig = tox.config.ParseIni.make_envconfig # tox 3.4.0+
except AttributeError:
make_envconfig = tox.config.parseini.make_envconfig
# Dig past the unbound method in Python 2
make_envconfig = getattr(make_envconfig, '__func__', make_envconfig)
# Create the undeclared envs
for env in envs:
section = tox.config.testenvprefix + env
config.envconfigs[env] = make_envconfig(
config, env, section, reader._subs, config)
def get_declared_envs(ini):
"""Get the full list of envs from the tox ini.
This notably also includes envs that aren't in the envlist,
but are declared by having their own testenv:envname section.
The envs are expected in a particular order. First the ones
declared in the envlist, then the other testenvs in order.
"""
tox_section_name = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox'
tox_section = ini.sections.get(tox_section_name, {})
envlist = split_env(tox_section.get('envlist', []))
# Add additional envs that are declared as sections in the ini
section_envs = [
section[8:] for section in sorted(ini.sections, key=ini.lineof)
if section.startswith('testenv:')
]
return envlist + [env for env in section_envs if env not in envlist]
def guess_python_env():
"""Guess the default python env to use."""
version, major, minor = get_version_info()
if 'PyPy' in version:
return 'pypy3' if major == 3 else 'pypy'
return 'py{major}{minor}'.format(major=major, minor=minor)
def get_default_envlist(version):
"""Parse a default tox env based on the version.
The version comes from the ``TRAVIS_PYTHON_VERSION`` environment
variable. If that isn't set or is invalid, then use
sys.version_info to come up with a reasonable default.
"""
if version in ['pypy', 'pypy3']:
return version
# Assume single digit major and minor versions
match = re.match(r'^(\d)\.(\d)(?:\.\d+)?$', version or '')
if match:
major, minor = match.groups()
return 'py{major}{minor}'.format(major=major, minor=minor)
return guess_python_env()
def get_desired_factors(ini):
"""Get the list of desired envs per declared factor.
Look at all the accepted configuration locations, and give a list
of envlists, one for each Travis factor found.
Look in the ``[travis]`` section for the known Travis factors,
which are backed by environment variable checking behind the
scenes, but provide a cleaner interface.
Also look for the ``[tox:travis]`` section, which is deprecated,
and treat it as an additional ``python`` key from the ``[travis]``
section.
Finally, look for factors based directly on environment variables,
listed in the ``[travis:env]`` section. Configuration found in the
``[travis]`` and ``[tox:travis]`` sections are converted to this
form under the hood, and are considered in the same way.
Special consideration is given to the ``python`` factor. If this
factor is set in the environment, then an appropriate configuration
will be provided automatically if no manual configuration is
provided.
To allow for the most flexible processing, the envlists provided
by each factor are not combined after they are selected, but
instead returned as a list of envlists, and expected to be
combined as and when appropriate by the caller. This allows for
special handling based on the number of factors that were found
to apply to this environment.
"""
# Find configuration based on known travis factors
travis_section = ini.sections.get('travis', {})
found_factors = [
(factor, parse_dict(travis_section[factor]))
for factor in TRAVIS_FACTORS
if factor in travis_section
]
# Backward compatibility with the old tox:travis section
if 'tox:travis' in ini.sections:
print('The [tox:travis] section is deprecated in favor of'
' the "python" key of the [travis] section.', file=sys.stderr)
found_factors.append(('python', ini.sections['tox:travis']))
# Inject any needed autoenv
version = os.environ.get('TRAVIS_PYTHON_VERSION')
if version:
default_envlist = get_default_envlist(version)
if not any(factor == 'python' for factor, _ in found_factors):
found_factors.insert(0, ('python', {version: default_envlist}))
python_factors = [(factor, mapping)
for factor, mapping in found_factors
if version and factor == 'python']
for _, mapping in python_factors:
mapping.setdefault(version, default_envlist)
# Convert known travis factors to env factors,
# and combine with declared env factors.
env_factors = [
(TRAVIS_FACTORS[factor], mapping)
for factor, mapping in found_factors
] + [
(name, parse_dict(value))
for name, value in ini.sections.get('travis:env', {}).items()
]
# Choose the correct envlists based on the factor values
return [
split_env(mapping[os.environ[name]])
for name, mapping in env_factors
if name in os.environ and os.environ[name] in mapping
]
def match_envs(declared_envs, desired_envs, passthru):
"""Determine the envs that match the desired_envs.
If ``passthru` is True, and none of the declared envs match the
desired envs, then the desired envs will be used verbatim.
:param declared_envs: The envs that are declared in the tox config.
:param desired_envs: The envs desired from the tox-travis config.
:param bool passthru: Whether to used the ``desired_envs`` as a
fallback if no declared envs match.
"""
matched = [
declared for declared in declared_envs
if any(env_matches(declared, desired) for desired in desired_envs)
]
return desired_envs if not matched and passthru else matched
def env_matches(declared, desired):
"""Determine if a declared env matches a desired env.
Rather than simply using the name of the env verbatim, take a
closer look to see if all the desired factors are fulfilled. If
the desired factors are fulfilled, but there are other factors,
it should still match the env.
"""
desired_factors = desired.split('-')
declared_factors = declared.split('-')
return all(factor in declared_factors for factor in desired_factors)
def override_ignore_outcome(ini):
"""Decide whether to override ignore_outcomes."""
travis_reader = tox.config.SectionReader("travis", ini)
return travis_reader.getbool('unignore_outcomes', False)
|
tox-dev/tox-travis
|
src/tox_travis/envlist.py
|
guess_python_env
|
python
|
def guess_python_env():
version, major, minor = get_version_info()
if 'PyPy' in version:
return 'pypy3' if major == 3 else 'pypy'
return 'py{major}{minor}'.format(major=major, minor=minor)
|
Guess the default python env to use.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/envlist.py#L98-L103
|
[
"def get_version_info():\n \"\"\"Get version info from the sys module.\n\n Override from environment for testing.\n \"\"\"\n overrides = os.environ.get('__TOX_TRAVIS_SYS_VERSION')\n if overrides:\n version, major, minor = overrides.split(',')[:3]\n major, minor = int(major), int(minor)\n else:\n version, (major, minor) = sys.version, sys.version_info[:2]\n return version, major, minor\n"
] |
"""Default Tox envlist based on the Travis environment."""
from __future__ import print_function
import os
import re
import sys
from itertools import product
import tox.config
from tox.config import _split_env as split_env
from .utils import TRAVIS_FACTORS, parse_dict
def detect_envlist(ini):
"""Default envlist automatically based on the Travis environment."""
# Find the envs that tox knows about
declared_envs = get_declared_envs(ini)
# Find all the envs for all the desired factors given
desired_factors = get_desired_factors(ini)
# Reduce desired factors
desired_envs = ['-'.join(env) for env in product(*desired_factors)]
# Find matching envs
return match_envs(declared_envs, desired_envs,
passthru=len(desired_factors) == 1)
def autogen_envconfigs(config, envs):
"""Make the envconfigs for undeclared envs.
This is a stripped-down version of parseini.__init__ made for making
an envconfig.
"""
prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None
reader = tox.config.SectionReader("tox", config._cfg, prefix=prefix)
distshare_default = "{homedir}/.tox/distshare"
reader.addsubstitutions(toxinidir=config.toxinidir,
homedir=config.homedir)
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", distshare_default)
reader.addsubstitutions(distshare=config.distshare)
try:
make_envconfig = tox.config.ParseIni.make_envconfig # tox 3.4.0+
except AttributeError:
make_envconfig = tox.config.parseini.make_envconfig
# Dig past the unbound method in Python 2
make_envconfig = getattr(make_envconfig, '__func__', make_envconfig)
# Create the undeclared envs
for env in envs:
section = tox.config.testenvprefix + env
config.envconfigs[env] = make_envconfig(
config, env, section, reader._subs, config)
def get_declared_envs(ini):
"""Get the full list of envs from the tox ini.
This notably also includes envs that aren't in the envlist,
but are declared by having their own testenv:envname section.
The envs are expected in a particular order. First the ones
declared in the envlist, then the other testenvs in order.
"""
tox_section_name = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox'
tox_section = ini.sections.get(tox_section_name, {})
envlist = split_env(tox_section.get('envlist', []))
# Add additional envs that are declared as sections in the ini
section_envs = [
section[8:] for section in sorted(ini.sections, key=ini.lineof)
if section.startswith('testenv:')
]
return envlist + [env for env in section_envs if env not in envlist]
def get_version_info():
"""Get version info from the sys module.
Override from environment for testing.
"""
overrides = os.environ.get('__TOX_TRAVIS_SYS_VERSION')
if overrides:
version, major, minor = overrides.split(',')[:3]
major, minor = int(major), int(minor)
else:
version, (major, minor) = sys.version, sys.version_info[:2]
return version, major, minor
def get_default_envlist(version):
"""Parse a default tox env based on the version.
The version comes from the ``TRAVIS_PYTHON_VERSION`` environment
variable. If that isn't set or is invalid, then use
sys.version_info to come up with a reasonable default.
"""
if version in ['pypy', 'pypy3']:
return version
# Assume single digit major and minor versions
match = re.match(r'^(\d)\.(\d)(?:\.\d+)?$', version or '')
if match:
major, minor = match.groups()
return 'py{major}{minor}'.format(major=major, minor=minor)
return guess_python_env()
def get_desired_factors(ini):
"""Get the list of desired envs per declared factor.
Look at all the accepted configuration locations, and give a list
of envlists, one for each Travis factor found.
Look in the ``[travis]`` section for the known Travis factors,
which are backed by environment variable checking behind the
scenes, but provide a cleaner interface.
Also look for the ``[tox:travis]`` section, which is deprecated,
and treat it as an additional ``python`` key from the ``[travis]``
section.
Finally, look for factors based directly on environment variables,
listed in the ``[travis:env]`` section. Configuration found in the
``[travis]`` and ``[tox:travis]`` sections are converted to this
form under the hood, and are considered in the same way.
Special consideration is given to the ``python`` factor. If this
factor is set in the environment, then an appropriate configuration
will be provided automatically if no manual configuration is
provided.
To allow for the most flexible processing, the envlists provided
by each factor are not combined after they are selected, but
instead returned as a list of envlists, and expected to be
combined as and when appropriate by the caller. This allows for
special handling based on the number of factors that were found
to apply to this environment.
"""
# Find configuration based on known travis factors
travis_section = ini.sections.get('travis', {})
found_factors = [
(factor, parse_dict(travis_section[factor]))
for factor in TRAVIS_FACTORS
if factor in travis_section
]
# Backward compatibility with the old tox:travis section
if 'tox:travis' in ini.sections:
print('The [tox:travis] section is deprecated in favor of'
' the "python" key of the [travis] section.', file=sys.stderr)
found_factors.append(('python', ini.sections['tox:travis']))
# Inject any needed autoenv
version = os.environ.get('TRAVIS_PYTHON_VERSION')
if version:
default_envlist = get_default_envlist(version)
if not any(factor == 'python' for factor, _ in found_factors):
found_factors.insert(0, ('python', {version: default_envlist}))
python_factors = [(factor, mapping)
for factor, mapping in found_factors
if version and factor == 'python']
for _, mapping in python_factors:
mapping.setdefault(version, default_envlist)
# Convert known travis factors to env factors,
# and combine with declared env factors.
env_factors = [
(TRAVIS_FACTORS[factor], mapping)
for factor, mapping in found_factors
] + [
(name, parse_dict(value))
for name, value in ini.sections.get('travis:env', {}).items()
]
# Choose the correct envlists based on the factor values
return [
split_env(mapping[os.environ[name]])
for name, mapping in env_factors
if name in os.environ and os.environ[name] in mapping
]
def match_envs(declared_envs, desired_envs, passthru):
"""Determine the envs that match the desired_envs.
If ``passthru` is True, and none of the declared envs match the
desired envs, then the desired envs will be used verbatim.
:param declared_envs: The envs that are declared in the tox config.
:param desired_envs: The envs desired from the tox-travis config.
:param bool passthru: Whether to used the ``desired_envs`` as a
fallback if no declared envs match.
"""
matched = [
declared for declared in declared_envs
if any(env_matches(declared, desired) for desired in desired_envs)
]
return desired_envs if not matched and passthru else matched
def env_matches(declared, desired):
"""Determine if a declared env matches a desired env.
Rather than simply using the name of the env verbatim, take a
closer look to see if all the desired factors are fulfilled. If
the desired factors are fulfilled, but there are other factors,
it should still match the env.
"""
desired_factors = desired.split('-')
declared_factors = declared.split('-')
return all(factor in declared_factors for factor in desired_factors)
def override_ignore_outcome(ini):
"""Decide whether to override ignore_outcomes."""
travis_reader = tox.config.SectionReader("travis", ini)
return travis_reader.getbool('unignore_outcomes', False)
|
tox-dev/tox-travis
|
src/tox_travis/envlist.py
|
get_default_envlist
|
python
|
def get_default_envlist(version):
if version in ['pypy', 'pypy3']:
return version
# Assume single digit major and minor versions
match = re.match(r'^(\d)\.(\d)(?:\.\d+)?$', version or '')
if match:
major, minor = match.groups()
return 'py{major}{minor}'.format(major=major, minor=minor)
return guess_python_env()
|
Parse a default tox env based on the version.
The version comes from the ``TRAVIS_PYTHON_VERSION`` environment
variable. If that isn't set or is invalid, then use
sys.version_info to come up with a reasonable default.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/envlist.py#L106-L122
|
[
"def guess_python_env():\n \"\"\"Guess the default python env to use.\"\"\"\n version, major, minor = get_version_info()\n if 'PyPy' in version:\n return 'pypy3' if major == 3 else 'pypy'\n return 'py{major}{minor}'.format(major=major, minor=minor)\n"
] |
"""Default Tox envlist based on the Travis environment."""
from __future__ import print_function
import os
import re
import sys
from itertools import product
import tox.config
from tox.config import _split_env as split_env
from .utils import TRAVIS_FACTORS, parse_dict
def detect_envlist(ini):
"""Default envlist automatically based on the Travis environment."""
# Find the envs that tox knows about
declared_envs = get_declared_envs(ini)
# Find all the envs for all the desired factors given
desired_factors = get_desired_factors(ini)
# Reduce desired factors
desired_envs = ['-'.join(env) for env in product(*desired_factors)]
# Find matching envs
return match_envs(declared_envs, desired_envs,
passthru=len(desired_factors) == 1)
def autogen_envconfigs(config, envs):
"""Make the envconfigs for undeclared envs.
This is a stripped-down version of parseini.__init__ made for making
an envconfig.
"""
prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None
reader = tox.config.SectionReader("tox", config._cfg, prefix=prefix)
distshare_default = "{homedir}/.tox/distshare"
reader.addsubstitutions(toxinidir=config.toxinidir,
homedir=config.homedir)
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", distshare_default)
reader.addsubstitutions(distshare=config.distshare)
try:
make_envconfig = tox.config.ParseIni.make_envconfig # tox 3.4.0+
except AttributeError:
make_envconfig = tox.config.parseini.make_envconfig
# Dig past the unbound method in Python 2
make_envconfig = getattr(make_envconfig, '__func__', make_envconfig)
# Create the undeclared envs
for env in envs:
section = tox.config.testenvprefix + env
config.envconfigs[env] = make_envconfig(
config, env, section, reader._subs, config)
def get_declared_envs(ini):
"""Get the full list of envs from the tox ini.
This notably also includes envs that aren't in the envlist,
but are declared by having their own testenv:envname section.
The envs are expected in a particular order. First the ones
declared in the envlist, then the other testenvs in order.
"""
tox_section_name = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox'
tox_section = ini.sections.get(tox_section_name, {})
envlist = split_env(tox_section.get('envlist', []))
# Add additional envs that are declared as sections in the ini
section_envs = [
section[8:] for section in sorted(ini.sections, key=ini.lineof)
if section.startswith('testenv:')
]
return envlist + [env for env in section_envs if env not in envlist]
def get_version_info():
"""Get version info from the sys module.
Override from environment for testing.
"""
overrides = os.environ.get('__TOX_TRAVIS_SYS_VERSION')
if overrides:
version, major, minor = overrides.split(',')[:3]
major, minor = int(major), int(minor)
else:
version, (major, minor) = sys.version, sys.version_info[:2]
return version, major, minor
def guess_python_env():
"""Guess the default python env to use."""
version, major, minor = get_version_info()
if 'PyPy' in version:
return 'pypy3' if major == 3 else 'pypy'
return 'py{major}{minor}'.format(major=major, minor=minor)
def get_desired_factors(ini):
"""Get the list of desired envs per declared factor.
Look at all the accepted configuration locations, and give a list
of envlists, one for each Travis factor found.
Look in the ``[travis]`` section for the known Travis factors,
which are backed by environment variable checking behind the
scenes, but provide a cleaner interface.
Also look for the ``[tox:travis]`` section, which is deprecated,
and treat it as an additional ``python`` key from the ``[travis]``
section.
Finally, look for factors based directly on environment variables,
listed in the ``[travis:env]`` section. Configuration found in the
``[travis]`` and ``[tox:travis]`` sections are converted to this
form under the hood, and are considered in the same way.
Special consideration is given to the ``python`` factor. If this
factor is set in the environment, then an appropriate configuration
will be provided automatically if no manual configuration is
provided.
To allow for the most flexible processing, the envlists provided
by each factor are not combined after they are selected, but
instead returned as a list of envlists, and expected to be
combined as and when appropriate by the caller. This allows for
special handling based on the number of factors that were found
to apply to this environment.
"""
# Find configuration based on known travis factors
travis_section = ini.sections.get('travis', {})
found_factors = [
(factor, parse_dict(travis_section[factor]))
for factor in TRAVIS_FACTORS
if factor in travis_section
]
# Backward compatibility with the old tox:travis section
if 'tox:travis' in ini.sections:
print('The [tox:travis] section is deprecated in favor of'
' the "python" key of the [travis] section.', file=sys.stderr)
found_factors.append(('python', ini.sections['tox:travis']))
# Inject any needed autoenv
version = os.environ.get('TRAVIS_PYTHON_VERSION')
if version:
default_envlist = get_default_envlist(version)
if not any(factor == 'python' for factor, _ in found_factors):
found_factors.insert(0, ('python', {version: default_envlist}))
python_factors = [(factor, mapping)
for factor, mapping in found_factors
if version and factor == 'python']
for _, mapping in python_factors:
mapping.setdefault(version, default_envlist)
# Convert known travis factors to env factors,
# and combine with declared env factors.
env_factors = [
(TRAVIS_FACTORS[factor], mapping)
for factor, mapping in found_factors
] + [
(name, parse_dict(value))
for name, value in ini.sections.get('travis:env', {}).items()
]
# Choose the correct envlists based on the factor values
return [
split_env(mapping[os.environ[name]])
for name, mapping in env_factors
if name in os.environ and os.environ[name] in mapping
]
def match_envs(declared_envs, desired_envs, passthru):
"""Determine the envs that match the desired_envs.
If ``passthru` is True, and none of the declared envs match the
desired envs, then the desired envs will be used verbatim.
:param declared_envs: The envs that are declared in the tox config.
:param desired_envs: The envs desired from the tox-travis config.
:param bool passthru: Whether to used the ``desired_envs`` as a
fallback if no declared envs match.
"""
matched = [
declared for declared in declared_envs
if any(env_matches(declared, desired) for desired in desired_envs)
]
return desired_envs if not matched and passthru else matched
def env_matches(declared, desired):
"""Determine if a declared env matches a desired env.
Rather than simply using the name of the env verbatim, take a
closer look to see if all the desired factors are fulfilled. If
the desired factors are fulfilled, but there are other factors,
it should still match the env.
"""
desired_factors = desired.split('-')
declared_factors = declared.split('-')
return all(factor in declared_factors for factor in desired_factors)
def override_ignore_outcome(ini):
"""Decide whether to override ignore_outcomes."""
travis_reader = tox.config.SectionReader("travis", ini)
return travis_reader.getbool('unignore_outcomes', False)
|
tox-dev/tox-travis
|
src/tox_travis/envlist.py
|
get_desired_factors
|
python
|
def get_desired_factors(ini):
# Find configuration based on known travis factors
travis_section = ini.sections.get('travis', {})
found_factors = [
(factor, parse_dict(travis_section[factor]))
for factor in TRAVIS_FACTORS
if factor in travis_section
]
# Backward compatibility with the old tox:travis section
if 'tox:travis' in ini.sections:
print('The [tox:travis] section is deprecated in favor of'
' the "python" key of the [travis] section.', file=sys.stderr)
found_factors.append(('python', ini.sections['tox:travis']))
# Inject any needed autoenv
version = os.environ.get('TRAVIS_PYTHON_VERSION')
if version:
default_envlist = get_default_envlist(version)
if not any(factor == 'python' for factor, _ in found_factors):
found_factors.insert(0, ('python', {version: default_envlist}))
python_factors = [(factor, mapping)
for factor, mapping in found_factors
if version and factor == 'python']
for _, mapping in python_factors:
mapping.setdefault(version, default_envlist)
# Convert known travis factors to env factors,
# and combine with declared env factors.
env_factors = [
(TRAVIS_FACTORS[factor], mapping)
for factor, mapping in found_factors
] + [
(name, parse_dict(value))
for name, value in ini.sections.get('travis:env', {}).items()
]
# Choose the correct envlists based on the factor values
return [
split_env(mapping[os.environ[name]])
for name, mapping in env_factors
if name in os.environ and os.environ[name] in mapping
]
|
Get the list of desired envs per declared factor.
Look at all the accepted configuration locations, and give a list
of envlists, one for each Travis factor found.
Look in the ``[travis]`` section for the known Travis factors,
which are backed by environment variable checking behind the
scenes, but provide a cleaner interface.
Also look for the ``[tox:travis]`` section, which is deprecated,
and treat it as an additional ``python`` key from the ``[travis]``
section.
Finally, look for factors based directly on environment variables,
listed in the ``[travis:env]`` section. Configuration found in the
``[travis]`` and ``[tox:travis]`` sections are converted to this
form under the hood, and are considered in the same way.
Special consideration is given to the ``python`` factor. If this
factor is set in the environment, then an appropriate configuration
will be provided automatically if no manual configuration is
provided.
To allow for the most flexible processing, the envlists provided
by each factor are not combined after they are selected, but
instead returned as a list of envlists, and expected to be
combined as and when appropriate by the caller. This allows for
special handling based on the number of factors that were found
to apply to this environment.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/envlist.py#L125-L197
|
[
"def get_default_envlist(version):\n \"\"\"Parse a default tox env based on the version.\n\n The version comes from the ``TRAVIS_PYTHON_VERSION`` environment\n variable. If that isn't set or is invalid, then use\n sys.version_info to come up with a reasonable default.\n \"\"\"\n if version in ['pypy', 'pypy3']:\n return version\n\n # Assume single digit major and minor versions\n match = re.match(r'^(\\d)\\.(\\d)(?:\\.\\d+)?$', version or '')\n if match:\n major, minor = match.groups()\n return 'py{major}{minor}'.format(major=major, minor=minor)\n\n return guess_python_env()\n"
] |
"""Default Tox envlist based on the Travis environment."""
from __future__ import print_function
import os
import re
import sys
from itertools import product
import tox.config
from tox.config import _split_env as split_env
from .utils import TRAVIS_FACTORS, parse_dict
def detect_envlist(ini):
"""Default envlist automatically based on the Travis environment."""
# Find the envs that tox knows about
declared_envs = get_declared_envs(ini)
# Find all the envs for all the desired factors given
desired_factors = get_desired_factors(ini)
# Reduce desired factors
desired_envs = ['-'.join(env) for env in product(*desired_factors)]
# Find matching envs
return match_envs(declared_envs, desired_envs,
passthru=len(desired_factors) == 1)
def autogen_envconfigs(config, envs):
"""Make the envconfigs for undeclared envs.
This is a stripped-down version of parseini.__init__ made for making
an envconfig.
"""
prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None
reader = tox.config.SectionReader("tox", config._cfg, prefix=prefix)
distshare_default = "{homedir}/.tox/distshare"
reader.addsubstitutions(toxinidir=config.toxinidir,
homedir=config.homedir)
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", distshare_default)
reader.addsubstitutions(distshare=config.distshare)
try:
make_envconfig = tox.config.ParseIni.make_envconfig # tox 3.4.0+
except AttributeError:
make_envconfig = tox.config.parseini.make_envconfig
# Dig past the unbound method in Python 2
make_envconfig = getattr(make_envconfig, '__func__', make_envconfig)
# Create the undeclared envs
for env in envs:
section = tox.config.testenvprefix + env
config.envconfigs[env] = make_envconfig(
config, env, section, reader._subs, config)
def get_declared_envs(ini):
"""Get the full list of envs from the tox ini.
This notably also includes envs that aren't in the envlist,
but are declared by having their own testenv:envname section.
The envs are expected in a particular order. First the ones
declared in the envlist, then the other testenvs in order.
"""
tox_section_name = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox'
tox_section = ini.sections.get(tox_section_name, {})
envlist = split_env(tox_section.get('envlist', []))
# Add additional envs that are declared as sections in the ini
section_envs = [
section[8:] for section in sorted(ini.sections, key=ini.lineof)
if section.startswith('testenv:')
]
return envlist + [env for env in section_envs if env not in envlist]
def get_version_info():
"""Get version info from the sys module.
Override from environment for testing.
"""
overrides = os.environ.get('__TOX_TRAVIS_SYS_VERSION')
if overrides:
version, major, minor = overrides.split(',')[:3]
major, minor = int(major), int(minor)
else:
version, (major, minor) = sys.version, sys.version_info[:2]
return version, major, minor
def guess_python_env():
"""Guess the default python env to use."""
version, major, minor = get_version_info()
if 'PyPy' in version:
return 'pypy3' if major == 3 else 'pypy'
return 'py{major}{minor}'.format(major=major, minor=minor)
def get_default_envlist(version):
"""Parse a default tox env based on the version.
The version comes from the ``TRAVIS_PYTHON_VERSION`` environment
variable. If that isn't set or is invalid, then use
sys.version_info to come up with a reasonable default.
"""
if version in ['pypy', 'pypy3']:
return version
# Assume single digit major and minor versions
match = re.match(r'^(\d)\.(\d)(?:\.\d+)?$', version or '')
if match:
major, minor = match.groups()
return 'py{major}{minor}'.format(major=major, minor=minor)
return guess_python_env()
def match_envs(declared_envs, desired_envs, passthru):
"""Determine the envs that match the desired_envs.
If ``passthru` is True, and none of the declared envs match the
desired envs, then the desired envs will be used verbatim.
:param declared_envs: The envs that are declared in the tox config.
:param desired_envs: The envs desired from the tox-travis config.
:param bool passthru: Whether to used the ``desired_envs`` as a
fallback if no declared envs match.
"""
matched = [
declared for declared in declared_envs
if any(env_matches(declared, desired) for desired in desired_envs)
]
return desired_envs if not matched and passthru else matched
def env_matches(declared, desired):
"""Determine if a declared env matches a desired env.
Rather than simply using the name of the env verbatim, take a
closer look to see if all the desired factors are fulfilled. If
the desired factors are fulfilled, but there are other factors,
it should still match the env.
"""
desired_factors = desired.split('-')
declared_factors = declared.split('-')
return all(factor in declared_factors for factor in desired_factors)
def override_ignore_outcome(ini):
"""Decide whether to override ignore_outcomes."""
travis_reader = tox.config.SectionReader("travis", ini)
return travis_reader.getbool('unignore_outcomes', False)
|
tox-dev/tox-travis
|
src/tox_travis/envlist.py
|
match_envs
|
python
|
def match_envs(declared_envs, desired_envs, passthru):
matched = [
declared for declared in declared_envs
if any(env_matches(declared, desired) for desired in desired_envs)
]
return desired_envs if not matched and passthru else matched
|
Determine the envs that match the desired_envs.
If ``passthru` is True, and none of the declared envs match the
desired envs, then the desired envs will be used verbatim.
:param declared_envs: The envs that are declared in the tox config.
:param desired_envs: The envs desired from the tox-travis config.
:param bool passthru: Whether to used the ``desired_envs`` as a
fallback if no declared envs match.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/envlist.py#L200-L215
| null |
"""Default Tox envlist based on the Travis environment."""
from __future__ import print_function
import os
import re
import sys
from itertools import product
import tox.config
from tox.config import _split_env as split_env
from .utils import TRAVIS_FACTORS, parse_dict
def detect_envlist(ini):
"""Default envlist automatically based on the Travis environment."""
# Find the envs that tox knows about
declared_envs = get_declared_envs(ini)
# Find all the envs for all the desired factors given
desired_factors = get_desired_factors(ini)
# Reduce desired factors
desired_envs = ['-'.join(env) for env in product(*desired_factors)]
# Find matching envs
return match_envs(declared_envs, desired_envs,
passthru=len(desired_factors) == 1)
def autogen_envconfigs(config, envs):
"""Make the envconfigs for undeclared envs.
This is a stripped-down version of parseini.__init__ made for making
an envconfig.
"""
prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None
reader = tox.config.SectionReader("tox", config._cfg, prefix=prefix)
distshare_default = "{homedir}/.tox/distshare"
reader.addsubstitutions(toxinidir=config.toxinidir,
homedir=config.homedir)
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", distshare_default)
reader.addsubstitutions(distshare=config.distshare)
try:
make_envconfig = tox.config.ParseIni.make_envconfig # tox 3.4.0+
except AttributeError:
make_envconfig = tox.config.parseini.make_envconfig
# Dig past the unbound method in Python 2
make_envconfig = getattr(make_envconfig, '__func__', make_envconfig)
# Create the undeclared envs
for env in envs:
section = tox.config.testenvprefix + env
config.envconfigs[env] = make_envconfig(
config, env, section, reader._subs, config)
def get_declared_envs(ini):
"""Get the full list of envs from the tox ini.
This notably also includes envs that aren't in the envlist,
but are declared by having their own testenv:envname section.
The envs are expected in a particular order. First the ones
declared in the envlist, then the other testenvs in order.
"""
tox_section_name = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox'
tox_section = ini.sections.get(tox_section_name, {})
envlist = split_env(tox_section.get('envlist', []))
# Add additional envs that are declared as sections in the ini
section_envs = [
section[8:] for section in sorted(ini.sections, key=ini.lineof)
if section.startswith('testenv:')
]
return envlist + [env for env in section_envs if env not in envlist]
def get_version_info():
"""Get version info from the sys module.
Override from environment for testing.
"""
overrides = os.environ.get('__TOX_TRAVIS_SYS_VERSION')
if overrides:
version, major, minor = overrides.split(',')[:3]
major, minor = int(major), int(minor)
else:
version, (major, minor) = sys.version, sys.version_info[:2]
return version, major, minor
def guess_python_env():
"""Guess the default python env to use."""
version, major, minor = get_version_info()
if 'PyPy' in version:
return 'pypy3' if major == 3 else 'pypy'
return 'py{major}{minor}'.format(major=major, minor=minor)
def get_default_envlist(version):
"""Parse a default tox env based on the version.
The version comes from the ``TRAVIS_PYTHON_VERSION`` environment
variable. If that isn't set or is invalid, then use
sys.version_info to come up with a reasonable default.
"""
if version in ['pypy', 'pypy3']:
return version
# Assume single digit major and minor versions
match = re.match(r'^(\d)\.(\d)(?:\.\d+)?$', version or '')
if match:
major, minor = match.groups()
return 'py{major}{minor}'.format(major=major, minor=minor)
return guess_python_env()
def get_desired_factors(ini):
"""Get the list of desired envs per declared factor.
Look at all the accepted configuration locations, and give a list
of envlists, one for each Travis factor found.
Look in the ``[travis]`` section for the known Travis factors,
which are backed by environment variable checking behind the
scenes, but provide a cleaner interface.
Also look for the ``[tox:travis]`` section, which is deprecated,
and treat it as an additional ``python`` key from the ``[travis]``
section.
Finally, look for factors based directly on environment variables,
listed in the ``[travis:env]`` section. Configuration found in the
``[travis]`` and ``[tox:travis]`` sections are converted to this
form under the hood, and are considered in the same way.
Special consideration is given to the ``python`` factor. If this
factor is set in the environment, then an appropriate configuration
will be provided automatically if no manual configuration is
provided.
To allow for the most flexible processing, the envlists provided
by each factor are not combined after they are selected, but
instead returned as a list of envlists, and expected to be
combined as and when appropriate by the caller. This allows for
special handling based on the number of factors that were found
to apply to this environment.
"""
# Find configuration based on known travis factors
travis_section = ini.sections.get('travis', {})
found_factors = [
(factor, parse_dict(travis_section[factor]))
for factor in TRAVIS_FACTORS
if factor in travis_section
]
# Backward compatibility with the old tox:travis section
if 'tox:travis' in ini.sections:
print('The [tox:travis] section is deprecated in favor of'
' the "python" key of the [travis] section.', file=sys.stderr)
found_factors.append(('python', ini.sections['tox:travis']))
# Inject any needed autoenv
version = os.environ.get('TRAVIS_PYTHON_VERSION')
if version:
default_envlist = get_default_envlist(version)
if not any(factor == 'python' for factor, _ in found_factors):
found_factors.insert(0, ('python', {version: default_envlist}))
python_factors = [(factor, mapping)
for factor, mapping in found_factors
if version and factor == 'python']
for _, mapping in python_factors:
mapping.setdefault(version, default_envlist)
# Convert known travis factors to env factors,
# and combine with declared env factors.
env_factors = [
(TRAVIS_FACTORS[factor], mapping)
for factor, mapping in found_factors
] + [
(name, parse_dict(value))
for name, value in ini.sections.get('travis:env', {}).items()
]
# Choose the correct envlists based on the factor values
return [
split_env(mapping[os.environ[name]])
for name, mapping in env_factors
if name in os.environ and os.environ[name] in mapping
]
def env_matches(declared, desired):
"""Determine if a declared env matches a desired env.
Rather than simply using the name of the env verbatim, take a
closer look to see if all the desired factors are fulfilled. If
the desired factors are fulfilled, but there are other factors,
it should still match the env.
"""
desired_factors = desired.split('-')
declared_factors = declared.split('-')
return all(factor in declared_factors for factor in desired_factors)
def override_ignore_outcome(ini):
"""Decide whether to override ignore_outcomes."""
travis_reader = tox.config.SectionReader("travis", ini)
return travis_reader.getbool('unignore_outcomes', False)
|
tox-dev/tox-travis
|
src/tox_travis/envlist.py
|
env_matches
|
python
|
def env_matches(declared, desired):
desired_factors = desired.split('-')
declared_factors = declared.split('-')
return all(factor in declared_factors for factor in desired_factors)
|
Determine if a declared env matches a desired env.
Rather than simply using the name of the env verbatim, take a
closer look to see if all the desired factors are fulfilled. If
the desired factors are fulfilled, but there are other factors,
it should still match the env.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/envlist.py#L218-L228
| null |
"""Default Tox envlist based on the Travis environment."""
from __future__ import print_function
import os
import re
import sys
from itertools import product
import tox.config
from tox.config import _split_env as split_env
from .utils import TRAVIS_FACTORS, parse_dict
def detect_envlist(ini):
"""Default envlist automatically based on the Travis environment."""
# Find the envs that tox knows about
declared_envs = get_declared_envs(ini)
# Find all the envs for all the desired factors given
desired_factors = get_desired_factors(ini)
# Reduce desired factors
desired_envs = ['-'.join(env) for env in product(*desired_factors)]
# Find matching envs
return match_envs(declared_envs, desired_envs,
passthru=len(desired_factors) == 1)
def autogen_envconfigs(config, envs):
"""Make the envconfigs for undeclared envs.
This is a stripped-down version of parseini.__init__ made for making
an envconfig.
"""
prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None
reader = tox.config.SectionReader("tox", config._cfg, prefix=prefix)
distshare_default = "{homedir}/.tox/distshare"
reader.addsubstitutions(toxinidir=config.toxinidir,
homedir=config.homedir)
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", distshare_default)
reader.addsubstitutions(distshare=config.distshare)
try:
make_envconfig = tox.config.ParseIni.make_envconfig # tox 3.4.0+
except AttributeError:
make_envconfig = tox.config.parseini.make_envconfig
# Dig past the unbound method in Python 2
make_envconfig = getattr(make_envconfig, '__func__', make_envconfig)
# Create the undeclared envs
for env in envs:
section = tox.config.testenvprefix + env
config.envconfigs[env] = make_envconfig(
config, env, section, reader._subs, config)
def get_declared_envs(ini):
"""Get the full list of envs from the tox ini.
This notably also includes envs that aren't in the envlist,
but are declared by having their own testenv:envname section.
The envs are expected in a particular order. First the ones
declared in the envlist, then the other testenvs in order.
"""
tox_section_name = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox'
tox_section = ini.sections.get(tox_section_name, {})
envlist = split_env(tox_section.get('envlist', []))
# Add additional envs that are declared as sections in the ini
section_envs = [
section[8:] for section in sorted(ini.sections, key=ini.lineof)
if section.startswith('testenv:')
]
return envlist + [env for env in section_envs if env not in envlist]
def get_version_info():
"""Get version info from the sys module.
Override from environment for testing.
"""
overrides = os.environ.get('__TOX_TRAVIS_SYS_VERSION')
if overrides:
version, major, minor = overrides.split(',')[:3]
major, minor = int(major), int(minor)
else:
version, (major, minor) = sys.version, sys.version_info[:2]
return version, major, minor
def guess_python_env():
"""Guess the default python env to use."""
version, major, minor = get_version_info()
if 'PyPy' in version:
return 'pypy3' if major == 3 else 'pypy'
return 'py{major}{minor}'.format(major=major, minor=minor)
def get_default_envlist(version):
"""Parse a default tox env based on the version.
The version comes from the ``TRAVIS_PYTHON_VERSION`` environment
variable. If that isn't set or is invalid, then use
sys.version_info to come up with a reasonable default.
"""
if version in ['pypy', 'pypy3']:
return version
# Assume single digit major and minor versions
match = re.match(r'^(\d)\.(\d)(?:\.\d+)?$', version or '')
if match:
major, minor = match.groups()
return 'py{major}{minor}'.format(major=major, minor=minor)
return guess_python_env()
def get_desired_factors(ini):
"""Get the list of desired envs per declared factor.
Look at all the accepted configuration locations, and give a list
of envlists, one for each Travis factor found.
Look in the ``[travis]`` section for the known Travis factors,
which are backed by environment variable checking behind the
scenes, but provide a cleaner interface.
Also look for the ``[tox:travis]`` section, which is deprecated,
and treat it as an additional ``python`` key from the ``[travis]``
section.
Finally, look for factors based directly on environment variables,
listed in the ``[travis:env]`` section. Configuration found in the
``[travis]`` and ``[tox:travis]`` sections are converted to this
form under the hood, and are considered in the same way.
Special consideration is given to the ``python`` factor. If this
factor is set in the environment, then an appropriate configuration
will be provided automatically if no manual configuration is
provided.
To allow for the most flexible processing, the envlists provided
by each factor are not combined after they are selected, but
instead returned as a list of envlists, and expected to be
combined as and when appropriate by the caller. This allows for
special handling based on the number of factors that were found
to apply to this environment.
"""
# Find configuration based on known travis factors
travis_section = ini.sections.get('travis', {})
found_factors = [
(factor, parse_dict(travis_section[factor]))
for factor in TRAVIS_FACTORS
if factor in travis_section
]
# Backward compatibility with the old tox:travis section
if 'tox:travis' in ini.sections:
print('The [tox:travis] section is deprecated in favor of'
' the "python" key of the [travis] section.', file=sys.stderr)
found_factors.append(('python', ini.sections['tox:travis']))
# Inject any needed autoenv
version = os.environ.get('TRAVIS_PYTHON_VERSION')
if version:
default_envlist = get_default_envlist(version)
if not any(factor == 'python' for factor, _ in found_factors):
found_factors.insert(0, ('python', {version: default_envlist}))
python_factors = [(factor, mapping)
for factor, mapping in found_factors
if version and factor == 'python']
for _, mapping in python_factors:
mapping.setdefault(version, default_envlist)
# Convert known travis factors to env factors,
# and combine with declared env factors.
env_factors = [
(TRAVIS_FACTORS[factor], mapping)
for factor, mapping in found_factors
] + [
(name, parse_dict(value))
for name, value in ini.sections.get('travis:env', {}).items()
]
# Choose the correct envlists based on the factor values
return [
split_env(mapping[os.environ[name]])
for name, mapping in env_factors
if name in os.environ and os.environ[name] in mapping
]
def match_envs(declared_envs, desired_envs, passthru):
"""Determine the envs that match the desired_envs.
If ``passthru` is True, and none of the declared envs match the
desired envs, then the desired envs will be used verbatim.
:param declared_envs: The envs that are declared in the tox config.
:param desired_envs: The envs desired from the tox-travis config.
:param bool passthru: Whether to used the ``desired_envs`` as a
fallback if no declared envs match.
"""
matched = [
declared for declared in declared_envs
if any(env_matches(declared, desired) for desired in desired_envs)
]
return desired_envs if not matched and passthru else matched
def override_ignore_outcome(ini):
"""Decide whether to override ignore_outcomes."""
travis_reader = tox.config.SectionReader("travis", ini)
return travis_reader.getbool('unignore_outcomes', False)
|
tox-dev/tox-travis
|
src/tox_travis/envlist.py
|
override_ignore_outcome
|
python
|
def override_ignore_outcome(ini):
travis_reader = tox.config.SectionReader("travis", ini)
return travis_reader.getbool('unignore_outcomes', False)
|
Decide whether to override ignore_outcomes.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/envlist.py#L231-L234
| null |
"""Default Tox envlist based on the Travis environment."""
from __future__ import print_function
import os
import re
import sys
from itertools import product
import tox.config
from tox.config import _split_env as split_env
from .utils import TRAVIS_FACTORS, parse_dict
def detect_envlist(ini):
"""Default envlist automatically based on the Travis environment."""
# Find the envs that tox knows about
declared_envs = get_declared_envs(ini)
# Find all the envs for all the desired factors given
desired_factors = get_desired_factors(ini)
# Reduce desired factors
desired_envs = ['-'.join(env) for env in product(*desired_factors)]
# Find matching envs
return match_envs(declared_envs, desired_envs,
passthru=len(desired_factors) == 1)
def autogen_envconfigs(config, envs):
"""Make the envconfigs for undeclared envs.
This is a stripped-down version of parseini.__init__ made for making
an envconfig.
"""
prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None
reader = tox.config.SectionReader("tox", config._cfg, prefix=prefix)
distshare_default = "{homedir}/.tox/distshare"
reader.addsubstitutions(toxinidir=config.toxinidir,
homedir=config.homedir)
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", distshare_default)
reader.addsubstitutions(distshare=config.distshare)
try:
make_envconfig = tox.config.ParseIni.make_envconfig # tox 3.4.0+
except AttributeError:
make_envconfig = tox.config.parseini.make_envconfig
# Dig past the unbound method in Python 2
make_envconfig = getattr(make_envconfig, '__func__', make_envconfig)
# Create the undeclared envs
for env in envs:
section = tox.config.testenvprefix + env
config.envconfigs[env] = make_envconfig(
config, env, section, reader._subs, config)
def get_declared_envs(ini):
"""Get the full list of envs from the tox ini.
This notably also includes envs that aren't in the envlist,
but are declared by having their own testenv:envname section.
The envs are expected in a particular order. First the ones
declared in the envlist, then the other testenvs in order.
"""
tox_section_name = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox'
tox_section = ini.sections.get(tox_section_name, {})
envlist = split_env(tox_section.get('envlist', []))
# Add additional envs that are declared as sections in the ini
section_envs = [
section[8:] for section in sorted(ini.sections, key=ini.lineof)
if section.startswith('testenv:')
]
return envlist + [env for env in section_envs if env not in envlist]
def get_version_info():
"""Get version info from the sys module.
Override from environment for testing.
"""
overrides = os.environ.get('__TOX_TRAVIS_SYS_VERSION')
if overrides:
version, major, minor = overrides.split(',')[:3]
major, minor = int(major), int(minor)
else:
version, (major, minor) = sys.version, sys.version_info[:2]
return version, major, minor
def guess_python_env():
"""Guess the default python env to use."""
version, major, minor = get_version_info()
if 'PyPy' in version:
return 'pypy3' if major == 3 else 'pypy'
return 'py{major}{minor}'.format(major=major, minor=minor)
def get_default_envlist(version):
"""Parse a default tox env based on the version.
The version comes from the ``TRAVIS_PYTHON_VERSION`` environment
variable. If that isn't set or is invalid, then use
sys.version_info to come up with a reasonable default.
"""
if version in ['pypy', 'pypy3']:
return version
# Assume single digit major and minor versions
match = re.match(r'^(\d)\.(\d)(?:\.\d+)?$', version or '')
if match:
major, minor = match.groups()
return 'py{major}{minor}'.format(major=major, minor=minor)
return guess_python_env()
def get_desired_factors(ini):
"""Get the list of desired envs per declared factor.
Look at all the accepted configuration locations, and give a list
of envlists, one for each Travis factor found.
Look in the ``[travis]`` section for the known Travis factors,
which are backed by environment variable checking behind the
scenes, but provide a cleaner interface.
Also look for the ``[tox:travis]`` section, which is deprecated,
and treat it as an additional ``python`` key from the ``[travis]``
section.
Finally, look for factors based directly on environment variables,
listed in the ``[travis:env]`` section. Configuration found in the
``[travis]`` and ``[tox:travis]`` sections are converted to this
form under the hood, and are considered in the same way.
Special consideration is given to the ``python`` factor. If this
factor is set in the environment, then an appropriate configuration
will be provided automatically if no manual configuration is
provided.
To allow for the most flexible processing, the envlists provided
by each factor are not combined after they are selected, but
instead returned as a list of envlists, and expected to be
combined as and when appropriate by the caller. This allows for
special handling based on the number of factors that were found
to apply to this environment.
"""
# Find configuration based on known travis factors
travis_section = ini.sections.get('travis', {})
found_factors = [
(factor, parse_dict(travis_section[factor]))
for factor in TRAVIS_FACTORS
if factor in travis_section
]
# Backward compatibility with the old tox:travis section
if 'tox:travis' in ini.sections:
print('The [tox:travis] section is deprecated in favor of'
' the "python" key of the [travis] section.', file=sys.stderr)
found_factors.append(('python', ini.sections['tox:travis']))
# Inject any needed autoenv
version = os.environ.get('TRAVIS_PYTHON_VERSION')
if version:
default_envlist = get_default_envlist(version)
if not any(factor == 'python' for factor, _ in found_factors):
found_factors.insert(0, ('python', {version: default_envlist}))
python_factors = [(factor, mapping)
for factor, mapping in found_factors
if version and factor == 'python']
for _, mapping in python_factors:
mapping.setdefault(version, default_envlist)
# Convert known travis factors to env factors,
# and combine with declared env factors.
env_factors = [
(TRAVIS_FACTORS[factor], mapping)
for factor, mapping in found_factors
] + [
(name, parse_dict(value))
for name, value in ini.sections.get('travis:env', {}).items()
]
# Choose the correct envlists based on the factor values
return [
split_env(mapping[os.environ[name]])
for name, mapping in env_factors
if name in os.environ and os.environ[name] in mapping
]
def match_envs(declared_envs, desired_envs, passthru):
    """Determine the envs that match the desired_envs.

    If ``passthru`` is True, and none of the declared envs match the
    desired envs, then the desired envs will be used verbatim.

    :param declared_envs: The envs that are declared in the tox config.
    :param desired_envs: The envs desired from the tox-travis config.
    :param bool passthru: Whether to use the ``desired_envs`` as a
        fallback if no declared envs match.
    """
    matched = []
    for declared in declared_envs:
        if any(env_matches(declared, desired) for desired in desired_envs):
            matched.append(declared)
    if matched:
        return matched
    # Nothing matched: hand back the desired list only when requested.
    return desired_envs if passthru else matched
def env_matches(declared, desired):
    """Determine if a declared env matches a desired env.

    Rather than simply using the name of the env verbatim, take a
    closer look to see if all the desired factors are fulfilled. If
    the desired factors are fulfilled, but there are other factors,
    it should still match the env.
    """
    declared_factors = set(declared.split('-'))
    # A match means every desired factor appears among the declared ones.
    return set(desired.split('-')) <= declared_factors
|
tox-dev/tox-travis
|
src/tox_travis/hooks.py
|
tox_addoption
|
python
|
def tox_addoption(parser):
    """Add the ``--travis-after`` flag and install Travis-only patches.

    :param parser: the tox argument parser to extend.
    """
    parser.add_argument(
        '--travis-after', dest='travis_after', action='store_true',
        help='Exit successfully after all Travis jobs complete successfully.')
    # The monkeypatches are only relevant when actually running on Travis.
    if 'TRAVIS' in os.environ:
        pypy_version_monkeypatch()
        subcommand_test_monkeypatch(tox_subcommand_test_post)
|
Add arguments and needed monkeypatches.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/hooks.py#L19-L27
|
[
"def pypy_version_monkeypatch():\n \"\"\"Patch Tox to work with non-default PyPy 3 versions.\"\"\"\n # Travis virtualenv do not provide `pypy3`, which tox tries to execute.\n # This doesnt affect Travis python version `pypy3`, as the pyenv pypy3\n # is in the PATH.\n # https://github.com/travis-ci/travis-ci/issues/6304\n # Force use of the virtualenv `python`.\n version = os.environ.get('TRAVIS_PYTHON_VERSION')\n if version and default_factors and version.startswith('pypy3.3-'):\n default_factors['pypy3'] = 'python'\n",
"def subcommand_test_monkeypatch(post):\n \"\"\"Monkeypatch Tox session to call a hook when commands finish.\"\"\"\n import tox.session\n real_subcommand_test = tox.session.Session.subcommand_test\n\n def subcommand_test(self):\n retcode = real_subcommand_test(self)\n post(self.config)\n return retcode\n\n tox.session.Session.subcommand_test = subcommand_test\n"
] |
"""Tox hook implementations."""
from __future__ import print_function
import os
import sys
import tox
from .envlist import (
detect_envlist,
autogen_envconfigs,
override_ignore_outcome,
)
from .hacks import (
pypy_version_monkeypatch,
subcommand_test_monkeypatch,
)
from .after import travis_after
@tox.hookimpl
@tox.hookimpl
def tox_configure(config):
    """Check for the presence of the added options."""
    # Everything below is Travis-specific; do nothing elsewhere.
    if 'TRAVIS' not in os.environ:
        return
    ini = config._cfg
    # envlist: only auto-detect when the user has not picked envs
    # explicitly via TOXENV or the -e/--env option.
    if 'TOXENV' not in os.environ and not config.option.env:
        envlist = detect_envlist(ini)
        undeclared = set(envlist) - set(config.envconfigs)
        if undeclared:
            print('Matching undeclared envs is deprecated. Be sure all the '
                  'envs that Tox should run are declared in the tox config.',
                  file=sys.stderr)
            autogen_envconfigs(config, undeclared)
        config.envlist = envlist
    # Override ignore_outcomes when the travis config requests it.
    if override_ignore_outcome(ini):
        for envconfig in config.envconfigs.values():
            envconfig.ignore_outcome = False
    # after: the --travis-after feature is deprecated; warn loudly.
    if config.option.travis_after:
        print('The after all feature has been deprecated. Check out Travis\' '
              'build stages, which are a better solution. '
              'See https://tox-travis.readthedocs.io/en/stable/after.html '
              'for more details.', file=sys.stderr)
def tox_subcommand_test_post(config):
    """Wait on the sibling Travis jobs when --travis-after was given."""
    if not config.option.travis_after:
        return
    travis_after(config._cfg, config.envlist)
|
tox-dev/tox-travis
|
src/tox_travis/hooks.py
|
tox_configure
|
python
|
def tox_configure(config):
    """Apply tox-travis configuration after tox parses its config.

    Runs only under Travis CI; adjusts the envlist, the
    ignore_outcome flags, and warns about the deprecated after-all
    feature.
    """
    if 'TRAVIS' not in os.environ:
        return
    ini = config._cfg
    # envlist: only auto-detect when TOXENV / -e did not pick envs.
    if 'TOXENV' not in os.environ and not config.option.env:
        envlist = detect_envlist(ini)
        undeclared = set(envlist) - set(config.envconfigs)
        if undeclared:
            print('Matching undeclared envs is deprecated. Be sure all the '
                  'envs that Tox should run are declared in the tox config.',
                  file=sys.stderr)
            autogen_envconfigs(config, undeclared)
        config.envlist = envlist
    # Override ignore_outcomes when the travis config requests it.
    if override_ignore_outcome(ini):
        for envconfig in config.envconfigs.values():
            envconfig.ignore_outcome = False
    # after: deprecated feature; emit a warning when the flag is set.
    if config.option.travis_after:
        print('The after all feature has been deprecated. Check out Travis\' '
              'build stages, which are a better solution. '
              'See https://tox-travis.readthedocs.io/en/stable/after.html '
              'for more details.', file=sys.stderr)
|
Check for the presence of the added options.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/hooks.py#L31-L59
|
[
"def detect_envlist(ini):\n \"\"\"Default envlist automatically based on the Travis environment.\"\"\"\n # Find the envs that tox knows about\n declared_envs = get_declared_envs(ini)\n\n # Find all the envs for all the desired factors given\n desired_factors = get_desired_factors(ini)\n\n # Reduce desired factors\n desired_envs = ['-'.join(env) for env in product(*desired_factors)]\n\n # Find matching envs\n return match_envs(declared_envs, desired_envs,\n passthru=len(desired_factors) == 1)\n",
"def autogen_envconfigs(config, envs):\n \"\"\"Make the envconfigs for undeclared envs.\n\n This is a stripped-down version of parseini.__init__ made for making\n an envconfig.\n \"\"\"\n prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None\n reader = tox.config.SectionReader(\"tox\", config._cfg, prefix=prefix)\n distshare_default = \"{homedir}/.tox/distshare\"\n reader.addsubstitutions(toxinidir=config.toxinidir,\n homedir=config.homedir)\n\n reader.addsubstitutions(toxworkdir=config.toxworkdir)\n config.distdir = reader.getpath(\"distdir\", \"{toxworkdir}/dist\")\n reader.addsubstitutions(distdir=config.distdir)\n config.distshare = reader.getpath(\"distshare\", distshare_default)\n reader.addsubstitutions(distshare=config.distshare)\n\n try:\n make_envconfig = tox.config.ParseIni.make_envconfig # tox 3.4.0+\n except AttributeError:\n make_envconfig = tox.config.parseini.make_envconfig\n # Dig past the unbound method in Python 2\n make_envconfig = getattr(make_envconfig, '__func__', make_envconfig)\n\n # Create the undeclared envs\n for env in envs:\n section = tox.config.testenvprefix + env\n config.envconfigs[env] = make_envconfig(\n config, env, section, reader._subs, config)\n",
"def override_ignore_outcome(ini):\n \"\"\"Decide whether to override ignore_outcomes.\"\"\"\n travis_reader = tox.config.SectionReader(\"travis\", ini)\n return travis_reader.getbool('unignore_outcomes', False)\n"
] |
"""Tox hook implementations."""
from __future__ import print_function
import os
import sys
import tox
from .envlist import (
detect_envlist,
autogen_envconfigs,
override_ignore_outcome,
)
from .hacks import (
pypy_version_monkeypatch,
subcommand_test_monkeypatch,
)
from .after import travis_after
@tox.hookimpl
def tox_addoption(parser):
"""Add arguments and needed monkeypatches."""
parser.add_argument(
'--travis-after', dest='travis_after', action='store_true',
help='Exit successfully after all Travis jobs complete successfully.')
if 'TRAVIS' in os.environ:
pypy_version_monkeypatch()
subcommand_test_monkeypatch(tox_subcommand_test_post)
@tox.hookimpl
def tox_subcommand_test_post(config):
"""Wait for this job if the configuration matches."""
if config.option.travis_after:
travis_after(config._cfg, config.envlist)
|
tox-dev/tox-travis
|
src/tox_travis/utils.py
|
parse_dict
|
python
|
def parse_dict(value):
    """Parse a multi-line ``key: value`` string into a dict.

    Blank lines are skipped; whitespace around keys and values is
    stripped. Only the first ``:`` on a line separates key from value.
    """
    mapping = {}
    for raw_line in value.strip().splitlines():
        entry = raw_line.strip()
        if not entry:
            continue
        key, val = entry.split(':', 1)
        mapping[key.strip()] = val.strip()
    return mapping
|
Parse a dict value from the tox config.
.. code-block: ini
[travis]
python =
2.7: py27, docs
3.5: py{35,36}
With this config, the value of ``python`` would be parsed
by this function, and would return::
{
'2.7': 'py27, docs',
'3.5': 'py{35,36}',
}
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/utils.py#L11-L32
| null |
"""Shared constants and utility functions."""
# Mapping Travis factors to the associated env variables
TRAVIS_FACTORS = {
'os': 'TRAVIS_OS_NAME',
'language': 'TRAVIS_LANGUAGE',
'python': 'TRAVIS_PYTHON_VERSION',
}
|
tox-dev/tox-travis
|
src/tox_travis/hacks.py
|
pypy_version_monkeypatch
|
python
|
def pypy_version_monkeypatch():
    """Force the virtualenv `python` for pypy3.3-* factors on Travis.

    Travis virtualenvs do not provide a `pypy3` executable, which tox
    tries to run; the Travis python version `pypy3` itself is fine
    because the pyenv pypy3 is on PATH.
    https://github.com/travis-ci/travis-ci/issues/6304
    """
    version = os.environ.get('TRAVIS_PYTHON_VERSION')
    if not (version and default_factors):
        return
    if version.startswith('pypy3.3-'):
        default_factors['pypy3'] = 'python'
|
Patch Tox to work with non-default PyPy 3 versions.
|
train
|
https://github.com/tox-dev/tox-travis/blob/d97a966c19abb020298a7e4b91fe83dd1d0a4517/src/tox_travis/hacks.py#L9-L18
| null |
import os
try:
from tox.config import default_factors
except ImportError:
default_factors = None
def subcommand_test_monkeypatch(post):
    """Monkeypatch Tox session to call a hook when commands finish.

    :param post: callable invoked with ``self.config`` after the real
        ``subcommand_test`` returns normally (not when it raises).
    """
    # Local import: defers the tox dependency to the moment the patch
    # is actually installed.
    import tox.session
    real_subcommand_test = tox.session.Session.subcommand_test
    def subcommand_test(self):
        # Run the real implementation, then fire the hook; preserve the
        # original return code for the caller.
        retcode = real_subcommand_test(self)
        post(self.config)
        return retcode
    tox.session.Session.subcommand_test = subcommand_test
|
LPgenerator/django-db-mailer
|
dbmail/providers/pubnub/push.py
|
send
|
python
|
def send(channel, message, **kwargs):
    """Publish *message* as ``{"text": message}`` to *channel* via Pubnub.

    Credentials come from Django settings (PUBNUB_PUB_KEY / SUB_KEY /
    SEC_KEY). ``ssl_on`` defaults to False; all remaining keyword
    arguments are forwarded to the Pubnub constructor.
    """
    pubnub = Pubnub(
        publish_key=settings.PUBNUB_PUB_KEY,
        subscribe_key=settings.PUBNUB_SUB_KEY,
        secret_key=settings.PUBNUB_SEC_KEY,
        ssl_on=kwargs.pop('ssl_on', False), **kwargs)
    return pubnub.publish(channel=channel, message={"text": message})
|
Site: http://www.pubnub.com/
API: https://www.mashape.com/pubnub/pubnub-network
Desc: real-time browser notifications
Installation and usage:
pip install -U pubnub
Tests for browser notification http://127.0.0.1:8000/browser_notification/
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/pubnub/push.py#L11-L27
| null |
# -*- encoding: utf-8 -*-
from django.conf import settings
from Pubnub import Pubnub
class PushOverError(Exception):
    """Raised when the Pubnub push provider fails."""
    pass
|
LPgenerator/django-db-mailer
|
dbmail/providers/boxcar/push.py
|
send
|
python
|
def send(token, title, **kwargs):
    """Send a Boxcar push notification.

    :param token: Boxcar user credentials token.
    :param title: notification title, UTF-8 encoded before sending.
    Extra keyword arguments become ``notification[<key>]`` form fields;
    ``api_url`` overrides the default host.
    :raises BoxcarError: when the API does not answer 201 Created.
    :return: True on success.
    """
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "User-Agent": "DBMail/%s" % get_version(),
    }
    data = {
        "user_credentials": token,
        "notification[title]": from_unicode(title),
        "notification[sound]": "notifier-2"
    }
    # NOTE(review): api_url is popped only after this loop, so a custom
    # api_url is also sent as notification[api_url] — confirm intended.
    for k, v in kwargs.items():
        data['notification[%s]' % k] = from_unicode(v)
    http = HTTPSConnection(kwargs.pop("api_url", "new.boxcar.io"))
    http.request(
        "POST", "/api/notifications",
        headers=headers,
        body=urlencode(data))
    response = http.getresponse()
    if response.status != 201:
        raise BoxcarError(response.reason)
    return True
|
Site: https://boxcar.io/
API: http://help.boxcar.io/knowledgebase/topics/48115-boxcar-api
Desc: Best app for system administrators
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/boxcar/push.py#L19-L48
|
[
"def get_version():\n return '.'.join(map(str, VERSION))\n",
"def from_unicode(text, text_length=None):\n try:\n text = text.encode('utf-8', 'ignore')\n except UnicodeDecodeError:\n pass\n\n if text_length is not None:\n text = text[0:text_length]\n\n return text\n"
] |
# -*- encoding: utf-8 -*-
try:
from httplib import HTTPSConnection
from urllib import urlencode
except ImportError:
from http.client import HTTPSConnection
from urllib.parse import urlencode
from dbmail.providers.prowl.push import from_unicode
from dbmail import get_version
class BoxcarError(Exception):
    """Raised when the Boxcar API rejects a notification."""
    pass
|
LPgenerator/django-db-mailer
|
dbmail/providers/slack/push.py
|
send
|
python
|
def send(channel, message, **kwargs):
    """Post *message* to a Slack *channel* via an incoming webhook.

    Site: https://slack.com
    API: https://api.slack.com
    Desc: real-time messaging

    :param channel: target channel; falls back to settings.SLACK_CHANNEL.
    :param message: text to post.
    :raises SlackError: on a non-200 response or a non-"ok" body.
    :return: True on success.
    """
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "User-Agent": "DBMail/%s" % get_version(),
    }
    username = from_unicode(kwargs.pop("username", settings.SLACK_USERNAME))
    hook_url = from_unicode(kwargs.pop("hook_url", settings.SLACK_HOOCK_URL))
    channel = from_unicode(channel or settings.SLACK_CHANNEL)
    emoji = from_unicode(kwargs.pop("emoji", ""))
    message = from_unicode(message)
    data = {
        "channel": channel,
        "username": username,
        "text": message,
        "icon_emoji": emoji,
    }
    _data = kwargs.pop('data', None)
    if _data is not None:
        data.update(_data)
    up = urlparse(hook_url)
    http = HTTPSConnection(up.netloc)
    http.request(
        "POST", up.path,
        headers=headers,
        body=urlencode({"payload": dumps(data)}))
    response = http.getresponse()
    if response.status != 200:
        raise SlackError(response.reason)
    body = response.read()
    # BUGFIX: HTTPResponse.read() returns bytes on Python 3, so the old
    # `body != "ok"` check raised SlackError even on success. Normalize
    # to str before comparing with the "ok" sentinel.
    if isinstance(body, bytes):
        body = body.decode('utf-8', 'ignore')
    if body != "ok":
        raise SlackError(repr(body))
    return True
|
Site: https://slack.com
API: https://api.slack.com
Desc: real-time messaging
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/slack/push.py#L23-L65
|
[
"def get_version():\n return '.'.join(map(str, VERSION))\n",
"def from_unicode(text, text_length=None):\n try:\n text = text.encode('utf-8', 'ignore')\n except UnicodeDecodeError:\n pass\n\n if text_length is not None:\n text = text[0:text_length]\n\n return text\n"
] |
# -*- encoding: utf-8 -*-
try:
from httplib import HTTPSConnection
from urlparse import urlparse
from urllib import urlencode
except ImportError:
from http.client import HTTPSConnection
from urllib.parse import urlparse, urlencode
from json import dumps
from django.conf import settings
from dbmail import get_version
from dbmail.providers.prowl.push import from_unicode
class SlackError(Exception):
    """Raised when the Slack webhook call fails."""
    pass
|
LPgenerator/django-db-mailer
|
dbmail/providers/smsaero/sms.py
|
send
|
python
|
def send(sms_to, sms_body, **kwargs):
    """Send an SMS through the SMS Aero HTTP gate.

    :param sms_to: destination phone number; a leading '+' is stripped.
    :param sms_body: message text, UTF-8 encoded before sending.
    :raises AeroSmsError: on a non-200 HTTP response.
    :return: True when the gate returned an id with status 'accepted',
        False otherwise.
    """
    headers = {
        "User-Agent": "DBMail/%s" % get_version(),
    }
    kwargs.update({
        'user': settings.SMSAERO_LOGIN,
        'password': settings.SMSAERO_MD5_PASSWORD,
        'from': kwargs.pop('sms_from', settings.SMSAERO_FROM),
        'to': sms_to.replace('+', ''),
        'text': from_unicode(sms_body),
        'answer': 'json',
    })
    http = HTTPConnection(kwargs.pop("api_url", "gate.smsaero.ru"))
    http.request("GET", "/send/?" + urlencode(kwargs), headers=headers)
    response = http.getresponse()
    if response.status != 200:
        raise AeroSmsError(response.reason)
    # NOTE(review): headers.get_content_charset() exists on Python 3's
    # http.client responses — confirm this path is not used on Python 2.
    read = response.read().decode(response.headers.get_content_charset())
    data = json.loads(read)
    status = None
    if 'result' in data:
        status = data['result']
    sms_id = None
    if 'id' in data:
        sms_id = data['id']
    if sms_id and status == 'accepted':
        return True
    return False
|
Site: http://smsaero.ru/
API: http://smsaero.ru/api/
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/smsaero/sms.py#L23-L61
|
[
"def get_version():\n return '.'.join(map(str, VERSION))\n",
"def from_unicode(text, text_length=None):\n try:\n text = text.encode('utf-8', 'ignore')\n except UnicodeDecodeError:\n pass\n\n if text_length is not None:\n text = text[0:text_length]\n\n return text\n"
] |
# -*- coding: utf-8 -*-
try:
from httplib import HTTPConnection
from urllib import urlencode
except ImportError:
from http.client import HTTPConnection
from urllib.parse import urlencode
from django.conf import settings
from dbmail.providers.prowl.push import from_unicode
from dbmail import get_version
import json
class AeroSmsError(Exception):
    """Raised when the SMS Aero gate rejects a message."""
    pass
|
LPgenerator/django-db-mailer
|
dbmail/providers/sendinblue/mail.py
|
email_list_to_email_dict
|
python
|
def email_list_to_email_dict(email_list):
    """Convert a list of email strings to an ``{address: realname}`` dict.

    Entries without a display name map the address to itself; ``None``
    input yields an empty dict.
    """
    if email_list is None:
        return {}
    parsed = (email.utils.parseaddr(entry) for entry in email_list)
    return {
        address: (realname if realname and address else address)
        for realname, address in parsed
    }
|
Convert a list of email to a dict of email.
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/sendinblue/mail.py#L15-L23
| null |
"""SendInBlue mail provider."""
import base64
import email
from mailin import Mailin
class SendInBlueError(Exception):
"""Custom exception."""
pass
def email_address_to_list(email_address):
    """Convert an email address to an ``[address, realname]`` pair.

    Without a parseable display name, the raw input is used for both
    elements.
    """
    realname, address = email.utils.parseaddr(email_address)
    if realname and address:
        return [address, realname]
    return [email_address, email_address]
def send(sender_instance):
    """Send a transactional email using SendInBlue API.
    Site: https://www.sendinblue.com
    API: https://apidocs.sendinblue.com/
    """
    m = Mailin(
        "https://api.sendinblue.com/v2.0",
        sender_instance._kwargs.get("api_key")
    )
    # Recipient fields take {address: realname} dicts; "from" is an
    # [address, realname] pair, matching the v2.0 API payload shape.
    data = {
        "to": email_list_to_email_dict(sender_instance._recipient_list),
        "cc": email_list_to_email_dict(sender_instance._cc),
        "bcc": email_list_to_email_dict(sender_instance._bcc),
        "from": email_address_to_list(sender_instance._from_email),
        "subject": sender_instance._subject,
    }
    if sender_instance._template.is_html:
        data.update({
            "html": sender_instance._message,
            "headers": {"Content-Type": "text/html; charset=utf-8"}
        })
    else:
        data.update({"text": sender_instance._message})
    if "attachments" in sender_instance._kwargs:
        data["attachment"] = {}
        # Each attachment is a (name, payload) pair; payload is sent
        # base64-encoded keyed by filename.
        for attachment in sender_instance._kwargs["attachments"]:
            data["attachment"][attachment[0]] = base64.b64encode(attachment[1])
    result = m.send_email(data)
    if result["code"] != "success":
        raise SendInBlueError(result["message"])
|
LPgenerator/django-db-mailer
|
dbmail/providers/sendinblue/mail.py
|
email_address_to_list
|
python
|
def email_address_to_list(email_address):
realname, address = email.utils.parseaddr(email_address)
return (
[address, realname] if realname and address else
[email_address, email_address]
)
|
Convert an email address to a list.
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/sendinblue/mail.py#L26-L32
| null |
"""SendInBlue mail provider."""
import base64
import email
from mailin import Mailin
class SendInBlueError(Exception):
"""Custom exception."""
pass
def email_list_to_email_dict(email_list):
"""Convert a list of email to a dict of email."""
if email_list is None:
return {}
result = {}
for value in email_list:
realname, address = email.utils.parseaddr(value)
result[address] = realname if realname and address else address
return result
def send(sender_instance):
"""Send a transactional email using SendInBlue API.
Site: https://www.sendinblue.com
API: https://apidocs.sendinblue.com/
"""
m = Mailin(
"https://api.sendinblue.com/v2.0",
sender_instance._kwargs.get("api_key")
)
data = {
"to": email_list_to_email_dict(sender_instance._recipient_list),
"cc": email_list_to_email_dict(sender_instance._cc),
"bcc": email_list_to_email_dict(sender_instance._bcc),
"from": email_address_to_list(sender_instance._from_email),
"subject": sender_instance._subject,
}
if sender_instance._template.is_html:
data.update({
"html": sender_instance._message,
"headers": {"Content-Type": "text/html; charset=utf-8"}
})
else:
data.update({"text": sender_instance._message})
if "attachments" in sender_instance._kwargs:
data["attachment"] = {}
for attachment in sender_instance._kwargs["attachments"]:
data["attachment"][attachment[0]] = base64.b64encode(attachment[1])
result = m.send_email(data)
if result["code"] != "success":
raise SendInBlueError(result["message"])
|
LPgenerator/django-db-mailer
|
dbmail/providers/sendinblue/mail.py
|
send
|
python
|
def send(sender_instance):
m = Mailin(
"https://api.sendinblue.com/v2.0",
sender_instance._kwargs.get("api_key")
)
data = {
"to": email_list_to_email_dict(sender_instance._recipient_list),
"cc": email_list_to_email_dict(sender_instance._cc),
"bcc": email_list_to_email_dict(sender_instance._bcc),
"from": email_address_to_list(sender_instance._from_email),
"subject": sender_instance._subject,
}
if sender_instance._template.is_html:
data.update({
"html": sender_instance._message,
"headers": {"Content-Type": "text/html; charset=utf-8"}
})
else:
data.update({"text": sender_instance._message})
if "attachments" in sender_instance._kwargs:
data["attachment"] = {}
for attachment in sender_instance._kwargs["attachments"]:
data["attachment"][attachment[0]] = base64.b64encode(attachment[1])
result = m.send_email(data)
if result["code"] != "success":
raise SendInBlueError(result["message"])
|
Send a transactional email using SendInBlue API.
Site: https://www.sendinblue.com
API: https://apidocs.sendinblue.com/
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/sendinblue/mail.py#L35-L65
|
[
"def email_list_to_email_dict(email_list):\n \"\"\"Convert a list of email to a dict of email.\"\"\"\n if email_list is None:\n return {}\n result = {}\n for value in email_list:\n realname, address = email.utils.parseaddr(value)\n result[address] = realname if realname and address else address\n return result\n",
"def email_address_to_list(email_address):\n \"\"\"Convert an email address to a list.\"\"\"\n realname, address = email.utils.parseaddr(email_address)\n return (\n [address, realname] if realname and address else\n [email_address, email_address]\n )\n"
] |
"""SendInBlue mail provider."""
import base64
import email
from mailin import Mailin
class SendInBlueError(Exception):
"""Custom exception."""
pass
def email_list_to_email_dict(email_list):
"""Convert a list of email to a dict of email."""
if email_list is None:
return {}
result = {}
for value in email_list:
realname, address = email.utils.parseaddr(value)
result[address] = realname if realname and address else address
return result
def email_address_to_list(email_address):
"""Convert an email address to a list."""
realname, address = email.utils.parseaddr(email_address)
return (
[address, realname] if realname and address else
[email_address, email_address]
)
|
LPgenerator/django-db-mailer
|
dbmail/providers/iqsms/sms.py
|
send
|
python
|
def send(sms_to, sms_body, **kwargs):
    """Send an SMS through the IQSMS HTTP gate.

    Site: http://iqsms.ru/
    API: http://iqsms.ru/api/

    :param sms_to: destination phone number.
    :param sms_body: message text, UTF-8 encoded before sending.
    :raises IQSMSError: on a non-200 response or a non-'accepted' status.
    :return: the gate-assigned message id as an int.
    """
    headers = {
        "User-Agent": "DBMail/%s" % get_version(),
        # BUGFIX: b64encode requires bytes on Python 3; encode the
        # credential pair before encoding it.
        'Authorization': 'Basic %s' % b64encode(
            ("%s:%s" % (
                settings.IQSMS_API_LOGIN, settings.IQSMS_API_PASSWORD
            )).encode("utf-8")).decode("ascii")
    }
    kwargs.update({
        'phone': sms_to,
        'text': from_unicode(sms_body),
        'sender': kwargs.pop('sms_from', settings.IQSMS_FROM)
    })
    http = HTTPConnection(kwargs.pop("api_url", "gate.iqsms.ru"))
    http.request("GET", "/send/?" + urlencode(kwargs), headers=headers)
    response = http.getresponse()
    if response.status != 200:
        raise IQSMSError(response.reason)
    body = response.read().strip()
    # BUGFIX: read() returns bytes on Python 3, so the '=accepted'
    # substring test raised TypeError; normalize to str first.
    if isinstance(body, bytes):
        body = body.decode('ascii', 'ignore')
    if '=accepted' not in body:
        raise IQSMSError(body)
    return int(body.split('=')[0])
|
Site: http://iqsms.ru/
API: http://iqsms.ru/api/
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/iqsms/sms.py#L22-L52
|
[
"def get_version():\n return '.'.join(map(str, VERSION))\n",
"def from_unicode(text, text_length=None):\n try:\n text = text.encode('utf-8', 'ignore')\n except UnicodeDecodeError:\n pass\n\n if text_length is not None:\n text = text[0:text_length]\n\n return text\n"
] |
# -*- encoding: utf-8 -*-
try:
from httplib import HTTPConnection
from urllib import urlencode
except ImportError:
from http.client import HTTPConnection
from urllib.parse import urlencode
from base64 import b64encode
from django.conf import settings
from dbmail.providers.prowl.push import from_unicode
from dbmail import get_version
class IQSMSError(Exception):
    """Raised when the IQSMS gate rejects a message."""
    pass
|
LPgenerator/django-db-mailer
|
dbmail/providers/parse_com/push.py
|
send
|
python
|
def send(device_id, description, **kwargs):
    """Send a push notification through the Parse.com REST API.

    Site: http://parse.com
    API: https://www.parse.com/docs/push_guide#scheduled/REST

    :param device_id: matched against the installation's ``user_id``.
    :param description: alert text; kwargs must contain ``event`` (title).
    :raises ParseComError: on a non-200 response or an API-reported error.
    :return: True on success.
    """
    headers = {
        "X-Parse-Application-Id": settings.PARSE_APP_ID,
        "X-Parse-REST-API-Key": settings.PARSE_API_KEY,
        "User-Agent": "DBMail/%s" % get_version(),
        "Content-type": "application/json",
    }
    data = {
        "where": {
            "user_id": device_id,
        },
        "data": {
            "alert": description,
            "title": kwargs.pop("event")
        }
    }
    _data = kwargs.pop('data', None)
    if _data is not None:
        data.update(_data)
    http = HTTPSConnection(kwargs.pop("api_url", "api.parse.com"))
    http.request(
        "POST", "/1/push",
        headers=headers,
        body=dumps(data))
    response = http.getresponse()
    if response.status != 200:
        raise ParseComError(response.reason)
    body = loads(response.read())
    # BUGFIX: successful responses carry no 'error' key, so the old
    # body['error'] lookup raised KeyError on success; use .get().
    if body.get('error'):
        raise ParseComError(body['error'])
    return True
|
Site: http://parse.com
API: https://www.parse.com/docs/push_guide#scheduled/REST
Desc: Best app for system administrators
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/parse_com/push.py#L19-L59
|
[
"def get_version():\n return '.'.join(map(str, VERSION))\n"
] |
# -*- encoding: utf-8 -*-
try:
from httplib import HTTPSConnection
except ImportError:
from http.client import HTTPSConnection
from json import dumps, loads
from django.conf import settings
from dbmail import get_version
class ParseComError(Exception):
    """Raised when the Parse.com push API reports a failure."""
    pass
|
LPgenerator/django-db-mailer
|
dbmail/providers/prowl/push.py
|
send
|
python
|
def send(api_key, description, **kwargs):
    """Send a Prowl push notification.

    :param api_key: Prowl API key.
    :param description: notification body, truncated to 10000 bytes.
    Optional kwargs: ``app`` (application name, 256 bytes), ``event``
    (1024 bytes), ``priority``, ``providerkey``, ``url`` (512 chars),
    ``api_url``.
    :raises ProwlError: when the API does not answer 200 OK.
    :return: True on success.
    """
    headers = {
        "User-Agent": "DBMail/%s" % get_version(),
        "Content-type": "application/x-www-form-urlencoded"
    }
    # Length limits follow the publicapi/add field maxima.
    application = from_unicode(kwargs.pop("app", settings.PROWL_APP), 256)
    event = from_unicode(kwargs.pop("event", 'Alert'), 1024)
    description = from_unicode(description, 10000)
    data = {
        "apikey": api_key,
        "application": application,
        "event": event,
        "description": description,
        "priority": kwargs.pop("priority", 1)
    }
    provider_key = kwargs.pop("providerkey", None)
    url = kwargs.pop('url', None)
    if provider_key is not None:
        data["providerkey"] = provider_key
    if url is not None:
        data["url"] = url[0:512]
    http = HTTPSConnection(kwargs.pop("api_url", "api.prowlapp.com"))
    http.request(
        "POST", "/publicapi/add",
        headers=headers,
        body=urlencode(data))
    response = http.getresponse()
    if response.status != 200:
        raise ProwlError(response.reason)
    return True
|
Site: http://prowlapp.com
API: http://prowlapp.com/api.php
Desc: Best app for system administrators
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/prowl/push.py#L30-L71
|
[
"def get_version():\n return '.'.join(map(str, VERSION))\n",
"def from_unicode(text, text_length=None):\n try:\n text = text.encode('utf-8', 'ignore')\n except UnicodeDecodeError:\n pass\n\n if text_length is not None:\n text = text[0:text_length]\n\n return text\n"
] |
# -*- encoding: utf-8 -*-
try:
from httplib import HTTPSConnection
from urllib import urlencode
except ImportError:
from http.client import HTTPSConnection
from urllib.parse import urlencode
from django.conf import settings
from dbmail import get_version
class ProwlError(Exception):
    """Raised when the Prowl API rejects a notification."""
    pass
def from_unicode(text, text_length=None):
    """UTF-8 encode *text*, optionally truncating to *text_length* bytes.

    Already-encoded Python 2 byte strings (which raise
    UnicodeDecodeError on encode) are passed through unchanged.
    """
    try:
        encoded = text.encode('utf-8', 'ignore')
    except UnicodeDecodeError:
        encoded = text
    return encoded if text_length is None else encoded[:text_length]
|
LPgenerator/django-db-mailer
|
dbmail/providers/apple/apns.py
|
send
|
python
|
def send(token_hex, message, **kwargs):
    """Send an APNs push over the legacy binary TCP protocol.

    :param token_hex: hex-encoded device token.
    :param message: alert body; kwargs must include ``event`` (title).
    Optional kwargs: ``is_enhanced``, ``identifier``, ``expiry``,
    ``apns_action``, ``content_available``; anything left over is
    merged into the ``aps`` payload.
    :raises APNsError: when the socket write returns a falsy result.
    :return: True on success.
    """
    is_enhanced = kwargs.pop('is_enhanced', False)
    identifier = kwargs.pop('identifier', 0)
    expiry = kwargs.pop('expiry', 0)
    alert = {
        "title": kwargs.pop("event"),
        "body": message,
        "action": kwargs.pop(
            'apns_action', defaults.APNS_PROVIDER_DEFAULT_ACTION)
    }
    data = {
        "aps": {
            'alert': alert,
            'content-available': kwargs.pop('content_available', 0) and 1
        }
    }
    data['aps'].update(kwargs)
    # Compact separators keep the payload under APNs size limits.
    payload = dumps(data, separators=(',', ':'))
    token = a2b_hex(token_hex)
    if is_enhanced is True:
        # Enhanced format: command 1 + identifier + expiry + token + payload.
        fmt = '!BIIH32sH%ds' % len(payload)
        expiry = expiry and time() + expiry
        notification = pack(
            fmt, 1, identifier, expiry,
            32, token, len(payload), payload)
    else:
        # Simple format: zero byte + length-prefixed token and payload.
        token_length_bin = pack('>H', len(token))
        payload_length_bin = pack('>H', len(payload))
        zero_byte = bytes('\0', 'utf-8') if PY3 is True else '\0'
        payload = bytes(payload, 'utf-8') if PY3 is True else payload
        notification = (
            zero_byte + token_length_bin + token +
            payload_length_bin + payload)
    sock = socket(AF_INET, SOCK_STREAM)
    sock.settimeout(3)
    sock.connect((settings.APNS_GW_HOST, settings.APNS_GW_PORT))
    ssl = wrap_socket(
        sock, settings.APNS_KEY_FILE,
        settings.APNS_CERT_FILE,
        do_handshake_on_connect=False)
    result = ssl.write(notification)
    sock.close()
    ssl.close()
    if not result:
        raise APNsError
    return True
|
Site: https://apple.com
API: https://developer.apple.com
Desc: iOS notifications
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/apple/apns.py#L21-L79
| null |
# -*- encoding: utf-8 -*-
from binascii import a2b_hex
from json import dumps
from socket import socket, AF_INET, SOCK_STREAM
from struct import pack
from time import time
try:
from ssl import wrap_socket
except ImportError:
from socket import ssl as wrap_socket
from django.conf import settings
from dbmail import defaults
from dbmail.providers.apple.errors import APNsError
from dbmail import PY3
|
LPgenerator/django-db-mailer
|
dbmail/providers/pushall/push.py
|
send
|
python
|
def send(ch, message, **kwargs):
    """Push *message* to the PushAll channel configured under *ch*.

    :param ch: key into settings.PUSHALL_API_KEYS.
    :param message: notification text.
    Optional kwargs: ``req_type``, ``title``, ``priority``; the rest
    are forwarded as extra API parameters.
    :raises PushAllError: on a non-200 response or an API 'error' field.
    :return: True on success.
    """
    params = {
        'type': kwargs.pop('req_type', 'self'),
        'key': settings.PUSHALL_API_KEYS[ch]['key'],
        'id': settings.PUSHALL_API_KEYS[ch]['id'],
        'title': kwargs.pop(
            "title", settings.PUSHALL_API_KEYS[ch].get('title') or ""),
        'text': message,
        'priority': kwargs.pop(
            "priority", settings.PUSHALL_API_KEYS[ch].get('priority') or "0"),
    }
    if kwargs:
        params.update(**kwargs)
    # POST: passing the encoded params as urlopen's data argument.
    # NOTE(review): urllib2-style call — this module appears Python 2
    # only; confirm before running under Python 3.
    response = urlopen(
        Request('https://pushall.ru/api.php'),
        urlencode(params),
        timeout=10
    )
    if response.code != 200:
        raise PushAllError(response.read())
    json = loads(response.read())
    if json.get('error'):
        raise PushAllError(json.get('error'))
    return True
|
Site: https://pushall.ru
API: https://pushall.ru/blog/api
Desc: App for notification to devices/browsers and messaging apps
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/pushall/push.py#L14-L46
| null |
# -*- encoding: utf-8 -*-
from json import loads
from urllib import urlencode
from urllib2 import urlopen, Request
from django.conf import settings
class PushAllError(Exception):
    """Raised when the PushAll API reports a failure."""
    pass
|
LPgenerator/django-db-mailer
|
dbmail/providers/telegram/bot.py
|
send
|
python
|
def send(to, message, **kwargs):
    """Send *message* to chat *to* via the configured Telegram bot.

    SITE: https://github.com/nickoala/telepot
    TELEGRAM API: https://core.telegram.org/bots/api

    Installation:
    pip install 'telepot>=10.4'

    Only Bot-API options recognised by sendMessage are forwarded.
    """
    available_kwargs_keys = [
        'parse_mode',
        'disable_web_page_preview',
        'disable_notification',
        'reply_to_message_id',
        'reply_markup'
    ]
    # BUGFIX: dict.iteritems() does not exist on Python 3; items()
    # behaves identically here on both Python 2 and 3.
    available_kwargs = {
        k: v for k, v in kwargs.items() if k in available_kwargs_keys
    }
    bot = telepot.Bot(settings.TELEGRAM_BOT_TOKEN)
    return bot.sendMessage(to, message, **available_kwargs)
|
SITE: https://github.com/nickoala/telepot
TELEGRAM API: https://core.telegram.org/bots/api
Installation:
pip install 'telepot>=10.4'
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/telegram/bot.py#L7-L29
| null |
# -*- encoding: utf-8 -*-
import telepot
from django.conf import settings
|
LPgenerator/django-db-mailer
|
dbmail/providers/google/browser.py
|
send
|
python
|
def send(reg_id, message, **kwargs):
    """Send a Web Push notification via pywebpush.

    :param reg_id: not referenced in the body — the subscription comes
        from kwargs['subscription_info'].  # NOTE(review): confirm
        reg_id is intentionally unused.
    :param message: notification body; kwargs must include ``event``
        (title) and ``subscription_info``.
    :raises GCMError: on a non-OK response or a reported failure count.
    :return: True on success.
    """
    subscription_info = kwargs.pop('subscription_info')
    payload = {
        "title": kwargs.pop("event"),
        "body": message,
        "url": kwargs.pop("push_url", None)
    }
    payload.update(kwargs)
    wp = WebPusher(subscription_info)
    response = wp.send(
        dumps(payload), gcm_key=settings.GCM_KEY,
        ttl=kwargs.pop("ttl", 60))
    if not response.ok or (
            response.text and loads(response.text).get("failure") > 0):
        raise GCMError(response.text)
    return True
|
Site: https://developers.google.com
API: https://developers.google.com/web/updates/2016/03/web-push-encryption
Desc: Web Push notifications for Chrome and FireFox
Installation:
pip install 'pywebpush>=0.4.0'
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/google/browser.py#L13-L40
| null |
# -*- encoding: utf-8 -*-
from json import dumps, loads
from django.conf import settings
from pywebpush import WebPusher
class GCMError(Exception):
pass
|
LPgenerator/django-db-mailer
|
dbmail/providers/apple/apns2.py
|
send
|
python
|
def send(token_hex, message, **kwargs):
priority = kwargs.pop('priority', 10)
topic = kwargs.pop('topic', None)
alert = {
"title": kwargs.pop("event"),
"body": message,
"action": kwargs.pop(
'apns_action', defaults.APNS_PROVIDER_DEFAULT_ACTION)
}
data = {
"aps": {
'alert': alert,
'content-available': kwargs.pop('content_available', 0) and 1
}
}
data['aps'].update(kwargs)
payload = dumps(data, separators=(',', ':'))
headers = {
'apns-priority': priority
}
if topic is not None:
headers['apns-topic'] = topic
ssl_context = init_context()
ssl_context.load_cert_chain(settings.APNS_CERT_FILE)
connection = HTTP20Connection(
settings.APNS_GW_HOST, settings.APNS_GW_PORT, ssl_context=ssl_context)
stream_id = connection.request(
'POST', '/3/device/{}'.format(token_hex), payload, headers)
response = connection.get_response(stream_id)
if response.status != 200:
raise APNsError(response.read())
return True
|
Site: https://apple.com
API: https://developer.apple.com
Desc: iOS notifications
Installation and usage:
pip install hyper
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/apple/apns2.py#L11-L56
| null |
from json import dumps
from django.conf import settings
from hyper import HTTP20Connection
from hyper.tls import init_context
from dbmail import defaults
from dbmail.providers.apple.errors import APNsError
|
LPgenerator/django-db-mailer
|
dbmail/providers/twilio/sms.py
|
send
|
python
|
def send(sms_to, sms_body, **kwargs):
headers = {
"Content-type": "application/x-www-form-urlencoded",
"User-Agent": "DBMail/%s" % get_version(),
'Authorization': 'Basic %s' % b64encode(
"%s:%s" % (
settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN
)).decode("ascii")
}
kwargs.update({
'From': kwargs.pop('sms_from', settings.TWILIO_FROM),
'To': sms_to,
'Body': from_unicode(sms_body)
})
http = HTTPSConnection(kwargs.pop("api_url", "api.twilio.com"))
http.request(
"POST",
"/2010-04-01/Accounts/%s/Messages.json" % settings.TWILIO_ACCOUNT_SID,
headers=headers,
body=urlencode(kwargs))
response = http.getresponse()
if response.status != 201:
raise TwilioSmsError(response.reason)
return loads(response.read()).get('sid')
|
Site: https://www.twilio.com/
API: https://www.twilio.com/docs/api/rest/sending-messages
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/twilio/sms.py#L23-L55
|
[
"def get_version():\n return '.'.join(map(str, VERSION))\n",
"def from_unicode(text, text_length=None):\n try:\n text = text.encode('utf-8', 'ignore')\n except UnicodeDecodeError:\n pass\n\n if text_length is not None:\n text = text[0:text_length]\n\n return text\n"
] |
# -*- coding: utf-8 -*-
try:
from httplib import HTTPSConnection
from urllib import urlencode
except ImportError:
from http.client import HTTPSConnection
from urllib.parse import urlencode
from base64 import b64encode
from json import loads
from django.conf import settings
from dbmail.providers.prowl.push import from_unicode
from dbmail import get_version
class TwilioSmsError(Exception):
pass
|
LPgenerator/django-db-mailer
|
dbmail/providers/pushover/push.py
|
send
|
python
|
def send(user, message, **kwargs):
headers = {
"Content-type": "application/x-www-form-urlencoded",
"User-Agent": "DBMail/%s" % get_version(),
}
title = from_unicode(kwargs.pop("title", settings.PUSHOVER_APP))
message = from_unicode(message)
data = {
"token": settings.PUSHOVER_TOKEN,
"user": user,
"message": message,
"title": title,
"priority": kwargs.pop("priority", 0)
}
_data = kwargs.pop('data', None)
if _data is not None:
data.update(_data)
http = HTTPSConnection(kwargs.pop("api_url", "api.pushover.net"))
http.request(
"POST", "/1/messages.json",
headers=headers,
body=urlencode(data))
response = http.getresponse()
if response.status != 200:
raise PushOverError(response.reason)
body = loads(response.read())
if body.get('status') != 1:
raise PushOverError(repr(body))
return True
|
Site: https://pushover.net/
API: https://pushover.net/api
Desc: real-time notifications
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/pushover/push.py#L22-L61
|
[
"def get_version():\n return '.'.join(map(str, VERSION))\n",
"def from_unicode(text, text_length=None):\n try:\n text = text.encode('utf-8', 'ignore')\n except UnicodeDecodeError:\n pass\n\n if text_length is not None:\n text = text[0:text_length]\n\n return text\n"
] |
# -*- encoding: utf-8 -*-
try:
from httplib import HTTPSConnection
from urllib import urlencode
except ImportError:
from http.client import HTTPSConnection
from urllib.parse import urlencode
from json import loads
from django.conf import settings
from dbmail import get_version
from dbmail.providers.prowl.push import from_unicode
class PushOverError(Exception):
pass
|
LPgenerator/django-db-mailer
|
dbmail/providers/google/android.py
|
send
|
python
|
def send(user, message, **kwargs):
headers = {
"Content-type": "application/json",
"Authorization": "key=" + kwargs.pop("gcm_key", settings.GCM_KEY)
}
hook_url = 'https://android.googleapis.com/gcm/send'
data = {
"registration_ids": [user],
"data": {
"title": kwargs.pop("event"),
'message': message,
}
}
data['data'].update(kwargs)
up = urlparse(hook_url)
http = HTTPSConnection(up.netloc)
http.request(
"POST", up.path,
headers=headers,
body=dumps(data))
response = http.getresponse()
if response.status != 200:
raise GCMError(response.reason)
body = response.read()
if loads(body).get("failure") > 0:
raise GCMError(repr(body))
return True
|
Site: https://developers.google.com
API: https://developers.google.com/cloud-messaging/
Desc: Android notifications
|
train
|
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/google/android.py#L18-L55
| null |
# -*- encoding: utf-8 -*-
try:
from httplib import HTTPSConnection
from urlparse import urlparse
except ImportError:
from http.client import HTTPSConnection
from urllib.parse import urlparse
from json import dumps, loads
from django.conf import settings
class GCMError(Exception):
pass
|
gregmuellegger/django-superform
|
django_superform/forms.py
|
SuperFormMixin.add_composite_field
|
python
|
def add_composite_field(self, name, field):
self.composite_fields[name] = field
self._init_composite_field(name, field)
|
Add a dynamic composite field to the already existing ones and
initialize it appropriatly.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/forms.py#L189-L195
|
[
"def _init_composite_field(self, name, field):\n if hasattr(field, 'get_form'):\n form = field.get_form(self, name)\n self.forms[name] = form\n if hasattr(field, 'get_formset'):\n formset = field.get_formset(self, name)\n self.formsets[name] = formset\n"
] |
class SuperFormMixin(object):
"""
The base class for all super forms. It does not inherit from any other
classes, so you are free to mix it into any custom form class you have. You
need to use it together with ``SuperFormMetaclass``, like this:
.. code:: python
from django_superform import SuperFormMixin
from django_superform import SuperFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperFormMetaclass,
SuperFormMixin,
MyCustomForm)):
pass
The goal of a superform is to behave just like a normal django form but is
able to take composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
Cleaning, validation, etc. should work totally transparent. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
def __init__(self, *args, **kwargs):
super(SuperFormMixin, self).__init__(*args, **kwargs)
self._init_composite_fields()
def __getitem__(self, name):
"""
Returns a ``django.forms.BoundField`` for the given field name. It also
returns :class:`~django_superform.boundfield.CompositeBoundField`
instances for composite fields.
"""
if name not in self.fields and name in self.composite_fields:
field = self.composite_fields[name]
return field.get_bound_field(self, name)
return super(SuperFormMixin, self).__getitem__(name)
def get_composite_field_value(self, name):
"""
Return the form/formset instance for the given field name.
"""
field = self.composite_fields[name]
if hasattr(field, 'get_form'):
return self.forms[name]
if hasattr(field, 'get_formset'):
return self.formsets[name]
def _init_composite_field(self, name, field):
if hasattr(field, 'get_form'):
form = field.get_form(self, name)
self.forms[name] = form
if hasattr(field, 'get_formset'):
formset = field.get_formset(self, name)
self.formsets[name] = formset
def _init_composite_fields(self):
"""
Setup the forms and formsets.
"""
# The base_composite_fields class attribute is the *class-wide*
# definition of fields. Because a particular *instance* of the class
# might want to alter self.composite_fields, we create
# self.composite_fields here by copying base_composite_fields.
# Instances should always modify self.composite_fields; they should not
# modify base_composite_fields.
self.composite_fields = copy.deepcopy(self.base_composite_fields)
self.forms = OrderedDict()
self.formsets = OrderedDict()
for name, field in self.composite_fields.items():
self._init_composite_field(name, field)
def full_clean(self):
"""
Clean the form, including all formsets and add formset errors to the
errors dict. Errors of nested forms and formsets are only included if
they actually contain errors.
"""
super(SuperFormMixin, self).full_clean()
for field_name, composite in self.forms.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorDict(composite._errors)
for field_name, composite in self.formsets.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorList(composite._errors)
@property
def media(self):
"""
Incooperate composite field's media.
"""
media_list = []
media_list.append(super(SuperFormMixin, self).media)
for composite_name in self.composite_fields.keys():
form = self.get_composite_field_value(composite_name)
media_list.append(form.media)
return reduce(lambda a, b: a + b, media_list)
|
gregmuellegger/django-superform
|
django_superform/forms.py
|
SuperFormMixin.get_composite_field_value
|
python
|
def get_composite_field_value(self, name):
field = self.composite_fields[name]
if hasattr(field, 'get_form'):
return self.forms[name]
if hasattr(field, 'get_formset'):
return self.formsets[name]
|
Return the form/formset instance for the given field name.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/forms.py#L197-L205
| null |
class SuperFormMixin(object):
"""
The base class for all super forms. It does not inherit from any other
classes, so you are free to mix it into any custom form class you have. You
need to use it together with ``SuperFormMetaclass``, like this:
.. code:: python
from django_superform import SuperFormMixin
from django_superform import SuperFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperFormMetaclass,
SuperFormMixin,
MyCustomForm)):
pass
The goal of a superform is to behave just like a normal django form but is
able to take composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
Cleaning, validation, etc. should work totally transparent. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
def __init__(self, *args, **kwargs):
super(SuperFormMixin, self).__init__(*args, **kwargs)
self._init_composite_fields()
def __getitem__(self, name):
"""
Returns a ``django.forms.BoundField`` for the given field name. It also
returns :class:`~django_superform.boundfield.CompositeBoundField`
instances for composite fields.
"""
if name not in self.fields and name in self.composite_fields:
field = self.composite_fields[name]
return field.get_bound_field(self, name)
return super(SuperFormMixin, self).__getitem__(name)
def add_composite_field(self, name, field):
"""
Add a dynamic composite field to the already existing ones and
initialize it appropriatly.
"""
self.composite_fields[name] = field
self._init_composite_field(name, field)
def _init_composite_field(self, name, field):
if hasattr(field, 'get_form'):
form = field.get_form(self, name)
self.forms[name] = form
if hasattr(field, 'get_formset'):
formset = field.get_formset(self, name)
self.formsets[name] = formset
def _init_composite_fields(self):
"""
Setup the forms and formsets.
"""
# The base_composite_fields class attribute is the *class-wide*
# definition of fields. Because a particular *instance* of the class
# might want to alter self.composite_fields, we create
# self.composite_fields here by copying base_composite_fields.
# Instances should always modify self.composite_fields; they should not
# modify base_composite_fields.
self.composite_fields = copy.deepcopy(self.base_composite_fields)
self.forms = OrderedDict()
self.formsets = OrderedDict()
for name, field in self.composite_fields.items():
self._init_composite_field(name, field)
def full_clean(self):
"""
Clean the form, including all formsets and add formset errors to the
errors dict. Errors of nested forms and formsets are only included if
they actually contain errors.
"""
super(SuperFormMixin, self).full_clean()
for field_name, composite in self.forms.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorDict(composite._errors)
for field_name, composite in self.formsets.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorList(composite._errors)
@property
def media(self):
"""
Incooperate composite field's media.
"""
media_list = []
media_list.append(super(SuperFormMixin, self).media)
for composite_name in self.composite_fields.keys():
form = self.get_composite_field_value(composite_name)
media_list.append(form.media)
return reduce(lambda a, b: a + b, media_list)
|
gregmuellegger/django-superform
|
django_superform/forms.py
|
SuperFormMixin._init_composite_fields
|
python
|
def _init_composite_fields(self):
# The base_composite_fields class attribute is the *class-wide*
# definition of fields. Because a particular *instance* of the class
# might want to alter self.composite_fields, we create
# self.composite_fields here by copying base_composite_fields.
# Instances should always modify self.composite_fields; they should not
# modify base_composite_fields.
self.composite_fields = copy.deepcopy(self.base_composite_fields)
self.forms = OrderedDict()
self.formsets = OrderedDict()
for name, field in self.composite_fields.items():
self._init_composite_field(name, field)
|
Setup the forms and formsets.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/forms.py#L215-L229
|
[
"def _init_composite_field(self, name, field):\n if hasattr(field, 'get_form'):\n form = field.get_form(self, name)\n self.forms[name] = form\n if hasattr(field, 'get_formset'):\n formset = field.get_formset(self, name)\n self.formsets[name] = formset\n"
] |
class SuperFormMixin(object):
"""
The base class for all super forms. It does not inherit from any other
classes, so you are free to mix it into any custom form class you have. You
need to use it together with ``SuperFormMetaclass``, like this:
.. code:: python
from django_superform import SuperFormMixin
from django_superform import SuperFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperFormMetaclass,
SuperFormMixin,
MyCustomForm)):
pass
The goal of a superform is to behave just like a normal django form but is
able to take composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
Cleaning, validation, etc. should work totally transparent. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
def __init__(self, *args, **kwargs):
super(SuperFormMixin, self).__init__(*args, **kwargs)
self._init_composite_fields()
def __getitem__(self, name):
"""
Returns a ``django.forms.BoundField`` for the given field name. It also
returns :class:`~django_superform.boundfield.CompositeBoundField`
instances for composite fields.
"""
if name not in self.fields and name in self.composite_fields:
field = self.composite_fields[name]
return field.get_bound_field(self, name)
return super(SuperFormMixin, self).__getitem__(name)
def add_composite_field(self, name, field):
"""
Add a dynamic composite field to the already existing ones and
initialize it appropriatly.
"""
self.composite_fields[name] = field
self._init_composite_field(name, field)
def get_composite_field_value(self, name):
"""
Return the form/formset instance for the given field name.
"""
field = self.composite_fields[name]
if hasattr(field, 'get_form'):
return self.forms[name]
if hasattr(field, 'get_formset'):
return self.formsets[name]
def _init_composite_field(self, name, field):
if hasattr(field, 'get_form'):
form = field.get_form(self, name)
self.forms[name] = form
if hasattr(field, 'get_formset'):
formset = field.get_formset(self, name)
self.formsets[name] = formset
def full_clean(self):
"""
Clean the form, including all formsets and add formset errors to the
errors dict. Errors of nested forms and formsets are only included if
they actually contain errors.
"""
super(SuperFormMixin, self).full_clean()
for field_name, composite in self.forms.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorDict(composite._errors)
for field_name, composite in self.formsets.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorList(composite._errors)
@property
def media(self):
"""
Incooperate composite field's media.
"""
media_list = []
media_list.append(super(SuperFormMixin, self).media)
for composite_name in self.composite_fields.keys():
form = self.get_composite_field_value(composite_name)
media_list.append(form.media)
return reduce(lambda a, b: a + b, media_list)
|
gregmuellegger/django-superform
|
django_superform/forms.py
|
SuperFormMixin.full_clean
|
python
|
def full_clean(self):
super(SuperFormMixin, self).full_clean()
for field_name, composite in self.forms.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorDict(composite._errors)
for field_name, composite in self.formsets.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorList(composite._errors)
|
Clean the form, including all formsets and add formset errors to the
errors dict. Errors of nested forms and formsets are only included if
they actually contain errors.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/forms.py#L231-L245
| null |
class SuperFormMixin(object):
"""
The base class for all super forms. It does not inherit from any other
classes, so you are free to mix it into any custom form class you have. You
need to use it together with ``SuperFormMetaclass``, like this:
.. code:: python
from django_superform import SuperFormMixin
from django_superform import SuperFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperFormMetaclass,
SuperFormMixin,
MyCustomForm)):
pass
The goal of a superform is to behave just like a normal django form but is
able to take composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
Cleaning, validation, etc. should work totally transparent. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
def __init__(self, *args, **kwargs):
super(SuperFormMixin, self).__init__(*args, **kwargs)
self._init_composite_fields()
def __getitem__(self, name):
"""
Returns a ``django.forms.BoundField`` for the given field name. It also
returns :class:`~django_superform.boundfield.CompositeBoundField`
instances for composite fields.
"""
if name not in self.fields and name in self.composite_fields:
field = self.composite_fields[name]
return field.get_bound_field(self, name)
return super(SuperFormMixin, self).__getitem__(name)
def add_composite_field(self, name, field):
"""
Add a dynamic composite field to the already existing ones and
initialize it appropriatly.
"""
self.composite_fields[name] = field
self._init_composite_field(name, field)
def get_composite_field_value(self, name):
"""
Return the form/formset instance for the given field name.
"""
field = self.composite_fields[name]
if hasattr(field, 'get_form'):
return self.forms[name]
if hasattr(field, 'get_formset'):
return self.formsets[name]
def _init_composite_field(self, name, field):
if hasattr(field, 'get_form'):
form = field.get_form(self, name)
self.forms[name] = form
if hasattr(field, 'get_formset'):
formset = field.get_formset(self, name)
self.formsets[name] = formset
def _init_composite_fields(self):
"""
Setup the forms and formsets.
"""
# The base_composite_fields class attribute is the *class-wide*
# definition of fields. Because a particular *instance* of the class
# might want to alter self.composite_fields, we create
# self.composite_fields here by copying base_composite_fields.
# Instances should always modify self.composite_fields; they should not
# modify base_composite_fields.
self.composite_fields = copy.deepcopy(self.base_composite_fields)
self.forms = OrderedDict()
self.formsets = OrderedDict()
for name, field in self.composite_fields.items():
self._init_composite_field(name, field)
@property
def media(self):
"""
Incooperate composite field's media.
"""
media_list = []
media_list.append(super(SuperFormMixin, self).media)
for composite_name in self.composite_fields.keys():
form = self.get_composite_field_value(composite_name)
media_list.append(form.media)
return reduce(lambda a, b: a + b, media_list)
|
gregmuellegger/django-superform
|
django_superform/forms.py
|
SuperFormMixin.media
|
python
|
def media(self):
media_list = []
media_list.append(super(SuperFormMixin, self).media)
for composite_name in self.composite_fields.keys():
form = self.get_composite_field_value(composite_name)
media_list.append(form.media)
return reduce(lambda a, b: a + b, media_list)
|
Incooperate composite field's media.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/forms.py#L248-L257
|
[
"def get_composite_field_value(self, name):\n \"\"\"\n Return the form/formset instance for the given field name.\n \"\"\"\n field = self.composite_fields[name]\n if hasattr(field, 'get_form'):\n return self.forms[name]\n if hasattr(field, 'get_formset'):\n return self.formsets[name]\n"
] |
class SuperFormMixin(object):
"""
The base class for all super forms. It does not inherit from any other
classes, so you are free to mix it into any custom form class you have. You
need to use it together with ``SuperFormMetaclass``, like this:
.. code:: python
from django_superform import SuperFormMixin
from django_superform import SuperFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperFormMetaclass,
SuperFormMixin,
MyCustomForm)):
pass
The goal of a superform is to behave just like a normal django form but is
able to take composite fields, like
:class:`~django_superform.fields.FormField` and
:class:`~django_superform.fields.FormSetField`.
Cleaning, validation, etc. should work totally transparent. See the
:ref:`Quickstart Guide <quickstart>` for how superforms are used.
"""
def __init__(self, *args, **kwargs):
super(SuperFormMixin, self).__init__(*args, **kwargs)
self._init_composite_fields()
def __getitem__(self, name):
"""
Returns a ``django.forms.BoundField`` for the given field name. It also
returns :class:`~django_superform.boundfield.CompositeBoundField`
instances for composite fields.
"""
if name not in self.fields and name in self.composite_fields:
field = self.composite_fields[name]
return field.get_bound_field(self, name)
return super(SuperFormMixin, self).__getitem__(name)
def add_composite_field(self, name, field):
"""
Add a dynamic composite field to the already existing ones and
initialize it appropriatly.
"""
self.composite_fields[name] = field
self._init_composite_field(name, field)
def get_composite_field_value(self, name):
"""
Return the form/formset instance for the given field name.
"""
field = self.composite_fields[name]
if hasattr(field, 'get_form'):
return self.forms[name]
if hasattr(field, 'get_formset'):
return self.formsets[name]
def _init_composite_field(self, name, field):
if hasattr(field, 'get_form'):
form = field.get_form(self, name)
self.forms[name] = form
if hasattr(field, 'get_formset'):
formset = field.get_formset(self, name)
self.formsets[name] = formset
def _init_composite_fields(self):
"""
Setup the forms and formsets.
"""
# The base_composite_fields class attribute is the *class-wide*
# definition of fields. Because a particular *instance* of the class
# might want to alter self.composite_fields, we create
# self.composite_fields here by copying base_composite_fields.
# Instances should always modify self.composite_fields; they should not
# modify base_composite_fields.
self.composite_fields = copy.deepcopy(self.base_composite_fields)
self.forms = OrderedDict()
self.formsets = OrderedDict()
for name, field in self.composite_fields.items():
self._init_composite_field(name, field)
def full_clean(self):
"""
Clean the form, including all formsets and add formset errors to the
errors dict. Errors of nested forms and formsets are only included if
they actually contain errors.
"""
super(SuperFormMixin, self).full_clean()
for field_name, composite in self.forms.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorDict(composite._errors)
for field_name, composite in self.formsets.items():
composite.full_clean()
if not composite.is_valid() and composite._errors:
self._errors[field_name] = ErrorList(composite._errors)
@property
|
gregmuellegger/django-superform
|
django_superform/forms.py
|
SuperModelFormMixin.save
|
python
|
def save(self, commit=True):
saved_obj = self.save_form(commit=commit)
self.save_forms(commit=commit)
self.save_formsets(commit=commit)
return saved_obj
|
When saving a super model form, the nested forms and formsets will be
saved as well.
The implementation of ``.save()`` looks like this:
.. code:: python
saved_obj = self.save_form()
self.save_forms()
self.save_formsets()
return saved_obj
That makes it easy to override it in order to change the order in which
things are saved.
The ``.save()`` method will return only a single model instance even if
nested forms are saved as well. That keeps the API similiar to what
Django's model forms are offering.
If ``commit=False`` django's modelform implementation will attach a
``save_m2m`` method to the form instance, so that you can call it
manually later. When you call ``save_m2m``, the ``save_forms`` and
``save_formsets`` methods will be executed as well so again all nested
forms are taken care of transparantly.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/forms.py#L277-L307
|
[
"def save_form(self, commit=True):\n \"\"\"\n This calls Django's ``ModelForm.save()``. It only takes care of\n saving this actual form, and leaves the nested forms and formsets\n alone.\n\n We separate this out of the\n :meth:`~django_superform.forms.SuperModelForm.save` method to make\n extensibility easier.\n \"\"\"\n return super(SuperModelFormMixin, self).save(commit=commit)\n",
"def save_forms(self, commit=True):\n saved_composites = []\n for name, composite in self.forms.items():\n field = self.composite_fields[name]\n if hasattr(field, 'save'):\n field.save(self, name, composite, commit=commit)\n saved_composites.append(composite)\n\n self._extend_save_m2m('save_forms_m2m', saved_composites)\n",
"def save_formsets(self, commit=True):\n \"\"\"\n Save all formsets. If ``commit=False``, it will modify the form's\n ``save_m2m()`` so that it also calls the formsets' ``save_m2m()``\n methods.\n \"\"\"\n saved_composites = []\n for name, composite in self.formsets.items():\n field = self.composite_fields[name]\n if hasattr(field, 'save'):\n field.save(self, name, composite, commit=commit)\n saved_composites.append(composite)\n\n self._extend_save_m2m('save_formsets_m2m', saved_composites)\n"
] |
class SuperModelFormMixin(SuperFormMixin):
"""
Can be used in with your custom form subclasses like this:
.. code:: python
from django_superform import SuperModelFormMixin
from django_superform import SuperModelFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperModelFormMetaclass,
SuperModelFormMixin,
MyCustomModelForm)):
pass
"""
def _extend_save_m2m(self, name, composites):
additional_save_m2m = []
for composite in composites:
if hasattr(composite, 'save_m2m'):
additional_save_m2m.append(composite.save_m2m)
if not additional_save_m2m:
return
def additional_saves():
for save_m2m in additional_save_m2m:
save_m2m()
# The save() method was called before save_forms()/save_formsets(), so
# we will already have save_m2m() available.
if hasattr(self, 'save_m2m'):
_original_save_m2m = self.save_m2m
else:
def _original_save_m2m():
return None
def augmented_save_m2m():
_original_save_m2m()
additional_saves()
self.save_m2m = augmented_save_m2m
setattr(self, name, additional_saves)
def save_form(self, commit=True):
"""
This calls Django's ``ModelForm.save()``. It only takes care of
saving this actual form, and leaves the nested forms and formsets
alone.
We separate this out of the
:meth:`~django_superform.forms.SuperModelForm.save` method to make
extensibility easier.
"""
return super(SuperModelFormMixin, self).save(commit=commit)
def save_forms(self, commit=True):
saved_composites = []
for name, composite in self.forms.items():
field = self.composite_fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_forms_m2m', saved_composites)
def save_formsets(self, commit=True):
"""
Save all formsets. If ``commit=False``, it will modify the form's
``save_m2m()`` so that it also calls the formsets' ``save_m2m()``
methods.
"""
saved_composites = []
for name, composite in self.formsets.items():
field = self.composite_fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_formsets_m2m', saved_composites)
|
gregmuellegger/django-superform
|
django_superform/forms.py
|
SuperModelFormMixin.save_form
|
python
|
def save_form(self, commit=True):
return super(SuperModelFormMixin, self).save(commit=commit)
|
This calls Django's ``ModelForm.save()``. It only takes care of
saving this actual form, and leaves the nested forms and formsets
alone.
We separate this out of the
:meth:`~django_superform.forms.SuperModelForm.save` method to make
extensibility easier.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/forms.py#L337-L347
| null |
class SuperModelFormMixin(SuperFormMixin):
"""
Can be used in with your custom form subclasses like this:
.. code:: python
from django_superform import SuperModelFormMixin
from django_superform import SuperModelFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperModelFormMetaclass,
SuperModelFormMixin,
MyCustomModelForm)):
pass
"""
def save(self, commit=True):
"""
When saving a super model form, the nested forms and formsets will be
saved as well.
The implementation of ``.save()`` looks like this:
.. code:: python
saved_obj = self.save_form()
self.save_forms()
self.save_formsets()
return saved_obj
That makes it easy to override it in order to change the order in which
things are saved.
The ``.save()`` method will return only a single model instance even if
nested forms are saved as well. That keeps the API similiar to what
Django's model forms are offering.
If ``commit=False`` django's modelform implementation will attach a
``save_m2m`` method to the form instance, so that you can call it
manually later. When you call ``save_m2m``, the ``save_forms`` and
``save_formsets`` methods will be executed as well so again all nested
forms are taken care of transparantly.
"""
saved_obj = self.save_form(commit=commit)
self.save_forms(commit=commit)
self.save_formsets(commit=commit)
return saved_obj
def _extend_save_m2m(self, name, composites):
additional_save_m2m = []
for composite in composites:
if hasattr(composite, 'save_m2m'):
additional_save_m2m.append(composite.save_m2m)
if not additional_save_m2m:
return
def additional_saves():
for save_m2m in additional_save_m2m:
save_m2m()
# The save() method was called before save_forms()/save_formsets(), so
# we will already have save_m2m() available.
if hasattr(self, 'save_m2m'):
_original_save_m2m = self.save_m2m
else:
def _original_save_m2m():
return None
def augmented_save_m2m():
_original_save_m2m()
additional_saves()
self.save_m2m = augmented_save_m2m
setattr(self, name, additional_saves)
def save_forms(self, commit=True):
saved_composites = []
for name, composite in self.forms.items():
field = self.composite_fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_forms_m2m', saved_composites)
def save_formsets(self, commit=True):
"""
Save all formsets. If ``commit=False``, it will modify the form's
``save_m2m()`` so that it also calls the formsets' ``save_m2m()``
methods.
"""
saved_composites = []
for name, composite in self.formsets.items():
field = self.composite_fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_formsets_m2m', saved_composites)
|
gregmuellegger/django-superform
|
django_superform/forms.py
|
SuperModelFormMixin.save_formsets
|
python
|
def save_formsets(self, commit=True):
saved_composites = []
for name, composite in self.formsets.items():
field = self.composite_fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_formsets_m2m', saved_composites)
|
Save all formsets. If ``commit=False``, it will modify the form's
``save_m2m()`` so that it also calls the formsets' ``save_m2m()``
methods.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/forms.py#L359-L372
|
[
"def _extend_save_m2m(self, name, composites):\n additional_save_m2m = []\n for composite in composites:\n if hasattr(composite, 'save_m2m'):\n additional_save_m2m.append(composite.save_m2m)\n\n if not additional_save_m2m:\n return\n\n def additional_saves():\n for save_m2m in additional_save_m2m:\n save_m2m()\n\n # The save() method was called before save_forms()/save_formsets(), so\n # we will already have save_m2m() available.\n if hasattr(self, 'save_m2m'):\n _original_save_m2m = self.save_m2m\n else:\n def _original_save_m2m():\n return None\n\n def augmented_save_m2m():\n _original_save_m2m()\n additional_saves()\n\n self.save_m2m = augmented_save_m2m\n setattr(self, name, additional_saves)\n"
] |
class SuperModelFormMixin(SuperFormMixin):
"""
Can be used in with your custom form subclasses like this:
.. code:: python
from django_superform import SuperModelFormMixin
from django_superform import SuperModelFormMetaclass
import six
class MySuperForm(six.with_metaclass(
SuperModelFormMetaclass,
SuperModelFormMixin,
MyCustomModelForm)):
pass
"""
def save(self, commit=True):
"""
When saving a super model form, the nested forms and formsets will be
saved as well.
The implementation of ``.save()`` looks like this:
.. code:: python
saved_obj = self.save_form()
self.save_forms()
self.save_formsets()
return saved_obj
That makes it easy to override it in order to change the order in which
things are saved.
The ``.save()`` method will return only a single model instance even if
nested forms are saved as well. That keeps the API similiar to what
Django's model forms are offering.
If ``commit=False`` django's modelform implementation will attach a
``save_m2m`` method to the form instance, so that you can call it
manually later. When you call ``save_m2m``, the ``save_forms`` and
``save_formsets`` methods will be executed as well so again all nested
forms are taken care of transparantly.
"""
saved_obj = self.save_form(commit=commit)
self.save_forms(commit=commit)
self.save_formsets(commit=commit)
return saved_obj
def _extend_save_m2m(self, name, composites):
additional_save_m2m = []
for composite in composites:
if hasattr(composite, 'save_m2m'):
additional_save_m2m.append(composite.save_m2m)
if not additional_save_m2m:
return
def additional_saves():
for save_m2m in additional_save_m2m:
save_m2m()
# The save() method was called before save_forms()/save_formsets(), so
# we will already have save_m2m() available.
if hasattr(self, 'save_m2m'):
_original_save_m2m = self.save_m2m
else:
def _original_save_m2m():
return None
def augmented_save_m2m():
_original_save_m2m()
additional_saves()
self.save_m2m = augmented_save_m2m
setattr(self, name, additional_saves)
def save_form(self, commit=True):
"""
This calls Django's ``ModelForm.save()``. It only takes care of
saving this actual form, and leaves the nested forms and formsets
alone.
We separate this out of the
:meth:`~django_superform.forms.SuperModelForm.save` method to make
extensibility easier.
"""
return super(SuperModelFormMixin, self).save(commit=commit)
def save_forms(self, commit=True):
saved_composites = []
for name, composite in self.forms.items():
field = self.composite_fields[name]
if hasattr(field, 'save'):
field.save(self, name, composite, commit=commit)
saved_composites.append(composite)
self._extend_save_m2m('save_forms_m2m', saved_composites)
|
gregmuellegger/django-superform
|
django_superform/fields.py
|
CompositeField.get_prefix
|
python
|
def get_prefix(self, form, name):
return '{form_prefix}{prefix_name}-{field_name}'.format(
form_prefix=form.prefix + '-' if form.prefix else '',
prefix_name=self.prefix_name,
field_name=name)
|
Return the prefix that is used for the formset.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L67-L74
| null |
class CompositeField(BaseCompositeField):
"""
Implements the base structure that is relevant for all composite fields.
This field cannot be used directly, use a subclass of it.
"""
prefix_name = 'composite'
def __init__(self, *args, **kwargs):
super(CompositeField, self).__init__(*args, **kwargs)
# Let the widget know about the field for easier complex renderings in
# the template.
self.widget.field = self
def get_bound_field(self, form, field_name):
return CompositeBoundField(form, self, field_name)
def get_initial(self, form, name):
"""
Get the initial data that got passed into the superform for this
composite field. It should return ``None`` if no initial values where
given.
"""
if hasattr(form, 'initial'):
return form.initial.get(name, None)
return None
def get_kwargs(self, form, name):
"""
Return the keyword arguments that are used to instantiate the formset.
"""
kwargs = {
'prefix': self.get_prefix(form, name),
'initial': self.get_initial(form, name),
}
kwargs.update(self.default_kwargs)
return kwargs
|
gregmuellegger/django-superform
|
django_superform/fields.py
|
CompositeField.get_initial
|
python
|
def get_initial(self, form, name):
if hasattr(form, 'initial'):
return form.initial.get(name, None)
return None
|
Get the initial data that got passed into the superform for this
composite field. It should return ``None`` if no initial values where
given.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L76-L85
| null |
class CompositeField(BaseCompositeField):
"""
Implements the base structure that is relevant for all composite fields.
This field cannot be used directly, use a subclass of it.
"""
prefix_name = 'composite'
def __init__(self, *args, **kwargs):
super(CompositeField, self).__init__(*args, **kwargs)
# Let the widget know about the field for easier complex renderings in
# the template.
self.widget.field = self
def get_bound_field(self, form, field_name):
return CompositeBoundField(form, self, field_name)
def get_prefix(self, form, name):
"""
Return the prefix that is used for the formset.
"""
return '{form_prefix}{prefix_name}-{field_name}'.format(
form_prefix=form.prefix + '-' if form.prefix else '',
prefix_name=self.prefix_name,
field_name=name)
def get_kwargs(self, form, name):
"""
Return the keyword arguments that are used to instantiate the formset.
"""
kwargs = {
'prefix': self.get_prefix(form, name),
'initial': self.get_initial(form, name),
}
kwargs.update(self.default_kwargs)
return kwargs
|
gregmuellegger/django-superform
|
django_superform/fields.py
|
CompositeField.get_kwargs
|
python
|
def get_kwargs(self, form, name):
kwargs = {
'prefix': self.get_prefix(form, name),
'initial': self.get_initial(form, name),
}
kwargs.update(self.default_kwargs)
return kwargs
|
Return the keyword arguments that are used to instantiate the formset.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L87-L96
|
[
"def get_prefix(self, form, name):\n \"\"\"\n Return the prefix that is used for the formset.\n \"\"\"\n return '{form_prefix}{prefix_name}-{field_name}'.format(\n form_prefix=form.prefix + '-' if form.prefix else '',\n prefix_name=self.prefix_name,\n field_name=name)\n",
"def get_initial(self, form, name):\n \"\"\"\n Get the initial data that got passed into the superform for this\n composite field. It should return ``None`` if no initial values where\n given.\n \"\"\"\n\n if hasattr(form, 'initial'):\n return form.initial.get(name, None)\n return None\n"
] |
class CompositeField(BaseCompositeField):
"""
Implements the base structure that is relevant for all composite fields.
This field cannot be used directly, use a subclass of it.
"""
prefix_name = 'composite'
def __init__(self, *args, **kwargs):
super(CompositeField, self).__init__(*args, **kwargs)
# Let the widget know about the field for easier complex renderings in
# the template.
self.widget.field = self
def get_bound_field(self, form, field_name):
return CompositeBoundField(form, self, field_name)
def get_prefix(self, form, name):
"""
Return the prefix that is used for the formset.
"""
return '{form_prefix}{prefix_name}-{field_name}'.format(
form_prefix=form.prefix + '-' if form.prefix else '',
prefix_name=self.prefix_name,
field_name=name)
def get_initial(self, form, name):
"""
Get the initial data that got passed into the superform for this
composite field. It should return ``None`` if no initial values where
given.
"""
if hasattr(form, 'initial'):
return form.initial.get(name, None)
return None
|
gregmuellegger/django-superform
|
django_superform/fields.py
|
FormField.get_form
|
python
|
def get_form(self, form, name):
kwargs = self.get_kwargs(form, name)
form_class = self.get_form_class(form, name)
composite_form = form_class(
data=form.data if form.is_bound else None,
files=form.files if form.is_bound else None,
**kwargs)
return composite_form
|
Get an instance of the form.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L170-L180
|
[
"def get_kwargs(self, form, name):\n \"\"\"\n Return the keyword arguments that are used to instantiate the formset.\n \"\"\"\n kwargs = {\n 'prefix': self.get_prefix(form, name),\n 'initial': self.get_initial(form, name),\n }\n kwargs.update(self.default_kwargs)\n return kwargs\n",
"def get_form_class(self, form, name):\n \"\"\"\n Return the form class that will be used for instantiation in\n ``get_form``. You can override this method in subclasses to change\n the behaviour of the given form class.\n \"\"\"\n return self.form_class\n"
] |
class FormField(CompositeField):
"""
A field that can be used to nest a form inside another form::
from django import forms
from django_superform import SuperForm
class AddressForm(forms.Form):
street = forms.CharField()
city = forms.CharField()
class RegistrationForm(SuperForm):
first_name = forms.CharField()
last_name = forms.CharField()
address = FormField(AddressForm)
You can then display the fields in the template with (given that
``registration_form`` is an instance of ``RegistrationForm``)::
{{ registration_form.address.street }}
{{ registration_form.address.street.errors }}
{{ registration_form.address.city }}
{{ registration_form.address.city.errors }}
The fields will all have a prefix in their name so that the naming does not
clash with other fields on the page. The name attribute of the input tag
for the ``street`` field in this example will be: ``form-address-street``.
The name will change if you set a prefix on the superform::
form = RegistrationForm(prefix='registration')
Then the field name will be ``registration-form-address-street``.
You can pass the ``kwargs`` argument to the ``__init__`` method in order to
give keyword arguments that you want to pass through to the form when it is
instaniated. So you could use this to pass in initial values::
class RegistrationForm(SuperForm):
address = FormField(AddressForm, kwargs={
'initial': {'street': 'Stairway to Heaven 1'}
})
But you can also use nested initial values which you pass into the
superform::
RegistrationForm(initial={
'address': {'street': 'Highway to Hell 666'}
})
The first method (using ``kwargs``) will take precedence.
"""
prefix_name = 'form'
widget = FormWidget
def __init__(self, form_class, kwargs=None, **field_kwargs):
super(FormField, self).__init__(**field_kwargs)
self.form_class = form_class
if kwargs is None:
kwargs = {}
self.default_kwargs = kwargs
def get_form_class(self, form, name):
"""
Return the form class that will be used for instantiation in
``get_form``. You can override this method in subclasses to change
the behaviour of the given form class.
"""
return self.form_class
|
gregmuellegger/django-superform
|
django_superform/fields.py
|
ModelFormField.get_kwargs
|
python
|
def get_kwargs(self, form, name):
kwargs = super(ModelFormField, self).get_kwargs(form, name)
instance = self.get_instance(form, name)
kwargs.setdefault('instance', instance)
kwargs.setdefault('empty_permitted', not self.required)
return kwargs
|
Return the keyword arguments that are used to instantiate the formset.
The ``instance`` kwarg will be set to the value returned by
:meth:`~django_superform.fields.ModelFormField.get_instance`. The
``empty_permitted`` kwarg will be set to the inverse of the
``required`` argument passed into the constructor of this field.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L238-L251
|
[
"def get_kwargs(self, form, name):\n \"\"\"\n Return the keyword arguments that are used to instantiate the formset.\n \"\"\"\n kwargs = {\n 'prefix': self.get_prefix(form, name),\n 'initial': self.get_initial(form, name),\n }\n kwargs.update(self.default_kwargs)\n return kwargs\n",
"def get_instance(self, form, name):\n \"\"\"\n Provide an instance that shall be used when instantiating the\n modelform. The ``form`` argument is the super-form instance that this\n ``ModelFormField`` is used in. ``name`` is the name of this field on\n the super-form.\n\n This returns ``None`` by default. So you usually want to override this\n method in a subclass.\n \"\"\"\n return None\n",
"def get_instance(self, form, name):\n field_name = self.get_field_name(form, name)\n return getattr(form.instance, field_name)\n"
] |
class ModelFormField(FormField):
"""
This class is the to :class:`~django_superform.fields.FormField` what
Django's :class:`ModelForm` is to :class:`Form`. It has the same behaviour
as :class:`~django_superform.fields.FormField` but will also save the
nested form if the super form is saved. Here is an example::
from django_superform import ModelFormField
class EmailForm(forms.ModelForm):
class Meta:
model = EmailAddress
fields = ('email',)
class UserForm(SuperModelForm):
email = ModelFormField(EmailForm)
class Meta:
model = User
fields = ('username',)
user_form = UserForm(
{'username': 'john', 'form-email-email': 'john@example.com'})
if user_form.is_valid():
user_form.save()
This will save the ``user_form`` and create a new instance of ``User``
model and it will also save the ``EmailForm`` and therefore create an
instance of ``EmailAddress``!
However you usually want to use one of the exsting subclasses, like
:class:`~django_superform.fields.ForeignKeyFormField` or extend from
``ModelFormField`` class and override the
:meth:`~django_superform.fields.ModelFormField.get_instance` method.
.. note::
Usually the :class:`~django_superform.fields.ModelFormField` is used
inside a :class:`~django_superform.forms.SuperModelForm`. You actually
can use it within a :class:`~django_superform.forms.SuperForm`, but
since this form type does not have a ``save()`` method, you will need
to take care of saving the nested model form yourself.
"""
def get_instance(self, form, name):
"""
Provide an instance that shall be used when instantiating the
modelform. The ``form`` argument is the super-form instance that this
``ModelFormField`` is used in. ``name`` is the name of this field on
the super-form.
This returns ``None`` by default. So you usually want to override this
method in a subclass.
"""
return None
def shall_save(self, form, name, composite_form):
"""
Return ``True`` if the given ``composite_form`` (the nested form of
this field) shall be saved. Return ``False`` if the form shall not be
saved together with the super-form.
By default it will return ``False`` if the form was not changed and the
``empty_permitted`` argument for the form was set to ``True``. That way
you can allow empty forms.
"""
if composite_form.empty_permitted and not composite_form.has_changed():
return False
return True
def save(self, form, name, composite_form, commit):
"""
This method is called by
:meth:`django_superform.forms.SuperModelForm.save` in order to save the
modelform that this field takes care of and calls on the nested form's
``save()`` method. But only if
:meth:`~django_superform.fields.ModelFormField.shall_save` returns
``True``.
"""
if self.shall_save(form, name, composite_form):
return composite_form.save(commit=commit)
return None
|
gregmuellegger/django-superform
|
django_superform/fields.py
|
ModelFormField.shall_save
|
python
|
def shall_save(self, form, name, composite_form):
if composite_form.empty_permitted and not composite_form.has_changed():
return False
return True
|
Return ``True`` if the given ``composite_form`` (the nested form of
this field) shall be saved. Return ``False`` if the form shall not be
saved together with the super-form.
By default it will return ``False`` if the form was not changed and the
``empty_permitted`` argument for the form was set to ``True``. That way
you can allow empty forms.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L253-L265
| null |
class ModelFormField(FormField):
"""
This class is the to :class:`~django_superform.fields.FormField` what
Django's :class:`ModelForm` is to :class:`Form`. It has the same behaviour
as :class:`~django_superform.fields.FormField` but will also save the
nested form if the super form is saved. Here is an example::
from django_superform import ModelFormField
class EmailForm(forms.ModelForm):
class Meta:
model = EmailAddress
fields = ('email',)
class UserForm(SuperModelForm):
email = ModelFormField(EmailForm)
class Meta:
model = User
fields = ('username',)
user_form = UserForm(
{'username': 'john', 'form-email-email': 'john@example.com'})
if user_form.is_valid():
user_form.save()
This will save the ``user_form`` and create a new instance of ``User``
model and it will also save the ``EmailForm`` and therefore create an
instance of ``EmailAddress``!
However you usually want to use one of the exsting subclasses, like
:class:`~django_superform.fields.ForeignKeyFormField` or extend from
``ModelFormField`` class and override the
:meth:`~django_superform.fields.ModelFormField.get_instance` method.
.. note::
Usually the :class:`~django_superform.fields.ModelFormField` is used
inside a :class:`~django_superform.forms.SuperModelForm`. You actually
can use it within a :class:`~django_superform.forms.SuperForm`, but
since this form type does not have a ``save()`` method, you will need
to take care of saving the nested model form yourself.
"""
def get_instance(self, form, name):
"""
Provide an instance that shall be used when instantiating the
modelform. The ``form`` argument is the super-form instance that this
``ModelFormField`` is used in. ``name`` is the name of this field on
the super-form.
This returns ``None`` by default. So you usually want to override this
method in a subclass.
"""
return None
def get_kwargs(self, form, name):
"""
Return the keyword arguments that are used to instantiate the formset.
The ``instance`` kwarg will be set to the value returned by
:meth:`~django_superform.fields.ModelFormField.get_instance`. The
``empty_permitted`` kwarg will be set to the inverse of the
``required`` argument passed into the constructor of this field.
"""
kwargs = super(ModelFormField, self).get_kwargs(form, name)
instance = self.get_instance(form, name)
kwargs.setdefault('instance', instance)
kwargs.setdefault('empty_permitted', not self.required)
return kwargs
def save(self, form, name, composite_form, commit):
"""
This method is called by
:meth:`django_superform.forms.SuperModelForm.save` in order to save the
modelform that this field takes care of and calls on the nested form's
``save()`` method. But only if
:meth:`~django_superform.fields.ModelFormField.shall_save` returns
``True``.
"""
if self.shall_save(form, name, composite_form):
return composite_form.save(commit=commit)
return None
|
gregmuellegger/django-superform
|
django_superform/fields.py
|
ModelFormField.save
|
python
|
def save(self, form, name, composite_form, commit):
if self.shall_save(form, name, composite_form):
return composite_form.save(commit=commit)
return None
|
This method is called by
:meth:`django_superform.forms.SuperModelForm.save` in order to save the
modelform that this field takes care of and calls on the nested form's
``save()`` method. But only if
:meth:`~django_superform.fields.ModelFormField.shall_save` returns
``True``.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L267-L278
|
[
"def shall_save(self, form, name, composite_form):\n \"\"\"\n Return ``True`` if the given ``composite_form`` (the nested form of\n this field) shall be saved. Return ``False`` if the form shall not be\n saved together with the super-form.\n\n By default it will return ``False`` if the form was not changed and the\n ``empty_permitted`` argument for the form was set to ``True``. That way\n you can allow empty forms.\n \"\"\"\n if composite_form.empty_permitted and not composite_form.has_changed():\n return False\n return True\n"
] |
class ModelFormField(FormField):
"""
This class is the to :class:`~django_superform.fields.FormField` what
Django's :class:`ModelForm` is to :class:`Form`. It has the same behaviour
as :class:`~django_superform.fields.FormField` but will also save the
nested form if the super form is saved. Here is an example::
from django_superform import ModelFormField
class EmailForm(forms.ModelForm):
class Meta:
model = EmailAddress
fields = ('email',)
class UserForm(SuperModelForm):
email = ModelFormField(EmailForm)
class Meta:
model = User
fields = ('username',)
user_form = UserForm(
{'username': 'john', 'form-email-email': 'john@example.com'})
if user_form.is_valid():
user_form.save()
This will save the ``user_form`` and create a new instance of ``User``
model and it will also save the ``EmailForm`` and therefore create an
instance of ``EmailAddress``!
However you usually want to use one of the exsting subclasses, like
:class:`~django_superform.fields.ForeignKeyFormField` or extend from
``ModelFormField`` class and override the
:meth:`~django_superform.fields.ModelFormField.get_instance` method.
.. note::
Usually the :class:`~django_superform.fields.ModelFormField` is used
inside a :class:`~django_superform.forms.SuperModelForm`. You actually
can use it within a :class:`~django_superform.forms.SuperForm`, but
since this form type does not have a ``save()`` method, you will need
to take care of saving the nested model form yourself.
"""
def get_instance(self, form, name):
"""
Provide an instance that shall be used when instantiating the
modelform. The ``form`` argument is the super-form instance that this
``ModelFormField`` is used in. ``name`` is the name of this field on
the super-form.
This returns ``None`` by default. So you usually want to override this
method in a subclass.
"""
return None
def get_kwargs(self, form, name):
"""
Return the keyword arguments that are used to instantiate the formset.
The ``instance`` kwarg will be set to the value returned by
:meth:`~django_superform.fields.ModelFormField.get_instance`. The
``empty_permitted`` kwarg will be set to the inverse of the
``required`` argument passed into the constructor of this field.
"""
kwargs = super(ModelFormField, self).get_kwargs(form, name)
instance = self.get_instance(form, name)
kwargs.setdefault('instance', instance)
kwargs.setdefault('empty_permitted', not self.required)
return kwargs
def shall_save(self, form, name, composite_form):
"""
Return ``True`` if the given ``composite_form`` (the nested form of
this field) shall be saved. Return ``False`` if the form shall not be
saved together with the super-form.
By default it will return ``False`` if the form was not changed and the
``empty_permitted`` argument for the form was set to ``True``. That way
you can allow empty forms.
"""
if composite_form.empty_permitted and not composite_form.has_changed():
return False
return True
|
gregmuellegger/django-superform
|
django_superform/fields.py
|
ForeignKeyFormField.allow_blank
|
python
|
def allow_blank(self, form, name):
if self.blank is not None:
return self.blank
model = form._meta.model
field = model._meta.get_field(self.get_field_name(form, name))
return field.blank
|
Allow blank determines if the form might be completely empty. If it's
empty it will result in a None as the saved value for the ForeignKey.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L301-L310
|
[
"def get_field_name(self, form, name):\n return self.field_name or name\n"
] |
class ForeignKeyFormField(ModelFormField):
def __init__(self, form_class, kwargs=None, field_name=None, blank=None,
**field_kwargs):
super(ForeignKeyFormField, self).__init__(form_class, kwargs,
**field_kwargs)
self.field_name = field_name
self.blank = blank
def get_kwargs(self, form, name):
kwargs = super(ForeignKeyFormField, self).get_kwargs(form, name)
if 'instance' not in kwargs:
kwargs.setdefault('instance', self.get_instance(form, name))
if 'empty_permitted' not in kwargs:
if self.allow_blank(form, name):
kwargs['empty_permitted'] = True
return kwargs
def get_field_name(self, form, name):
return self.field_name or name
def get_form_class(self, form, name):
form_class = self.form_class
return form_class
def get_instance(self, form, name):
field_name = self.get_field_name(form, name)
return getattr(form.instance, field_name)
def save(self, form, name, composite_form, commit):
# Support the ``empty_permitted`` attribute. This is set if the field
# is ``blank=True`` .
if composite_form.empty_permitted and not composite_form.has_changed():
saved_obj = composite_form.instance
else:
saved_obj = super(ForeignKeyFormField, self).save(form, name,
composite_form,
commit)
setattr(form.instance, self.get_field_name(form, name), saved_obj)
if commit:
form.instance.save()
else:
raise NotImplementedError(
'ForeignKeyFormField cannot yet be used with non-commiting '
'form saves.')
return saved_obj
|
gregmuellegger/django-superform
|
django_superform/fields.py
|
FormSetField.get_formset
|
python
|
def get_formset(self, form, name):
kwargs = self.get_kwargs(form, name)
formset_class = self.get_formset_class(form, name)
formset = formset_class(
form.data if form.is_bound else None,
form.files if form.is_bound else None,
**kwargs)
return formset
|
Get an instance of the formset.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L367-L377
|
[
"def get_kwargs(self, form, name):\n \"\"\"\n Return the keyword arguments that are used to instantiate the formset.\n \"\"\"\n kwargs = {\n 'prefix': self.get_prefix(form, name),\n 'initial': self.get_initial(form, name),\n }\n kwargs.update(self.default_kwargs)\n return kwargs\n",
"def get_formset_class(self, form, name):\n \"\"\"\n Return the formset class that will be used for instantiation in\n ``get_formset``. You can override this method in subclasses to change\n the behaviour of the given formset class.\n \"\"\"\n return self.formset_class\n"
] |
class FormSetField(CompositeField):
"""
First argument is a formset class that is instantiated by this
FormSetField.
You can pass the ``kwargs`` argument to specify kwargs values that
are used when the ``formset_class`` is instantiated.
"""
prefix_name = 'formset'
widget = FormSetWidget
def __init__(self, formset_class, kwargs=None, **field_kwargs):
super(FormSetField, self).__init__(**field_kwargs)
self.formset_class = formset_class
if kwargs is None:
kwargs = {}
self.default_kwargs = kwargs
def get_formset_class(self, form, name):
"""
Return the formset class that will be used for instantiation in
``get_formset``. You can override this method in subclasses to change
the behaviour of the given formset class.
"""
return self.formset_class
|
gregmuellegger/django-superform
|
django_superform/fields.py
|
InlineFormSetField.get_formset_class
|
python
|
def get_formset_class(self, form, name):
if self.formset_class is not None:
return self.formset_class
formset_class = inlineformset_factory(
self.get_parent_model(form, name),
self.get_model(form, name),
**self.formset_factory_kwargs)
return formset_class
|
Either return the formset class that was provided as argument to the
__init__ method, or build one based on the ``parent_model`` and
``model`` attributes.
|
train
|
https://github.com/gregmuellegger/django-superform/blob/5f389911ad38932b6dad184cc7fa81f27db752f9/django_superform/fields.py#L479-L491
|
[
"def get_model(self, form, name):\n return self.model\n",
"def get_parent_model(self, form, name):\n if self.parent_model is not None:\n return self.parent_model\n return form._meta.model\n"
] |
class InlineFormSetField(ModelFormSetField):
    """
    A field for embedding an inline formset in a super-form.

    Either hand in a ready-made ``formset_class`` (subclassing
    ``BaseInlineFormSet`` or produced by ``inlineformset_factory``), or let
    this field build one lazily from:

    ``model``
        The model class represented by the forms in the formset.
    ``parent_model``
        The model referenced by ``model`` through a foreign key.
    ``form`` (optional)
        The model form used as base class for the forms in the formset.

    ``kwargs`` holds extra keyword arguments applied when the formset is
    instantiated. All remaining keyword arguments (``extra``, ``max_num``,
    ...) are forwarded to ``inlineformset_factory``.

    Example:

        class GalleryForm(ModelFormWithFormSets):
            class Meta:
                model = Gallery
                fields = ('name',)

            images = InlineFormSetField(
                parent_model=Gallery,
                model=Image,
                extra=1)
    """

    def __init__(self, parent_model=None, model=None, formset_class=None,
                 kwargs=None, **factory_kwargs):
        """
        Provide either the ``formset_class`` or the ``model`` argument; in
        the latter case the formset class is created on demand through
        ``inlineformset_factory``.
        """
        # Separate the generic field options from the factory options so
        # that both __init__ chains receive exactly what they expect.
        field_kwargs = {}
        for option in ('required', 'widget', 'label', 'help_text', 'localize'):
            if option in factory_kwargs:
                field_kwargs[option] = factory_kwargs.pop(option)
        self.parent_model = parent_model
        self.model = model
        self.formset_factory_kwargs = factory_kwargs
        super(InlineFormSetField, self).__init__(formset_class, kwargs=kwargs,
                                                 **field_kwargs)
        # Without a formset class we must be able to build one, which
        # requires at least one of form/fields/exclude.
        missing_factory_args = (
            self.formset_class is None and
            not any(key in self.formset_factory_kwargs
                    for key in ('form', 'fields', 'exclude')))
        if missing_factory_args:
            raise ValueError(
                'You need to either specify the `formset_class` argument or '
                'one of `form`/`fields`/`exclude` arguments '
                'when creating a {0}.'
                .format(self.__class__.__name__))

    def get_model(self, form, name):
        """Return the model edited by the inline forms."""
        return self.model

    def get_parent_model(self, form, name):
        """Return the parent model, falling back to the outer form's model."""
        if self.parent_model is not None:
            return self.parent_model
        return form._meta.model

    def get_kwargs(self, form, name):
        """Extend the formset kwargs with the outer form's instance."""
        kwargs = super(InlineFormSetField, self).get_kwargs(form, name)
        kwargs.setdefault('instance', form.instance)
        return kwargs
|
s1s1ty/py-jsonq
|
pyjsonq/query.py
|
JsonQ.__parse_json_data
|
python
|
def __parse_json_data(self, data):
    """Store *data* as the raw JSON and take a deep working copy of it.

    :@param data
    :@type data: json/dict
    :throws TypeError
    """
    if not isinstance(data, (dict, list)):
        raise TypeError("Provided Data is not json")
    self._raw_data = data
    # Deep copy so later queries never mutate the caller's data.
    self._json_data = copy.deepcopy(self._raw_data)
|
Process Json data
:@param data
:@type data: json/dict
:throws TypeError
|
train
|
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L32-L44
| null |
class JsonQ(object):
    """Query over JSON data loaded from a file or supplied directly.

    ``where*`` calls only record conditions; they are applied lazily the
    next time a terminal method (``get``, ``count``, ``first``, ...) runs.
    Conditions recorded through ``where`` are AND-ed inside a group, and
    each ``or_where`` opens a new OR-group.
    """

    def __init__(self, file_path="", data=None):
        """
        :@param file_path: Set main json file path
        :@type file_path: string
        :@param data: optional pre-parsed JSON data (dict or list)
        :@type data: dict/list
        """
        if file_path != "":
            self.from_file(file_path)
        if data:
            # NOTE(review): __parse_json_data is not defined in this class
            # body -- it is expected to be provided elsewhere; confirm.
            self.__parse_json_data(data)
        self.__reset_queries()
        self._matcher = Matcher()

    def __reset_queries(self):
        """Drop all recorded where-clauses and reset the OR-group index."""
        self._queries = []
        self._current_query_index = 0

    def __parse_json_file(self, file_path):
        """Load JSON from a ``.json`` file into the raw and working data.

        :@param file_path
        :@type file_path: string
        :@throws IOError
        """
        if file_path == '' or os.path.splitext(file_path)[1] != '.json':
            raise IOError('Invalid Json file')
        with open(file_path) as json_file:
            self._raw_data = json.load(json_file)
            # Deep copy so queries never mutate the raw data.
            self._json_data = copy.deepcopy(self._raw_data)

    def __get_value_from_data(self, key, data):
        """Resolve one path segment: digit keys index lists, others dicts.

        :@param key
        :@type key: string
        :@param data
        :@type data: dict/list
        :@return object
        :@throws KeyError
        """
        if key.isdigit():
            return data[int(key)]
        if key not in data:
            raise KeyError("Key not exists")
        return data.get(key)

    def get(self):
        """Apply any pending queries and return the prepared data.

        :@return object
        """
        self.__prepare()
        return self._json_data

    def from_file(self, file_path):
        """Set main json file path.

        :@param file_path
        :@type file_path: string
        :@throws IOError
        """
        self.__parse_json_file(file_path)
        return self

    def at(self, root):
        """Descend to the dotted *root* path before querying.

        :@param root
        :@type root: string
        :@return self
        :@throws KeyError
        """
        for leaf in root.strip(" ").split('.'):
            if leaf:
                self._json_data = self.__get_value_from_data(leaf, self._json_data)
        return self

    def clone(self):
        """Return a deep copy of the current working data."""
        return copy.deepcopy(self._json_data)

    def reset(self, data=None):
        """Reset the working data to *data*, or back to the raw JSON.

        :@param data: optional replacement data (dict or list)
        :@type data: json/dict
        :@return self
        """
        if data and isinstance(data, (dict, list)):
            self._json_data = data
        else:
            self._json_data = copy.deepcopy(self._raw_data)
        self.__reset_queries()
        return self

    def __store_query(self, query_items):
        """Append one condition to the current OR-group.

        :@param query_items
        :@type query_items: dict
        """
        index = self._current_query_index
        if len(self._queries) - 1 < index:
            self._queries.append([])
        self._queries[index].append(query_items)

    def __prepare(self):
        """Run any stored queries once, then clear them."""
        if len(self._queries) > 0:
            self.__execute_queries()
            self.__reset_queries()

    def __execute_queries(self):
        """Filter the working data: AND within a group, OR across groups."""
        def matches(item):
            or_check = False
            for group in self._queries:
                and_check = True
                for query in group:
                    and_check &= self._matcher._match(
                        item.get(query.get('key'), None),
                        query.get('operator'),
                        query.get('value')
                    )
                or_check |= and_check
            return or_check
        self._json_data = list(filter(matches, self._json_data))

    # ---------- Query Methods ------------- #
    def where(self, key, operator, value):
        """Record an AND condition in the current group.

        :@param key
        :@param operator
        :@param value
        :@type key,operator,value: string
        :@return self
        """
        self.__store_query({"key": key, "operator": operator, "value": value})
        return self

    def or_where(self, key, operator, value):
        """Open a new OR-group starting with the given condition.

        :@type key, operator, value: string
        :@return self
        """
        if len(self._queries) > 0:
            self._current_query_index += 1
        self.__store_query({"key": key, "operator": operator, "value": value})
        return self

    def where_in(self, key, value):
        """Shortcut for ``where(key, 'in', value)``."""
        return self.where(key, 'in', value)

    def where_not_in(self, key, value):
        """Shortcut for ``where(key, 'notin', value)``."""
        return self.where(key, 'notin', value)

    def where_null(self, key):
        """Match items whose *key* equals the string 'None'."""
        return self.where(key, '=', 'None')

    def where_not_null(self, key):
        """Match items whose *key* does not equal the string 'None'."""
        return self.where(key, '!=', 'None')

    def where_start_with(self, key, value):
        """Shortcut for ``where(key, 'startswith', value)``."""
        return self.where(key, 'startswith', value)

    def where_end_with(self, key, value):
        """Shortcut for ``where(key, 'endswith', value)``."""
        return self.where(key, 'endswith', value)

    def where_contains(self, key, value):
        """Shortcut for ``where(key, 'contains', value)``."""
        return self.where(key, 'contains', value)

    # ---------- Aggregate Methods ------------- #
    def count(self):
        """Number of items after applying pending queries.

        :@return int
        """
        self.__prepare()
        return len(self._json_data)

    def size(self):
        """Alias of :meth:`count`.

        :@return int
        """
        self.__prepare()
        return len(self._json_data)

    def first(self):
        """First element of the prepared data, or None when empty."""
        self.__prepare()
        return self._json_data[0] if self._json_data else None

    def last(self):
        """Last element of the prepared data, or None when empty."""
        self.__prepare()
        return self._json_data[-1] if self._json_data else None

    def nth(self, index):
        """Element at *index* (negative allowed), or None when out of range.

        Fixed: the previous bounds check let ``index == len(data)`` through
        and raised IndexError instead of returning None.

        :@param index
        :@type index: int
        :@return object
        """
        self.__prepare()
        try:
            return self._json_data[index]
        except IndexError:
            return None

    def sum(self, property):
        """Sum of *property* across the prepared items.

        :@param property
        :@type property: string
        :@return int/float
        """
        self.__prepare()
        return sum(item.get(property) for item in self._json_data)

    def max(self, property):
        """Largest value of *property* among the prepared items.

        :@param property
        :@type property: string
        :@return object
        :@throws KeyError
        """
        self.__prepare()
        try:
            return max(self._json_data, key=lambda x: x[property]).get(property)
        except KeyError:
            raise KeyError("Key is not exists")

    def min(self, property):
        """Smallest value of *property* among the prepared items.

        :@param property
        :@type property: string
        :@return object
        :@throws KeyError
        """
        self.__prepare()
        try:
            return min(self._json_data, key=lambda x: x[property]).get(property)
        except KeyError:
            raise KeyError("Key is not exists")

    def avg(self, property):
        """Average of *property*; raises ZeroDivisionError on empty data.

        :@param property
        :@type property: string
        :@return int/float
        """
        self.__prepare()
        return self.sum(property) / self.count()

    def chunk(self, size=0):
        """Split the prepared list into consecutive chunks of *size*.

        Fixed: non-positive sizes now raise instead of looping forever for
        negative values.

        :@param size: chunk length, must be > 0
        :@type size: integer
        :@return list of lists
        :@throws ValueError
        """
        if size <= 0:
            raise ValueError('Invalid chunk size')
        self.__prepare()
        self._json_data = [
            self._json_data[start:start + size]
            for start in range(0, len(self._json_data), size)
        ]
        return self._json_data

    def group_by(self, property):
        """Group the prepared items into a dict keyed by *property*.

        :@param property
        :@type property: string
        :@return self
        """
        self.__prepare()
        grouped = {}
        for item in self._json_data:
            grouped.setdefault(item[property], []).append(item)
        self._json_data = grouped
        return self

    def sort(self, order="asc"):
        """Sort the prepared list (ascending unless ``order != 'asc'``).

        :@param order: "asc"
        :@type order: string
        :@return self
        """
        self.__prepare()
        if isinstance(self._json_data, list):
            self._json_data = sorted(self._json_data, reverse=(order != "asc"))
        return self

    def sort_by(self, property, order="asc"):
        """Sort the prepared list of dicts by *property*.

        :@param property, order: "asc"
        :@type property, order: string
        :@return self
        """
        self.__prepare()
        if isinstance(self._json_data, list):
            self._json_data = sorted(
                self._json_data,
                key=lambda item: item.get(property),
                reverse=(order != "asc")
            )
        return self
|
s1s1ty/py-jsonq
|
pyjsonq/query.py
|
JsonQ.__parse_json_file
|
python
|
def __parse_json_file(self, file_path):
    """Load JSON from a ``.json`` file into raw and working data.

    :@param file_path
    :@type file_path: string
    :@throws IOError
    """
    if file_path == '' or os.path.splitext(file_path)[1] != '.json':
        raise IOError('Invalid Json file')
    with open(file_path) as json_file:
        self._raw_data = json.load(json_file)
        # Deep copy so later queries never mutate the raw data.
        self._json_data = copy.deepcopy(self._raw_data)
|
Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError
|
train
|
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L46-L60
| null |
class JsonQ(object):
    """Query over JSON data loaded from a file or supplied directly.

    ``where*`` calls only record conditions; they are applied lazily the
    next time a terminal method (``get``, ``count``, ``first``, ...) runs.
    Conditions recorded through ``where`` are AND-ed inside a group, and
    each ``or_where`` opens a new OR-group.
    """

    def __init__(self, file_path="", data=None):
        """
        :@param file_path: Set main json file path
        :@type file_path: string
        :@param data: optional pre-parsed JSON data (dict or list)
        :@type data: dict/list
        """
        if file_path != "":
            self.from_file(file_path)
        if data:
            self.__parse_json_data(data)
        self.__reset_queries()
        self._matcher = Matcher()

    def __reset_queries(self):
        """Drop all recorded where-clauses and reset the OR-group index."""
        self._queries = []
        self._current_query_index = 0

    def __parse_json_data(self, data):
        """Store *data* as raw JSON and take a deep working copy.

        :@param data
        :@type data: json/dict
        :throws TypeError
        """
        if not isinstance(data, (dict, list)):
            raise TypeError("Provided Data is not json")
        self._raw_data = data
        # Deep copy so queries never mutate the caller's data.
        self._json_data = copy.deepcopy(self._raw_data)

    def __get_value_from_data(self, key, data):
        """Resolve one path segment: digit keys index lists, others dicts.

        :@param key
        :@type key: string
        :@param data
        :@type data: dict/list
        :@return object
        :@throws KeyError
        """
        if key.isdigit():
            return data[int(key)]
        if key not in data:
            raise KeyError("Key not exists")
        return data.get(key)

    def get(self):
        """Apply any pending queries and return the prepared data.

        :@return object
        """
        self.__prepare()
        return self._json_data

    def from_file(self, file_path):
        """Set main json file path.

        :@param file_path
        :@type file_path: string
        """
        # NOTE(review): __parse_json_file is not defined in this class body;
        # it is expected to be provided elsewhere -- confirm.
        self.__parse_json_file(file_path)
        return self

    def at(self, root):
        """Descend to the dotted *root* path before querying.

        :@param root
        :@type root: string
        :@return self
        :@throws KeyError
        """
        for leaf in root.strip(" ").split('.'):
            if leaf:
                self._json_data = self.__get_value_from_data(leaf, self._json_data)
        return self

    def clone(self):
        """Return a deep copy of the current working data."""
        return copy.deepcopy(self._json_data)

    def reset(self, data=None):
        """Reset the working data to *data*, or back to the raw JSON.

        :@param data: optional replacement data (dict or list)
        :@type data: json/dict
        :@return self
        """
        if data and isinstance(data, (dict, list)):
            self._json_data = data
        else:
            self._json_data = copy.deepcopy(self._raw_data)
        self.__reset_queries()
        return self

    def __store_query(self, query_items):
        """Append one condition to the current OR-group.

        :@param query_items
        :@type query_items: dict
        """
        index = self._current_query_index
        if len(self._queries) - 1 < index:
            self._queries.append([])
        self._queries[index].append(query_items)

    def __prepare(self):
        """Run any stored queries once, then clear them."""
        if len(self._queries) > 0:
            self.__execute_queries()
            self.__reset_queries()

    def __execute_queries(self):
        """Filter the working data: AND within a group, OR across groups."""
        def matches(item):
            or_check = False
            for group in self._queries:
                and_check = True
                for query in group:
                    and_check &= self._matcher._match(
                        item.get(query.get('key'), None),
                        query.get('operator'),
                        query.get('value')
                    )
                or_check |= and_check
            return or_check
        self._json_data = list(filter(matches, self._json_data))

    # ---------- Query Methods ------------- #
    def where(self, key, operator, value):
        """Record an AND condition in the current group.

        :@param key
        :@param operator
        :@param value
        :@type key,operator,value: string
        :@return self
        """
        self.__store_query({"key": key, "operator": operator, "value": value})
        return self

    def or_where(self, key, operator, value):
        """Open a new OR-group starting with the given condition.

        :@type key, operator, value: string
        :@return self
        """
        if len(self._queries) > 0:
            self._current_query_index += 1
        self.__store_query({"key": key, "operator": operator, "value": value})
        return self

    def where_in(self, key, value):
        """Shortcut for ``where(key, 'in', value)``."""
        return self.where(key, 'in', value)

    def where_not_in(self, key, value):
        """Shortcut for ``where(key, 'notin', value)``."""
        return self.where(key, 'notin', value)

    def where_null(self, key):
        """Match items whose *key* equals the string 'None'."""
        return self.where(key, '=', 'None')

    def where_not_null(self, key):
        """Match items whose *key* does not equal the string 'None'."""
        return self.where(key, '!=', 'None')

    def where_start_with(self, key, value):
        """Shortcut for ``where(key, 'startswith', value)``."""
        return self.where(key, 'startswith', value)

    def where_end_with(self, key, value):
        """Shortcut for ``where(key, 'endswith', value)``."""
        return self.where(key, 'endswith', value)

    def where_contains(self, key, value):
        """Shortcut for ``where(key, 'contains', value)``."""
        return self.where(key, 'contains', value)

    # ---------- Aggregate Methods ------------- #
    def count(self):
        """Number of items after applying pending queries.

        :@return int
        """
        self.__prepare()
        return len(self._json_data)

    def size(self):
        """Alias of :meth:`count`.

        :@return int
        """
        self.__prepare()
        return len(self._json_data)

    def first(self):
        """First element of the prepared data, or None when empty."""
        self.__prepare()
        return self._json_data[0] if self._json_data else None

    def last(self):
        """Last element of the prepared data, or None when empty."""
        self.__prepare()
        return self._json_data[-1] if self._json_data else None

    def nth(self, index):
        """Element at *index* (negative allowed), or None when out of range.

        Fixed: the previous bounds check let ``index == len(data)`` through
        and raised IndexError instead of returning None.

        :@param index
        :@type index: int
        :@return object
        """
        self.__prepare()
        try:
            return self._json_data[index]
        except IndexError:
            return None

    def sum(self, property):
        """Sum of *property* across the prepared items.

        :@param property
        :@type property: string
        :@return int/float
        """
        self.__prepare()
        return sum(item.get(property) for item in self._json_data)

    def max(self, property):
        """Largest value of *property* among the prepared items.

        :@param property
        :@type property: string
        :@return object
        :@throws KeyError
        """
        self.__prepare()
        try:
            return max(self._json_data, key=lambda x: x[property]).get(property)
        except KeyError:
            raise KeyError("Key is not exists")

    def min(self, property):
        """Smallest value of *property* among the prepared items.

        :@param property
        :@type property: string
        :@return object
        :@throws KeyError
        """
        self.__prepare()
        try:
            return min(self._json_data, key=lambda x: x[property]).get(property)
        except KeyError:
            raise KeyError("Key is not exists")

    def avg(self, property):
        """Average of *property*; raises ZeroDivisionError on empty data.

        :@param property
        :@type property: string
        :@return int/float
        """
        self.__prepare()
        return self.sum(property) / self.count()

    def chunk(self, size=0):
        """Split the prepared list into consecutive chunks of *size*.

        Fixed: non-positive sizes now raise instead of looping forever for
        negative values.

        :@param size: chunk length, must be > 0
        :@type size: integer
        :@return list of lists
        :@throws ValueError
        """
        if size <= 0:
            raise ValueError('Invalid chunk size')
        self.__prepare()
        self._json_data = [
            self._json_data[start:start + size]
            for start in range(0, len(self._json_data), size)
        ]
        return self._json_data

    def group_by(self, property):
        """Group the prepared items into a dict keyed by *property*.

        :@param property
        :@type property: string
        :@return self
        """
        self.__prepare()
        grouped = {}
        for item in self._json_data:
            grouped.setdefault(item[property], []).append(item)
        self._json_data = grouped
        return self

    def sort(self, order="asc"):
        """Sort the prepared list (ascending unless ``order != 'asc'``).

        :@param order: "asc"
        :@type order: string
        :@return self
        """
        self.__prepare()
        if isinstance(self._json_data, list):
            self._json_data = sorted(self._json_data, reverse=(order != "asc"))
        return self

    def sort_by(self, property, order="asc"):
        """Sort the prepared list of dicts by *property*.

        :@param property, order: "asc"
        :@type property, order: string
        :@return self
        """
        self.__prepare()
        if isinstance(self._json_data, list):
            self._json_data = sorted(
                self._json_data,
                key=lambda item: item.get(property),
                reverse=(order != "asc")
            )
        return self
|
s1s1ty/py-jsonq
|
pyjsonq/query.py
|
JsonQ.__get_value_from_data
|
python
|
def __get_value_from_data(self, key, data):
    """Resolve *key* against *data*: digit keys index lists, all other
    keys look up dict entries.

    :@pram key
    :@type: string
    :@pram data
    :@type data: dict
    :@return object
    :@throws KeyError
    """
    if key.isdigit():
        return data[int(key)]
    if key in data:
        return data[key]
    raise KeyError("Key not exists")
|
Find value from json data
:@pram key
:@type: string
:@pram data
:@type data: dict
:@return object
:@throws KeyError
|
train
|
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L62-L80
| null |
class JsonQ(object):
    """Query over JSON data loaded from a file or supplied directly.

    ``where*`` calls only record conditions; they are applied lazily the
    next time a terminal method (``get``, ``count``, ``first``, ...) runs.
    Conditions recorded through ``where`` are AND-ed inside a group, and
    each ``or_where`` opens a new OR-group.
    """

    def __init__(self, file_path="", data=None):
        """
        :@param file_path: Set main json file path
        :@type file_path: string
        :@param data: optional pre-parsed JSON data (dict or list)
        :@type data: dict/list
        """
        if file_path != "":
            self.from_file(file_path)
        if data:
            self.__parse_json_data(data)
        self.__reset_queries()
        self._matcher = Matcher()

    def __reset_queries(self):
        """Drop all recorded where-clauses and reset the OR-group index."""
        self._queries = []
        self._current_query_index = 0

    def __parse_json_data(self, data):
        """Store *data* as raw JSON and take a deep working copy.

        :@param data
        :@type data: json/dict
        :throws TypeError
        """
        if not isinstance(data, (dict, list)):
            raise TypeError("Provided Data is not json")
        self._raw_data = data
        # Deep copy so queries never mutate the caller's data.
        self._json_data = copy.deepcopy(self._raw_data)

    def __parse_json_file(self, file_path):
        """Load JSON from a ``.json`` file into raw and working data.

        :@param file_path
        :@type file_path: string
        :@throws IOError
        """
        if file_path == '' or os.path.splitext(file_path)[1] != '.json':
            raise IOError('Invalid Json file')
        with open(file_path) as json_file:
            self._raw_data = json.load(json_file)
            self._json_data = copy.deepcopy(self._raw_data)

    def get(self):
        """Apply any pending queries and return the prepared data.

        :@return object
        """
        self.__prepare()
        return self._json_data

    def from_file(self, file_path):
        """Set main json file path.

        :@param file_path
        :@type file_path: string
        :@throws IOError
        """
        self.__parse_json_file(file_path)
        return self

    def at(self, root):
        """Descend to the dotted *root* path before querying.

        :@param root
        :@type root: string
        :@return self
        :@throws KeyError
        """
        # NOTE(review): __get_value_from_data is not defined in this class
        # body -- it is expected to be provided elsewhere; confirm.
        for leaf in root.strip(" ").split('.'):
            if leaf:
                self._json_data = self.__get_value_from_data(leaf, self._json_data)
        return self

    def clone(self):
        """Return a deep copy of the current working data."""
        return copy.deepcopy(self._json_data)

    def reset(self, data=None):
        """Reset the working data to *data*, or back to the raw JSON.

        :@param data: optional replacement data (dict or list)
        :@type data: json/dict
        :@return self
        """
        if data and isinstance(data, (dict, list)):
            self._json_data = data
        else:
            self._json_data = copy.deepcopy(self._raw_data)
        self.__reset_queries()
        return self

    def __store_query(self, query_items):
        """Append one condition to the current OR-group.

        :@param query_items
        :@type query_items: dict
        """
        index = self._current_query_index
        if len(self._queries) - 1 < index:
            self._queries.append([])
        self._queries[index].append(query_items)

    def __prepare(self):
        """Run any stored queries once, then clear them."""
        if len(self._queries) > 0:
            self.__execute_queries()
            self.__reset_queries()

    def __execute_queries(self):
        """Filter the working data: AND within a group, OR across groups."""
        def matches(item):
            or_check = False
            for group in self._queries:
                and_check = True
                for query in group:
                    and_check &= self._matcher._match(
                        item.get(query.get('key'), None),
                        query.get('operator'),
                        query.get('value')
                    )
                or_check |= and_check
            return or_check
        self._json_data = list(filter(matches, self._json_data))

    # ---------- Query Methods ------------- #
    def where(self, key, operator, value):
        """Record an AND condition in the current group.

        :@param key
        :@param operator
        :@param value
        :@type key,operator,value: string
        :@return self
        """
        self.__store_query({"key": key, "operator": operator, "value": value})
        return self

    def or_where(self, key, operator, value):
        """Open a new OR-group starting with the given condition.

        :@type key, operator, value: string
        :@return self
        """
        if len(self._queries) > 0:
            self._current_query_index += 1
        self.__store_query({"key": key, "operator": operator, "value": value})
        return self

    def where_in(self, key, value):
        """Shortcut for ``where(key, 'in', value)``."""
        return self.where(key, 'in', value)

    def where_not_in(self, key, value):
        """Shortcut for ``where(key, 'notin', value)``."""
        return self.where(key, 'notin', value)

    def where_null(self, key):
        """Match items whose *key* equals the string 'None'."""
        return self.where(key, '=', 'None')

    def where_not_null(self, key):
        """Match items whose *key* does not equal the string 'None'."""
        return self.where(key, '!=', 'None')

    def where_start_with(self, key, value):
        """Shortcut for ``where(key, 'startswith', value)``."""
        return self.where(key, 'startswith', value)

    def where_end_with(self, key, value):
        """Shortcut for ``where(key, 'endswith', value)``."""
        return self.where(key, 'endswith', value)

    def where_contains(self, key, value):
        """Shortcut for ``where(key, 'contains', value)``."""
        return self.where(key, 'contains', value)

    # ---------- Aggregate Methods ------------- #
    def count(self):
        """Number of items after applying pending queries.

        :@return int
        """
        self.__prepare()
        return len(self._json_data)

    def size(self):
        """Alias of :meth:`count`.

        :@return int
        """
        self.__prepare()
        return len(self._json_data)

    def first(self):
        """First element of the prepared data, or None when empty."""
        self.__prepare()
        return self._json_data[0] if self._json_data else None

    def last(self):
        """Last element of the prepared data, or None when empty."""
        self.__prepare()
        return self._json_data[-1] if self._json_data else None

    def nth(self, index):
        """Element at *index* (negative allowed), or None when out of range.

        Fixed: the previous bounds check let ``index == len(data)`` through
        and raised IndexError instead of returning None.

        :@param index
        :@type index: int
        :@return object
        """
        self.__prepare()
        try:
            return self._json_data[index]
        except IndexError:
            return None

    def sum(self, property):
        """Sum of *property* across the prepared items.

        :@param property
        :@type property: string
        :@return int/float
        """
        self.__prepare()
        return sum(item.get(property) for item in self._json_data)

    def max(self, property):
        """Largest value of *property* among the prepared items.

        :@param property
        :@type property: string
        :@return object
        :@throws KeyError
        """
        self.__prepare()
        try:
            return max(self._json_data, key=lambda x: x[property]).get(property)
        except KeyError:
            raise KeyError("Key is not exists")

    def min(self, property):
        """Smallest value of *property* among the prepared items.

        :@param property
        :@type property: string
        :@return object
        :@throws KeyError
        """
        self.__prepare()
        try:
            return min(self._json_data, key=lambda x: x[property]).get(property)
        except KeyError:
            raise KeyError("Key is not exists")

    def avg(self, property):
        """Average of *property*; raises ZeroDivisionError on empty data.

        :@param property
        :@type property: string
        :@return int/float
        """
        self.__prepare()
        return self.sum(property) / self.count()

    def chunk(self, size=0):
        """Split the prepared list into consecutive chunks of *size*.

        Fixed: non-positive sizes now raise instead of looping forever for
        negative values.

        :@param size: chunk length, must be > 0
        :@type size: integer
        :@return list of lists
        :@throws ValueError
        """
        if size <= 0:
            raise ValueError('Invalid chunk size')
        self.__prepare()
        self._json_data = [
            self._json_data[start:start + size]
            for start in range(0, len(self._json_data), size)
        ]
        return self._json_data

    def group_by(self, property):
        """Group the prepared items into a dict keyed by *property*.

        :@param property
        :@type property: string
        :@return self
        """
        self.__prepare()
        grouped = {}
        for item in self._json_data:
            grouped.setdefault(item[property], []).append(item)
        self._json_data = grouped
        return self

    def sort(self, order="asc"):
        """Sort the prepared list (ascending unless ``order != 'asc'``).

        :@param order: "asc"
        :@type order: string
        :@return self
        """
        self.__prepare()
        if isinstance(self._json_data, list):
            self._json_data = sorted(self._json_data, reverse=(order != "asc"))
        return self

    def sort_by(self, property, order="asc"):
        """Sort the prepared list of dicts by *property*.

        :@param property, order: "asc"
        :@type property, order: string
        :@return self
        """
        self.__prepare()
        if isinstance(self._json_data, list):
            self._json_data = sorted(
                self._json_data,
                key=lambda item: item.get(property),
                reverse=(order != "asc")
            )
        return self
|
s1s1ty/py-jsonq
|
pyjsonq/query.py
|
JsonQ.at
|
python
|
def at(self, root):
    """Walk the dotted *root* path and make that node the working data.

    :@param root
    :@type root: string
    :@return self
    :@throws KeyError
    """
    for segment in root.strip(" ").split('.'):
        if segment:
            self._json_data = self.__get_value_from_data(segment, self._json_data)
    return self
|
Set root where PyJsonq start to prepare
:@param root
:@type root: string
:@return self
:@throws KeyError
|
train
|
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L101-L114
|
[
"def __get_value_from_data(self, key, data):\n \"\"\"Find value from json data\n\n :@pram key\n :@type: string\n\n :@pram data\n :@type data: dict\n\n :@return object\n :@throws KeyError\n \"\"\"\n if key.isdigit():\n return data[int(key)]\n\n if key not in data:\n raise KeyError(\"Key not exists\")\n\n return data.get(key)\n"
] |
class JsonQ(object):
"""Query over Json file"""
def __init__(self, file_path="", data={}):
"""
:@param file_path: Set main json file path
:@type file_path: string
"""
if file_path != "":
self.from_file(file_path)
if data:
self.__parse_json_data(data)
self.__reset_queries()
self._matcher = Matcher()
def __reset_queries(self):
"""Reset previous query data"""
self._queries = []
self._current_query_index = 0
def __parse_json_data(self, data):
"""Process Json data
:@param data
:@type data: json/dict
:throws TypeError
"""
if isinstance(data, dict) or isinstance(data, list):
self._raw_data = data
self._json_data = copy.deepcopy(self._raw_data)
else:
raise TypeError("Provided Data is not json")
def __parse_json_file(self, file_path):
"""Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError
"""
if file_path == '' or os.path.splitext(file_path)[1] != '.json':
raise IOError('Invalid Json file')
with open(file_path) as json_file:
self._raw_data = json.load(json_file)
self._json_data = copy.deepcopy(self._raw_data)
def __get_value_from_data(self, key, data):
"""Find value from json data
:@pram key
:@type: string
:@pram data
:@type data: dict
:@return object
:@throws KeyError
"""
if key.isdigit():
return data[int(key)]
if key not in data:
raise KeyError("Key not exists")
return data.get(key)
def get(self):
"""Getting prepared data
:@return object
"""
self.__prepare()
return self._json_data
def from_file(self, file_path):
"""Set main json file path
:@param file_path
:@type file_path: string
:@throws FileNotFoundError
"""
self.__parse_json_file(file_path)
return self
def clone(self):
"""Clone the exact same copy of the current object instance."""
return copy.deepcopy(self._json_data)
def reset(self, data={}):
"""JsonQuery object cen be reset to new data
according to given data or previously given raw Json data
:@param data: {}
:@type data: json/dict
:@return self
"""
if data and (isinstance(data, dict) or isinstance(data, list)):
self._json_data = data
else:
self._json_data = copy.deepcopy(self._raw_data)
self.__reset_queries()
return self
def __store_query(self, query_items):
"""Make where clause
:@param query_items
:@type query_items: dict
"""
temp_index = self._current_query_index
if len(self._queries) - 1 < temp_index:
self._queries.append([])
self._queries[temp_index].append(query_items)
def __prepare(self):
"""Prepare query result"""
if len(self._queries) > 0:
self.__execute_queries()
self.__reset_queries()
def __execute_queries(self):
"""Execute all condition and filter result data"""
def func(item):
or_check = False
for queries in self._queries:
and_check = True
for query in queries:
and_check &= self._matcher._match(
item.get(query.get('key'), None),
query.get('operator'),
query.get('value')
)
or_check |= and_check
return or_check
self._json_data = list(filter(lambda item: func(item), self._json_data))
# ---------- Query Methods ------------- #
def where(self, key, operator, value):
"""Make where clause
:@param key
:@param operator
:@param value
:@type key,operator,value: string
:@return self
"""
self.__store_query({"key": key, "operator": operator, "value": value})
return self
def or_where(self, key, operator, value):
"""Make or_where clause
:@param key
:@param operator
:@param value
:@type key, operator, value: string
:@return self
"""
if len(self._queries) > 0:
self._current_query_index += 1
self.__store_query({"key": key, "operator": operator, "value": value})
return self
def where_in(self, key, value):
"""Make where_in clause
:@param key
:@param value
:@type key, value: string
:@return self
"""
self.where(key, 'in', value)
return self
def where_not_in(self, key, value):
"""Make where_not_in clause
:@param key
:@param value
:@type key, value: string
:@return self
"""
self.where(key, 'notin', value)
return self
def where_null(self, key):
"""Make where_null clause
:@param key
:@type key: string
:@return self
"""
self.where(key, '=', 'None')
return self
def where_not_null(self, key):
"""Make where_not_null clause
:@param key
:@type key: string
:@return self
"""
self.where(key, '!=', 'None')
return self
def where_start_with(self, key, value):
"""Make where_start_with clause
:@param key
:@param value
:@type key,value: string
:@return self
"""
self.where(key, 'startswith', value)
return self
def where_end_with(self, key, value):
"""Make where_ends_with clause.
:@param key
:@param value
:@type key,value: string
:@return self
"""
self.where(key, 'endswith', value)
return self
def where_contains(self, key, value):
"""Make where_contains clause.
:@param key
:@param value
:@type key,value: string
:@return self
"""
self.where(key, 'contains', value)
return self
# ---------- Aggregate Methods ------------- #
def count(self):
"""Getting the size of the collection
:@return int
"""
self.__prepare()
return len(self._json_data)
def size(self):
"""Getting the size of the collection
:@return int
"""
self.__prepare()
return len(self._json_data)
def first(self):
"""Getting the first element of the collection otherwise None
:@return object
"""
self.__prepare()
return self._json_data[0] if self.count() > 0 else None
def last(self):
"""Getting the last element of the collection otherwise None
:@return object
"""
self.__prepare()
return self._json_data[-1] if self.count() > 0 else None
def nth(self, index):
"""Getting the nth element of the collection
:@param index
:@type index: int
:@return object
"""
self.__prepare()
return None if self.count() < math.fabs(index) else self._json_data[index]
def sum(self, property):
"""Getting the sum according to the given property
:@param property
:@type property: string
:@return int/float
"""
self.__prepare()
total = 0
for i in self._json_data:
total += i.get(property)
return total
def max(self, property):
"""Getting the maximum value from the prepared data
:@param property
:@type property: string
:@return object
:@throws KeyError
"""
self.__prepare()
try:
return max(self._json_data, key=lambda x: x[property]).get(property)
except KeyError:
raise KeyError("Key is not exists")
def min(self, property):
"""Getting the minimum value from the prepared data
:@param property
:@type property: string
:@return object
:@throws KeyError
"""
self.__prepare()
try:
return min(self._json_data, key=lambda x: x[property]).get(property)
except KeyError:
raise KeyError("Key is not exists")
def avg(self, property):
"""Getting average according to given property
:@param property
:@type property: string
:@return average: int/float
"""
self.__prepare()
return self.sum(property) / self.count()
def chunk(self, size=0):
"""Group the resulted collection to multiple chunk
:@param size: 0
:@type size: integer
:@return Chunked List
"""
if size == 0:
raise ValueError('Invalid chunk size')
self.__prepare()
_new_content = []
while(len(self._json_data) > 0):
_new_content.append(self._json_data[0:size])
self._json_data = self._json_data[size:]
self._json_data = _new_content
return self._json_data
def group_by(self, property):
"""Getting the grouped result by the given property
:@param property
:@type property: string
:@return self
"""
self.__prepare()
group_data = {}
for data in self._json_data:
if data[property] not in group_data:
group_data[data[property]] = []
group_data[data[property]].append(data)
self._json_data = group_data
return self
def sort(self, order="asc"):
"""Getting the sorted result of the given list
:@param order: "asc"
:@type order: string
:@return self
"""
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(self._json_data)
else:
self._json_data = sorted(self._json_data, reverse=True)
return self
def sort_by(self, property, order="asc"):
"""Getting the sorted result by the given property
:@param property, order: "asc"
:@type property, order: string
:@return self
"""
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property)
)
else:
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property),
reverse=True
)
return self
|
s1s1ty/py-jsonq
|
pyjsonq/query.py
|
JsonQ.reset
|
python
|
def reset(self, data={}):
if data and (isinstance(data, dict) or isinstance(data, list)):
self._json_data = data
else:
self._json_data = copy.deepcopy(self._raw_data)
self.__reset_queries()
return self
|
JsonQuery object cen be reset to new data
according to given data or previously given raw Json data
:@param data: {}
:@type data: json/dict
:@return self
|
train
|
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L120-L136
|
[
"def __reset_queries(self):\n \"\"\"Reset previous query data\"\"\"\n\n self._queries = []\n self._current_query_index = 0\n"
] |
class JsonQ(object):
"""Query over Json file"""
def __init__(self, file_path="", data={}):
"""
:@param file_path: Set main json file path
:@type file_path: string
"""
if file_path != "":
self.from_file(file_path)
if data:
self.__parse_json_data(data)
self.__reset_queries()
self._matcher = Matcher()
def __reset_queries(self):
"""Reset previous query data"""
self._queries = []
self._current_query_index = 0
def __parse_json_data(self, data):
"""Process Json data
:@param data
:@type data: json/dict
:throws TypeError
"""
if isinstance(data, dict) or isinstance(data, list):
self._raw_data = data
self._json_data = copy.deepcopy(self._raw_data)
else:
raise TypeError("Provided Data is not json")
def __parse_json_file(self, file_path):
"""Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError
"""
if file_path == '' or os.path.splitext(file_path)[1] != '.json':
raise IOError('Invalid Json file')
with open(file_path) as json_file:
self._raw_data = json.load(json_file)
self._json_data = copy.deepcopy(self._raw_data)
def __get_value_from_data(self, key, data):
"""Find value from json data
:@pram key
:@type: string
:@pram data
:@type data: dict
:@return object
:@throws KeyError
"""
if key.isdigit():
return data[int(key)]
if key not in data:
raise KeyError("Key not exists")
return data.get(key)
def get(self):
"""Getting prepared data
:@return object
"""
self.__prepare()
return self._json_data
def from_file(self, file_path):
"""Set main json file path
:@param file_path
:@type file_path: string
:@throws FileNotFoundError
"""
self.__parse_json_file(file_path)
return self
def at(self, root):
"""Set root where PyJsonq start to prepare
:@param root
:@type root: string
:@return self
:@throws KeyError
"""
leafs = root.strip(" ").split('.')
for leaf in leafs:
if leaf:
self._json_data = self.__get_value_from_data(leaf, self._json_data)
return self
def clone(self):
"""Clone the exact same copy of the current object instance."""
return copy.deepcopy(self._json_data)
def __store_query(self, query_items):
"""Make where clause
:@param query_items
:@type query_items: dict
"""
temp_index = self._current_query_index
if len(self._queries) - 1 < temp_index:
self._queries.append([])
self._queries[temp_index].append(query_items)
def __prepare(self):
"""Prepare query result"""
if len(self._queries) > 0:
self.__execute_queries()
self.__reset_queries()
def __execute_queries(self):
"""Execute all condition and filter result data"""
def func(item):
or_check = False
for queries in self._queries:
and_check = True
for query in queries:
and_check &= self._matcher._match(
item.get(query.get('key'), None),
query.get('operator'),
query.get('value')
)
or_check |= and_check
return or_check
self._json_data = list(filter(lambda item: func(item), self._json_data))
# ---------- Query Methods ------------- #
def where(self, key, operator, value):
"""Make where clause
:@param key
:@param operator
:@param value
:@type key,operator,value: string
:@return self
"""
self.__store_query({"key": key, "operator": operator, "value": value})
return self
def or_where(self, key, operator, value):
"""Make or_where clause
:@param key
:@param operator
:@param value
:@type key, operator, value: string
:@return self
"""
if len(self._queries) > 0:
self._current_query_index += 1
self.__store_query({"key": key, "operator": operator, "value": value})
return self
def where_in(self, key, value):
"""Make where_in clause
:@param key
:@param value
:@type key, value: string
:@return self
"""
self.where(key, 'in', value)
return self
def where_not_in(self, key, value):
"""Make where_not_in clause
:@param key
:@param value
:@type key, value: string
:@return self
"""
self.where(key, 'notin', value)
return self
def where_null(self, key):
"""Make where_null clause
:@param key
:@type key: string
:@return self
"""
self.where(key, '=', 'None')
return self
def where_not_null(self, key):
"""Make where_not_null clause
:@param key
:@type key: string
:@return self
"""
self.where(key, '!=', 'None')
return self
def where_start_with(self, key, value):
"""Make where_start_with clause
:@param key
:@param value
:@type key,value: string
:@return self
"""
self.where(key, 'startswith', value)
return self
def where_end_with(self, key, value):
"""Make where_ends_with clause.
:@param key
:@param value
:@type key,value: string
:@return self
"""
self.where(key, 'endswith', value)
return self
def where_contains(self, key, value):
"""Make where_contains clause.
:@param key
:@param value
:@type key,value: string
:@return self
"""
self.where(key, 'contains', value)
return self
# ---------- Aggregate Methods ------------- #
def count(self):
"""Getting the size of the collection
:@return int
"""
self.__prepare()
return len(self._json_data)
def size(self):
"""Getting the size of the collection
:@return int
"""
self.__prepare()
return len(self._json_data)
def first(self):
"""Getting the first element of the collection otherwise None
:@return object
"""
self.__prepare()
return self._json_data[0] if self.count() > 0 else None
def last(self):
"""Getting the last element of the collection otherwise None
:@return object
"""
self.__prepare()
return self._json_data[-1] if self.count() > 0 else None
def nth(self, index):
"""Getting the nth element of the collection
:@param index
:@type index: int
:@return object
"""
self.__prepare()
return None if self.count() < math.fabs(index) else self._json_data[index]
def sum(self, property):
"""Getting the sum according to the given property
:@param property
:@type property: string
:@return int/float
"""
self.__prepare()
total = 0
for i in self._json_data:
total += i.get(property)
return total
def max(self, property):
"""Getting the maximum value from the prepared data
:@param property
:@type property: string
:@return object
:@throws KeyError
"""
self.__prepare()
try:
return max(self._json_data, key=lambda x: x[property]).get(property)
except KeyError:
raise KeyError("Key is not exists")
def min(self, property):
"""Getting the minimum value from the prepared data
:@param property
:@type property: string
:@return object
:@throws KeyError
"""
self.__prepare()
try:
return min(self._json_data, key=lambda x: x[property]).get(property)
except KeyError:
raise KeyError("Key is not exists")
def avg(self, property):
"""Getting average according to given property
:@param property
:@type property: string
:@return average: int/float
"""
self.__prepare()
return self.sum(property) / self.count()
def chunk(self, size=0):
"""Group the resulted collection to multiple chunk
:@param size: 0
:@type size: integer
:@return Chunked List
"""
if size == 0:
raise ValueError('Invalid chunk size')
self.__prepare()
_new_content = []
while(len(self._json_data) > 0):
_new_content.append(self._json_data[0:size])
self._json_data = self._json_data[size:]
self._json_data = _new_content
return self._json_data
def group_by(self, property):
"""Getting the grouped result by the given property
:@param property
:@type property: string
:@return self
"""
self.__prepare()
group_data = {}
for data in self._json_data:
if data[property] not in group_data:
group_data[data[property]] = []
group_data[data[property]].append(data)
self._json_data = group_data
return self
def sort(self, order="asc"):
"""Getting the sorted result of the given list
:@param order: "asc"
:@type order: string
:@return self
"""
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(self._json_data)
else:
self._json_data = sorted(self._json_data, reverse=True)
return self
def sort_by(self, property, order="asc"):
"""Getting the sorted result by the given property
:@param property, order: "asc"
:@type property, order: string
:@return self
"""
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property)
)
else:
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property),
reverse=True
)
return self
|
s1s1ty/py-jsonq
|
pyjsonq/query.py
|
JsonQ.__store_query
|
python
|
def __store_query(self, query_items):
temp_index = self._current_query_index
if len(self._queries) - 1 < temp_index:
self._queries.append([])
self._queries[temp_index].append(query_items)
|
Make where clause
:@param query_items
:@type query_items: dict
|
train
|
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L138-L148
| null |
class JsonQ(object):
"""Query over Json file"""
def __init__(self, file_path="", data={}):
"""
:@param file_path: Set main json file path
:@type file_path: string
"""
if file_path != "":
self.from_file(file_path)
if data:
self.__parse_json_data(data)
self.__reset_queries()
self._matcher = Matcher()
def __reset_queries(self):
"""Reset previous query data"""
self._queries = []
self._current_query_index = 0
def __parse_json_data(self, data):
"""Process Json data
:@param data
:@type data: json/dict
:throws TypeError
"""
if isinstance(data, dict) or isinstance(data, list):
self._raw_data = data
self._json_data = copy.deepcopy(self._raw_data)
else:
raise TypeError("Provided Data is not json")
def __parse_json_file(self, file_path):
"""Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError
"""
if file_path == '' or os.path.splitext(file_path)[1] != '.json':
raise IOError('Invalid Json file')
with open(file_path) as json_file:
self._raw_data = json.load(json_file)
self._json_data = copy.deepcopy(self._raw_data)
def __get_value_from_data(self, key, data):
"""Find value from json data
:@pram key
:@type: string
:@pram data
:@type data: dict
:@return object
:@throws KeyError
"""
if key.isdigit():
return data[int(key)]
if key not in data:
raise KeyError("Key not exists")
return data.get(key)
def get(self):
"""Getting prepared data
:@return object
"""
self.__prepare()
return self._json_data
def from_file(self, file_path):
"""Set main json file path
:@param file_path
:@type file_path: string
:@throws FileNotFoundError
"""
self.__parse_json_file(file_path)
return self
def at(self, root):
"""Set root where PyJsonq start to prepare
:@param root
:@type root: string
:@return self
:@throws KeyError
"""
leafs = root.strip(" ").split('.')
for leaf in leafs:
if leaf:
self._json_data = self.__get_value_from_data(leaf, self._json_data)
return self
def clone(self):
"""Clone the exact same copy of the current object instance."""
return copy.deepcopy(self._json_data)
def reset(self, data={}):
"""JsonQuery object cen be reset to new data
according to given data or previously given raw Json data
:@param data: {}
:@type data: json/dict
:@return self
"""
if data and (isinstance(data, dict) or isinstance(data, list)):
self._json_data = data
else:
self._json_data = copy.deepcopy(self._raw_data)
self.__reset_queries()
return self
def __prepare(self):
"""Prepare query result"""
if len(self._queries) > 0:
self.__execute_queries()
self.__reset_queries()
def __execute_queries(self):
"""Execute all condition and filter result data"""
def func(item):
or_check = False
for queries in self._queries:
and_check = True
for query in queries:
and_check &= self._matcher._match(
item.get(query.get('key'), None),
query.get('operator'),
query.get('value')
)
or_check |= and_check
return or_check
self._json_data = list(filter(lambda item: func(item), self._json_data))
# ---------- Query Methods ------------- #
def where(self, key, operator, value):
"""Make where clause
:@param key
:@param operator
:@param value
:@type key,operator,value: string
:@return self
"""
self.__store_query({"key": key, "operator": operator, "value": value})
return self
def or_where(self, key, operator, value):
"""Make or_where clause
:@param key
:@param operator
:@param value
:@type key, operator, value: string
:@return self
"""
if len(self._queries) > 0:
self._current_query_index += 1
self.__store_query({"key": key, "operator": operator, "value": value})
return self
def where_in(self, key, value):
"""Make where_in clause
:@param key
:@param value
:@type key, value: string
:@return self
"""
self.where(key, 'in', value)
return self
def where_not_in(self, key, value):
"""Make where_not_in clause
:@param key
:@param value
:@type key, value: string
:@return self
"""
self.where(key, 'notin', value)
return self
def where_null(self, key):
"""Make where_null clause
:@param key
:@type key: string
:@return self
"""
self.where(key, '=', 'None')
return self
def where_not_null(self, key):
"""Make where_not_null clause
:@param key
:@type key: string
:@return self
"""
self.where(key, '!=', 'None')
return self
def where_start_with(self, key, value):
"""Make where_start_with clause
:@param key
:@param value
:@type key,value: string
:@return self
"""
self.where(key, 'startswith', value)
return self
def where_end_with(self, key, value):
"""Make where_ends_with clause.
:@param key
:@param value
:@type key,value: string
:@return self
"""
self.where(key, 'endswith', value)
return self
def where_contains(self, key, value):
"""Make where_contains clause.
:@param key
:@param value
:@type key,value: string
:@return self
"""
self.where(key, 'contains', value)
return self
# ---------- Aggregate Methods ------------- #
def count(self):
"""Getting the size of the collection
:@return int
"""
self.__prepare()
return len(self._json_data)
def size(self):
"""Getting the size of the collection
:@return int
"""
self.__prepare()
return len(self._json_data)
def first(self):
"""Getting the first element of the collection otherwise None
:@return object
"""
self.__prepare()
return self._json_data[0] if self.count() > 0 else None
def last(self):
"""Getting the last element of the collection otherwise None
:@return object
"""
self.__prepare()
return self._json_data[-1] if self.count() > 0 else None
def nth(self, index):
"""Getting the nth element of the collection
:@param index
:@type index: int
:@return object
"""
self.__prepare()
return None if self.count() < math.fabs(index) else self._json_data[index]
def sum(self, property):
"""Getting the sum according to the given property
:@param property
:@type property: string
:@return int/float
"""
self.__prepare()
total = 0
for i in self._json_data:
total += i.get(property)
return total
def max(self, property):
"""Getting the maximum value from the prepared data
:@param property
:@type property: string
:@return object
:@throws KeyError
"""
self.__prepare()
try:
return max(self._json_data, key=lambda x: x[property]).get(property)
except KeyError:
raise KeyError("Key is not exists")
def min(self, property):
"""Getting the minimum value from the prepared data
:@param property
:@type property: string
:@return object
:@throws KeyError
"""
self.__prepare()
try:
return min(self._json_data, key=lambda x: x[property]).get(property)
except KeyError:
raise KeyError("Key is not exists")
def avg(self, property):
"""Getting average according to given property
:@param property
:@type property: string
:@return average: int/float
"""
self.__prepare()
return self.sum(property) / self.count()
def chunk(self, size=0):
"""Group the resulted collection to multiple chunk
:@param size: 0
:@type size: integer
:@return Chunked List
"""
if size == 0:
raise ValueError('Invalid chunk size')
self.__prepare()
_new_content = []
while(len(self._json_data) > 0):
_new_content.append(self._json_data[0:size])
self._json_data = self._json_data[size:]
self._json_data = _new_content
return self._json_data
def group_by(self, property):
"""Getting the grouped result by the given property
:@param property
:@type property: string
:@return self
"""
self.__prepare()
group_data = {}
for data in self._json_data:
if data[property] not in group_data:
group_data[data[property]] = []
group_data[data[property]].append(data)
self._json_data = group_data
return self
def sort(self, order="asc"):
"""Getting the sorted result of the given list
:@param order: "asc"
:@type order: string
:@return self
"""
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(self._json_data)
else:
self._json_data = sorted(self._json_data, reverse=True)
return self
def sort_by(self, property, order="asc"):
"""Getting the sorted result by the given property
:@param property, order: "asc"
:@type property, order: string
:@return self
"""
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property)
)
else:
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property),
reverse=True
)
return self
|
s1s1ty/py-jsonq
|
pyjsonq/query.py
|
JsonQ.__execute_queries
|
python
|
def __execute_queries(self):
def func(item):
or_check = False
for queries in self._queries:
and_check = True
for query in queries:
and_check &= self._matcher._match(
item.get(query.get('key'), None),
query.get('operator'),
query.get('value')
)
or_check |= and_check
return or_check
self._json_data = list(filter(lambda item: func(item), self._json_data))
|
Execute all condition and filter result data
|
train
|
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L157-L173
| null |
class JsonQ(object):
"""Query over Json file"""
def __init__(self, file_path="", data={}):
"""
:@param file_path: Set main json file path
:@type file_path: string
"""
if file_path != "":
self.from_file(file_path)
if data:
self.__parse_json_data(data)
self.__reset_queries()
self._matcher = Matcher()
def __reset_queries(self):
"""Reset previous query data"""
self._queries = []
self._current_query_index = 0
def __parse_json_data(self, data):
"""Process Json data
:@param data
:@type data: json/dict
:throws TypeError
"""
if isinstance(data, dict) or isinstance(data, list):
self._raw_data = data
self._json_data = copy.deepcopy(self._raw_data)
else:
raise TypeError("Provided Data is not json")
def __parse_json_file(self, file_path):
"""Process Json file data
:@param file_path
:@type file_path: string
:@throws IOError
"""
if file_path == '' or os.path.splitext(file_path)[1] != '.json':
raise IOError('Invalid Json file')
with open(file_path) as json_file:
self._raw_data = json.load(json_file)
self._json_data = copy.deepcopy(self._raw_data)
def __get_value_from_data(self, key, data):
"""Find value from json data
:@pram key
:@type: string
:@pram data
:@type data: dict
:@return object
:@throws KeyError
"""
if key.isdigit():
return data[int(key)]
if key not in data:
raise KeyError("Key not exists")
return data.get(key)
def get(self):
"""Getting prepared data
:@return object
"""
self.__prepare()
return self._json_data
def from_file(self, file_path):
"""Set main json file path
:@param file_path
:@type file_path: string
:@throws FileNotFoundError
"""
self.__parse_json_file(file_path)
return self
def at(self, root):
"""Set root where PyJsonq start to prepare
:@param root
:@type root: string
:@return self
:@throws KeyError
"""
leafs = root.strip(" ").split('.')
for leaf in leafs:
if leaf:
self._json_data = self.__get_value_from_data(leaf, self._json_data)
return self
def clone(self):
"""Clone the exact same copy of the current object instance."""
return copy.deepcopy(self._json_data)
def reset(self, data={}):
"""JsonQuery object cen be reset to new data
according to given data or previously given raw Json data
:@param data: {}
:@type data: json/dict
:@return self
"""
if data and (isinstance(data, dict) or isinstance(data, list)):
self._json_data = data
else:
self._json_data = copy.deepcopy(self._raw_data)
self.__reset_queries()
return self
def __store_query(self, query_items):
"""Make where clause
:@param query_items
:@type query_items: dict
"""
temp_index = self._current_query_index
if len(self._queries) - 1 < temp_index:
self._queries.append([])
self._queries[temp_index].append(query_items)
def __prepare(self):
"""Prepare query result"""
if len(self._queries) > 0:
self.__execute_queries()
self.__reset_queries()
# ---------- Query Methods ------------- #
def where(self, key, operator, value):
"""Make where clause
:@param key
:@param operator
:@param value
:@type key,operator,value: string
:@return self
"""
self.__store_query({"key": key, "operator": operator, "value": value})
return self
def or_where(self, key, operator, value):
"""Make or_where clause
:@param key
:@param operator
:@param value
:@type key, operator, value: string
:@return self
"""
if len(self._queries) > 0:
self._current_query_index += 1
self.__store_query({"key": key, "operator": operator, "value": value})
return self
def where_in(self, key, value):
"""Make where_in clause
:@param key
:@param value
:@type key, value: string
:@return self
"""
self.where(key, 'in', value)
return self
def where_not_in(self, key, value):
"""Make where_not_in clause
:@param key
:@param value
:@type key, value: string
:@return self
"""
self.where(key, 'notin', value)
return self
def where_null(self, key):
"""Make where_null clause
:@param key
:@type key: string
:@return self
"""
self.where(key, '=', 'None')
return self
def where_not_null(self, key):
"""Make where_not_null clause
:@param key
:@type key: string
:@return self
"""
self.where(key, '!=', 'None')
return self
def where_start_with(self, key, value):
"""Make where_start_with clause
:@param key
:@param value
:@type key,value: string
:@return self
"""
self.where(key, 'startswith', value)
return self
def where_end_with(self, key, value):
"""Make where_ends_with clause.
:@param key
:@param value
:@type key,value: string
:@return self
"""
self.where(key, 'endswith', value)
return self
def where_contains(self, key, value):
"""Make where_contains clause.
:@param key
:@param value
:@type key,value: string
:@return self
"""
self.where(key, 'contains', value)
return self
# ---------- Aggregate Methods ------------- #
def count(self):
"""Getting the size of the collection
:@return int
"""
self.__prepare()
return len(self._json_data)
def size(self):
"""Getting the size of the collection
:@return int
"""
self.__prepare()
return len(self._json_data)
def first(self):
"""Getting the first element of the collection otherwise None
:@return object
"""
self.__prepare()
return self._json_data[0] if self.count() > 0 else None
def last(self):
"""Getting the last element of the collection otherwise None
:@return object
"""
self.__prepare()
return self._json_data[-1] if self.count() > 0 else None
def nth(self, index):
"""Getting the nth element of the collection
:@param index
:@type index: int
:@return object
"""
self.__prepare()
return None if self.count() < math.fabs(index) else self._json_data[index]
def sum(self, property):
"""Getting the sum according to the given property
:@param property
:@type property: string
:@return int/float
"""
self.__prepare()
total = 0
for i in self._json_data:
total += i.get(property)
return total
def max(self, property):
"""Getting the maximum value from the prepared data
:@param property
:@type property: string
:@return object
:@throws KeyError
"""
self.__prepare()
try:
return max(self._json_data, key=lambda x: x[property]).get(property)
except KeyError:
raise KeyError("Key is not exists")
def min(self, property):
"""Getting the minimum value from the prepared data
:@param property
:@type property: string
:@return object
:@throws KeyError
"""
self.__prepare()
try:
return min(self._json_data, key=lambda x: x[property]).get(property)
except KeyError:
raise KeyError("Key is not exists")
def avg(self, property):
"""Getting average according to given property
:@param property
:@type property: string
:@return average: int/float
"""
self.__prepare()
return self.sum(property) / self.count()
def chunk(self, size=0):
"""Group the resulted collection to multiple chunk
:@param size: 0
:@type size: integer
:@return Chunked List
"""
if size == 0:
raise ValueError('Invalid chunk size')
self.__prepare()
_new_content = []
while(len(self._json_data) > 0):
_new_content.append(self._json_data[0:size])
self._json_data = self._json_data[size:]
self._json_data = _new_content
return self._json_data
def group_by(self, property):
"""Getting the grouped result by the given property
:@param property
:@type property: string
:@return self
"""
self.__prepare()
group_data = {}
for data in self._json_data:
if data[property] not in group_data:
group_data[data[property]] = []
group_data[data[property]].append(data)
self._json_data = group_data
return self
def sort(self, order="asc"):
"""Getting the sorted result of the given list
:@param order: "asc"
:@type order: string
:@return self
"""
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(self._json_data)
else:
self._json_data = sorted(self._json_data, reverse=True)
return self
def sort_by(self, property, order="asc"):
"""Getting the sorted result by the given property
:@param property, order: "asc"
:@type property, order: string
:@return self
"""
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property)
)
else:
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property),
reverse=True
)
return self
|
s1s1ty/py-jsonq
|
pyjsonq/query.py
|
JsonQ.where
|
python
|
def where(self, key, operator, value):
self.__store_query({"key": key, "operator": operator, "value": value})
return self
|
Make where clause
:@param key
:@param operator
:@param value
:@type key,operator,value: string
:@return self
|
train
|
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L177-L188
|
[
"def __store_query(self, query_items):\n \"\"\"Make where clause\n\n :@param query_items\n :@type query_items: dict\n \"\"\"\n temp_index = self._current_query_index\n if len(self._queries) - 1 < temp_index:\n self._queries.append([])\n\n self._queries[temp_index].append(query_items)\n"
] |
class JsonQ(object):
    """Fluent query interface over JSON data loaded from a file or dict/list.

    Queries are built as a list of OR-groups, each group being a list of
    AND-combined clauses; they are evaluated lazily by ``__prepare`` the
    next time a result is requested.
    """

    def __init__(self, file_path="", data=None):
        """
        :@param file_path: Set main json file path
        :@type file_path: string
        :@param data: raw dict/list to query instead of a file (optional)
        :@type data: dict/list
        """
        # `data` previously defaulted to a shared mutable {}; None has the
        # same truthiness so behavior is unchanged, without the pitfall.
        if file_path != "":
            self.from_file(file_path)
        if data:
            self.__parse_json_data(data)
        self.__reset_queries()
        self._matcher = Matcher()

    def __reset_queries(self):
        """Discard any stored query clauses."""
        self._queries = []
        self._current_query_index = 0

    def __parse_json_data(self, data):
        """Accept raw data and keep a pristine copy for reset().

        :@param data
        :@type data: json/dict
        :@throws TypeError
        """
        if isinstance(data, dict) or isinstance(data, list):
            self._raw_data = data
            # Deep copy so queries can mutate _json_data freely.
            self._json_data = copy.deepcopy(self._raw_data)
        else:
            raise TypeError("Provided Data is not json")

    def __parse_json_file(self, file_path):
        """Load JSON from a .json file and keep a pristine copy.

        :@param file_path
        :@type file_path: string
        :@throws IOError
        """
        if file_path == '' or os.path.splitext(file_path)[1] != '.json':
            raise IOError('Invalid Json file')
        with open(file_path) as json_file:
            self._raw_data = json.load(json_file)
        self._json_data = copy.deepcopy(self._raw_data)

    def __get_value_from_data(self, key, data):
        """Resolve one path segment against the current data.

        :@param key
        :@type key: string
        :@param data
        :@type data: dict/list
        :@return object
        :@throws KeyError
        """
        # Numeric path segments index into lists.
        if key.isdigit():
            return data[int(key)]
        if key not in data:
            raise KeyError("Key not exists")
        return data.get(key)

    def get(self):
        """Execute pending queries and return the resulting data.

        :@return object
        """
        self.__prepare()
        return self._json_data

    def from_file(self, file_path):
        """Set main json file path

        :@param file_path
        :@type file_path: string
        :@throws IOError
        :@return self
        """
        self.__parse_json_file(file_path)
        return self

    def at(self, root):
        """Descend to a dotted path before querying (e.g. "users.0").

        :@param root
        :@type root: string
        :@return self
        :@throws KeyError
        """
        leafs = root.strip(" ").split('.')
        for leaf in leafs:
            if leaf:
                self._json_data = self.__get_value_from_data(leaf, self._json_data)
        return self

    def clone(self):
        """Return a deep copy of the current (possibly filtered) data."""
        return copy.deepcopy(self._json_data)

    def reset(self, data=None):
        """Reset to the given data, or back to the originally loaded data.

        :@param data
        :@type data: json/dict
        :@return self
        """
        # Mutable-default {} replaced by None; both are falsy, so the
        # fall-through to the pristine raw data is unchanged.
        if data and (isinstance(data, dict) or isinstance(data, list)):
            self._json_data = data
        else:
            self._json_data = copy.deepcopy(self._raw_data)
        self.__reset_queries()
        return self

    def __store_query(self, query_items):
        """Append one clause to the current OR-group.

        :@param query_items
        :@type query_items: dict
        """
        temp_index = self._current_query_index
        if len(self._queries) - 1 < temp_index:
            self._queries.append([])
        self._queries[temp_index].append(query_items)

    def __prepare(self):
        """Apply any stored queries to the data, then clear them."""
        if len(self._queries) > 0:
            self.__execute_queries()
            self.__reset_queries()

    def __execute_queries(self):
        """Filter the data: clauses within a group AND, groups OR."""
        def func(item):
            or_check = False
            for queries in self._queries:
                and_check = True
                for query in queries:
                    and_check &= self._matcher._match(
                        item.get(query.get('key'), None),
                        query.get('operator'),
                        query.get('value')
                    )
                or_check |= and_check
            return or_check
        self._json_data = list(filter(lambda item: func(item), self._json_data))

    # ---------- Query Methods ------------- #

    def where(self, key, operator, value):
        """Make where clause (AND-combined with prior clauses in the group).

        Restored: every other where_* helper delegates to this method.

        :@param key
        :@param operator
        :@param value
        :@type key, operator, value: string
        :@return self
        """
        self.__store_query({"key": key, "operator": operator, "value": value})
        return self

    def or_where(self, key, operator, value):
        """Make or_where clause (starts a new OR-group).

        :@param key
        :@param operator
        :@param value
        :@type key, operator, value: string
        :@return self
        """
        if len(self._queries) > 0:
            self._current_query_index += 1
        self.__store_query({"key": key, "operator": operator, "value": value})
        return self

    def where_in(self, key, value):
        """Make where_in clause

        :@param key
        :@param value
        :@type key, value: string
        :@return self
        """
        self.where(key, 'in', value)
        return self

    def where_not_in(self, key, value):
        """Make where_not_in clause

        :@param key
        :@param value
        :@type key, value: string
        :@return self
        """
        self.where(key, 'notin', value)
        return self

    def where_null(self, key):
        """Make where_null clause

        :@param key
        :@type key: string
        :@return self
        """
        # NOTE(review): compares against the string 'None' by project
        # convention (Matcher semantics) — kept as-is.
        self.where(key, '=', 'None')
        return self

    def where_not_null(self, key):
        """Make where_not_null clause

        :@param key
        :@type key: string
        :@return self
        """
        self.where(key, '!=', 'None')
        return self

    def where_start_with(self, key, value):
        """Make where_start_with clause

        :@param key
        :@param value
        :@type key, value: string
        :@return self
        """
        self.where(key, 'startswith', value)
        return self

    def where_end_with(self, key, value):
        """Make where_ends_with clause.

        :@param key
        :@param value
        :@type key, value: string
        :@return self
        """
        self.where(key, 'endswith', value)
        return self

    def where_contains(self, key, value):
        """Make where_contains clause.

        :@param key
        :@param value
        :@type key, value: string
        :@return self
        """
        self.where(key, 'contains', value)
        return self

    # ---------- Aggregate Methods ------------- #

    def count(self):
        """Getting the size of the collection

        :@return int
        """
        self.__prepare()
        return len(self._json_data)

    def size(self):
        """Getting the size of the collection (alias of count)

        :@return int
        """
        self.__prepare()
        return len(self._json_data)

    def first(self):
        """Getting the first element of the collection otherwise None

        :@return object
        """
        self.__prepare()
        return self._json_data[0] if self.count() > 0 else None

    def last(self):
        """Getting the last element of the collection otherwise None

        :@return object
        """
        self.__prepare()
        return self._json_data[-1] if self.count() > 0 else None

    def nth(self, index):
        """Getting the nth element of the collection, or None if out of range.

        :@param index
        :@type index: int
        :@return object
        """
        self.__prepare()
        size = self.count()
        # Fixed off-by-one: index == size previously raised IndexError
        # instead of returning None; negative bound (-size) stays valid.
        if index >= size or index < -size:
            return None
        return self._json_data[index]

    def sum(self, property):
        """Getting the sum according to the given property

        :@param property
        :@type property: string
        :@return int/float
        """
        self.__prepare()
        total = 0
        for i in self._json_data:
            total += i.get(property)
        return total

    def max(self, property):
        """Getting the maximum value from the prepared data

        :@param property
        :@type property: string
        :@return object
        :@throws KeyError
        """
        self.__prepare()
        try:
            return max(self._json_data, key=lambda x: x[property]).get(property)
        except KeyError:
            raise KeyError("Key is not exists")

    def min(self, property):
        """Getting the minimum value from the prepared data

        :@param property
        :@type property: string
        :@return object
        :@throws KeyError
        """
        self.__prepare()
        try:
            return min(self._json_data, key=lambda x: x[property]).get(property)
        except KeyError:
            raise KeyError("Key is not exists")

    def avg(self, property):
        """Getting average according to given property

        :@param property
        :@type property: string
        :@return average: int/float
        """
        self.__prepare()
        return self.sum(property) / self.count()

    def chunk(self, size=0):
        """Group the resulted collection into chunks of the given size.

        :@param size
        :@type size: integer
        :@return list of lists
        :@throws ValueError
        """
        if size == 0:
            raise ValueError('Invalid chunk size')
        self.__prepare()
        _new_content = []
        while len(self._json_data) > 0:
            _new_content.append(self._json_data[0:size])
            self._json_data = self._json_data[size:]
        self._json_data = _new_content
        return self._json_data

    def group_by(self, property):
        """Getting the grouped result by the given property

        :@param property
        :@type property: string
        :@return self
        """
        self.__prepare()
        group_data = {}
        for data in self._json_data:
            if data[property] not in group_data:
                group_data[data[property]] = []
            group_data[data[property]].append(data)
        self._json_data = group_data
        return self

    def sort(self, order="asc"):
        """Getting the sorted result of the given list

        :@param order: "asc" (default); any other value sorts descending
        :@type order: string
        :@return self
        """
        self.__prepare()
        if isinstance(self._json_data, list):
            self._json_data = sorted(self._json_data, reverse=(order != "asc"))
        return self

    def sort_by(self, property, order="asc"):
        """Getting the sorted result by the given property

        :@param property: key whose value drives the ordering
        :@param order: "asc" (default); any other value sorts descending
        :@type property, order: string
        :@return self
        """
        self.__prepare()
        if isinstance(self._json_data, list):
            self._json_data = sorted(
                self._json_data,
                key=lambda x: x.get(property),
                reverse=(order != "asc"),
            )
        return self
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.