repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
kplindegaard/smbus2
|
smbus2/smbus2.py
|
SMBus.read_word_data
|
python
|
def read_word_data(self, i2c_addr, register, force=None):
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word
|
Read a single word (2 bytes) from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: 2-byte word
:rtype: int
|
train
|
https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L409-L427
|
[
"def create(read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE_DATA):\n u = union_i2c_smbus_data()\n return i2c_smbus_ioctl_data(\n read_write=read_write, command=command, size=size,\n data=union_pointer_type(u))\n",
"def _set_address(self, address, force=None):\n \"\"\"\n Set i2c slave address to use for subsequent calls.\n\n :param address:\n :type address: int\n :param force:\n :type force: Boolean\n \"\"\"\n force = force if force is not None else self.force\n if self.address != address or self._force_last != force:\n if force is True:\n ioctl(self.fd, I2C_SLAVE_FORCE, address)\n else:\n ioctl(self.fd, I2C_SLAVE, address)\n self.address = address\n self._force_last = force\n"
] |
class SMBus(object):
def __init__(self, bus=None, force=False):
"""
Initialize and (optionally) open an i2c bus connection.
:param bus: i2c bus number (e.g. 0 or 1). If not given, a subsequent
call to ``open()`` is required.
:type bus: int
:param force: force using the slave address even when driver is
already using it.
:type force: boolean
"""
self.fd = None
self.funcs = 0
if bus is not None:
self.open(bus)
self.address = None
self.force = force
self._force_last = None
def open(self, bus):
"""
Open a given i2c bus.
:param bus: i2c bus number (e.g. 0 or 1)
:type bus: int
"""
self.fd = os.open("/dev/i2c-{}".format(bus), os.O_RDWR)
self.funcs = self._get_funcs()
def close(self):
"""
Close the i2c connection.
"""
if self.fd:
os.close(self.fd)
self.fd = None
def _set_address(self, address, force=None):
"""
Set i2c slave address to use for subsequent calls.
:param address:
:type address: int
:param force:
:type force: Boolean
"""
force = force if force is not None else self.force
if self.address != address or self._force_last != force:
if force is True:
ioctl(self.fd, I2C_SLAVE_FORCE, address)
else:
ioctl(self.fd, I2C_SLAVE, address)
self.address = address
self._force_last = force
def _get_funcs(self):
"""
Returns a 32-bit value stating supported I2C functions.
:rtype: int
"""
f = c_uint32()
ioctl(self.fd, I2C_FUNCS, f)
return f.value
def write_quick(self, i2c_addr, force=None):
"""
Perform quick transaction. Throws IOError if unsuccessful.
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=0, size=I2C_SMBUS_QUICK)
ioctl(self.fd, I2C_SMBUS, msg)
def read_byte(self, i2c_addr, force=None):
"""
Read a single byte from a device.
:rtype: int
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
:return: Read byte value
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
def write_byte(self, i2c_addr, value, force=None):
"""
Write a single byte to a device.
:param i2c_addr: i2c address
:type i2c_addr: int
:param value: value to write
:type value: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=value, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
def read_byte_data(self, i2c_addr, register, force=None):
"""
Read a single byte from a designated register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: Read byte value
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BYTE_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
def write_byte_data(self, i2c_addr, register, value, force=None):
"""
Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Byte value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BYTE_DATA
)
msg.data.contents.byte = value
ioctl(self.fd, I2C_SMBUS, msg)
def write_word_data(self, i2c_addr, register, value, force=None):
"""
Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_WORD_DATA
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
def process_call(self, i2c_addr, register, value, force=None):
"""
Executes a SMBus Process Call, sending a 16-bit value and receiving a 16-bit response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_PROC_CALL
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word
def read_block_data(self, i2c_addr, register, force=None):
"""
Read a block of up to 32-bytes from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BLOCK_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1]
def write_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
def block_process_call(self, i2c_addr, register, data, force=None):
"""
Executes a SMBus Block Process Call, sending a variable-size data block and receiving another variable-size response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_PROC_CALL
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1]
def read_i2c_block_data(self, i2c_addr, register, length, force=None):
"""
Read a block of byte data from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param length: Desired block length
:type length: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Desired block length over %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.byte = length
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.block[1:length + 1]
def write_i2c_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
def i2c_rdwr(self, *i2c_msgs):
"""
Combine a series of i2c read and write operations in a single
transaction (with repeated start bits but no stop bits in between).
This method takes i2c_msg instances as input, which must be created
first with :py:meth:`i2c_msg.read` or :py:meth:`i2c_msg.write`.
:param i2c_msgs: One or more i2c_msg class instances.
:type i2c_msgs: i2c_msg
:rtype: None
"""
ioctl_data = i2c_rdwr_ioctl_data.create(*i2c_msgs)
ioctl(self.fd, I2C_RDWR, ioctl_data)
|
kplindegaard/smbus2
|
smbus2/smbus2.py
|
SMBus.process_call
|
python
|
def process_call(self, i2c_addr, register, value, force=None):
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_PROC_CALL
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word
|
Executes a SMBus Process Call, sending a 16-bit value and receiving a 16-bit response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: int
|
train
|
https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L450-L470
|
[
"def create(read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE_DATA):\n u = union_i2c_smbus_data()\n return i2c_smbus_ioctl_data(\n read_write=read_write, command=command, size=size,\n data=union_pointer_type(u))\n",
"def _set_address(self, address, force=None):\n \"\"\"\n Set i2c slave address to use for subsequent calls.\n\n :param address:\n :type address: int\n :param force:\n :type force: Boolean\n \"\"\"\n force = force if force is not None else self.force\n if self.address != address or self._force_last != force:\n if force is True:\n ioctl(self.fd, I2C_SLAVE_FORCE, address)\n else:\n ioctl(self.fd, I2C_SLAVE, address)\n self.address = address\n self._force_last = force\n"
] |
class SMBus(object):
def __init__(self, bus=None, force=False):
"""
Initialize and (optionally) open an i2c bus connection.
:param bus: i2c bus number (e.g. 0 or 1). If not given, a subsequent
call to ``open()`` is required.
:type bus: int
:param force: force using the slave address even when driver is
already using it.
:type force: boolean
"""
self.fd = None
self.funcs = 0
if bus is not None:
self.open(bus)
self.address = None
self.force = force
self._force_last = None
def open(self, bus):
"""
Open a given i2c bus.
:param bus: i2c bus number (e.g. 0 or 1)
:type bus: int
"""
self.fd = os.open("/dev/i2c-{}".format(bus), os.O_RDWR)
self.funcs = self._get_funcs()
def close(self):
"""
Close the i2c connection.
"""
if self.fd:
os.close(self.fd)
self.fd = None
def _set_address(self, address, force=None):
"""
Set i2c slave address to use for subsequent calls.
:param address:
:type address: int
:param force:
:type force: Boolean
"""
force = force if force is not None else self.force
if self.address != address or self._force_last != force:
if force is True:
ioctl(self.fd, I2C_SLAVE_FORCE, address)
else:
ioctl(self.fd, I2C_SLAVE, address)
self.address = address
self._force_last = force
def _get_funcs(self):
"""
Returns a 32-bit value stating supported I2C functions.
:rtype: int
"""
f = c_uint32()
ioctl(self.fd, I2C_FUNCS, f)
return f.value
def write_quick(self, i2c_addr, force=None):
"""
Perform quick transaction. Throws IOError if unsuccessful.
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=0, size=I2C_SMBUS_QUICK)
ioctl(self.fd, I2C_SMBUS, msg)
def read_byte(self, i2c_addr, force=None):
"""
Read a single byte from a device.
:rtype: int
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
:return: Read byte value
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
def write_byte(self, i2c_addr, value, force=None):
"""
Write a single byte to a device.
:param i2c_addr: i2c address
:type i2c_addr: int
:param value: value to write
:type value: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=value, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
def read_byte_data(self, i2c_addr, register, force=None):
"""
Read a single byte from a designated register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: Read byte value
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BYTE_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
def write_byte_data(self, i2c_addr, register, value, force=None):
"""
Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Byte value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BYTE_DATA
)
msg.data.contents.byte = value
ioctl(self.fd, I2C_SMBUS, msg)
def read_word_data(self, i2c_addr, register, force=None):
"""
Read a single word (2 bytes) from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: 2-byte word
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word
def write_word_data(self, i2c_addr, register, value, force=None):
"""
Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_WORD_DATA
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
def read_block_data(self, i2c_addr, register, force=None):
"""
Read a block of up to 32-bytes from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BLOCK_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1]
def write_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
def block_process_call(self, i2c_addr, register, data, force=None):
"""
Executes a SMBus Block Process Call, sending a variable-size data block and receiving another variable-size response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_PROC_CALL
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1]
def read_i2c_block_data(self, i2c_addr, register, length, force=None):
"""
Read a block of byte data from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param length: Desired block length
:type length: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Desired block length over %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.byte = length
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.block[1:length + 1]
def write_i2c_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
def i2c_rdwr(self, *i2c_msgs):
"""
Combine a series of i2c read and write operations in a single
transaction (with repeated start bits but no stop bits in between).
This method takes i2c_msg instances as input, which must be created
first with :py:meth:`i2c_msg.read` or :py:meth:`i2c_msg.write`.
:param i2c_msgs: One or more i2c_msg class instances.
:type i2c_msgs: i2c_msg
:rtype: None
"""
ioctl_data = i2c_rdwr_ioctl_data.create(*i2c_msgs)
ioctl(self.fd, I2C_RDWR, ioctl_data)
|
kplindegaard/smbus2
|
smbus2/smbus2.py
|
SMBus.read_block_data
|
python
|
def read_block_data(self, i2c_addr, register, force=None):
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BLOCK_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1]
|
Read a block of up to 32-bytes from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
|
train
|
https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L472-L491
|
[
"def create(read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE_DATA):\n u = union_i2c_smbus_data()\n return i2c_smbus_ioctl_data(\n read_write=read_write, command=command, size=size,\n data=union_pointer_type(u))\n",
"def _set_address(self, address, force=None):\n \"\"\"\n Set i2c slave address to use for subsequent calls.\n\n :param address:\n :type address: int\n :param force:\n :type force: Boolean\n \"\"\"\n force = force if force is not None else self.force\n if self.address != address or self._force_last != force:\n if force is True:\n ioctl(self.fd, I2C_SLAVE_FORCE, address)\n else:\n ioctl(self.fd, I2C_SLAVE, address)\n self.address = address\n self._force_last = force\n"
] |
class SMBus(object):
def __init__(self, bus=None, force=False):
"""
Initialize and (optionally) open an i2c bus connection.
:param bus: i2c bus number (e.g. 0 or 1). If not given, a subsequent
call to ``open()`` is required.
:type bus: int
:param force: force using the slave address even when driver is
already using it.
:type force: boolean
"""
self.fd = None
self.funcs = 0
if bus is not None:
self.open(bus)
self.address = None
self.force = force
self._force_last = None
def open(self, bus):
"""
Open a given i2c bus.
:param bus: i2c bus number (e.g. 0 or 1)
:type bus: int
"""
self.fd = os.open("/dev/i2c-{}".format(bus), os.O_RDWR)
self.funcs = self._get_funcs()
def close(self):
"""
Close the i2c connection.
"""
if self.fd:
os.close(self.fd)
self.fd = None
def _set_address(self, address, force=None):
"""
Set i2c slave address to use for subsequent calls.
:param address:
:type address: int
:param force:
:type force: Boolean
"""
force = force if force is not None else self.force
if self.address != address or self._force_last != force:
if force is True:
ioctl(self.fd, I2C_SLAVE_FORCE, address)
else:
ioctl(self.fd, I2C_SLAVE, address)
self.address = address
self._force_last = force
def _get_funcs(self):
"""
Returns a 32-bit value stating supported I2C functions.
:rtype: int
"""
f = c_uint32()
ioctl(self.fd, I2C_FUNCS, f)
return f.value
def write_quick(self, i2c_addr, force=None):
"""
Perform quick transaction. Throws IOError if unsuccessful.
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=0, size=I2C_SMBUS_QUICK)
ioctl(self.fd, I2C_SMBUS, msg)
def read_byte(self, i2c_addr, force=None):
"""
Read a single byte from a device.
:rtype: int
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
:return: Read byte value
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
def write_byte(self, i2c_addr, value, force=None):
"""
Write a single byte to a device.
:param i2c_addr: i2c address
:type i2c_addr: int
:param value: value to write
:type value: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=value, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
def read_byte_data(self, i2c_addr, register, force=None):
"""
Read a single byte from a designated register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: Read byte value
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BYTE_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
def write_byte_data(self, i2c_addr, register, value, force=None):
"""
Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Byte value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BYTE_DATA
)
msg.data.contents.byte = value
ioctl(self.fd, I2C_SMBUS, msg)
def read_word_data(self, i2c_addr, register, force=None):
"""
Read a single word (2 bytes) from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: 2-byte word
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word
def write_word_data(self, i2c_addr, register, value, force=None):
"""
Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_WORD_DATA
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
def process_call(self, i2c_addr, register, value, force=None):
"""
Executes a SMBus Process Call, sending a 16-bit value and receiving a 16-bit response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_PROC_CALL
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word
def write_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
def block_process_call(self, i2c_addr, register, data, force=None):
"""
Executes a SMBus Block Process Call, sending a variable-size data block and receiving another variable-size response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_PROC_CALL
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1]
def read_i2c_block_data(self, i2c_addr, register, length, force=None):
"""
Read a block of byte data from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param length: Desired block length
:type length: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Desired block length over %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.byte = length
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.block[1:length + 1]
def write_i2c_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
def i2c_rdwr(self, *i2c_msgs):
"""
Combine a series of i2c read and write operations in a single
transaction (with repeated start bits but no stop bits in between).
This method takes i2c_msg instances as input, which must be created
first with :py:meth:`i2c_msg.read` or :py:meth:`i2c_msg.write`.
:param i2c_msgs: One or more i2c_msg class instances.
:type i2c_msgs: i2c_msg
:rtype: None
"""
ioctl_data = i2c_rdwr_ioctl_data.create(*i2c_msgs)
ioctl(self.fd, I2C_RDWR, ioctl_data)
|
kplindegaard/smbus2
|
smbus2/smbus2.py
|
SMBus.write_block_data
|
python
|
def write_block_data(self, i2c_addr, register, data, force=None):
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
|
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
|
train
|
https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L493-L516
|
[
"def create(read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE_DATA):\n u = union_i2c_smbus_data()\n return i2c_smbus_ioctl_data(\n read_write=read_write, command=command, size=size,\n data=union_pointer_type(u))\n",
"def _set_address(self, address, force=None):\n \"\"\"\n Set i2c slave address to use for subsequent calls.\n\n :param address:\n :type address: int\n :param force:\n :type force: Boolean\n \"\"\"\n force = force if force is not None else self.force\n if self.address != address or self._force_last != force:\n if force is True:\n ioctl(self.fd, I2C_SLAVE_FORCE, address)\n else:\n ioctl(self.fd, I2C_SLAVE, address)\n self.address = address\n self._force_last = force\n"
] |
class SMBus(object):
def __init__(self, bus=None, force=False):
"""
Initialize and (optionally) open an i2c bus connection.
:param bus: i2c bus number (e.g. 0 or 1). If not given, a subsequent
call to ``open()`` is required.
:type bus: int
:param force: force using the slave address even when driver is
already using it.
:type force: boolean
"""
self.fd = None
self.funcs = 0
if bus is not None:
self.open(bus)
self.address = None
self.force = force
self._force_last = None
def open(self, bus):
"""
Open a given i2c bus.
:param bus: i2c bus number (e.g. 0 or 1)
:type bus: int
"""
self.fd = os.open("/dev/i2c-{}".format(bus), os.O_RDWR)
self.funcs = self._get_funcs()
def close(self):
"""
Close the i2c connection.
"""
if self.fd:
os.close(self.fd)
self.fd = None
def _set_address(self, address, force=None):
"""
Set i2c slave address to use for subsequent calls.
:param address:
:type address: int
:param force:
:type force: Boolean
"""
force = force if force is not None else self.force
if self.address != address or self._force_last != force:
if force is True:
ioctl(self.fd, I2C_SLAVE_FORCE, address)
else:
ioctl(self.fd, I2C_SLAVE, address)
self.address = address
self._force_last = force
def _get_funcs(self):
"""
Returns a 32-bit value stating supported I2C functions.
:rtype: int
"""
f = c_uint32()
ioctl(self.fd, I2C_FUNCS, f)
return f.value
def write_quick(self, i2c_addr, force=None):
"""
Perform quick transaction. Throws IOError if unsuccessful.
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=0, size=I2C_SMBUS_QUICK)
ioctl(self.fd, I2C_SMBUS, msg)
def read_byte(self, i2c_addr, force=None):
"""
Read a single byte from a device.
:rtype: int
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
:return: Read byte value
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
def write_byte(self, i2c_addr, value, force=None):
"""
Write a single byte to a device.
:param i2c_addr: i2c address
:type i2c_addr: int
:param value: value to write
:type value: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=value, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
def read_byte_data(self, i2c_addr, register, force=None):
"""
Read a single byte from a designated register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: Read byte value
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BYTE_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
def write_byte_data(self, i2c_addr, register, value, force=None):
"""
Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Byte value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BYTE_DATA
)
msg.data.contents.byte = value
ioctl(self.fd, I2C_SMBUS, msg)
def read_word_data(self, i2c_addr, register, force=None):
"""
Read a single word (2 bytes) from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: 2-byte word
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word
def write_word_data(self, i2c_addr, register, value, force=None):
"""
Write a single word (2 bytes) to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_WORD_DATA
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
def process_call(self, i2c_addr, register, value, force=None):
"""
Executes a SMBus Process Call, sending a 16-bit value and receiving a 16-bit response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_PROC_CALL
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word
def read_block_data(self, i2c_addr, register, force=None):
"""
Read a block of up to 32-bytes from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BLOCK_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1]
def block_process_call(self, i2c_addr, register, data, force=None):
"""
Executes a SMBus Block Process Call, sending a variable-size data block and receiving another variable-size response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_PROC_CALL
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1]
def read_i2c_block_data(self, i2c_addr, register, length, force=None):
"""
Read a block of byte data from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param length: Desired block length
:type length: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Desired block length over %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.byte = length
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.block[1:length + 1]
def write_i2c_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
def i2c_rdwr(self, *i2c_msgs):
"""
Combine a series of i2c read and write operations in a single
transaction (with repeated start bits but no stop bits in between).
This method takes i2c_msg instances as input, which must be created
first with :py:meth:`i2c_msg.read` or :py:meth:`i2c_msg.write`.
:param i2c_msgs: One or more i2c_msg class instances.
:type i2c_msgs: i2c_msg
:rtype: None
"""
ioctl_data = i2c_rdwr_ioctl_data.create(*i2c_msgs)
ioctl(self.fd, I2C_RDWR, ioctl_data)
|
kplindegaard/smbus2
|
smbus2/smbus2.py
|
SMBus.read_i2c_block_data
|
python
|
def read_i2c_block_data(self, i2c_addr, register, length, force=None):
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Desired block length over %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.byte = length
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.block[1:length + 1]
|
Read a block of byte data from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param length: Desired block length
:type length: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
|
train
|
https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L546-L569
|
[
"def create(read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE_DATA):\n u = union_i2c_smbus_data()\n return i2c_smbus_ioctl_data(\n read_write=read_write, command=command, size=size,\n data=union_pointer_type(u))\n",
"def _set_address(self, address, force=None):\n \"\"\"\n Set i2c slave address to use for subsequent calls.\n\n :param address:\n :type address: int\n :param force:\n :type force: Boolean\n \"\"\"\n force = force if force is not None else self.force\n if self.address != address or self._force_last != force:\n if force is True:\n ioctl(self.fd, I2C_SLAVE_FORCE, address)\n else:\n ioctl(self.fd, I2C_SLAVE, address)\n self.address = address\n self._force_last = force\n"
] |
class SMBus(object):
def __init__(self, bus=None, force=False):
"""
Initialize and (optionally) open an i2c bus connection.
:param bus: i2c bus number (e.g. 0 or 1). If not given, a subsequent
call to ``open()`` is required.
:type bus: int
:param force: force using the slave address even when driver is
already using it.
:type force: boolean
"""
self.fd = None
self.funcs = 0
if bus is not None:
self.open(bus)
self.address = None
self.force = force
self._force_last = None
def open(self, bus):
"""
Open a given i2c bus.
:param bus: i2c bus number (e.g. 0 or 1)
:type bus: int
"""
self.fd = os.open("/dev/i2c-{}".format(bus), os.O_RDWR)
self.funcs = self._get_funcs()
def close(self):
"""
Close the i2c connection.
"""
if self.fd:
os.close(self.fd)
self.fd = None
def _set_address(self, address, force=None):
"""
Set i2c slave address to use for subsequent calls.
:param address:
:type address: int
:param force:
:type force: Boolean
"""
force = force if force is not None else self.force
if self.address != address or self._force_last != force:
if force is True:
ioctl(self.fd, I2C_SLAVE_FORCE, address)
else:
ioctl(self.fd, I2C_SLAVE, address)
self.address = address
self._force_last = force
def _get_funcs(self):
"""
Returns a 32-bit value stating supported I2C functions.
:rtype: int
"""
f = c_uint32()
ioctl(self.fd, I2C_FUNCS, f)
return f.value
def write_quick(self, i2c_addr, force=None):
"""
Perform quick transaction. Throws IOError if unsuccessful.
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=0, size=I2C_SMBUS_QUICK)
ioctl(self.fd, I2C_SMBUS, msg)
def read_byte(self, i2c_addr, force=None):
"""
Read a single byte from a device.
:rtype: int
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
:return: Read byte value
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
def write_byte(self, i2c_addr, value, force=None):
"""
Write a single byte to a device.
:param i2c_addr: i2c address
:type i2c_addr: int
:param value: value to write
:type value: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=value, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
def read_byte_data(self, i2c_addr, register, force=None):
"""
Read a single byte from a designated register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: Read byte value
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BYTE_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
def write_byte_data(self, i2c_addr, register, value, force=None):
"""
Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Byte value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BYTE_DATA
)
msg.data.contents.byte = value
ioctl(self.fd, I2C_SMBUS, msg)
def read_word_data(self, i2c_addr, register, force=None):
"""
Read a single word (2 bytes) from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: 2-byte word
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word
def write_word_data(self, i2c_addr, register, value, force=None):
"""
Write a single word (2 bytes) to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_WORD_DATA
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
def process_call(self, i2c_addr, register, value, force=None):
"""
Executes a SMBus Process Call, sending a 16-bit value and receiving a 16-bit response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_PROC_CALL
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word
def read_block_data(self, i2c_addr, register, force=None):
"""
Read a block of up to 32-bytes from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BLOCK_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1]
def write_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
def block_process_call(self, i2c_addr, register, data, force=None):
"""
Executes a SMBus Block Process Call, sending a variable-size data block and receiving another variable-size response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_PROC_CALL
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1]
def write_i2c_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
def i2c_rdwr(self, *i2c_msgs):
"""
Combine a series of i2c read and write operations in a single
transaction (with repeated start bits but no stop bits in between).
This method takes i2c_msg instances as input, which must be created
first with :py:meth:`i2c_msg.read` or :py:meth:`i2c_msg.write`.
:param i2c_msgs: One or more i2c_msg class instances.
:type i2c_msgs: i2c_msg
:rtype: None
"""
ioctl_data = i2c_rdwr_ioctl_data.create(*i2c_msgs)
ioctl(self.fd, I2C_RDWR, ioctl_data)
|
kplindegaard/smbus2
|
smbus2/smbus2.py
|
SMBus.i2c_rdwr
|
python
|
def i2c_rdwr(self, *i2c_msgs):
ioctl_data = i2c_rdwr_ioctl_data.create(*i2c_msgs)
ioctl(self.fd, I2C_RDWR, ioctl_data)
|
Combine a series of i2c read and write operations in a single
transaction (with repeated start bits but no stop bits in between).
This method takes i2c_msg instances as input, which must be created
first with :py:meth:`i2c_msg.read` or :py:meth:`i2c_msg.write`.
:param i2c_msgs: One or more i2c_msg class instances.
:type i2c_msgs: i2c_msg
:rtype: None
|
train
|
https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L596-L609
|
[
"def create(*i2c_msg_instances):\n \"\"\"\n Factory method for creating a i2c_rdwr_ioctl_data struct that can\n be called with ``ioctl(fd, I2C_RDWR, data)``.\n\n :param i2c_msg_instances: Up to 42 i2c_msg instances\n :rtype: i2c_rdwr_ioctl_data\n \"\"\"\n n_msg = len(i2c_msg_instances)\n msg_array = (i2c_msg * n_msg)(*i2c_msg_instances)\n return i2c_rdwr_ioctl_data(\n msgs=msg_array,\n nmsgs=n_msg\n )\n"
] |
class SMBus(object):
def __init__(self, bus=None, force=False):
"""
Initialize and (optionally) open an i2c bus connection.
:param bus: i2c bus number (e.g. 0 or 1). If not given, a subsequent
call to ``open()`` is required.
:type bus: int
:param force: force using the slave address even when driver is
already using it.
:type force: boolean
"""
self.fd = None
self.funcs = 0
if bus is not None:
self.open(bus)
self.address = None
self.force = force
self._force_last = None
def open(self, bus):
"""
Open a given i2c bus.
:param bus: i2c bus number (e.g. 0 or 1)
:type bus: int
"""
self.fd = os.open("/dev/i2c-{}".format(bus), os.O_RDWR)
self.funcs = self._get_funcs()
def close(self):
"""
Close the i2c connection.
"""
if self.fd:
os.close(self.fd)
self.fd = None
def _set_address(self, address, force=None):
"""
Set i2c slave address to use for subsequent calls.
:param address:
:type address: int
:param force:
:type force: Boolean
"""
force = force if force is not None else self.force
if self.address != address or self._force_last != force:
if force is True:
ioctl(self.fd, I2C_SLAVE_FORCE, address)
else:
ioctl(self.fd, I2C_SLAVE, address)
self.address = address
self._force_last = force
def _get_funcs(self):
"""
Returns a 32-bit value stating supported I2C functions.
:rtype: int
"""
f = c_uint32()
ioctl(self.fd, I2C_FUNCS, f)
return f.value
def write_quick(self, i2c_addr, force=None):
"""
Perform quick transaction. Throws IOError if unsuccessful.
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=0, size=I2C_SMBUS_QUICK)
ioctl(self.fd, I2C_SMBUS, msg)
def read_byte(self, i2c_addr, force=None):
"""
Read a single byte from a device.
:rtype: int
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
:return: Read byte value
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
def write_byte(self, i2c_addr, value, force=None):
"""
Write a single byte to a device.
:param i2c_addr: i2c address
:type i2c_addr: int
:param value: value to write
:type value: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=value, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
def read_byte_data(self, i2c_addr, register, force=None):
"""
Read a single byte from a designated register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: Read byte value
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BYTE_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte
def write_byte_data(self, i2c_addr, register, value, force=None):
"""
Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Byte value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BYTE_DATA
)
msg.data.contents.byte = value
ioctl(self.fd, I2C_SMBUS, msg)
def read_word_data(self, i2c_addr, register, force=None):
"""
Read a single word (2 bytes) from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: 2-byte word
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word
def write_word_data(self, i2c_addr, register, value, force=None):
"""
Write a single word (2 bytes) to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_WORD_DATA
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
def process_call(self, i2c_addr, register, value, force=None):
"""
Executes a SMBus Process Call, sending a 16-bit value and receiving a 16-bit response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_PROC_CALL
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word
def read_block_data(self, i2c_addr, register, force=None):
"""
Read a block of up to 32-bytes from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BLOCK_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1]
def write_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
def block_process_call(self, i2c_addr, register, data, force=None):
"""
Executes a SMBus Block Process Call, sending a variable-size data block and receiving another variable-size response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_PROC_CALL
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1]
def read_i2c_block_data(self, i2c_addr, register, length, force=None):
"""
Read a block of byte data from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param length: Desired block length
:type length: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Desired block length over %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.byte = length
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.block[1:length + 1]
def write_i2c_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg)
|
jcushman/pdfquery
|
pdfquery/cache.py
|
BaseCache.set_hash_key
|
python
|
def set_hash_key(self, file):
filehasher = hashlib.md5()
while True:
data = file.read(8192)
if not data:
break
filehasher.update(data)
file.seek(0)
self.hash_key = filehasher.hexdigest()
|
Calculate and store hash key for file.
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/cache.py#L10-L19
| null |
class BaseCache(object):
def __init__(self):
self.hash_key = None
def set(self, page_range_key, tree):
"""write tree to key"""
pass
def get(self, page_range_key):
"""load tree from key, or None if cache miss"""
return None
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
_append_sorted
|
python
|
def _append_sorted(root, el, comparator):
""" Add el as a child of root, or as a child of one of root's children.
Comparator is a function(a, b) returning > 0 if a is a child of b, < 0 if
b is a child of a, 0 if neither.
"""
for child in root:
rel = comparator(el, child)
if rel > 0:
# el fits inside child, add to child and return
_append_sorted(child, el, comparator)
return
if rel < 0:
# child fits inside el, move child into el (may move more than one)
_append_sorted(el, child, comparator)
# we weren't added to a child, so add to root
root.append(el)
|
Add el as a child of root, or as a child of one of root's children.
Comparator is a function(a, b) returning > 0 if a is a child of b, < 0 if
b is a child of a, 0 if neither.
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L45-L60
| null |
from __future__ import print_function
# -*- coding: utf-8 -*-
# builtins
import codecs
import json
import numbers
import re
import chardet
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # sorry py2.6! Ordering isn't that important for our purposes anyway.
# pdfminer
from pdfminer.psparser import PSLiteral
from pdfminer.pdfparser import PDFParser
try:
# pdfminer < 20131022
from pdfminer.pdfparser import PDFDocument, PDFPage
except ImportError:
# pdfminer >= 20131022
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.layout import LAParams, LTChar, LTImage, LTPage
from pdfminer.converter import PDFPageAggregator
from pdfminer.pdftypes import resolve1
# other dependencies
from pyquery import PyQuery
from lxml import etree
import cssselect
import six
from six.moves import map
from six.moves import zip
# local imports
from .pdftranslator import PDFQueryTranslator
from .cache import DummyCache
# Re-sort the PDFMiner Layout tree so elements that fit inside other elements
# will be children of them
def _append_sorted(root, el, comparator):
""" Add el as a child of root, or as a child of one of root's children.
Comparator is a function(a, b) returning > 0 if a is a child of b, < 0 if
b is a child of a, 0 if neither.
"""
for child in root:
rel = comparator(el, child)
if rel > 0:
# el fits inside child, add to child and return
_append_sorted(child, el, comparator)
return
if rel < 0:
# child fits inside el, move child into el (may move more than one)
_append_sorted(el, child, comparator)
# we weren't added to a child, so add to root
root.append(el)
def _box_in_box(el, child):
""" Return True if child is contained within el. """
return all([
float(el.get('x0')) <= float(child.get('x0')),
float(el.get('x1')) >= float(child.get('x1')),
float(el.get('y0')) <= float(child.get('y0')),
float(el.get('y1')) >= float(child.get('y1')),
])
_comp_bbox_keys_required = set(['x0', 'x1', 'y0', 'y1'])
def _comp_bbox(el, el2):
    """ Compare two elements' bounding boxes for containment.
    Returns 1 if el is inside el2, -1 if el2 is inside el, 0 otherwise
    (including when either element lacks full bbox coordinates).
    """
    has_coords = (_comp_bbox_keys_required <= set(el.keys())
                  and _comp_bbox_keys_required <= set(el2.keys()))
    if not has_coords:
        return 0
    if _box_in_box(el2, el):
        return 1
    if _box_in_box(el, el2):
        return -1
    return 0
# assorted helpers
def _flatten(l, ltypes=(list, tuple)):
# via http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
# these might have to be removed from the start of a decoded string after
# conversion
# (each BOM byte sequence, decoded in its own encoding, yields U+FEFF, so in
# practice this set contains the single zero-width no-break space character)
bom_headers = set([
    six.text_type(codecs.BOM_UTF8, 'utf8'),
    six.text_type(codecs.BOM_UTF16_LE, 'utf-16LE'),
    six.text_type(codecs.BOM_UTF16_BE, 'utf-16BE'),
    six.text_type(codecs.BOM_UTF32_LE, 'utf-32LE'),
    six.text_type(codecs.BOM_UTF32_BE, 'utf-32BE'),
])
def smart_unicode_decode(encoded_string):
    """
    Decode a byte string of unknown encoding into unicode.

    Tries plain ASCII first as a fast path; otherwise asks chardet to
    guess the encoding (falling back to UTF-8 when chardet cannot) and
    decodes with errors='replace'. A leftover BOM character at the start
    of the decoded string is stripped.

    Example input from bug #11:
    ('\xfe\xff\x00I\x00n\x00s\x00p\x00e\x00c\x00t\x00i\x00o\x00n\x00'
    '\x00R\x00e\x00p\x00o\x00r\x00t\x00 \x00v\x002\x00.\x002')
    """
    if not encoded_string:
        return u''
    # optimization -- pure ASCII needs no detection at all
    try:
        return encoded_string.decode('ascii')
    except UnicodeDecodeError:
        pass
    guess = chardet.detect(encoded_string)
    # bug 54 -- depending on chardet version, a failed guess is reported
    # either as None or as {'encoding': None}; default to utf8 in both cases
    encoding = (guess or {}).get('encoding') or 'utf8'
    decoded = six.text_type(
        encoded_string,
        encoding=encoding,
        errors='replace'
    )
    # the unicode string may still carry a useless BOM character up front
    if decoded and decoded[0] in bom_headers:
        decoded = decoded[1:]
    return decoded
def prepare_for_json_encoding(obj):
    """
    Recursively convert an arbitrary object into plain JSON data types
    (list, dict, unicode str, int, bool, null).
    """
    obj_type = type(obj)
    if obj_type in (list, tuple):
        return [prepare_for_json_encoding(item) for item in obj]
    if obj_type == dict:
        # sort keys so attribute dicts compare equal across runs
        return OrderedDict(
            (prepare_for_json_encoding(key),
             prepare_for_json_encoding(obj[key])) for key in sorted(obj.keys())
        )
    if obj_type == six.binary_type:
        return smart_unicode_decode(obj)
    if obj is None or obj_type in (bool, six.text_type) or isinstance(obj, numbers.Number):
        return obj
    if obj_type == PSLiteral:
        # special case: pdfminer.six currently adds extra quotes to
        # PSLiteral.__repr__, so build the "/name" form by hand
        return u"/%s" % obj.name
    return six.text_type(obj)
def obj_to_string(obj, top=True):
    """
    Render an arbitrary object as a unicode string. Complex values
    (dict/list/tuple) come back JSON-encoded.
    """
    prepared = prepare_for_json_encoding(obj)
    if type(prepared) == six.text_type:
        return prepared
    return json.dumps(prepared)
# via http://stackoverflow.com/a/25920392/307769
# Matches runs of code points that are NOT valid in XML 1.0 text.
# NOTE: the supplementary-plane range must use \U00xxxxxx escapes -- a \u
# escape takes exactly 4 hex digits, so the old '\u10000-\u10FFFF' parsed as
# U+1000, '0', a '0'-U+10FF range, and literal 'F's, which wrongly stripped
# every astral-plane character (e.g. emoji).
invalid_xml_chars_re = re.compile(
    u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]+')
def strip_invalid_xml_chars(s):
    """Remove characters that may not appear in XML 1.0 text."""
    return invalid_xml_chars_re.sub('', s)
# custom PDFDocument class
class QPDFDocument(PDFDocument):
    def get_page_number(self, index):
        """
        Given an index, return page label as specified by
        catalog['PageLabels']['Nums']

        In a PDF, page labels are stored as a list of pairs, like
        [starting_index, label_format, starting_index, label_format ...]

        For example:
        [0, {'S': 'D', 'St': 151}, 4, {'S':'R', 'P':'Foo'}]

        So we have to first find the correct label_format based on the closest
        starting_index lower than the requested index, then use the
        label_format to convert the index to a page label.

        Label format meaning:
            /S = [
                    D Decimal arabic numerals
                    R Uppercase roman numerals
                    r Lowercase roman numerals
                    A Uppercase letters (A to Z for the first 26 pages, AA to ZZ
                        for the next 26, and so on)
                    a Lowercase letters (a to z for the first 26 pages, aa to zz
                        for the next 26, and so on)
                ] (if no /S, just use prefix ...)
            /P = text string label
            /St = integer start value

        Returns "" when the document carries no usable /PageLabels entry.
        """
        # get and cache page ranges
        if not hasattr(self, 'page_range_pairs'):
            try:
                page_ranges = resolve1(self.catalog['PageLabels'])['Nums']
                assert len(page_ranges) > 1 and len(page_ranges) % 2 == 0
                self.page_range_pairs = list(
                    reversed(list(zip(page_ranges[::2], page_ranges[1::2]))))
            except Exception:
                # missing or malformed /PageLabels -- treat as "no labels"
                self.page_range_pairs = []
        if not self.page_range_pairs:
            return ""
        # find page range containing index (pairs are stored in reverse, so
        # the first starting_index <= index is the tightest one)
        for starting_index, label_format in self.page_range_pairs:
            if starting_index <= index:
                break  # we found correct label_format
        label_format = resolve1(label_format)
        page_label = ""
        # handle numeric part of label
        if 'S' in label_format:
            # first find number for this page ...
            page_label = index - starting_index
            if 'St' in label_format:  # alternate start value
                page_label += label_format['St']
            else:
                page_label += 1
            # ... then convert to correct format
            num_type = label_format['S'].name
            # roman (upper or lower)
            if num_type.lower() == 'r':
                import roman
                page_label = roman.toRoman(page_label)
                if num_type == 'r':
                    page_label = page_label.lower()
            # letters
            elif num_type.lower() == 'a':
                # a to z for the first 26 pages, aa to zz for the next 26, and
                # so on. Floor division is required here: plain "/" yields a
                # float on Python 3 and str * float raises TypeError.
                letter = chr(page_label % 26 + 65)
                letter *= page_label // 26 + 1
                # NOTE(review): page_label == 1 yields 'B'/'b' rather than
                # 'A'/'a'; the mapping looks off by one -- confirm against
                # the PDF spec before changing behavior.
                if num_type == 'a':
                    letter = letter.lower()
                page_label = letter
            # decimal arabic
            else:  # if num_type == 'D':
                page_label = obj_to_string(page_label)
        # handle string prefix
        if 'P' in label_format:
            page_label = smart_unicode_decode(label_format['P']) + page_label
        return page_label
# create etree parser using custom Element class
class LayoutElement(etree.ElementBase):
    # Custom lxml element class that carries a reference back to the
    # PDFMiner layout object it was built from (assigned in
    # PDFQuery._xmlize).
    @property
    def layout(self):
        # lazily default to None: lxml may hand out element proxies that
        # never had the setter called
        if not hasattr(self, '_layout'):
            self._layout = None
        return self._layout
    @layout.setter
    def layout(self, value):
        self._layout = value
# Module-level XML parser whose makeelement() produces LayoutElement
# instances instead of plain lxml elements, so generated nodes can carry
# their PDFMiner layout objects.
parser_lookup = etree.ElementDefaultClassLookup(element=LayoutElement)
parser = etree.XMLParser()
parser.set_element_class_lookup(parser_lookup)
# main class
class PDFQuery(object):
    """
    Wrap a PDF file and expose its PDFMiner layout as an lxml etree /
    pyquery document, so the PDF can be queried with CSS-style selectors.
    """
    def __init__(
        self,
        file,
        merge_tags=('LTChar', 'LTAnno'),
        round_floats=True,
        round_digits=3,
        input_text_formatter=None,
        normalize_spaces=True,
        resort=True,
        parse_tree_cacher=None,
        laparams={'all_texts':True, 'detect_vertical':True},
    ):
        """
        :param file: file object or filesystem path of the PDF to open.
        :param merge_tags: element tags whose text is merged into runs.
        :param round_floats: round float attribute values for readability.
        :param round_digits: number of digits kept when rounding.
        :param input_text_formatter: optional callable applied to node text.
        :param normalize_spaces: collapse runs of whitespace to single spaces
            (only used when input_text_formatter is not given).
        :param resort: re-sort layout elements so boxes nest inside the
            boxes that contain them.
        :param parse_tree_cacher: optional cache object for parsed trees.
        :param laparams: dict of pdfminer LAParams keyword arguments, or an
            LAParams instance.  NOTE(review): mutable default argument; it
            is only read here, but callers should not mutate it.
        """
        # store input
        self.merge_tags = merge_tags
        self.round_floats = round_floats
        self.round_digits = round_digits
        self.resort = resort
        # set up input text formatting function, if any
        if input_text_formatter:
            self.input_text_formatter = input_text_formatter
        elif normalize_spaces:
            r = re.compile(r'\s+')
            self.input_text_formatter = lambda s: re.sub(r, ' ', s)
        else:
            self.input_text_formatter = None
        # open doc
        if not hasattr(file, 'read'):
            try:
                file = open(file, 'rb')
            except TypeError:
                raise TypeError("File must be file object or filepath string.")
        parser = PDFParser(file)
        if hasattr(QPDFDocument, 'set_parser'):
            # pdfminer < 20131022
            doc = QPDFDocument()
            parser.set_document(doc)
            doc.set_parser(parser)
        else:
            # pdfminer >= 20131022
            doc = QPDFDocument(parser)
            parser.set_document(doc)
        if hasattr(doc, 'initialize'):
            # as of pdfminer==20140328, "PDFDocument.initialize() method is
            # removed and no longer needed."
            doc.initialize()
        self.doc = doc
        self.parser = parser
        self.tree = None
        self.pq = None
        self.file = file
        if parse_tree_cacher:
            self._parse_tree_cacher = parse_tree_cacher
            self._parse_tree_cacher.set_hash_key(self.file)
        else:
            self._parse_tree_cacher = DummyCache()
        # set up layout parsing
        rsrcmgr = PDFResourceManager()
        if type(laparams) == dict:
            laparams = LAParams(**laparams)
        self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
        # caches
        self._pages = []
        self._pages_iter = None
        self._elements = []
    def load(self, *page_numbers):
        """
        Load etree and pyquery object for entire document, or given page
        numbers (ints or lists). After this is called, objects are
        available at pdf.tree and pdf.pq.
        >>> pdf.load()
        >>> pdf.tree
        <lxml.etree._ElementTree object at ...>
        >>> pdf.pq('LTPage')
        [<LTPage>, <LTPage>]
        >>> pdf.load(1)
        >>> pdf.pq('LTPage')
        [<LTPage>]
        >>> pdf.load(0, 1)
        >>> pdf.pq('LTPage')
        [<LTPage>, <LTPage>]
        """
        self.tree = self.get_tree(*_flatten(page_numbers))
        self.pq = self.get_pyquery(self.tree)
    def extract(self, searches, tree=None, as_dict=True):
        """
        Run a list of [key, selector(, formatter)] searches against the
        loaded tree (or the given subtree) and return the results.
        The special keys 'with_formatter' and 'with_parent' change the
        formatter / search root for subsequent entries instead of
        producing a result.
        >>> foo = pdf.extract([['pages', 'LTPage']])
        >>> foo
        {'pages': [<LTPage>, <LTPage>]}
        >>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
        {'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
        """
        if self.tree is None or self.pq is None:
            self.load()
        if tree is None:
            pq = self.pq
        else:
            pq = PyQuery(tree, css_translator=PDFQueryTranslator())
        results = []
        formatter = None
        parent = pq
        for search in searches:
            # two-element searches inherit the currently active formatter
            if len(search) < 3:
                search = list(search) + [formatter]
            key, search, tmp_formatter = search
            if key == 'with_formatter':
                if isinstance(search, six.string_types):
                    # is a pyquery method name, e.g. 'text'
                    formatter = lambda o, search=search: getattr(o, search)()
                elif hasattr(search, '__call__') or not search:
                    # is a method, or None to end formatting
                    formatter = search
                else:
                    raise TypeError("Formatter should be either a pyquery "
                                    "method name or a callable function.")
            elif key == 'with_parent':
                parent = pq(search) if search else pq
            else:
                try:
                    # a callable selector is applied as a filter over all
                    # elements; a string goes through the CSS translator
                    result = parent("*").filter(search) if \
                        hasattr(search, '__call__') else parent(search)
                except cssselect.SelectorSyntaxError as e:
                    raise cssselect.SelectorSyntaxError(
                        "Error applying selector '%s': %s" % (search, e))
                if tmp_formatter:
                    result = tmp_formatter(result)
                results += result if type(result) == tuple else [[key, result]]
        if as_dict:
            results = dict(results)
        return results
    # tree building stuff
    def get_pyquery(self, tree=None, page_numbers=None):
        """
        Wrap given tree in pyquery and return.
        If no tree supplied, will generate one from given page_numbers, or
        all page numbers.
        """
        if not page_numbers:
            page_numbers = []
        if tree is None:
            if not page_numbers and self.tree is not None:
                tree = self.tree
            else:
                tree = self.get_tree(page_numbers)
        if hasattr(tree, 'getroot'):
            tree = tree.getroot()
        return PyQuery(tree, css_translator=PDFQueryTranslator())
    def get_tree(self, *page_numbers):
        """
        Return lxml.etree.ElementTree for entire document, or page numbers
        given if any.
        """
        # trees are cached per requested page set
        cache_key = "_".join(map(str, _flatten(page_numbers)))
        tree = self._parse_tree_cacher.get(cache_key)
        if tree is None:
            # set up root
            root = parser.makeelement("pdfxml")
            if self.doc.info:
                # copy document metadata (Title, Author, ...) onto the root
                for k, v in list(self.doc.info[0].items()):
                    k = obj_to_string(k)
                    v = obj_to_string(resolve1(v))
                    try:
                        root.set(k, v)
                    except ValueError as e:
                        # Sometimes keys have a character in them, like ':',
                        # that isn't allowed in XML attribute names.
                        # If that happens we just replace non-word characters
                        # with '_'.
                        if "Invalid attribute name" in e.args[0]:
                            k = re.sub('\W', '_', k)
                            root.set(k, v)
            # Parse pages and append to root.
            # If nothing was passed in for page_numbers, we do this for all
            # pages, but if None was explicitly passed in, we skip it.
            if not(len(page_numbers) == 1 and page_numbers[0] is None):
                if page_numbers:
                    pages = [[n, self.get_layout(self.get_page(n))] for n in
                             _flatten(page_numbers)]
                else:
                    pages = enumerate(self.get_layouts())
                for n, page in pages:
                    page = self._xmlize(page)
                    page.set('page_index', obj_to_string(n))
                    page.set('page_label', self.doc.get_page_number(n))
                    root.append(page)
                self._clean_text(root)
            # wrap root in ElementTree
            tree = etree.ElementTree(root)
            self._parse_tree_cacher.set(cache_key, tree)
        return tree
    def _clean_text(self, branch):
        """
        Remove text from node if same text exists in its children.
        Apply string formatter if set.
        """
        if branch.text and self.input_text_formatter:
            branch.text = self.input_text_formatter(branch.text)
        try:
            for child in branch:
                self._clean_text(child)
                if branch.text and branch.text.find(child.text) >= 0:
                    branch.text = branch.text.replace(child.text, '', 1)
        except TypeError:  # not an iterable node
            pass
    def _xmlize(self, node, root=None):
        """
        Recursively convert a PDFMiner layout node into a LayoutElement
        XML node, carrying over coordinates and text and (optionally)
        merging/re-sorting children.
        """
        if isinstance(node, LayoutElement):
            # Already an XML element we can use
            branch = node
        else:
            # collect attributes of current node
            tags = self._getattrs(
                node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
                'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
            )
            if type(node) == LTImage:
                tags.update(self._getattrs(
                    node, 'colorspace', 'bits', 'imagemask', 'srcsize',
                    'stream', 'name', 'pts', 'linewidth')
                )
            elif type(node) == LTChar:
                tags.update(self._getattrs(
                    node, 'fontname', 'adv', 'upright', 'size')
                )
            elif type(node) == LTPage:
                tags.update(self._getattrs(node, 'pageid', 'rotate'))
            # create node
            branch = parser.makeelement(node.__class__.__name__, tags)
        branch.layout = node
        self._elements += [branch]  # make sure layout keeps state
        if root is None:
            root = branch
        # add text
        if hasattr(node, 'get_text'):
            branch.text = strip_invalid_xml_chars(node.get_text())
        # add children if node is an iterable
        if hasattr(node, '__iter__'):
            last = None
            for child in node:
                child = self._xmlize(child, root)
                if self.merge_tags and child.tag in self.merge_tags:
                    # merge consecutive char/anno children into one text run
                    if branch.text and child.text in branch.text:
                        continue
                    elif last is not None and last.tag in self.merge_tags:
                        last.text += child.text
                        last.set(
                            '_obj_id',
                            last.get('_obj_id','') + "," + child.get('_obj_id','')
                        )
                        continue
                # sort children by bounding boxes
                if self.resort:
                    _append_sorted(root, child, _comp_bbox)
                else:
                    branch.append(child)
                last = child
        return branch
    def _getattrs(self, obj, *attrs):
        """ Return dictionary of given attrs on given object, if they exist,
        processing through _filter_value().
        """
        filtered_attrs = {}
        for attr in attrs:
            if hasattr(obj, attr):
                filtered_attrs[attr] = obj_to_string(
                    self._filter_value(getattr(obj, attr))
                )
        return filtered_attrs
    def _filter_value(self, val):
        # round floats (recursively, for sequences) when configured to
        if self.round_floats:
            if type(val) == float:
                val = round(val, self.round_digits)
            elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
                val = [self._filter_value(item) for item in val]
        return val
    # page access stuff
    def get_page(self, page_number):
        """ Get PDFPage object -- 0-indexed."""
        return self._cached_pages(target_page=page_number)
    def get_layout(self, page):
        """ Get PDFMiner Layout object for given page object or page number. """
        if type(page) == int:
            page = self.get_page(page)
        self.interpreter.process_page(page)
        layout = self.device.get_result()
        layout = self._add_annots(layout, page.annots)
        return layout
    def get_layouts(self):
        """ Get list of PDFMiner Layout objects for each page. """
        return (self.get_layout(page) for page in self._cached_pages())
    def _cached_pages(self, target_page=-1):
        """
        Get a page or all pages from page generator, caching results.
        This is necessary because PDFMiner searches recursively for pages,
        so we won't know how many there are until we parse the whole document,
        which we don't want to do until we need to.
        """
        try:
            # pdfminer < 20131022
            self._pages_iter = self._pages_iter or self.doc.get_pages()
        except AttributeError:
            # pdfminer >= 20131022
            self._pages_iter = self._pages_iter or \
                               PDFPage.create_pages(self.doc)
        if target_page >= 0:
            while len(self._pages) <= target_page:
                next_page = next(self._pages_iter)
                if not next_page:
                    return None
                # NOTE(review): every cached page gets page_number 0 here,
                # regardless of its actual position -- confirm whether this
                # attribute is read anywhere before relying on it.
                next_page.page_number = 0
                self._pages += [next_page]
            try:
                return self._pages[target_page]
            except IndexError:
                return None
        self._pages += list(self._pages_iter)
        return self._pages
    def _add_annots(self, layout, annots):
        """Adds annotations to the layout object
        """
        if annots:
            for annot in resolve1(annots):
                annot = resolve1(annot)
                if annot.get('Rect') is not None:
                    # normalize to the x0/x1/y0/y1 scheme used by layout nodes
                    annot['bbox'] = annot.pop('Rect')  # Rename key
                    annot = self._set_hwxy_attrs(annot)
                try:
                    annot['URI'] = resolve1(annot['A'])['URI']
                except KeyError:
                    pass
                for k, v in six.iteritems(annot):
                    if not isinstance(v, six.string_types):
                        annot[k] = obj_to_string(v)
                elem = parser.makeelement('Annot', annot)
                layout.add(elem)
        return layout
    @staticmethod
    def _set_hwxy_attrs(attr):
        """Using the bbox attribute, set the h, w, x0, x1, y0, and y1
        attributes.
        """
        bbox = attr['bbox']
        attr['x0'] = bbox[0]
        attr['x1'] = bbox[2]
        attr['y0'] = bbox[1]
        attr['y1'] = bbox[3]
        attr['height'] = attr['y1'] - attr['y0']
        attr['width'] = attr['x1'] - attr['x0']
        return attr
if __name__ == "__main__":
    # Run the docstring examples in this module against the bundled sample
    # PDF; the path is relative to the working directory, not this file.
    import doctest
    pdf = PDFQuery("../examples/sample.pdf")
    doctest.testmod(extraglobs={'pdf': pdf}, optionflags=doctest.ELLIPSIS)
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
_box_in_box
|
python
|
def _box_in_box(el, child):
""" Return True if child is contained within el. """
return all([
float(el.get('x0')) <= float(child.get('x0')),
float(el.get('x1')) >= float(child.get('x1')),
float(el.get('y0')) <= float(child.get('y0')),
float(el.get('y1')) >= float(child.get('y1')),
])
|
Return True if child is contained within el.
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L63-L70
| null |
from __future__ import print_function
# -*- coding: utf-8 -*-
# builtins
import codecs
import json
import numbers
import re
import chardet
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # sorry py2.6! Ordering isn't that important for our purposes anyway.
# pdfminer
from pdfminer.psparser import PSLiteral
from pdfminer.pdfparser import PDFParser
try:
# pdfminer < 20131022
from pdfminer.pdfparser import PDFDocument, PDFPage
except ImportError:
# pdfminer >= 20131022
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.layout import LAParams, LTChar, LTImage, LTPage
from pdfminer.converter import PDFPageAggregator
from pdfminer.pdftypes import resolve1
# other dependencies
from pyquery import PyQuery
from lxml import etree
import cssselect
import six
from six.moves import map
from six.moves import zip
# local imports
from .pdftranslator import PDFQueryTranslator
from .cache import DummyCache
# Re-sort the PDFMiner Layout tree so elements that fit inside other elements
# will be children of them
def _append_sorted(root, el, comparator):
""" Add el as a child of root, or as a child of one of root's children.
Comparator is a function(a, b) returning > 0 if a is a child of b, < 0 if
b is a child of a, 0 if neither.
"""
for child in root:
rel = comparator(el, child)
if rel > 0:
# el fits inside child, add to child and return
_append_sorted(child, el, comparator)
return
if rel < 0:
# child fits inside el, move child into el (may move more than one)
_append_sorted(el, child, comparator)
# we weren't added to a child, so add to root
root.append(el)
def _box_in_box(el, child):
""" Return True if child is contained within el. """
return all([
float(el.get('x0')) <= float(child.get('x0')),
float(el.get('x1')) >= float(child.get('x1')),
float(el.get('y0')) <= float(child.get('y0')),
float(el.get('y1')) >= float(child.get('y1')),
])
_comp_bbox_keys_required = set(['x0', 'x1', 'y0', 'y1'])
def _comp_bbox(el, el2):
""" Return 1 if el in el2, -1 if el2 in el, else 0"""
# only compare if both elements have x/y coordinates
if _comp_bbox_keys_required <= set(el.keys()) and \
_comp_bbox_keys_required <= set(el2.keys()):
if _box_in_box(el2, el):
return 1
if _box_in_box(el, el2):
return -1
return 0
# assorted helpers
def _flatten(l, ltypes=(list, tuple)):
# via http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
# these might have to be removed from the start of a decoded string after
# conversion
bom_headers = set([
six.text_type(codecs.BOM_UTF8, 'utf8'),
six.text_type(codecs.BOM_UTF16_LE, 'utf-16LE'),
six.text_type(codecs.BOM_UTF16_BE, 'utf-16BE'),
six.text_type(codecs.BOM_UTF32_LE, 'utf-32LE'),
six.text_type(codecs.BOM_UTF32_BE, 'utf-32BE'),
])
def smart_unicode_decode(encoded_string):
"""
Given an encoded string of unknown format, detect the format with
chardet and return the unicode version.
Example input from bug #11:
('\xfe\xff\x00I\x00n\x00s\x00p\x00e\x00c\x00t\x00i\x00o\x00n\x00'
'\x00R\x00e\x00p\x00o\x00r\x00t\x00 \x00v\x002\x00.\x002')
"""
if not encoded_string:
return u''
# optimization -- first try ascii
try:
return encoded_string.decode('ascii')
except UnicodeDecodeError:
pass
# detect encoding
detected_encoding = chardet.detect(encoded_string)
# bug 54 -- depending on chardet version, if encoding is not guessed,
# either detected_encoding will be None or detected_encoding['encoding'] will be None
detected_encoding = detected_encoding['encoding'] if detected_encoding and detected_encoding.get('encoding') else 'utf8'
decoded_string = six.text_type(
encoded_string,
encoding=detected_encoding,
errors='replace'
)
# unicode string may still have useless BOM character at the beginning
if decoded_string and decoded_string[0] in bom_headers:
decoded_string = decoded_string[1:]
return decoded_string
def prepare_for_json_encoding(obj):
"""
Convert an arbitrary object into just JSON data types (list, dict, unicode str, int, bool, null).
"""
obj_type = type(obj)
if obj_type == list or obj_type == tuple:
return [prepare_for_json_encoding(item) for item in obj]
if obj_type == dict:
# alphabetizing keys lets us compare attributes for equality across runs
return OrderedDict(
(prepare_for_json_encoding(k),
prepare_for_json_encoding(obj[k])) for k in sorted(obj.keys())
)
if obj_type == six.binary_type:
return smart_unicode_decode(obj)
if obj_type == bool or obj is None or obj_type == six.text_type or isinstance(obj, numbers.Number):
return obj
if obj_type == PSLiteral:
# special case because pdfminer.six currently adds extra quotes to PSLiteral.__repr__
return u"/%s" % obj.name
return six.text_type(obj)
def obj_to_string(obj, top=True):
"""
Turn an arbitrary object into a unicode string. If complex (dict/list/tuple), will be json-encoded.
"""
obj = prepare_for_json_encoding(obj)
if type(obj) == six.text_type:
return obj
return json.dumps(obj)
# via http://stackoverflow.com/a/25920392/307769
# Matches runs of code points that are NOT valid in XML 1.0 text.
# NOTE: the supplementary-plane range must use \U00xxxxxx escapes -- a \u
# escape takes exactly 4 hex digits, so the old '\u10000-\u10FFFF' parsed as
# U+1000, '0', a '0'-U+10FF range, and literal 'F's, which wrongly stripped
# every astral-plane character (e.g. emoji).
invalid_xml_chars_re = re.compile(
    u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]+')
def strip_invalid_xml_chars(s):
    """Remove characters that may not appear in XML 1.0 text."""
    return invalid_xml_chars_re.sub('', s)
# custom PDFDocument class
class QPDFDocument(PDFDocument):
    def get_page_number(self, index):
        """
        Given an index, return page label as specified by
        catalog['PageLabels']['Nums']

        In a PDF, page labels are stored as a list of pairs, like
        [starting_index, label_format, starting_index, label_format ...]

        For example:
        [0, {'S': 'D', 'St': 151}, 4, {'S':'R', 'P':'Foo'}]

        So we have to first find the correct label_format based on the closest
        starting_index lower than the requested index, then use the
        label_format to convert the index to a page label.

        Label format meaning:
            /S = [
                    D Decimal arabic numerals
                    R Uppercase roman numerals
                    r Lowercase roman numerals
                    A Uppercase letters (A to Z for the first 26 pages, AA to ZZ
                        for the next 26, and so on)
                    a Lowercase letters (a to z for the first 26 pages, aa to zz
                        for the next 26, and so on)
                ] (if no /S, just use prefix ...)
            /P = text string label
            /St = integer start value

        Returns "" when the document carries no usable /PageLabels entry.
        """
        # get and cache page ranges
        if not hasattr(self, 'page_range_pairs'):
            try:
                page_ranges = resolve1(self.catalog['PageLabels'])['Nums']
                assert len(page_ranges) > 1 and len(page_ranges) % 2 == 0
                self.page_range_pairs = list(
                    reversed(list(zip(page_ranges[::2], page_ranges[1::2]))))
            except Exception:
                # missing or malformed /PageLabels -- treat as "no labels"
                self.page_range_pairs = []
        if not self.page_range_pairs:
            return ""
        # find page range containing index (pairs are stored in reverse, so
        # the first starting_index <= index is the tightest one)
        for starting_index, label_format in self.page_range_pairs:
            if starting_index <= index:
                break  # we found correct label_format
        label_format = resolve1(label_format)
        page_label = ""
        # handle numeric part of label
        if 'S' in label_format:
            # first find number for this page ...
            page_label = index - starting_index
            if 'St' in label_format:  # alternate start value
                page_label += label_format['St']
            else:
                page_label += 1
            # ... then convert to correct format
            num_type = label_format['S'].name
            # roman (upper or lower)
            if num_type.lower() == 'r':
                import roman
                page_label = roman.toRoman(page_label)
                if num_type == 'r':
                    page_label = page_label.lower()
            # letters
            elif num_type.lower() == 'a':
                # a to z for the first 26 pages, aa to zz for the next 26, and
                # so on. Floor division is required here: plain "/" yields a
                # float on Python 3 and str * float raises TypeError.
                letter = chr(page_label % 26 + 65)
                letter *= page_label // 26 + 1
                # NOTE(review): page_label == 1 yields 'B'/'b' rather than
                # 'A'/'a'; the mapping looks off by one -- confirm against
                # the PDF spec before changing behavior.
                if num_type == 'a':
                    letter = letter.lower()
                page_label = letter
            # decimal arabic
            else:  # if num_type == 'D':
                page_label = obj_to_string(page_label)
        # handle string prefix
        if 'P' in label_format:
            page_label = smart_unicode_decode(label_format['P']) + page_label
        return page_label
# create etree parser using custom Element class
class LayoutElement(etree.ElementBase):
@property
def layout(self):
if not hasattr(self, '_layout'):
self._layout = None
return self._layout
@layout.setter
def layout(self, value):
self._layout = value
parser_lookup = etree.ElementDefaultClassLookup(element=LayoutElement)
parser = etree.XMLParser()
parser.set_element_class_lookup(parser_lookup)
# main class
class PDFQuery(object):
def __init__(
self,
file,
merge_tags=('LTChar', 'LTAnno'),
round_floats=True,
round_digits=3,
input_text_formatter=None,
normalize_spaces=True,
resort=True,
parse_tree_cacher=None,
laparams={'all_texts':True, 'detect_vertical':True},
):
# store input
self.merge_tags = merge_tags
self.round_floats = round_floats
self.round_digits = round_digits
self.resort = resort
# set up input text formatting function, if any
if input_text_formatter:
self.input_text_formatter = input_text_formatter
elif normalize_spaces:
r = re.compile(r'\s+')
self.input_text_formatter = lambda s: re.sub(r, ' ', s)
else:
self.input_text_formatter = None
# open doc
if not hasattr(file, 'read'):
try:
file = open(file, 'rb')
except TypeError:
raise TypeError("File must be file object or filepath string.")
parser = PDFParser(file)
if hasattr(QPDFDocument, 'set_parser'):
# pdfminer < 20131022
doc = QPDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
else:
# pdfminer >= 20131022
doc = QPDFDocument(parser)
parser.set_document(doc)
if hasattr(doc, 'initialize'):
# as of pdfminer==20140328, "PDFDocument.initialize() method is
# removed and no longer needed."
doc.initialize()
self.doc = doc
self.parser = parser
self.tree = None
self.pq = None
self.file = file
if parse_tree_cacher:
self._parse_tree_cacher = parse_tree_cacher
self._parse_tree_cacher.set_hash_key(self.file)
else:
self._parse_tree_cacher = DummyCache()
# set up layout parsing
rsrcmgr = PDFResourceManager()
if type(laparams) == dict:
laparams = LAParams(**laparams)
self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
# caches
self._pages = []
self._pages_iter = None
self._elements = []
def load(self, *page_numbers):
"""
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
"""
self.tree = self.get_tree(*_flatten(page_numbers))
self.pq = self.get_pyquery(self.tree)
def extract(self, searches, tree=None, as_dict=True):
"""
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
"""
if self.tree is None or self.pq is None:
self.load()
if tree is None:
pq = self.pq
else:
pq = PyQuery(tree, css_translator=PDFQueryTranslator())
results = []
formatter = None
parent = pq
for search in searches:
if len(search) < 3:
search = list(search) + [formatter]
key, search, tmp_formatter = search
if key == 'with_formatter':
if isinstance(search, six.string_types):
# is a pyquery method name, e.g. 'text'
formatter = lambda o, search=search: getattr(o, search)()
elif hasattr(search, '__call__') or not search:
# is a method, or None to end formatting
formatter = search
else:
raise TypeError("Formatter should be either a pyquery "
"method name or a callable function.")
elif key == 'with_parent':
parent = pq(search) if search else pq
else:
try:
result = parent("*").filter(search) if \
hasattr(search, '__call__') else parent(search)
except cssselect.SelectorSyntaxError as e:
raise cssselect.SelectorSyntaxError(
"Error applying selector '%s': %s" % (search, e))
if tmp_formatter:
result = tmp_formatter(result)
results += result if type(result) == tuple else [[key, result]]
if as_dict:
results = dict(results)
return results
# tree building stuff
def get_pyquery(self, tree=None, page_numbers=None):
"""
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
"""
if not page_numbers:
page_numbers = []
if tree is None:
if not page_numbers and self.tree is not None:
tree = self.tree
else:
tree = self.get_tree(page_numbers)
if hasattr(tree, 'getroot'):
tree = tree.getroot()
return PyQuery(tree, css_translator=PDFQueryTranslator())
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree
def _clean_text(self, branch):
"""
Remove text from node if same text exists in its children.
Apply string formatter if set.
"""
if branch.text and self.input_text_formatter:
branch.text = self.input_text_formatter(branch.text)
try:
for child in branch:
self._clean_text(child)
if branch.text and branch.text.find(child.text) >= 0:
branch.text = branch.text.replace(child.text, '', 1)
except TypeError: # not an iterable node
pass
def _xmlize(self, node, root=None):
if isinstance(node, LayoutElement):
# Already an XML element we can use
branch = node
else:
# collect attributes of current node
tags = self._getattrs(
node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
)
if type(node) == LTImage:
tags.update(self._getattrs(
node, 'colorspace', 'bits', 'imagemask', 'srcsize',
'stream', 'name', 'pts', 'linewidth')
)
elif type(node) == LTChar:
tags.update(self._getattrs(
node, 'fontname', 'adv', 'upright', 'size')
)
elif type(node) == LTPage:
tags.update(self._getattrs(node, 'pageid', 'rotate'))
# create node
branch = parser.makeelement(node.__class__.__name__, tags)
branch.layout = node
self._elements += [branch] # make sure layout keeps state
if root is None:
root = branch
# add text
if hasattr(node, 'get_text'):
branch.text = strip_invalid_xml_chars(node.get_text())
# add children if node is an iterable
if hasattr(node, '__iter__'):
last = None
for child in node:
child = self._xmlize(child, root)
if self.merge_tags and child.tag in self.merge_tags:
if branch.text and child.text in branch.text:
continue
elif last is not None and last.tag in self.merge_tags:
last.text += child.text
last.set(
'_obj_id',
last.get('_obj_id','') + "," + child.get('_obj_id','')
)
continue
# sort children by bounding boxes
if self.resort:
_append_sorted(root, child, _comp_bbox)
else:
branch.append(child)
last = child
return branch
def _getattrs(self, obj, *attrs):
""" Return dictionary of given attrs on given object, if they exist,
processing through _filter_value().
"""
filtered_attrs = {}
for attr in attrs:
if hasattr(obj, attr):
filtered_attrs[attr] = obj_to_string(
self._filter_value(getattr(obj, attr))
)
return filtered_attrs
def _filter_value(self, val):
if self.round_floats:
if type(val) == float:
val = round(val, self.round_digits)
elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
val = [self._filter_value(item) for item in val]
return val
# page access stuff
def get_page(self, page_number):
""" Get PDFPage object -- 0-indexed."""
return self._cached_pages(target_page=page_number)
def get_layout(self, page):
""" Get PDFMiner Layout object for given page object or page number. """
if type(page) == int:
page = self.get_page(page)
self.interpreter.process_page(page)
layout = self.device.get_result()
layout = self._add_annots(layout, page.annots)
return layout
def get_layouts(self):
""" Get list of PDFMiner Layout objects for each page. """
return (self.get_layout(page) for page in self._cached_pages())
def _cached_pages(self, target_page=-1):
"""
Get a page or all pages from page generator, caching results.
This is necessary because PDFMiner searches recursively for pages,
so we won't know how many there are until we parse the whole document,
which we don't want to do until we need to.
"""
try:
# pdfminer < 20131022
self._pages_iter = self._pages_iter or self.doc.get_pages()
except AttributeError:
# pdfminer >= 20131022
self._pages_iter = self._pages_iter or \
PDFPage.create_pages(self.doc)
if target_page >= 0:
while len(self._pages) <= target_page:
next_page = next(self._pages_iter)
if not next_page:
return None
next_page.page_number = 0
self._pages += [next_page]
try:
return self._pages[target_page]
except IndexError:
return None
self._pages += list(self._pages_iter)
return self._pages
def _add_annots(self, layout, annots):
"""Adds annotations to the layout object
"""
if annots:
for annot in resolve1(annots):
annot = resolve1(annot)
if annot.get('Rect') is not None:
annot['bbox'] = annot.pop('Rect') # Rename key
annot = self._set_hwxy_attrs(annot)
try:
annot['URI'] = resolve1(annot['A'])['URI']
except KeyError:
pass
for k, v in six.iteritems(annot):
if not isinstance(v, six.string_types):
annot[k] = obj_to_string(v)
elem = parser.makeelement('Annot', annot)
layout.add(elem)
return layout
@staticmethod
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
if __name__ == "__main__":
import doctest
pdf = PDFQuery("../examples/sample.pdf")
doctest.testmod(extraglobs={'pdf': pdf}, optionflags=doctest.ELLIPSIS)
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
_comp_bbox
|
python
|
def _comp_bbox(el, el2):
""" Return 1 if el in el2, -1 if el2 in el, else 0"""
# only compare if both elements have x/y coordinates
if _comp_bbox_keys_required <= set(el.keys()) and \
_comp_bbox_keys_required <= set(el2.keys()):
if _box_in_box(el2, el):
return 1
if _box_in_box(el, el2):
return -1
return 0
|
Return 1 if el in el2, -1 if el2 in el, else 0
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L74-L83
| null |
from __future__ import print_function
# -*- coding: utf-8 -*-
# builtins
import codecs
import json
import numbers
import re
import chardet
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # sorry py2.6! Ordering isn't that important for our purposes anyway.
# pdfminer
from pdfminer.psparser import PSLiteral
from pdfminer.pdfparser import PDFParser
try:
# pdfminer < 20131022
from pdfminer.pdfparser import PDFDocument, PDFPage
except ImportError:
# pdfminer >= 20131022
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.layout import LAParams, LTChar, LTImage, LTPage
from pdfminer.converter import PDFPageAggregator
from pdfminer.pdftypes import resolve1
# other dependencies
from pyquery import PyQuery
from lxml import etree
import cssselect
import six
from six.moves import map
from six.moves import zip
# local imports
from .pdftranslator import PDFQueryTranslator
from .cache import DummyCache
# Re-sort the PDFMiner Layout tree so elements that fit inside other elements
# will be children of them
def _append_sorted(root, el, comparator):
""" Add el as a child of root, or as a child of one of root's children.
Comparator is a function(a, b) returning > 0 if a is a child of b, < 0 if
b is a child of a, 0 if neither.
"""
for child in root:
rel = comparator(el, child)
if rel > 0:
# el fits inside child, add to child and return
_append_sorted(child, el, comparator)
return
if rel < 0:
# child fits inside el, move child into el (may move more than one)
_append_sorted(el, child, comparator)
# we weren't added to a child, so add to root
root.append(el)
def _box_in_box(el, child):
""" Return True if child is contained within el. """
return all([
float(el.get('x0')) <= float(child.get('x0')),
float(el.get('x1')) >= float(child.get('x1')),
float(el.get('y0')) <= float(child.get('y0')),
float(el.get('y1')) >= float(child.get('y1')),
])
_comp_bbox_keys_required = set(['x0', 'x1', 'y0', 'y1'])
def _comp_bbox(el, el2):
""" Return 1 if el in el2, -1 if el2 in el, else 0"""
# only compare if both elements have x/y coordinates
if _comp_bbox_keys_required <= set(el.keys()) and \
_comp_bbox_keys_required <= set(el2.keys()):
if _box_in_box(el2, el):
return 1
if _box_in_box(el, el2):
return -1
return 0
# assorted helpers
def _flatten(l, ltypes=(list, tuple)):
# via http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
# these might have to be removed from the start of a decoded string after
# conversion
bom_headers = set([
six.text_type(codecs.BOM_UTF8, 'utf8'),
six.text_type(codecs.BOM_UTF16_LE, 'utf-16LE'),
six.text_type(codecs.BOM_UTF16_BE, 'utf-16BE'),
six.text_type(codecs.BOM_UTF32_LE, 'utf-32LE'),
six.text_type(codecs.BOM_UTF32_BE, 'utf-32BE'),
])
def smart_unicode_decode(encoded_string):
"""
Given an encoded string of unknown format, detect the format with
chardet and return the unicode version.
Example input from bug #11:
('\xfe\xff\x00I\x00n\x00s\x00p\x00e\x00c\x00t\x00i\x00o\x00n\x00'
'\x00R\x00e\x00p\x00o\x00r\x00t\x00 \x00v\x002\x00.\x002')
"""
if not encoded_string:
return u''
# optimization -- first try ascii
try:
return encoded_string.decode('ascii')
except UnicodeDecodeError:
pass
# detect encoding
detected_encoding = chardet.detect(encoded_string)
# bug 54 -- depending on chardet version, if encoding is not guessed,
# either detected_encoding will be None or detected_encoding['encoding'] will be None
detected_encoding = detected_encoding['encoding'] if detected_encoding and detected_encoding.get('encoding') else 'utf8'
decoded_string = six.text_type(
encoded_string,
encoding=detected_encoding,
errors='replace'
)
# unicode string may still have useless BOM character at the beginning
if decoded_string and decoded_string[0] in bom_headers:
decoded_string = decoded_string[1:]
return decoded_string
def prepare_for_json_encoding(obj):
"""
Convert an arbitrary object into just JSON data types (list, dict, unicode str, int, bool, null).
"""
obj_type = type(obj)
if obj_type == list or obj_type == tuple:
return [prepare_for_json_encoding(item) for item in obj]
if obj_type == dict:
# alphabetizing keys lets us compare attributes for equality across runs
return OrderedDict(
(prepare_for_json_encoding(k),
prepare_for_json_encoding(obj[k])) for k in sorted(obj.keys())
)
if obj_type == six.binary_type:
return smart_unicode_decode(obj)
if obj_type == bool or obj is None or obj_type == six.text_type or isinstance(obj, numbers.Number):
return obj
if obj_type == PSLiteral:
# special case because pdfminer.six currently adds extra quotes to PSLiteral.__repr__
return u"/%s" % obj.name
return six.text_type(obj)
def obj_to_string(obj, top=True):
"""
Turn an arbitrary object into a unicode string. If complex (dict/list/tuple), will be json-encoded.
"""
obj = prepare_for_json_encoding(obj)
if type(obj) == six.text_type:
return obj
return json.dumps(obj)
# via http://stackoverflow.com/a/25920392/307769
invalid_xml_chars_re = re.compile(u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\u10000-\u10FFFF]+')
def strip_invalid_xml_chars(s):
return invalid_xml_chars_re.sub('', s)
# custom PDFDocument class
class QPDFDocument(PDFDocument):
def get_page_number(self, index):
"""
Given an index, return page label as specified by
catalog['PageLabels']['Nums']
In a PDF, page labels are stored as a list of pairs, like
[starting_index, label_format, starting_index, label_format ...]
For example:
[0, {'S': 'D', 'St': 151}, 4, {'S':'R', 'P':'Foo'}]
So we have to first find the correct label_format based on the closest
starting_index lower than the requested index, then use the
label_format to convert the index to a page label.
Label format meaning:
/S = [
D Decimal arabic numerals
R Uppercase roman numerals
r Lowercase roman numerals
A Uppercase letters (A to Z for the first 26 pages, AA to ZZ
for the next 26, and so on)
a Lowercase letters (a to z for the first 26 pages, aa to zz
for the next 26, and so on)
] (if no /S, just use prefix ...)
/P = text string label
/St = integer start value
"""
# get and cache page ranges
if not hasattr(self, 'page_range_pairs'):
try:
page_ranges = resolve1(self.catalog['PageLabels'])['Nums']
assert len(page_ranges) > 1 and len(page_ranges) % 2 == 0
self.page_range_pairs = list(
reversed(list(zip(page_ranges[::2], page_ranges[1::2]))))
except:
self.page_range_pairs = []
if not self.page_range_pairs:
return ""
# find page range containing index
for starting_index, label_format in self.page_range_pairs:
if starting_index <= index:
break # we found correct label_format
label_format = resolve1(label_format)
page_label = ""
# handle numeric part of label
if 'S' in label_format:
# first find number for this page ...
page_label = index - starting_index
if 'St' in label_format: # alternate start value
page_label += label_format['St']
else:
page_label += 1
# ... then convert to correct format
num_type = label_format['S'].name
# roman (upper or lower)
if num_type.lower() == 'r':
import roman
page_label = roman.toRoman(page_label)
if num_type == 'r':
page_label = page_label.lower()
# letters
elif num_type.lower() == 'a':
# a to z for the first 26 pages, aa to zz for the next 26, and
# so on
letter = chr(page_label % 26 + 65)
letter *= page_label / 26 + 1
if num_type == 'a':
letter = letter.lower()
page_label = letter
# decimal arabic
else: # if num_type == 'D':
page_label = obj_to_string(page_label)
# handle string prefix
if 'P' in label_format:
page_label = smart_unicode_decode(label_format['P']) + page_label
return page_label
# create etree parser using custom Element class
class LayoutElement(etree.ElementBase):
@property
def layout(self):
if not hasattr(self, '_layout'):
self._layout = None
return self._layout
@layout.setter
def layout(self, value):
self._layout = value
parser_lookup = etree.ElementDefaultClassLookup(element=LayoutElement)
parser = etree.XMLParser()
parser.set_element_class_lookup(parser_lookup)
# main class
class PDFQuery(object):
def __init__(
self,
file,
merge_tags=('LTChar', 'LTAnno'),
round_floats=True,
round_digits=3,
input_text_formatter=None,
normalize_spaces=True,
resort=True,
parse_tree_cacher=None,
laparams={'all_texts':True, 'detect_vertical':True},
):
# store input
self.merge_tags = merge_tags
self.round_floats = round_floats
self.round_digits = round_digits
self.resort = resort
# set up input text formatting function, if any
if input_text_formatter:
self.input_text_formatter = input_text_formatter
elif normalize_spaces:
r = re.compile(r'\s+')
self.input_text_formatter = lambda s: re.sub(r, ' ', s)
else:
self.input_text_formatter = None
# open doc
if not hasattr(file, 'read'):
try:
file = open(file, 'rb')
except TypeError:
raise TypeError("File must be file object or filepath string.")
parser = PDFParser(file)
if hasattr(QPDFDocument, 'set_parser'):
# pdfminer < 20131022
doc = QPDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
else:
# pdfminer >= 20131022
doc = QPDFDocument(parser)
parser.set_document(doc)
if hasattr(doc, 'initialize'):
# as of pdfminer==20140328, "PDFDocument.initialize() method is
# removed and no longer needed."
doc.initialize()
self.doc = doc
self.parser = parser
self.tree = None
self.pq = None
self.file = file
if parse_tree_cacher:
self._parse_tree_cacher = parse_tree_cacher
self._parse_tree_cacher.set_hash_key(self.file)
else:
self._parse_tree_cacher = DummyCache()
# set up layout parsing
rsrcmgr = PDFResourceManager()
if type(laparams) == dict:
laparams = LAParams(**laparams)
self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
# caches
self._pages = []
self._pages_iter = None
self._elements = []
def load(self, *page_numbers):
"""
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
"""
self.tree = self.get_tree(*_flatten(page_numbers))
self.pq = self.get_pyquery(self.tree)
def extract(self, searches, tree=None, as_dict=True):
"""
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
"""
if self.tree is None or self.pq is None:
self.load()
if tree is None:
pq = self.pq
else:
pq = PyQuery(tree, css_translator=PDFQueryTranslator())
results = []
formatter = None
parent = pq
for search in searches:
if len(search) < 3:
search = list(search) + [formatter]
key, search, tmp_formatter = search
if key == 'with_formatter':
if isinstance(search, six.string_types):
# is a pyquery method name, e.g. 'text'
formatter = lambda o, search=search: getattr(o, search)()
elif hasattr(search, '__call__') or not search:
# is a method, or None to end formatting
formatter = search
else:
raise TypeError("Formatter should be either a pyquery "
"method name or a callable function.")
elif key == 'with_parent':
parent = pq(search) if search else pq
else:
try:
result = parent("*").filter(search) if \
hasattr(search, '__call__') else parent(search)
except cssselect.SelectorSyntaxError as e:
raise cssselect.SelectorSyntaxError(
"Error applying selector '%s': %s" % (search, e))
if tmp_formatter:
result = tmp_formatter(result)
results += result if type(result) == tuple else [[key, result]]
if as_dict:
results = dict(results)
return results
# tree building stuff
def get_pyquery(self, tree=None, page_numbers=None):
"""
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
"""
if not page_numbers:
page_numbers = []
if tree is None:
if not page_numbers and self.tree is not None:
tree = self.tree
else:
tree = self.get_tree(page_numbers)
if hasattr(tree, 'getroot'):
tree = tree.getroot()
return PyQuery(tree, css_translator=PDFQueryTranslator())
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree
def _clean_text(self, branch):
"""
Remove text from node if same text exists in its children.
Apply string formatter if set.
"""
if branch.text and self.input_text_formatter:
branch.text = self.input_text_formatter(branch.text)
try:
for child in branch:
self._clean_text(child)
if branch.text and branch.text.find(child.text) >= 0:
branch.text = branch.text.replace(child.text, '', 1)
except TypeError: # not an iterable node
pass
def _xmlize(self, node, root=None):
if isinstance(node, LayoutElement):
# Already an XML element we can use
branch = node
else:
# collect attributes of current node
tags = self._getattrs(
node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
)
if type(node) == LTImage:
tags.update(self._getattrs(
node, 'colorspace', 'bits', 'imagemask', 'srcsize',
'stream', 'name', 'pts', 'linewidth')
)
elif type(node) == LTChar:
tags.update(self._getattrs(
node, 'fontname', 'adv', 'upright', 'size')
)
elif type(node) == LTPage:
tags.update(self._getattrs(node, 'pageid', 'rotate'))
# create node
branch = parser.makeelement(node.__class__.__name__, tags)
branch.layout = node
self._elements += [branch] # make sure layout keeps state
if root is None:
root = branch
# add text
if hasattr(node, 'get_text'):
branch.text = strip_invalid_xml_chars(node.get_text())
# add children if node is an iterable
if hasattr(node, '__iter__'):
last = None
for child in node:
child = self._xmlize(child, root)
if self.merge_tags and child.tag in self.merge_tags:
if branch.text and child.text in branch.text:
continue
elif last is not None and last.tag in self.merge_tags:
last.text += child.text
last.set(
'_obj_id',
last.get('_obj_id','') + "," + child.get('_obj_id','')
)
continue
# sort children by bounding boxes
if self.resort:
_append_sorted(root, child, _comp_bbox)
else:
branch.append(child)
last = child
return branch
def _getattrs(self, obj, *attrs):
""" Return dictionary of given attrs on given object, if they exist,
processing through _filter_value().
"""
filtered_attrs = {}
for attr in attrs:
if hasattr(obj, attr):
filtered_attrs[attr] = obj_to_string(
self._filter_value(getattr(obj, attr))
)
return filtered_attrs
def _filter_value(self, val):
if self.round_floats:
if type(val) == float:
val = round(val, self.round_digits)
elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
val = [self._filter_value(item) for item in val]
return val
# page access stuff
def get_page(self, page_number):
""" Get PDFPage object -- 0-indexed."""
return self._cached_pages(target_page=page_number)
def get_layout(self, page):
""" Get PDFMiner Layout object for given page object or page number. """
if type(page) == int:
page = self.get_page(page)
self.interpreter.process_page(page)
layout = self.device.get_result()
layout = self._add_annots(layout, page.annots)
return layout
def get_layouts(self):
""" Get list of PDFMiner Layout objects for each page. """
return (self.get_layout(page) for page in self._cached_pages())
def _cached_pages(self, target_page=-1):
"""
Get a page or all pages from page generator, caching results.
This is necessary because PDFMiner searches recursively for pages,
so we won't know how many there are until we parse the whole document,
which we don't want to do until we need to.
"""
try:
# pdfminer < 20131022
self._pages_iter = self._pages_iter or self.doc.get_pages()
except AttributeError:
# pdfminer >= 20131022
self._pages_iter = self._pages_iter or \
PDFPage.create_pages(self.doc)
if target_page >= 0:
while len(self._pages) <= target_page:
next_page = next(self._pages_iter)
if not next_page:
return None
next_page.page_number = 0
self._pages += [next_page]
try:
return self._pages[target_page]
except IndexError:
return None
self._pages += list(self._pages_iter)
return self._pages
def _add_annots(self, layout, annots):
"""Adds annotations to the layout object
"""
if annots:
for annot in resolve1(annots):
annot = resolve1(annot)
if annot.get('Rect') is not None:
annot['bbox'] = annot.pop('Rect') # Rename key
annot = self._set_hwxy_attrs(annot)
try:
annot['URI'] = resolve1(annot['A'])['URI']
except KeyError:
pass
for k, v in six.iteritems(annot):
if not isinstance(v, six.string_types):
annot[k] = obj_to_string(v)
elem = parser.makeelement('Annot', annot)
layout.add(elem)
return layout
@staticmethod
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
if __name__ == "__main__":
import doctest
pdf = PDFQuery("../examples/sample.pdf")
doctest.testmod(extraglobs={'pdf': pdf}, optionflags=doctest.ELLIPSIS)
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
smart_unicode_decode
|
python
|
def smart_unicode_decode(encoded_string):
"""
Given an encoded string of unknown format, detect the format with
chardet and return the unicode version.
Example input from bug #11:
('\xfe\xff\x00I\x00n\x00s\x00p\x00e\x00c\x00t\x00i\x00o\x00n\x00'
'\x00R\x00e\x00p\x00o\x00r\x00t\x00 \x00v\x002\x00.\x002')
"""
if not encoded_string:
return u''
# optimization -- first try ascii
try:
return encoded_string.decode('ascii')
except UnicodeDecodeError:
pass
# detect encoding
detected_encoding = chardet.detect(encoded_string)
# bug 54 -- depending on chardet version, if encoding is not guessed,
# either detected_encoding will be None or detected_encoding['encoding'] will be None
detected_encoding = detected_encoding['encoding'] if detected_encoding and detected_encoding.get('encoding') else 'utf8'
decoded_string = six.text_type(
encoded_string,
encoding=detected_encoding,
errors='replace'
)
# unicode string may still have useless BOM character at the beginning
if decoded_string and decoded_string[0] in bom_headers:
decoded_string = decoded_string[1:]
return decoded_string
|
Given an encoded string of unknown format, detect the format with
chardet and return the unicode version.
Example input from bug #11:
('\xfe\xff\x00I\x00n\x00s\x00p\x00e\x00c\x00t\x00i\x00o\x00n\x00'
'\x00R\x00e\x00p\x00o\x00r\x00t\x00 \x00v\x002\x00.\x002')
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L114-L146
| null |
from __future__ import print_function
# -*- coding: utf-8 -*-
# builtins
import codecs
import json
import numbers
import re
import chardet
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # sorry py2.6! Ordering isn't that important for our purposes anyway.
# pdfminer
from pdfminer.psparser import PSLiteral
from pdfminer.pdfparser import PDFParser
try:
# pdfminer < 20131022
from pdfminer.pdfparser import PDFDocument, PDFPage
except ImportError:
# pdfminer >= 20131022
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.layout import LAParams, LTChar, LTImage, LTPage
from pdfminer.converter import PDFPageAggregator
from pdfminer.pdftypes import resolve1
# other dependencies
from pyquery import PyQuery
from lxml import etree
import cssselect
import six
from six.moves import map
from six.moves import zip
# local imports
from .pdftranslator import PDFQueryTranslator
from .cache import DummyCache
# Re-sort the PDFMiner Layout tree so elements that fit inside other elements
# will be children of them
def _append_sorted(root, el, comparator):
""" Add el as a child of root, or as a child of one of root's children.
Comparator is a function(a, b) returning > 0 if a is a child of b, < 0 if
b is a child of a, 0 if neither.
"""
for child in root:
rel = comparator(el, child)
if rel > 0:
# el fits inside child, add to child and return
_append_sorted(child, el, comparator)
return
if rel < 0:
# child fits inside el, move child into el (may move more than one)
_append_sorted(el, child, comparator)
# we weren't added to a child, so add to root
root.append(el)
def _box_in_box(el, child):
""" Return True if child is contained within el. """
return all([
float(el.get('x0')) <= float(child.get('x0')),
float(el.get('x1')) >= float(child.get('x1')),
float(el.get('y0')) <= float(child.get('y0')),
float(el.get('y1')) >= float(child.get('y1')),
])
_comp_bbox_keys_required = set(['x0', 'x1', 'y0', 'y1'])
def _comp_bbox(el, el2):
""" Return 1 if el in el2, -1 if el2 in el, else 0"""
# only compare if both elements have x/y coordinates
if _comp_bbox_keys_required <= set(el.keys()) and \
_comp_bbox_keys_required <= set(el2.keys()):
if _box_in_box(el2, el):
return 1
if _box_in_box(el, el2):
return -1
return 0
# assorted helpers
def _flatten(l, ltypes=(list, tuple)):
# via http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
# these might have to be removed from the start of a decoded string after
# conversion
bom_headers = set([
six.text_type(codecs.BOM_UTF8, 'utf8'),
six.text_type(codecs.BOM_UTF16_LE, 'utf-16LE'),
six.text_type(codecs.BOM_UTF16_BE, 'utf-16BE'),
six.text_type(codecs.BOM_UTF32_LE, 'utf-32LE'),
six.text_type(codecs.BOM_UTF32_BE, 'utf-32BE'),
])
def smart_unicode_decode(encoded_string):
"""
Given an encoded string of unknown format, detect the format with
chardet and return the unicode version.
Example input from bug #11:
('\xfe\xff\x00I\x00n\x00s\x00p\x00e\x00c\x00t\x00i\x00o\x00n\x00'
'\x00R\x00e\x00p\x00o\x00r\x00t\x00 \x00v\x002\x00.\x002')
"""
if not encoded_string:
return u''
# optimization -- first try ascii
try:
return encoded_string.decode('ascii')
except UnicodeDecodeError:
pass
# detect encoding
detected_encoding = chardet.detect(encoded_string)
# bug 54 -- depending on chardet version, if encoding is not guessed,
# either detected_encoding will be None or detected_encoding['encoding'] will be None
detected_encoding = detected_encoding['encoding'] if detected_encoding and detected_encoding.get('encoding') else 'utf8'
decoded_string = six.text_type(
encoded_string,
encoding=detected_encoding,
errors='replace'
)
# unicode string may still have useless BOM character at the beginning
if decoded_string and decoded_string[0] in bom_headers:
decoded_string = decoded_string[1:]
return decoded_string
def prepare_for_json_encoding(obj):
"""
Convert an arbitrary object into just JSON data types (list, dict, unicode str, int, bool, null).
"""
obj_type = type(obj)
if obj_type == list or obj_type == tuple:
return [prepare_for_json_encoding(item) for item in obj]
if obj_type == dict:
# alphabetizing keys lets us compare attributes for equality across runs
return OrderedDict(
(prepare_for_json_encoding(k),
prepare_for_json_encoding(obj[k])) for k in sorted(obj.keys())
)
if obj_type == six.binary_type:
return smart_unicode_decode(obj)
if obj_type == bool or obj is None or obj_type == six.text_type or isinstance(obj, numbers.Number):
return obj
if obj_type == PSLiteral:
# special case because pdfminer.six currently adds extra quotes to PSLiteral.__repr__
return u"/%s" % obj.name
return six.text_type(obj)
def obj_to_string(obj, top=True):
"""
Turn an arbitrary object into a unicode string. If complex (dict/list/tuple), will be json-encoded.
"""
obj = prepare_for_json_encoding(obj)
if type(obj) == six.text_type:
return obj
return json.dumps(obj)
# via http://stackoverflow.com/a/25920392/307769
invalid_xml_chars_re = re.compile(u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\u10000-\u10FFFF]+')
def strip_invalid_xml_chars(s):
return invalid_xml_chars_re.sub('', s)
# custom PDFDocument class
class QPDFDocument(PDFDocument):
def get_page_number(self, index):
"""
Given an index, return page label as specified by
catalog['PageLabels']['Nums']
In a PDF, page labels are stored as a list of pairs, like
[starting_index, label_format, starting_index, label_format ...]
For example:
[0, {'S': 'D', 'St': 151}, 4, {'S':'R', 'P':'Foo'}]
So we have to first find the correct label_format based on the closest
starting_index lower than the requested index, then use the
label_format to convert the index to a page label.
Label format meaning:
/S = [
D Decimal arabic numerals
R Uppercase roman numerals
r Lowercase roman numerals
A Uppercase letters (A to Z for the first 26 pages, AA to ZZ
for the next 26, and so on)
a Lowercase letters (a to z for the first 26 pages, aa to zz
for the next 26, and so on)
] (if no /S, just use prefix ...)
/P = text string label
/St = integer start value
"""
# get and cache page ranges
if not hasattr(self, 'page_range_pairs'):
try:
page_ranges = resolve1(self.catalog['PageLabels'])['Nums']
assert len(page_ranges) > 1 and len(page_ranges) % 2 == 0
self.page_range_pairs = list(
reversed(list(zip(page_ranges[::2], page_ranges[1::2]))))
except:
self.page_range_pairs = []
if not self.page_range_pairs:
return ""
# find page range containing index
for starting_index, label_format in self.page_range_pairs:
if starting_index <= index:
break # we found correct label_format
label_format = resolve1(label_format)
page_label = ""
# handle numeric part of label
if 'S' in label_format:
# first find number for this page ...
page_label = index - starting_index
if 'St' in label_format: # alternate start value
page_label += label_format['St']
else:
page_label += 1
# ... then convert to correct format
num_type = label_format['S'].name
# roman (upper or lower)
if num_type.lower() == 'r':
import roman
page_label = roman.toRoman(page_label)
if num_type == 'r':
page_label = page_label.lower()
# letters
elif num_type.lower() == 'a':
# a to z for the first 26 pages, aa to zz for the next 26, and
# so on
letter = chr(page_label % 26 + 65)
letter *= page_label / 26 + 1
if num_type == 'a':
letter = letter.lower()
page_label = letter
# decimal arabic
else: # if num_type == 'D':
page_label = obj_to_string(page_label)
# handle string prefix
if 'P' in label_format:
page_label = smart_unicode_decode(label_format['P']) + page_label
return page_label
# create etree parser using custom Element class
class LayoutElement(etree.ElementBase):
@property
def layout(self):
if not hasattr(self, '_layout'):
self._layout = None
return self._layout
@layout.setter
def layout(self, value):
self._layout = value
parser_lookup = etree.ElementDefaultClassLookup(element=LayoutElement)
parser = etree.XMLParser()
parser.set_element_class_lookup(parser_lookup)
# main class
class PDFQuery(object):
def __init__(
self,
file,
merge_tags=('LTChar', 'LTAnno'),
round_floats=True,
round_digits=3,
input_text_formatter=None,
normalize_spaces=True,
resort=True,
parse_tree_cacher=None,
laparams={'all_texts':True, 'detect_vertical':True},
):
# store input
self.merge_tags = merge_tags
self.round_floats = round_floats
self.round_digits = round_digits
self.resort = resort
# set up input text formatting function, if any
if input_text_formatter:
self.input_text_formatter = input_text_formatter
elif normalize_spaces:
r = re.compile(r'\s+')
self.input_text_formatter = lambda s: re.sub(r, ' ', s)
else:
self.input_text_formatter = None
# open doc
if not hasattr(file, 'read'):
try:
file = open(file, 'rb')
except TypeError:
raise TypeError("File must be file object or filepath string.")
parser = PDFParser(file)
if hasattr(QPDFDocument, 'set_parser'):
# pdfminer < 20131022
doc = QPDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
else:
# pdfminer >= 20131022
doc = QPDFDocument(parser)
parser.set_document(doc)
if hasattr(doc, 'initialize'):
# as of pdfminer==20140328, "PDFDocument.initialize() method is
# removed and no longer needed."
doc.initialize()
self.doc = doc
self.parser = parser
self.tree = None
self.pq = None
self.file = file
if parse_tree_cacher:
self._parse_tree_cacher = parse_tree_cacher
self._parse_tree_cacher.set_hash_key(self.file)
else:
self._parse_tree_cacher = DummyCache()
# set up layout parsing
rsrcmgr = PDFResourceManager()
if type(laparams) == dict:
laparams = LAParams(**laparams)
self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
# caches
self._pages = []
self._pages_iter = None
self._elements = []
def load(self, *page_numbers):
"""
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
"""
self.tree = self.get_tree(*_flatten(page_numbers))
self.pq = self.get_pyquery(self.tree)
def extract(self, searches, tree=None, as_dict=True):
"""
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
"""
if self.tree is None or self.pq is None:
self.load()
if tree is None:
pq = self.pq
else:
pq = PyQuery(tree, css_translator=PDFQueryTranslator())
results = []
formatter = None
parent = pq
for search in searches:
if len(search) < 3:
search = list(search) + [formatter]
key, search, tmp_formatter = search
if key == 'with_formatter':
if isinstance(search, six.string_types):
# is a pyquery method name, e.g. 'text'
formatter = lambda o, search=search: getattr(o, search)()
elif hasattr(search, '__call__') or not search:
# is a method, or None to end formatting
formatter = search
else:
raise TypeError("Formatter should be either a pyquery "
"method name or a callable function.")
elif key == 'with_parent':
parent = pq(search) if search else pq
else:
try:
result = parent("*").filter(search) if \
hasattr(search, '__call__') else parent(search)
except cssselect.SelectorSyntaxError as e:
raise cssselect.SelectorSyntaxError(
"Error applying selector '%s': %s" % (search, e))
if tmp_formatter:
result = tmp_formatter(result)
results += result if type(result) == tuple else [[key, result]]
if as_dict:
results = dict(results)
return results
# tree building stuff
def get_pyquery(self, tree=None, page_numbers=None):
"""
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
"""
if not page_numbers:
page_numbers = []
if tree is None:
if not page_numbers and self.tree is not None:
tree = self.tree
else:
tree = self.get_tree(page_numbers)
if hasattr(tree, 'getroot'):
tree = tree.getroot()
return PyQuery(tree, css_translator=PDFQueryTranslator())
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree
def _clean_text(self, branch):
"""
Remove text from node if same text exists in its children.
Apply string formatter if set.
"""
if branch.text and self.input_text_formatter:
branch.text = self.input_text_formatter(branch.text)
try:
for child in branch:
self._clean_text(child)
if branch.text and branch.text.find(child.text) >= 0:
branch.text = branch.text.replace(child.text, '', 1)
except TypeError: # not an iterable node
pass
def _xmlize(self, node, root=None):
if isinstance(node, LayoutElement):
# Already an XML element we can use
branch = node
else:
# collect attributes of current node
tags = self._getattrs(
node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
)
if type(node) == LTImage:
tags.update(self._getattrs(
node, 'colorspace', 'bits', 'imagemask', 'srcsize',
'stream', 'name', 'pts', 'linewidth')
)
elif type(node) == LTChar:
tags.update(self._getattrs(
node, 'fontname', 'adv', 'upright', 'size')
)
elif type(node) == LTPage:
tags.update(self._getattrs(node, 'pageid', 'rotate'))
# create node
branch = parser.makeelement(node.__class__.__name__, tags)
branch.layout = node
self._elements += [branch] # make sure layout keeps state
if root is None:
root = branch
# add text
if hasattr(node, 'get_text'):
branch.text = strip_invalid_xml_chars(node.get_text())
# add children if node is an iterable
if hasattr(node, '__iter__'):
last = None
for child in node:
child = self._xmlize(child, root)
if self.merge_tags and child.tag in self.merge_tags:
if branch.text and child.text in branch.text:
continue
elif last is not None and last.tag in self.merge_tags:
last.text += child.text
last.set(
'_obj_id',
last.get('_obj_id','') + "," + child.get('_obj_id','')
)
continue
# sort children by bounding boxes
if self.resort:
_append_sorted(root, child, _comp_bbox)
else:
branch.append(child)
last = child
return branch
def _getattrs(self, obj, *attrs):
""" Return dictionary of given attrs on given object, if they exist,
processing through _filter_value().
"""
filtered_attrs = {}
for attr in attrs:
if hasattr(obj, attr):
filtered_attrs[attr] = obj_to_string(
self._filter_value(getattr(obj, attr))
)
return filtered_attrs
def _filter_value(self, val):
if self.round_floats:
if type(val) == float:
val = round(val, self.round_digits)
elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
val = [self._filter_value(item) for item in val]
return val
# page access stuff
def get_page(self, page_number):
""" Get PDFPage object -- 0-indexed."""
return self._cached_pages(target_page=page_number)
def get_layout(self, page):
""" Get PDFMiner Layout object for given page object or page number. """
if type(page) == int:
page = self.get_page(page)
self.interpreter.process_page(page)
layout = self.device.get_result()
layout = self._add_annots(layout, page.annots)
return layout
def get_layouts(self):
""" Get list of PDFMiner Layout objects for each page. """
return (self.get_layout(page) for page in self._cached_pages())
def _cached_pages(self, target_page=-1):
"""
Get a page or all pages from page generator, caching results.
This is necessary because PDFMiner searches recursively for pages,
so we won't know how many there are until we parse the whole document,
which we don't want to do until we need to.
"""
try:
# pdfminer < 20131022
self._pages_iter = self._pages_iter or self.doc.get_pages()
except AttributeError:
# pdfminer >= 20131022
self._pages_iter = self._pages_iter or \
PDFPage.create_pages(self.doc)
if target_page >= 0:
while len(self._pages) <= target_page:
next_page = next(self._pages_iter)
if not next_page:
return None
next_page.page_number = 0
self._pages += [next_page]
try:
return self._pages[target_page]
except IndexError:
return None
self._pages += list(self._pages_iter)
return self._pages
def _add_annots(self, layout, annots):
"""Adds annotations to the layout object
"""
if annots:
for annot in resolve1(annots):
annot = resolve1(annot)
if annot.get('Rect') is not None:
annot['bbox'] = annot.pop('Rect') # Rename key
annot = self._set_hwxy_attrs(annot)
try:
annot['URI'] = resolve1(annot['A'])['URI']
except KeyError:
pass
for k, v in six.iteritems(annot):
if not isinstance(v, six.string_types):
annot[k] = obj_to_string(v)
elem = parser.makeelement('Annot', annot)
layout.add(elem)
return layout
@staticmethod
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
if __name__ == "__main__":
import doctest
pdf = PDFQuery("../examples/sample.pdf")
doctest.testmod(extraglobs={'pdf': pdf}, optionflags=doctest.ELLIPSIS)
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
prepare_for_json_encoding
|
python
|
def prepare_for_json_encoding(obj):
"""
Convert an arbitrary object into just JSON data types (list, dict, unicode str, int, bool, null).
"""
obj_type = type(obj)
if obj_type == list or obj_type == tuple:
return [prepare_for_json_encoding(item) for item in obj]
if obj_type == dict:
# alphabetizing keys lets us compare attributes for equality across runs
return OrderedDict(
(prepare_for_json_encoding(k),
prepare_for_json_encoding(obj[k])) for k in sorted(obj.keys())
)
if obj_type == six.binary_type:
return smart_unicode_decode(obj)
if obj_type == bool or obj is None or obj_type == six.text_type or isinstance(obj, numbers.Number):
return obj
if obj_type == PSLiteral:
# special case because pdfminer.six currently adds extra quotes to PSLiteral.__repr__
return u"/%s" % obj.name
return six.text_type(obj)
|
Convert an arbitrary object into just JSON data types (list, dict, unicode str, int, bool, null).
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L148-L168
|
[
"def smart_unicode_decode(encoded_string):\n \"\"\"\n Given an encoded string of unknown format, detect the format with\n chardet and return the unicode version.\n Example input from bug #11:\n ('\\xfe\\xff\\x00I\\x00n\\x00s\\x00p\\x00e\\x00c\\x00t\\x00i\\x00o\\x00n\\x00'\n '\\x00R\\x00e\\x00p\\x00o\\x00r\\x00t\\x00 \\x00v\\x002\\x00.\\x002')\n \"\"\"\n if not encoded_string:\n return u''\n\n # optimization -- first try ascii\n try:\n return encoded_string.decode('ascii')\n except UnicodeDecodeError:\n pass\n\n # detect encoding\n detected_encoding = chardet.detect(encoded_string)\n # bug 54 -- depending on chardet version, if encoding is not guessed,\n # either detected_encoding will be None or detected_encoding['encoding'] will be None\n detected_encoding = detected_encoding['encoding'] if detected_encoding and detected_encoding.get('encoding') else 'utf8'\n decoded_string = six.text_type(\n encoded_string,\n encoding=detected_encoding,\n errors='replace'\n )\n\n # unicode string may still have useless BOM character at the beginning\n if decoded_string and decoded_string[0] in bom_headers:\n decoded_string = decoded_string[1:]\n\n return decoded_string\n"
] |
from __future__ import print_function
# -*- coding: utf-8 -*-
# builtins
import codecs
import json
import numbers
import re
import chardet
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # sorry py2.6! Ordering isn't that important for our purposes anyway.
# pdfminer
from pdfminer.psparser import PSLiteral
from pdfminer.pdfparser import PDFParser
try:
# pdfminer < 20131022
from pdfminer.pdfparser import PDFDocument, PDFPage
except ImportError:
# pdfminer >= 20131022
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.layout import LAParams, LTChar, LTImage, LTPage
from pdfminer.converter import PDFPageAggregator
from pdfminer.pdftypes import resolve1
# other dependencies
from pyquery import PyQuery
from lxml import etree
import cssselect
import six
from six.moves import map
from six.moves import zip
# local imports
from .pdftranslator import PDFQueryTranslator
from .cache import DummyCache
# Re-sort the PDFMiner Layout tree so elements that fit inside other elements
# will be children of them
def _append_sorted(root, el, comparator):
    """ Add el as a child of root, or as a child of one of root's children.
    Comparator is a function(a, b) returning > 0 if a is a child of b, < 0 if
    b is a child of a, 0 if neither.
    """
    # NOTE: this relies on lxml semantics -- append() re-parents an element,
    # so the recursive call below can pull children out of `root` while we
    # are iterating over it.  Do not port to stdlib ElementTree as-is.
    for child in root:
        rel = comparator(el, child)
        if rel > 0:
            # el fits inside child, add to child and return
            _append_sorted(child, el, comparator)
            return
        if rel < 0:
            # child fits inside el, move child into el (may move more than one)
            _append_sorted(el, child, comparator)
    # we weren't added to a child, so add to root
    root.append(el)
def _box_in_box(el, child):
""" Return True if child is contained within el. """
return all([
float(el.get('x0')) <= float(child.get('x0')),
float(el.get('x1')) >= float(child.get('x1')),
float(el.get('y0')) <= float(child.get('y0')),
float(el.get('y1')) >= float(child.get('y1')),
])
_comp_bbox_keys_required = set(['x0', 'x1', 'y0', 'y1'])
def _comp_bbox(el, el2):
""" Return 1 if el in el2, -1 if el2 in el, else 0"""
# only compare if both elements have x/y coordinates
if _comp_bbox_keys_required <= set(el.keys()) and \
_comp_bbox_keys_required <= set(el2.keys()):
if _box_in_box(el2, el):
return 1
if _box_in_box(el, el2):
return -1
return 0
# assorted helpers
def _flatten(l, ltypes=(list, tuple)):
# via http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
# these might have to be removed from the start of a decoded string after
# conversion
# (each raw BOM is decoded with its own encoding so the set holds unicode
# characters, comparable against the first char of a decoded string)
bom_headers = set([
    six.text_type(codecs.BOM_UTF8, 'utf8'),
    six.text_type(codecs.BOM_UTF16_LE, 'utf-16LE'),
    six.text_type(codecs.BOM_UTF16_BE, 'utf-16BE'),
    six.text_type(codecs.BOM_UTF32_LE, 'utf-32LE'),
    six.text_type(codecs.BOM_UTF32_BE, 'utf-32BE'),
])
def smart_unicode_decode(encoded_string):
    """
    Decode a byte string of unknown encoding into unicode.

    Tries plain ASCII first as a fast path, then asks chardet for a guess
    (falling back to utf8), and finally strips a leftover BOM character
    from the front of the result.
    Example input from bug #11:
    ('\xfe\xff\x00I\x00n\x00s\x00p\x00e\x00c\x00t\x00i\x00o\x00n\x00'
    '\x00R\x00e\x00p\x00o\x00r\x00t\x00 \x00v\x002\x00.\x002')
    """
    if not encoded_string:
        return u''
    # optimization -- most strings are plain ASCII, skip detection for them
    try:
        return encoded_string.decode('ascii')
    except UnicodeDecodeError:
        pass
    # bug 54 -- depending on chardet version, a failed guess is reported
    # either as None or as a dict whose 'encoding' is None; default to utf8
    guess = chardet.detect(encoded_string)
    encoding = (guess or {}).get('encoding') or 'utf8'
    decoded = six.text_type(encoded_string, encoding=encoding, errors='replace')
    # a decoded string may still start with a useless BOM character
    if decoded and decoded[0] in bom_headers:
        decoded = decoded[1:]
    return decoded
def prepare_for_json_encoding(obj):
    """
    Convert an arbitrary object into just JSON data types (list, dict, unicode str, int, bool, null).
    """
    kind = type(obj)
    if kind in (list, tuple):
        return [prepare_for_json_encoding(item) for item in obj]
    if kind == dict:
        # alphabetize keys so attribute dicts compare equal across runs
        pairs = (
            (prepare_for_json_encoding(key), prepare_for_json_encoding(obj[key]))
            for key in sorted(obj.keys())
        )
        return OrderedDict(pairs)
    if kind == six.binary_type:
        return smart_unicode_decode(obj)
    if obj is None or kind == bool or kind == six.text_type \
            or isinstance(obj, numbers.Number):
        return obj
    if kind == PSLiteral:
        # special case because pdfminer.six currently adds extra quotes to
        # PSLiteral.__repr__, so format the literal by hand
        return u"/%s" % obj.name
    return six.text_type(obj)
def obj_to_string(obj, top=True):
    """
    Turn an arbitrary object into a unicode string. If complex (dict/list/tuple), will be json-encoded.

    (`top` is unused; kept for interface compatibility with existing callers.)
    """
    prepared = prepare_for_json_encoding(obj)
    if type(prepared) != six.text_type:
        return json.dumps(prepared)
    return prepared
# via http://stackoverflow.com/a/25920392/307769
# Matches runs of characters that are not legal in XML 1.0 documents:
# allowed are tab, LF, CR, U+0020-U+D7FF, U+E000-U+FFFD and the astral
# planes U+10000-U+10FFFF.
# BUG FIX: the original pattern spelled the astral range as
# u'\u10000-\u10FFFF', but \u consumes exactly 4 hex digits, so it really
# parsed as U+1000, the range '0'-U+10FF, and two literal 'F's -- meaning
# perfectly legal astral characters (e.g. emoji) were stripped.
# \U00010000-\U0010FFFF is the correct 8-digit spelling.
invalid_xml_chars_re = re.compile(
    u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]+')
def strip_invalid_xml_chars(s):
    """Return *s* with all characters illegal in XML 1.0 removed."""
    return invalid_xml_chars_re.sub('', s)
# custom PDFDocument class
class QPDFDocument(PDFDocument):
    """PDFDocument subclass that can translate a 0-based page index into the
    document's printed page label (see get_page_number)."""

    def get_page_number(self, index):
        """
        Given an index, return page label as specified by
        catalog['PageLabels']['Nums']

        In a PDF, page labels are stored as a list of pairs, like
        [starting_index, label_format, starting_index, label_format ...]

        For example:
        [0, {'S': 'D', 'St': 151}, 4, {'S':'R', 'P':'Foo'}]

        So we have to first find the correct label_format based on the closest
        starting_index lower than the requested index, then use the
        label_format to convert the index to a page label.

        Label format meaning:
            /S = [
                    D Decimal arabic numerals
                    R Uppercase roman numerals
                    r Lowercase roman numerals
                    A Uppercase letters (A to Z for the first 26 pages, AA to ZZ
                      for the next 26, and so on)
                    a Lowercase letters (a to z for the first 26 pages, aa to zz
                      for the next 26, and so on)
                ] (if no /S, just use prefix ...)
            /P = text string label
            /St = integer start value
        """
        # get and cache page ranges; reversed so the scan below finds the
        # *closest* starting index at or under the requested one first
        if not hasattr(self, 'page_range_pairs'):
            try:
                page_ranges = resolve1(self.catalog['PageLabels'])['Nums']
                # the assert is deliberate input validation: a malformed
                # PageLabels entry drops us into the handler below
                assert len(page_ranges) > 1 and len(page_ranges) % 2 == 0
                self.page_range_pairs = list(
                    reversed(list(zip(page_ranges[::2], page_ranges[1::2]))))
            except Exception:
                # missing or malformed PageLabels -- no labels available
                # (was a bare except:, which also swallowed KeyboardInterrupt)
                self.page_range_pairs = []
        if not self.page_range_pairs:
            return ""
        # find page range containing index
        for starting_index, label_format in self.page_range_pairs:
            if starting_index <= index:
                break  # we found correct label_format
        label_format = resolve1(label_format)
        page_label = ""
        # handle numeric part of label
        if 'S' in label_format:
            # first find the 1-based numbering value for this page ...
            page_label = index - starting_index
            if 'St' in label_format:  # alternate start value
                page_label += label_format['St']
            else:
                page_label += 1
            # ... then convert to correct format
            num_type = label_format['S'].name
            # roman (upper or lower)
            if num_type.lower() == 'r':
                import roman
                page_label = roman.toRoman(page_label)
                if num_type == 'r':
                    page_label = page_label.lower()
            # letters
            elif num_type.lower() == 'a':
                # a to z for the first 26 pages, aa to zz for the next 26,
                # and so on.  BUG FIX: the original used
                # `chr(page_label % 26 + 65)` and `page_label / 26 + 1`,
                # which (a) crashes on Python 3 (str * float) and (b) is
                # off by one: value 1 gave 'B' and value 26 gave 'AA'
                # instead of 'A' and 'Z'.  Work with a 0-based value and
                # integer division instead.
                value = page_label - 1
                letter = chr(value % 26 + 65)
                letter *= value // 26 + 1
                if num_type == 'a':
                    letter = letter.lower()
                page_label = letter
            # decimal arabic
            else:  # if num_type == 'D':
                page_label = obj_to_string(page_label)
        # handle string prefix
        if 'P' in label_format:
            page_label = smart_unicode_decode(label_format['P']) + page_label
        return page_label
# create etree parser using custom Element class
class LayoutElement(etree.ElementBase):
    """etree element that can carry a reference back to the pdfminer layout
    object it was built from (assigned in PDFQuery._xmlize)."""

    @property
    def layout(self):
        # Lazily default to None the first time the property is read.
        try:
            return self._layout
        except AttributeError:
            self._layout = None
            return None

    @layout.setter
    def layout(self, value):
        self._layout = value
# Module-level lxml parser whose elements are LayoutElement instances, so
# every node built with parser.makeelement() exposes the .layout property.
parser_lookup = etree.ElementDefaultClassLookup(element=LayoutElement)
parser = etree.XMLParser()
parser.set_element_class_lookup(parser_lookup)
# main class
class PDFQuery(object):
    """Parse a PDF into an lxml etree of its pdfminer layout objects and
    query it with pyquery / CSS selectors (see load(), extract(), pq)."""
    def __init__(
        self,
        file,
        merge_tags=('LTChar', 'LTAnno'),
        round_floats=True,
        round_digits=3,
        input_text_formatter=None,
        normalize_spaces=True,
        resort=True,
        parse_tree_cacher=None,
        laparams={'all_texts':True, 'detect_vertical':True},
    ):
        """
        :param file: file-like object or filesystem path (opened 'rb').
        :param merge_tags: element tag names whose runs are merged into a
            single text node by _xmlize.
        :param round_floats: round float attribute values (see _filter_value).
        :param round_digits: number of digits kept when rounding.
        :param input_text_formatter: callable applied to node text by
            _clean_text; overrides normalize_spaces.
        :param normalize_spaces: collapse whitespace runs to single spaces.
        :param resort: nest elements by bounding box via _append_sorted.
        :param parse_tree_cacher: object with get/set/set_hash_key; defaults
            to DummyCache (no caching).
        :param laparams: dict of LAParams kwargs, or an LAParams instance.
            NOTE(review): mutable default dict -- only read here, never
            mutated, so it is safe in practice.
        """
        # store input
        self.merge_tags = merge_tags
        self.round_floats = round_floats
        self.round_digits = round_digits
        self.resort = resort
        # set up input text formatting function, if any
        if input_text_formatter:
            self.input_text_formatter = input_text_formatter
        elif normalize_spaces:
            r = re.compile(r'\s+')
            self.input_text_formatter = lambda s: re.sub(r, ' ', s)
        else:
            self.input_text_formatter = None
        # open doc
        if not hasattr(file, 'read'):
            try:
                file = open(file, 'rb')
            except TypeError:
                raise TypeError("File must be file object or filepath string.")
        parser = PDFParser(file)
        if hasattr(QPDFDocument, 'set_parser'):
            # pdfminer < 20131022
            doc = QPDFDocument()
            parser.set_document(doc)
            doc.set_parser(parser)
        else:
            # pdfminer >= 20131022
            doc = QPDFDocument(parser)
            parser.set_document(doc)
        if hasattr(doc, 'initialize'):
            # as of pdfminer==20140328, "PDFDocument.initialize() method is
            # removed and no longer needed."
            doc.initialize()
        self.doc = doc
        self.parser = parser
        self.tree = None
        self.pq = None
        self.file = file
        if parse_tree_cacher:
            self._parse_tree_cacher = parse_tree_cacher
            self._parse_tree_cacher.set_hash_key(self.file)
        else:
            self._parse_tree_cacher = DummyCache()
        # set up layout parsing
        rsrcmgr = PDFResourceManager()
        if type(laparams) == dict:
            laparams = LAParams(**laparams)
        self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
        # caches
        self._pages = []
        self._pages_iter = None
        self._elements = []
    def load(self, *page_numbers):
        """
        Load etree and pyquery object for entire document, or given page
        numbers (ints or lists). After this is called, objects are
        available at pdf.tree and pdf.pq.

            >>> pdf.load()
            >>> pdf.tree
            <lxml.etree._ElementTree object at ...>
            >>> pdf.pq('LTPage')
            [<LTPage>, <LTPage>]
            >>> pdf.load(1)
            >>> pdf.pq('LTPage')
            [<LTPage>]
            >>> pdf.load(0, 1)
            >>> pdf.pq('LTPage')
            [<LTPage>, <LTPage>]
        """
        self.tree = self.get_tree(*_flatten(page_numbers))
        self.pq = self.get_pyquery(self.tree)
    def extract(self, searches, tree=None, as_dict=True):
        """
        Apply a list of [key, selector(, formatter)] searches to the tree
        and return the results.

            >>> foo = pdf.extract([['pages', 'LTPage']])
            >>> foo
            {'pages': [<LTPage>, <LTPage>]}
            >>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
            {'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
        """
        if self.tree is None or self.pq is None:
            self.load()
        if tree is None:
            pq = self.pq
        else:
            pq = PyQuery(tree, css_translator=PDFQueryTranslator())
        results = []
        formatter = None
        parent = pq
        for search in searches:
            # two-item searches inherit the current default formatter
            if len(search) < 3:
                search = list(search) + [formatter]
            key, search, tmp_formatter = search
            if key == 'with_formatter':
                # sets the default formatter for subsequent searches
                if isinstance(search, six.string_types):
                    # is a pyquery method name, e.g. 'text'
                    formatter = lambda o, search=search: getattr(o, search)()
                elif hasattr(search, '__call__') or not search:
                    # is a method, or None to end formatting
                    formatter = search
                else:
                    raise TypeError("Formatter should be either a pyquery "
                                    "method name or a callable function.")
            elif key == 'with_parent':
                # sets the element subsequent searches run against
                parent = pq(search) if search else pq
            else:
                try:
                    result = parent("*").filter(search) if \
                        hasattr(search, '__call__') else parent(search)
                except cssselect.SelectorSyntaxError as e:
                    raise cssselect.SelectorSyntaxError(
                        "Error applying selector '%s': %s" % (search, e))
                if tmp_formatter:
                    result = tmp_formatter(result)
                results += result if type(result) == tuple else [[key, result]]
        if as_dict:
            results = dict(results)
        return results
    # tree building stuff
    def get_pyquery(self, tree=None, page_numbers=None):
        """
        Wrap given tree in pyquery and return.
        If no tree supplied, will generate one from given page_numbers, or
        all page numbers.
        """
        if not page_numbers:
            page_numbers = []
        if tree is None:
            if not page_numbers and self.tree is not None:
                tree = self.tree
            else:
                tree = self.get_tree(page_numbers)
        if hasattr(tree, 'getroot'):
            tree = tree.getroot()
        return PyQuery(tree, css_translator=PDFQueryTranslator())
    def get_tree(self, *page_numbers):
        """
        Return lxml.etree.ElementTree for entire document, or page numbers
        given if any.
        """
        cache_key = "_".join(map(str, _flatten(page_numbers)))
        tree = self._parse_tree_cacher.get(cache_key)
        if tree is None:
            # set up root, carrying document info entries as attributes
            root = parser.makeelement("pdfxml")
            if self.doc.info:
                for k, v in list(self.doc.info[0].items()):
                    k = obj_to_string(k)
                    v = obj_to_string(resolve1(v))
                    try:
                        root.set(k, v)
                    except ValueError as e:
                        # Sometimes keys have a character in them, like ':',
                        # that isn't allowed in XML attribute names.
                        # If that happens we just replace non-word characters
                        # with '_'.
                        # NOTE(review): '\W' should be a raw string r'\W'
                        # to avoid a DeprecationWarning on modern Pythons.
                        if "Invalid attribute name" in e.args[0]:
                            k = re.sub('\W', '_', k)
                            root.set(k, v)
            # Parse pages and append to root.
            # If nothing was passed in for page_numbers, we do this for all
            # pages, but if None was explicitly passed in, we skip it.
            if not(len(page_numbers) == 1 and page_numbers[0] is None):
                if page_numbers:
                    pages = [[n, self.get_layout(self.get_page(n))] for n in
                             _flatten(page_numbers)]
                else:
                    pages = enumerate(self.get_layouts())
                for n, page in pages:
                    page = self._xmlize(page)
                    page.set('page_index', obj_to_string(n))
                    page.set('page_label', self.doc.get_page_number(n))
                    root.append(page)
                self._clean_text(root)
            # wrap root in ElementTree
            tree = etree.ElementTree(root)
            self._parse_tree_cacher.set(cache_key, tree)
        return tree
    def _clean_text(self, branch):
        """
        Remove text from node if same text exists in its children.
        Apply string formatter if set.
        """
        if branch.text and self.input_text_formatter:
            branch.text = self.input_text_formatter(branch.text)
        try:
            for child in branch:
                self._clean_text(child)
                if branch.text and branch.text.find(child.text) >= 0:
                    branch.text = branch.text.replace(child.text, '', 1)
        except TypeError:  # not an iterable node
            pass
    def _xmlize(self, node, root=None):
        """Recursively convert a pdfminer layout node (and its children)
        into LayoutElement etree nodes, merging/merge-sorting as configured."""
        if isinstance(node, LayoutElement):
            # Already an XML element we can use
            branch = node
        else:
            # collect attributes of current node
            tags = self._getattrs(
                node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
                'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
            )
            if type(node) == LTImage:
                tags.update(self._getattrs(
                    node, 'colorspace', 'bits', 'imagemask', 'srcsize',
                    'stream', 'name', 'pts', 'linewidth')
                )
            elif type(node) == LTChar:
                tags.update(self._getattrs(
                    node, 'fontname', 'adv', 'upright', 'size')
                )
            elif type(node) == LTPage:
                tags.update(self._getattrs(node, 'pageid', 'rotate'))
            # create node
            branch = parser.makeelement(node.__class__.__name__, tags)
        branch.layout = node
        self._elements += [branch]  # make sure layout keeps state
        if root is None:
            root = branch
        # add text
        if hasattr(node, 'get_text'):
            branch.text = strip_invalid_xml_chars(node.get_text())
        # add children if node is an iterable
        if hasattr(node, '__iter__'):
            last = None
            for child in node:
                child = self._xmlize(child, root)
                if self.merge_tags and child.tag in self.merge_tags:
                    # drop children whose text the parent already holds;
                    # fold runs of mergeable siblings into the first one
                    if branch.text and child.text in branch.text:
                        continue
                    elif last is not None and last.tag in self.merge_tags:
                        last.text += child.text
                        last.set(
                            '_obj_id',
                            last.get('_obj_id','') + "," + child.get('_obj_id','')
                        )
                        continue
                # sort children by bounding boxes
                if self.resort:
                    _append_sorted(root, child, _comp_bbox)
                else:
                    branch.append(child)
                last = child
        return branch
    def _getattrs(self, obj, *attrs):
        """ Return dictionary of given attrs on given object, if they exist,
        processing through _filter_value().
        """
        filtered_attrs = {}
        for attr in attrs:
            if hasattr(obj, attr):
                filtered_attrs[attr] = obj_to_string(
                    self._filter_value(getattr(obj, attr))
                )
        return filtered_attrs
    def _filter_value(self, val):
        """Round floats (recursively, inside iterables) when configured."""
        if self.round_floats:
            if type(val) == float:
                val = round(val, self.round_digits)
            elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
                val = [self._filter_value(item) for item in val]
        return val
    # page access stuff
    def get_page(self, page_number):
        """ Get PDFPage object -- 0-indexed."""
        return self._cached_pages(target_page=page_number)
    def get_layout(self, page):
        """ Get PDFMiner Layout object for given page object or page number. """
        if type(page) == int:
            page = self.get_page(page)
        self.interpreter.process_page(page)
        layout = self.device.get_result()
        layout = self._add_annots(layout, page.annots)
        return layout
    def get_layouts(self):
        """ Get list of PDFMiner Layout objects for each page. """
        return (self.get_layout(page) for page in self._cached_pages())
    def _cached_pages(self, target_page=-1):
        """
        Get a page or all pages from page generator, caching results.
        This is necessary because PDFMiner searches recursively for pages,
        so we won't know how many there are until we parse the whole document,
        which we don't want to do until we need to.
        """
        try:
            # pdfminer < 20131022
            self._pages_iter = self._pages_iter or self.doc.get_pages()
        except AttributeError:
            # pdfminer >= 20131022
            self._pages_iter = self._pages_iter or \
                PDFPage.create_pages(self.doc)
        if target_page >= 0:
            while len(self._pages) <= target_page:
                next_page = next(self._pages_iter)
                if not next_page:
                    return None
                # NOTE(review): every page gets page_number 0 here -- looks
                # like it should be the page's index; confirm against callers.
                next_page.page_number = 0
                self._pages += [next_page]
            try:
                return self._pages[target_page]
            except IndexError:
                return None
        self._pages += list(self._pages_iter)
        return self._pages
    def _add_annots(self, layout, annots):
        """Adds annotations to the layout object
        """
        if annots:
            for annot in resolve1(annots):
                annot = resolve1(annot)
                if annot.get('Rect') is not None:
                    annot['bbox'] = annot.pop('Rect')  # Rename key
                    annot = self._set_hwxy_attrs(annot)
                try:
                    annot['URI'] = resolve1(annot['A'])['URI']
                except KeyError:
                    pass
                # etree attribute values must be strings
                for k, v in six.iteritems(annot):
                    if not isinstance(v, six.string_types):
                        annot[k] = obj_to_string(v)
                elem = parser.makeelement('Annot', annot)
                layout.add(elem)
        return layout
    @staticmethod
    def _set_hwxy_attrs(attr):
        """Using the bbox attribute, set the h, w, x0, x1, y0, and y1
        attributes.
        """
        bbox = attr['bbox']
        attr['x0'] = bbox[0]
        attr['x1'] = bbox[2]
        attr['y0'] = bbox[1]
        attr['y1'] = bbox[3]
        attr['height'] = attr['y1'] - attr['y0']
        attr['width'] = attr['x1'] - attr['x0']
        return attr
if __name__ == "__main__":
    # Run the doctests in this module against the bundled sample PDF.
    import doctest
    pdf = PDFQuery("../examples/sample.pdf")
    doctest.testmod(extraglobs={'pdf': pdf}, optionflags=doctest.ELLIPSIS)
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
obj_to_string
|
python
|
def obj_to_string(obj, top=True):
"""
Turn an arbitrary object into a unicode string. If complex (dict/list/tuple), will be json-encoded.
"""
obj = prepare_for_json_encoding(obj)
if type(obj) == six.text_type:
return obj
return json.dumps(obj)
|
Turn an arbitrary object into a unicode string. If complex (dict/list/tuple), will be json-encoded.
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L170-L177
|
[
"def prepare_for_json_encoding(obj):\n \"\"\"\n Convert an arbitrary object into just JSON data types (list, dict, unicode str, int, bool, null).\n \"\"\"\n obj_type = type(obj)\n if obj_type == list or obj_type == tuple:\n return [prepare_for_json_encoding(item) for item in obj]\n if obj_type == dict:\n # alphabetizing keys lets us compare attributes for equality across runs\n return OrderedDict(\n (prepare_for_json_encoding(k),\n prepare_for_json_encoding(obj[k])) for k in sorted(obj.keys())\n )\n if obj_type == six.binary_type:\n return smart_unicode_decode(obj)\n if obj_type == bool or obj is None or obj_type == six.text_type or isinstance(obj, numbers.Number):\n return obj\n if obj_type == PSLiteral:\n # special case because pdfminer.six currently adds extra quotes to PSLiteral.__repr__\n return u\"/%s\" % obj.name\n return six.text_type(obj)\n"
] |
from __future__ import print_function
# -*- coding: utf-8 -*-
# builtins
import codecs
import json
import numbers
import re
import chardet
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # sorry py2.6! Ordering isn't that important for our purposes anyway.
# pdfminer
from pdfminer.psparser import PSLiteral
from pdfminer.pdfparser import PDFParser
try:
# pdfminer < 20131022
from pdfminer.pdfparser import PDFDocument, PDFPage
except ImportError:
# pdfminer >= 20131022
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.layout import LAParams, LTChar, LTImage, LTPage
from pdfminer.converter import PDFPageAggregator
from pdfminer.pdftypes import resolve1
# other dependencies
from pyquery import PyQuery
from lxml import etree
import cssselect
import six
from six.moves import map
from six.moves import zip
# local imports
from .pdftranslator import PDFQueryTranslator
from .cache import DummyCache
# Re-sort the PDFMiner Layout tree so elements that fit inside other elements
# will be children of them
def _append_sorted(root, el, comparator):
""" Add el as a child of root, or as a child of one of root's children.
Comparator is a function(a, b) returning > 0 if a is a child of b, < 0 if
b is a child of a, 0 if neither.
"""
for child in root:
rel = comparator(el, child)
if rel > 0:
# el fits inside child, add to child and return
_append_sorted(child, el, comparator)
return
if rel < 0:
# child fits inside el, move child into el (may move more than one)
_append_sorted(el, child, comparator)
# we weren't added to a child, so add to root
root.append(el)
def _box_in_box(el, child):
""" Return True if child is contained within el. """
return all([
float(el.get('x0')) <= float(child.get('x0')),
float(el.get('x1')) >= float(child.get('x1')),
float(el.get('y0')) <= float(child.get('y0')),
float(el.get('y1')) >= float(child.get('y1')),
])
# full set of coordinate keys an element needs before boxes can be compared
_comp_bbox_keys_required = {'x0', 'x1', 'y0', 'y1'}
def _comp_bbox(el, el2):
    """Return 1 if el fits inside el2, -1 if el2 fits inside el, else 0.
    Elements lacking any of the x/y coordinate attributes compare as 0.
    """
    both_have_coords = (
        _comp_bbox_keys_required <= set(el.keys())
        and _comp_bbox_keys_required <= set(el2.keys())
    )
    if both_have_coords:
        if _box_in_box(el2, el):
            return 1
        if _box_in_box(el, el2):
            return -1
    return 0
# assorted helpers
def _flatten(l, ltypes=(list, tuple)):
# via http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
ltype = type(l)
l = list(l)
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
i -= 1
break
else:
l[i:i + 1] = l[i]
i += 1
return ltype(l)
# these might have to be removed from the start of a decoded string after
# conversion
# Each BOM byte sequence, decoded with its own codec, yields the same
# character (presumably U+FEFF in every case -- verify if extending);
# smart_unicode_decode() strips a leading member of this set.
bom_headers = set([
    six.text_type(codecs.BOM_UTF8, 'utf8'),
    six.text_type(codecs.BOM_UTF16_LE, 'utf-16LE'),
    six.text_type(codecs.BOM_UTF16_BE, 'utf-16BE'),
    six.text_type(codecs.BOM_UTF32_LE, 'utf-32LE'),
    six.text_type(codecs.BOM_UTF32_BE, 'utf-32BE'),
])
def smart_unicode_decode(encoded_string):
    """
    Decode a byte string of unknown encoding to unicode: try ASCII first,
    then fall back to chardet's guess (replacing undecodable bytes), and
    strip any leading byte-order mark. Empty/None input yields u''.
    Example input (bug #11) was UTF-16BE bytes with a BOM.
    """
    if not encoded_string:
        return u''
    # fast path: most strings are plain ASCII
    try:
        return encoded_string.decode('ascii')
    except UnicodeDecodeError:
        pass
    # ask chardet; depending on its version an undetected encoding shows up
    # as a None result or a None 'encoding' key (bug 54), so default to utf8
    guess = chardet.detect(encoded_string)
    codec = guess['encoding'] if guess and guess.get('encoding') else 'utf8'
    decoded = six.text_type(
        encoded_string,
        encoding=codec,
        errors='replace'
    )
    # the decoded text may still begin with a useless BOM character
    if decoded and decoded[0] in bom_headers:
        decoded = decoded[1:]
    return decoded
def prepare_for_json_encoding(obj):
    """
    Convert an arbitrary object into just JSON data types (list, dict, unicode str, int, bool, null).
    """
    obj_type = type(obj)
    # exact type checks (not isinstance) so that subclasses fall through to
    # the generic six.text_type(...) fallback at the bottom
    if obj_type == list or obj_type == tuple:
        return [prepare_for_json_encoding(item) for item in obj]
    if obj_type == dict:
        # alphabetizing keys lets us compare attributes for equality across runs
        return OrderedDict(
            (prepare_for_json_encoding(k),
             prepare_for_json_encoding(obj[k])) for k in sorted(obj.keys())
        )
    if obj_type == six.binary_type:
        # byte strings: decode with chardet's best guess
        return smart_unicode_decode(obj)
    if obj_type == bool or obj is None or obj_type == six.text_type or isinstance(obj, numbers.Number):
        return obj
    if obj_type == PSLiteral:
        # special case because pdfminer.six currently adds extra quotes to PSLiteral.__repr__
        return u"/%s" % obj.name
    return six.text_type(obj)
def obj_to_string(obj, top=True):
    """
    Render an arbitrary object as a unicode string. Complex values
    (dict/list/tuple) are normalized by prepare_for_json_encoding() and
    then JSON-encoded; plain text values pass through unchanged.
    """
    normalized = prepare_for_json_encoding(obj)
    if type(normalized) == six.text_type:
        return normalized
    return json.dumps(normalized)
# via http://stackoverflow.com/a/25920392/307769
# Matches runs of characters that are not legal in XML 1.0 text content
# (the spec's "Char" production). NOTE: the original pattern wrote the
# supplementary-plane range as \u10000-\u10FFFF, which Python reads as
# U+1000 followed by the literals "0-" etc., so valid astral characters
# (e.g. emoji) were wrongly stripped; \U00010000-\U0010FFFF fixes that.
invalid_xml_chars_re = re.compile(
    u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]+')
def strip_invalid_xml_chars(s):
    """Remove characters that may not appear in XML 1.0 text content."""
    return invalid_xml_chars_re.sub('', s)
# custom PDFDocument class
class QPDFDocument(PDFDocument):
    """PDFDocument subclass that can translate a 0-based page index into
    the document's printed page label (catalog['PageLabels'])."""

    def get_page_number(self, index):
        """
        Given a 0-based index, return the page label as specified by
        catalog['PageLabels']['Nums'].

        Page labels are stored as a flat list of pairs,
        [starting_index, label_format, starting_index, label_format, ...],
        for example [0, {'S': 'D', 'St': 151}, 4, {'S': 'R', 'P': 'Foo'}].
        We first find the label_format with the closest starting_index at
        or below the requested index, then render the label:

        /S = numbering style:
            D  decimal arabic numerals
            R  uppercase roman numerals
            r  lowercase roman numerals
            A  uppercase letters (A to Z for the first 26 pages, AA to ZZ
               for the next 26, and so on)
            a  lowercase letters, same scheme
            (if /S is absent, only the /P prefix is used)
        /P  = text string prefix
        /St = integer start value

        Returns "" when the document defines no usable page labels.
        """
        # Build and cache the (start, format) pairs, sorted descending so
        # that the first pair with start <= index is the right one.
        if not hasattr(self, 'page_range_pairs'):
            try:
                page_ranges = resolve1(self.catalog['PageLabels'])['Nums']
                assert len(page_ranges) > 1 and len(page_ranges) % 2 == 0
                self.page_range_pairs = list(
                    reversed(list(zip(page_ranges[::2], page_ranges[1::2]))))
            except Exception:
                # missing or malformed PageLabels -- no labels available
                self.page_range_pairs = []
        if not self.page_range_pairs:
            return ""
        # find the page range containing index
        for starting_index, label_format in self.page_range_pairs:
            if starting_index <= index:
                break  # we found the correct label_format
        label_format = resolve1(label_format)
        page_label = ""
        # numeric part of the label
        if 'S' in label_format:
            # ordinal of this page within its range (1-based unless /St)
            page_label = index - starting_index
            if 'St' in label_format:  # alternate start value
                page_label += label_format['St']
            else:
                page_label += 1
            num_type = label_format['S'].name
            if num_type.lower() == 'r':
                # roman numerals, upper- or lowercase
                import roman
                page_label = roman.toRoman(page_label)
                if num_type == 'r':
                    page_label = page_label.lower()
            elif num_type.lower() == 'a':
                # A-Z for pages 1-26, AA-ZZ for 27-52, and so on.
                # Fixed two bugs: the 1-based ordinal must be shifted before
                # taking the modulus (page 1 is 'A', not 'B'), and the repeat
                # count needs integer division -- the old `/` produced a
                # float and a TypeError under Python 3.
                letter = chr((page_label - 1) % 26 + 65)
                letter *= (page_label - 1) // 26 + 1
                if num_type == 'a':
                    letter = letter.lower()
                page_label = letter
            else:  # num_type == 'D': decimal arabic
                page_label = obj_to_string(page_label)
        # string prefix, if any
        if 'P' in label_format:
            page_label = smart_unicode_decode(label_format['P']) + page_label
        return page_label
# create etree parser using custom Element class
class LayoutElement(etree.ElementBase):
    """lxml element that can carry a reference back to the PDFMiner layout
    object it was generated from (set by PDFQuery._xmlize)."""
    @property
    def layout(self):
        # default lazily to None -- lxml may materialize proxy instances at
        # any time, so the backing attribute is not guaranteed to exist yet
        if not hasattr(self, '_layout'):
            self._layout = None
        return self._layout
    @layout.setter
    def layout(self, value):
        self._layout = value
# module-level parser whose elements are all LayoutElement instances
parser_lookup = etree.ElementDefaultClassLookup(element=LayoutElement)
parser = etree.XMLParser()
parser.set_element_class_lookup(parser_lookup)
# main class
class PDFQuery(object):
    def __init__(
            self,
            file,
            merge_tags=('LTChar', 'LTAnno'),
            round_floats=True,
            round_digits=3,
            input_text_formatter=None,
            normalize_spaces=True,
            resort=True,
            parse_tree_cacher=None,
            laparams={'all_texts':True, 'detect_vertical':True},
    ):
        """Open a PDF for querying.

        :param file: file object or filesystem path of the PDF.
        :param merge_tags: element tags whose consecutive runs get merged
            into a single text node while building the XML tree.
        :param round_floats: round coordinates to ``round_digits`` places.
        :param input_text_formatter: callable applied to each text node;
            defaults to whitespace collapsing when ``normalize_spaces``.
        :param resort: re-nest elements by bounding-box containment.
        :param parse_tree_cacher: optional cache for parsed page trees.
        :param laparams: pdfminer LAParams options (dict or LAParams).
            NOTE(review): mutable default dict is shared across calls --
            harmless while it is only read, but worth confirming.
        """
        # store input
        self.merge_tags = merge_tags
        self.round_floats = round_floats
        self.round_digits = round_digits
        self.resort = resort
        # set up input text formatting function, if any
        if input_text_formatter:
            self.input_text_formatter = input_text_formatter
        elif normalize_spaces:
            r = re.compile(r'\s+')
            self.input_text_formatter = lambda s: re.sub(r, ' ', s)
        else:
            self.input_text_formatter = None
        # open doc
        if not hasattr(file, 'read'):
            try:
                file = open(file, 'rb')
            except TypeError:
                raise TypeError("File must be file object or filepath string.")
        parser = PDFParser(file)
        if hasattr(QPDFDocument, 'set_parser'):
            # pdfminer < 20131022
            doc = QPDFDocument()
            parser.set_document(doc)
            doc.set_parser(parser)
        else:
            # pdfminer >= 20131022
            doc = QPDFDocument(parser)
            parser.set_document(doc)
        if hasattr(doc, 'initialize'):
            # as of pdfminer==20140328, "PDFDocument.initialize() method is
            # removed and no longer needed."
            doc.initialize()
        self.doc = doc
        self.parser = parser
        self.tree = None
        self.pq = None
        self.file = file
        if parse_tree_cacher:
            self._parse_tree_cacher = parse_tree_cacher
            self._parse_tree_cacher.set_hash_key(self.file)
        else:
            self._parse_tree_cacher = DummyCache()
        # set up layout parsing
        rsrcmgr = PDFResourceManager()
        if type(laparams) == dict:
            laparams = LAParams(**laparams)
        self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
        # caches
        self._pages = []
        self._pages_iter = None
        self._elements = []
def load(self, *page_numbers):
"""
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
"""
self.tree = self.get_tree(*_flatten(page_numbers))
self.pq = self.get_pyquery(self.tree)
    def extract(self, searches, tree=None, as_dict=True):
        """
        Run a list of [key, selector(, formatter)] searches against the
        parsed tree and return the matches, keyed by name. The special
        keys 'with_formatter' and 'with_parent' change state for the
        searches that follow them instead of producing a result.

        >>> foo = pdf.extract([['pages', 'LTPage']])
        >>> foo
        {'pages': [<LTPage>, <LTPage>]}
        >>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
        {'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
        """
        if self.tree is None or self.pq is None:
            self.load()
        if tree is None:
            pq = self.pq
        else:
            pq = PyQuery(tree, css_translator=PDFQueryTranslator())
        results = []
        formatter = None
        parent = pq
        for search in searches:
            # two-item searches inherit the formatter currently in effect
            if len(search) < 3:
                search = list(search) + [formatter]
            key, search, tmp_formatter = search
            if key == 'with_formatter':
                if isinstance(search, six.string_types):
                    # is a pyquery method name, e.g. 'text'
                    formatter = lambda o, search=search: getattr(o, search)()
                elif hasattr(search, '__call__') or not search:
                    # is a method, or None to end formatting
                    formatter = search
                else:
                    raise TypeError("Formatter should be either a pyquery "
                                    "method name or a callable function.")
            elif key == 'with_parent':
                # scope subsequent searches to this selector (or reset)
                parent = pq(search) if search else pq
            else:
                try:
                    # callables are applied as a filter over all elements
                    result = parent("*").filter(search) if \
                        hasattr(search, '__call__') else parent(search)
                except cssselect.SelectorSyntaxError as e:
                    raise cssselect.SelectorSyntaxError(
                        "Error applying selector '%s': %s" % (search, e))
                if tmp_formatter:
                    result = tmp_formatter(result)
                # formatters may return ready-made (key, value) tuples
                results += result if type(result) == tuple else [[key, result]]
        if as_dict:
            results = dict(results)
        return results
# tree building stuff
def get_pyquery(self, tree=None, page_numbers=None):
"""
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
"""
if not page_numbers:
page_numbers = []
if tree is None:
if not page_numbers and self.tree is not None:
tree = self.tree
else:
tree = self.get_tree(page_numbers)
if hasattr(tree, 'getroot'):
tree = tree.getroot()
return PyQuery(tree, css_translator=PDFQueryTranslator())
    def get_tree(self, *page_numbers):
        """
        Return lxml.etree.ElementTree for entire document, or page numbers
        given if any. Results are cached per page-number combination via
        self._parse_tree_cacher.
        """
        cache_key = "_".join(map(str, _flatten(page_numbers)))
        tree = self._parse_tree_cacher.get(cache_key)
        if tree is None:
            # set up root
            root = parser.makeelement("pdfxml")
            if self.doc.info:
                # copy document metadata (title, author, ...) onto the root
                for k, v in list(self.doc.info[0].items()):
                    k = obj_to_string(k)
                    v = obj_to_string(resolve1(v))
                    try:
                        root.set(k, v)
                    except ValueError as e:
                        # Sometimes keys have a character in them, like ':',
                        # that isn't allowed in XML attribute names.
                        # If that happens we just replace non-word characters
                        # with '_'.
                        if "Invalid attribute name" in e.args[0]:
                            k = re.sub('\W', '_', k)
                            root.set(k, v)
            # Parse pages and append to root.
            # If nothing was passed in for page_numbers, we do this for all
            # pages, but if None was explicitly passed in, we skip it.
            if not(len(page_numbers) == 1 and page_numbers[0] is None):
                if page_numbers:
                    pages = [[n, self.get_layout(self.get_page(n))] for n in
                             _flatten(page_numbers)]
                else:
                    pages = enumerate(self.get_layouts())
                for n, page in pages:
                    page = self._xmlize(page)
                    page.set('page_index', obj_to_string(n))
                    page.set('page_label', self.doc.get_page_number(n))
                    root.append(page)
                # strip text duplicated between parents and children
                self._clean_text(root)
            # wrap root in ElementTree
            tree = etree.ElementTree(root)
            self._parse_tree_cacher.set(cache_key, tree)
        return tree
    def _clean_text(self, branch):
        """
        Remove text from node if same text exists in its children.
        Apply string formatter if set. Recurses depth-first over the tree.
        """
        if branch.text and self.input_text_formatter:
            branch.text = self.input_text_formatter(branch.text)
        try:
            for child in branch:
                self._clean_text(child)
                # drop the first occurrence of the child's text from the
                # parent so text is not reported twice
                if branch.text and branch.text.find(child.text) >= 0:
                    branch.text = branch.text.replace(child.text, '', 1)
        except TypeError:  # not an iterable node
            # NOTE(review): find(None) also raises TypeError when a child
            # has no text, silently ending dedup for the remaining
            # children -- confirm whether that is intended
            pass
    def _xmlize(self, node, root=None):
        """Convert a PDFMiner layout node (and its subtree) into a
        LayoutElement tree, optionally merging adjacent text fragments
        and re-nesting children by bounding-box containment."""
        if isinstance(node, LayoutElement):
            # Already an XML element we can use
            branch = node
        else:
            # collect attributes of current node
            tags = self._getattrs(
                node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
                'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
            )
            # type-specific extras
            if type(node) == LTImage:
                tags.update(self._getattrs(
                    node, 'colorspace', 'bits', 'imagemask', 'srcsize',
                    'stream', 'name', 'pts', 'linewidth')
                )
            elif type(node) == LTChar:
                tags.update(self._getattrs(
                    node, 'fontname', 'adv', 'upright', 'size')
                )
            elif type(node) == LTPage:
                tags.update(self._getattrs(node, 'pageid', 'rotate'))
            # create node
            branch = parser.makeelement(node.__class__.__name__, tags)
        branch.layout = node
        self._elements += [branch]  # make sure layout keeps state
        if root is None:
            root = branch
        # add text
        if hasattr(node, 'get_text'):
            branch.text = strip_invalid_xml_chars(node.get_text())
        # add children if node is an iterable
        if hasattr(node, '__iter__'):
            last = None
            for child in node:
                child = self._xmlize(child, root)
                if self.merge_tags and child.tag in self.merge_tags:
                    # skip children whose text the parent already carries
                    if branch.text and child.text in branch.text:
                        continue
                    # merge runs of mergeable siblings into one node
                    elif last is not None and last.tag in self.merge_tags:
                        last.text += child.text
                        last.set(
                            '_obj_id',
                            last.get('_obj_id','') + "," + child.get('_obj_id','')
                        )
                        continue
                # sort children by bounding boxes
                if self.resort:
                    _append_sorted(root, child, _comp_bbox)
                else:
                    branch.append(child)
                last = child
        return branch
def _getattrs(self, obj, *attrs):
""" Return dictionary of given attrs on given object, if they exist,
processing through _filter_value().
"""
filtered_attrs = {}
for attr in attrs:
if hasattr(obj, attr):
filtered_attrs[attr] = obj_to_string(
self._filter_value(getattr(obj, attr))
)
return filtered_attrs
def _filter_value(self, val):
if self.round_floats:
if type(val) == float:
val = round(val, self.round_digits)
elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
val = [self._filter_value(item) for item in val]
return val
# page access stuff
    def get_page(self, page_number):
        """ Get PDFPage object -- 0-indexed. Returns None if out of range."""
        return self._cached_pages(target_page=page_number)
    def get_layout(self, page):
        """ Get PDFMiner Layout object for given page object or page number. """
        if type(page) == int:
            page = self.get_page(page)
        self.interpreter.process_page(page)
        # the aggregator device has now collected the page; fetch the LTPage
        layout = self.device.get_result()
        # attach the page's annotations (links etc.) to the layout tree
        layout = self._add_annots(layout, page.annots)
        return layout
    def get_layouts(self):
        """ Get list of PDFMiner Layout objects for each page.
        (Lazy generator -- each page is parsed as it is consumed.)
        """
        return (self.get_layout(page) for page in self._cached_pages())
    def _cached_pages(self, target_page=-1):
        """
        Get a page or all pages from page generator, caching results.
        This is necessary because PDFMiner searches recursively for pages,
        so we won't know how many there are until we parse the whole document,
        which we don't want to do until we need to.

        With target_page >= 0, returns that single page (or None); with the
        default -1, exhausts the generator and returns the full page list.
        """
        try:
            # pdfminer < 20131022
            self._pages_iter = self._pages_iter or self.doc.get_pages()
        except AttributeError:
            # pdfminer >= 20131022
            self._pages_iter = self._pages_iter or \
                               PDFPage.create_pages(self.doc)
        if target_page >= 0:
            # pull pages from the generator until the target is cached
            while len(self._pages) <= target_page:
                next_page = next(self._pages_iter)
                # NOTE(review): next() raises StopIteration when exhausted,
                # so this falsy check presumably never fires -- confirm
                if not next_page:
                    return None
                next_page.page_number = 0
                self._pages += [next_page]
            try:
                return self._pages[target_page]
            except IndexError:
                return None
        # no target: drain the generator and return everything
        self._pages += list(self._pages_iter)
        return self._pages
    def _add_annots(self, layout, annots):
        """Adds annotations to the layout object as <Annot> elements,
        with bbox-derived coordinates and a URI attribute for links.
        """
        if annots:
            for annot in resolve1(annots):
                annot = resolve1(annot)
                if annot.get('Rect') is not None:
                    annot['bbox'] = annot.pop('Rect')  # Rename key
                    annot = self._set_hwxy_attrs(annot)
                try:
                    annot['URI'] = resolve1(annot['A'])['URI']
                except KeyError:
                    # not a link annotation -- no action dictionary / URI
                    pass
                # XML attributes must be strings
                for k, v in six.iteritems(annot):
                    if not isinstance(v, six.string_types):
                        annot[k] = obj_to_string(v)
                elem = parser.makeelement('Annot', annot)
                layout.add(elem)
        return layout
@staticmethod
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
if __name__ == "__main__":
    # Run the module doctests against the bundled sample PDF; the ELLIPSIS
    # flag lets the doctests elide object reprs with "...".
    import doctest
    pdf = PDFQuery("../examples/sample.pdf")
    doctest.testmod(extraglobs={'pdf': pdf}, optionflags=doctest.ELLIPSIS)
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
QPDFDocument.get_page_number
|
python
|
def get_page_number(self, index):
    """
    Given a 0-based index, return the page label as specified by
    catalog['PageLabels']['Nums'].

    Page labels are stored as a flat list of pairs,
    [starting_index, label_format, starting_index, label_format, ...],
    for example [0, {'S': 'D', 'St': 151}, 4, {'S': 'R', 'P': 'Foo'}].
    We first find the label_format with the closest starting_index at
    or below the requested index, then render the label:

    /S = numbering style:
        D  decimal arabic numerals
        R  uppercase roman numerals
        r  lowercase roman numerals
        A  uppercase letters (A to Z for the first 26 pages, AA to ZZ
           for the next 26, and so on)
        a  lowercase letters, same scheme
        (if /S is absent, only the /P prefix is used)
    /P  = text string prefix
    /St = integer start value

    Returns "" when the document defines no usable page labels.
    """
    # Build and cache the (start, format) pairs, sorted descending so
    # that the first pair with start <= index is the right one.
    if not hasattr(self, 'page_range_pairs'):
        try:
            page_ranges = resolve1(self.catalog['PageLabels'])['Nums']
            assert len(page_ranges) > 1 and len(page_ranges) % 2 == 0
            self.page_range_pairs = list(
                reversed(list(zip(page_ranges[::2], page_ranges[1::2]))))
        except Exception:
            # missing or malformed PageLabels -- no labels available
            self.page_range_pairs = []
    if not self.page_range_pairs:
        return ""
    # find the page range containing index
    for starting_index, label_format in self.page_range_pairs:
        if starting_index <= index:
            break  # we found the correct label_format
    label_format = resolve1(label_format)
    page_label = ""
    # numeric part of the label
    if 'S' in label_format:
        # ordinal of this page within its range (1-based unless /St)
        page_label = index - starting_index
        if 'St' in label_format:  # alternate start value
            page_label += label_format['St']
        else:
            page_label += 1
        num_type = label_format['S'].name
        if num_type.lower() == 'r':
            # roman numerals, upper- or lowercase
            import roman
            page_label = roman.toRoman(page_label)
            if num_type == 'r':
                page_label = page_label.lower()
        elif num_type.lower() == 'a':
            # A-Z for pages 1-26, AA-ZZ for 27-52, and so on.
            # Fixed two bugs: the 1-based ordinal must be shifted before
            # taking the modulus (page 1 is 'A', not 'B'), and the repeat
            # count needs integer division -- the old `/` produced a
            # float and a TypeError under Python 3.
            letter = chr((page_label - 1) % 26 + 65)
            letter *= (page_label - 1) // 26 + 1
            if num_type == 'a':
                letter = letter.lower()
            page_label = letter
        else:  # num_type == 'D': decimal arabic
            page_label = obj_to_string(page_label)
    # string prefix, if any
    if 'P' in label_format:
        page_label = smart_unicode_decode(label_format['P']) + page_label
    return page_label
|
Given an index, return page label as specified by
catalog['PageLabels']['Nums']
In a PDF, page labels are stored as a list of pairs, like
[starting_index, label_format, starting_index, label_format ...]
For example:
[0, {'S': 'D', 'St': 151}, 4, {'S':'R', 'P':'Foo'}]
So we have to first find the correct label_format based on the closest
starting_index lower than the requested index, then use the
label_format to convert the index to a page label.
Label format meaning:
/S = [
D Decimal arabic numerals
R Uppercase roman numerals
r Lowercase roman numerals
A Uppercase letters (A to Z for the first 26 pages, AA to ZZ
for the next 26, and so on)
a Lowercase letters (a to z for the first 26 pages, aa to zz
for the next 26, and so on)
] (if no /S, just use prefix ...)
/P = text string label
/St = integer start value
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L188-L276
|
[
"def smart_unicode_decode(encoded_string):\n \"\"\"\n Given an encoded string of unknown format, detect the format with\n chardet and return the unicode version.\n Example input from bug #11:\n ('\\xfe\\xff\\x00I\\x00n\\x00s\\x00p\\x00e\\x00c\\x00t\\x00i\\x00o\\x00n\\x00'\n '\\x00R\\x00e\\x00p\\x00o\\x00r\\x00t\\x00 \\x00v\\x002\\x00.\\x002')\n \"\"\"\n if not encoded_string:\n return u''\n\n # optimization -- first try ascii\n try:\n return encoded_string.decode('ascii')\n except UnicodeDecodeError:\n pass\n\n # detect encoding\n detected_encoding = chardet.detect(encoded_string)\n # bug 54 -- depending on chardet version, if encoding is not guessed,\n # either detected_encoding will be None or detected_encoding['encoding'] will be None\n detected_encoding = detected_encoding['encoding'] if detected_encoding and detected_encoding.get('encoding') else 'utf8'\n decoded_string = six.text_type(\n encoded_string,\n encoding=detected_encoding,\n errors='replace'\n )\n\n # unicode string may still have useless BOM character at the beginning\n if decoded_string and decoded_string[0] in bom_headers:\n decoded_string = decoded_string[1:]\n\n return decoded_string\n"
] |
class QPDFDocument(PDFDocument):
    """PDFDocument subclass that can translate a 0-based page index into
    the document's printed page label (catalog['PageLabels'])."""

    def get_page_number(self, index):
        """
        Given a 0-based index, return the page label as specified by
        catalog['PageLabels']['Nums'].

        Page labels are stored as a flat list of pairs,
        [starting_index, label_format, starting_index, label_format, ...],
        for example [0, {'S': 'D', 'St': 151}, 4, {'S': 'R', 'P': 'Foo'}].
        We first find the label_format with the closest starting_index at
        or below the requested index, then render the label:

        /S = numbering style:
            D  decimal arabic numerals
            R  uppercase roman numerals
            r  lowercase roman numerals
            A  uppercase letters (A to Z for the first 26 pages, AA to ZZ
               for the next 26, and so on)
            a  lowercase letters, same scheme
            (if /S is absent, only the /P prefix is used)
        /P  = text string prefix
        /St = integer start value

        Returns "" when the document defines no usable page labels.
        """
        # Build and cache the (start, format) pairs, sorted descending so
        # that the first pair with start <= index is the right one.
        if not hasattr(self, 'page_range_pairs'):
            try:
                page_ranges = resolve1(self.catalog['PageLabels'])['Nums']
                assert len(page_ranges) > 1 and len(page_ranges) % 2 == 0
                self.page_range_pairs = list(
                    reversed(list(zip(page_ranges[::2], page_ranges[1::2]))))
            except Exception:
                # missing or malformed PageLabels -- no labels available
                self.page_range_pairs = []
        if not self.page_range_pairs:
            return ""
        # find the page range containing index
        for starting_index, label_format in self.page_range_pairs:
            if starting_index <= index:
                break  # we found the correct label_format
        label_format = resolve1(label_format)
        page_label = ""
        # numeric part of the label
        if 'S' in label_format:
            # ordinal of this page within its range (1-based unless /St)
            page_label = index - starting_index
            if 'St' in label_format:  # alternate start value
                page_label += label_format['St']
            else:
                page_label += 1
            num_type = label_format['S'].name
            if num_type.lower() == 'r':
                # roman numerals, upper- or lowercase
                import roman
                page_label = roman.toRoman(page_label)
                if num_type == 'r':
                    page_label = page_label.lower()
            elif num_type.lower() == 'a':
                # A-Z for pages 1-26, AA-ZZ for 27-52, and so on.
                # Fixed two bugs: the 1-based ordinal must be shifted before
                # taking the modulus (page 1 is 'A', not 'B'), and the repeat
                # count needs integer division -- the old `/` produced a
                # float and a TypeError under Python 3.
                letter = chr((page_label - 1) % 26 + 65)
                letter *= (page_label - 1) // 26 + 1
                if num_type == 'a':
                    letter = letter.lower()
                page_label = letter
            else:  # num_type == 'D': decimal arabic
                page_label = obj_to_string(page_label)
        # string prefix, if any
        if 'P' in label_format:
            page_label = smart_unicode_decode(label_format['P']) + page_label
        return page_label
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
PDFQuery.load
|
python
|
def load(self, *page_numbers):
    """
    Parse the document (or only the given page numbers, as ints or
    lists of ints) and expose the results at pdf.tree (lxml) and
    pdf.pq (pyquery).
    >>> pdf.load()
    >>> pdf.tree
    <lxml.etree._ElementTree object at ...>
    >>> pdf.pq('LTPage')
    [<LTPage>, <LTPage>]
    >>> pdf.load(1)
    >>> pdf.pq('LTPage')
    [<LTPage>]
    >>> pdf.load(0, 1)
    >>> pdf.pq('LTPage')
    [<LTPage>, <LTPage>]
    """
    wanted = _flatten(page_numbers)
    self.tree = self.get_tree(*wanted)
    self.pq = self.get_pyquery(self.tree)
|
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L370-L389
|
[
"def _flatten(l, ltypes=(list, tuple)):\n # via http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html\n ltype = type(l)\n l = list(l)\n i = 0\n while i < len(l):\n while isinstance(l[i], ltypes):\n if not l[i]:\n l.pop(i)\n i -= 1\n break\n else:\n l[i:i + 1] = l[i]\n i += 1\n return ltype(l)\n",
"def get_pyquery(self, tree=None, page_numbers=None):\n \"\"\"\n Wrap given tree in pyquery and return.\n If no tree supplied, will generate one from given page_numbers, or\n all page numbers.\n \"\"\"\n if not page_numbers:\n page_numbers = []\n if tree is None:\n if not page_numbers and self.tree is not None:\n tree = self.tree\n else:\n tree = self.get_tree(page_numbers)\n if hasattr(tree, 'getroot'):\n tree = tree.getroot()\n return PyQuery(tree, css_translator=PDFQueryTranslator())\n",
"def get_tree(self, *page_numbers):\n \"\"\"\n Return lxml.etree.ElementTree for entire document, or page numbers\n given if any.\n \"\"\"\n cache_key = \"_\".join(map(str, _flatten(page_numbers)))\n tree = self._parse_tree_cacher.get(cache_key)\n if tree is None:\n # set up root\n root = parser.makeelement(\"pdfxml\")\n if self.doc.info:\n for k, v in list(self.doc.info[0].items()):\n k = obj_to_string(k)\n v = obj_to_string(resolve1(v))\n try:\n root.set(k, v)\n except ValueError as e:\n # Sometimes keys have a character in them, like ':',\n # that isn't allowed in XML attribute names.\n # If that happens we just replace non-word characters\n # with '_'.\n if \"Invalid attribute name\" in e.args[0]:\n k = re.sub('\\W', '_', k)\n root.set(k, v)\n\n # Parse pages and append to root.\n # If nothing was passed in for page_numbers, we do this for all\n # pages, but if None was explicitly passed in, we skip it.\n if not(len(page_numbers) == 1 and page_numbers[0] is None):\n if page_numbers:\n pages = [[n, self.get_layout(self.get_page(n))] for n in\n _flatten(page_numbers)]\n else:\n pages = enumerate(self.get_layouts())\n for n, page in pages:\n page = self._xmlize(page)\n page.set('page_index', obj_to_string(n))\n page.set('page_label', self.doc.get_page_number(n))\n root.append(page)\n self._clean_text(root)\n\n # wrap root in ElementTree\n tree = etree.ElementTree(root)\n self._parse_tree_cacher.set(cache_key, tree)\n\n return tree\n"
] |
class PDFQuery(object):
def __init__(
self,
file,
merge_tags=('LTChar', 'LTAnno'),
round_floats=True,
round_digits=3,
input_text_formatter=None,
normalize_spaces=True,
resort=True,
parse_tree_cacher=None,
laparams={'all_texts':True, 'detect_vertical':True},
):
# store input
self.merge_tags = merge_tags
self.round_floats = round_floats
self.round_digits = round_digits
self.resort = resort
# set up input text formatting function, if any
if input_text_formatter:
self.input_text_formatter = input_text_formatter
elif normalize_spaces:
r = re.compile(r'\s+')
self.input_text_formatter = lambda s: re.sub(r, ' ', s)
else:
self.input_text_formatter = None
# open doc
if not hasattr(file, 'read'):
try:
file = open(file, 'rb')
except TypeError:
raise TypeError("File must be file object or filepath string.")
parser = PDFParser(file)
if hasattr(QPDFDocument, 'set_parser'):
# pdfminer < 20131022
doc = QPDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
else:
# pdfminer >= 20131022
doc = QPDFDocument(parser)
parser.set_document(doc)
if hasattr(doc, 'initialize'):
# as of pdfminer==20140328, "PDFDocument.initialize() method is
# removed and no longer needed."
doc.initialize()
self.doc = doc
self.parser = parser
self.tree = None
self.pq = None
self.file = file
if parse_tree_cacher:
self._parse_tree_cacher = parse_tree_cacher
self._parse_tree_cacher.set_hash_key(self.file)
else:
self._parse_tree_cacher = DummyCache()
# set up layout parsing
rsrcmgr = PDFResourceManager()
if type(laparams) == dict:
laparams = LAParams(**laparams)
self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
# caches
self._pages = []
self._pages_iter = None
self._elements = []
def load(self, *page_numbers):
"""
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
"""
self.tree = self.get_tree(*_flatten(page_numbers))
self.pq = self.get_pyquery(self.tree)
def extract(self, searches, tree=None, as_dict=True):
"""
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
"""
if self.tree is None or self.pq is None:
self.load()
if tree is None:
pq = self.pq
else:
pq = PyQuery(tree, css_translator=PDFQueryTranslator())
results = []
formatter = None
parent = pq
for search in searches:
if len(search) < 3:
search = list(search) + [formatter]
key, search, tmp_formatter = search
if key == 'with_formatter':
if isinstance(search, six.string_types):
# is a pyquery method name, e.g. 'text'
formatter = lambda o, search=search: getattr(o, search)()
elif hasattr(search, '__call__') or not search:
# is a method, or None to end formatting
formatter = search
else:
raise TypeError("Formatter should be either a pyquery "
"method name or a callable function.")
elif key == 'with_parent':
parent = pq(search) if search else pq
else:
try:
result = parent("*").filter(search) if \
hasattr(search, '__call__') else parent(search)
except cssselect.SelectorSyntaxError as e:
raise cssselect.SelectorSyntaxError(
"Error applying selector '%s': %s" % (search, e))
if tmp_formatter:
result = tmp_formatter(result)
results += result if type(result) == tuple else [[key, result]]
if as_dict:
results = dict(results)
return results
# tree building stuff
def get_pyquery(self, tree=None, page_numbers=None):
"""
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
"""
if not page_numbers:
page_numbers = []
if tree is None:
if not page_numbers and self.tree is not None:
tree = self.tree
else:
tree = self.get_tree(page_numbers)
if hasattr(tree, 'getroot'):
tree = tree.getroot()
return PyQuery(tree, css_translator=PDFQueryTranslator())
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree
def _clean_text(self, branch):
"""
Remove text from node if same text exists in its children.
Apply string formatter if set.
"""
if branch.text and self.input_text_formatter:
branch.text = self.input_text_formatter(branch.text)
try:
for child in branch:
self._clean_text(child)
if branch.text and branch.text.find(child.text) >= 0:
branch.text = branch.text.replace(child.text, '', 1)
except TypeError: # not an iterable node
pass
def _xmlize(self, node, root=None):
if isinstance(node, LayoutElement):
# Already an XML element we can use
branch = node
else:
# collect attributes of current node
tags = self._getattrs(
node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
)
if type(node) == LTImage:
tags.update(self._getattrs(
node, 'colorspace', 'bits', 'imagemask', 'srcsize',
'stream', 'name', 'pts', 'linewidth')
)
elif type(node) == LTChar:
tags.update(self._getattrs(
node, 'fontname', 'adv', 'upright', 'size')
)
elif type(node) == LTPage:
tags.update(self._getattrs(node, 'pageid', 'rotate'))
# create node
branch = parser.makeelement(node.__class__.__name__, tags)
branch.layout = node
self._elements += [branch] # make sure layout keeps state
if root is None:
root = branch
# add text
if hasattr(node, 'get_text'):
branch.text = strip_invalid_xml_chars(node.get_text())
# add children if node is an iterable
if hasattr(node, '__iter__'):
last = None
for child in node:
child = self._xmlize(child, root)
if self.merge_tags and child.tag in self.merge_tags:
if branch.text and child.text in branch.text:
continue
elif last is not None and last.tag in self.merge_tags:
last.text += child.text
last.set(
'_obj_id',
last.get('_obj_id','') + "," + child.get('_obj_id','')
)
continue
# sort children by bounding boxes
if self.resort:
_append_sorted(root, child, _comp_bbox)
else:
branch.append(child)
last = child
return branch
def _getattrs(self, obj, *attrs):
""" Return dictionary of given attrs on given object, if they exist,
processing through _filter_value().
"""
filtered_attrs = {}
for attr in attrs:
if hasattr(obj, attr):
filtered_attrs[attr] = obj_to_string(
self._filter_value(getattr(obj, attr))
)
return filtered_attrs
def _filter_value(self, val):
if self.round_floats:
if type(val) == float:
val = round(val, self.round_digits)
elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
val = [self._filter_value(item) for item in val]
return val
# page access stuff
def get_page(self, page_number):
""" Get PDFPage object -- 0-indexed."""
return self._cached_pages(target_page=page_number)
def get_layout(self, page):
""" Get PDFMiner Layout object for given page object or page number. """
if type(page) == int:
page = self.get_page(page)
self.interpreter.process_page(page)
layout = self.device.get_result()
layout = self._add_annots(layout, page.annots)
return layout
def get_layouts(self):
""" Get list of PDFMiner Layout objects for each page. """
return (self.get_layout(page) for page in self._cached_pages())
def _cached_pages(self, target_page=-1):
"""
Get a page or all pages from page generator, caching results.
This is necessary because PDFMiner searches recursively for pages,
so we won't know how many there are until we parse the whole document,
which we don't want to do until we need to.
"""
try:
# pdfminer < 20131022
self._pages_iter = self._pages_iter or self.doc.get_pages()
except AttributeError:
# pdfminer >= 20131022
self._pages_iter = self._pages_iter or \
PDFPage.create_pages(self.doc)
if target_page >= 0:
while len(self._pages) <= target_page:
next_page = next(self._pages_iter)
if not next_page:
return None
next_page.page_number = 0
self._pages += [next_page]
try:
return self._pages[target_page]
except IndexError:
return None
self._pages += list(self._pages_iter)
return self._pages
def _add_annots(self, layout, annots):
"""Adds annotations to the layout object
"""
if annots:
for annot in resolve1(annots):
annot = resolve1(annot)
if annot.get('Rect') is not None:
annot['bbox'] = annot.pop('Rect') # Rename key
annot = self._set_hwxy_attrs(annot)
try:
annot['URI'] = resolve1(annot['A'])['URI']
except KeyError:
pass
for k, v in six.iteritems(annot):
if not isinstance(v, six.string_types):
annot[k] = obj_to_string(v)
elem = parser.makeelement('Annot', annot)
layout.add(elem)
return layout
@staticmethod
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
PDFQuery.extract
|
python
|
def extract(self, searches, tree=None, as_dict=True):
"""
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
"""
if self.tree is None or self.pq is None:
self.load()
if tree is None:
pq = self.pq
else:
pq = PyQuery(tree, css_translator=PDFQueryTranslator())
results = []
formatter = None
parent = pq
for search in searches:
if len(search) < 3:
search = list(search) + [formatter]
key, search, tmp_formatter = search
if key == 'with_formatter':
if isinstance(search, six.string_types):
# is a pyquery method name, e.g. 'text'
formatter = lambda o, search=search: getattr(o, search)()
elif hasattr(search, '__call__') or not search:
# is a method, or None to end formatting
formatter = search
else:
raise TypeError("Formatter should be either a pyquery "
"method name or a callable function.")
elif key == 'with_parent':
parent = pq(search) if search else pq
else:
try:
result = parent("*").filter(search) if \
hasattr(search, '__call__') else parent(search)
except cssselect.SelectorSyntaxError as e:
raise cssselect.SelectorSyntaxError(
"Error applying selector '%s': %s" % (search, e))
if tmp_formatter:
result = tmp_formatter(result)
results += result if type(result) == tuple else [[key, result]]
if as_dict:
results = dict(results)
return results
|
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L391-L436
|
[
"def load(self, *page_numbers):\n \"\"\"\n Load etree and pyquery object for entire document, or given page\n numbers (ints or lists). After this is called, objects are\n available at pdf.tree and pdf.pq.\n\n >>> pdf.load()\n >>> pdf.tree\n <lxml.etree._ElementTree object at ...>\n >>> pdf.pq('LTPage')\n [<LTPage>, <LTPage>]\n >>> pdf.load(1)\n >>> pdf.pq('LTPage')\n [<LTPage>]\n >>> pdf.load(0, 1)\n >>> pdf.pq('LTPage')\n [<LTPage>, <LTPage>]\n \"\"\"\n self.tree = self.get_tree(*_flatten(page_numbers))\n self.pq = self.get_pyquery(self.tree)\n"
] |
class PDFQuery(object):
def __init__(
self,
file,
merge_tags=('LTChar', 'LTAnno'),
round_floats=True,
round_digits=3,
input_text_formatter=None,
normalize_spaces=True,
resort=True,
parse_tree_cacher=None,
laparams={'all_texts':True, 'detect_vertical':True},
):
# store input
self.merge_tags = merge_tags
self.round_floats = round_floats
self.round_digits = round_digits
self.resort = resort
# set up input text formatting function, if any
if input_text_formatter:
self.input_text_formatter = input_text_formatter
elif normalize_spaces:
r = re.compile(r'\s+')
self.input_text_formatter = lambda s: re.sub(r, ' ', s)
else:
self.input_text_formatter = None
# open doc
if not hasattr(file, 'read'):
try:
file = open(file, 'rb')
except TypeError:
raise TypeError("File must be file object or filepath string.")
parser = PDFParser(file)
if hasattr(QPDFDocument, 'set_parser'):
# pdfminer < 20131022
doc = QPDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
else:
# pdfminer >= 20131022
doc = QPDFDocument(parser)
parser.set_document(doc)
if hasattr(doc, 'initialize'):
# as of pdfminer==20140328, "PDFDocument.initialize() method is
# removed and no longer needed."
doc.initialize()
self.doc = doc
self.parser = parser
self.tree = None
self.pq = None
self.file = file
if parse_tree_cacher:
self._parse_tree_cacher = parse_tree_cacher
self._parse_tree_cacher.set_hash_key(self.file)
else:
self._parse_tree_cacher = DummyCache()
# set up layout parsing
rsrcmgr = PDFResourceManager()
if type(laparams) == dict:
laparams = LAParams(**laparams)
self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
# caches
self._pages = []
self._pages_iter = None
self._elements = []
def load(self, *page_numbers):
"""
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
"""
self.tree = self.get_tree(*_flatten(page_numbers))
self.pq = self.get_pyquery(self.tree)
def extract(self, searches, tree=None, as_dict=True):
"""
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
"""
if self.tree is None or self.pq is None:
self.load()
if tree is None:
pq = self.pq
else:
pq = PyQuery(tree, css_translator=PDFQueryTranslator())
results = []
formatter = None
parent = pq
for search in searches:
if len(search) < 3:
search = list(search) + [formatter]
key, search, tmp_formatter = search
if key == 'with_formatter':
if isinstance(search, six.string_types):
# is a pyquery method name, e.g. 'text'
formatter = lambda o, search=search: getattr(o, search)()
elif hasattr(search, '__call__') or not search:
# is a method, or None to end formatting
formatter = search
else:
raise TypeError("Formatter should be either a pyquery "
"method name or a callable function.")
elif key == 'with_parent':
parent = pq(search) if search else pq
else:
try:
result = parent("*").filter(search) if \
hasattr(search, '__call__') else parent(search)
except cssselect.SelectorSyntaxError as e:
raise cssselect.SelectorSyntaxError(
"Error applying selector '%s': %s" % (search, e))
if tmp_formatter:
result = tmp_formatter(result)
results += result if type(result) == tuple else [[key, result]]
if as_dict:
results = dict(results)
return results
# tree building stuff
def get_pyquery(self, tree=None, page_numbers=None):
"""
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
"""
if not page_numbers:
page_numbers = []
if tree is None:
if not page_numbers and self.tree is not None:
tree = self.tree
else:
tree = self.get_tree(page_numbers)
if hasattr(tree, 'getroot'):
tree = tree.getroot()
return PyQuery(tree, css_translator=PDFQueryTranslator())
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree
def _clean_text(self, branch):
"""
Remove text from node if same text exists in its children.
Apply string formatter if set.
"""
if branch.text and self.input_text_formatter:
branch.text = self.input_text_formatter(branch.text)
try:
for child in branch:
self._clean_text(child)
if branch.text and branch.text.find(child.text) >= 0:
branch.text = branch.text.replace(child.text, '', 1)
except TypeError: # not an iterable node
pass
def _xmlize(self, node, root=None):
if isinstance(node, LayoutElement):
# Already an XML element we can use
branch = node
else:
# collect attributes of current node
tags = self._getattrs(
node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
)
if type(node) == LTImage:
tags.update(self._getattrs(
node, 'colorspace', 'bits', 'imagemask', 'srcsize',
'stream', 'name', 'pts', 'linewidth')
)
elif type(node) == LTChar:
tags.update(self._getattrs(
node, 'fontname', 'adv', 'upright', 'size')
)
elif type(node) == LTPage:
tags.update(self._getattrs(node, 'pageid', 'rotate'))
# create node
branch = parser.makeelement(node.__class__.__name__, tags)
branch.layout = node
self._elements += [branch] # make sure layout keeps state
if root is None:
root = branch
# add text
if hasattr(node, 'get_text'):
branch.text = strip_invalid_xml_chars(node.get_text())
# add children if node is an iterable
if hasattr(node, '__iter__'):
last = None
for child in node:
child = self._xmlize(child, root)
if self.merge_tags and child.tag in self.merge_tags:
if branch.text and child.text in branch.text:
continue
elif last is not None and last.tag in self.merge_tags:
last.text += child.text
last.set(
'_obj_id',
last.get('_obj_id','') + "," + child.get('_obj_id','')
)
continue
# sort children by bounding boxes
if self.resort:
_append_sorted(root, child, _comp_bbox)
else:
branch.append(child)
last = child
return branch
def _getattrs(self, obj, *attrs):
""" Return dictionary of given attrs on given object, if they exist,
processing through _filter_value().
"""
filtered_attrs = {}
for attr in attrs:
if hasattr(obj, attr):
filtered_attrs[attr] = obj_to_string(
self._filter_value(getattr(obj, attr))
)
return filtered_attrs
def _filter_value(self, val):
if self.round_floats:
if type(val) == float:
val = round(val, self.round_digits)
elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
val = [self._filter_value(item) for item in val]
return val
# page access stuff
def get_page(self, page_number):
""" Get PDFPage object -- 0-indexed."""
return self._cached_pages(target_page=page_number)
def get_layout(self, page):
""" Get PDFMiner Layout object for given page object or page number. """
if type(page) == int:
page = self.get_page(page)
self.interpreter.process_page(page)
layout = self.device.get_result()
layout = self._add_annots(layout, page.annots)
return layout
def get_layouts(self):
""" Get list of PDFMiner Layout objects for each page. """
return (self.get_layout(page) for page in self._cached_pages())
def _cached_pages(self, target_page=-1):
"""
Get a page or all pages from page generator, caching results.
This is necessary because PDFMiner searches recursively for pages,
so we won't know how many there are until we parse the whole document,
which we don't want to do until we need to.
"""
try:
# pdfminer < 20131022
self._pages_iter = self._pages_iter or self.doc.get_pages()
except AttributeError:
# pdfminer >= 20131022
self._pages_iter = self._pages_iter or \
PDFPage.create_pages(self.doc)
if target_page >= 0:
while len(self._pages) <= target_page:
next_page = next(self._pages_iter)
if not next_page:
return None
next_page.page_number = 0
self._pages += [next_page]
try:
return self._pages[target_page]
except IndexError:
return None
self._pages += list(self._pages_iter)
return self._pages
def _add_annots(self, layout, annots):
"""Adds annotations to the layout object
"""
if annots:
for annot in resolve1(annots):
annot = resolve1(annot)
if annot.get('Rect') is not None:
annot['bbox'] = annot.pop('Rect') # Rename key
annot = self._set_hwxy_attrs(annot)
try:
annot['URI'] = resolve1(annot['A'])['URI']
except KeyError:
pass
for k, v in six.iteritems(annot):
if not isinstance(v, six.string_types):
annot[k] = obj_to_string(v)
elem = parser.makeelement('Annot', annot)
layout.add(elem)
return layout
@staticmethod
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
PDFQuery.get_pyquery
|
python
|
def get_pyquery(self, tree=None, page_numbers=None):
"""
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
"""
if not page_numbers:
page_numbers = []
if tree is None:
if not page_numbers and self.tree is not None:
tree = self.tree
else:
tree = self.get_tree(page_numbers)
if hasattr(tree, 'getroot'):
tree = tree.getroot()
return PyQuery(tree, css_translator=PDFQueryTranslator())
|
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L439-L454
|
[
"def get_tree(self, *page_numbers):\n \"\"\"\n Return lxml.etree.ElementTree for entire document, or page numbers\n given if any.\n \"\"\"\n cache_key = \"_\".join(map(str, _flatten(page_numbers)))\n tree = self._parse_tree_cacher.get(cache_key)\n if tree is None:\n # set up root\n root = parser.makeelement(\"pdfxml\")\n if self.doc.info:\n for k, v in list(self.doc.info[0].items()):\n k = obj_to_string(k)\n v = obj_to_string(resolve1(v))\n try:\n root.set(k, v)\n except ValueError as e:\n # Sometimes keys have a character in them, like ':',\n # that isn't allowed in XML attribute names.\n # If that happens we just replace non-word characters\n # with '_'.\n if \"Invalid attribute name\" in e.args[0]:\n k = re.sub('\\W', '_', k)\n root.set(k, v)\n\n # Parse pages and append to root.\n # If nothing was passed in for page_numbers, we do this for all\n # pages, but if None was explicitly passed in, we skip it.\n if not(len(page_numbers) == 1 and page_numbers[0] is None):\n if page_numbers:\n pages = [[n, self.get_layout(self.get_page(n))] for n in\n _flatten(page_numbers)]\n else:\n pages = enumerate(self.get_layouts())\n for n, page in pages:\n page = self._xmlize(page)\n page.set('page_index', obj_to_string(n))\n page.set('page_label', self.doc.get_page_number(n))\n root.append(page)\n self._clean_text(root)\n\n # wrap root in ElementTree\n tree = etree.ElementTree(root)\n self._parse_tree_cacher.set(cache_key, tree)\n\n return tree\n"
] |
class PDFQuery(object):
def __init__(
self,
file,
merge_tags=('LTChar', 'LTAnno'),
round_floats=True,
round_digits=3,
input_text_formatter=None,
normalize_spaces=True,
resort=True,
parse_tree_cacher=None,
laparams={'all_texts':True, 'detect_vertical':True},
):
# store input
self.merge_tags = merge_tags
self.round_floats = round_floats
self.round_digits = round_digits
self.resort = resort
# set up input text formatting function, if any
if input_text_formatter:
self.input_text_formatter = input_text_formatter
elif normalize_spaces:
r = re.compile(r'\s+')
self.input_text_formatter = lambda s: re.sub(r, ' ', s)
else:
self.input_text_formatter = None
# open doc
if not hasattr(file, 'read'):
try:
file = open(file, 'rb')
except TypeError:
raise TypeError("File must be file object or filepath string.")
parser = PDFParser(file)
if hasattr(QPDFDocument, 'set_parser'):
# pdfminer < 20131022
doc = QPDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
else:
# pdfminer >= 20131022
doc = QPDFDocument(parser)
parser.set_document(doc)
if hasattr(doc, 'initialize'):
# as of pdfminer==20140328, "PDFDocument.initialize() method is
# removed and no longer needed."
doc.initialize()
self.doc = doc
self.parser = parser
self.tree = None
self.pq = None
self.file = file
if parse_tree_cacher:
self._parse_tree_cacher = parse_tree_cacher
self._parse_tree_cacher.set_hash_key(self.file)
else:
self._parse_tree_cacher = DummyCache()
# set up layout parsing
rsrcmgr = PDFResourceManager()
if type(laparams) == dict:
laparams = LAParams(**laparams)
self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
# caches
self._pages = []
self._pages_iter = None
self._elements = []
def load(self, *page_numbers):
"""
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
"""
self.tree = self.get_tree(*_flatten(page_numbers))
self.pq = self.get_pyquery(self.tree)
def extract(self, searches, tree=None, as_dict=True):
"""
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
"""
if self.tree is None or self.pq is None:
self.load()
if tree is None:
pq = self.pq
else:
pq = PyQuery(tree, css_translator=PDFQueryTranslator())
results = []
formatter = None
parent = pq
for search in searches:
if len(search) < 3:
search = list(search) + [formatter]
key, search, tmp_formatter = search
if key == 'with_formatter':
if isinstance(search, six.string_types):
# is a pyquery method name, e.g. 'text'
formatter = lambda o, search=search: getattr(o, search)()
elif hasattr(search, '__call__') or not search:
# is a method, or None to end formatting
formatter = search
else:
raise TypeError("Formatter should be either a pyquery "
"method name or a callable function.")
elif key == 'with_parent':
parent = pq(search) if search else pq
else:
try:
result = parent("*").filter(search) if \
hasattr(search, '__call__') else parent(search)
except cssselect.SelectorSyntaxError as e:
raise cssselect.SelectorSyntaxError(
"Error applying selector '%s': %s" % (search, e))
if tmp_formatter:
result = tmp_formatter(result)
results += result if type(result) == tuple else [[key, result]]
if as_dict:
results = dict(results)
return results
# tree building stuff
def get_pyquery(self, tree=None, page_numbers=None):
"""
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
"""
if not page_numbers:
page_numbers = []
if tree is None:
if not page_numbers and self.tree is not None:
tree = self.tree
else:
tree = self.get_tree(page_numbers)
if hasattr(tree, 'getroot'):
tree = tree.getroot()
return PyQuery(tree, css_translator=PDFQueryTranslator())
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree
def _clean_text(self, branch):
"""
Remove text from node if same text exists in its children.
Apply string formatter if set.
"""
if branch.text and self.input_text_formatter:
branch.text = self.input_text_formatter(branch.text)
try:
for child in branch:
self._clean_text(child)
if branch.text and branch.text.find(child.text) >= 0:
branch.text = branch.text.replace(child.text, '', 1)
except TypeError: # not an iterable node
pass
def _xmlize(self, node, root=None):
if isinstance(node, LayoutElement):
# Already an XML element we can use
branch = node
else:
# collect attributes of current node
tags = self._getattrs(
node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
)
if type(node) == LTImage:
tags.update(self._getattrs(
node, 'colorspace', 'bits', 'imagemask', 'srcsize',
'stream', 'name', 'pts', 'linewidth')
)
elif type(node) == LTChar:
tags.update(self._getattrs(
node, 'fontname', 'adv', 'upright', 'size')
)
elif type(node) == LTPage:
tags.update(self._getattrs(node, 'pageid', 'rotate'))
# create node
branch = parser.makeelement(node.__class__.__name__, tags)
branch.layout = node
self._elements += [branch] # make sure layout keeps state
if root is None:
root = branch
# add text
if hasattr(node, 'get_text'):
branch.text = strip_invalid_xml_chars(node.get_text())
# add children if node is an iterable
if hasattr(node, '__iter__'):
last = None
for child in node:
child = self._xmlize(child, root)
if self.merge_tags and child.tag in self.merge_tags:
if branch.text and child.text in branch.text:
continue
elif last is not None and last.tag in self.merge_tags:
last.text += child.text
last.set(
'_obj_id',
last.get('_obj_id','') + "," + child.get('_obj_id','')
)
continue
# sort children by bounding boxes
if self.resort:
_append_sorted(root, child, _comp_bbox)
else:
branch.append(child)
last = child
return branch
def _getattrs(self, obj, *attrs):
""" Return dictionary of given attrs on given object, if they exist,
processing through _filter_value().
"""
filtered_attrs = {}
for attr in attrs:
if hasattr(obj, attr):
filtered_attrs[attr] = obj_to_string(
self._filter_value(getattr(obj, attr))
)
return filtered_attrs
def _filter_value(self, val):
if self.round_floats:
if type(val) == float:
val = round(val, self.round_digits)
elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
val = [self._filter_value(item) for item in val]
return val
# page access stuff
def get_page(self, page_number):
""" Get PDFPage object -- 0-indexed."""
return self._cached_pages(target_page=page_number)
def get_layout(self, page):
""" Get PDFMiner Layout object for given page object or page number. """
if type(page) == int:
page = self.get_page(page)
self.interpreter.process_page(page)
layout = self.device.get_result()
layout = self._add_annots(layout, page.annots)
return layout
def get_layouts(self):
""" Get list of PDFMiner Layout objects for each page. """
return (self.get_layout(page) for page in self._cached_pages())
def _cached_pages(self, target_page=-1):
"""
Get a page or all pages from page generator, caching results.
This is necessary because PDFMiner searches recursively for pages,
so we won't know how many there are until we parse the whole document,
which we don't want to do until we need to.
"""
try:
# pdfminer < 20131022
self._pages_iter = self._pages_iter or self.doc.get_pages()
except AttributeError:
# pdfminer >= 20131022
self._pages_iter = self._pages_iter or \
PDFPage.create_pages(self.doc)
if target_page >= 0:
while len(self._pages) <= target_page:
next_page = next(self._pages_iter)
if not next_page:
return None
next_page.page_number = 0
self._pages += [next_page]
try:
return self._pages[target_page]
except IndexError:
return None
self._pages += list(self._pages_iter)
return self._pages
def _add_annots(self, layout, annots):
"""Adds annotations to the layout object
"""
if annots:
for annot in resolve1(annots):
annot = resolve1(annot)
if annot.get('Rect') is not None:
annot['bbox'] = annot.pop('Rect') # Rename key
annot = self._set_hwxy_attrs(annot)
try:
annot['URI'] = resolve1(annot['A'])['URI']
except KeyError:
pass
for k, v in six.iteritems(annot):
if not isinstance(v, six.string_types):
annot[k] = obj_to_string(v)
elem = parser.makeelement('Annot', annot)
layout.add(elem)
return layout
@staticmethod
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
PDFQuery.get_tree
|
python
|
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree
|
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L456-L501
|
[
"def _flatten(l, ltypes=(list, tuple)):\n # via http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html\n ltype = type(l)\n l = list(l)\n i = 0\n while i < len(l):\n while isinstance(l[i], ltypes):\n if not l[i]:\n l.pop(i)\n i -= 1\n break\n else:\n l[i:i + 1] = l[i]\n i += 1\n return ltype(l)\n",
"def obj_to_string(obj, top=True):\n \"\"\"\n Turn an arbitrary object into a unicode string. If complex (dict/list/tuple), will be json-encoded.\n \"\"\"\n obj = prepare_for_json_encoding(obj)\n if type(obj) == six.text_type:\n return obj\n return json.dumps(obj)\n",
"def _clean_text(self, branch):\n \"\"\"\n Remove text from node if same text exists in its children.\n Apply string formatter if set.\n \"\"\"\n if branch.text and self.input_text_formatter:\n branch.text = self.input_text_formatter(branch.text)\n try:\n for child in branch:\n self._clean_text(child)\n if branch.text and branch.text.find(child.text) >= 0:\n branch.text = branch.text.replace(child.text, '', 1)\n except TypeError: # not an iterable node\n pass\n",
"def _xmlize(self, node, root=None):\n if isinstance(node, LayoutElement):\n # Already an XML element we can use\n branch = node\n else:\n # collect attributes of current node\n tags = self._getattrs(\n node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',\n 'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'\n )\n if type(node) == LTImage:\n tags.update(self._getattrs(\n node, 'colorspace', 'bits', 'imagemask', 'srcsize',\n 'stream', 'name', 'pts', 'linewidth')\n )\n elif type(node) == LTChar:\n tags.update(self._getattrs(\n node, 'fontname', 'adv', 'upright', 'size')\n )\n elif type(node) == LTPage:\n tags.update(self._getattrs(node, 'pageid', 'rotate'))\n\n # create node\n branch = parser.makeelement(node.__class__.__name__, tags)\n\n branch.layout = node\n self._elements += [branch] # make sure layout keeps state\n if root is None:\n root = branch\n\n # add text\n if hasattr(node, 'get_text'):\n branch.text = strip_invalid_xml_chars(node.get_text())\n\n # add children if node is an iterable\n if hasattr(node, '__iter__'):\n last = None\n for child in node:\n child = self._xmlize(child, root)\n if self.merge_tags and child.tag in self.merge_tags:\n if branch.text and child.text in branch.text:\n continue\n elif last is not None and last.tag in self.merge_tags:\n last.text += child.text\n last.set(\n '_obj_id',\n last.get('_obj_id','') + \",\" + child.get('_obj_id','')\n )\n continue\n # sort children by bounding boxes\n if self.resort:\n _append_sorted(root, child, _comp_bbox)\n else:\n branch.append(child)\n last = child\n return branch\n",
"def get_layouts(self):\n \"\"\" Get list of PDFMiner Layout objects for each page. \"\"\"\n return (self.get_layout(page) for page in self._cached_pages())\n"
] |
class PDFQuery(object):
def __init__(
self,
file,
merge_tags=('LTChar', 'LTAnno'),
round_floats=True,
round_digits=3,
input_text_formatter=None,
normalize_spaces=True,
resort=True,
parse_tree_cacher=None,
laparams={'all_texts':True, 'detect_vertical':True},
):
# store input
self.merge_tags = merge_tags
self.round_floats = round_floats
self.round_digits = round_digits
self.resort = resort
# set up input text formatting function, if any
if input_text_formatter:
self.input_text_formatter = input_text_formatter
elif normalize_spaces:
r = re.compile(r'\s+')
self.input_text_formatter = lambda s: re.sub(r, ' ', s)
else:
self.input_text_formatter = None
# open doc
if not hasattr(file, 'read'):
try:
file = open(file, 'rb')
except TypeError:
raise TypeError("File must be file object or filepath string.")
parser = PDFParser(file)
if hasattr(QPDFDocument, 'set_parser'):
# pdfminer < 20131022
doc = QPDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
else:
# pdfminer >= 20131022
doc = QPDFDocument(parser)
parser.set_document(doc)
if hasattr(doc, 'initialize'):
# as of pdfminer==20140328, "PDFDocument.initialize() method is
# removed and no longer needed."
doc.initialize()
self.doc = doc
self.parser = parser
self.tree = None
self.pq = None
self.file = file
if parse_tree_cacher:
self._parse_tree_cacher = parse_tree_cacher
self._parse_tree_cacher.set_hash_key(self.file)
else:
self._parse_tree_cacher = DummyCache()
# set up layout parsing
rsrcmgr = PDFResourceManager()
if type(laparams) == dict:
laparams = LAParams(**laparams)
self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
# caches
self._pages = []
self._pages_iter = None
self._elements = []
def load(self, *page_numbers):
"""
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
"""
self.tree = self.get_tree(*_flatten(page_numbers))
self.pq = self.get_pyquery(self.tree)
def extract(self, searches, tree=None, as_dict=True):
"""
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
"""
if self.tree is None or self.pq is None:
self.load()
if tree is None:
pq = self.pq
else:
pq = PyQuery(tree, css_translator=PDFQueryTranslator())
results = []
formatter = None
parent = pq
for search in searches:
if len(search) < 3:
search = list(search) + [formatter]
key, search, tmp_formatter = search
if key == 'with_formatter':
if isinstance(search, six.string_types):
# is a pyquery method name, e.g. 'text'
formatter = lambda o, search=search: getattr(o, search)()
elif hasattr(search, '__call__') or not search:
# is a method, or None to end formatting
formatter = search
else:
raise TypeError("Formatter should be either a pyquery "
"method name or a callable function.")
elif key == 'with_parent':
parent = pq(search) if search else pq
else:
try:
result = parent("*").filter(search) if \
hasattr(search, '__call__') else parent(search)
except cssselect.SelectorSyntaxError as e:
raise cssselect.SelectorSyntaxError(
"Error applying selector '%s': %s" % (search, e))
if tmp_formatter:
result = tmp_formatter(result)
results += result if type(result) == tuple else [[key, result]]
if as_dict:
results = dict(results)
return results
# tree building stuff
def get_pyquery(self, tree=None, page_numbers=None):
"""
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
"""
if not page_numbers:
page_numbers = []
if tree is None:
if not page_numbers and self.tree is not None:
tree = self.tree
else:
tree = self.get_tree(page_numbers)
if hasattr(tree, 'getroot'):
tree = tree.getroot()
return PyQuery(tree, css_translator=PDFQueryTranslator())
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree
def _clean_text(self, branch):
"""
Remove text from node if same text exists in its children.
Apply string formatter if set.
"""
if branch.text and self.input_text_formatter:
branch.text = self.input_text_formatter(branch.text)
try:
for child in branch:
self._clean_text(child)
if branch.text and branch.text.find(child.text) >= 0:
branch.text = branch.text.replace(child.text, '', 1)
except TypeError: # not an iterable node
pass
def _xmlize(self, node, root=None):
if isinstance(node, LayoutElement):
# Already an XML element we can use
branch = node
else:
# collect attributes of current node
tags = self._getattrs(
node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
)
if type(node) == LTImage:
tags.update(self._getattrs(
node, 'colorspace', 'bits', 'imagemask', 'srcsize',
'stream', 'name', 'pts', 'linewidth')
)
elif type(node) == LTChar:
tags.update(self._getattrs(
node, 'fontname', 'adv', 'upright', 'size')
)
elif type(node) == LTPage:
tags.update(self._getattrs(node, 'pageid', 'rotate'))
# create node
branch = parser.makeelement(node.__class__.__name__, tags)
branch.layout = node
self._elements += [branch] # make sure layout keeps state
if root is None:
root = branch
# add text
if hasattr(node, 'get_text'):
branch.text = strip_invalid_xml_chars(node.get_text())
# add children if node is an iterable
if hasattr(node, '__iter__'):
last = None
for child in node:
child = self._xmlize(child, root)
if self.merge_tags and child.tag in self.merge_tags:
if branch.text and child.text in branch.text:
continue
elif last is not None and last.tag in self.merge_tags:
last.text += child.text
last.set(
'_obj_id',
last.get('_obj_id','') + "," + child.get('_obj_id','')
)
continue
# sort children by bounding boxes
if self.resort:
_append_sorted(root, child, _comp_bbox)
else:
branch.append(child)
last = child
return branch
def _getattrs(self, obj, *attrs):
""" Return dictionary of given attrs on given object, if they exist,
processing through _filter_value().
"""
filtered_attrs = {}
for attr in attrs:
if hasattr(obj, attr):
filtered_attrs[attr] = obj_to_string(
self._filter_value(getattr(obj, attr))
)
return filtered_attrs
def _filter_value(self, val):
if self.round_floats:
if type(val) == float:
val = round(val, self.round_digits)
elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
val = [self._filter_value(item) for item in val]
return val
# page access stuff
def get_page(self, page_number):
""" Get PDFPage object -- 0-indexed."""
return self._cached_pages(target_page=page_number)
def get_layout(self, page):
""" Get PDFMiner Layout object for given page object or page number. """
if type(page) == int:
page = self.get_page(page)
self.interpreter.process_page(page)
layout = self.device.get_result()
layout = self._add_annots(layout, page.annots)
return layout
def get_layouts(self):
""" Get list of PDFMiner Layout objects for each page. """
return (self.get_layout(page) for page in self._cached_pages())
def _cached_pages(self, target_page=-1):
"""
Get a page or all pages from page generator, caching results.
This is necessary because PDFMiner searches recursively for pages,
so we won't know how many there are until we parse the whole document,
which we don't want to do until we need to.
"""
try:
# pdfminer < 20131022
self._pages_iter = self._pages_iter or self.doc.get_pages()
except AttributeError:
# pdfminer >= 20131022
self._pages_iter = self._pages_iter or \
PDFPage.create_pages(self.doc)
if target_page >= 0:
while len(self._pages) <= target_page:
next_page = next(self._pages_iter)
if not next_page:
return None
next_page.page_number = 0
self._pages += [next_page]
try:
return self._pages[target_page]
except IndexError:
return None
self._pages += list(self._pages_iter)
return self._pages
def _add_annots(self, layout, annots):
"""Adds annotations to the layout object
"""
if annots:
for annot in resolve1(annots):
annot = resolve1(annot)
if annot.get('Rect') is not None:
annot['bbox'] = annot.pop('Rect') # Rename key
annot = self._set_hwxy_attrs(annot)
try:
annot['URI'] = resolve1(annot['A'])['URI']
except KeyError:
pass
for k, v in six.iteritems(annot):
if not isinstance(v, six.string_types):
annot[k] = obj_to_string(v)
elem = parser.makeelement('Annot', annot)
layout.add(elem)
return layout
@staticmethod
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
PDFQuery._clean_text
|
python
|
def _clean_text(self, branch):
"""
Remove text from node if same text exists in its children.
Apply string formatter if set.
"""
if branch.text and self.input_text_formatter:
branch.text = self.input_text_formatter(branch.text)
try:
for child in branch:
self._clean_text(child)
if branch.text and branch.text.find(child.text) >= 0:
branch.text = branch.text.replace(child.text, '', 1)
except TypeError: # not an iterable node
pass
|
Remove text from node if same text exists in its children.
Apply string formatter if set.
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L503-L516
|
[
"def _clean_text(self, branch):\n \"\"\"\n Remove text from node if same text exists in its children.\n Apply string formatter if set.\n \"\"\"\n if branch.text and self.input_text_formatter:\n branch.text = self.input_text_formatter(branch.text)\n try:\n for child in branch:\n self._clean_text(child)\n if branch.text and branch.text.find(child.text) >= 0:\n branch.text = branch.text.replace(child.text, '', 1)\n except TypeError: # not an iterable node\n pass\n"
] |
class PDFQuery(object):
def __init__(
self,
file,
merge_tags=('LTChar', 'LTAnno'),
round_floats=True,
round_digits=3,
input_text_formatter=None,
normalize_spaces=True,
resort=True,
parse_tree_cacher=None,
laparams={'all_texts':True, 'detect_vertical':True},
):
# store input
self.merge_tags = merge_tags
self.round_floats = round_floats
self.round_digits = round_digits
self.resort = resort
# set up input text formatting function, if any
if input_text_formatter:
self.input_text_formatter = input_text_formatter
elif normalize_spaces:
r = re.compile(r'\s+')
self.input_text_formatter = lambda s: re.sub(r, ' ', s)
else:
self.input_text_formatter = None
# open doc
if not hasattr(file, 'read'):
try:
file = open(file, 'rb')
except TypeError:
raise TypeError("File must be file object or filepath string.")
parser = PDFParser(file)
if hasattr(QPDFDocument, 'set_parser'):
# pdfminer < 20131022
doc = QPDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
else:
# pdfminer >= 20131022
doc = QPDFDocument(parser)
parser.set_document(doc)
if hasattr(doc, 'initialize'):
# as of pdfminer==20140328, "PDFDocument.initialize() method is
# removed and no longer needed."
doc.initialize()
self.doc = doc
self.parser = parser
self.tree = None
self.pq = None
self.file = file
if parse_tree_cacher:
self._parse_tree_cacher = parse_tree_cacher
self._parse_tree_cacher.set_hash_key(self.file)
else:
self._parse_tree_cacher = DummyCache()
# set up layout parsing
rsrcmgr = PDFResourceManager()
if type(laparams) == dict:
laparams = LAParams(**laparams)
self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
# caches
self._pages = []
self._pages_iter = None
self._elements = []
def load(self, *page_numbers):
"""
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
"""
self.tree = self.get_tree(*_flatten(page_numbers))
self.pq = self.get_pyquery(self.tree)
def extract(self, searches, tree=None, as_dict=True):
"""
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
"""
if self.tree is None or self.pq is None:
self.load()
if tree is None:
pq = self.pq
else:
pq = PyQuery(tree, css_translator=PDFQueryTranslator())
results = []
formatter = None
parent = pq
for search in searches:
if len(search) < 3:
search = list(search) + [formatter]
key, search, tmp_formatter = search
if key == 'with_formatter':
if isinstance(search, six.string_types):
# is a pyquery method name, e.g. 'text'
formatter = lambda o, search=search: getattr(o, search)()
elif hasattr(search, '__call__') or not search:
# is a method, or None to end formatting
formatter = search
else:
raise TypeError("Formatter should be either a pyquery "
"method name or a callable function.")
elif key == 'with_parent':
parent = pq(search) if search else pq
else:
try:
result = parent("*").filter(search) if \
hasattr(search, '__call__') else parent(search)
except cssselect.SelectorSyntaxError as e:
raise cssselect.SelectorSyntaxError(
"Error applying selector '%s': %s" % (search, e))
if tmp_formatter:
result = tmp_formatter(result)
results += result if type(result) == tuple else [[key, result]]
if as_dict:
results = dict(results)
return results
# tree building stuff
def get_pyquery(self, tree=None, page_numbers=None):
"""
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
"""
if not page_numbers:
page_numbers = []
if tree is None:
if not page_numbers and self.tree is not None:
tree = self.tree
else:
tree = self.get_tree(page_numbers)
if hasattr(tree, 'getroot'):
tree = tree.getroot()
return PyQuery(tree, css_translator=PDFQueryTranslator())
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree
def _clean_text(self, branch):
"""
Remove text from node if same text exists in its children.
Apply string formatter if set.
"""
if branch.text and self.input_text_formatter:
branch.text = self.input_text_formatter(branch.text)
try:
for child in branch:
self._clean_text(child)
if branch.text and branch.text.find(child.text) >= 0:
branch.text = branch.text.replace(child.text, '', 1)
except TypeError: # not an iterable node
pass
def _xmlize(self, node, root=None):
if isinstance(node, LayoutElement):
# Already an XML element we can use
branch = node
else:
# collect attributes of current node
tags = self._getattrs(
node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
)
if type(node) == LTImage:
tags.update(self._getattrs(
node, 'colorspace', 'bits', 'imagemask', 'srcsize',
'stream', 'name', 'pts', 'linewidth')
)
elif type(node) == LTChar:
tags.update(self._getattrs(
node, 'fontname', 'adv', 'upright', 'size')
)
elif type(node) == LTPage:
tags.update(self._getattrs(node, 'pageid', 'rotate'))
# create node
branch = parser.makeelement(node.__class__.__name__, tags)
branch.layout = node
self._elements += [branch] # make sure layout keeps state
if root is None:
root = branch
# add text
if hasattr(node, 'get_text'):
branch.text = strip_invalid_xml_chars(node.get_text())
# add children if node is an iterable
if hasattr(node, '__iter__'):
last = None
for child in node:
child = self._xmlize(child, root)
if self.merge_tags and child.tag in self.merge_tags:
if branch.text and child.text in branch.text:
continue
elif last is not None and last.tag in self.merge_tags:
last.text += child.text
last.set(
'_obj_id',
last.get('_obj_id','') + "," + child.get('_obj_id','')
)
continue
# sort children by bounding boxes
if self.resort:
_append_sorted(root, child, _comp_bbox)
else:
branch.append(child)
last = child
return branch
def _getattrs(self, obj, *attrs):
""" Return dictionary of given attrs on given object, if they exist,
processing through _filter_value().
"""
filtered_attrs = {}
for attr in attrs:
if hasattr(obj, attr):
filtered_attrs[attr] = obj_to_string(
self._filter_value(getattr(obj, attr))
)
return filtered_attrs
def _filter_value(self, val):
if self.round_floats:
if type(val) == float:
val = round(val, self.round_digits)
elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
val = [self._filter_value(item) for item in val]
return val
# page access stuff
def get_page(self, page_number):
""" Get PDFPage object -- 0-indexed."""
return self._cached_pages(target_page=page_number)
def get_layout(self, page):
""" Get PDFMiner Layout object for given page object or page number. """
if type(page) == int:
page = self.get_page(page)
self.interpreter.process_page(page)
layout = self.device.get_result()
layout = self._add_annots(layout, page.annots)
return layout
def get_layouts(self):
""" Get list of PDFMiner Layout objects for each page. """
return (self.get_layout(page) for page in self._cached_pages())
def _cached_pages(self, target_page=-1):
"""
Get a page or all pages from page generator, caching results.
This is necessary because PDFMiner searches recursively for pages,
so we won't know how many there are until we parse the whole document,
which we don't want to do until we need to.
"""
try:
# pdfminer < 20131022
self._pages_iter = self._pages_iter or self.doc.get_pages()
except AttributeError:
# pdfminer >= 20131022
self._pages_iter = self._pages_iter or \
PDFPage.create_pages(self.doc)
if target_page >= 0:
while len(self._pages) <= target_page:
next_page = next(self._pages_iter)
if not next_page:
return None
next_page.page_number = 0
self._pages += [next_page]
try:
return self._pages[target_page]
except IndexError:
return None
self._pages += list(self._pages_iter)
return self._pages
def _add_annots(self, layout, annots):
"""Adds annotations to the layout object
"""
if annots:
for annot in resolve1(annots):
annot = resolve1(annot)
if annot.get('Rect') is not None:
annot['bbox'] = annot.pop('Rect') # Rename key
annot = self._set_hwxy_attrs(annot)
try:
annot['URI'] = resolve1(annot['A'])['URI']
except KeyError:
pass
for k, v in six.iteritems(annot):
if not isinstance(v, six.string_types):
annot[k] = obj_to_string(v)
elem = parser.makeelement('Annot', annot)
layout.add(elem)
return layout
@staticmethod
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
PDFQuery._getattrs
|
python
|
def _getattrs(self, obj, *attrs):
""" Return dictionary of given attrs on given object, if they exist,
processing through _filter_value().
"""
filtered_attrs = {}
for attr in attrs:
if hasattr(obj, attr):
filtered_attrs[attr] = obj_to_string(
self._filter_value(getattr(obj, attr))
)
return filtered_attrs
|
Return dictionary of given attrs on given object, if they exist,
processing through _filter_value().
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L575-L585
| null |
class PDFQuery(object):
def __init__(
self,
file,
merge_tags=('LTChar', 'LTAnno'),
round_floats=True,
round_digits=3,
input_text_formatter=None,
normalize_spaces=True,
resort=True,
parse_tree_cacher=None,
laparams={'all_texts':True, 'detect_vertical':True},
):
# store input
self.merge_tags = merge_tags
self.round_floats = round_floats
self.round_digits = round_digits
self.resort = resort
# set up input text formatting function, if any
if input_text_formatter:
self.input_text_formatter = input_text_formatter
elif normalize_spaces:
r = re.compile(r'\s+')
self.input_text_formatter = lambda s: re.sub(r, ' ', s)
else:
self.input_text_formatter = None
# open doc
if not hasattr(file, 'read'):
try:
file = open(file, 'rb')
except TypeError:
raise TypeError("File must be file object or filepath string.")
parser = PDFParser(file)
if hasattr(QPDFDocument, 'set_parser'):
# pdfminer < 20131022
doc = QPDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
else:
# pdfminer >= 20131022
doc = QPDFDocument(parser)
parser.set_document(doc)
if hasattr(doc, 'initialize'):
# as of pdfminer==20140328, "PDFDocument.initialize() method is
# removed and no longer needed."
doc.initialize()
self.doc = doc
self.parser = parser
self.tree = None
self.pq = None
self.file = file
if parse_tree_cacher:
self._parse_tree_cacher = parse_tree_cacher
self._parse_tree_cacher.set_hash_key(self.file)
else:
self._parse_tree_cacher = DummyCache()
# set up layout parsing
rsrcmgr = PDFResourceManager()
if type(laparams) == dict:
laparams = LAParams(**laparams)
self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
# caches
self._pages = []
self._pages_iter = None
self._elements = []
def load(self, *page_numbers):
"""
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
"""
self.tree = self.get_tree(*_flatten(page_numbers))
self.pq = self.get_pyquery(self.tree)
def extract(self, searches, tree=None, as_dict=True):
"""
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
"""
if self.tree is None or self.pq is None:
self.load()
if tree is None:
pq = self.pq
else:
pq = PyQuery(tree, css_translator=PDFQueryTranslator())
results = []
formatter = None
parent = pq
for search in searches:
if len(search) < 3:
search = list(search) + [formatter]
key, search, tmp_formatter = search
if key == 'with_formatter':
if isinstance(search, six.string_types):
# is a pyquery method name, e.g. 'text'
formatter = lambda o, search=search: getattr(o, search)()
elif hasattr(search, '__call__') or not search:
# is a method, or None to end formatting
formatter = search
else:
raise TypeError("Formatter should be either a pyquery "
"method name or a callable function.")
elif key == 'with_parent':
parent = pq(search) if search else pq
else:
try:
result = parent("*").filter(search) if \
hasattr(search, '__call__') else parent(search)
except cssselect.SelectorSyntaxError as e:
raise cssselect.SelectorSyntaxError(
"Error applying selector '%s': %s" % (search, e))
if tmp_formatter:
result = tmp_formatter(result)
results += result if type(result) == tuple else [[key, result]]
if as_dict:
results = dict(results)
return results
# tree building stuff
def get_pyquery(self, tree=None, page_numbers=None):
"""
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
"""
if not page_numbers:
page_numbers = []
if tree is None:
if not page_numbers and self.tree is not None:
tree = self.tree
else:
tree = self.get_tree(page_numbers)
if hasattr(tree, 'getroot'):
tree = tree.getroot()
return PyQuery(tree, css_translator=PDFQueryTranslator())
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree
def _clean_text(self, branch):
    """
    Remove text from node if same text exists in its children.
    Apply string formatter if set.
    """
    if branch.text and self.input_text_formatter:
        branch.text = self.input_text_formatter(branch.text)
    try:
        for child in branch:
            # clean the child first, then strip the first occurrence of
            # its text from this node so text is not reported twice
            self._clean_text(child)
            if branch.text and branch.text.find(child.text) >= 0:
                branch.text = branch.text.replace(child.text, '', 1)
    except TypeError:  # not an iterable node
        pass
def _xmlize(self, node, root=None):
    # Recursively mirror a PDFMiner layout node into an lxml element.
    # Children are either appended to this element or, when self.resort
    # is set, bbox-sorted into `root` via _append_sorted.
    if isinstance(node, LayoutElement):
        # Already an XML element we can use
        branch = node
    else:
        # collect attributes of current node
        tags = self._getattrs(
            node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
            'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
        )
        # per-type extras
        if type(node) == LTImage:
            tags.update(self._getattrs(
                node, 'colorspace', 'bits', 'imagemask', 'srcsize',
                'stream', 'name', 'pts', 'linewidth')
            )
        elif type(node) == LTChar:
            tags.update(self._getattrs(
                node, 'fontname', 'adv', 'upright', 'size')
            )
        elif type(node) == LTPage:
            tags.update(self._getattrs(node, 'pageid', 'rotate'))
        # create node
        branch = parser.makeelement(node.__class__.__name__, tags)
    branch.layout = node
    self._elements += [branch]  # make sure layout keeps state
    if root is None:
        root = branch
    # add text
    if hasattr(node, 'get_text'):
        branch.text = strip_invalid_xml_chars(node.get_text())
    # add children if node is an iterable
    if hasattr(node, '__iter__'):
        last = None
        for child in node:
            child = self._xmlize(child, root)
            if self.merge_tags and child.tag in self.merge_tags:
                # merge runs of mergeable nodes (e.g. LTChar/LTAnno)
                # into a single element rather than appending each one
                if branch.text and child.text in branch.text:
                    continue
                elif last is not None and last.tag in self.merge_tags:
                    last.text += child.text
                    last.set(
                        '_obj_id',
                        last.get('_obj_id', '') + "," + child.get('_obj_id', '')
                    )
                    continue
            # sort children by bounding boxes
            if self.resort:
                _append_sorted(root, child, _comp_bbox)
            else:
                branch.append(child)
            last = child
    return branch
def _getattrs(self, obj, *attrs):
    """Collect the named attributes of *obj* that actually exist,
    each passed through _filter_value() and stringified via
    obj_to_string()."""
    return {
        name: obj_to_string(self._filter_value(getattr(obj, name)))
        for name in attrs
        if hasattr(obj, name)
    }
def _filter_value(self, val):
if self.round_floats:
if type(val) == float:
val = round(val, self.round_digits)
elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
val = [self._filter_value(item) for item in val]
return val
# page access stuff
def get_page(self, page_number):
""" Get PDFPage object -- 0-indexed."""
return self._cached_pages(target_page=page_number)
def get_layout(self, page):
    """ Get PDFMiner Layout object for given page object or page number. """
    if type(page) == int:
        page = self.get_page(page)
    # Run the page through the shared interpreter/aggregator device;
    # get_result() returns the laid-out page object.
    self.interpreter.process_page(page)
    layout = self.device.get_result()
    # attach the page's annotations (links etc.) as extra elements
    layout = self._add_annots(layout, page.annots)
    return layout
def get_layouts(self):
    """Return a lazy generator of PDFMiner layouts, one per page."""
    pages = self._cached_pages()
    return (self.get_layout(p) for p in pages)
def _cached_pages(self, target_page=-1):
    """
    Get a page or all pages from page generator, caching results.
    This is necessary because PDFMiner searches recursively for pages,
    so we won't know how many there are until we parse the whole document,
    which we don't want to do until we need to.

    :param target_page: 0-based index of the single page wanted, or -1
        to exhaust the generator and return the list of all pages.
    """
    try:
        # pdfminer < 20131022
        self._pages_iter = self._pages_iter or self.doc.get_pages()
    except AttributeError:
        # pdfminer >= 20131022
        self._pages_iter = self._pages_iter or \
            PDFPage.create_pages(self.doc)
    if target_page >= 0:
        # pull pages off the generator until the target is cached
        while len(self._pages) <= target_page:
            # NOTE(review): next() raises StopIteration when the document
            # has fewer pages than requested; the falsy check below
            # presumably never fires -- confirm intended behavior.
            next_page = next(self._pages_iter)
            if not next_page:
                return None
            # NOTE(review): every cached page gets page_number 0 here;
            # looks like it should be the page's index -- confirm.
            next_page.page_number = 0
            self._pages += [next_page]
        try:
            return self._pages[target_page]
        except IndexError:
            return None
    # no target: drain the generator and return everything
    self._pages += list(self._pages_iter)
    return self._pages
def _add_annots(self, layout, annots):
    """Adds annotations to the layout object
    """
    if annots:
        for annot in resolve1(annots):
            annot = resolve1(annot)
            if annot.get('Rect') is not None:
                annot['bbox'] = annot.pop('Rect')  # Rename key
                annot = self._set_hwxy_attrs(annot)
            try:
                # pull the link target out of the action dictionary
                annot['URI'] = resolve1(annot['A'])['URI']
            except KeyError:
                pass
            for k, v in six.iteritems(annot):
                # element attributes must be strings
                if not isinstance(v, six.string_types):
                    annot[k] = obj_to_string(v)
            elem = parser.makeelement('Annot', annot)
            layout.add(elem)
    return layout
@staticmethod
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
PDFQuery.get_layout
|
python
|
def get_layout(self, page):
""" Get PDFMiner Layout object for given page object or page number. """
if type(page) == int:
page = self.get_page(page)
self.interpreter.process_page(page)
layout = self.device.get_result()
layout = self._add_annots(layout, page.annots)
return layout
|
Get PDFMiner Layout object for given page object or page number.
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L600-L607
|
[
"def get_page(self, page_number):\n \"\"\" Get PDFPage object -- 0-indexed.\"\"\"\n return self._cached_pages(target_page=page_number)\n",
"def _add_annots(self, layout, annots):\n \"\"\"Adds annotations to the layout object\n \"\"\"\n if annots:\n for annot in resolve1(annots):\n annot = resolve1(annot)\n if annot.get('Rect') is not None:\n annot['bbox'] = annot.pop('Rect') # Rename key\n annot = self._set_hwxy_attrs(annot)\n try:\n annot['URI'] = resolve1(annot['A'])['URI']\n except KeyError:\n pass\n for k, v in six.iteritems(annot):\n if not isinstance(v, six.string_types):\n annot[k] = obj_to_string(v)\n elem = parser.makeelement('Annot', annot)\n layout.add(elem)\n return layout\n"
] |
class PDFQuery(object):
    """
    Wrap a PDF document in a queryable XML tree.

    The document is parsed with PDFMiner into layout objects which are
    mirrored into an lxml tree (``self.tree``) and exposed through a
    PyQuery object (``self.pq``) that supports CSS-style selectors.
    """

    def __init__(
            self,
            file,
            merge_tags=('LTChar', 'LTAnno'),
            round_floats=True,
            round_digits=3,
            input_text_formatter=None,
            normalize_spaces=True,
            resort=True,
            parse_tree_cacher=None,
            # NOTE: mutable default is only ever read (converted to
            # LAParams below), never mutated, so it is safe here.
            laparams={'all_texts': True, 'detect_vertical': True},
    ):
        # store input
        self.merge_tags = merge_tags
        self.round_floats = round_floats
        self.round_digits = round_digits
        self.resort = resort

        # set up input text formatting function, if any
        if input_text_formatter:
            self.input_text_formatter = input_text_formatter
        elif normalize_spaces:
            r = re.compile(r'\s+')
            self.input_text_formatter = lambda s: re.sub(r, ' ', s)
        else:
            self.input_text_formatter = None

        # open doc
        if not hasattr(file, 'read'):
            try:
                file = open(file, 'rb')
            except TypeError:
                raise TypeError("File must be file object or filepath string.")
        parser = PDFParser(file)
        if hasattr(QPDFDocument, 'set_parser'):
            # pdfminer < 20131022
            doc = QPDFDocument()
            parser.set_document(doc)
            doc.set_parser(parser)
        else:
            # pdfminer >= 20131022
            doc = QPDFDocument(parser)
            parser.set_document(doc)
        if hasattr(doc, 'initialize'):
            # as of pdfminer==20140328, "PDFDocument.initialize() method is
            # removed and no longer needed."
            doc.initialize()
        self.doc = doc
        self.parser = parser
        self.tree = None
        self.pq = None
        self.file = file

        if parse_tree_cacher:
            self._parse_tree_cacher = parse_tree_cacher
            self._parse_tree_cacher.set_hash_key(self.file)
        else:
            self._parse_tree_cacher = DummyCache()

        # set up layout parsing
        rsrcmgr = PDFResourceManager()
        if type(laparams) == dict:
            laparams = LAParams(**laparams)
        self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)

        # caches
        self._pages = []
        self._pages_iter = None
        self._elements = []

    def load(self, *page_numbers):
        """
        Load etree and pyquery object for entire document, or given page
        numbers (ints or lists). After this is called, objects are
        available at pdf.tree and pdf.pq.

        >>> pdf.load()
        >>> pdf.tree
        <lxml.etree._ElementTree object at ...>
        >>> pdf.pq('LTPage')
        [<LTPage>, <LTPage>]
        >>> pdf.load(1)
        >>> pdf.pq('LTPage')
        [<LTPage>]
        >>> pdf.load(0, 1)
        >>> pdf.pq('LTPage')
        [<LTPage>, <LTPage>]
        """
        self.tree = self.get_tree(*_flatten(page_numbers))
        self.pq = self.get_pyquery(self.tree)

    def extract(self, searches, tree=None, as_dict=True):
        """
        >>> foo = pdf.extract([['pages', 'LTPage']])
        >>> foo
        {'pages': [<LTPage>, <LTPage>]}
        >>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
        {'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
        """
        if self.tree is None or self.pq is None:
            self.load()
        if tree is None:
            pq = self.pq
        else:
            pq = PyQuery(tree, css_translator=PDFQueryTranslator())
        results = []
        formatter = None
        parent = pq
        for search in searches:
            if len(search) < 3:
                search = list(search) + [formatter]
            key, search, tmp_formatter = search
            if key == 'with_formatter':
                if isinstance(search, six.string_types):
                    # is a pyquery method name, e.g. 'text'
                    formatter = lambda o, search=search: getattr(o, search)()
                elif hasattr(search, '__call__') or not search:
                    # is a method, or None to end formatting
                    formatter = search
                else:
                    raise TypeError("Formatter should be either a pyquery "
                                    "method name or a callable function.")
            elif key == 'with_parent':
                parent = pq(search) if search else pq
            else:
                try:
                    result = parent("*").filter(search) if \
                        hasattr(search, '__call__') else parent(search)
                except cssselect.SelectorSyntaxError as e:
                    raise cssselect.SelectorSyntaxError(
                        "Error applying selector '%s': %s" % (search, e))
                if tmp_formatter:
                    result = tmp_formatter(result)
                results += result if type(result) == tuple else [[key, result]]
        if as_dict:
            results = dict(results)
        return results

    # tree building stuff

    def get_pyquery(self, tree=None, page_numbers=None):
        """
        Wrap given tree in pyquery and return.
        If no tree supplied, will generate one from given page_numbers, or
        all page numbers.
        """
        if not page_numbers:
            page_numbers = []
        if tree is None:
            if not page_numbers and self.tree is not None:
                tree = self.tree
            else:
                tree = self.get_tree(page_numbers)
        if hasattr(tree, 'getroot'):
            tree = tree.getroot()
        return PyQuery(tree, css_translator=PDFQueryTranslator())

    def get_tree(self, *page_numbers):
        """
        Return lxml.etree.ElementTree for entire document, or page numbers
        given if any.
        """
        cache_key = "_".join(map(str, _flatten(page_numbers)))
        tree = self._parse_tree_cacher.get(cache_key)
        if tree is None:
            # set up root
            root = parser.makeelement("pdfxml")
            if self.doc.info:
                for k, v in list(self.doc.info[0].items()):
                    k = obj_to_string(k)
                    v = obj_to_string(resolve1(v))
                    try:
                        root.set(k, v)
                    except ValueError as e:
                        # Sometimes keys have a character in them, like ':',
                        # that isn't allowed in XML attribute names.
                        # If that happens we just replace non-word characters
                        # with '_'.
                        if "Invalid attribute name" in e.args[0]:
                            # fix: raw string -- '\W' is an invalid escape
                            # sequence in a plain str literal.
                            k = re.sub(r'\W', '_', k)
                            root.set(k, v)
            # Parse pages and append to root.
            # If nothing was passed in for page_numbers, we do this for all
            # pages, but if None was explicitly passed in, we skip it.
            if not(len(page_numbers) == 1 and page_numbers[0] is None):
                if page_numbers:
                    pages = [[n, self.get_layout(self.get_page(n))] for n in
                             _flatten(page_numbers)]
                else:
                    pages = enumerate(self.get_layouts())
                for n, page in pages:
                    page = self._xmlize(page)
                    page.set('page_index', obj_to_string(n))
                    page.set('page_label', self.doc.get_page_number(n))
                    root.append(page)
            self._clean_text(root)
            # wrap root in ElementTree
            tree = etree.ElementTree(root)
            self._parse_tree_cacher.set(cache_key, tree)
        return tree

    def _clean_text(self, branch):
        """
        Remove text from node if same text exists in its children.
        Apply string formatter if set.
        """
        if branch.text and self.input_text_formatter:
            branch.text = self.input_text_formatter(branch.text)
        try:
            for child in branch:
                self._clean_text(child)
                if branch.text and branch.text.find(child.text) >= 0:
                    branch.text = branch.text.replace(child.text, '', 1)
        except TypeError:  # not an iterable node
            pass

    def _xmlize(self, node, root=None):
        if isinstance(node, LayoutElement):
            # Already an XML element we can use
            branch = node
        else:
            # collect attributes of current node
            tags = self._getattrs(
                node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
                'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
            )
            if type(node) == LTImage:
                tags.update(self._getattrs(
                    node, 'colorspace', 'bits', 'imagemask', 'srcsize',
                    'stream', 'name', 'pts', 'linewidth')
                )
            elif type(node) == LTChar:
                tags.update(self._getattrs(
                    node, 'fontname', 'adv', 'upright', 'size')
                )
            elif type(node) == LTPage:
                tags.update(self._getattrs(node, 'pageid', 'rotate'))
            # create node
            branch = parser.makeelement(node.__class__.__name__, tags)
        branch.layout = node
        self._elements += [branch]  # make sure layout keeps state
        if root is None:
            root = branch
        # add text
        if hasattr(node, 'get_text'):
            branch.text = strip_invalid_xml_chars(node.get_text())
        # add children if node is an iterable
        if hasattr(node, '__iter__'):
            last = None
            for child in node:
                child = self._xmlize(child, root)
                if self.merge_tags and child.tag in self.merge_tags:
                    if branch.text and child.text in branch.text:
                        continue
                    elif last is not None and last.tag in self.merge_tags:
                        last.text += child.text
                        last.set(
                            '_obj_id',
                            last.get('_obj_id', '') + "," + child.get('_obj_id', '')
                        )
                        continue
                # sort children by bounding boxes
                if self.resort:
                    _append_sorted(root, child, _comp_bbox)
                else:
                    branch.append(child)
                last = child
        return branch

    def _getattrs(self, obj, *attrs):
        """ Return dictionary of given attrs on given object, if they exist,
        processing through _filter_value().
        """
        filtered_attrs = {}
        for attr in attrs:
            if hasattr(obj, attr):
                filtered_attrs[attr] = obj_to_string(
                    self._filter_value(getattr(obj, attr))
                )
        return filtered_attrs

    def _filter_value(self, val):
        # Round floats (recursively, through non-string iterables).
        if self.round_floats:
            if type(val) == float:
                val = round(val, self.round_digits)
            elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
                val = [self._filter_value(item) for item in val]
        return val

    # page access stuff

    def get_page(self, page_number):
        """ Get PDFPage object -- 0-indexed."""
        return self._cached_pages(target_page=page_number)

    def get_layout(self, page):
        """ Get PDFMiner Layout object for given page object or page number. """
        if type(page) == int:
            page = self.get_page(page)
        self.interpreter.process_page(page)
        layout = self.device.get_result()
        layout = self._add_annots(layout, page.annots)
        return layout

    def get_layouts(self):
        """ Get list of PDFMiner Layout objects for each page. """
        return (self.get_layout(page) for page in self._cached_pages())

    def _cached_pages(self, target_page=-1):
        """
        Get a page or all pages from page generator, caching results.
        This is necessary because PDFMiner searches recursively for pages,
        so we won't know how many there are until we parse the whole document,
        which we don't want to do until we need to.
        """
        try:
            # pdfminer < 20131022
            self._pages_iter = self._pages_iter or self.doc.get_pages()
        except AttributeError:
            # pdfminer >= 20131022
            self._pages_iter = self._pages_iter or \
                PDFPage.create_pages(self.doc)
        if target_page >= 0:
            while len(self._pages) <= target_page:
                next_page = next(self._pages_iter)
                if not next_page:
                    return None
                # NOTE(review): every page gets page_number 0 here; looks
                # like it should be the page's index -- confirm upstream.
                next_page.page_number = 0
                self._pages += [next_page]
            try:
                return self._pages[target_page]
            except IndexError:
                return None
        self._pages += list(self._pages_iter)
        return self._pages

    def _add_annots(self, layout, annots):
        """Adds annotations to the layout object
        """
        if annots:
            for annot in resolve1(annots):
                annot = resolve1(annot)
                if annot.get('Rect') is not None:
                    annot['bbox'] = annot.pop('Rect')  # Rename key
                    annot = self._set_hwxy_attrs(annot)
                try:
                    annot['URI'] = resolve1(annot['A'])['URI']
                except KeyError:
                    pass
                for k, v in six.iteritems(annot):
                    if not isinstance(v, six.string_types):
                        annot[k] = obj_to_string(v)
                elem = parser.makeelement('Annot', annot)
                layout.add(elem)
        return layout

    @staticmethod
    def _set_hwxy_attrs(attr):
        """Using the bbox attribute, set the h, w, x0, x1, y0, and y1
        attributes.
        """
        bbox = attr['bbox']
        attr['x0'] = bbox[0]
        attr['x1'] = bbox[2]
        attr['y0'] = bbox[1]
        attr['y1'] = bbox[3]
        attr['height'] = attr['y1'] - attr['y0']
        attr['width'] = attr['x1'] - attr['x0']
        return attr
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
PDFQuery._cached_pages
|
python
|
def _cached_pages(self, target_page=-1):
"""
Get a page or all pages from page generator, caching results.
This is necessary because PDFMiner searches recursively for pages,
so we won't know how many there are until we parse the whole document,
which we don't want to do until we need to.
"""
try:
# pdfminer < 20131022
self._pages_iter = self._pages_iter or self.doc.get_pages()
except AttributeError:
# pdfminer >= 20131022
self._pages_iter = self._pages_iter or \
PDFPage.create_pages(self.doc)
if target_page >= 0:
while len(self._pages) <= target_page:
next_page = next(self._pages_iter)
if not next_page:
return None
next_page.page_number = 0
self._pages += [next_page]
try:
return self._pages[target_page]
except IndexError:
return None
self._pages += list(self._pages_iter)
return self._pages
|
Get a page or all pages from page generator, caching results.
This is necessary because PDFMiner searches recursively for pages,
so we won't know how many there are until we parse the whole document,
which we don't want to do until we need to.
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L613-L640
| null |
class PDFQuery(object):
    """
    Wrap a PDF document in a queryable XML tree.

    The document is parsed with PDFMiner into layout objects which are
    mirrored into an lxml tree (``self.tree``) and exposed through a
    PyQuery object (``self.pq``) that supports CSS-style selectors.
    """

    def __init__(
            self,
            file,
            merge_tags=('LTChar', 'LTAnno'),
            round_floats=True,
            round_digits=3,
            input_text_formatter=None,
            normalize_spaces=True,
            resort=True,
            parse_tree_cacher=None,
            # NOTE: mutable default is only ever read (converted to
            # LAParams below), never mutated, so it is safe here.
            laparams={'all_texts': True, 'detect_vertical': True},
    ):
        # store input
        self.merge_tags = merge_tags
        self.round_floats = round_floats
        self.round_digits = round_digits
        self.resort = resort

        # set up input text formatting function, if any
        if input_text_formatter:
            self.input_text_formatter = input_text_formatter
        elif normalize_spaces:
            r = re.compile(r'\s+')
            self.input_text_formatter = lambda s: re.sub(r, ' ', s)
        else:
            self.input_text_formatter = None

        # open doc
        if not hasattr(file, 'read'):
            try:
                file = open(file, 'rb')
            except TypeError:
                raise TypeError("File must be file object or filepath string.")
        parser = PDFParser(file)
        if hasattr(QPDFDocument, 'set_parser'):
            # pdfminer < 20131022
            doc = QPDFDocument()
            parser.set_document(doc)
            doc.set_parser(parser)
        else:
            # pdfminer >= 20131022
            doc = QPDFDocument(parser)
            parser.set_document(doc)
        if hasattr(doc, 'initialize'):
            # as of pdfminer==20140328, "PDFDocument.initialize() method is
            # removed and no longer needed."
            doc.initialize()
        self.doc = doc
        self.parser = parser
        self.tree = None
        self.pq = None
        self.file = file

        if parse_tree_cacher:
            self._parse_tree_cacher = parse_tree_cacher
            self._parse_tree_cacher.set_hash_key(self.file)
        else:
            self._parse_tree_cacher = DummyCache()

        # set up layout parsing
        rsrcmgr = PDFResourceManager()
        if type(laparams) == dict:
            laparams = LAParams(**laparams)
        self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)

        # caches
        self._pages = []
        self._pages_iter = None
        self._elements = []

    def load(self, *page_numbers):
        """
        Load etree and pyquery object for entire document, or given page
        numbers (ints or lists). After this is called, objects are
        available at pdf.tree and pdf.pq.

        >>> pdf.load()
        >>> pdf.tree
        <lxml.etree._ElementTree object at ...>
        >>> pdf.pq('LTPage')
        [<LTPage>, <LTPage>]
        >>> pdf.load(1)
        >>> pdf.pq('LTPage')
        [<LTPage>]
        >>> pdf.load(0, 1)
        >>> pdf.pq('LTPage')
        [<LTPage>, <LTPage>]
        """
        self.tree = self.get_tree(*_flatten(page_numbers))
        self.pq = self.get_pyquery(self.tree)

    def extract(self, searches, tree=None, as_dict=True):
        """
        >>> foo = pdf.extract([['pages', 'LTPage']])
        >>> foo
        {'pages': [<LTPage>, <LTPage>]}
        >>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
        {'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
        """
        if self.tree is None or self.pq is None:
            self.load()
        if tree is None:
            pq = self.pq
        else:
            pq = PyQuery(tree, css_translator=PDFQueryTranslator())
        results = []
        formatter = None
        parent = pq
        for search in searches:
            if len(search) < 3:
                search = list(search) + [formatter]
            key, search, tmp_formatter = search
            if key == 'with_formatter':
                if isinstance(search, six.string_types):
                    # is a pyquery method name, e.g. 'text'
                    formatter = lambda o, search=search: getattr(o, search)()
                elif hasattr(search, '__call__') or not search:
                    # is a method, or None to end formatting
                    formatter = search
                else:
                    raise TypeError("Formatter should be either a pyquery "
                                    "method name or a callable function.")
            elif key == 'with_parent':
                parent = pq(search) if search else pq
            else:
                try:
                    result = parent("*").filter(search) if \
                        hasattr(search, '__call__') else parent(search)
                except cssselect.SelectorSyntaxError as e:
                    raise cssselect.SelectorSyntaxError(
                        "Error applying selector '%s': %s" % (search, e))
                if tmp_formatter:
                    result = tmp_formatter(result)
                results += result if type(result) == tuple else [[key, result]]
        if as_dict:
            results = dict(results)
        return results

    # tree building stuff

    def get_pyquery(self, tree=None, page_numbers=None):
        """
        Wrap given tree in pyquery and return.
        If no tree supplied, will generate one from given page_numbers, or
        all page numbers.
        """
        if not page_numbers:
            page_numbers = []
        if tree is None:
            if not page_numbers and self.tree is not None:
                tree = self.tree
            else:
                tree = self.get_tree(page_numbers)
        if hasattr(tree, 'getroot'):
            tree = tree.getroot()
        return PyQuery(tree, css_translator=PDFQueryTranslator())

    def get_tree(self, *page_numbers):
        """
        Return lxml.etree.ElementTree for entire document, or page numbers
        given if any.
        """
        cache_key = "_".join(map(str, _flatten(page_numbers)))
        tree = self._parse_tree_cacher.get(cache_key)
        if tree is None:
            # set up root
            root = parser.makeelement("pdfxml")
            if self.doc.info:
                for k, v in list(self.doc.info[0].items()):
                    k = obj_to_string(k)
                    v = obj_to_string(resolve1(v))
                    try:
                        root.set(k, v)
                    except ValueError as e:
                        # Sometimes keys have a character in them, like ':',
                        # that isn't allowed in XML attribute names.
                        # If that happens we just replace non-word characters
                        # with '_'.
                        if "Invalid attribute name" in e.args[0]:
                            # fix: raw string -- '\W' is an invalid escape
                            # sequence in a plain str literal.
                            k = re.sub(r'\W', '_', k)
                            root.set(k, v)
            # Parse pages and append to root.
            # If nothing was passed in for page_numbers, we do this for all
            # pages, but if None was explicitly passed in, we skip it.
            if not(len(page_numbers) == 1 and page_numbers[0] is None):
                if page_numbers:
                    pages = [[n, self.get_layout(self.get_page(n))] for n in
                             _flatten(page_numbers)]
                else:
                    pages = enumerate(self.get_layouts())
                for n, page in pages:
                    page = self._xmlize(page)
                    page.set('page_index', obj_to_string(n))
                    page.set('page_label', self.doc.get_page_number(n))
                    root.append(page)
            self._clean_text(root)
            # wrap root in ElementTree
            tree = etree.ElementTree(root)
            self._parse_tree_cacher.set(cache_key, tree)
        return tree

    def _clean_text(self, branch):
        """
        Remove text from node if same text exists in its children.
        Apply string formatter if set.
        """
        if branch.text and self.input_text_formatter:
            branch.text = self.input_text_formatter(branch.text)
        try:
            for child in branch:
                self._clean_text(child)
                if branch.text and branch.text.find(child.text) >= 0:
                    branch.text = branch.text.replace(child.text, '', 1)
        except TypeError:  # not an iterable node
            pass

    def _xmlize(self, node, root=None):
        if isinstance(node, LayoutElement):
            # Already an XML element we can use
            branch = node
        else:
            # collect attributes of current node
            tags = self._getattrs(
                node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
                'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
            )
            if type(node) == LTImage:
                tags.update(self._getattrs(
                    node, 'colorspace', 'bits', 'imagemask', 'srcsize',
                    'stream', 'name', 'pts', 'linewidth')
                )
            elif type(node) == LTChar:
                tags.update(self._getattrs(
                    node, 'fontname', 'adv', 'upright', 'size')
                )
            elif type(node) == LTPage:
                tags.update(self._getattrs(node, 'pageid', 'rotate'))
            # create node
            branch = parser.makeelement(node.__class__.__name__, tags)
        branch.layout = node
        self._elements += [branch]  # make sure layout keeps state
        if root is None:
            root = branch
        # add text
        if hasattr(node, 'get_text'):
            branch.text = strip_invalid_xml_chars(node.get_text())
        # add children if node is an iterable
        if hasattr(node, '__iter__'):
            last = None
            for child in node:
                child = self._xmlize(child, root)
                if self.merge_tags and child.tag in self.merge_tags:
                    if branch.text and child.text in branch.text:
                        continue
                    elif last is not None and last.tag in self.merge_tags:
                        last.text += child.text
                        last.set(
                            '_obj_id',
                            last.get('_obj_id', '') + "," + child.get('_obj_id', '')
                        )
                        continue
                # sort children by bounding boxes
                if self.resort:
                    _append_sorted(root, child, _comp_bbox)
                else:
                    branch.append(child)
                last = child
        return branch

    def _getattrs(self, obj, *attrs):
        """ Return dictionary of given attrs on given object, if they exist,
        processing through _filter_value().
        """
        filtered_attrs = {}
        for attr in attrs:
            if hasattr(obj, attr):
                filtered_attrs[attr] = obj_to_string(
                    self._filter_value(getattr(obj, attr))
                )
        return filtered_attrs

    def _filter_value(self, val):
        # Round floats (recursively, through non-string iterables).
        if self.round_floats:
            if type(val) == float:
                val = round(val, self.round_digits)
            elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
                val = [self._filter_value(item) for item in val]
        return val

    # page access stuff

    def get_page(self, page_number):
        """ Get PDFPage object -- 0-indexed."""
        return self._cached_pages(target_page=page_number)

    def get_layout(self, page):
        """ Get PDFMiner Layout object for given page object or page number. """
        if type(page) == int:
            page = self.get_page(page)
        self.interpreter.process_page(page)
        layout = self.device.get_result()
        layout = self._add_annots(layout, page.annots)
        return layout

    def get_layouts(self):
        """ Get list of PDFMiner Layout objects for each page. """
        return (self.get_layout(page) for page in self._cached_pages())

    def _cached_pages(self, target_page=-1):
        """
        Get a page or all pages from page generator, caching results.
        This is necessary because PDFMiner searches recursively for pages,
        so we won't know how many there are until we parse the whole document,
        which we don't want to do until we need to.
        """
        try:
            # pdfminer < 20131022
            self._pages_iter = self._pages_iter or self.doc.get_pages()
        except AttributeError:
            # pdfminer >= 20131022
            self._pages_iter = self._pages_iter or \
                PDFPage.create_pages(self.doc)
        if target_page >= 0:
            while len(self._pages) <= target_page:
                next_page = next(self._pages_iter)
                if not next_page:
                    return None
                # NOTE(review): every page gets page_number 0 here; looks
                # like it should be the page's index -- confirm upstream.
                next_page.page_number = 0
                self._pages += [next_page]
            try:
                return self._pages[target_page]
            except IndexError:
                return None
        self._pages += list(self._pages_iter)
        return self._pages

    def _add_annots(self, layout, annots):
        """Adds annotations to the layout object
        """
        if annots:
            for annot in resolve1(annots):
                annot = resolve1(annot)
                if annot.get('Rect') is not None:
                    annot['bbox'] = annot.pop('Rect')  # Rename key
                    annot = self._set_hwxy_attrs(annot)
                try:
                    annot['URI'] = resolve1(annot['A'])['URI']
                except KeyError:
                    pass
                for k, v in six.iteritems(annot):
                    if not isinstance(v, six.string_types):
                        annot[k] = obj_to_string(v)
                elem = parser.makeelement('Annot', annot)
                layout.add(elem)
        return layout

    @staticmethod
    def _set_hwxy_attrs(attr):
        """Using the bbox attribute, set the h, w, x0, x1, y0, and y1
        attributes.
        """
        bbox = attr['bbox']
        attr['x0'] = bbox[0]
        attr['x1'] = bbox[2]
        attr['y0'] = bbox[1]
        attr['y1'] = bbox[3]
        attr['height'] = attr['y1'] - attr['y0']
        attr['width'] = attr['x1'] - attr['x0']
        return attr
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
PDFQuery._add_annots
|
python
|
def _add_annots(self, layout, annots):
"""Adds annotations to the layout object
"""
if annots:
for annot in resolve1(annots):
annot = resolve1(annot)
if annot.get('Rect') is not None:
annot['bbox'] = annot.pop('Rect') # Rename key
annot = self._set_hwxy_attrs(annot)
try:
annot['URI'] = resolve1(annot['A'])['URI']
except KeyError:
pass
for k, v in six.iteritems(annot):
if not isinstance(v, six.string_types):
annot[k] = obj_to_string(v)
elem = parser.makeelement('Annot', annot)
layout.add(elem)
return layout
|
Adds annotations to the layout object
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L642-L660
|
[
"def obj_to_string(obj, top=True):\n \"\"\"\n Turn an arbitrary object into a unicode string. If complex (dict/list/tuple), will be json-encoded.\n \"\"\"\n obj = prepare_for_json_encoding(obj)\n if type(obj) == six.text_type:\n return obj\n return json.dumps(obj)\n",
"def _set_hwxy_attrs(attr):\n \"\"\"Using the bbox attribute, set the h, w, x0, x1, y0, and y1\n attributes.\n \"\"\"\n bbox = attr['bbox']\n attr['x0'] = bbox[0]\n attr['x1'] = bbox[2]\n attr['y0'] = bbox[1]\n attr['y1'] = bbox[3]\n attr['height'] = attr['y1'] - attr['y0']\n attr['width'] = attr['x1'] - attr['x0']\n return attr\n"
] |
class PDFQuery(object):
def __init__(
self,
file,
merge_tags=('LTChar', 'LTAnno'),
round_floats=True,
round_digits=3,
input_text_formatter=None,
normalize_spaces=True,
resort=True,
parse_tree_cacher=None,
laparams={'all_texts':True, 'detect_vertical':True},
):
# store input
self.merge_tags = merge_tags
self.round_floats = round_floats
self.round_digits = round_digits
self.resort = resort
# set up input text formatting function, if any
if input_text_formatter:
self.input_text_formatter = input_text_formatter
elif normalize_spaces:
r = re.compile(r'\s+')
self.input_text_formatter = lambda s: re.sub(r, ' ', s)
else:
self.input_text_formatter = None
# open doc
if not hasattr(file, 'read'):
try:
file = open(file, 'rb')
except TypeError:
raise TypeError("File must be file object or filepath string.")
parser = PDFParser(file)
if hasattr(QPDFDocument, 'set_parser'):
# pdfminer < 20131022
doc = QPDFDocument()
parser.set_document(doc)
doc.set_parser(parser)
else:
# pdfminer >= 20131022
doc = QPDFDocument(parser)
parser.set_document(doc)
if hasattr(doc, 'initialize'):
# as of pdfminer==20140328, "PDFDocument.initialize() method is
# removed and no longer needed."
doc.initialize()
self.doc = doc
self.parser = parser
self.tree = None
self.pq = None
self.file = file
if parse_tree_cacher:
self._parse_tree_cacher = parse_tree_cacher
self._parse_tree_cacher.set_hash_key(self.file)
else:
self._parse_tree_cacher = DummyCache()
# set up layout parsing
rsrcmgr = PDFResourceManager()
if type(laparams) == dict:
laparams = LAParams(**laparams)
self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
# caches
self._pages = []
self._pages_iter = None
self._elements = []
def load(self, *page_numbers):
"""
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
"""
self.tree = self.get_tree(*_flatten(page_numbers))
self.pq = self.get_pyquery(self.tree)
def extract(self, searches, tree=None, as_dict=True):
"""
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
"""
if self.tree is None or self.pq is None:
self.load()
if tree is None:
pq = self.pq
else:
pq = PyQuery(tree, css_translator=PDFQueryTranslator())
results = []
formatter = None
parent = pq
for search in searches:
if len(search) < 3:
search = list(search) + [formatter]
key, search, tmp_formatter = search
if key == 'with_formatter':
if isinstance(search, six.string_types):
# is a pyquery method name, e.g. 'text'
formatter = lambda o, search=search: getattr(o, search)()
elif hasattr(search, '__call__') or not search:
# is a method, or None to end formatting
formatter = search
else:
raise TypeError("Formatter should be either a pyquery "
"method name or a callable function.")
elif key == 'with_parent':
parent = pq(search) if search else pq
else:
try:
result = parent("*").filter(search) if \
hasattr(search, '__call__') else parent(search)
except cssselect.SelectorSyntaxError as e:
raise cssselect.SelectorSyntaxError(
"Error applying selector '%s': %s" % (search, e))
if tmp_formatter:
result = tmp_formatter(result)
results += result if type(result) == tuple else [[key, result]]
if as_dict:
results = dict(results)
return results
# tree building stuff
def get_pyquery(self, tree=None, page_numbers=None):
    """
    Wrap *tree* in a PyQuery object and return it.

    If no tree is supplied, reuse the already-parsed ``self.tree`` (when
    no page numbers are requested), otherwise build a tree from the
    given *page_numbers* (or from all pages when that is empty).
    """
    if not page_numbers:
        page_numbers = []
    if tree is None:
        reuse_cached = not page_numbers and self.tree is not None
        tree = self.tree if reuse_cached else self.get_tree(page_numbers)
    if hasattr(tree, 'getroot'):
        tree = tree.getroot()
    return PyQuery(tree, css_translator=PDFQueryTranslator())
def get_tree(self, *page_numbers):
    """
    Return lxml.etree.ElementTree for entire document, or page numbers
    given if any.

    Results are cached per page-number combination through the
    configured parse-tree cacher.
    """
    cache_key = "_".join(map(str, _flatten(page_numbers)))
    tree = self._parse_tree_cacher.get(cache_key)
    if tree is None:
        # set up root element carrying the document info as attributes
        root = parser.makeelement("pdfxml")
        if self.doc.info:
            for k, v in list(self.doc.info[0].items()):
                k = obj_to_string(k)
                v = obj_to_string(resolve1(v))
                try:
                    root.set(k, v)
                except ValueError as e:
                    # Sometimes keys have a character in them, like ':',
                    # that isn't allowed in XML attribute names.
                    # If that happens we just replace non-word characters
                    # with '_'.
                    if "Invalid attribute name" in e.args[0]:
                        # Fixed: use a raw string -- '\W' is an invalid
                        # escape sequence (SyntaxWarning on modern Python)
                        k = re.sub(r'\W', '_', k)
                        root.set(k, v)
        # Parse pages and append to root.
        # If nothing was passed in for page_numbers, we do this for all
        # pages, but if None was explicitly passed in, we skip it.
        if not (len(page_numbers) == 1 and page_numbers[0] is None):
            if page_numbers:
                pages = [[n, self.get_layout(self.get_page(n))] for n in
                         _flatten(page_numbers)]
            else:
                pages = enumerate(self.get_layouts())
            for n, page in pages:
                page = self._xmlize(page)
                page.set('page_index', obj_to_string(n))
                page.set('page_label', self.doc.get_page_number(n))
                root.append(page)
            self._clean_text(root)
        # wrap root in ElementTree and remember it for next time
        tree = etree.ElementTree(root)
        self._parse_tree_cacher.set(cache_key, tree)
    return tree
def _clean_text(self, branch):
    """
    Remove text from node if same text exists in its children.
    Apply string formatter if set.

    Called once on the assembled root element; recurses depth-first so
    each parent is de-duplicated against its already-cleaned children.
    """
    if branch.text and self.input_text_formatter:
        branch.text = self.input_text_formatter(branch.text)
    try:
        for child in branch:
            self._clean_text(child)
            # drop the first duplicated occurrence of the child's text
            if branch.text and branch.text.find(child.text) >= 0:
                branch.text = branch.text.replace(child.text, '', 1)
    except TypeError:  # not an iterable node
        # NOTE(review): str.find(None) also raises TypeError when a child
        # has no text, which silently ends this loop early -- confirm
        # that is acceptable.
        pass
def _xmlize(self, node, root=None):
    """Recursively convert a pdfminer layout node into an lxml element.

    Collects layout attributes as stringified XML attributes, copies the
    node's text, optionally merges character-level children into their
    predecessor (per ``self.merge_tags``) and optionally re-sorts
    children by bounding box (``self.resort``).

    :param node: pdfminer layout object, or an already-built LayoutElement
    :param root: element receiving re-sorted children; defaults to the
        element built for *node* at the top of the recursion
    :return: the lxml element built for (or passed in as) *node*
    """
    if isinstance(node, LayoutElement):
        # Already an XML element we can use
        branch = node
    else:
        # collect attributes of current node
        tags = self._getattrs(
            node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
            'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
        )
        if type(node) == LTImage:
            tags.update(self._getattrs(
                node, 'colorspace', 'bits', 'imagemask', 'srcsize',
                'stream', 'name', 'pts', 'linewidth')
            )
        elif type(node) == LTChar:
            tags.update(self._getattrs(
                node, 'fontname', 'adv', 'upright', 'size')
            )
        elif type(node) == LTPage:
            tags.update(self._getattrs(node, 'pageid', 'rotate'))
        # create node
        branch = parser.makeelement(node.__class__.__name__, tags)
        branch.layout = node
        self._elements += [branch]  # make sure layout keeps state
    if root is None:
        root = branch
    # add text
    if hasattr(node, 'get_text'):
        branch.text = strip_invalid_xml_chars(node.get_text())
    # add children if node is an iterable
    if hasattr(node, '__iter__'):
        last = None
        for child in node:
            child = self._xmlize(child, root)
            if self.merge_tags and child.tag in self.merge_tags:
                # skip text that the parent already carries
                if branch.text and child.text in branch.text:
                    continue
                # fold runs of mergeable nodes into a single element
                elif last is not None and last.tag in self.merge_tags:
                    last.text += child.text
                    last.set(
                        '_obj_id',
                        last.get('_obj_id', '') + "," + child.get('_obj_id', '')
                    )
                    continue
            # sort children by bounding boxes
            if self.resort:
                _append_sorted(root, child, _comp_bbox)
            else:
                branch.append(child)
            last = child
    return branch
def _getattrs(self, obj, *attrs):
    """Return dictionary of given attrs on given object, if they exist,
    processing through _filter_value() and obj_to_string().
    """
    return {
        attr: obj_to_string(self._filter_value(getattr(obj, attr)))
        for attr in attrs
        if hasattr(obj, attr)
    }
def _filter_value(self, val):
if self.round_floats:
if type(val) == float:
val = round(val, self.round_digits)
elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
val = [self._filter_value(item) for item in val]
return val
# page access stuff
def get_page(self, page_number):
    """Return the PDFPage object for *page_number* (0-indexed)."""
    page = self._cached_pages(target_page=page_number)
    return page
def get_layout(self, page):
    """Return the PDFMiner layout for *page* -- either a page object or a
    0-indexed page number -- with its annotations merged in."""
    if type(page) == int:
        page = self.get_page(page)
    self.interpreter.process_page(page)
    result = self.device.get_result()
    return self._add_annots(result, page.annots)
def get_layouts(self):
    """Lazily produce a PDFMiner layout for every cached page."""
    return (self.get_layout(p) for p in self._cached_pages())
def _cached_pages(self, target_page=-1):
    """
    Get a page or all pages from page generator, caching results.
    This is necessary because PDFMiner searches recursively for pages,
    so we won't know how many there are until we parse the whole document,
    which we don't want to do until we need to.

    :param target_page: 0-indexed page to fetch, or -1 to fetch all pages
    :return: one page object (None if out of range), or the full list of
        pages when target_page is -1
    """
    try:
        # pdfminer < 20131022
        self._pages_iter = self._pages_iter or self.doc.get_pages()
    except AttributeError:
        # pdfminer >= 20131022
        self._pages_iter = self._pages_iter or \
            PDFPage.create_pages(self.doc)
    if target_page >= 0:
        while len(self._pages) <= target_page:
            next_page = next(self._pages_iter)
            # NOTE(review): next() raises StopIteration when exhausted,
            # so this falsy guard is unlikely to ever end iteration.
            if not next_page:
                return None
            # NOTE(review): every page gets page_number 0 -- looks like
            # it was meant to be the page's index; confirm with callers.
            next_page.page_number = 0
            self._pages += [next_page]
        try:
            return self._pages[target_page]
        except IndexError:
            return None
    # target_page == -1: drain the generator and return everything
    self._pages += list(self._pages_iter)
    return self._pages
def _add_annots(self, layout, annots):
    """Adds annotations to the layout object.

    Each annotation is resolved, normalized (Rect renamed to bbox plus
    derived x/y/width/height attributes; URI pulled out of the action
    dict when present), stringified, and appended to *layout* as an
    'Annot' element.
    """
    if annots:
        for annot in resolve1(annots):
            annot = resolve1(annot)
            if annot.get('Rect') is not None:
                annot['bbox'] = annot.pop('Rect')  # Rename key
                annot = self._set_hwxy_attrs(annot)
            try:
                annot['URI'] = resolve1(annot['A'])['URI']
            except KeyError:
                # no action dictionary, or no URI inside it -- skip
                pass
            for k, v in six.iteritems(annot):
                # lxml attribute values must be strings
                if not isinstance(v, six.string_types):
                    annot[k] = obj_to_string(v)
            elem = parser.makeelement('Annot', annot)
            layout.add(elem)
    return layout
@staticmethod
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
|
jcushman/pdfquery
|
pdfquery/pdfquery.py
|
PDFQuery._set_hwxy_attrs
|
python
|
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
|
Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
|
train
|
https://github.com/jcushman/pdfquery/blob/f1c05d15e0c1b7c523a0971bc89b5610d8560f79/pdfquery/pdfquery.py#L663-L674
| null |
class PDFQuery(object):
def __init__(
        self,
        file,
        merge_tags=('LTChar', 'LTAnno'),
        round_floats=True,
        round_digits=3,
        input_text_formatter=None,
        normalize_spaces=True,
        resort=True,
        parse_tree_cacher=None,
        laparams={'all_texts':True, 'detect_vertical':True},
        ):
    """Open a PDF and set up pdfminer parsing machinery.

    :param file: file object or filepath string of the PDF
    :param merge_tags: tag names whose elements are merged into their
        predecessor/parent text
    :param round_floats: round numeric attributes to *round_digits*
    :param round_digits: precision used when *round_floats* is true
    :param input_text_formatter: callable applied to all extracted text
    :param normalize_spaces: collapse whitespace runs (ignored when
        *input_text_formatter* is given)
    :param resort: re-sort child elements by bounding box
    :param parse_tree_cacher: optional cache for parsed page trees
    :param laparams: dict of pdfminer LAParams options, an LAParams
        instance, or None

    NOTE(review): *laparams* has a mutable dict default. It is only read
    here (passed to LAParams(**laparams)), so sharing is currently
    harmless, but it is the classic mutable-default hazard -- confirm
    before anyone mutates it.
    """
    # store input
    self.merge_tags = merge_tags
    self.round_floats = round_floats
    self.round_digits = round_digits
    self.resort = resort
    # set up input text formatting function, if any
    if input_text_formatter:
        self.input_text_formatter = input_text_formatter
    elif normalize_spaces:
        r = re.compile(r'\s+')
        self.input_text_formatter = lambda s: re.sub(r, ' ', s)
    else:
        self.input_text_formatter = None
    # open doc
    if not hasattr(file, 'read'):
        try:
            file = open(file, 'rb')
        except TypeError:
            raise TypeError("File must be file object or filepath string.")
    parser = PDFParser(file)
    if hasattr(QPDFDocument, 'set_parser'):
        # pdfminer < 20131022
        doc = QPDFDocument()
        parser.set_document(doc)
        doc.set_parser(parser)
    else:
        # pdfminer >= 20131022
        doc = QPDFDocument(parser)
        parser.set_document(doc)
    if hasattr(doc, 'initialize'):
        # as of pdfminer==20140328, "PDFDocument.initialize() method is
        # removed and no longer needed."
        doc.initialize()
    self.doc = doc
    self.parser = parser
    self.tree = None
    self.pq = None
    self.file = file
    if parse_tree_cacher:
        self._parse_tree_cacher = parse_tree_cacher
        self._parse_tree_cacher.set_hash_key(self.file)
    else:
        self._parse_tree_cacher = DummyCache()
    # set up layout parsing
    rsrcmgr = PDFResourceManager()
    if type(laparams) == dict:
        laparams = LAParams(**laparams)
    self.device = PDFPageAggregator(rsrcmgr, laparams=laparams)
    self.interpreter = PDFPageInterpreter(rsrcmgr, self.device)
    # caches
    self._pages = []
    self._pages_iter = None
    self._elements = []
def load(self, *page_numbers):
"""
Load etree and pyquery object for entire document, or given page
numbers (ints or lists). After this is called, objects are
available at pdf.tree and pdf.pq.
>>> pdf.load()
>>> pdf.tree
<lxml.etree._ElementTree object at ...>
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
>>> pdf.load(1)
>>> pdf.pq('LTPage')
[<LTPage>]
>>> pdf.load(0, 1)
>>> pdf.pq('LTPage')
[<LTPage>, <LTPage>]
"""
self.tree = self.get_tree(*_flatten(page_numbers))
self.pq = self.get_pyquery(self.tree)
def extract(self, searches, tree=None, as_dict=True):
"""
>>> foo = pdf.extract([['pages', 'LTPage']])
>>> foo
{'pages': [<LTPage>, <LTPage>]}
>>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0])
{'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,...
"""
if self.tree is None or self.pq is None:
self.load()
if tree is None:
pq = self.pq
else:
pq = PyQuery(tree, css_translator=PDFQueryTranslator())
results = []
formatter = None
parent = pq
for search in searches:
if len(search) < 3:
search = list(search) + [formatter]
key, search, tmp_formatter = search
if key == 'with_formatter':
if isinstance(search, six.string_types):
# is a pyquery method name, e.g. 'text'
formatter = lambda o, search=search: getattr(o, search)()
elif hasattr(search, '__call__') or not search:
# is a method, or None to end formatting
formatter = search
else:
raise TypeError("Formatter should be either a pyquery "
"method name or a callable function.")
elif key == 'with_parent':
parent = pq(search) if search else pq
else:
try:
result = parent("*").filter(search) if \
hasattr(search, '__call__') else parent(search)
except cssselect.SelectorSyntaxError as e:
raise cssselect.SelectorSyntaxError(
"Error applying selector '%s': %s" % (search, e))
if tmp_formatter:
result = tmp_formatter(result)
results += result if type(result) == tuple else [[key, result]]
if as_dict:
results = dict(results)
return results
# tree building stuff
def get_pyquery(self, tree=None, page_numbers=None):
"""
Wrap given tree in pyquery and return.
If no tree supplied, will generate one from given page_numbers, or
all page numbers.
"""
if not page_numbers:
page_numbers = []
if tree is None:
if not page_numbers and self.tree is not None:
tree = self.tree
else:
tree = self.get_tree(page_numbers)
if hasattr(tree, 'getroot'):
tree = tree.getroot()
return PyQuery(tree, css_translator=PDFQueryTranslator())
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree
def _clean_text(self, branch):
"""
Remove text from node if same text exists in its children.
Apply string formatter if set.
"""
if branch.text and self.input_text_formatter:
branch.text = self.input_text_formatter(branch.text)
try:
for child in branch:
self._clean_text(child)
if branch.text and branch.text.find(child.text) >= 0:
branch.text = branch.text.replace(child.text, '', 1)
except TypeError: # not an iterable node
pass
def _xmlize(self, node, root=None):
if isinstance(node, LayoutElement):
# Already an XML element we can use
branch = node
else:
# collect attributes of current node
tags = self._getattrs(
node, 'y0', 'y1', 'x0', 'x1', 'width', 'height', 'bbox',
'linewidth', 'pts', 'index', 'name', 'matrix', 'word_margin'
)
if type(node) == LTImage:
tags.update(self._getattrs(
node, 'colorspace', 'bits', 'imagemask', 'srcsize',
'stream', 'name', 'pts', 'linewidth')
)
elif type(node) == LTChar:
tags.update(self._getattrs(
node, 'fontname', 'adv', 'upright', 'size')
)
elif type(node) == LTPage:
tags.update(self._getattrs(node, 'pageid', 'rotate'))
# create node
branch = parser.makeelement(node.__class__.__name__, tags)
branch.layout = node
self._elements += [branch] # make sure layout keeps state
if root is None:
root = branch
# add text
if hasattr(node, 'get_text'):
branch.text = strip_invalid_xml_chars(node.get_text())
# add children if node is an iterable
if hasattr(node, '__iter__'):
last = None
for child in node:
child = self._xmlize(child, root)
if self.merge_tags and child.tag in self.merge_tags:
if branch.text and child.text in branch.text:
continue
elif last is not None and last.tag in self.merge_tags:
last.text += child.text
last.set(
'_obj_id',
last.get('_obj_id','') + "," + child.get('_obj_id','')
)
continue
# sort children by bounding boxes
if self.resort:
_append_sorted(root, child, _comp_bbox)
else:
branch.append(child)
last = child
return branch
def _getattrs(self, obj, *attrs):
""" Return dictionary of given attrs on given object, if they exist,
processing through _filter_value().
"""
filtered_attrs = {}
for attr in attrs:
if hasattr(obj, attr):
filtered_attrs[attr] = obj_to_string(
self._filter_value(getattr(obj, attr))
)
return filtered_attrs
def _filter_value(self, val):
if self.round_floats:
if type(val) == float:
val = round(val, self.round_digits)
elif hasattr(val, '__iter__') and not isinstance(val, six.string_types):
val = [self._filter_value(item) for item in val]
return val
# page access stuff
def get_page(self, page_number):
""" Get PDFPage object -- 0-indexed."""
return self._cached_pages(target_page=page_number)
def get_layout(self, page):
""" Get PDFMiner Layout object for given page object or page number. """
if type(page) == int:
page = self.get_page(page)
self.interpreter.process_page(page)
layout = self.device.get_result()
layout = self._add_annots(layout, page.annots)
return layout
def get_layouts(self):
""" Get list of PDFMiner Layout objects for each page. """
return (self.get_layout(page) for page in self._cached_pages())
def _cached_pages(self, target_page=-1):
"""
Get a page or all pages from page generator, caching results.
This is necessary because PDFMiner searches recursively for pages,
so we won't know how many there are until we parse the whole document,
which we don't want to do until we need to.
"""
try:
# pdfminer < 20131022
self._pages_iter = self._pages_iter or self.doc.get_pages()
except AttributeError:
# pdfminer >= 20131022
self._pages_iter = self._pages_iter or \
PDFPage.create_pages(self.doc)
if target_page >= 0:
while len(self._pages) <= target_page:
next_page = next(self._pages_iter)
if not next_page:
return None
next_page.page_number = 0
self._pages += [next_page]
try:
return self._pages[target_page]
except IndexError:
return None
self._pages += list(self._pages_iter)
return self._pages
def _add_annots(self, layout, annots):
"""Adds annotations to the layout object
"""
if annots:
for annot in resolve1(annots):
annot = resolve1(annot)
if annot.get('Rect') is not None:
annot['bbox'] = annot.pop('Rect') # Rename key
annot = self._set_hwxy_attrs(annot)
try:
annot['URI'] = resolve1(annot['A'])['URI']
except KeyError:
pass
for k, v in six.iteritems(annot):
if not isinstance(v, six.string_types):
annot[k] = obj_to_string(v)
elem = parser.makeelement('Annot', annot)
layout.add(elem)
return layout
@staticmethod
def _set_hwxy_attrs(attr):
"""Using the bbox attribute, set the h, w, x0, x1, y0, and y1
attributes.
"""
bbox = attr['bbox']
attr['x0'] = bbox[0]
attr['x1'] = bbox[2]
attr['y0'] = bbox[1]
attr['y1'] = bbox[3]
attr['height'] = attr['y1'] - attr['y0']
attr['width'] = attr['x1'] - attr['x0']
return attr
|
frictionlessdata/tableschema-py
|
tableschema/schema.py
|
Schema.primary_key
|
python
|
def primary_key(self):
primary_key = self.__current_descriptor.get('primaryKey', [])
if not isinstance(primary_key, list):
primary_key = [primary_key]
return primary_key
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/schema.py#L63-L69
| null |
class Schema(object):
# Public
def __init__(self, descriptor=None, strict=False):
    """https://github.com/frictionlessdata/tableschema-py#schema

    :param descriptor: schema descriptor (dict, path or URL, handled by
        helpers.retrieve_descriptor); None means an empty schema. The
        previous mutable ``{}`` default was a shared-object hazard and
        is replaced by a None sentinel, backward-compatibly.
    :param strict: raise on validation errors instead of collecting them
    """
    # Avoid the mutable-default-argument pitfall: treat None as {}
    if descriptor is None:
        descriptor = {}
    # Process descriptor
    descriptor = helpers.retrieve_descriptor(descriptor)
    # Set attributes
    self.__strict = strict
    self.__current_descriptor = deepcopy(descriptor)
    self.__next_descriptor = deepcopy(descriptor)
    self.__profile = Profile('table-schema')
    self.__errors = []
    self.__fields = []
    # Build instance
    self.__build()
@property
def valid(self):
    """https://github.com/frictionlessdata/tableschema-py#schema

    True when the last build collected no validation errors.
    """
    return not self.__errors
@property
def errors(self):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Validation errors collected by the last build (empty when valid).
    """
    return self.__errors
@property
def descriptor(self):
    """https://github.com/frictionlessdata/tableschema-py#schema

    The mutable "next" descriptor; edits take effect after commit().
    """
    # Never use this.descriptor inside this class (!!!)
    return self.__next_descriptor
@property
def foreign_keys(self):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Foreign-key descriptors, normalized in place so 'fields' entries are
    always lists and every reference has 'resource' and 'fields' keys.

    Fixed: the decorator was applied twice (``@property`` stacked on
    ``@property``), which made attribute access return a property object
    instead of invoking this getter.
    """
    foreign_keys = self.__current_descriptor.get('foreignKeys', [])
    for key in foreign_keys:
        key.setdefault('fields', [])
        key.setdefault('reference', {})
        key['reference'].setdefault('resource', '')
        key['reference'].setdefault('fields', [])
        if not isinstance(key['fields'], list):
            key['fields'] = [key['fields']]
        if not isinstance(key['reference']['fields'], list):
            key['reference']['fields'] = [key['reference']['fields']]
    return foreign_keys
@property
def fields(self):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Field objects in descriptor order (an entry is False when the field
    descriptor could not be turned into a Field).
    """
    return self.__fields
@property
def field_names(self):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Names of all fields, in schema order.
    """
    return [f.name for f in self.fields]
def get_field(self, name):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Return the field whose name equals *name*, or None if absent.
    """
    return next((field for field in self.fields if field.name == name), None)
def add_field(self, descriptor):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Append a field *descriptor* to the current descriptor and rebuild.

    :returns: the freshly built Field (False when the descriptor was
        invalid -- see __build)
    """
    self.__current_descriptor.setdefault('fields', [])
    self.__current_descriptor['fields'].append(descriptor)
    self.__build()
    return self.__fields[-1]
def update_field(self, name, update):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Merge dict *update* into the pending descriptor's field named *name*.

    :returns: True if a matching field was updated, False otherwise
    """
    for candidate in self.__next_descriptor['fields']:
        if candidate['name'] == name:
            candidate.update(update)
            return True
    return False
def remove_field(self, name):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Remove the field named *name* from the current descriptor and rebuild.

    :returns: the removed Field, or None when no such field exists

    Fixed: the result of ``filter()`` (a lazy iterator on Python 3) was
    stored as the 'fields' list; it would be consumed by the first
    iteration and break later descriptor use. It is now materialized as
    a real list.
    """
    field = self.get_field(name)
    if field:
        self.__current_descriptor['fields'] = [
            f for f in self.__current_descriptor['fields']
            if f.get('name') != name
        ]
        self.__build()
    return field
def cast_row(self, row, fail_fast=False):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Cast every cell of *row* using its corresponding field.

    :param row: sequence of raw values, same length as self.fields
    :param fail_fast: re-raise on the first cast error instead of
        collecting all of them
    :raises exceptions.CastError: on length mismatch or failed casts
    :returns: list of cast values
    """
    # Prepare
    result = []
    errors = []
    # Check row length
    if len(row) != len(self.fields):
        message = 'Row length %s doesn\'t match fields count %s'
        message = message % (len(row), len(self.fields))
        raise exceptions.CastError(message)
    # Cast row
    for field, value in zip(self.fields, row):
        try:
            result.append(field.cast_value(value))
        except exceptions.CastError as exception:
            if fail_fast:
                raise
            errors.append(exception)
    # Raise errors collected across the whole row
    if errors:
        message = 'There are %s cast errors (see exception.errors)' % len(errors)
        raise exceptions.CastError(message, errors=errors)
    return result
def infer(self, rows, headers=1, confidence=0.75):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Infer a descriptor from sample *rows* and make it current.

    :param rows: list of data rows; NOTE: header rows are popped off,
        so the caller's list is mutated
    :param headers: 1-based row number of the header row, or an explicit
        list of header names
    :param confidence: match ratio a type needs to be selected
    :returns: the inferred descriptor
    """
    # Get headers
    if isinstance(headers, int):
        headers_row = headers
        while True:
            headers_row -= 1
            headers = rows.pop(0)
            if not headers_row:
                break
    elif not isinstance(headers, list):
        headers = []
    # Get descriptor
    guesser = _TypeGuesser()
    resolver = _TypeResolver()
    descriptor = {'fields': []}
    type_matches = {}
    for header in headers:
        descriptor['fields'].append({'name': header})
    for index, row in enumerate(rows):
        # Normalize rows with invalid dimensions for sanity
        row_length = len(row)
        headers_length = len(headers)
        if row_length > headers_length:
            row = row[:len(headers)]
        if row_length < headers_length:
            diff = headers_length - row_length
            fill = [''] * diff
            row = row + fill
        # build a column-wise lookup of type matches
        # (the inner loop deliberately reuses *index* for the column)
        for index, value in enumerate(row):
            rv = guesser.cast(value)
            if type_matches.get(index):
                type_matches[index].extend(rv)
            else:
                type_matches[index] = list(rv)
    # choose a type/format for each column based on the matches
    for index, results in type_matches.items():
        rv = resolver.get(results, confidence)
        descriptor['fields'][index].update(**rv)
    # Save descriptor
    self.__current_descriptor = descriptor
    self.__build()
    return descriptor
def commit(self, strict=None):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Apply pending descriptor changes (optionally switching strict mode).

    :returns: True when a rebuild happened, False when nothing changed
    """
    if strict is not None:
        self.__strict = strict
    # NOTE(review): when *strict* is passed, the equality short-circuit
    # below is skipped (elif), so commit(strict=...) always rebuilds and
    # returns True even with no pending changes -- confirm intended.
    elif self.__current_descriptor == self.__next_descriptor:
        return False
    self.__current_descriptor = deepcopy(self.__next_descriptor)
    self.__build()
    return True
def save(self, target, ensure_ascii=True):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Write the current descriptor to *target* as indented JSON.
    """
    mode = 'w'
    encoding = 'utf-8'
    if six.PY2:
        # Python 2 json.dump emits bytes; open binary with no codec
        mode = 'wb'
        encoding = None
    helpers.ensure_dir(target)
    with io.open(target, mode=mode, encoding=encoding) as file:
        json.dump(self.__current_descriptor, file, indent=4, ensure_ascii=ensure_ascii)
# Internal
def __build(self):
    """Expand, validate and materialize the current descriptor."""
    # Process descriptor
    expand = helpers.expand_schema_descriptor
    self.__current_descriptor = expand(self.__current_descriptor)
    self.__next_descriptor = deepcopy(self.__current_descriptor)
    # Validate descriptor
    try:
        self.__profile.validate(self.__current_descriptor)
        self.__errors = []
    except exceptions.ValidationError as exception:
        self.__errors = exception.errors
        if self.__strict:
            raise exception
    # Populate fields
    self.__fields = []
    for field in self.__current_descriptor.get('fields', []):
        missing_values = self.__current_descriptor['missingValues']
        try:
            field = Field(field, missing_values=missing_values)
        except Exception:
            # invalid field descriptors are kept as False placeholders
            field = False
        self.__fields.append(field)
# Deprecated: legacy aliases kept for backward compatibility
headers = field_names  # alias for Schema.field_names
has_field = get_field  # alias for Schema.get_field
|
frictionlessdata/tableschema-py
|
tableschema/schema.py
|
Schema.foreign_keys
|
python
|
def foreign_keys(self):
foreign_keys = self.__current_descriptor.get('foreignKeys', [])
for key in foreign_keys:
key.setdefault('fields', [])
key.setdefault('reference', {})
key['reference'].setdefault('resource', '')
key['reference'].setdefault('fields', [])
if not isinstance(key['fields'], list):
key['fields'] = [key['fields']]
if not isinstance(key['reference']['fields'], list):
key['reference']['fields'] = [key['reference']['fields']]
return foreign_keys
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/schema.py#L72-L85
| null |
class Schema(object):
# Public
def __init__(self, descriptor={}, strict=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Process descriptor
descriptor = helpers.retrieve_descriptor(descriptor)
# Set attributes
self.__strict = strict
self.__current_descriptor = deepcopy(descriptor)
self.__next_descriptor = deepcopy(descriptor)
self.__profile = Profile('table-schema')
self.__errors = []
self.__fields = []
# Build instance
self.__build()
@property
def valid(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return not bool(self.__errors)
@property
def errors(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__errors
@property
def descriptor(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Never use this.descriptor inside this class (!!!)
return self.__next_descriptor
@property
def primary_key(self):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Primary-key field name(s) from the descriptor, always as a list.
    """
    pk = self.__current_descriptor.get('primaryKey', [])
    return pk if isinstance(pk, list) else [pk]
@property
def fields(self):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Field objects in descriptor order (an entry is False when the field
    descriptor could not be turned into a Field).

    Fixed: the decorator was applied twice (``@property`` stacked on
    ``@property``), which made attribute access return a property object
    instead of invoking this getter.
    """
    return self.__fields
@property
def field_names(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return [field.name for field in self.fields]
def get_field(self, name):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.fields:
if field.name == name:
return field
return None
def add_field(self, descriptor):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
self.__current_descriptor.setdefault('fields', [])
self.__current_descriptor['fields'].append(descriptor)
self.__build()
return self.__fields[-1]
def update_field(self, name, update):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.__next_descriptor['fields']:
if field['name'] == name:
field.update(update)
return True
return False
def remove_field(self, name):
    """https://github.com/frictionlessdata/tableschema-py#schema

    Remove the field named *name* from the current descriptor and rebuild.

    :returns: the removed Field, or None when no such field exists

    Fixed: the result of ``filter()`` (a lazy iterator on Python 3) was
    stored as the 'fields' list; it would be consumed by the first
    iteration and break later descriptor use. It is now materialized as
    a real list.
    """
    field = self.get_field(name)
    if field:
        self.__current_descriptor['fields'] = [
            f for f in self.__current_descriptor['fields']
            if f.get('name') != name
        ]
        self.__build()
    return field
def cast_row(self, row, fail_fast=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Prepare
result = []
errors = []
# Check row length
if len(row) != len(self.fields):
message = 'Row length %s doesn\'t match fields count %s'
message = message % (len(row), len(self.fields))
raise exceptions.CastError(message)
# Cast row
for field, value in zip(self.fields, row):
try:
result.append(field.cast_value(value))
except exceptions.CastError as exception:
if fail_fast:
raise
errors.append(exception)
# Raise errors
if errors:
message = 'There are %s cast errors (see exception.errors)' % len(errors)
raise exceptions.CastError(message, errors=errors)
return result
def infer(self, rows, headers=1, confidence=0.75):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Get headers
if isinstance(headers, int):
headers_row = headers
while True:
headers_row -= 1
headers = rows.pop(0)
if not headers_row:
break
elif not isinstance(headers, list):
headers = []
# Get descriptor
guesser = _TypeGuesser()
resolver = _TypeResolver()
descriptor = {'fields': []}
type_matches = {}
for header in headers:
descriptor['fields'].append({'name': header})
for index, row in enumerate(rows):
# Normalize rows with invalid dimensions for sanity
row_length = len(row)
headers_length = len(headers)
if row_length > headers_length:
row = row[:len(headers)]
if row_length < headers_length:
diff = headers_length - row_length
fill = [''] * diff
row = row + fill
# build a column-wise lookup of type matches
for index, value in enumerate(row):
rv = guesser.cast(value)
if type_matches.get(index):
type_matches[index].extend(rv)
else:
type_matches[index] = list(rv)
# choose a type/format for each column based on the matches
for index, results in type_matches.items():
rv = resolver.get(results, confidence)
descriptor['fields'][index].update(**rv)
# Save descriptor
self.__current_descriptor = descriptor
self.__build()
return descriptor
def commit(self, strict=None):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
if strict is not None:
self.__strict = strict
elif self.__current_descriptor == self.__next_descriptor:
return False
self.__current_descriptor = deepcopy(self.__next_descriptor)
self.__build()
return True
def save(self, target, ensure_ascii=True):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
mode = 'w'
encoding = 'utf-8'
if six.PY2:
mode = 'wb'
encoding = None
helpers.ensure_dir(target)
with io.open(target, mode=mode, encoding=encoding) as file:
json.dump(self.__current_descriptor, file, indent=4, ensure_ascii=ensure_ascii)
# Internal
def __build(self):
# Process descriptor
expand = helpers.expand_schema_descriptor
self.__current_descriptor = expand(self.__current_descriptor)
self.__next_descriptor = deepcopy(self.__current_descriptor)
# Validate descriptor
try:
self.__profile.validate(self.__current_descriptor)
self.__errors = []
except exceptions.ValidationError as exception:
self.__errors = exception.errors
if self.__strict:
raise exception
# Populate fields
self.__fields = []
for field in self.__current_descriptor.get('fields', []):
missing_values = self.__current_descriptor['missingValues']
try:
field = Field(field, missing_values=missing_values)
except Exception:
field = False
self.__fields.append(field)
# Deprecated
headers = field_names
has_field = get_field
|
frictionlessdata/tableschema-py
|
tableschema/schema.py
|
Schema.add_field
|
python
|
def add_field(self, descriptor):
self.__current_descriptor.setdefault('fields', [])
self.__current_descriptor['fields'].append(descriptor)
self.__build()
return self.__fields[-1]
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/schema.py#L107-L113
|
[
"def __build(self):\n\n # Process descriptor\n expand = helpers.expand_schema_descriptor\n self.__current_descriptor = expand(self.__current_descriptor)\n self.__next_descriptor = deepcopy(self.__current_descriptor)\n\n # Validate descriptor\n try:\n self.__profile.validate(self.__current_descriptor)\n self.__errors = []\n except exceptions.ValidationError as exception:\n self.__errors = exception.errors\n if self.__strict:\n raise exception\n\n # Populate fields\n self.__fields = []\n for field in self.__current_descriptor.get('fields', []):\n missing_values = self.__current_descriptor['missingValues']\n try:\n field = Field(field, missing_values=missing_values)\n except Exception:\n field = False\n self.__fields.append(field)\n"
] |
class Schema(object):
# Public
def __init__(self, descriptor={}, strict=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Process descriptor
descriptor = helpers.retrieve_descriptor(descriptor)
# Set attributes
self.__strict = strict
self.__current_descriptor = deepcopy(descriptor)
self.__next_descriptor = deepcopy(descriptor)
self.__profile = Profile('table-schema')
self.__errors = []
self.__fields = []
# Build instance
self.__build()
@property
def valid(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return not bool(self.__errors)
@property
def errors(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__errors
@property
def descriptor(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Never use this.descriptor inside this class (!!!)
return self.__next_descriptor
@property
def primary_key(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
primary_key = self.__current_descriptor.get('primaryKey', [])
if not isinstance(primary_key, list):
primary_key = [primary_key]
return primary_key
@property
def foreign_keys(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
foreign_keys = self.__current_descriptor.get('foreignKeys', [])
for key in foreign_keys:
key.setdefault('fields', [])
key.setdefault('reference', {})
key['reference'].setdefault('resource', '')
key['reference'].setdefault('fields', [])
if not isinstance(key['fields'], list):
key['fields'] = [key['fields']]
if not isinstance(key['reference']['fields'], list):
key['reference']['fields'] = [key['reference']['fields']]
return foreign_keys
@property
def fields(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__fields
@property
def field_names(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return [field.name for field in self.fields]
def get_field(self, name):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.fields:
if field.name == name:
return field
return None
def update_field(self, name, update):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.__next_descriptor['fields']:
if field['name'] == name:
field.update(update)
return True
return False
def remove_field(self, name):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
field = self.get_field(name)
if field:
predicat = lambda field: field.get('name') != name
self.__current_descriptor['fields'] = filter(
predicat, self.__current_descriptor['fields'])
self.__build()
return field
def cast_row(self, row, fail_fast=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Prepare
result = []
errors = []
# Check row length
if len(row) != len(self.fields):
message = 'Row length %s doesn\'t match fields count %s'
message = message % (len(row), len(self.fields))
raise exceptions.CastError(message)
# Cast row
for field, value in zip(self.fields, row):
try:
result.append(field.cast_value(value))
except exceptions.CastError as exception:
if fail_fast:
raise
errors.append(exception)
# Raise errors
if errors:
message = 'There are %s cast errors (see exception.errors)' % len(errors)
raise exceptions.CastError(message, errors=errors)
return result
def infer(self, rows, headers=1, confidence=0.75):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Get headers
if isinstance(headers, int):
headers_row = headers
while True:
headers_row -= 1
headers = rows.pop(0)
if not headers_row:
break
elif not isinstance(headers, list):
headers = []
# Get descriptor
guesser = _TypeGuesser()
resolver = _TypeResolver()
descriptor = {'fields': []}
type_matches = {}
for header in headers:
descriptor['fields'].append({'name': header})
for index, row in enumerate(rows):
# Normalize rows with invalid dimensions for sanity
row_length = len(row)
headers_length = len(headers)
if row_length > headers_length:
row = row[:len(headers)]
if row_length < headers_length:
diff = headers_length - row_length
fill = [''] * diff
row = row + fill
# build a column-wise lookup of type matches
for index, value in enumerate(row):
rv = guesser.cast(value)
if type_matches.get(index):
type_matches[index].extend(rv)
else:
type_matches[index] = list(rv)
# choose a type/format for each column based on the matches
for index, results in type_matches.items():
rv = resolver.get(results, confidence)
descriptor['fields'][index].update(**rv)
# Save descriptor
self.__current_descriptor = descriptor
self.__build()
return descriptor
def commit(self, strict=None):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
if strict is not None:
self.__strict = strict
elif self.__current_descriptor == self.__next_descriptor:
return False
self.__current_descriptor = deepcopy(self.__next_descriptor)
self.__build()
return True
def save(self, target, ensure_ascii=True):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
mode = 'w'
encoding = 'utf-8'
if six.PY2:
mode = 'wb'
encoding = None
helpers.ensure_dir(target)
with io.open(target, mode=mode, encoding=encoding) as file:
json.dump(self.__current_descriptor, file, indent=4, ensure_ascii=ensure_ascii)
# Internal
def __build(self):
# Process descriptor
expand = helpers.expand_schema_descriptor
self.__current_descriptor = expand(self.__current_descriptor)
self.__next_descriptor = deepcopy(self.__current_descriptor)
# Validate descriptor
try:
self.__profile.validate(self.__current_descriptor)
self.__errors = []
except exceptions.ValidationError as exception:
self.__errors = exception.errors
if self.__strict:
raise exception
# Populate fields
self.__fields = []
for field in self.__current_descriptor.get('fields', []):
missing_values = self.__current_descriptor['missingValues']
try:
field = Field(field, missing_values=missing_values)
except Exception:
field = False
self.__fields.append(field)
# Deprecated
headers = field_names
has_field = get_field
|
frictionlessdata/tableschema-py
|
tableschema/schema.py
|
Schema.update_field
|
python
|
def update_field(self, name, update):
    """Merge the *update* mapping into the first field descriptor named *name*.

    Operates on the pending (next) descriptor; call ``commit`` to apply.
    Returns True when a matching field was found and updated, else False.
    """
    target = next(
        (fld for fld in self.__next_descriptor['fields'] if fld['name'] == name),
        None,
    )
    if target is None:
        return False
    target.update(update)
    return True
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/schema.py#L115-L122
| null |
class Schema(object):
# Public
def __init__(self, descriptor={}, strict=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Process descriptor
descriptor = helpers.retrieve_descriptor(descriptor)
# Set attributes
self.__strict = strict
self.__current_descriptor = deepcopy(descriptor)
self.__next_descriptor = deepcopy(descriptor)
self.__profile = Profile('table-schema')
self.__errors = []
self.__fields = []
# Build instance
self.__build()
@property
def valid(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return not bool(self.__errors)
@property
def errors(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__errors
@property
def descriptor(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Never use this.descriptor inside this class (!!!)
return self.__next_descriptor
@property
def primary_key(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
primary_key = self.__current_descriptor.get('primaryKey', [])
if not isinstance(primary_key, list):
primary_key = [primary_key]
return primary_key
@property
def foreign_keys(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
foreign_keys = self.__current_descriptor.get('foreignKeys', [])
for key in foreign_keys:
key.setdefault('fields', [])
key.setdefault('reference', {})
key['reference'].setdefault('resource', '')
key['reference'].setdefault('fields', [])
if not isinstance(key['fields'], list):
key['fields'] = [key['fields']]
if not isinstance(key['reference']['fields'], list):
key['reference']['fields'] = [key['reference']['fields']]
return foreign_keys
@property
def fields(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__fields
@property
def field_names(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return [field.name for field in self.fields]
def get_field(self, name):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.fields:
if field.name == name:
return field
return None
def add_field(self, descriptor):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
self.__current_descriptor.setdefault('fields', [])
self.__current_descriptor['fields'].append(descriptor)
self.__build()
return self.__fields[-1]
def remove_field(self, name):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
field = self.get_field(name)
if field:
predicat = lambda field: field.get('name') != name
self.__current_descriptor['fields'] = filter(
predicat, self.__current_descriptor['fields'])
self.__build()
return field
def cast_row(self, row, fail_fast=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Prepare
result = []
errors = []
# Check row length
if len(row) != len(self.fields):
message = 'Row length %s doesn\'t match fields count %s'
message = message % (len(row), len(self.fields))
raise exceptions.CastError(message)
# Cast row
for field, value in zip(self.fields, row):
try:
result.append(field.cast_value(value))
except exceptions.CastError as exception:
if fail_fast:
raise
errors.append(exception)
# Raise errors
if errors:
message = 'There are %s cast errors (see exception.errors)' % len(errors)
raise exceptions.CastError(message, errors=errors)
return result
def infer(self, rows, headers=1, confidence=0.75):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Get headers
if isinstance(headers, int):
headers_row = headers
while True:
headers_row -= 1
headers = rows.pop(0)
if not headers_row:
break
elif not isinstance(headers, list):
headers = []
# Get descriptor
guesser = _TypeGuesser()
resolver = _TypeResolver()
descriptor = {'fields': []}
type_matches = {}
for header in headers:
descriptor['fields'].append({'name': header})
for index, row in enumerate(rows):
# Normalize rows with invalid dimensions for sanity
row_length = len(row)
headers_length = len(headers)
if row_length > headers_length:
row = row[:len(headers)]
if row_length < headers_length:
diff = headers_length - row_length
fill = [''] * diff
row = row + fill
# build a column-wise lookup of type matches
for index, value in enumerate(row):
rv = guesser.cast(value)
if type_matches.get(index):
type_matches[index].extend(rv)
else:
type_matches[index] = list(rv)
# choose a type/format for each column based on the matches
for index, results in type_matches.items():
rv = resolver.get(results, confidence)
descriptor['fields'][index].update(**rv)
# Save descriptor
self.__current_descriptor = descriptor
self.__build()
return descriptor
def commit(self, strict=None):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
if strict is not None:
self.__strict = strict
elif self.__current_descriptor == self.__next_descriptor:
return False
self.__current_descriptor = deepcopy(self.__next_descriptor)
self.__build()
return True
def save(self, target, ensure_ascii=True):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
mode = 'w'
encoding = 'utf-8'
if six.PY2:
mode = 'wb'
encoding = None
helpers.ensure_dir(target)
with io.open(target, mode=mode, encoding=encoding) as file:
json.dump(self.__current_descriptor, file, indent=4, ensure_ascii=ensure_ascii)
# Internal
def __build(self):
# Process descriptor
expand = helpers.expand_schema_descriptor
self.__current_descriptor = expand(self.__current_descriptor)
self.__next_descriptor = deepcopy(self.__current_descriptor)
# Validate descriptor
try:
self.__profile.validate(self.__current_descriptor)
self.__errors = []
except exceptions.ValidationError as exception:
self.__errors = exception.errors
if self.__strict:
raise exception
# Populate fields
self.__fields = []
for field in self.__current_descriptor.get('fields', []):
missing_values = self.__current_descriptor['missingValues']
try:
field = Field(field, missing_values=missing_values)
except Exception:
field = False
self.__fields.append(field)
# Deprecated
headers = field_names
has_field = get_field
|
frictionlessdata/tableschema-py
|
tableschema/schema.py
|
Schema.remove_field
|
python
|
def remove_field(self, name):
    """Remove the field named *name* from the schema and rebuild it.

    Returns the removed Field object, or None when no field matches.
    """
    field = self.get_field(name)
    if field:
        # Use a list comprehension instead of ``filter``: on Python 3,
        # ``filter`` returns a lazy one-shot iterator, which would leave a
        # non-list, non-JSON-serializable object in the descriptor and
        # silently break any later iteration over the fields.
        self.__current_descriptor['fields'] = [
            fld for fld in self.__current_descriptor['fields']
            if fld.get('name') != name
        ]
        self.__build()
    return field
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/schema.py#L124-L133
|
[
"def get_field(self, name):\n \"\"\"https://github.com/frictionlessdata/tableschema-py#schema\n \"\"\"\n for field in self.fields:\n if field.name == name:\n return field\n return None\n",
"def __build(self):\n\n # Process descriptor\n expand = helpers.expand_schema_descriptor\n self.__current_descriptor = expand(self.__current_descriptor)\n self.__next_descriptor = deepcopy(self.__current_descriptor)\n\n # Validate descriptor\n try:\n self.__profile.validate(self.__current_descriptor)\n self.__errors = []\n except exceptions.ValidationError as exception:\n self.__errors = exception.errors\n if self.__strict:\n raise exception\n\n # Populate fields\n self.__fields = []\n for field in self.__current_descriptor.get('fields', []):\n missing_values = self.__current_descriptor['missingValues']\n try:\n field = Field(field, missing_values=missing_values)\n except Exception:\n field = False\n self.__fields.append(field)\n"
] |
class Schema(object):
# Public
def __init__(self, descriptor={}, strict=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Process descriptor
descriptor = helpers.retrieve_descriptor(descriptor)
# Set attributes
self.__strict = strict
self.__current_descriptor = deepcopy(descriptor)
self.__next_descriptor = deepcopy(descriptor)
self.__profile = Profile('table-schema')
self.__errors = []
self.__fields = []
# Build instance
self.__build()
@property
def valid(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return not bool(self.__errors)
@property
def errors(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__errors
@property
def descriptor(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Never use this.descriptor inside this class (!!!)
return self.__next_descriptor
@property
def primary_key(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
primary_key = self.__current_descriptor.get('primaryKey', [])
if not isinstance(primary_key, list):
primary_key = [primary_key]
return primary_key
@property
def foreign_keys(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
foreign_keys = self.__current_descriptor.get('foreignKeys', [])
for key in foreign_keys:
key.setdefault('fields', [])
key.setdefault('reference', {})
key['reference'].setdefault('resource', '')
key['reference'].setdefault('fields', [])
if not isinstance(key['fields'], list):
key['fields'] = [key['fields']]
if not isinstance(key['reference']['fields'], list):
key['reference']['fields'] = [key['reference']['fields']]
return foreign_keys
@property
def fields(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__fields
@property
def field_names(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return [field.name for field in self.fields]
def get_field(self, name):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.fields:
if field.name == name:
return field
return None
def add_field(self, descriptor):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
self.__current_descriptor.setdefault('fields', [])
self.__current_descriptor['fields'].append(descriptor)
self.__build()
return self.__fields[-1]
def update_field(self, name, update):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.__next_descriptor['fields']:
if field['name'] == name:
field.update(update)
return True
return False
def cast_row(self, row, fail_fast=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Prepare
result = []
errors = []
# Check row length
if len(row) != len(self.fields):
message = 'Row length %s doesn\'t match fields count %s'
message = message % (len(row), len(self.fields))
raise exceptions.CastError(message)
# Cast row
for field, value in zip(self.fields, row):
try:
result.append(field.cast_value(value))
except exceptions.CastError as exception:
if fail_fast:
raise
errors.append(exception)
# Raise errors
if errors:
message = 'There are %s cast errors (see exception.errors)' % len(errors)
raise exceptions.CastError(message, errors=errors)
return result
def infer(self, rows, headers=1, confidence=0.75):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Get headers
if isinstance(headers, int):
headers_row = headers
while True:
headers_row -= 1
headers = rows.pop(0)
if not headers_row:
break
elif not isinstance(headers, list):
headers = []
# Get descriptor
guesser = _TypeGuesser()
resolver = _TypeResolver()
descriptor = {'fields': []}
type_matches = {}
for header in headers:
descriptor['fields'].append({'name': header})
for index, row in enumerate(rows):
# Normalize rows with invalid dimensions for sanity
row_length = len(row)
headers_length = len(headers)
if row_length > headers_length:
row = row[:len(headers)]
if row_length < headers_length:
diff = headers_length - row_length
fill = [''] * diff
row = row + fill
# build a column-wise lookup of type matches
for index, value in enumerate(row):
rv = guesser.cast(value)
if type_matches.get(index):
type_matches[index].extend(rv)
else:
type_matches[index] = list(rv)
# choose a type/format for each column based on the matches
for index, results in type_matches.items():
rv = resolver.get(results, confidence)
descriptor['fields'][index].update(**rv)
# Save descriptor
self.__current_descriptor = descriptor
self.__build()
return descriptor
def commit(self, strict=None):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
if strict is not None:
self.__strict = strict
elif self.__current_descriptor == self.__next_descriptor:
return False
self.__current_descriptor = deepcopy(self.__next_descriptor)
self.__build()
return True
def save(self, target, ensure_ascii=True):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
mode = 'w'
encoding = 'utf-8'
if six.PY2:
mode = 'wb'
encoding = None
helpers.ensure_dir(target)
with io.open(target, mode=mode, encoding=encoding) as file:
json.dump(self.__current_descriptor, file, indent=4, ensure_ascii=ensure_ascii)
# Internal
def __build(self):
# Process descriptor
expand = helpers.expand_schema_descriptor
self.__current_descriptor = expand(self.__current_descriptor)
self.__next_descriptor = deepcopy(self.__current_descriptor)
# Validate descriptor
try:
self.__profile.validate(self.__current_descriptor)
self.__errors = []
except exceptions.ValidationError as exception:
self.__errors = exception.errors
if self.__strict:
raise exception
# Populate fields
self.__fields = []
for field in self.__current_descriptor.get('fields', []):
missing_values = self.__current_descriptor['missingValues']
try:
field = Field(field, missing_values=missing_values)
except Exception:
field = False
self.__fields.append(field)
# Deprecated
headers = field_names
has_field = get_field
|
frictionlessdata/tableschema-py
|
tableschema/schema.py
|
Schema.cast_row
|
python
|
def cast_row(self, row, fail_fast=False):
    """Cast every value in *row* with its corresponding schema field.

    Raises exceptions.CastError when the row length does not match the
    field count, or — after the whole row has been processed, unless
    *fail_fast* is set — when any individual value fails to cast.
    Returns the list of casted values.
    """
    fields = self.fields
    # A row must line up one-to-one with the schema's fields.
    if len(row) != len(fields):
        message = 'Row length %s doesn\'t match fields count %s'
        message = message % (len(row), len(fields))
        raise exceptions.CastError(message)
    casted = []
    failures = []
    for field, value in zip(fields, row):
        try:
            casted.append(field.cast_value(value))
        except exceptions.CastError as error:
            if fail_fast:
                raise
            failures.append(error)
    # Collected failures are reported together so callers can inspect
    # every bad cell at once via exception.errors.
    if failures:
        message = 'There are %s cast errors (see exception.errors)' % len(failures)
        raise exceptions.CastError(message, errors=failures)
    return casted
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/schema.py#L135-L163
| null |
class Schema(object):
# Public
def __init__(self, descriptor={}, strict=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Process descriptor
descriptor = helpers.retrieve_descriptor(descriptor)
# Set attributes
self.__strict = strict
self.__current_descriptor = deepcopy(descriptor)
self.__next_descriptor = deepcopy(descriptor)
self.__profile = Profile('table-schema')
self.__errors = []
self.__fields = []
# Build instance
self.__build()
@property
def valid(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return not bool(self.__errors)
@property
def errors(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__errors
@property
def descriptor(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Never use this.descriptor inside this class (!!!)
return self.__next_descriptor
@property
def primary_key(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
primary_key = self.__current_descriptor.get('primaryKey', [])
if not isinstance(primary_key, list):
primary_key = [primary_key]
return primary_key
@property
def foreign_keys(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
foreign_keys = self.__current_descriptor.get('foreignKeys', [])
for key in foreign_keys:
key.setdefault('fields', [])
key.setdefault('reference', {})
key['reference'].setdefault('resource', '')
key['reference'].setdefault('fields', [])
if not isinstance(key['fields'], list):
key['fields'] = [key['fields']]
if not isinstance(key['reference']['fields'], list):
key['reference']['fields'] = [key['reference']['fields']]
return foreign_keys
@property
def fields(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__fields
@property
def field_names(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return [field.name for field in self.fields]
def get_field(self, name):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.fields:
if field.name == name:
return field
return None
def add_field(self, descriptor):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
self.__current_descriptor.setdefault('fields', [])
self.__current_descriptor['fields'].append(descriptor)
self.__build()
return self.__fields[-1]
def update_field(self, name, update):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.__next_descriptor['fields']:
if field['name'] == name:
field.update(update)
return True
return False
def remove_field(self, name):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
field = self.get_field(name)
if field:
predicat = lambda field: field.get('name') != name
self.__current_descriptor['fields'] = filter(
predicat, self.__current_descriptor['fields'])
self.__build()
return field
def infer(self, rows, headers=1, confidence=0.75):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Get headers
if isinstance(headers, int):
headers_row = headers
while True:
headers_row -= 1
headers = rows.pop(0)
if not headers_row:
break
elif not isinstance(headers, list):
headers = []
# Get descriptor
guesser = _TypeGuesser()
resolver = _TypeResolver()
descriptor = {'fields': []}
type_matches = {}
for header in headers:
descriptor['fields'].append({'name': header})
for index, row in enumerate(rows):
# Normalize rows with invalid dimensions for sanity
row_length = len(row)
headers_length = len(headers)
if row_length > headers_length:
row = row[:len(headers)]
if row_length < headers_length:
diff = headers_length - row_length
fill = [''] * diff
row = row + fill
# build a column-wise lookup of type matches
for index, value in enumerate(row):
rv = guesser.cast(value)
if type_matches.get(index):
type_matches[index].extend(rv)
else:
type_matches[index] = list(rv)
# choose a type/format for each column based on the matches
for index, results in type_matches.items():
rv = resolver.get(results, confidence)
descriptor['fields'][index].update(**rv)
# Save descriptor
self.__current_descriptor = descriptor
self.__build()
return descriptor
def commit(self, strict=None):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
if strict is not None:
self.__strict = strict
elif self.__current_descriptor == self.__next_descriptor:
return False
self.__current_descriptor = deepcopy(self.__next_descriptor)
self.__build()
return True
def save(self, target, ensure_ascii=True):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
mode = 'w'
encoding = 'utf-8'
if six.PY2:
mode = 'wb'
encoding = None
helpers.ensure_dir(target)
with io.open(target, mode=mode, encoding=encoding) as file:
json.dump(self.__current_descriptor, file, indent=4, ensure_ascii=ensure_ascii)
# Internal
def __build(self):
# Process descriptor
expand = helpers.expand_schema_descriptor
self.__current_descriptor = expand(self.__current_descriptor)
self.__next_descriptor = deepcopy(self.__current_descriptor)
# Validate descriptor
try:
self.__profile.validate(self.__current_descriptor)
self.__errors = []
except exceptions.ValidationError as exception:
self.__errors = exception.errors
if self.__strict:
raise exception
# Populate fields
self.__fields = []
for field in self.__current_descriptor.get('fields', []):
missing_values = self.__current_descriptor['missingValues']
try:
field = Field(field, missing_values=missing_values)
except Exception:
field = False
self.__fields.append(field)
# Deprecated
headers = field_names
has_field = get_field
|
frictionlessdata/tableschema-py
|
tableschema/schema.py
|
Schema.infer
|
python
|
def infer(self, rows, headers=1, confidence=0.75):
    """Infer a schema descriptor from *rows*, store it, and return it.

    *headers* is either a 1-based row position (rows up to and including
    it are consumed from *rows*, the last consumed row becoming the
    header names) or an explicit list of header names; anything else is
    treated as "no headers". *confidence* (0..1) controls how dominant a
    type/format candidate must be to be chosen for a column.
    """
    # Resolve headers: an int means "the header row sits at this
    # 1-based position" and all rows before it are discarded.
    if isinstance(headers, int):
        headers_row = headers
        while True:
            headers_row -= 1
            headers = rows.pop(0)
            if not headers_row:
                break
    elif not isinstance(headers, list):
        headers = []
    # Get descriptor
    guesser = _TypeGuesser()
    resolver = _TypeResolver()
    descriptor = {'fields': []}
    type_matches = {}
    for header in headers:
        descriptor['fields'].append({'name': header})
    # Iterate rows directly: the previous ``enumerate`` bound a row index
    # that was never used and was shadowed by the inner column loop.
    for row in rows:
        # Normalize rows with invalid dimensions for sanity:
        # truncate rows that are too long, pad short ones with ''.
        row_length = len(row)
        headers_length = len(headers)
        if row_length > headers_length:
            row = row[:len(headers)]
        if row_length < headers_length:
            diff = headers_length - row_length
            fill = [''] * diff
            row = row + fill
        # build a column-wise lookup of type matches
        for index, value in enumerate(row):
            rv = guesser.cast(value)
            if type_matches.get(index):
                type_matches[index].extend(rv)
            else:
                type_matches[index] = list(rv)
    # choose a type/format for each column based on the matches
    for index, results in type_matches.items():
        rv = resolver.get(results, confidence)
        descriptor['fields'][index].update(**rv)
    # Save descriptor and rebuild the schema around it.
    self.__current_descriptor = descriptor
    self.__build()
    return descriptor
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/schema.py#L165-L213
|
[
"def __build(self):\n\n # Process descriptor\n expand = helpers.expand_schema_descriptor\n self.__current_descriptor = expand(self.__current_descriptor)\n self.__next_descriptor = deepcopy(self.__current_descriptor)\n\n # Validate descriptor\n try:\n self.__profile.validate(self.__current_descriptor)\n self.__errors = []\n except exceptions.ValidationError as exception:\n self.__errors = exception.errors\n if self.__strict:\n raise exception\n\n # Populate fields\n self.__fields = []\n for field in self.__current_descriptor.get('fields', []):\n missing_values = self.__current_descriptor['missingValues']\n try:\n field = Field(field, missing_values=missing_values)\n except Exception:\n field = False\n self.__fields.append(field)\n",
"def cast(self, value):\n for priority, name in enumerate(_INFER_TYPE_ORDER):\n cast = getattr(types, 'cast_%s' % name)\n result = cast('default', value)\n if result != config.ERROR:\n yield (name, 'default', priority)\n",
"def get(self, results, confidence):\n variants = set(results)\n # only one candidate... that's easy.\n if len(variants) == 1:\n rv = {'type': results[0][0], 'format': results[0][1]}\n else:\n counts = {}\n for result in results:\n if counts.get(result):\n counts[result] += 1\n else:\n counts[result] = 1\n # tuple representation of `counts` dict sorted by values\n sorted_counts = sorted(counts.items(),\n key=lambda item: item[1],\n reverse=True)\n # Allow also counts that are not the max, based on the confidence\n max_count = sorted_counts[0][1]\n sorted_counts = filter(lambda item: item[1] >= max_count * confidence,\n sorted_counts)\n # Choose the most specific data type\n sorted_counts = sorted(sorted_counts,\n key=lambda item: item[0][2])\n rv = {'type': sorted_counts[0][0][0], 'format': sorted_counts[0][0][1]}\n return rv\n"
] |
class Schema(object):
# Public
def __init__(self, descriptor={}, strict=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Process descriptor
descriptor = helpers.retrieve_descriptor(descriptor)
# Set attributes
self.__strict = strict
self.__current_descriptor = deepcopy(descriptor)
self.__next_descriptor = deepcopy(descriptor)
self.__profile = Profile('table-schema')
self.__errors = []
self.__fields = []
# Build instance
self.__build()
@property
def valid(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return not bool(self.__errors)
@property
def errors(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__errors
@property
def descriptor(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Never use this.descriptor inside this class (!!!)
return self.__next_descriptor
@property
def primary_key(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
primary_key = self.__current_descriptor.get('primaryKey', [])
if not isinstance(primary_key, list):
primary_key = [primary_key]
return primary_key
@property
def foreign_keys(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
foreign_keys = self.__current_descriptor.get('foreignKeys', [])
for key in foreign_keys:
key.setdefault('fields', [])
key.setdefault('reference', {})
key['reference'].setdefault('resource', '')
key['reference'].setdefault('fields', [])
if not isinstance(key['fields'], list):
key['fields'] = [key['fields']]
if not isinstance(key['reference']['fields'], list):
key['reference']['fields'] = [key['reference']['fields']]
return foreign_keys
@property
def fields(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__fields
@property
def field_names(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return [field.name for field in self.fields]
def get_field(self, name):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.fields:
if field.name == name:
return field
return None
def add_field(self, descriptor):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
self.__current_descriptor.setdefault('fields', [])
self.__current_descriptor['fields'].append(descriptor)
self.__build()
return self.__fields[-1]
def update_field(self, name, update):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.__next_descriptor['fields']:
if field['name'] == name:
field.update(update)
return True
return False
def remove_field(self, name):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
field = self.get_field(name)
if field:
predicat = lambda field: field.get('name') != name
self.__current_descriptor['fields'] = filter(
predicat, self.__current_descriptor['fields'])
self.__build()
return field
def cast_row(self, row, fail_fast=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Prepare
result = []
errors = []
# Check row length
if len(row) != len(self.fields):
message = 'Row length %s doesn\'t match fields count %s'
message = message % (len(row), len(self.fields))
raise exceptions.CastError(message)
# Cast row
for field, value in zip(self.fields, row):
try:
result.append(field.cast_value(value))
except exceptions.CastError as exception:
if fail_fast:
raise
errors.append(exception)
# Raise errors
if errors:
message = 'There are %s cast errors (see exception.errors)' % len(errors)
raise exceptions.CastError(message, errors=errors)
return result
def commit(self, strict=None):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
if strict is not None:
self.__strict = strict
elif self.__current_descriptor == self.__next_descriptor:
return False
self.__current_descriptor = deepcopy(self.__next_descriptor)
self.__build()
return True
def save(self, target, ensure_ascii=True):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
mode = 'w'
encoding = 'utf-8'
if six.PY2:
mode = 'wb'
encoding = None
helpers.ensure_dir(target)
with io.open(target, mode=mode, encoding=encoding) as file:
json.dump(self.__current_descriptor, file, indent=4, ensure_ascii=ensure_ascii)
# Internal
def __build(self):
# Process descriptor
expand = helpers.expand_schema_descriptor
self.__current_descriptor = expand(self.__current_descriptor)
self.__next_descriptor = deepcopy(self.__current_descriptor)
# Validate descriptor
try:
self.__profile.validate(self.__current_descriptor)
self.__errors = []
except exceptions.ValidationError as exception:
self.__errors = exception.errors
if self.__strict:
raise exception
# Populate fields
self.__fields = []
for field in self.__current_descriptor.get('fields', []):
missing_values = self.__current_descriptor['missingValues']
try:
field = Field(field, missing_values=missing_values)
except Exception:
field = False
self.__fields.append(field)
# Deprecated
headers = field_names
has_field = get_field
|
frictionlessdata/tableschema-py
|
tableschema/schema.py
|
Schema.save
|
python
|
def save(self, target, ensure_ascii=True):
mode = 'w'
encoding = 'utf-8'
if six.PY2:
mode = 'wb'
encoding = None
helpers.ensure_dir(target)
with io.open(target, mode=mode, encoding=encoding) as file:
json.dump(self.__current_descriptor, file, indent=4, ensure_ascii=ensure_ascii)
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/schema.py#L226-L236
|
[
"def ensure_dir(path):\n \"\"\"Ensure directory exists.\n\n Args:\n path(str): dir path\n\n \"\"\"\n dirpath = os.path.dirname(path)\n if dirpath and not os.path.exists(dirpath):\n os.makedirs(dirpath)\n"
] |
class Schema(object):
# Public
def __init__(self, descriptor={}, strict=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Process descriptor
descriptor = helpers.retrieve_descriptor(descriptor)
# Set attributes
self.__strict = strict
self.__current_descriptor = deepcopy(descriptor)
self.__next_descriptor = deepcopy(descriptor)
self.__profile = Profile('table-schema')
self.__errors = []
self.__fields = []
# Build instance
self.__build()
@property
def valid(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return not bool(self.__errors)
@property
def errors(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__errors
@property
def descriptor(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Never use this.descriptor inside this class (!!!)
return self.__next_descriptor
@property
def primary_key(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
primary_key = self.__current_descriptor.get('primaryKey', [])
if not isinstance(primary_key, list):
primary_key = [primary_key]
return primary_key
@property
def foreign_keys(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
foreign_keys = self.__current_descriptor.get('foreignKeys', [])
for key in foreign_keys:
key.setdefault('fields', [])
key.setdefault('reference', {})
key['reference'].setdefault('resource', '')
key['reference'].setdefault('fields', [])
if not isinstance(key['fields'], list):
key['fields'] = [key['fields']]
if not isinstance(key['reference']['fields'], list):
key['reference']['fields'] = [key['reference']['fields']]
return foreign_keys
@property
def fields(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__fields
@property
def field_names(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return [field.name for field in self.fields]
def get_field(self, name):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.fields:
if field.name == name:
return field
return None
def add_field(self, descriptor):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
self.__current_descriptor.setdefault('fields', [])
self.__current_descriptor['fields'].append(descriptor)
self.__build()
return self.__fields[-1]
def update_field(self, name, update):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
for field in self.__next_descriptor['fields']:
if field['name'] == name:
field.update(update)
return True
return False
def remove_field(self, name):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
field = self.get_field(name)
if field:
predicat = lambda field: field.get('name') != name
self.__current_descriptor['fields'] = filter(
predicat, self.__current_descriptor['fields'])
self.__build()
return field
def cast_row(self, row, fail_fast=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Prepare
result = []
errors = []
# Check row length
if len(row) != len(self.fields):
message = 'Row length %s doesn\'t match fields count %s'
message = message % (len(row), len(self.fields))
raise exceptions.CastError(message)
# Cast row
for field, value in zip(self.fields, row):
try:
result.append(field.cast_value(value))
except exceptions.CastError as exception:
if fail_fast:
raise
errors.append(exception)
# Raise errors
if errors:
message = 'There are %s cast errors (see exception.errors)' % len(errors)
raise exceptions.CastError(message, errors=errors)
return result
def infer(self, rows, headers=1, confidence=0.75):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Get headers
if isinstance(headers, int):
headers_row = headers
while True:
headers_row -= 1
headers = rows.pop(0)
if not headers_row:
break
elif not isinstance(headers, list):
headers = []
# Get descriptor
guesser = _TypeGuesser()
resolver = _TypeResolver()
descriptor = {'fields': []}
type_matches = {}
for header in headers:
descriptor['fields'].append({'name': header})
for index, row in enumerate(rows):
# Normalize rows with invalid dimensions for sanity
row_length = len(row)
headers_length = len(headers)
if row_length > headers_length:
row = row[:len(headers)]
if row_length < headers_length:
diff = headers_length - row_length
fill = [''] * diff
row = row + fill
# build a column-wise lookup of type matches
for index, value in enumerate(row):
rv = guesser.cast(value)
if type_matches.get(index):
type_matches[index].extend(rv)
else:
type_matches[index] = list(rv)
# choose a type/format for each column based on the matches
for index, results in type_matches.items():
rv = resolver.get(results, confidence)
descriptor['fields'][index].update(**rv)
# Save descriptor
self.__current_descriptor = descriptor
self.__build()
return descriptor
def commit(self, strict=None):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
if strict is not None:
self.__strict = strict
elif self.__current_descriptor == self.__next_descriptor:
return False
self.__current_descriptor = deepcopy(self.__next_descriptor)
self.__build()
return True
# Internal
def __build(self):
# Process descriptor
expand = helpers.expand_schema_descriptor
self.__current_descriptor = expand(self.__current_descriptor)
self.__next_descriptor = deepcopy(self.__current_descriptor)
# Validate descriptor
try:
self.__profile.validate(self.__current_descriptor)
self.__errors = []
except exceptions.ValidationError as exception:
self.__errors = exception.errors
if self.__strict:
raise exception
# Populate fields
self.__fields = []
for field in self.__current_descriptor.get('fields', []):
missing_values = self.__current_descriptor['missingValues']
try:
field = Field(field, missing_values=missing_values)
except Exception:
field = False
self.__fields.append(field)
# Deprecated
headers = field_names
has_field = get_field
|
frictionlessdata/tableschema-py
|
tableschema/helpers.py
|
ensure_dir
|
python
|
def ensure_dir(path):
dirpath = os.path.dirname(path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
|
Ensure directory exists.
Args:
path(str): dir path
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/helpers.py#L67-L76
| null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import io
import sys
import six
import json
import requests
from copy import deepcopy
from importlib import import_module
from . import exceptions
from . import config
# Retrieve descriptor
def retrieve_descriptor(source):
try:
# Inline
if isinstance(source, (dict, list)):
return deepcopy(source)
# Remote
if six.moves.urllib.parse.urlparse(source).scheme in config.REMOTE_SCHEMES:
return requests.get(source).json()
# Local
if isinstance(source, six.string_types):
with io.open(source, encoding='utf-8') as file:
return json.load(file)
# Stream
else:
return json.load(source)
except Exception:
raise exceptions.LoadError('Can\'t load descriptor')
# Expand descriptor
def expand_schema_descriptor(descriptor):
if isinstance(descriptor, dict):
descriptor = deepcopy(descriptor)
for field in descriptor.get('fields', []):
field.setdefault('type', config.DEFAULT_FIELD_TYPE)
field.setdefault('format', config.DEFAULT_FIELD_FORMAT)
descriptor.setdefault('missingValues', config.DEFAULT_MISSING_VALUES)
return descriptor
def expand_field_descriptor(descriptor):
descriptor = deepcopy(descriptor)
descriptor.setdefault('type', config.DEFAULT_FIELD_TYPE)
descriptor.setdefault('format', config.DEFAULT_FIELD_FORMAT)
return descriptor
# Miscellaneous
def normalize_value(value):
"""Convert value to string and make it lower cased.
"""
cast = str
if six.PY2:
cast = unicode # noqa
return cast(value).lower()
class PluginImporter(object):
"""Plugin importer.
Example:
Add to myapp.plugins something like this:
```
importer = PluginImporter(virtual='myapp.plugins.', actual='myapp_')
importer.register()
del PluginImporter
del importer
```
"""
# Public
def __init__(self, virtual, actual):
self.__virtual = virtual
self.__actual = actual
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return (self.virtual == other.virtual and
self.actual == other.actual)
@property
def virtual(self):
return self.__virtual
@property
def actual(self):
return self.__actual
def register(self):
if self not in sys.meta_path:
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if fullname.startswith(self.virtual):
return self
return None
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
if not fullname.startswith(self.virtual):
raise ImportError(fullname)
realname = fullname.replace(self.virtual, self.actual)
try:
module = import_module(realname)
except ImportError:
message = 'Plugin "%s" is not installed. '
message += 'Run "pip install %s" to install.'
message = message % (fullname, realname.replace('_', '-'))
raise ImportError(message)
sys.modules[realname] = module
sys.modules[fullname] = module
return module
|
frictionlessdata/tableschema-py
|
tableschema/helpers.py
|
normalize_value
|
python
|
def normalize_value(value):
cast = str
if six.PY2:
cast = unicode # noqa
return cast(value).lower()
|
Convert value to string and make it lower cased.
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/helpers.py#L79-L85
| null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import io
import sys
import six
import json
import requests
from copy import deepcopy
from importlib import import_module
from . import exceptions
from . import config
# Retrieve descriptor
def retrieve_descriptor(source):
try:
# Inline
if isinstance(source, (dict, list)):
return deepcopy(source)
# Remote
if six.moves.urllib.parse.urlparse(source).scheme in config.REMOTE_SCHEMES:
return requests.get(source).json()
# Local
if isinstance(source, six.string_types):
with io.open(source, encoding='utf-8') as file:
return json.load(file)
# Stream
else:
return json.load(source)
except Exception:
raise exceptions.LoadError('Can\'t load descriptor')
# Expand descriptor
def expand_schema_descriptor(descriptor):
if isinstance(descriptor, dict):
descriptor = deepcopy(descriptor)
for field in descriptor.get('fields', []):
field.setdefault('type', config.DEFAULT_FIELD_TYPE)
field.setdefault('format', config.DEFAULT_FIELD_FORMAT)
descriptor.setdefault('missingValues', config.DEFAULT_MISSING_VALUES)
return descriptor
def expand_field_descriptor(descriptor):
descriptor = deepcopy(descriptor)
descriptor.setdefault('type', config.DEFAULT_FIELD_TYPE)
descriptor.setdefault('format', config.DEFAULT_FIELD_FORMAT)
return descriptor
# Miscellaneous
def ensure_dir(path):
"""Ensure directory exists.
Args:
path(str): dir path
"""
dirpath = os.path.dirname(path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
class PluginImporter(object):
"""Plugin importer.
Example:
Add to myapp.plugins something like this:
```
importer = PluginImporter(virtual='myapp.plugins.', actual='myapp_')
importer.register()
del PluginImporter
del importer
```
"""
# Public
def __init__(self, virtual, actual):
self.__virtual = virtual
self.__actual = actual
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return (self.virtual == other.virtual and
self.actual == other.actual)
@property
def virtual(self):
return self.__virtual
@property
def actual(self):
return self.__actual
def register(self):
if self not in sys.meta_path:
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if fullname.startswith(self.virtual):
return self
return None
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
if not fullname.startswith(self.virtual):
raise ImportError(fullname)
realname = fullname.replace(self.virtual, self.actual)
try:
module = import_module(realname)
except ImportError:
message = 'Plugin "%s" is not installed. '
message += 'Run "pip install %s" to install.'
message = message % (fullname, realname.replace('_', '-'))
raise ImportError(message)
sys.modules[realname] = module
sys.modules[fullname] = module
return module
|
frictionlessdata/tableschema-py
|
tableschema/field.py
|
Field.cast_value
|
python
|
def cast_value(self, value, constraints=True):
# Null value
if value in self.__missing_values:
value = None
# Cast value
cast_value = value
if value is not None:
cast_value = self.__cast_function(value)
if cast_value == config.ERROR:
raise exceptions.CastError((
'Field "{field.name}" can\'t cast value "{value}" '
'for type "{field.type}" with format "{field.format}"'
).format(field=self, value=value))
# Check value
if constraints:
for name, check in self.__check_functions.items():
if isinstance(constraints, list):
if name not in constraints:
continue
passed = check(cast_value)
if not passed:
raise exceptions.CastError((
'Field "{field.name}" has constraint "{name}" '
'which is not satisfied for value "{value}"'
).format(field=self, name=name, value=value))
return cast_value
|
https://github.com/frictionlessdata/tableschema-py#field
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/field.py#L71-L102
| null |
class Field(object):
"""Table Schema field representation.
"""
# Public
def __init__(self, descriptor, missing_values=config.DEFAULT_MISSING_VALUES):
# Process descriptor
descriptor = helpers.expand_field_descriptor(descriptor)
# Set attributes
self.__descriptor = descriptor
self.__missing_values = missing_values
self.__cast_function = self.__get_cast_function()
self.__check_functions = self.__get_check_functions()
@property
def name(self):
"""https://github.com/frictionlessdata/tableschema-py#field
"""
return self.__descriptor.get('name')
@property
def type(self):
"""https://github.com/frictionlessdata/tableschema-py#field
"""
return self.__descriptor.get('type')
@property
def format(self):
"""https://github.com/frictionlessdata/tableschema-py#field
"""
return self.__descriptor.get('format')
@property
def required(self):
"""https://github.com/frictionlessdata/tableschema-py#field
"""
return self.constraints.get('required', False)
@property
def constraints(self):
"""https://github.com/frictionlessdata/tableschema-py#field
"""
return self.__descriptor.get('constraints', {})
@property
def descriptor(self):
"""https://github.com/frictionlessdata/tableschema-py#field
"""
return self.__descriptor
def test_value(self, value, constraints=True):
"""https://github.com/frictionlessdata/tableschema-py#field
"""
try:
self.cast_value(value, constraints=constraints)
except exceptions.CastError:
return False
return True
# Private
def __get_cast_function(self):
options = {}
# Get cast options
for key in ['decimalChar', 'groupChar', 'bareNumber', 'trueValues', 'falseValues']:
value = self.descriptor.get(key)
if value is not None:
options[key] = value
cast = getattr(types, 'cast_%s' % self.type)
cast = partial(cast, self.format, **options)
return cast
def __get_check_functions(self):
checks = {}
cast = partial(self.cast_value, constraints=False)
whitelist = _get_field_constraints(self.type)
for name, constraint in self.constraints.items():
if name in whitelist:
# Cast enum constraint
if name in ['enum']:
constraint = list(map(cast, constraint))
# Cast maximum/minimum constraint
if name in ['maximum', 'minimum']:
constraint = cast(constraint)
check = getattr(constraints, 'check_%s' % name)
checks[name] = partial(check, constraint)
return checks
|
frictionlessdata/tableschema-py
|
tableschema/table.py
|
Table.iter
|
python
|
def iter(self, keyed=False, extended=False, cast=True, relations=False):
# Prepare unique checks
if cast:
unique_fields_cache = {}
if self.schema:
unique_fields_cache = _create_unique_fields_cache(self.schema)
# Open/iterate stream
self.__stream.open()
iterator = self.__stream.iter(extended=True)
iterator = self.__apply_processors(iterator, cast=cast)
for row_number, headers, row in iterator:
# Get headers
if not self.__headers:
self.__headers = headers
# Check headers
if cast:
if self.schema and self.headers:
if self.headers != self.schema.field_names:
self.__stream.close()
message = 'Table headers don\'t match schema field names'
raise exceptions.CastError(message)
# Check unique
if cast:
for indexes, cache in unique_fields_cache.items():
values = tuple(value for i, value in enumerate(row) if i in indexes)
if not all(map(lambda value: value is None, values)):
if values in cache['data']:
self.__stream.close()
message = 'Field(s) "%s" duplicates in row "%s"'
message = message % (cache['name'], row_number)
raise exceptions.CastError(message)
cache['data'].add(values)
# Resolve relations
if relations:
if self.schema:
for foreign_key in self.schema.foreign_keys:
row = _resolve_relations(row, headers, relations, foreign_key)
if row is None:
self.__stream.close()
message = 'Foreign key "%s" violation in row "%s"'
message = message % (foreign_key['fields'], row_number)
raise exceptions.RelationError(message)
# Form row
if extended:
yield (row_number, headers, row)
elif keyed:
yield dict(zip(headers, row))
else:
yield row
# Close stream
self.__stream.close()
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/table.py#L68-L128
|
[
"def _create_unique_fields_cache(schema):\n primary_key_indexes = []\n cache = {}\n\n # Unique\n for index, field in enumerate(schema.fields):\n if field.name in schema.primary_key:\n primary_key_indexes.append(index)\n if field.constraints.get('unique'):\n cache[tuple([index])] = {\n 'name': field.name,\n 'data': set(),\n }\n\n # Primary key\n if primary_key_indexes:\n cache[tuple(primary_key_indexes)] = {\n 'name': ', '.join(schema.primary_key),\n 'data': set(),\n }\n\n return cache\n",
"def _resolve_relations(row, headers, relations, foreign_key):\n\n # Prepare helpers - needed data structures\n keyed_row = OrderedDict(zip(headers, row))\n fields = list(zip(foreign_key['fields'], foreign_key['reference']['fields']))\n reference = relations.get(foreign_key['reference']['resource'])\n if not reference:\n return row\n\n # Collect values - valid if all None\n values = {}\n valid = True\n for field, ref_field in fields:\n if field and ref_field:\n values[ref_field] = keyed_row[field]\n if keyed_row[field] is not None:\n valid = False\n\n # Resolve values - valid if match found\n if not valid:\n for refValues in reference:\n if set(values.items()).issubset(set(refValues.items())):\n for field, ref_field in fields:\n keyed_row[field] = refValues\n valid = True\n break\n\n return list(keyed_row.values()) if valid else None\n",
"def __apply_processors(self, iterator, cast=True):\n\n # Apply processors to iterator\n def builtin_processor(extended_rows):\n for row_number, headers, row in extended_rows:\n if self.__schema and cast:\n row = self.__schema.cast_row(row)\n yield (row_number, headers, row)\n processors = [builtin_processor] + self.__post_cast\n for processor in processors:\n iterator = processor(iterator)\n\n return iterator\n"
] |
class Table(object):
# Public
def __init__(self, source, schema=None, strict=False,
post_cast=[], storage=None, **options):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Set attributes
self.__source = source
self.__stream = None
self.__schema = None
self.__headers = None
self.__storage = None
self.__post_cast = copy(post_cast)
# Schema
if isinstance(schema, Schema):
self.__schema = schema
elif schema is not None:
self.__schema = Schema(schema)
# Stream (tabulator)
if storage is None:
options.setdefault('headers', 1)
self.__stream = Stream(source, **options)
# Stream (storage)
else:
if not isinstance(storage, Storage):
storage = Storage.connect(storage, **options)
if self.__schema:
storage.describe(source, self.__schema.descriptor)
headers = Schema(storage.describe(source)).field_names
self.__stream = Stream(partial(storage.iter, source), headers=headers)
self.__storage = storage
@property
def headers(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__headers
@property
def schema(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__schema
def read(self, keyed=False, extended=False, cast=True, relations=False, limit=None):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
result = []
rows = self.iter(keyed=keyed, extended=extended, cast=cast, relations=relations)
for count, row in enumerate(rows, start=1):
result.append(row)
if count == limit:
break
return result
def infer(self, limit=100, confidence=0.75):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
if self.__schema is None or self.__headers is None:
# Infer (tabulator)
if not self.__storage:
with self.__stream as stream:
if self.__schema is None:
self.__schema = Schema()
self.__schema.infer(stream.sample[:limit],
headers=stream.headers,
confidence=confidence)
if self.__headers is None:
self.__headers = stream.headers
# Infer (storage)
else:
descriptor = self.__storage.describe(self.__source)
if self.__schema is None:
self.__schema = Schema(descriptor)
if self.__headers is None:
self.__headers = self.__schema.field_names
return self.__schema.descriptor
def save(self, target, storage=None, **options):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Save (tabulator)
if storage is None:
with Stream(self.iter, headers=self.__schema.headers) as stream:
stream.save(target, **options)
return True
# Save (storage)
else:
if not isinstance(storage, Storage):
storage = Storage.connect(storage, **options)
storage.create(target, self.__schema.descriptor, force=True)
storage.write(target, self.iter(cast=False))
return storage
# Private
def __apply_processors(self, iterator, cast=True):
# Apply processors to iterator
def builtin_processor(extended_rows):
for row_number, headers, row in extended_rows:
if self.__schema and cast:
row = self.__schema.cast_row(row)
yield (row_number, headers, row)
processors = [builtin_processor] + self.__post_cast
for processor in processors:
iterator = processor(iterator)
return iterator
|
frictionlessdata/tableschema-py
|
tableschema/table.py
|
Table.read
|
python
|
def read(self, keyed=False, extended=False, cast=True, relations=False, limit=None):
result = []
rows = self.iter(keyed=keyed, extended=extended, cast=cast, relations=relations)
for count, row in enumerate(rows, start=1):
result.append(row)
if count == limit:
break
return result
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/table.py#L130-L139
|
[
"def iter(self, keyed=False, extended=False, cast=True, relations=False):\n \"\"\"https://github.com/frictionlessdata/tableschema-py#schema\n \"\"\"\n\n # Prepare unique checks\n if cast:\n unique_fields_cache = {}\n if self.schema:\n unique_fields_cache = _create_unique_fields_cache(self.schema)\n\n # Open/iterate stream\n self.__stream.open()\n iterator = self.__stream.iter(extended=True)\n iterator = self.__apply_processors(iterator, cast=cast)\n for row_number, headers, row in iterator:\n\n # Get headers\n if not self.__headers:\n self.__headers = headers\n\n # Check headers\n if cast:\n if self.schema and self.headers:\n if self.headers != self.schema.field_names:\n self.__stream.close()\n message = 'Table headers don\\'t match schema field names'\n raise exceptions.CastError(message)\n\n # Check unique\n if cast:\n for indexes, cache in unique_fields_cache.items():\n values = tuple(value for i, value in enumerate(row) if i in indexes)\n if not all(map(lambda value: value is None, values)):\n if values in cache['data']:\n self.__stream.close()\n message = 'Field(s) \"%s\" duplicates in row \"%s\"'\n message = message % (cache['name'], row_number)\n raise exceptions.CastError(message)\n cache['data'].add(values)\n\n # Resolve relations\n if relations:\n if self.schema:\n for foreign_key in self.schema.foreign_keys:\n row = _resolve_relations(row, headers, relations, foreign_key)\n if row is None:\n self.__stream.close()\n message = 'Foreign key \"%s\" violation in row \"%s\"'\n message = message % (foreign_key['fields'], row_number)\n raise exceptions.RelationError(message)\n\n # Form row\n if extended:\n yield (row_number, headers, row)\n elif keyed:\n yield dict(zip(headers, row))\n else:\n yield row\n\n # Close stream\n self.__stream.close()\n"
] |
class Table(object):
# Public
def __init__(self, source, schema=None, strict=False,
post_cast=[], storage=None, **options):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Set attributes
self.__source = source
self.__stream = None
self.__schema = None
self.__headers = None
self.__storage = None
self.__post_cast = copy(post_cast)
# Schema
if isinstance(schema, Schema):
self.__schema = schema
elif schema is not None:
self.__schema = Schema(schema)
# Stream (tabulator)
if storage is None:
options.setdefault('headers', 1)
self.__stream = Stream(source, **options)
# Stream (storage)
else:
if not isinstance(storage, Storage):
storage = Storage.connect(storage, **options)
if self.__schema:
storage.describe(source, self.__schema.descriptor)
headers = Schema(storage.describe(source)).field_names
self.__stream = Stream(partial(storage.iter, source), headers=headers)
self.__storage = storage
@property
def headers(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__headers
@property
def schema(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__schema
def iter(self, keyed=False, extended=False, cast=True, relations=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Prepare unique checks
if cast:
unique_fields_cache = {}
if self.schema:
unique_fields_cache = _create_unique_fields_cache(self.schema)
# Open/iterate stream
self.__stream.open()
iterator = self.__stream.iter(extended=True)
iterator = self.__apply_processors(iterator, cast=cast)
for row_number, headers, row in iterator:
# Get headers
if not self.__headers:
self.__headers = headers
# Check headers
if cast:
if self.schema and self.headers:
if self.headers != self.schema.field_names:
self.__stream.close()
message = 'Table headers don\'t match schema field names'
raise exceptions.CastError(message)
# Check unique
if cast:
for indexes, cache in unique_fields_cache.items():
values = tuple(value for i, value in enumerate(row) if i in indexes)
if not all(map(lambda value: value is None, values)):
if values in cache['data']:
self.__stream.close()
message = 'Field(s) "%s" duplicates in row "%s"'
message = message % (cache['name'], row_number)
raise exceptions.CastError(message)
cache['data'].add(values)
# Resolve relations
if relations:
if self.schema:
for foreign_key in self.schema.foreign_keys:
row = _resolve_relations(row, headers, relations, foreign_key)
if row is None:
self.__stream.close()
message = 'Foreign key "%s" violation in row "%s"'
message = message % (foreign_key['fields'], row_number)
raise exceptions.RelationError(message)
# Form row
if extended:
yield (row_number, headers, row)
elif keyed:
yield dict(zip(headers, row))
else:
yield row
# Close stream
self.__stream.close()
def infer(self, limit=100, confidence=0.75):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
if self.__schema is None or self.__headers is None:
# Infer (tabulator)
if not self.__storage:
with self.__stream as stream:
if self.__schema is None:
self.__schema = Schema()
self.__schema.infer(stream.sample[:limit],
headers=stream.headers,
confidence=confidence)
if self.__headers is None:
self.__headers = stream.headers
# Infer (storage)
else:
descriptor = self.__storage.describe(self.__source)
if self.__schema is None:
self.__schema = Schema(descriptor)
if self.__headers is None:
self.__headers = self.__schema.field_names
return self.__schema.descriptor
def save(self, target, storage=None, **options):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Save (tabulator)
if storage is None:
with Stream(self.iter, headers=self.__schema.headers) as stream:
stream.save(target, **options)
return True
# Save (storage)
else:
if not isinstance(storage, Storage):
storage = Storage.connect(storage, **options)
storage.create(target, self.__schema.descriptor, force=True)
storage.write(target, self.iter(cast=False))
return storage
# Private
def __apply_processors(self, iterator, cast=True):
# Apply processors to iterator
def builtin_processor(extended_rows):
for row_number, headers, row in extended_rows:
if self.__schema and cast:
row = self.__schema.cast_row(row)
yield (row_number, headers, row)
processors = [builtin_processor] + self.__post_cast
for processor in processors:
iterator = processor(iterator)
return iterator
|
frictionlessdata/tableschema-py
|
tableschema/table.py
|
Table.infer
|
python
|
def infer(self, limit=100, confidence=0.75):
if self.__schema is None or self.__headers is None:
# Infer (tabulator)
if not self.__storage:
with self.__stream as stream:
if self.__schema is None:
self.__schema = Schema()
self.__schema.infer(stream.sample[:limit],
headers=stream.headers,
confidence=confidence)
if self.__headers is None:
self.__headers = stream.headers
# Infer (storage)
else:
descriptor = self.__storage.describe(self.__source)
if self.__schema is None:
self.__schema = Schema(descriptor)
if self.__headers is None:
self.__headers = self.__schema.field_names
return self.__schema.descriptor
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/table.py#L141-L165
|
[
"def infer(self, rows, headers=1, confidence=0.75):\n \"\"\"https://github.com/frictionlessdata/tableschema-py#schema\n \"\"\"\n\n # Get headers\n if isinstance(headers, int):\n headers_row = headers\n while True:\n headers_row -= 1\n headers = rows.pop(0)\n if not headers_row:\n break\n elif not isinstance(headers, list):\n headers = []\n\n # Get descriptor\n guesser = _TypeGuesser()\n resolver = _TypeResolver()\n descriptor = {'fields': []}\n type_matches = {}\n for header in headers:\n descriptor['fields'].append({'name': header})\n for index, row in enumerate(rows):\n # Normalize rows with invalid dimensions for sanity\n row_length = len(row)\n headers_length = len(headers)\n if row_length > headers_length:\n row = row[:len(headers)]\n if row_length < headers_length:\n diff = headers_length - row_length\n fill = [''] * diff\n row = row + fill\n # build a column-wise lookup of type matches\n for index, value in enumerate(row):\n rv = guesser.cast(value)\n if type_matches.get(index):\n type_matches[index].extend(rv)\n else:\n type_matches[index] = list(rv)\n # choose a type/format for each column based on the matches\n for index, results in type_matches.items():\n rv = resolver.get(results, confidence)\n descriptor['fields'][index].update(**rv)\n\n # Save descriptor\n self.__current_descriptor = descriptor\n self.__build()\n\n return descriptor\n"
] |
class Table(object):
# Public
def __init__(self, source, schema=None, strict=False,
post_cast=[], storage=None, **options):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Set attributes
self.__source = source
self.__stream = None
self.__schema = None
self.__headers = None
self.__storage = None
self.__post_cast = copy(post_cast)
# Schema
if isinstance(schema, Schema):
self.__schema = schema
elif schema is not None:
self.__schema = Schema(schema)
# Stream (tabulator)
if storage is None:
options.setdefault('headers', 1)
self.__stream = Stream(source, **options)
# Stream (storage)
else:
if not isinstance(storage, Storage):
storage = Storage.connect(storage, **options)
if self.__schema:
storage.describe(source, self.__schema.descriptor)
headers = Schema(storage.describe(source)).field_names
self.__stream = Stream(partial(storage.iter, source), headers=headers)
self.__storage = storage
@property
def headers(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__headers
@property
def schema(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__schema
def iter(self, keyed=False, extended=False, cast=True, relations=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Prepare unique checks
if cast:
unique_fields_cache = {}
if self.schema:
unique_fields_cache = _create_unique_fields_cache(self.schema)
# Open/iterate stream
self.__stream.open()
iterator = self.__stream.iter(extended=True)
iterator = self.__apply_processors(iterator, cast=cast)
for row_number, headers, row in iterator:
# Get headers
if not self.__headers:
self.__headers = headers
# Check headers
if cast:
if self.schema and self.headers:
if self.headers != self.schema.field_names:
self.__stream.close()
message = 'Table headers don\'t match schema field names'
raise exceptions.CastError(message)
# Check unique
if cast:
for indexes, cache in unique_fields_cache.items():
values = tuple(value for i, value in enumerate(row) if i in indexes)
if not all(map(lambda value: value is None, values)):
if values in cache['data']:
self.__stream.close()
message = 'Field(s) "%s" duplicates in row "%s"'
message = message % (cache['name'], row_number)
raise exceptions.CastError(message)
cache['data'].add(values)
# Resolve relations
if relations:
if self.schema:
for foreign_key in self.schema.foreign_keys:
row = _resolve_relations(row, headers, relations, foreign_key)
if row is None:
self.__stream.close()
message = 'Foreign key "%s" violation in row "%s"'
message = message % (foreign_key['fields'], row_number)
raise exceptions.RelationError(message)
# Form row
if extended:
yield (row_number, headers, row)
elif keyed:
yield dict(zip(headers, row))
else:
yield row
# Close stream
self.__stream.close()
def read(self, keyed=False, extended=False, cast=True, relations=False, limit=None):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
result = []
rows = self.iter(keyed=keyed, extended=extended, cast=cast, relations=relations)
for count, row in enumerate(rows, start=1):
result.append(row)
if count == limit:
break
return result
def save(self, target, storage=None, **options):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Save (tabulator)
if storage is None:
with Stream(self.iter, headers=self.__schema.headers) as stream:
stream.save(target, **options)
return True
# Save (storage)
else:
if not isinstance(storage, Storage):
storage = Storage.connect(storage, **options)
storage.create(target, self.__schema.descriptor, force=True)
storage.write(target, self.iter(cast=False))
return storage
# Private
def __apply_processors(self, iterator, cast=True):
# Apply processors to iterator
def builtin_processor(extended_rows):
for row_number, headers, row in extended_rows:
if self.__schema and cast:
row = self.__schema.cast_row(row)
yield (row_number, headers, row)
processors = [builtin_processor] + self.__post_cast
for processor in processors:
iterator = processor(iterator)
return iterator
|
frictionlessdata/tableschema-py
|
tableschema/table.py
|
Table.save
|
python
|
def save(self, target, storage=None, **options):
# Save (tabulator)
if storage is None:
with Stream(self.iter, headers=self.__schema.headers) as stream:
stream.save(target, **options)
return True
# Save (storage)
else:
if not isinstance(storage, Storage):
storage = Storage.connect(storage, **options)
storage.create(target, self.__schema.descriptor, force=True)
storage.write(target, self.iter(cast=False))
return storage
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/table.py#L167-L183
|
[
"def iter(self, keyed=False, extended=False, cast=True, relations=False):\n \"\"\"https://github.com/frictionlessdata/tableschema-py#schema\n \"\"\"\n\n # Prepare unique checks\n if cast:\n unique_fields_cache = {}\n if self.schema:\n unique_fields_cache = _create_unique_fields_cache(self.schema)\n\n # Open/iterate stream\n self.__stream.open()\n iterator = self.__stream.iter(extended=True)\n iterator = self.__apply_processors(iterator, cast=cast)\n for row_number, headers, row in iterator:\n\n # Get headers\n if not self.__headers:\n self.__headers = headers\n\n # Check headers\n if cast:\n if self.schema and self.headers:\n if self.headers != self.schema.field_names:\n self.__stream.close()\n message = 'Table headers don\\'t match schema field names'\n raise exceptions.CastError(message)\n\n # Check unique\n if cast:\n for indexes, cache in unique_fields_cache.items():\n values = tuple(value for i, value in enumerate(row) if i in indexes)\n if not all(map(lambda value: value is None, values)):\n if values in cache['data']:\n self.__stream.close()\n message = 'Field(s) \"%s\" duplicates in row \"%s\"'\n message = message % (cache['name'], row_number)\n raise exceptions.CastError(message)\n cache['data'].add(values)\n\n # Resolve relations\n if relations:\n if self.schema:\n for foreign_key in self.schema.foreign_keys:\n row = _resolve_relations(row, headers, relations, foreign_key)\n if row is None:\n self.__stream.close()\n message = 'Foreign key \"%s\" violation in row \"%s\"'\n message = message % (foreign_key['fields'], row_number)\n raise exceptions.RelationError(message)\n\n # Form row\n if extended:\n yield (row_number, headers, row)\n elif keyed:\n yield dict(zip(headers, row))\n else:\n yield row\n\n # Close stream\n self.__stream.close()\n"
] |
class Table(object):
# Public
def __init__(self, source, schema=None, strict=False,
post_cast=[], storage=None, **options):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Set attributes
self.__source = source
self.__stream = None
self.__schema = None
self.__headers = None
self.__storage = None
self.__post_cast = copy(post_cast)
# Schema
if isinstance(schema, Schema):
self.__schema = schema
elif schema is not None:
self.__schema = Schema(schema)
# Stream (tabulator)
if storage is None:
options.setdefault('headers', 1)
self.__stream = Stream(source, **options)
# Stream (storage)
else:
if not isinstance(storage, Storage):
storage = Storage.connect(storage, **options)
if self.__schema:
storage.describe(source, self.__schema.descriptor)
headers = Schema(storage.describe(source)).field_names
self.__stream = Stream(partial(storage.iter, source), headers=headers)
self.__storage = storage
@property
def headers(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__headers
@property
def schema(self):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
return self.__schema
def iter(self, keyed=False, extended=False, cast=True, relations=False):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
# Prepare unique checks
if cast:
unique_fields_cache = {}
if self.schema:
unique_fields_cache = _create_unique_fields_cache(self.schema)
# Open/iterate stream
self.__stream.open()
iterator = self.__stream.iter(extended=True)
iterator = self.__apply_processors(iterator, cast=cast)
for row_number, headers, row in iterator:
# Get headers
if not self.__headers:
self.__headers = headers
# Check headers
if cast:
if self.schema and self.headers:
if self.headers != self.schema.field_names:
self.__stream.close()
message = 'Table headers don\'t match schema field names'
raise exceptions.CastError(message)
# Check unique
if cast:
for indexes, cache in unique_fields_cache.items():
values = tuple(value for i, value in enumerate(row) if i in indexes)
if not all(map(lambda value: value is None, values)):
if values in cache['data']:
self.__stream.close()
message = 'Field(s) "%s" duplicates in row "%s"'
message = message % (cache['name'], row_number)
raise exceptions.CastError(message)
cache['data'].add(values)
# Resolve relations
if relations:
if self.schema:
for foreign_key in self.schema.foreign_keys:
row = _resolve_relations(row, headers, relations, foreign_key)
if row is None:
self.__stream.close()
message = 'Foreign key "%s" violation in row "%s"'
message = message % (foreign_key['fields'], row_number)
raise exceptions.RelationError(message)
# Form row
if extended:
yield (row_number, headers, row)
elif keyed:
yield dict(zip(headers, row))
else:
yield row
# Close stream
self.__stream.close()
def read(self, keyed=False, extended=False, cast=True, relations=False, limit=None):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
result = []
rows = self.iter(keyed=keyed, extended=extended, cast=cast, relations=relations)
for count, row in enumerate(rows, start=1):
result.append(row)
if count == limit:
break
return result
def infer(self, limit=100, confidence=0.75):
"""https://github.com/frictionlessdata/tableschema-py#schema
"""
if self.__schema is None or self.__headers is None:
# Infer (tabulator)
if not self.__storage:
with self.__stream as stream:
if self.__schema is None:
self.__schema = Schema()
self.__schema.infer(stream.sample[:limit],
headers=stream.headers,
confidence=confidence)
if self.__headers is None:
self.__headers = stream.headers
# Infer (storage)
else:
descriptor = self.__storage.describe(self.__source)
if self.__schema is None:
self.__schema = Schema(descriptor)
if self.__headers is None:
self.__headers = self.__schema.field_names
return self.__schema.descriptor
# Private
def __apply_processors(self, iterator, cast=True):
# Apply processors to iterator
def builtin_processor(extended_rows):
for row_number, headers, row in extended_rows:
if self.__schema and cast:
row = self.__schema.cast_row(row)
yield (row_number, headers, row)
processors = [builtin_processor] + self.__post_cast
for processor in processors:
iterator = processor(iterator)
return iterator
|
frictionlessdata/tableschema-py
|
tableschema/cli.py
|
infer
|
python
|
def infer(data, row_limit, confidence, encoding, to_file):
descriptor = tableschema.infer(data,
encoding=encoding,
limit=row_limit,
confidence=confidence)
if to_file:
with io.open(to_file, mode='w+t', encoding='utf-8') as dest:
dest.write(json.dumps(descriptor, ensure_ascii=False, indent=4))
click.echo(descriptor)
|
Infer a schema from data.
* data must be a local filepath
* data must be CSV
* the file encoding is assumed to be UTF-8 unless an encoding is passed
with --encoding
* the first line of data must be headers
* these constraints are just for the CLI
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/cli.py#L36-L53
|
[
"def infer(source, headers=1, limit=100, confidence=0.75, **options):\n \"\"\"https://github.com/frictionlessdata/tableschema-py#schema\n \"\"\"\n\n # Deprecated arguments order\n is_string = lambda value: isinstance(value, six.string_types)\n if isinstance(source, list) and all(map(is_string, source)):\n warnings.warn('Correct arguments order infer(source, headers)', UserWarning)\n source, headers = headers, source\n\n table = Table(source, headers=headers, **options)\n descriptor = table.infer(limit=limit, confidence=confidence)\n return descriptor\n"
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import sys
import json
import click
import tableschema
DIR = os.path.abspath(os.path.dirname(__file__))
VERSION_FILE = os.path.join(os.path.dirname(__file__), 'VERSION')
VERSION = io.open(VERSION_FILE, encoding='utf-8').read().strip()
@click.group()
def main():
"""The entry point into the CLI."""
@main.command()
def info():
"""Return info on this version of Table Schema"""
click.echo(json.dumps({'version': VERSION}, ensure_ascii=False, indent=4))
@main.command()
@click.argument('data')
@click.option('--row_limit', default=100, type=int)
@click.option('--confidence', default=0.75, type=float)
@click.option('--encoding', default='utf-8')
@click.option('--to_file')
@main.command()
@click.argument('schema')
def validate(schema):
"""Validate that a supposed schema is in fact a Table Schema."""
try:
tableschema.validate(schema)
click.echo("Schema is valid")
sys.exit(0)
except tableschema.exceptions.ValidationError as exception:
click.echo("Schema is not valid")
click.echo(exception.errors)
sys.exit(1)
if __name__ == '__main__':
main()
|
frictionlessdata/tableschema-py
|
tableschema/cli.py
|
validate
|
python
|
def validate(schema):
try:
tableschema.validate(schema)
click.echo("Schema is valid")
sys.exit(0)
except tableschema.exceptions.ValidationError as exception:
click.echo("Schema is not valid")
click.echo(exception.errors)
sys.exit(1)
|
Validate that a supposed schema is in fact a Table Schema.
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/cli.py#L58-L67
|
[
"def validate(descriptor):\n \"\"\"https://github.com/frictionlessdata/tableschema-py#schema\n \"\"\"\n Schema(descriptor, strict=True)\n return True\n"
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import io
import sys
import json
import click
import tableschema
DIR = os.path.abspath(os.path.dirname(__file__))
VERSION_FILE = os.path.join(os.path.dirname(__file__), 'VERSION')
VERSION = io.open(VERSION_FILE, encoding='utf-8').read().strip()
@click.group()
def main():
"""The entry point into the CLI."""
@main.command()
def info():
"""Return info on this version of Table Schema"""
click.echo(json.dumps({'version': VERSION}, ensure_ascii=False, indent=4))
@main.command()
@click.argument('data')
@click.option('--row_limit', default=100, type=int)
@click.option('--confidence', default=0.75, type=float)
@click.option('--encoding', default='utf-8')
@click.option('--to_file')
def infer(data, row_limit, confidence, encoding, to_file):
"""Infer a schema from data.
* data must be a local filepath
* data must be CSV
* the file encoding is assumed to be UTF-8 unless an encoding is passed
with --encoding
* the first line of data must be headers
* these constraints are just for the CLI
"""
descriptor = tableschema.infer(data,
encoding=encoding,
limit=row_limit,
confidence=confidence)
if to_file:
with io.open(to_file, mode='w+t', encoding='utf-8') as dest:
dest.write(json.dumps(descriptor, ensure_ascii=False, indent=4))
click.echo(descriptor)
@main.command()
@click.argument('schema')
if __name__ == '__main__':
main()
|
frictionlessdata/tableschema-py
|
tableschema/infer.py
|
infer
|
python
|
def infer(source, headers=1, limit=100, confidence=0.75, **options):
# Deprecated arguments order
is_string = lambda value: isinstance(value, six.string_types)
if isinstance(source, list) and all(map(is_string, source)):
warnings.warn('Correct arguments order infer(source, headers)', UserWarning)
source, headers = headers, source
table = Table(source, headers=headers, **options)
descriptor = table.infer(limit=limit, confidence=confidence)
return descriptor
|
https://github.com/frictionlessdata/tableschema-py#schema
|
train
|
https://github.com/frictionlessdata/tableschema-py/blob/9c5fa930319e7c5b10351f794091c5f9de5e8684/tableschema/infer.py#L14-L26
|
[
"def infer(self, limit=100, confidence=0.75):\n \"\"\"https://github.com/frictionlessdata/tableschema-py#schema\n \"\"\"\n if self.__schema is None or self.__headers is None:\n\n # Infer (tabulator)\n if not self.__storage:\n with self.__stream as stream:\n if self.__schema is None:\n self.__schema = Schema()\n self.__schema.infer(stream.sample[:limit],\n headers=stream.headers,\n confidence=confidence)\n if self.__headers is None:\n self.__headers = stream.headers\n\n # Infer (storage)\n else:\n descriptor = self.__storage.describe(self.__source)\n if self.__schema is None:\n self.__schema = Schema(descriptor)\n if self.__headers is None:\n self.__headers = self.__schema.field_names\n\n return self.__schema.descriptor\n"
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
import warnings
from .table import Table
# Module API
|
tmbo/questionary
|
questionary/utils.py
|
default_values_of
|
python
|
def default_values_of(func):
signature = inspect.signature(func)
return [k
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty or
v.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD]
|
Return the defaults of the function `func`.
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/utils.py#L7-L14
| null |
# -*- coding: utf-8 -*-
import inspect
ACTIVATED_ASYNC_MODE = False
def arguments_of(func):
"""Return the parameters of the function `func`."""
return list(inspect.signature(func).parameters.keys())
def required_arguments(func):
"""Return all arguments of a function that do not have a default value."""
defaults = default_values_of(func)
args = arguments_of(func)
if defaults:
args = args[:-len(defaults)]
return args # all args without default values
def missing_arguments(func, argdict):
"""Return all arguments that are missing to call func."""
return set(required_arguments(func)) - set(argdict.keys())
async def activate_prompt_toolkit_async_mode():
"""Configure prompt toolkit to use the asyncio event loop.
Needs to be async, so we use the right event loop in py 3.5"""
from prompt_toolkit.eventloop import use_asyncio_event_loop
global ACTIVATED_ASYNC_MODE
# Tell prompt_toolkit to use asyncio for the event loop.
use_asyncio_event_loop()
ACTIVATED_ASYNC_MODE = True
|
tmbo/questionary
|
questionary/utils.py
|
required_arguments
|
python
|
def required_arguments(func):
defaults = default_values_of(func)
args = arguments_of(func)
if defaults:
args = args[:-len(defaults)]
return args
|
Return all arguments of a function that do not have a default value.
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/utils.py#L23-L30
|
[
"def default_values_of(func):\n \"\"\"Return the defaults of the function `func`.\"\"\"\n\n signature = inspect.signature(func)\n return [k\n for k, v in signature.parameters.items()\n if v.default is not inspect.Parameter.empty or\n v.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD]\n",
"def arguments_of(func):\n \"\"\"Return the parameters of the function `func`.\"\"\"\n\n return list(inspect.signature(func).parameters.keys())\n"
] |
# -*- coding: utf-8 -*-
import inspect
ACTIVATED_ASYNC_MODE = False
def default_values_of(func):
"""Return the defaults of the function `func`."""
signature = inspect.signature(func)
return [k
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty or
v.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD]
def arguments_of(func):
"""Return the parameters of the function `func`."""
return list(inspect.signature(func).parameters.keys())
# all args without default values
def missing_arguments(func, argdict):
"""Return all arguments that are missing to call func."""
return set(required_arguments(func)) - set(argdict.keys())
async def activate_prompt_toolkit_async_mode():
"""Configure prompt toolkit to use the asyncio event loop.
Needs to be async, so we use the right event loop in py 3.5"""
from prompt_toolkit.eventloop import use_asyncio_event_loop
global ACTIVATED_ASYNC_MODE
# Tell prompt_toolkit to use asyncio for the event loop.
use_asyncio_event_loop()
ACTIVATED_ASYNC_MODE = True
|
tmbo/questionary
|
questionary/prompts/text.py
|
text
|
python
|
def text(message: Text,
default: Text = "",
validate: Union[Type[Validator],
Callable[[Text], bool],
None] = None, # noqa
qmark: Text = DEFAULT_QUESTION_PREFIX,
style: Optional[Style] = None,
**kwargs: Any) -> Question:
merged_style = merge_styles([DEFAULT_STYLE, style])
validator = build_validator(validate)
def get_prompt_tokens():
return [("class:qmark", qmark),
("class:question", ' {} '.format(message))]
p = PromptSession(get_prompt_tokens,
style=merged_style,
validator=validator,
**kwargs)
p.default_buffer.reset(Document(default))
return Question(p.app)
|
Prompt the user to enter a free text message.
This question type can be used to prompt the user for some text input.
Args:
message: Question text
default: Default value will be returned if the user just hits
enter.
validate: Require the entered value to pass a validation. The
value can not be submited until the validator accepts
it (e.g. to check minimum password length).
This can either be a function accepting the input and
returning a boolean, or an class reference to a
subclass of the prompt toolkit Validator class.
qmark: Question prefix displayed in front of the question.
By default this is a `?`
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
Returns:
Question: Question instance, ready to be prompted (using `.ask()`).
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/prompts/text.py#L15-L65
|
[
"def build_validator(validate: Any) -> Optional[Validator]:\n if validate:\n if inspect.isclass(validate) and issubclass(validate, Validator):\n return validate()\n elif callable(validate):\n class _InputValidator(Validator):\n def validate(self, document):\n verdict = validate(document.text)\n if verdict is not True:\n if verdict is False:\n verdict = 'invalid input'\n raise ValidationError(\n message=verdict,\n cursor_position=len(document.text))\n\n return _InputValidator()\n return None\n"
] |
# -*- coding: utf-8 -*-
from prompt_toolkit.document import Document
from prompt_toolkit.shortcuts.prompt import (
PromptSession)
from prompt_toolkit.styles import merge_styles, Style
from prompt_toolkit.validation import Validator
from typing import Text, Type, Union, Callable, Optional, Any
from questionary.constants import DEFAULT_STYLE, DEFAULT_QUESTION_PREFIX
from questionary.prompts.common import build_validator
from questionary.question import Question
|
tmbo/questionary
|
questionary/question.py
|
Question.ask_async
|
python
|
async def ask_async(self,
patch_stdout: bool = False,
kbi_msg: str = DEFAULT_KBI_MESSAGE) -> Any:
if self.should_skip_question:
return self.default
try:
sys.stdout.flush()
return await self.unsafe_ask_async(patch_stdout)
except KeyboardInterrupt:
print("\n{}\n".format(kbi_msg))
return None
|
Ask the question using asyncio and return user response.
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/question.py#L21-L34
|
[
"async def unsafe_ask_async(self, patch_stdout: bool = False) -> Any:\n \"\"\"Ask the question using asyncio and return user response.\n\n Does not catch keyboard interrupts.\"\"\"\n\n if not utils.ACTIVATED_ASYNC_MODE:\n await utils.activate_prompt_toolkit_async_mode()\n\n if patch_stdout:\n # with prompt_toolkit.patch_stdout.patch_stdout():\n return await self.application.run_async().to_asyncio_future()\n else:\n return await self.application.run_async().to_asyncio_future()\n"
] |
class Question:
"""A question to be prompted.
This is an internal class. Questions should be created using the
predefined questions (e.g. text or password)."""
def __init__(self, application: prompt_toolkit.Application):
self.application = application
self.should_skip_question = False
self.default = None
def ask(self,
patch_stdout: bool = False,
kbi_msg: str = DEFAULT_KBI_MESSAGE) -> Any:
"""Ask the question synchronously and return user response."""
if self.should_skip_question:
return self.default
try:
return self.unsafe_ask(patch_stdout)
except KeyboardInterrupt:
print("\n{}\n".format(kbi_msg))
return None
def unsafe_ask(self, patch_stdout: bool = False) -> Any:
"""Ask the question synchronously and return user response.
Does not catch keyboard interrupts."""
if patch_stdout:
with prompt_toolkit.patch_stdout.patch_stdout():
return self.application.run()
else:
return self.application.run()
def skip_if(self, condition: bool, default: Any = None) -> 'Question':
"""Skip the question if flag is set and return the default instead."""
self.should_skip_question = condition
self.default = default
return self
async def unsafe_ask_async(self, patch_stdout: bool = False) -> Any:
"""Ask the question using asyncio and return user response.
Does not catch keyboard interrupts."""
if not utils.ACTIVATED_ASYNC_MODE:
await utils.activate_prompt_toolkit_async_mode()
if patch_stdout:
# with prompt_toolkit.patch_stdout.patch_stdout():
return await self.application.run_async().to_asyncio_future()
else:
return await self.application.run_async().to_asyncio_future()
|
tmbo/questionary
|
questionary/question.py
|
Question.ask
|
python
|
def ask(self,
patch_stdout: bool = False,
kbi_msg: str = DEFAULT_KBI_MESSAGE) -> Any:
if self.should_skip_question:
return self.default
try:
return self.unsafe_ask(patch_stdout)
except KeyboardInterrupt:
print("\n{}\n".format(kbi_msg))
return None
|
Ask the question synchronously and return user response.
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/question.py#L36-L48
|
[
"def unsafe_ask(self, patch_stdout: bool = False) -> Any:\n \"\"\"Ask the question synchronously and return user response.\n\n Does not catch keyboard interrupts.\"\"\"\n\n if patch_stdout:\n with prompt_toolkit.patch_stdout.patch_stdout():\n return self.application.run()\n else:\n return self.application.run()\n"
] |
class Question:
"""A question to be prompted.
This is an internal class. Questions should be created using the
predefined questions (e.g. text or password)."""
def __init__(self, application: prompt_toolkit.Application):
self.application = application
self.should_skip_question = False
self.default = None
async def ask_async(self,
patch_stdout: bool = False,
kbi_msg: str = DEFAULT_KBI_MESSAGE) -> Any:
"""Ask the question using asyncio and return user response."""
if self.should_skip_question:
return self.default
try:
sys.stdout.flush()
return await self.unsafe_ask_async(patch_stdout)
except KeyboardInterrupt:
print("\n{}\n".format(kbi_msg))
return None
def unsafe_ask(self, patch_stdout: bool = False) -> Any:
"""Ask the question synchronously and return user response.
Does not catch keyboard interrupts."""
if patch_stdout:
with prompt_toolkit.patch_stdout.patch_stdout():
return self.application.run()
else:
return self.application.run()
def skip_if(self, condition: bool, default: Any = None) -> 'Question':
"""Skip the question if flag is set and return the default instead."""
self.should_skip_question = condition
self.default = default
return self
async def unsafe_ask_async(self, patch_stdout: bool = False) -> Any:
"""Ask the question using asyncio and return user response.
Does not catch keyboard interrupts."""
if not utils.ACTIVATED_ASYNC_MODE:
await utils.activate_prompt_toolkit_async_mode()
if patch_stdout:
# with prompt_toolkit.patch_stdout.patch_stdout():
return await self.application.run_async().to_asyncio_future()
else:
return await self.application.run_async().to_asyncio_future()
|
tmbo/questionary
|
questionary/question.py
|
Question.unsafe_ask
|
python
|
def unsafe_ask(self, patch_stdout: bool = False) -> Any:
if patch_stdout:
with prompt_toolkit.patch_stdout.patch_stdout():
return self.application.run()
else:
return self.application.run()
|
Ask the question synchronously and return user response.
Does not catch keyboard interrupts.
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/question.py#L50-L59
| null |
class Question:
"""A question to be prompted.
This is an internal class. Questions should be created using the
predefined questions (e.g. text or password)."""
def __init__(self, application: prompt_toolkit.Application):
self.application = application
self.should_skip_question = False
self.default = None
async def ask_async(self,
patch_stdout: bool = False,
kbi_msg: str = DEFAULT_KBI_MESSAGE) -> Any:
"""Ask the question using asyncio and return user response."""
if self.should_skip_question:
return self.default
try:
sys.stdout.flush()
return await self.unsafe_ask_async(patch_stdout)
except KeyboardInterrupt:
print("\n{}\n".format(kbi_msg))
return None
def ask(self,
patch_stdout: bool = False,
kbi_msg: str = DEFAULT_KBI_MESSAGE) -> Any:
"""Ask the question synchronously and return user response."""
if self.should_skip_question:
return self.default
try:
return self.unsafe_ask(patch_stdout)
except KeyboardInterrupt:
print("\n{}\n".format(kbi_msg))
return None
def skip_if(self, condition: bool, default: Any = None) -> 'Question':
"""Skip the question if flag is set and return the default instead."""
self.should_skip_question = condition
self.default = default
return self
async def unsafe_ask_async(self, patch_stdout: bool = False) -> Any:
"""Ask the question using asyncio and return user response.
Does not catch keyboard interrupts."""
if not utils.ACTIVATED_ASYNC_MODE:
await utils.activate_prompt_toolkit_async_mode()
if patch_stdout:
# with prompt_toolkit.patch_stdout.patch_stdout():
return await self.application.run_async().to_asyncio_future()
else:
return await self.application.run_async().to_asyncio_future()
|
tmbo/questionary
|
questionary/question.py
|
Question.skip_if
|
python
|
def skip_if(self, condition: bool, default: Any = None) -> 'Question':
self.should_skip_question = condition
self.default = default
return self
|
Skip the question if flag is set and return the default instead.
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/question.py#L61-L66
| null |
class Question:
"""A question to be prompted.
This is an internal class. Questions should be created using the
predefined questions (e.g. text or password)."""
def __init__(self, application: prompt_toolkit.Application):
self.application = application
self.should_skip_question = False
self.default = None
async def ask_async(self,
patch_stdout: bool = False,
kbi_msg: str = DEFAULT_KBI_MESSAGE) -> Any:
"""Ask the question using asyncio and return user response."""
if self.should_skip_question:
return self.default
try:
sys.stdout.flush()
return await self.unsafe_ask_async(patch_stdout)
except KeyboardInterrupt:
print("\n{}\n".format(kbi_msg))
return None
def ask(self,
patch_stdout: bool = False,
kbi_msg: str = DEFAULT_KBI_MESSAGE) -> Any:
"""Ask the question synchronously and return user response."""
if self.should_skip_question:
return self.default
try:
return self.unsafe_ask(patch_stdout)
except KeyboardInterrupt:
print("\n{}\n".format(kbi_msg))
return None
def unsafe_ask(self, patch_stdout: bool = False) -> Any:
"""Ask the question synchronously and return user response.
Does not catch keyboard interrupts."""
if patch_stdout:
with prompt_toolkit.patch_stdout.patch_stdout():
return self.application.run()
else:
return self.application.run()
async def unsafe_ask_async(self, patch_stdout: bool = False) -> Any:
"""Ask the question using asyncio and return user response.
Does not catch keyboard interrupts."""
if not utils.ACTIVATED_ASYNC_MODE:
await utils.activate_prompt_toolkit_async_mode()
if patch_stdout:
# with prompt_toolkit.patch_stdout.patch_stdout():
return await self.application.run_async().to_asyncio_future()
else:
return await self.application.run_async().to_asyncio_future()
|
tmbo/questionary
|
questionary/question.py
|
Question.unsafe_ask_async
|
python
|
async def unsafe_ask_async(self, patch_stdout: bool = False) -> Any:
if not utils.ACTIVATED_ASYNC_MODE:
await utils.activate_prompt_toolkit_async_mode()
if patch_stdout:
# with prompt_toolkit.patch_stdout.patch_stdout():
return await self.application.run_async().to_asyncio_future()
else:
return await self.application.run_async().to_asyncio_future()
|
Ask the question using asyncio and return user response.
Does not catch keyboard interrupts.
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/question.py#L68-L80
|
[
"async def activate_prompt_toolkit_async_mode():\n \"\"\"Configure prompt toolkit to use the asyncio event loop.\n\n Needs to be async, so we use the right event loop in py 3.5\"\"\"\n from prompt_toolkit.eventloop import use_asyncio_event_loop\n\n global ACTIVATED_ASYNC_MODE\n\n # Tell prompt_toolkit to use asyncio for the event loop.\n use_asyncio_event_loop()\n ACTIVATED_ASYNC_MODE = True\n"
] |
class Question:
"""A question to be prompted.
This is an internal class. Questions should be created using the
predefined questions (e.g. text or password)."""
def __init__(self, application: prompt_toolkit.Application):
self.application = application
self.should_skip_question = False
self.default = None
async def ask_async(self,
patch_stdout: bool = False,
kbi_msg: str = DEFAULT_KBI_MESSAGE) -> Any:
"""Ask the question using asyncio and return user response."""
if self.should_skip_question:
return self.default
try:
sys.stdout.flush()
return await self.unsafe_ask_async(patch_stdout)
except KeyboardInterrupt:
print("\n{}\n".format(kbi_msg))
return None
def ask(self,
patch_stdout: bool = False,
kbi_msg: str = DEFAULT_KBI_MESSAGE) -> Any:
"""Ask the question synchronously and return user response."""
if self.should_skip_question:
return self.default
try:
return self.unsafe_ask(patch_stdout)
except KeyboardInterrupt:
print("\n{}\n".format(kbi_msg))
return None
def unsafe_ask(self, patch_stdout: bool = False) -> Any:
"""Ask the question synchronously and return user response.
Does not catch keyboard interrupts."""
if patch_stdout:
with prompt_toolkit.patch_stdout.patch_stdout():
return self.application.run()
else:
return self.application.run()
def skip_if(self, condition: bool, default: Any = None) -> 'Question':
"""Skip the question if flag is set and return the default instead."""
self.should_skip_question = condition
self.default = default
return self
|
tmbo/questionary
|
questionary/prompts/common.py
|
_fix_unecessary_blank_lines
|
python
|
def _fix_unecessary_blank_lines(ps: PromptSession) -> None:
default_container = ps.layout.container
default_buffer_window = \
default_container.get_children()[0].content.get_children()[1].content
assert isinstance(default_buffer_window, Window)
# this forces the main window to stay as small as possible, avoiding
# empty lines in selections
default_buffer_window.dont_extend_height = Always()
|
This is a fix for additional empty lines added by prompt toolkit.
This assumes the layout of the default session doesn't change, if it
does, this needs an update.
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/prompts/common.py#L269-L283
| null |
# -*- coding: utf-8 -*-
import inspect
from prompt_toolkit import PromptSession
from prompt_toolkit.filters import IsDone, Always
from prompt_toolkit.layout import (
FormattedTextControl, Layout, HSplit,
ConditionalContainer, Window)
from prompt_toolkit.validation import Validator, ValidationError
from typing import Optional, Any, List, Text, Dict, Union, Callable, Tuple
from questionary.constants import (
SELECTED_POINTER, INDICATOR_SELECTED,
INDICATOR_UNSELECTED)
class Choice(object):
"""One choice in a select, rawselect or checkbox."""
def __init__(self,
title: Text,
value: Optional[Any] = None,
disabled: Optional[Text] = None,
checked: bool = False,
shortcut_key: Optional[Text] = None) -> None:
"""Create a new choice.
Args:
title: Text shown in the selection list.
value: Value returned, when the choice is selected.
disabled: If set, the choice can not be selected by the user. The
provided text is used to explain, why the selection is
disabled.
checked: Preselect this choice when displaying the options.
shortcut_key: Key shortcut used to select this item.
"""
self.disabled = disabled
self.value = value if value is not None else title
self.title = title
self.checked = checked
if shortcut_key is not None:
self.shortcut_key = str(shortcut_key)
else:
self.shortcut_key = None
@staticmethod
def build(c: Union[Text, 'Choice', Dict[Text, Any]]) -> 'Choice':
"""Create a choice object from different representations."""
if isinstance(c, Choice):
return c
elif isinstance(c, str):
return Choice(c, c)
else:
return Choice(c.get('name'),
c.get('value'),
c.get('disabled', None),
c.get('checked'),
c.get('key'))
class Separator(Choice):
"""Used to space/separate choices group."""
default_separator = '-' * 15
def __init__(self, line: Optional[Text] = None):
"""Create a separator in a list.
Args:
line: Text to be displayed in the list, by default uses `---`.
"""
self.line = line or self.default_separator
super(Separator, self).__init__(self.line, None, "-")
class InquirerControl(FormattedTextControl):
SHORTCUT_KEYS = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z']
def __init__(self,
choices: List[Union[Text, Choice, Dict[Text, Any]]],
default: Optional[Any] = None,
use_indicator: bool = True,
use_shortcuts: bool = False,
**kwargs):
self.use_indicator = use_indicator
self.use_shortcuts = use_shortcuts
self.default = default
self.pointed_at = None
self.is_answered = False
self.choices = []
self.selected_options = []
self._init_choices(choices)
self._assign_shortcut_keys()
super(InquirerControl, self).__init__(self._get_choice_tokens,
**kwargs)
def _is_selected(self, choice):
return ((choice.checked or
choice.value == self.default and
self.default is not None) and
not choice.disabled)
def _assign_shortcut_keys(self):
available_shortcuts = self.SHORTCUT_KEYS[:]
# first, make sure we do not double assign a shortcut
for c in self.choices:
if c.shortcut_key is not None:
if c.shortcut_key in available_shortcuts:
available_shortcuts.remove(c.shortcut_key)
else:
raise ValueError("Invalid shortcut '{}'"
"for choice '{}'. Shortcuts "
"should be single characters or numbers. "
"Make sure that all your shortcuts are "
"unique.".format(c.shortcut_key, c.title))
shortcut_idx = 0
for c in self.choices:
if c.shortcut_key is None and not c.disabled:
c.shortcut_key = available_shortcuts[shortcut_idx]
shortcut_idx += 1
if shortcut_idx == len(available_shortcuts):
break # fail gracefully if we run out of shortcuts
def _init_choices(self, choices):
# helper to convert from question format to internal format
self.choices = []
for i, c in enumerate(choices):
choice = Choice.build(c)
if self._is_selected(choice):
self.selected_options.append(choice.value)
if self.pointed_at is None and not choice.disabled:
# find the first (available) choice
self.pointed_at = i
self.choices.append(choice)
@property
def choice_count(self):
return len(self.choices)
def _get_choice_tokens(self):
tokens = []
def append(index, choice):
# use value to check if option has been selected
selected = (choice.value in self.selected_options)
if index == self.pointed_at:
tokens.append(("class:pointer",
" {} ".format(SELECTED_POINTER)))
tokens.append(("[SetCursorPosition]", ""))
else:
tokens.append(("", " "))
if isinstance(choice, Separator):
tokens.append(("class:separator", "{}".format(choice.title)))
elif choice.disabled: # disabled
tokens.append(("class:selected" if selected else "",
"- {} ({})".format(choice.title,
choice.disabled)))
else:
if self.use_shortcuts and choice.shortcut_key is not None:
shortcut = "{}) ".format(choice.shortcut_key)
else:
shortcut = ""
if selected:
if self.use_indicator:
indicator = INDICATOR_SELECTED + " "
else:
indicator = ""
tokens.append(("class:selected",
"{}{}{}".format(indicator,
shortcut,
choice.title)))
else:
if self.use_indicator:
indicator = INDICATOR_UNSELECTED + " "
else:
indicator = ""
tokens.append(("",
"{}{}{}".format(indicator,
shortcut,
choice.title)))
tokens.append(("", "\n"))
# prepare the select choices
for i, c in enumerate(self.choices):
append(i, c)
if self.use_shortcuts:
tokens.append(("",
' Answer: {}'
''.format(self.get_pointed_at().shortcut_key)))
else:
tokens.pop() # Remove last newline.
return tokens
def is_selection_a_separator(self):
selected = self.choices[self.pointed_at]
return isinstance(selected, Separator)
def is_selection_disabled(self):
return self.choices[self.pointed_at].disabled
def is_selection_valid(self):
return (not self.is_selection_disabled() and
not self.is_selection_a_separator())
def select_previous(self):
self.pointed_at = (self.pointed_at - 1) % self.choice_count
def select_next(self):
self.pointed_at = (self.pointed_at + 1) % self.choice_count
def get_pointed_at(self):
return self.choices[self.pointed_at]
def get_selected_values(self):
# get values not labels
return [c
for c in self.choices
if (not isinstance(c, Separator) and
c.value in self.selected_options)]
def build_validator(validate: Any) -> Optional[Validator]:
if validate:
if inspect.isclass(validate) and issubclass(validate, Validator):
return validate()
elif callable(validate):
class _InputValidator(Validator):
def validate(self, document):
verdict = validate(document.text)
if verdict is not True:
if verdict is False:
verdict = 'invalid input'
raise ValidationError(
message=verdict,
cursor_position=len(document.text))
return _InputValidator()
return None
def create_inquirer_layout(
ic: InquirerControl,
get_prompt_tokens: Callable[[], List[Tuple[Text, Text]]],
**kwargs) -> Layout:
"""Create a layout combining question and inquirer selection."""
ps = PromptSession(get_prompt_tokens, reserve_space_for_menu=0, **kwargs)
_fix_unecessary_blank_lines(ps)
return Layout(HSplit([
ps.layout.container,
ConditionalContainer(
Window(ic),
filter=~IsDone()
)
]))
|
tmbo/questionary
|
questionary/prompts/common.py
|
create_inquirer_layout
|
python
|
def create_inquirer_layout(
ic: InquirerControl,
get_prompt_tokens: Callable[[], List[Tuple[Text, Text]]],
**kwargs) -> Layout:
ps = PromptSession(get_prompt_tokens, reserve_space_for_menu=0, **kwargs)
_fix_unecessary_blank_lines(ps)
return Layout(HSplit([
ps.layout.container,
ConditionalContainer(
Window(ic),
filter=~IsDone()
)
]))
|
Create a layout combining question and inquirer selection.
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/prompts/common.py#L286-L302
|
[
"def _fix_unecessary_blank_lines(ps: PromptSession) -> None:\n \"\"\"This is a fix for additional empty lines added by prompt toolkit.\n\n This assumes the layout of the default session doesn't change, if it\n does, this needs an update.\"\"\"\n\n default_container = ps.layout.container\n\n default_buffer_window = \\\n default_container.get_children()[0].content.get_children()[1].content\n\n assert isinstance(default_buffer_window, Window)\n # this forces the main window to stay as small as possible, avoiding\n # empty lines in selections\n default_buffer_window.dont_extend_height = Always()\n"
] |
# -*- coding: utf-8 -*-
import inspect
from prompt_toolkit import PromptSession
from prompt_toolkit.filters import IsDone, Always
from prompt_toolkit.layout import (
FormattedTextControl, Layout, HSplit,
ConditionalContainer, Window)
from prompt_toolkit.validation import Validator, ValidationError
from typing import Optional, Any, List, Text, Dict, Union, Callable, Tuple
from questionary.constants import (
SELECTED_POINTER, INDICATOR_SELECTED,
INDICATOR_UNSELECTED)
class Choice(object):
"""One choice in a select, rawselect or checkbox."""
def __init__(self,
title: Text,
value: Optional[Any] = None,
disabled: Optional[Text] = None,
checked: bool = False,
shortcut_key: Optional[Text] = None) -> None:
"""Create a new choice.
Args:
title: Text shown in the selection list.
value: Value returned, when the choice is selected.
disabled: If set, the choice can not be selected by the user. The
provided text is used to explain, why the selection is
disabled.
checked: Preselect this choice when displaying the options.
shortcut_key: Key shortcut used to select this item.
"""
self.disabled = disabled
self.value = value if value is not None else title
self.title = title
self.checked = checked
if shortcut_key is not None:
self.shortcut_key = str(shortcut_key)
else:
self.shortcut_key = None
@staticmethod
def build(c: Union[Text, 'Choice', Dict[Text, Any]]) -> 'Choice':
"""Create a choice object from different representations."""
if isinstance(c, Choice):
return c
elif isinstance(c, str):
return Choice(c, c)
else:
return Choice(c.get('name'),
c.get('value'),
c.get('disabled', None),
c.get('checked'),
c.get('key'))
class Separator(Choice):
"""Used to space/separate choices group."""
default_separator = '-' * 15
def __init__(self, line: Optional[Text] = None):
"""Create a separator in a list.
Args:
line: Text to be displayed in the list, by default uses `---`.
"""
self.line = line or self.default_separator
super(Separator, self).__init__(self.line, None, "-")
class InquirerControl(FormattedTextControl):
SHORTCUT_KEYS = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z']
def __init__(self,
choices: List[Union[Text, Choice, Dict[Text, Any]]],
default: Optional[Any] = None,
use_indicator: bool = True,
use_shortcuts: bool = False,
**kwargs):
self.use_indicator = use_indicator
self.use_shortcuts = use_shortcuts
self.default = default
self.pointed_at = None
self.is_answered = False
self.choices = []
self.selected_options = []
self._init_choices(choices)
self._assign_shortcut_keys()
super(InquirerControl, self).__init__(self._get_choice_tokens,
**kwargs)
def _is_selected(self, choice):
return ((choice.checked or
choice.value == self.default and
self.default is not None) and
not choice.disabled)
def _assign_shortcut_keys(self):
available_shortcuts = self.SHORTCUT_KEYS[:]
# first, make sure we do not double assign a shortcut
for c in self.choices:
if c.shortcut_key is not None:
if c.shortcut_key in available_shortcuts:
available_shortcuts.remove(c.shortcut_key)
else:
raise ValueError("Invalid shortcut '{}'"
"for choice '{}'. Shortcuts "
"should be single characters or numbers. "
"Make sure that all your shortcuts are "
"unique.".format(c.shortcut_key, c.title))
shortcut_idx = 0
for c in self.choices:
if c.shortcut_key is None and not c.disabled:
c.shortcut_key = available_shortcuts[shortcut_idx]
shortcut_idx += 1
if shortcut_idx == len(available_shortcuts):
break # fail gracefully if we run out of shortcuts
def _init_choices(self, choices):
# helper to convert from question format to internal format
self.choices = []
for i, c in enumerate(choices):
choice = Choice.build(c)
if self._is_selected(choice):
self.selected_options.append(choice.value)
if self.pointed_at is None and not choice.disabled:
# find the first (available) choice
self.pointed_at = i
self.choices.append(choice)
@property
def choice_count(self):
return len(self.choices)
def _get_choice_tokens(self):
tokens = []
def append(index, choice):
# use value to check if option has been selected
selected = (choice.value in self.selected_options)
if index == self.pointed_at:
tokens.append(("class:pointer",
" {} ".format(SELECTED_POINTER)))
tokens.append(("[SetCursorPosition]", ""))
else:
tokens.append(("", " "))
if isinstance(choice, Separator):
tokens.append(("class:separator", "{}".format(choice.title)))
elif choice.disabled: # disabled
tokens.append(("class:selected" if selected else "",
"- {} ({})".format(choice.title,
choice.disabled)))
else:
if self.use_shortcuts and choice.shortcut_key is not None:
shortcut = "{}) ".format(choice.shortcut_key)
else:
shortcut = ""
if selected:
if self.use_indicator:
indicator = INDICATOR_SELECTED + " "
else:
indicator = ""
tokens.append(("class:selected",
"{}{}{}".format(indicator,
shortcut,
choice.title)))
else:
if self.use_indicator:
indicator = INDICATOR_UNSELECTED + " "
else:
indicator = ""
tokens.append(("",
"{}{}{}".format(indicator,
shortcut,
choice.title)))
tokens.append(("", "\n"))
# prepare the select choices
for i, c in enumerate(self.choices):
append(i, c)
if self.use_shortcuts:
tokens.append(("",
' Answer: {}'
''.format(self.get_pointed_at().shortcut_key)))
else:
tokens.pop() # Remove last newline.
return tokens
def is_selection_a_separator(self):
selected = self.choices[self.pointed_at]
return isinstance(selected, Separator)
def is_selection_disabled(self):
return self.choices[self.pointed_at].disabled
def is_selection_valid(self):
return (not self.is_selection_disabled() and
not self.is_selection_a_separator())
def select_previous(self):
self.pointed_at = (self.pointed_at - 1) % self.choice_count
def select_next(self):
self.pointed_at = (self.pointed_at + 1) % self.choice_count
def get_pointed_at(self):
return self.choices[self.pointed_at]
def get_selected_values(self):
# get values not labels
return [c
for c in self.choices
if (not isinstance(c, Separator) and
c.value in self.selected_options)]
def build_validator(validate: Any) -> Optional[Validator]:
if validate:
if inspect.isclass(validate) and issubclass(validate, Validator):
return validate()
elif callable(validate):
class _InputValidator(Validator):
def validate(self, document):
verdict = validate(document.text)
if verdict is not True:
if verdict is False:
verdict = 'invalid input'
raise ValidationError(
message=verdict,
cursor_position=len(document.text))
return _InputValidator()
return None
def _fix_unecessary_blank_lines(ps: PromptSession) -> None:
"""This is a fix for additional empty lines added by prompt toolkit.
This assumes the layout of the default session doesn't change, if it
does, this needs an update."""
default_container = ps.layout.container
default_buffer_window = \
default_container.get_children()[0].content.get_children()[1].content
assert isinstance(default_buffer_window, Window)
# this forces the main window to stay as small as possible, avoiding
# empty lines in selections
default_buffer_window.dont_extend_height = Always()
|
tmbo/questionary
|
questionary/prompts/common.py
|
Choice.build
|
python
|
def build(c: Union[Text, 'Choice', Dict[Text, Any]]) -> 'Choice':
if isinstance(c, Choice):
return c
elif isinstance(c, str):
return Choice(c, c)
else:
return Choice(c.get('name'),
c.get('value'),
c.get('disabled', None),
c.get('checked'),
c.get('key'))
|
Create a choice object from different representations.
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/prompts/common.py#L52-L64
| null |
class Choice(object):
"""One choice in a select, rawselect or checkbox."""
def __init__(self,
title: Text,
value: Optional[Any] = None,
disabled: Optional[Text] = None,
checked: bool = False,
shortcut_key: Optional[Text] = None) -> None:
"""Create a new choice.
Args:
title: Text shown in the selection list.
value: Value returned, when the choice is selected.
disabled: If set, the choice can not be selected by the user. The
provided text is used to explain, why the selection is
disabled.
checked: Preselect this choice when displaying the options.
shortcut_key: Key shortcut used to select this item.
"""
self.disabled = disabled
self.value = value if value is not None else title
self.title = title
self.checked = checked
if shortcut_key is not None:
self.shortcut_key = str(shortcut_key)
else:
self.shortcut_key = None
@staticmethod
|
tmbo/questionary
|
questionary/prompts/password.py
|
password
|
python
|
def password(message: Text,
default: Text = "",
validate: Union[Type[Validator],
Callable[[Text], bool],
None] = None, # noqa
qmark: Text = DEFAULT_QUESTION_PREFIX,
style: Optional[Style] = None,
**kwargs: Any) -> Question:
return text.text(message, default, validate, qmark, style,
is_password=True, **kwargs)
|
Question the user to enter a secret text not displayed in the prompt.
This question type can be used to prompt the user for information
that should not be shown in the command line. The typed text will be
replaced with `*`.
Args:
message: Question text
default: Default value will be returned if the user just hits
enter.
validate: Require the entered value to pass a validation. The
value can not be submited until the validator accepts
it (e.g. to check minimum password length).
This can either be a function accepting the input and
returning a boolean, or an class reference to a
subclass of the prompt toolkit Validator class.
qmark: Question prefix displayed in front of the question.
By default this is a `?`
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
Returns:
Question: Question instance, ready to be prompted (using `.ask()`).
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/prompts/password.py#L12-L51
|
[
"def text(message: Text,\n default: Text = \"\",\n validate: Union[Type[Validator],\n Callable[[Text], bool],\n None] = None, # noqa\n qmark: Text = DEFAULT_QUESTION_PREFIX,\n style: Optional[Style] = None,\n **kwargs: Any) -> Question:\n \"\"\"Prompt the user to enter a free text message.\n\n This question type can be used to prompt the user for some text input.\n\n Args:\n message: Question text\n\n default: Default value will be returned if the user just hits\n enter.\n\n validate: Require the entered value to pass a validation. The\n value can not be submited until the validator accepts\n it (e.g. to check minimum password length).\n\n This can either be a function accepting the input and\n returning a boolean, or an class reference to a\n subclass of the prompt toolkit Validator class.\n\n qmark: Question prefix displayed in front of the question.\n By default this is a `?`\n\n style: A custom color and style for the question parts. You can\n configure colors as well as font types for different elements.\n\n Returns:\n Question: Question instance, ready to be prompted (using `.ask()`).\n \"\"\"\n\n merged_style = merge_styles([DEFAULT_STYLE, style])\n\n validator = build_validator(validate)\n\n def get_prompt_tokens():\n return [(\"class:qmark\", qmark),\n (\"class:question\", ' {} '.format(message))]\n\n p = PromptSession(get_prompt_tokens,\n style=merged_style,\n validator=validator,\n **kwargs)\n p.default_buffer.reset(Document(default))\n\n return Question(p.app)\n"
] |
# -*- coding: utf-8 -*-
from typing import Text, Type, Union, Callable, Optional, Any
from prompt_toolkit.styles import Style
from prompt_toolkit.validation import Validator
from questionary.question import Question
from questionary.constants import DEFAULT_QUESTION_PREFIX
from questionary.prompts import text
|
tmbo/questionary
|
questionary/prompts/select.py
|
select
|
python
|
def select(message: Text,
choices: List[Union[Text, Choice, Dict[Text, Any]]],
default: Optional[Text] = None,
qmark: Text = DEFAULT_QUESTION_PREFIX,
style: Optional[Style] = None,
use_shortcuts: bool = False,
use_indicator: bool = False,
**kwargs: Any) -> Question:
if choices is None or len(choices) == 0:
raise ValueError('A list of choices needs to be provided.')
if use_shortcuts and len(choices) > len(InquirerControl.SHORTCUT_KEYS):
raise ValueError('A list with shortcuts supports a maximum of {} '
'choices as this is the maximum number '
'of keyboard shortcuts that are available. You'
'provided {} choices!'
''.format(len(InquirerControl.SHORTCUT_KEYS),
len(choices)))
merged_style = merge_styles([DEFAULT_STYLE, style])
ic = InquirerControl(choices, default,
use_indicator=use_indicator,
use_shortcuts=use_shortcuts)
def get_prompt_tokens():
# noinspection PyListCreation
tokens = [("class:qmark", qmark),
("class:question", ' {} '.format(message))]
if ic.is_answered:
tokens.append(("class:answer", ' ' + ic.get_pointed_at().title))
else:
if use_shortcuts:
tokens.append(("class:instruction", ' (Use shortcuts)'))
else:
tokens.append(("class:instruction", ' (Use arrow keys)'))
return tokens
layout = common.create_inquirer_layout(ic, get_prompt_tokens, **kwargs)
bindings = KeyBindings()
@bindings.add(Keys.ControlQ, eager=True)
@bindings.add(Keys.ControlC, eager=True)
def _(event):
event.app.exit(exception=KeyboardInterrupt, style='class:aborting')
if use_shortcuts:
# add key bindings for choices
for i, c in enumerate(ic.choices):
if isinstance(c, Separator):
continue
# noinspection PyShadowingNames
def _reg_binding(i, keys):
# trick out late evaluation with a "function factory":
# https://stackoverflow.com/a/3431699
@bindings.add(keys, eager=True)
def select_choice(event):
ic.pointed_at = i
_reg_binding(i, c.shortcut_key)
else:
@bindings.add(Keys.Down, eager=True)
@bindings.add("j", eager=True)
def move_cursor_down(event):
ic.select_next()
while not ic.is_selection_valid():
ic.select_next()
@bindings.add(Keys.Up, eager=True)
@bindings.add("k", eager=True)
def move_cursor_up(event):
ic.select_previous()
while not ic.is_selection_valid():
ic.select_previous()
@bindings.add(Keys.ControlM, eager=True)
def set_answer(event):
ic.is_answered = True
event.app.exit(result=ic.get_pointed_at().value)
@bindings.add(Keys.Any)
def other(event):
"""Disallow inserting other text. """
pass
return Question(Application(
layout=layout,
key_bindings=bindings,
style=merged_style,
**kwargs
))
|
Prompt the user to select one item from the list of choices.
The user can only select one option.
Args:
message: Question text
choices: Items shown in the selection, this can contain `Choice` or
or `Separator` objects or simple items as strings. Passing
`Choice` objects, allows you to configure the item more
(e.g. preselecting it or disabeling it).
default: Default return value (single value).
qmark: Question prefix displayed in front of the question.
By default this is a `?`
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
use_indicator: Flag to enable the small indicator in front of the
list highlighting the current location of the selection
cursor.
use_shortcuts: Allow the user to select items from the list using
shortcuts. The shortcuts will be displayed in front of
the list items.
Returns:
Question: Question instance, ready to be prompted (using `.ask()`).
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/prompts/select.py#L25-L149
|
[
"def create_inquirer_layout(\n ic: InquirerControl,\n get_prompt_tokens: Callable[[], List[Tuple[Text, Text]]],\n **kwargs) -> Layout:\n \"\"\"Create a layout combining question and inquirer selection.\"\"\"\n\n ps = PromptSession(get_prompt_tokens, reserve_space_for_menu=0, **kwargs)\n\n _fix_unecessary_blank_lines(ps)\n\n return Layout(HSplit([\n ps.layout.container,\n ConditionalContainer(\n Window(ic),\n filter=~IsDone()\n )\n ]))\n",
"def _reg_binding(i, keys):\n # trick out late evaluation with a \"function factory\":\n # https://stackoverflow.com/a/3431699\n @bindings.add(keys, eager=True)\n def select_choice(event):\n ic.pointed_at = i\n"
] |
# -*- coding: utf-8 -*-
import time
from questionary.prompts import common
from typing import Any, Optional, Text, List, Union, Dict
from prompt_toolkit.application import Application
from prompt_toolkit.filters import IsDone, Never, Always
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout import Layout
from prompt_toolkit.layout.containers import (
ConditionalContainer,
HSplit)
from prompt_toolkit.layout.containers import Window
from prompt_toolkit.shortcuts.prompt import (
PromptSession)
from prompt_toolkit.styles import merge_styles, Style
from questionary.constants import DEFAULT_STYLE, DEFAULT_QUESTION_PREFIX
from questionary.prompts.common import InquirerControl, Separator, Choice
from questionary.question import Question
|
tmbo/questionary
|
questionary/form.py
|
form
|
python
|
def form(**kwargs: Question):
return Form(*(FormField(k, q) for k, q in kwargs.items()))
|
Create a form with multiple questions.
The parameter name of a question will be the key for the answer in
the returned dict.
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/form.py#L9-L14
| null |
from collections import namedtuple
from questionary.constants import DEFAULT_KBI_MESSAGE
from questionary.question import Question
FormField = namedtuple("FormField", ["key", "question"])
class Form:
"""Multi question prompts. Questions are asked one after another.
All the answers are returned as a dict with one entry per question."""
def __init__(self, *form_fields: FormField):
self.form_fields = form_fields
def unsafe_ask(self, patch_stdout=False):
answers = {}
for f in self.form_fields:
answers[f.key] = f.question.unsafe_ask(patch_stdout)
return answers
def ask(self, patch_stdout=False, kbi_msg=DEFAULT_KBI_MESSAGE):
try:
return self.unsafe_ask(patch_stdout)
except KeyboardInterrupt:
print('')
print(kbi_msg)
print('')
return {}
|
tmbo/questionary
|
questionary/prompt.py
|
prompt
|
python
|
def prompt(questions: List[Dict[Text, Any]],
           answers: Optional[Dict[Text, Any]] = None,
           patch_stdout: bool = False,
           true_color: bool = False,
           kbi_msg: Text = DEFAULT_KBI_MESSAGE,
           **kwargs):
    """Prompt the user for input on all the questions.

    Each question config dict must contain at least 'type' and 'name'.
    Optional per-question keys handled here: 'choices' (may be a callable
    of the answers so far), 'default' (same), 'when' (predicate deciding
    whether to ask), and 'filter' (post-processor for the answer).
    On KeyboardInterrupt, kbi_msg is printed and ``{}`` is returned.
    """
    # A single question dict is accepted as a convenience.
    if isinstance(questions, dict):
        questions = [questions]
    answers = answers or {}
    for question_config in questions:
        # import the question
        if 'type' not in question_config:
            raise PromptParameterException('type')
        if 'name' not in question_config:
            raise PromptParameterException('name')
        choices = question_config.get('choices')
        if choices is not None and callable(choices):
            # Dynamic choices: resolve them from answers collected so far.
            question_config['choices'] = choices(answers)
        _kwargs = kwargs.copy()
        _kwargs.update(question_config)
        _type = _kwargs.pop('type')
        _filter = _kwargs.pop('filter', None)
        name = _kwargs.pop('name')
        when = _kwargs.pop('when', None)
        if true_color:
            _kwargs["color_depth"] = ColorDepth.TRUE_COLOR
        try:
            if when:
                # at least a little sanity check!
                if callable(question_config['when']):
                    try:
                        if not question_config['when'](answers):
                            # Predicate says skip this question entirely.
                            continue
                    except Exception as e:
                        raise ValueError("Problem in 'when' check of {} "
                                         "question: {}".format(name, e))
                else:
                    raise ValueError("'when' needs to be function that "
                                     "accepts a dict argument")
            if _filter:
                # at least a little sanity check!
                if not callable(_filter):
                    raise ValueError("'filter' needs to be function that "
                                     "accepts an argument")
            if callable(question_config.get('default')):
                # Dynamic default: computed from the previous answers.
                _kwargs['default'] = question_config['default'](answers)
            create_question_func = prompt_by_name(_type)
            if not create_question_func:
                raise ValueError("No question type '{}' found. "
                                 "Known question types are {}."
                                 "".format(_type, ", ".join(AVAILABLE_PROMPTS)))
            # Fail early with a helpful error if a required argument
            # of the question factory was not supplied.
            missing_args = list(utils.missing_arguments(create_question_func,
                                                        _kwargs))
            if missing_args:
                raise PromptParameterException(missing_args[0])
            question = create_question_func(**_kwargs)
            answer = question.unsafe_ask(patch_stdout)
            if answer is not None:
                if _filter:
                    try:
                        answer = _filter(answer)
                    except Exception as e:
                        raise ValueError("Problem processing 'filter' of {} "
                                         "question: {}".format(name, e))
                answers[name] = answer
        except KeyboardInterrupt:
            print('')
            print(kbi_msg)
            print('')
            return {}
    return answers
|
Prompt the user for input on all the questions.
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/prompt.py#L18-L104
|
[
"def prompt_by_name(name):\n return AVAILABLE_PROMPTS.get(name)\n",
"def missing_arguments(func, argdict):\n \"\"\"Return all arguments that are missing to call func.\"\"\"\n return set(required_arguments(func)) - set(argdict.keys())\n"
] |
# -*- coding: utf-8 -*-
from prompt_toolkit.output import ColorDepth
from typing import Any, Text, Dict, Optional, List
from questionary import utils
from questionary.constants import DEFAULT_KBI_MESSAGE
from questionary.prompts import AVAILABLE_PROMPTS, prompt_by_name
class PromptParameterException(ValueError):
    """Raised when a mandatory key is missing from a question config."""

    def __init__(self, message, errors=None):
        # Build a human-readable hint naming the missing parameter and
        # forward it (plus optional error details) to ValueError.
        formatted = 'You must provide a `{}` value'.format(message)
        super(PromptParameterException, self).__init__(formatted, errors)
|
tmbo/questionary
|
questionary/prompts/confirm.py
|
confirm
|
python
|
def confirm(message: Text,
            default: bool = True,
            qmark: Text = DEFAULT_QUESTION_PREFIX,
            style: Optional[Style] = None,
            **kwargs: Any) -> Question:
    """Prompt the user to confirm or reject a yes-or-no question.

    Pressing 'y'/'n' answers immediately; plain enter returns *default*.
    Returns a Question instance ready to be prompted (using `.ask()`).
    """
    merged_style = merge_styles([DEFAULT_STYLE, style])
    # Mutable cell shared with the token renderer, so the displayed line
    # switches from the instruction to the chosen answer once a key is hit.
    status = {'answer': None}
    def get_prompt_tokens():
        tokens = []
        tokens.append(("class:qmark", qmark))
        tokens.append(("class:question", ' {} '.format(message)))
        if status['answer'] is not None:
            answer = ' {}'.format(YES if status['answer'] else NO)
            tokens.append(("class:answer", answer))
        else:
            # Order of Y/N in the hint reflects which one is the default.
            instruction = ' {}'.format(YES_OR_NO if default else NO_OR_YES)
            tokens.append(("class:instruction", instruction))
        return to_formatted_text(tokens)
    bindings = KeyBindings()
    @bindings.add(Keys.ControlQ, eager=True)
    @bindings.add(Keys.ControlC, eager=True)
    def _(event):
        # Abort the prompt on Ctrl-C / Ctrl-Q.
        event.app.exit(exception=KeyboardInterrupt, style='class:aborting')
    @bindings.add('n')
    @bindings.add('N')
    def key_n(event):
        status['answer'] = False
        event.app.exit(result=False)
    @bindings.add('y')
    @bindings.add('Y')
    def key_y(event):
        status['answer'] = True
        event.app.exit(result=True)
    @bindings.add(Keys.ControlM, eager=True)
    def set_answer(event):
        # Enter accepts the default value.
        status['answer'] = default
        event.app.exit(result=default)
    @bindings.add(Keys.Any)
    def other(event):
        """Disallow inserting other text."""
        pass
    return Question(PromptSession(get_prompt_tokens,
                                  key_bindings=bindings,
                                  style=merged_style,
                                  **kwargs).app)
|
Prompt the user to confirm or reject.
This question type can be used to prompt the user for a confirmation
of a yes-or-no question. If the user just hits enter, the default
value will be returned.
Args:
message: Question text
default: Default value will be returned if the user just hits
enter.
qmark: Question prefix displayed in front of the question.
By default this is a `?`
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
Returns:
Question: Question instance, ready to be prompted (using `.ask()`).
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/prompts/confirm.py#L16-L94
| null |
# -*- coding: utf-8 -*-
from prompt_toolkit import PromptSession
from prompt_toolkit.formatted_text import (
to_formatted_text)
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.keys import Keys
from prompt_toolkit.styles import merge_styles, Style
from typing import Optional, Text, Any
from questionary.constants import (
NO_OR_YES, YES, NO, YES_OR_NO,
DEFAULT_STYLE, DEFAULT_QUESTION_PREFIX)
from questionary.question import Question
|
tmbo/questionary
|
questionary/prompts/rawselect.py
|
rawselect
|
python
|
def rawselect(message: Text,
              choices: List[Union[Text, Choice, Dict[Text, Any]]],
              default: Optional[Text] = None,
              qmark: Text = DEFAULT_QUESTION_PREFIX,
              style: Optional[Style] = None,
              **kwargs: Any) -> Question:
    """Ask the user to select one item from a list using keyboard shortcuts.

    Thin wrapper around ``select.select`` with shortcut mode enabled;
    returns a Question instance ready to be prompted (using `.ask()`).
    """
    return select.select(message,
                         choices,
                         default=default,
                         qmark=qmark,
                         style=style,
                         use_shortcuts=True,
                         **kwargs)
|
Ask the user to select one item from a list of choices using shortcuts.
The user can only select one option.
Args:
message: Question text
choices: Items shown in the selection, this can contain `Choice` or
or `Separator` objects or simple items as strings. Passing
`Choice` objects, allows you to configure the item more
(e.g. preselecting it or disabeling it).
default: Default return value (single value).
qmark: Question prefix displayed in front of the question.
By default this is a `?`
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
Returns:
Question: Question instance, ready to be prompted (using `.ask()`).
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/prompts/rawselect.py#L12-L43
|
[
"def select(message: Text,\n choices: List[Union[Text, Choice, Dict[Text, Any]]],\n default: Optional[Text] = None,\n qmark: Text = DEFAULT_QUESTION_PREFIX,\n style: Optional[Style] = None,\n use_shortcuts: bool = False,\n use_indicator: bool = False,\n **kwargs: Any) -> Question:\n \"\"\"Prompt the user to select one item from the list of choices.\n\n The user can only select one option.\n\n Args:\n message: Question text\n\n choices: Items shown in the selection, this can contain `Choice` or\n or `Separator` objects or simple items as strings. Passing\n `Choice` objects, allows you to configure the item more\n (e.g. preselecting it or disabeling it).\n\n default: Default return value (single value).\n\n qmark: Question prefix displayed in front of the question.\n By default this is a `?`\n\n style: A custom color and style for the question parts. You can\n configure colors as well as font types for different elements.\n\n use_indicator: Flag to enable the small indicator in front of the\n list highlighting the current location of the selection\n cursor.\n\n use_shortcuts: Allow the user to select items from the list using\n shortcuts. The shortcuts will be displayed in front of\n the list items.\n Returns:\n Question: Question instance, ready to be prompted (using `.ask()`).\n \"\"\"\n if choices is None or len(choices) == 0:\n raise ValueError('A list of choices needs to be provided.')\n\n if use_shortcuts and len(choices) > len(InquirerControl.SHORTCUT_KEYS):\n raise ValueError('A list with shortcuts supports a maximum of {} '\n 'choices as this is the maximum number '\n 'of keyboard shortcuts that are available. 
You'\n 'provided {} choices!'\n ''.format(len(InquirerControl.SHORTCUT_KEYS),\n len(choices)))\n\n merged_style = merge_styles([DEFAULT_STYLE, style])\n\n ic = InquirerControl(choices, default,\n use_indicator=use_indicator,\n use_shortcuts=use_shortcuts)\n\n def get_prompt_tokens():\n # noinspection PyListCreation\n tokens = [(\"class:qmark\", qmark),\n (\"class:question\", ' {} '.format(message))]\n\n if ic.is_answered:\n tokens.append((\"class:answer\", ' ' + ic.get_pointed_at().title))\n else:\n if use_shortcuts:\n tokens.append((\"class:instruction\", ' (Use shortcuts)'))\n else:\n tokens.append((\"class:instruction\", ' (Use arrow keys)'))\n\n return tokens\n\n layout = common.create_inquirer_layout(ic, get_prompt_tokens, **kwargs)\n\n bindings = KeyBindings()\n\n @bindings.add(Keys.ControlQ, eager=True)\n @bindings.add(Keys.ControlC, eager=True)\n def _(event):\n event.app.exit(exception=KeyboardInterrupt, style='class:aborting')\n\n if use_shortcuts:\n # add key bindings for choices\n for i, c in enumerate(ic.choices):\n if isinstance(c, Separator):\n continue\n\n # noinspection PyShadowingNames\n def _reg_binding(i, keys):\n # trick out late evaluation with a \"function factory\":\n # https://stackoverflow.com/a/3431699\n @bindings.add(keys, eager=True)\n def select_choice(event):\n ic.pointed_at = i\n\n _reg_binding(i, c.shortcut_key)\n else:\n @bindings.add(Keys.Down, eager=True)\n @bindings.add(\"j\", eager=True)\n def move_cursor_down(event):\n ic.select_next()\n while not ic.is_selection_valid():\n ic.select_next()\n\n @bindings.add(Keys.Up, eager=True)\n @bindings.add(\"k\", eager=True)\n def move_cursor_up(event):\n ic.select_previous()\n while not ic.is_selection_valid():\n ic.select_previous()\n\n @bindings.add(Keys.ControlM, eager=True)\n def set_answer(event):\n ic.is_answered = True\n event.app.exit(result=ic.get_pointed_at().value)\n\n @bindings.add(Keys.Any)\n def other(event):\n \"\"\"Disallow inserting other text. 
\"\"\"\n pass\n\n return Question(Application(\n layout=layout,\n key_bindings=bindings,\n style=merged_style,\n **kwargs\n ))\n"
] |
# -*- coding: utf-8 -*-
from typing import Text, List, Optional, Any, Union, Dict
from prompt_toolkit.styles import Style
from questionary.constants import DEFAULT_QUESTION_PREFIX
from questionary.prompts import select
from questionary.prompts.common import Choice
from questionary.question import Question
|
tmbo/questionary
|
questionary/prompts/checkbox.py
|
checkbox
|
python
|
def checkbox(message: Text,
             choices: List[Union[Text, Choice, Dict[Text, Any]]],
             default: Optional[Text] = None,
             qmark: Text = DEFAULT_QUESTION_PREFIX,
             style: Optional[Style] = None,
             **kwargs: Any) -> Question:
    """Ask the user to select any number of items from a list (multiselect).

    Returns a Question whose answer is the list of selected choice values.
    """
    merged_style = merge_styles([DEFAULT_STYLE, style])
    ic = InquirerControl(choices, default)
    def get_prompt_tokens():
        # Build the formatted-text fragments for the question line.
        tokens = []
        tokens.append(("class:qmark", qmark))
        tokens.append(("class:question", ' {} '.format(message)))
        if ic.is_answered:
            nbr_selected = len(ic.selected_options)
            if nbr_selected == 0:
                tokens.append(("class:answer", ' done'))
            elif nbr_selected == 1:
                tokens.append(("class:answer",
                               ' [{}]'.format(
                                   ic.get_selected_values()[0].title)))
            else:
                tokens.append(("class:answer",
                               ' done ({} selections)'.format(
                                   nbr_selected)))
        else:
            tokens.append(("class:instruction",
                           ' (Use arrow keys to move, '
                           '<space> to select, '
                           '<a> to toggle, '
                           '<i> to invert)'))
        return tokens
    layout = common.create_inquirer_layout(ic, get_prompt_tokens, **kwargs)
    bindings = KeyBindings()
    @bindings.add(Keys.ControlQ, eager=True)
    @bindings.add(Keys.ControlC, eager=True)
    def _(event):
        # Abort the prompt on Ctrl-C / Ctrl-Q.
        event.app.exit(exception=KeyboardInterrupt, style='class:aborting')
    @bindings.add(' ', eager=True)
    def toggle(event):
        # Flip the selection state of the highlighted choice.
        pointed_choice = ic.get_pointed_at().value
        if pointed_choice in ic.selected_options:
            ic.selected_options.remove(pointed_choice)
        else:
            ic.selected_options.append(pointed_choice)
    @bindings.add('i', eager=True)
    def invert(event):
        # Select exactly the enabled choices that are currently unselected.
        inverted_selection = [c.value for c in ic.choices if
                              not isinstance(c, Separator) and
                              c.value not in ic.selected_options and
                              not c.disabled]
        ic.selected_options = inverted_selection
    @bindings.add('a', eager=True)
    def all(event):
        # Select everything; if everything was already selected, clear all.
        all_selected = True # all choices have been selected
        for c in ic.choices:
            if (not isinstance(c, Separator) and
                    c.value not in ic.selected_options and not c.disabled):
                # add missing ones
                ic.selected_options.append(c.value)
                all_selected = False
        if all_selected:
            ic.selected_options = []
    @bindings.add(Keys.Down, eager=True)
    @bindings.add("j", eager=True)
    def move_cursor_down(event):
        # Advance, skipping separators / disabled entries.
        ic.select_next()
        while not ic.is_selection_valid():
            ic.select_next()
    @bindings.add(Keys.Up, eager=True)
    @bindings.add("k", eager=True)
    def move_cursor_up(event):
        # Move back, skipping separators / disabled entries.
        ic.select_previous()
        while not ic.is_selection_valid():
            ic.select_previous()
    @bindings.add(Keys.ControlM, eager=True)
    def set_answer(event):
        # Enter finishes the prompt and returns all selected values.
        ic.is_answered = True
        event.app.exit(result=[c.value for c in ic.get_selected_values()])
    @bindings.add(Keys.Any)
    def other(event):
        """Disallow inserting other text. """
        pass
    return Question(Application(
        layout=layout,
        key_bindings=bindings,
        style=merged_style,
        **kwargs
    ))
|
Ask the user to select from a list of items.
This is a multiselect, the user can choose one, none or many of the
items.
Args:
message: Question text
choices: Items shown in the selection, this can contain `Choice` or
or `Separator` objects or simple items as strings. Passing
`Choice` objects, allows you to configure the item more
(e.g. preselecting it or disabeling it).
default: Default return value (single value). If you want to preselect
multiple items, use `Choice("foo", checked=True)` instead.
qmark: Question prefix displayed in front of the question.
By default this is a `?`
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
Returns:
Question: Question instance, ready to be prompted (using `.ask()`).
|
train
|
https://github.com/tmbo/questionary/blob/3dbaa569a0d252404d547360bee495294bbd620d/questionary/prompts/checkbox.py#L23-L150
|
[
"def create_inquirer_layout(\n ic: InquirerControl,\n get_prompt_tokens: Callable[[], List[Tuple[Text, Text]]],\n **kwargs) -> Layout:\n \"\"\"Create a layout combining question and inquirer selection.\"\"\"\n\n ps = PromptSession(get_prompt_tokens, reserve_space_for_menu=0, **kwargs)\n\n _fix_unecessary_blank_lines(ps)\n\n return Layout(HSplit([\n ps.layout.container,\n ConditionalContainer(\n Window(ic),\n filter=~IsDone()\n )\n ]))\n"
] |
# -*- coding: utf-8 -*-
from prompt_toolkit.application import Application
from prompt_toolkit.filters import IsDone
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout import Layout
from prompt_toolkit.layout.containers import (
ConditionalContainer,
HSplit)
from prompt_toolkit.layout.containers import Window
from prompt_toolkit.shortcuts.prompt import (
PromptSession)
from prompt_toolkit.styles import merge_styles, Style
from questionary.prompts import common
from typing import Text, List, Union, Dict, Any, Optional
from questionary.constants import DEFAULT_STYLE, DEFAULT_QUESTION_PREFIX
from questionary.prompts.common import Separator, InquirerControl, Choice
from questionary.question import Question
|
box/flaky
|
flaky/flaky_decorator.py
|
flaky
|
python
|
def flaky(max_runs=None, min_passes=None, rerun_filter=None):
    """Mark a test (or test class) as flaky so it may be rerun on failure."""
    # When applied bare (``@flaky`` without parentheses) the decorated
    # object itself arrives as ``max_runs``; detect that case and fall
    # back to the default attribute values.
    target = None
    if callable(max_runs):
        target = max_runs
        max_runs = None
    attributes = default_flaky_attributes(max_runs, min_passes, rerun_filter)

    def decorate(obj):
        # Stamp the flaky bookkeeping attributes onto the wrapped object.
        for attr_name, attr_value in attributes.items():
            setattr(obj, attr_name, attr_value)
        return obj

    if target is not None:
        return decorate(target)
    return decorate
|
Decorator used to mark a test as "flaky". When used in conjuction with
the flaky nosetests plugin, will cause the decorated test to be retried
until min_passes successes are achieved out of up to max_runs test runs.
:param max_runs:
The maximum number of times the decorated test will be run.
:type max_runs:
`int`
:param min_passes:
The minimum number of times the test must pass to be a success.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`nose.case.Test` or :class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
:return:
A wrapper function that includes attributes describing the flaky test.
:rtype:
`callable`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/flaky_decorator.py#L8-L56
|
[
"def default_flaky_attributes(max_runs=None, min_passes=None, rerun_filter=None):\n \"\"\"\n Returns the default flaky attributes to set on a flaky test.\n\n :param max_runs:\n The value of the FlakyNames.MAX_RUNS attribute to use.\n :type max_runs:\n `int`\n :param min_passes:\n The value of the FlakyNames.MIN_PASSES attribute to use.\n :type min_passes:\n `int`\n :param rerun_filter:\n Filter function to decide whether a test should be rerun if it fails.\n :type rerun_filter:\n `callable`\n :return:\n Default flaky attributes to set on a flaky test.\n :rtype:\n `dict`\n \"\"\"\n if max_runs is None:\n max_runs = 2\n if min_passes is None:\n min_passes = 1\n if min_passes <= 0:\n raise ValueError('min_passes must be positive')\n if max_runs < min_passes:\n raise ValueError('min_passes cannot be greater than max_runs!')\n\n return {\n FlakyNames.MAX_RUNS: max_runs,\n FlakyNames.MIN_PASSES: min_passes,\n FlakyNames.CURRENT_RUNS: 0,\n FlakyNames.CURRENT_PASSES: 0,\n FlakyNames.RERUN_FILTER: FilterWrapper(rerun_filter or _true),\n }\n",
"def wrapper(wrapped_object):\n for name, value in attrib.items():\n setattr(wrapped_object, name, value)\n return wrapped_object\n"
] |
# coding: utf-8
from __future__ import unicode_literals
from flaky.defaults import default_flaky_attributes
|
box/flaky
|
flaky/flaky_nose_plugin.py
|
FlakyPlugin.options
|
python
|
def options(self, parser, env=os.environ):
    """
    Base class override.
    Add flaky-report and force-flaky options to the nose argument parser.
    """
    # pylint:disable=dangerous-default-value
    super(FlakyPlugin, self).options(parser, env=env)
    self.add_report_option(parser.add_option)
    # Group the force-flaky switches under their own section in --help.
    group = OptionGroup(
        parser, "Force flaky", "Force all tests to be flaky.")
    self.add_force_flaky_options(group.add_option)
    parser.add_option_group(group)
|
Base class override.
Add options to the nose argument parser.
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/flaky_nose_plugin.py#L35-L46
| null |
class FlakyPlugin(_FlakyPlugin, Plugin):
"""
Plugin for nosetests that allows retrying flaky tests.
"""
name = 'flaky'
def __init__(self):
super(FlakyPlugin, self).__init__()
self._logger = logging.getLogger('nose.plugins.flaky')
self._flaky_result = None
self._nose_result = None
self._flaky_report = True
self._force_flaky = False
self._max_runs = None
self._min_passes = None
self._test_status = {}
self._tests_that_reran = set()
self._tests_that_have_been_reported = set()
def _get_stream(self, multiprocess=False):
"""
Get the stream used to store the flaky report.
If this nose run is going to use the multiprocess plugin, then use
a multiprocess-list backed StringIO proxy; otherwise, use the default
stream.
:param multiprocess:
Whether or not this test run is configured for multiprocessing.
:type multiprocess:
`bool`
:return:
The stream to use for storing the flaky report.
:rtype:
:class:`StringIO` or :class:`MultiprocessingStringIO`
"""
if multiprocess:
from flaky.multiprocess_string_io import MultiprocessingStringIO
return MultiprocessingStringIO()
return self._stream
def configure(self, options, conf):
"""Base class override."""
super(FlakyPlugin, self).configure(options, conf)
if not self.enabled:
return
is_multiprocess = int(getattr(options, 'multiprocess_workers', 0)) > 0
self._stream = self._get_stream(is_multiprocess)
self._flaky_result = TextTestResult(self._stream, [], 0)
self._flaky_report = options.flaky_report
self._flaky_success_report = options.flaky_success_report
self._force_flaky = options.force_flaky
self._max_runs = options.max_runs
self._min_passes = options.min_passes
def startTest(self, test):
"""
Base class override. Called before a test is run.
Add the test to the test status tracker, so it can potentially
be rerun during afterTest.
:param test:
The test that is going to be run.
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
self._test_status[test] = None
def afterTest(self, test):
"""
Base class override. Called after a test is run.
If the test was marked for rerun, rerun the test.
:param test:
The test that has been run.
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
if self._test_status[test]:
self._tests_that_reran.add(id(test))
test.run(self._flaky_result)
self._test_status.pop(test, None)
def _mark_test_for_rerun(self, test):
"""
Base class override. Rerun a flaky test.
In this case, don't actually rerun the test, but mark it for
rerun during afterTest.
:param test:
The test that is going to be rerun.
:type test:
:class:`nose.case.Test`
"""
self._test_status[test] = True
def handleError(self, test, err):
"""
Baseclass override. Called when a test raises an exception.
If the test isn't going to be rerun again, then report the error
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
want_error = self._handle_test_error_or_failure(test, err)
if not want_error and id(test) in self._tests_that_reran:
self._nose_result.addError(test, err)
return want_error or None
def handleFailure(self, test, err):
"""
Baseclass override. Called when a test fails.
If the test isn't going to be rerun again, then report the failure
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
want_failure = self._handle_test_error_or_failure(test, err)
if not want_failure and id(test) in self._tests_that_reran:
self._nose_result.addFailure(test, err)
return want_failure or None
def addSuccess(self, test):
"""
Baseclass override. Called when a test succeeds.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if we have achieved the required number of successes.
:param test:
The test that has succeeded
:type test:
:class:`nose.case.Test`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
will_handle = self._handle_test_success(test)
test_id = id(test)
# If this isn't a rerun, the builtin reporter is going to report it as a success
if will_handle and test_id not in self._tests_that_reran:
self._tests_that_have_been_reported.add(test_id)
# If this test hasn't already been reported as successful, then do it now
if not will_handle and test_id in self._tests_that_reran and test_id not in self._tests_that_have_been_reported:
self._nose_result.addSuccess(test)
return will_handle or None
def report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
if self._flaky_report:
self._add_flaky_report(stream)
def prepareTestResult(self, result):
"""
Baseclass override. Called right before the first test is run.
Stores the test result so that errors and failures can be reported
to the nose test result.
:param result:
The nose test result that needs to be informed of test failures.
:type result:
:class:`nose.result.TextTestResult`
"""
# pylint:disable=invalid-name
self._nose_result = result
def prepareTestCase(self, test):
"""
Baseclass override. Called right before a test case is run.
If the test class is marked flaky and the test callable is not, copy
the flaky attributes from the test class to the test callable.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
if not isinstance(test.test, Failure):
test_class = test.test
self._copy_flaky_attributes(test, test_class)
if self._force_flaky and not self._has_flaky_attributes(test):
self._make_test_flaky(
test, self._max_runs, self._min_passes)
@staticmethod
def _get_test_callable_name(test):
"""
Base class override.
"""
_, _, class_and_callable_name = test.address()
first_dot_index = class_and_callable_name.find('.')
test_callable_name = class_and_callable_name[first_dot_index + 1:]
return test_callable_name
@classmethod
def _get_test_callable(cls, test):
"""
Base class override.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test`
"""
callable_name = cls._get_test_callable_name(test)
test_callable = getattr(
test.test,
callable_name,
getattr(test.test, 'test', test.test),
)
return test_callable
|
box/flaky
|
flaky/flaky_nose_plugin.py
|
FlakyPlugin._get_stream
|
python
|
def _get_stream(self, multiprocess=False):
    """
    Get the stream used to store the flaky report.

    :param multiprocess:
        Whether or not this test run is configured for multiprocessing.
    :type multiprocess:
        `bool`
    :return:
        A multiprocess-list backed StringIO proxy when multiprocessing,
        otherwise the plugin's default stream.
    """
    if multiprocess:
        # Imported lazily so single-process runs avoid the
        # multiprocessing dependency entirely.
        from flaky.multiprocess_string_io import MultiprocessingStringIO
        return MultiprocessingStringIO()
    return self._stream
|
Get the stream used to store the flaky report.
If this nose run is going to use the multiprocess plugin, then use
a multiprocess-list backed StringIO proxy; otherwise, use the default
stream.
:param multiprocess:
Whether or not this test run is configured for multiprocessing.
:type multiprocess:
`bool`
:return:
The stream to use for storing the flaky report.
:rtype:
:class:`StringIO` or :class:`MultiprocessingStringIO`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/flaky_nose_plugin.py#L48-L67
| null |
class FlakyPlugin(_FlakyPlugin, Plugin):
"""
Plugin for nosetests that allows retrying flaky tests.
"""
name = 'flaky'
def __init__(self):
super(FlakyPlugin, self).__init__()
self._logger = logging.getLogger('nose.plugins.flaky')
self._flaky_result = None
self._nose_result = None
self._flaky_report = True
self._force_flaky = False
self._max_runs = None
self._min_passes = None
self._test_status = {}
self._tests_that_reran = set()
self._tests_that_have_been_reported = set()
def options(self, parser, env=os.environ):
"""
Base class override.
Add options to the nose argument parser.
"""
# pylint:disable=dangerous-default-value
super(FlakyPlugin, self).options(parser, env=env)
self.add_report_option(parser.add_option)
group = OptionGroup(
parser, "Force flaky", "Force all tests to be flaky.")
self.add_force_flaky_options(group.add_option)
parser.add_option_group(group)
def configure(self, options, conf):
"""Base class override."""
super(FlakyPlugin, self).configure(options, conf)
if not self.enabled:
return
is_multiprocess = int(getattr(options, 'multiprocess_workers', 0)) > 0
self._stream = self._get_stream(is_multiprocess)
self._flaky_result = TextTestResult(self._stream, [], 0)
self._flaky_report = options.flaky_report
self._flaky_success_report = options.flaky_success_report
self._force_flaky = options.force_flaky
self._max_runs = options.max_runs
self._min_passes = options.min_passes
def startTest(self, test):
"""
Base class override. Called before a test is run.
Add the test to the test status tracker, so it can potentially
be rerun during afterTest.
:param test:
The test that is going to be run.
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
self._test_status[test] = None
def afterTest(self, test):
"""
Base class override. Called after a test is run.
If the test was marked for rerun, rerun the test.
:param test:
The test that has been run.
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
if self._test_status[test]:
self._tests_that_reran.add(id(test))
test.run(self._flaky_result)
self._test_status.pop(test, None)
def _mark_test_for_rerun(self, test):
"""
Base class override. Rerun a flaky test.
In this case, don't actually rerun the test, but mark it for
rerun during afterTest.
:param test:
The test that is going to be rerun.
:type test:
:class:`nose.case.Test`
"""
self._test_status[test] = True
def handleError(self, test, err):
"""
Baseclass override. Called when a test raises an exception.
If the test isn't going to be rerun again, then report the error
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
want_error = self._handle_test_error_or_failure(test, err)
if not want_error and id(test) in self._tests_that_reran:
self._nose_result.addError(test, err)
return want_error or None
def handleFailure(self, test, err):
"""
Baseclass override. Called when a test fails.
If the test isn't going to be rerun again, then report the failure
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
want_failure = self._handle_test_error_or_failure(test, err)
if not want_failure and id(test) in self._tests_that_reran:
self._nose_result.addFailure(test, err)
return want_failure or None
def addSuccess(self, test):
"""
Baseclass override. Called when a test succeeds.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if we have achieved the required number of successes.
:param test:
The test that has succeeded
:type test:
:class:`nose.case.Test`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
will_handle = self._handle_test_success(test)
test_id = id(test)
# If this isn't a rerun, the builtin reporter is going to report it as a success
if will_handle and test_id not in self._tests_that_reran:
self._tests_that_have_been_reported.add(test_id)
# If this test hasn't already been reported as successful, then do it now
if not will_handle and test_id in self._tests_that_reran and test_id not in self._tests_that_have_been_reported:
self._nose_result.addSuccess(test)
return will_handle or None
def report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
if self._flaky_report:
self._add_flaky_report(stream)
def prepareTestResult(self, result):
"""
Baseclass override. Called right before the first test is run.
Stores the test result so that errors and failures can be reported
to the nose test result.
:param result:
The nose test result that needs to be informed of test failures.
:type result:
:class:`nose.result.TextTestResult`
"""
# pylint:disable=invalid-name
self._nose_result = result
def prepareTestCase(self, test):
"""
Baseclass override. Called right before a test case is run.
If the test class is marked flaky and the test callable is not, copy
the flaky attributes from the test class to the test callable.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
if not isinstance(test.test, Failure):
test_class = test.test
self._copy_flaky_attributes(test, test_class)
if self._force_flaky and not self._has_flaky_attributes(test):
self._make_test_flaky(
test, self._max_runs, self._min_passes)
@staticmethod
def _get_test_callable_name(test):
"""
Base class override.
"""
_, _, class_and_callable_name = test.address()
first_dot_index = class_and_callable_name.find('.')
test_callable_name = class_and_callable_name[first_dot_index + 1:]
return test_callable_name
@classmethod
def _get_test_callable(cls, test):
"""
Base class override.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test`
"""
callable_name = cls._get_test_callable_name(test)
test_callable = getattr(
test.test,
callable_name,
getattr(test.test, 'test', test.test),
)
return test_callable
|
box/flaky
|
flaky/flaky_nose_plugin.py
|
FlakyPlugin.configure
|
python
|
def configure(self, options, conf):
super(FlakyPlugin, self).configure(options, conf)
if not self.enabled:
return
is_multiprocess = int(getattr(options, 'multiprocess_workers', 0)) > 0
self._stream = self._get_stream(is_multiprocess)
self._flaky_result = TextTestResult(self._stream, [], 0)
self._flaky_report = options.flaky_report
self._flaky_success_report = options.flaky_success_report
self._force_flaky = options.force_flaky
self._max_runs = options.max_runs
self._min_passes = options.min_passes
|
Base class override.
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/flaky_nose_plugin.py#L69-L81
| null |
class FlakyPlugin(_FlakyPlugin, Plugin):
"""
Plugin for nosetests that allows retrying flaky tests.
"""
name = 'flaky'
def __init__(self):
super(FlakyPlugin, self).__init__()
self._logger = logging.getLogger('nose.plugins.flaky')
self._flaky_result = None
self._nose_result = None
self._flaky_report = True
self._force_flaky = False
self._max_runs = None
self._min_passes = None
self._test_status = {}
self._tests_that_reran = set()
self._tests_that_have_been_reported = set()
def options(self, parser, env=os.environ):
"""
Base class override.
Add options to the nose argument parser.
"""
# pylint:disable=dangerous-default-value
super(FlakyPlugin, self).options(parser, env=env)
self.add_report_option(parser.add_option)
group = OptionGroup(
parser, "Force flaky", "Force all tests to be flaky.")
self.add_force_flaky_options(group.add_option)
parser.add_option_group(group)
def _get_stream(self, multiprocess=False):
"""
Get the stream used to store the flaky report.
If this nose run is going to use the multiprocess plugin, then use
a multiprocess-list backed StringIO proxy; otherwise, use the default
stream.
:param multiprocess:
Whether or not this test run is configured for multiprocessing.
:type multiprocess:
`bool`
:return:
The stream to use for storing the flaky report.
:rtype:
:class:`StringIO` or :class:`MultiprocessingStringIO`
"""
if multiprocess:
from flaky.multiprocess_string_io import MultiprocessingStringIO
return MultiprocessingStringIO()
return self._stream
def startTest(self, test):
"""
Base class override. Called before a test is run.
Add the test to the test status tracker, so it can potentially
be rerun during afterTest.
:param test:
The test that is going to be run.
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
self._test_status[test] = None
def afterTest(self, test):
"""
Base class override. Called after a test is run.
If the test was marked for rerun, rerun the test.
:param test:
The test that has been run.
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
if self._test_status[test]:
self._tests_that_reran.add(id(test))
test.run(self._flaky_result)
self._test_status.pop(test, None)
def _mark_test_for_rerun(self, test):
"""
Base class override. Rerun a flaky test.
In this case, don't actually rerun the test, but mark it for
rerun during afterTest.
:param test:
The test that is going to be rerun.
:type test:
:class:`nose.case.Test`
"""
self._test_status[test] = True
def handleError(self, test, err):
"""
Baseclass override. Called when a test raises an exception.
If the test isn't going to be rerun again, then report the error
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
want_error = self._handle_test_error_or_failure(test, err)
if not want_error and id(test) in self._tests_that_reran:
self._nose_result.addError(test, err)
return want_error or None
def handleFailure(self, test, err):
"""
Baseclass override. Called when a test fails.
If the test isn't going to be rerun again, then report the failure
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
want_failure = self._handle_test_error_or_failure(test, err)
if not want_failure and id(test) in self._tests_that_reran:
self._nose_result.addFailure(test, err)
return want_failure or None
def addSuccess(self, test):
"""
Baseclass override. Called when a test succeeds.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if we have achieved the required number of successes.
:param test:
The test that has succeeded
:type test:
:class:`nose.case.Test`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
will_handle = self._handle_test_success(test)
test_id = id(test)
# If this isn't a rerun, the builtin reporter is going to report it as a success
if will_handle and test_id not in self._tests_that_reran:
self._tests_that_have_been_reported.add(test_id)
# If this test hasn't already been reported as successful, then do it now
if not will_handle and test_id in self._tests_that_reran and test_id not in self._tests_that_have_been_reported:
self._nose_result.addSuccess(test)
return will_handle or None
def report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
if self._flaky_report:
self._add_flaky_report(stream)
def prepareTestResult(self, result):
"""
Baseclass override. Called right before the first test is run.
Stores the test result so that errors and failures can be reported
to the nose test result.
:param result:
The nose test result that needs to be informed of test failures.
:type result:
:class:`nose.result.TextTestResult`
"""
# pylint:disable=invalid-name
self._nose_result = result
def prepareTestCase(self, test):
"""
Baseclass override. Called right before a test case is run.
If the test class is marked flaky and the test callable is not, copy
the flaky attributes from the test class to the test callable.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
if not isinstance(test.test, Failure):
test_class = test.test
self._copy_flaky_attributes(test, test_class)
if self._force_flaky and not self._has_flaky_attributes(test):
self._make_test_flaky(
test, self._max_runs, self._min_passes)
@staticmethod
def _get_test_callable_name(test):
"""
Base class override.
"""
_, _, class_and_callable_name = test.address()
first_dot_index = class_and_callable_name.find('.')
test_callable_name = class_and_callable_name[first_dot_index + 1:]
return test_callable_name
@classmethod
def _get_test_callable(cls, test):
"""
Base class override.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test`
"""
callable_name = cls._get_test_callable_name(test)
test_callable = getattr(
test.test,
callable_name,
getattr(test.test, 'test', test.test),
)
return test_callable
|
box/flaky
|
flaky/flaky_nose_plugin.py
|
FlakyPlugin.handleError
|
python
|
def handleError(self, test, err):
# pylint:disable=invalid-name
want_error = self._handle_test_error_or_failure(test, err)
if not want_error and id(test) in self._tests_that_reran:
self._nose_result.addError(test, err)
return want_error or None
|
Baseclass override. Called when a test raises an exception.
If the test isn't going to be rerun again, then report the error
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/flaky_nose_plugin.py#L129-L153
| null |
class FlakyPlugin(_FlakyPlugin, Plugin):
"""
Plugin for nosetests that allows retrying flaky tests.
"""
name = 'flaky'
def __init__(self):
super(FlakyPlugin, self).__init__()
self._logger = logging.getLogger('nose.plugins.flaky')
self._flaky_result = None
self._nose_result = None
self._flaky_report = True
self._force_flaky = False
self._max_runs = None
self._min_passes = None
self._test_status = {}
self._tests_that_reran = set()
self._tests_that_have_been_reported = set()
def options(self, parser, env=os.environ):
"""
Base class override.
Add options to the nose argument parser.
"""
# pylint:disable=dangerous-default-value
super(FlakyPlugin, self).options(parser, env=env)
self.add_report_option(parser.add_option)
group = OptionGroup(
parser, "Force flaky", "Force all tests to be flaky.")
self.add_force_flaky_options(group.add_option)
parser.add_option_group(group)
def _get_stream(self, multiprocess=False):
"""
Get the stream used to store the flaky report.
If this nose run is going to use the multiprocess plugin, then use
a multiprocess-list backed StringIO proxy; otherwise, use the default
stream.
:param multiprocess:
Whether or not this test run is configured for multiprocessing.
:type multiprocess:
`bool`
:return:
The stream to use for storing the flaky report.
:rtype:
:class:`StringIO` or :class:`MultiprocessingStringIO`
"""
if multiprocess:
from flaky.multiprocess_string_io import MultiprocessingStringIO
return MultiprocessingStringIO()
return self._stream
def configure(self, options, conf):
"""Base class override."""
super(FlakyPlugin, self).configure(options, conf)
if not self.enabled:
return
is_multiprocess = int(getattr(options, 'multiprocess_workers', 0)) > 0
self._stream = self._get_stream(is_multiprocess)
self._flaky_result = TextTestResult(self._stream, [], 0)
self._flaky_report = options.flaky_report
self._flaky_success_report = options.flaky_success_report
self._force_flaky = options.force_flaky
self._max_runs = options.max_runs
self._min_passes = options.min_passes
def startTest(self, test):
"""
Base class override. Called before a test is run.
Add the test to the test status tracker, so it can potentially
be rerun during afterTest.
:param test:
The test that is going to be run.
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
self._test_status[test] = None
def afterTest(self, test):
"""
Base class override. Called after a test is run.
If the test was marked for rerun, rerun the test.
:param test:
The test that has been run.
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
if self._test_status[test]:
self._tests_that_reran.add(id(test))
test.run(self._flaky_result)
self._test_status.pop(test, None)
def _mark_test_for_rerun(self, test):
"""
Base class override. Rerun a flaky test.
In this case, don't actually rerun the test, but mark it for
rerun during afterTest.
:param test:
The test that is going to be rerun.
:type test:
:class:`nose.case.Test`
"""
self._test_status[test] = True
def handleFailure(self, test, err):
"""
Baseclass override. Called when a test fails.
If the test isn't going to be rerun again, then report the failure
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
want_failure = self._handle_test_error_or_failure(test, err)
if not want_failure and id(test) in self._tests_that_reran:
self._nose_result.addFailure(test, err)
return want_failure or None
def addSuccess(self, test):
"""
Baseclass override. Called when a test succeeds.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if we have achieved the required number of successes.
:param test:
The test that has succeeded
:type test:
:class:`nose.case.Test`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
will_handle = self._handle_test_success(test)
test_id = id(test)
# If this isn't a rerun, the builtin reporter is going to report it as a success
if will_handle and test_id not in self._tests_that_reran:
self._tests_that_have_been_reported.add(test_id)
# If this test hasn't already been reported as successful, then do it now
if not will_handle and test_id in self._tests_that_reran and test_id not in self._tests_that_have_been_reported:
self._nose_result.addSuccess(test)
return will_handle or None
def report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
if self._flaky_report:
self._add_flaky_report(stream)
def prepareTestResult(self, result):
"""
Baseclass override. Called right before the first test is run.
Stores the test result so that errors and failures can be reported
to the nose test result.
:param result:
The nose test result that needs to be informed of test failures.
:type result:
:class:`nose.result.TextTestResult`
"""
# pylint:disable=invalid-name
self._nose_result = result
def prepareTestCase(self, test):
"""
Baseclass override. Called right before a test case is run.
If the test class is marked flaky and the test callable is not, copy
the flaky attributes from the test class to the test callable.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
if not isinstance(test.test, Failure):
test_class = test.test
self._copy_flaky_attributes(test, test_class)
if self._force_flaky and not self._has_flaky_attributes(test):
self._make_test_flaky(
test, self._max_runs, self._min_passes)
@staticmethod
def _get_test_callable_name(test):
"""
Base class override.
"""
_, _, class_and_callable_name = test.address()
first_dot_index = class_and_callable_name.find('.')
test_callable_name = class_and_callable_name[first_dot_index + 1:]
return test_callable_name
@classmethod
def _get_test_callable(cls, test):
"""
Base class override.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test`
"""
callable_name = cls._get_test_callable_name(test)
test_callable = getattr(
test.test,
callable_name,
getattr(test.test, 'test', test.test),
)
return test_callable
|
box/flaky
|
flaky/flaky_nose_plugin.py
|
FlakyPlugin.handleFailure
|
python
|
def handleFailure(self, test, err):
# pylint:disable=invalid-name
want_failure = self._handle_test_error_or_failure(test, err)
if not want_failure and id(test) in self._tests_that_reran:
self._nose_result.addFailure(test, err)
return want_failure or None
|
Baseclass override. Called when a test fails.
If the test isn't going to be rerun again, then report the failure
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/flaky_nose_plugin.py#L155-L179
| null |
class FlakyPlugin(_FlakyPlugin, Plugin):
"""
Plugin for nosetests that allows retrying flaky tests.
"""
name = 'flaky'
def __init__(self):
super(FlakyPlugin, self).__init__()
self._logger = logging.getLogger('nose.plugins.flaky')
self._flaky_result = None
self._nose_result = None
self._flaky_report = True
self._force_flaky = False
self._max_runs = None
self._min_passes = None
self._test_status = {}
self._tests_that_reran = set()
self._tests_that_have_been_reported = set()
def options(self, parser, env=os.environ):
"""
Base class override.
Add options to the nose argument parser.
"""
# pylint:disable=dangerous-default-value
super(FlakyPlugin, self).options(parser, env=env)
self.add_report_option(parser.add_option)
group = OptionGroup(
parser, "Force flaky", "Force all tests to be flaky.")
self.add_force_flaky_options(group.add_option)
parser.add_option_group(group)
def _get_stream(self, multiprocess=False):
"""
Get the stream used to store the flaky report.
If this nose run is going to use the multiprocess plugin, then use
a multiprocess-list backed StringIO proxy; otherwise, use the default
stream.
:param multiprocess:
Whether or not this test run is configured for multiprocessing.
:type multiprocess:
`bool`
:return:
The stream to use for storing the flaky report.
:rtype:
:class:`StringIO` or :class:`MultiprocessingStringIO`
"""
if multiprocess:
from flaky.multiprocess_string_io import MultiprocessingStringIO
return MultiprocessingStringIO()
return self._stream
def configure(self, options, conf):
"""Base class override."""
super(FlakyPlugin, self).configure(options, conf)
if not self.enabled:
return
is_multiprocess = int(getattr(options, 'multiprocess_workers', 0)) > 0
self._stream = self._get_stream(is_multiprocess)
self._flaky_result = TextTestResult(self._stream, [], 0)
self._flaky_report = options.flaky_report
self._flaky_success_report = options.flaky_success_report
self._force_flaky = options.force_flaky
self._max_runs = options.max_runs
self._min_passes = options.min_passes
def startTest(self, test):
"""
Base class override. Called before a test is run.
Add the test to the test status tracker, so it can potentially
be rerun during afterTest.
:param test:
The test that is going to be run.
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
self._test_status[test] = None
def afterTest(self, test):
"""
Base class override. Called after a test is run.
If the test was marked for rerun, rerun the test.
:param test:
The test that has been run.
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
if self._test_status[test]:
self._tests_that_reran.add(id(test))
test.run(self._flaky_result)
self._test_status.pop(test, None)
def _mark_test_for_rerun(self, test):
"""
Base class override. Rerun a flaky test.
In this case, don't actually rerun the test, but mark it for
rerun during afterTest.
:param test:
The test that is going to be rerun.
:type test:
:class:`nose.case.Test`
"""
self._test_status[test] = True
def handleError(self, test, err):
"""
Baseclass override. Called when a test raises an exception.
If the test isn't going to be rerun again, then report the error
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
want_error = self._handle_test_error_or_failure(test, err)
if not want_error and id(test) in self._tests_that_reran:
self._nose_result.addError(test, err)
return want_error or None
def addSuccess(self, test):
"""
Baseclass override. Called when a test succeeds.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if we have achieved the required number of successes.
:param test:
The test that has succeeded
:type test:
:class:`nose.case.Test`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
will_handle = self._handle_test_success(test)
test_id = id(test)
# If this isn't a rerun, the builtin reporter is going to report it as a success
if will_handle and test_id not in self._tests_that_reran:
self._tests_that_have_been_reported.add(test_id)
# If this test hasn't already been reported as successful, then do it now
if not will_handle and test_id in self._tests_that_reran and test_id not in self._tests_that_have_been_reported:
self._nose_result.addSuccess(test)
return will_handle or None
def report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
if self._flaky_report:
self._add_flaky_report(stream)
def prepareTestResult(self, result):
"""
Baseclass override. Called right before the first test is run.
Stores the test result so that errors and failures can be reported
to the nose test result.
:param result:
The nose test result that needs to be informed of test failures.
:type result:
:class:`nose.result.TextTestResult`
"""
# pylint:disable=invalid-name
self._nose_result = result
def prepareTestCase(self, test):
"""
Baseclass override. Called right before a test case is run.
If the test class is marked flaky and the test callable is not, copy
the flaky attributes from the test class to the test callable.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
if not isinstance(test.test, Failure):
test_class = test.test
self._copy_flaky_attributes(test, test_class)
if self._force_flaky and not self._has_flaky_attributes(test):
self._make_test_flaky(
test, self._max_runs, self._min_passes)
@staticmethod
def _get_test_callable_name(test):
"""
Base class override.
"""
_, _, class_and_callable_name = test.address()
first_dot_index = class_and_callable_name.find('.')
test_callable_name = class_and_callable_name[first_dot_index + 1:]
return test_callable_name
@classmethod
def _get_test_callable(cls, test):
"""
Base class override.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test`
"""
callable_name = cls._get_test_callable_name(test)
test_callable = getattr(
test.test,
callable_name,
getattr(test.test, 'test', test.test),
)
return test_callable
|
box/flaky
|
flaky/flaky_nose_plugin.py
|
FlakyPlugin.addSuccess
|
python
|
def addSuccess(self, test):
# pylint:disable=invalid-name
will_handle = self._handle_test_success(test)
test_id = id(test)
# If this isn't a rerun, the builtin reporter is going to report it as a success
if will_handle and test_id not in self._tests_that_reran:
self._tests_that_have_been_reported.add(test_id)
# If this test hasn't already been reported as successful, then do it now
if not will_handle and test_id in self._tests_that_reran and test_id not in self._tests_that_have_been_reported:
self._nose_result.addSuccess(test)
return will_handle or None
|
Baseclass override. Called when a test succeeds.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if we have achieved the required number of successes.
:param test:
The test that has succeeded
:type test:
:class:`nose.case.Test`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/flaky_nose_plugin.py#L181-L210
| null |
class FlakyPlugin(_FlakyPlugin, Plugin):
"""
Plugin for nosetests that allows retrying flaky tests.
"""
name = 'flaky'
def __init__(self):
super(FlakyPlugin, self).__init__()
self._logger = logging.getLogger('nose.plugins.flaky')
self._flaky_result = None
self._nose_result = None
self._flaky_report = True
self._force_flaky = False
self._max_runs = None
self._min_passes = None
self._test_status = {}
self._tests_that_reran = set()
self._tests_that_have_been_reported = set()
def options(self, parser, env=os.environ):
"""
Base class override.
Add options to the nose argument parser.
"""
# pylint:disable=dangerous-default-value
super(FlakyPlugin, self).options(parser, env=env)
self.add_report_option(parser.add_option)
group = OptionGroup(
parser, "Force flaky", "Force all tests to be flaky.")
self.add_force_flaky_options(group.add_option)
parser.add_option_group(group)
def _get_stream(self, multiprocess=False):
"""
Get the stream used to store the flaky report.
If this nose run is going to use the multiprocess plugin, then use
a multiprocess-list backed StringIO proxy; otherwise, use the default
stream.
:param multiprocess:
Whether or not this test run is configured for multiprocessing.
:type multiprocess:
`bool`
:return:
The stream to use for storing the flaky report.
:rtype:
:class:`StringIO` or :class:`MultiprocessingStringIO`
"""
if multiprocess:
from flaky.multiprocess_string_io import MultiprocessingStringIO
return MultiprocessingStringIO()
return self._stream
def configure(self, options, conf):
"""Base class override."""
super(FlakyPlugin, self).configure(options, conf)
if not self.enabled:
return
is_multiprocess = int(getattr(options, 'multiprocess_workers', 0)) > 0
self._stream = self._get_stream(is_multiprocess)
self._flaky_result = TextTestResult(self._stream, [], 0)
self._flaky_report = options.flaky_report
self._flaky_success_report = options.flaky_success_report
self._force_flaky = options.force_flaky
self._max_runs = options.max_runs
self._min_passes = options.min_passes
def startTest(self, test):
"""
Base class override. Called before a test is run.
Add the test to the test status tracker, so it can potentially
be rerun during afterTest.
:param test:
The test that is going to be run.
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
self._test_status[test] = None
def afterTest(self, test):
"""
Base class override. Called after a test is run.
If the test was marked for rerun, rerun the test.
:param test:
The test that has been run.
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
if self._test_status[test]:
self._tests_that_reran.add(id(test))
test.run(self._flaky_result)
self._test_status.pop(test, None)
def _mark_test_for_rerun(self, test):
"""
Base class override. Rerun a flaky test.
In this case, don't actually rerun the test, but mark it for
rerun during afterTest.
:param test:
The test that is going to be rerun.
:type test:
:class:`nose.case.Test`
"""
self._test_status[test] = True
def handleError(self, test, err):
"""
Baseclass override. Called when a test raises an exception.
If the test isn't going to be rerun again, then report the error
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
want_error = self._handle_test_error_or_failure(test, err)
if not want_error and id(test) in self._tests_that_reran:
self._nose_result.addError(test, err)
return want_error or None
def handleFailure(self, test, err):
"""
Baseclass override. Called when a test fails.
If the test isn't going to be rerun again, then report the failure
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
want_failure = self._handle_test_error_or_failure(test, err)
if not want_failure and id(test) in self._tests_that_reran:
self._nose_result.addFailure(test, err)
return want_failure or None
def report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
if self._flaky_report:
self._add_flaky_report(stream)
def prepareTestResult(self, result):
"""
Baseclass override. Called right before the first test is run.
Stores the test result so that errors and failures can be reported
to the nose test result.
:param result:
The nose test result that needs to be informed of test failures.
:type result:
:class:`nose.result.TextTestResult`
"""
# pylint:disable=invalid-name
self._nose_result = result
def prepareTestCase(self, test):
"""
Baseclass override. Called right before a test case is run.
If the test class is marked flaky and the test callable is not, copy
the flaky attributes from the test class to the test callable.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
# pylint:disable=invalid-name
if not isinstance(test.test, Failure):
test_class = test.test
self._copy_flaky_attributes(test, test_class)
if self._force_flaky and not self._has_flaky_attributes(test):
self._make_test_flaky(
test, self._max_runs, self._min_passes)
@staticmethod
def _get_test_callable_name(test):
"""
Base class override.
"""
_, _, class_and_callable_name = test.address()
first_dot_index = class_and_callable_name.find('.')
test_callable_name = class_and_callable_name[first_dot_index + 1:]
return test_callable_name
@classmethod
def _get_test_callable(cls, test):
"""
Base class override.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test`
"""
callable_name = cls._get_test_callable_name(test)
test_callable = getattr(
test.test,
callable_name,
getattr(test.test, 'test', test.test),
)
return test_callable
|
box/flaky
|
flaky/defaults.py
|
default_flaky_attributes
|
python
|
def default_flaky_attributes(max_runs=None, min_passes=None, rerun_filter=None):
if max_runs is None:
max_runs = 2
if min_passes is None:
min_passes = 1
if min_passes <= 0:
raise ValueError('min_passes must be positive')
if max_runs < min_passes:
raise ValueError('min_passes cannot be greater than max_runs!')
return {
FlakyNames.MAX_RUNS: max_runs,
FlakyNames.MIN_PASSES: min_passes,
FlakyNames.CURRENT_RUNS: 0,
FlakyNames.CURRENT_PASSES: 0,
FlakyNames.RERUN_FILTER: FilterWrapper(rerun_filter or _true),
}
|
Returns the default flaky attributes to set on a flaky test.
:param max_runs:
The value of the FlakyNames.MAX_RUNS attribute to use.
:type max_runs:
`int`
:param min_passes:
The value of the FlakyNames.MIN_PASSES attribute to use.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
:type rerun_filter:
`callable`
:return:
Default flaky attributes to set on a flaky test.
:rtype:
`dict`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/defaults.py#L27-L63
| null |
# coding: utf-8
from flaky.names import FlakyNames
def _true(*args):
"""
Default rerun filter function that always returns True.
"""
# pylint:disable=unused-argument
return True
class FilterWrapper(object):
"""
Filter function wrapper. Expected to be called as though it's a filter
function. Since @flaky adds attributes to a decorated class, Python wants
to turn a bare function into an unbound method, which is not what we want.
"""
def __init__(self, rerun_filter):
self._filter = rerun_filter
def __call__(self, *args, **kwargs):
return self._filter(*args, **kwargs)
|
box/flaky
|
flaky/utils.py
|
ensure_unicode_string
|
python
|
def ensure_unicode_string(obj):
try:
return unicode_type(obj)
except UnicodeDecodeError:
if hasattr(obj, 'decode'):
return obj.decode('utf-8', 'replace')
return str(obj).decode('utf-8', 'replace')
|
Return a unicode string representation of the given obj.
:param obj:
The obj we want to represent in unicode
:type obj:
varies
:rtype:
`unicode`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/utils.py#L12-L28
| null |
# coding: utf-8
from __future__ import unicode_literals
# pylint:disable=invalid-name
try:
unicode_type = unicode
except NameError:
unicode_type = str
|
box/flaky
|
flaky/_flaky_plugin.py
|
_FlakyPlugin._report_final_failure
|
python
|
def _report_final_failure(self, err, flaky, name):
min_passes = flaky[FlakyNames.MIN_PASSES]
current_passes = flaky[FlakyNames.CURRENT_PASSES]
message = self._failure_message.format(
current_passes,
min_passes,
)
self._log_test_failure(name, err, message)
|
Report that the test has failed too many times to pass at
least min_passes times.
By default, this means that the test has failed twice.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/_flaky_plugin.py#L50-L76
| null |
class _FlakyPlugin(object):
_retry_failure_message = ' failed ({0} runs remaining out of {1}).'
_failure_message = ' failed; it passed {0} out of the required {1} times.'
_not_rerun_message = ' failed and was not selected for rerun.'
def __init__(self):
super(_FlakyPlugin, self).__init__()
self._stream = StringIO()
self._flaky_success_report = True
@property
def stream(self):
"""
Returns the stream used for building the flaky report.
Anything written to this stream before the end of the test run
will be written to the flaky report.
:return:
The stream used for building the flaky report.
:rtype:
:class:`StringIO`
"""
return self._stream
def _log_test_failure(self, test_callable_name, err, message):
"""
Add messaging about a test failure to the stream, which will be
printed by the plugin's report method.
"""
formatted_exception_info = ''.join(format_exception(*err)).replace('\n', '\n\t').rstrip()
self._stream.writelines([
ensure_unicode_string(test_callable_name),
ensure_unicode_string(message),
ensure_unicode_string(formatted_exception_info),
'\n',
])
def _log_intermediate_failure(self, err, flaky, name):
"""
Report that the test has failed, but still has reruns left.
Then rerun the test.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
max_runs = flaky[FlakyNames.MAX_RUNS]
runs_left = max_runs - flaky[FlakyNames.CURRENT_RUNS]
message = self._retry_failure_message.format(
runs_left,
max_runs,
)
self._log_test_failure(name, err, message)
def _should_handle_test_error_or_failure(self, test):
"""
Whether or not flaky should handle a test error or failure.
Only handle tests marked @flaky.
Count remaining retries and compare with number of required successes that have not yet been achieved.
This method may be called multiple times for the same test run, so it has no side effects.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test needs to be rerun; False, otherwise.
:rtype:
`bool`
"""
if not self._has_flaky_attributes(test):
return False
flaky_attributes = self._get_flaky_attributes(test)
flaky_attributes[FlakyNames.CURRENT_RUNS] += 1
has_failed = self._has_flaky_test_failed(flaky_attributes)
return not has_failed
def _will_handle_test_error_or_failure(self, test, name, err):
"""
Whether or not flaky will handle a test error or failure.
Returns True if the plugin should handle the test result, and
the `rerun_filter` returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The name of the test that has raised an error
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun by flaky; False, otherwise.
:rtype:
`bool`
"""
return self._should_handle_test_error_or_failure(test) and self._should_rerun_test(test, name, err)
def _handle_test_error_or_failure(self, test, err):
"""
Handle a flaky test error or failure.
Returning True from this method keeps the test runner from reporting
the test as a failure; this way we can retry and only report as a
failure if we are out of retries.
This method may only be called once per test run; it changes persisted flaky attributes.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun;
False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
if self._has_flaky_attributes(test):
self._add_flaky_test_failure(test, err)
should_handle = self._should_handle_test_error_or_failure(test)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if should_handle:
flaky_attributes = self._get_flaky_attributes(test)
if self._should_rerun_test(test, name, err):
self._log_intermediate_failure(err, flaky_attributes, name)
self._mark_test_for_rerun(test)
return True
self._log_test_failure(name, err, self._not_rerun_message)
return False
flaky_attributes = self._get_flaky_attributes(test)
self._report_final_failure(err, flaky_attributes, name)
return False
def _should_rerun_test(self, test, name, err):
"""
Whether or not a test should be rerun.
This is a pass-through to the test's rerun filter.
A flaky test will only be rerun if it hasn't failed too many
times to succeed at least min_passes times, and if
this method returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The test name
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
Whether flaky should rerun this test.
:rtype:
`bool`
"""
rerun_filter = self._get_flaky_attribute(test, FlakyNames.RERUN_FILTER)
return rerun_filter(err, name, test, self)
def _mark_test_for_rerun(self, test):
"""
Mark a flaky test for rerun.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`Function`
"""
raise NotImplementedError # pragma: no cover
def _should_handle_test_success(self, test):
if not self._has_flaky_attributes(test):
return False
flaky = self._get_flaky_attributes(test)
flaky[FlakyNames.CURRENT_PASSES] += 1
flaky[FlakyNames.CURRENT_RUNS] += 1
return not self._has_flaky_test_succeeded(flaky)
def _handle_test_success(self, test):
"""
Handle a flaky test success.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if the test has passed the required number of times.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test will be rerun; False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
need_reruns = self._should_handle_test_success(test)
if self._has_flaky_attributes(test):
flaky = self._get_flaky_attributes(test)
min_passes = flaky[FlakyNames.MIN_PASSES]
passes = flaky[FlakyNames.CURRENT_PASSES] + 1
self._set_flaky_attribute(test, FlakyNames.CURRENT_PASSES, passes)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if self._flaky_success_report:
self._stream.writelines([
ensure_unicode_string(name),
' passed {} out of the required {} times. '.format(
passes,
min_passes,
),
])
if need_reruns:
self._stream.write(
'Running test again until it passes {} times.\n'.format(
min_passes,
)
)
else:
self._stream.write('Success!\n')
if need_reruns:
self._mark_test_for_rerun(test)
return need_reruns
@staticmethod
def add_report_option(add_option):
"""
Add an option to the test runner to suppress the flaky report.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--no-flaky-report',
action='store_false',
dest='flaky_report',
default=True,
help="Suppress the report at the end of the "
"run detailing flaky test results.",
)
add_option(
'--no-success-flaky-report',
action='store_false',
dest='flaky_success_report',
default=True,
help="Suppress reporting flaky test successes"
"in the report at the end of the "
"run detailing flaky test results.",
)
@staticmethod
def add_force_flaky_options(add_option):
"""
Add options to the test runner that force all tests to be flaky.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--force-flaky',
action="store_true",
dest="force_flaky",
default=False,
help="If this option is specified, we will treat all tests as "
"flaky."
)
add_option(
'--max-runs',
action="store",
dest="max_runs",
type=int,
default=2,
help="If --force-flaky is specified, we will run each test at "
"most this many times (unless the test has its own flaky "
"decorator)."
)
add_option(
'--min-passes',
action="store",
dest="min_passes",
type=int,
default=1,
help="If --force-flaky is specified, we will run each test at "
"least this many times (unless the test has its own flaky "
"decorator)."
)
def _add_flaky_report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
value = self._stream.getvalue()
# If everything succeeded and --no-success-flaky-report is specified
# don't print anything.
if not self._flaky_success_report and not value:
return
stream.write('===Flaky Test Report===\n\n')
# Python 2 will write to the stderr stream as a byte string, whereas
# Python 3 will write to the stream as text. Only encode into a byte
# string if the write tries to encode it first and raises a
# UnicodeEncodeError.
try:
stream.write(value)
except UnicodeEncodeError:
stream.write(value.encode('utf-8', 'replace'))
stream.write('\n===End Flaky Test Report===\n')
@classmethod
def _copy_flaky_attributes(cls, test, test_class):
"""
Copy flaky attributes from the test callable or class to the test.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
test_callable = cls._get_test_callable(test)
if test_callable is None:
return
for attr, value in cls._get_flaky_attributes(test_class).items():
already_set = hasattr(test, attr)
if already_set:
continue
attr_on_callable = getattr(test_callable, attr, None)
if attr_on_callable is not None:
cls._set_flaky_attribute(test, attr, attr_on_callable)
elif value is not None:
cls._set_flaky_attribute(test, attr, value)
@staticmethod
def _get_flaky_attribute(test_item, flaky_attribute):
"""
Gets an attribute describing the flaky test.
:param test_item:
The test method from which to get the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to get
:type flaky_attribute:
`unicode`
:return:
The test callable's attribute, or None if the test
callable doesn't have that attribute.
:rtype:
varies
"""
return getattr(test_item, flaky_attribute, None)
@staticmethod
def _set_flaky_attribute(test_item, flaky_attribute, value):
"""
Sets an attribute on a flaky test. Uses magic __dict__ since setattr
doesn't work for bound methods.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
:param value:
The value to set the test callable's attribute to.
:type value:
varies
"""
test_item.__dict__[flaky_attribute] = value
@classmethod
def _increment_flaky_attribute(cls, test_item, flaky_attribute):
"""
Increments the value of an attribute on a flaky test.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
"""
cls._set_flaky_attribute(test_item, flaky_attribute, cls._get_flaky_attribute(test_item, flaky_attribute) + 1)
@classmethod
def _has_flaky_attributes(cls, test):
"""
Returns True if the test callable in question is marked as flaky.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`bool`
"""
current_runs = cls._get_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
return current_runs is not None
@classmethod
def _get_flaky_attributes(cls, test_item):
"""
Get all the flaky related attributes from the test.
:param test_item:
The test callable from which to get the flaky related attributes.
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`dict` of `unicode` to varies
"""
return {
attr: cls._get_flaky_attribute(
test_item,
attr,
) for attr in FlakyNames()
}
@classmethod
def _add_flaky_test_failure(cls, test, err):
"""
Store test error information on the test callable.
:param test:
The flaky test on which to update the flaky attributes.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
"""
errs = getattr(test, FlakyNames.CURRENT_ERRORS, None) or []
cls._set_flaky_attribute(test, FlakyNames.CURRENT_ERRORS, errs)
errs.append(err)
@classmethod
def _has_flaky_test_failed(cls, flaky):
"""
Whether or not the flaky test has failed
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as failure; False if
it should be rerun.
:rtype:
`bool`
"""
max_runs, current_runs, min_passes, current_passes = (
flaky[FlakyNames.MAX_RUNS],
flaky[FlakyNames.CURRENT_RUNS],
flaky[FlakyNames.MIN_PASSES],
flaky[FlakyNames.CURRENT_PASSES],
)
runs_left = max_runs - current_runs
passes_needed = min_passes - current_passes
no_retry = passes_needed > runs_left
return no_retry and not cls._has_flaky_test_succeeded(flaky)
@staticmethod
def _has_flaky_test_succeeded(flaky):
"""
Whether or not the flaky test has succeeded
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as success; False if
it should be rerun.
:rtype:
`bool`
"""
return flaky[FlakyNames.CURRENT_PASSES] >= flaky[FlakyNames.MIN_PASSES]
@classmethod
def _get_test_callable(cls, test):
"""
Get the test callable, from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The test declaration, callable and name that is being run
:rtype:
`callable`
"""
raise NotImplementedError # pragma: no cover
@staticmethod
def _get_test_callable_name(test):
"""
Get the name of the test callable from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The name of the test callable that is being run by the test
:rtype:
`unicode`
"""
raise NotImplementedError # pragma: no cover
@classmethod
def _make_test_flaky(cls, test, max_runs=None, min_passes=None, rerun_filter=None):
"""
Make a given test flaky.
:param test:
The test in question.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param max_runs:
The value of the FlakyNames.MAX_RUNS attribute to use.
:type max_runs:
`int`
:param min_passes:
The value of the FlakyNames.MIN_PASSES attribute to use.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`nose.case.Test` or :class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
"""
attrib_dict = defaults.default_flaky_attributes(max_runs, min_passes, rerun_filter)
for attr, value in attrib_dict.items():
cls._set_flaky_attribute(test, attr, value)
|
box/flaky
|
flaky/_flaky_plugin.py
|
_FlakyPlugin._log_intermediate_failure
|
python
|
def _log_intermediate_failure(self, err, flaky, name):
max_runs = flaky[FlakyNames.MAX_RUNS]
runs_left = max_runs - flaky[FlakyNames.CURRENT_RUNS]
message = self._retry_failure_message.format(
runs_left,
max_runs,
)
self._log_test_failure(name, err, message)
|
Report that the test has failed, but still has reruns left.
Then rerun the test.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/_flaky_plugin.py#L78-L102
| null |
class _FlakyPlugin(object):
_retry_failure_message = ' failed ({0} runs remaining out of {1}).'
_failure_message = ' failed; it passed {0} out of the required {1} times.'
_not_rerun_message = ' failed and was not selected for rerun.'
def __init__(self):
super(_FlakyPlugin, self).__init__()
self._stream = StringIO()
self._flaky_success_report = True
@property
def stream(self):
"""
Returns the stream used for building the flaky report.
Anything written to this stream before the end of the test run
will be written to the flaky report.
:return:
The stream used for building the flaky report.
:rtype:
:class:`StringIO`
"""
return self._stream
def _log_test_failure(self, test_callable_name, err, message):
"""
Add messaging about a test failure to the stream, which will be
printed by the plugin's report method.
"""
formatted_exception_info = ''.join(format_exception(*err)).replace('\n', '\n\t').rstrip()
self._stream.writelines([
ensure_unicode_string(test_callable_name),
ensure_unicode_string(message),
ensure_unicode_string(formatted_exception_info),
'\n',
])
def _report_final_failure(self, err, flaky, name):
"""
Report that the test has failed too many times to pass at
least min_passes times.
By default, this means that the test has failed twice.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
min_passes = flaky[FlakyNames.MIN_PASSES]
current_passes = flaky[FlakyNames.CURRENT_PASSES]
message = self._failure_message.format(
current_passes,
min_passes,
)
self._log_test_failure(name, err, message)
def _should_handle_test_error_or_failure(self, test):
"""
Whether or not flaky should handle a test error or failure.
Only handle tests marked @flaky.
Count remaining retries and compare with number of required successes that have not yet been achieved.
This method may be called multiple times for the same test run, so it has no side effects.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test needs to be rerun; False, otherwise.
:rtype:
`bool`
"""
if not self._has_flaky_attributes(test):
return False
flaky_attributes = self._get_flaky_attributes(test)
flaky_attributes[FlakyNames.CURRENT_RUNS] += 1
has_failed = self._has_flaky_test_failed(flaky_attributes)
return not has_failed
def _will_handle_test_error_or_failure(self, test, name, err):
"""
Whether or not flaky will handle a test error or failure.
Returns True if the plugin should handle the test result, and
the `rerun_filter` returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The name of the test that has raised an error
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun by flaky; False, otherwise.
:rtype:
`bool`
"""
return self._should_handle_test_error_or_failure(test) and self._should_rerun_test(test, name, err)
def _handle_test_error_or_failure(self, test, err):
"""
Handle a flaky test error or failure.
Returning True from this method keeps the test runner from reporting
the test as a failure; this way we can retry and only report as a
failure if we are out of retries.
This method may only be called once per test run; it changes persisted flaky attributes.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun;
False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
if self._has_flaky_attributes(test):
self._add_flaky_test_failure(test, err)
should_handle = self._should_handle_test_error_or_failure(test)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if should_handle:
flaky_attributes = self._get_flaky_attributes(test)
if self._should_rerun_test(test, name, err):
self._log_intermediate_failure(err, flaky_attributes, name)
self._mark_test_for_rerun(test)
return True
self._log_test_failure(name, err, self._not_rerun_message)
return False
flaky_attributes = self._get_flaky_attributes(test)
self._report_final_failure(err, flaky_attributes, name)
return False
def _should_rerun_test(self, test, name, err):
"""
Whether or not a test should be rerun.
This is a pass-through to the test's rerun filter.
A flaky test will only be rerun if it hasn't failed too many
times to succeed at least min_passes times, and if
this method returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The test name
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
Whether flaky should rerun this test.
:rtype:
`bool`
"""
rerun_filter = self._get_flaky_attribute(test, FlakyNames.RERUN_FILTER)
return rerun_filter(err, name, test, self)
def _mark_test_for_rerun(self, test):
"""
Mark a flaky test for rerun.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`Function`
"""
raise NotImplementedError # pragma: no cover
def _should_handle_test_success(self, test):
if not self._has_flaky_attributes(test):
return False
flaky = self._get_flaky_attributes(test)
flaky[FlakyNames.CURRENT_PASSES] += 1
flaky[FlakyNames.CURRENT_RUNS] += 1
return not self._has_flaky_test_succeeded(flaky)
def _handle_test_success(self, test):
"""
Handle a flaky test success.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if the test has passed the required number of times.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test will be rerun; False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
need_reruns = self._should_handle_test_success(test)
if self._has_flaky_attributes(test):
flaky = self._get_flaky_attributes(test)
min_passes = flaky[FlakyNames.MIN_PASSES]
passes = flaky[FlakyNames.CURRENT_PASSES] + 1
self._set_flaky_attribute(test, FlakyNames.CURRENT_PASSES, passes)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if self._flaky_success_report:
self._stream.writelines([
ensure_unicode_string(name),
' passed {} out of the required {} times. '.format(
passes,
min_passes,
),
])
if need_reruns:
self._stream.write(
'Running test again until it passes {} times.\n'.format(
min_passes,
)
)
else:
self._stream.write('Success!\n')
if need_reruns:
self._mark_test_for_rerun(test)
return need_reruns
@staticmethod
def add_report_option(add_option):
"""
Add an option to the test runner to suppress the flaky report.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--no-flaky-report',
action='store_false',
dest='flaky_report',
default=True,
help="Suppress the report at the end of the "
"run detailing flaky test results.",
)
add_option(
'--no-success-flaky-report',
action='store_false',
dest='flaky_success_report',
default=True,
help="Suppress reporting flaky test successes"
"in the report at the end of the "
"run detailing flaky test results.",
)
@staticmethod
def add_force_flaky_options(add_option):
"""
Add options to the test runner that force all tests to be flaky.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--force-flaky',
action="store_true",
dest="force_flaky",
default=False,
help="If this option is specified, we will treat all tests as "
"flaky."
)
add_option(
'--max-runs',
action="store",
dest="max_runs",
type=int,
default=2,
help="If --force-flaky is specified, we will run each test at "
"most this many times (unless the test has its own flaky "
"decorator)."
)
add_option(
'--min-passes',
action="store",
dest="min_passes",
type=int,
default=1,
help="If --force-flaky is specified, we will run each test at "
"least this many times (unless the test has its own flaky "
"decorator)."
)
def _add_flaky_report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
value = self._stream.getvalue()
# If everything succeeded and --no-success-flaky-report is specified
# don't print anything.
if not self._flaky_success_report and not value:
return
stream.write('===Flaky Test Report===\n\n')
# Python 2 will write to the stderr stream as a byte string, whereas
# Python 3 will write to the stream as text. Only encode into a byte
# string if the write tries to encode it first and raises a
# UnicodeEncodeError.
try:
stream.write(value)
except UnicodeEncodeError:
stream.write(value.encode('utf-8', 'replace'))
stream.write('\n===End Flaky Test Report===\n')
@classmethod
def _copy_flaky_attributes(cls, test, test_class):
"""
Copy flaky attributes from the test callable or class to the test.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
test_callable = cls._get_test_callable(test)
if test_callable is None:
return
for attr, value in cls._get_flaky_attributes(test_class).items():
already_set = hasattr(test, attr)
if already_set:
continue
attr_on_callable = getattr(test_callable, attr, None)
if attr_on_callable is not None:
cls._set_flaky_attribute(test, attr, attr_on_callable)
elif value is not None:
cls._set_flaky_attribute(test, attr, value)
@staticmethod
def _get_flaky_attribute(test_item, flaky_attribute):
"""
Gets an attribute describing the flaky test.
:param test_item:
The test method from which to get the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to get
:type flaky_attribute:
`unicode`
:return:
The test callable's attribute, or None if the test
callable doesn't have that attribute.
:rtype:
varies
"""
return getattr(test_item, flaky_attribute, None)
@staticmethod
def _set_flaky_attribute(test_item, flaky_attribute, value):
"""
Sets an attribute on a flaky test. Uses magic __dict__ since setattr
doesn't work for bound methods.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
:param value:
The value to set the test callable's attribute to.
:type value:
varies
"""
test_item.__dict__[flaky_attribute] = value
@classmethod
def _increment_flaky_attribute(cls, test_item, flaky_attribute):
"""
Increments the value of an attribute on a flaky test.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
"""
cls._set_flaky_attribute(test_item, flaky_attribute, cls._get_flaky_attribute(test_item, flaky_attribute) + 1)
@classmethod
def _has_flaky_attributes(cls, test):
"""
Returns True if the test callable in question is marked as flaky.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`bool`
"""
current_runs = cls._get_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
return current_runs is not None
@classmethod
def _get_flaky_attributes(cls, test_item):
"""
Get all the flaky related attributes from the test.
:param test_item:
The test callable from which to get the flaky related attributes.
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`dict` of `unicode` to varies
"""
return {
attr: cls._get_flaky_attribute(
test_item,
attr,
) for attr in FlakyNames()
}
@classmethod
def _add_flaky_test_failure(cls, test, err):
"""
Store test error information on the test callable.
:param test:
The flaky test on which to update the flaky attributes.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
"""
errs = getattr(test, FlakyNames.CURRENT_ERRORS, None) or []
cls._set_flaky_attribute(test, FlakyNames.CURRENT_ERRORS, errs)
errs.append(err)
@classmethod
def _has_flaky_test_failed(cls, flaky):
"""
Whether or not the flaky test has failed
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as failure; False if
it should be rerun.
:rtype:
`bool`
"""
max_runs, current_runs, min_passes, current_passes = (
flaky[FlakyNames.MAX_RUNS],
flaky[FlakyNames.CURRENT_RUNS],
flaky[FlakyNames.MIN_PASSES],
flaky[FlakyNames.CURRENT_PASSES],
)
runs_left = max_runs - current_runs
passes_needed = min_passes - current_passes
no_retry = passes_needed > runs_left
return no_retry and not cls._has_flaky_test_succeeded(flaky)
@staticmethod
def _has_flaky_test_succeeded(flaky):
"""
Whether or not the flaky test has succeeded
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as success; False if
it should be rerun.
:rtype:
`bool`
"""
return flaky[FlakyNames.CURRENT_PASSES] >= flaky[FlakyNames.MIN_PASSES]
@classmethod
def _get_test_callable(cls, test):
"""
Get the test callable, from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The test declaration, callable and name that is being run
:rtype:
`callable`
"""
raise NotImplementedError # pragma: no cover
@staticmethod
def _get_test_callable_name(test):
"""
Get the name of the test callable from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The name of the test callable that is being run by the test
:rtype:
`unicode`
"""
raise NotImplementedError # pragma: no cover
@classmethod
def _make_test_flaky(cls, test, max_runs=None, min_passes=None, rerun_filter=None):
"""
Make a given test flaky.
:param test:
The test in question.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param max_runs:
The value of the FlakyNames.MAX_RUNS attribute to use.
:type max_runs:
`int`
:param min_passes:
The value of the FlakyNames.MIN_PASSES attribute to use.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`nose.case.Test` or :class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
"""
attrib_dict = defaults.default_flaky_attributes(max_runs, min_passes, rerun_filter)
for attr, value in attrib_dict.items():
cls._set_flaky_attribute(test, attr, value)
|
box/flaky
|
flaky/_flaky_plugin.py
|
_FlakyPlugin.add_report_option
|
python
|
def add_report_option(add_option):
add_option(
'--no-flaky-report',
action='store_false',
dest='flaky_report',
default=True,
help="Suppress the report at the end of the "
"run detailing flaky test results.",
)
add_option(
'--no-success-flaky-report',
action='store_false',
dest='flaky_success_report',
default=True,
help="Suppress reporting flaky test successes"
"in the report at the end of the "
"run detailing flaky test results.",
)
|
Add an option to the test runner to suppress the flaky report.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/_flaky_plugin.py#L300-L326
| null |
class _FlakyPlugin(object):
_retry_failure_message = ' failed ({0} runs remaining out of {1}).'
_failure_message = ' failed; it passed {0} out of the required {1} times.'
_not_rerun_message = ' failed and was not selected for rerun.'
def __init__(self):
super(_FlakyPlugin, self).__init__()
self._stream = StringIO()
self._flaky_success_report = True
@property
def stream(self):
"""
Returns the stream used for building the flaky report.
Anything written to this stream before the end of the test run
will be written to the flaky report.
:return:
The stream used for building the flaky report.
:rtype:
:class:`StringIO`
"""
return self._stream
def _log_test_failure(self, test_callable_name, err, message):
"""
Add messaging about a test failure to the stream, which will be
printed by the plugin's report method.
"""
formatted_exception_info = ''.join(format_exception(*err)).replace('\n', '\n\t').rstrip()
self._stream.writelines([
ensure_unicode_string(test_callable_name),
ensure_unicode_string(message),
ensure_unicode_string(formatted_exception_info),
'\n',
])
def _report_final_failure(self, err, flaky, name):
"""
Report that the test has failed too many times to pass at
least min_passes times.
By default, this means that the test has failed twice.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
min_passes = flaky[FlakyNames.MIN_PASSES]
current_passes = flaky[FlakyNames.CURRENT_PASSES]
message = self._failure_message.format(
current_passes,
min_passes,
)
self._log_test_failure(name, err, message)
def _log_intermediate_failure(self, err, flaky, name):
"""
Report that the test has failed, but still has reruns left.
Then rerun the test.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
max_runs = flaky[FlakyNames.MAX_RUNS]
runs_left = max_runs - flaky[FlakyNames.CURRENT_RUNS]
message = self._retry_failure_message.format(
runs_left,
max_runs,
)
self._log_test_failure(name, err, message)
def _should_handle_test_error_or_failure(self, test):
"""
Whether or not flaky should handle a test error or failure.
Only handle tests marked @flaky.
Count remaining retries and compare with number of required successes that have not yet been achieved.
This method may be called multiple times for the same test run, so it has no side effects.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test needs to be rerun; False, otherwise.
:rtype:
`bool`
"""
if not self._has_flaky_attributes(test):
return False
flaky_attributes = self._get_flaky_attributes(test)
flaky_attributes[FlakyNames.CURRENT_RUNS] += 1
has_failed = self._has_flaky_test_failed(flaky_attributes)
return not has_failed
def _will_handle_test_error_or_failure(self, test, name, err):
"""
Whether or not flaky will handle a test error or failure.
Returns True if the plugin should handle the test result, and
the `rerun_filter` returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The name of the test that has raised an error
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun by flaky; False, otherwise.
:rtype:
`bool`
"""
return self._should_handle_test_error_or_failure(test) and self._should_rerun_test(test, name, err)
def _handle_test_error_or_failure(self, test, err):
"""
Handle a flaky test error or failure.
Returning True from this method keeps the test runner from reporting
the test as a failure; this way we can retry and only report as a
failure if we are out of retries.
This method may only be called once per test run; it changes persisted flaky attributes.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun;
False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
if self._has_flaky_attributes(test):
self._add_flaky_test_failure(test, err)
should_handle = self._should_handle_test_error_or_failure(test)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if should_handle:
flaky_attributes = self._get_flaky_attributes(test)
if self._should_rerun_test(test, name, err):
self._log_intermediate_failure(err, flaky_attributes, name)
self._mark_test_for_rerun(test)
return True
self._log_test_failure(name, err, self._not_rerun_message)
return False
flaky_attributes = self._get_flaky_attributes(test)
self._report_final_failure(err, flaky_attributes, name)
return False
def _should_rerun_test(self, test, name, err):
"""
Whether or not a test should be rerun.
This is a pass-through to the test's rerun filter.
A flaky test will only be rerun if it hasn't failed too many
times to succeed at least min_passes times, and if
this method returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The test name
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
Whether flaky should rerun this test.
:rtype:
`bool`
"""
rerun_filter = self._get_flaky_attribute(test, FlakyNames.RERUN_FILTER)
return rerun_filter(err, name, test, self)
def _mark_test_for_rerun(self, test):
"""
Mark a flaky test for rerun.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`Function`
"""
raise NotImplementedError # pragma: no cover
def _should_handle_test_success(self, test):
if not self._has_flaky_attributes(test):
return False
flaky = self._get_flaky_attributes(test)
flaky[FlakyNames.CURRENT_PASSES] += 1
flaky[FlakyNames.CURRENT_RUNS] += 1
return not self._has_flaky_test_succeeded(flaky)
def _handle_test_success(self, test):
"""
Handle a flaky test success.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if the test has passed the required number of times.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test will be rerun; False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
need_reruns = self._should_handle_test_success(test)
if self._has_flaky_attributes(test):
flaky = self._get_flaky_attributes(test)
min_passes = flaky[FlakyNames.MIN_PASSES]
passes = flaky[FlakyNames.CURRENT_PASSES] + 1
self._set_flaky_attribute(test, FlakyNames.CURRENT_PASSES, passes)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if self._flaky_success_report:
self._stream.writelines([
ensure_unicode_string(name),
' passed {} out of the required {} times. '.format(
passes,
min_passes,
),
])
if need_reruns:
self._stream.write(
'Running test again until it passes {} times.\n'.format(
min_passes,
)
)
else:
self._stream.write('Success!\n')
if need_reruns:
self._mark_test_for_rerun(test)
return need_reruns
@staticmethod
@staticmethod
def add_force_flaky_options(add_option):
"""
Add options to the test runner that force all tests to be flaky.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--force-flaky',
action="store_true",
dest="force_flaky",
default=False,
help="If this option is specified, we will treat all tests as "
"flaky."
)
add_option(
'--max-runs',
action="store",
dest="max_runs",
type=int,
default=2,
help="If --force-flaky is specified, we will run each test at "
"most this many times (unless the test has its own flaky "
"decorator)."
)
add_option(
'--min-passes',
action="store",
dest="min_passes",
type=int,
default=1,
help="If --force-flaky is specified, we will run each test at "
"least this many times (unless the test has its own flaky "
"decorator)."
)
def _add_flaky_report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
value = self._stream.getvalue()
# If everything succeeded and --no-success-flaky-report is specified
# don't print anything.
if not self._flaky_success_report and not value:
return
stream.write('===Flaky Test Report===\n\n')
# Python 2 will write to the stderr stream as a byte string, whereas
# Python 3 will write to the stream as text. Only encode into a byte
# string if the write tries to encode it first and raises a
# UnicodeEncodeError.
try:
stream.write(value)
except UnicodeEncodeError:
stream.write(value.encode('utf-8', 'replace'))
stream.write('\n===End Flaky Test Report===\n')
@classmethod
def _copy_flaky_attributes(cls, test, test_class):
"""
Copy flaky attributes from the test callable or class to the test.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
test_callable = cls._get_test_callable(test)
if test_callable is None:
return
for attr, value in cls._get_flaky_attributes(test_class).items():
already_set = hasattr(test, attr)
if already_set:
continue
attr_on_callable = getattr(test_callable, attr, None)
if attr_on_callable is not None:
cls._set_flaky_attribute(test, attr, attr_on_callable)
elif value is not None:
cls._set_flaky_attribute(test, attr, value)
@staticmethod
def _get_flaky_attribute(test_item, flaky_attribute):
"""
Gets an attribute describing the flaky test.
:param test_item:
The test method from which to get the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to get
:type flaky_attribute:
`unicode`
:return:
The test callable's attribute, or None if the test
callable doesn't have that attribute.
:rtype:
varies
"""
return getattr(test_item, flaky_attribute, None)
@staticmethod
def _set_flaky_attribute(test_item, flaky_attribute, value):
"""
Sets an attribute on a flaky test. Uses magic __dict__ since setattr
doesn't work for bound methods.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
:param value:
The value to set the test callable's attribute to.
:type value:
varies
"""
test_item.__dict__[flaky_attribute] = value
@classmethod
def _increment_flaky_attribute(cls, test_item, flaky_attribute):
"""
Increments the value of an attribute on a flaky test.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
"""
cls._set_flaky_attribute(test_item, flaky_attribute, cls._get_flaky_attribute(test_item, flaky_attribute) + 1)
@classmethod
def _has_flaky_attributes(cls, test):
"""
Returns True if the test callable in question is marked as flaky.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`bool`
"""
current_runs = cls._get_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
return current_runs is not None
@classmethod
def _get_flaky_attributes(cls, test_item):
"""
Get all the flaky related attributes from the test.
:param test_item:
The test callable from which to get the flaky related attributes.
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`dict` of `unicode` to varies
"""
return {
attr: cls._get_flaky_attribute(
test_item,
attr,
) for attr in FlakyNames()
}
@classmethod
def _add_flaky_test_failure(cls, test, err):
"""
Store test error information on the test callable.
:param test:
The flaky test on which to update the flaky attributes.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
"""
errs = getattr(test, FlakyNames.CURRENT_ERRORS, None) or []
cls._set_flaky_attribute(test, FlakyNames.CURRENT_ERRORS, errs)
errs.append(err)
@classmethod
def _has_flaky_test_failed(cls, flaky):
    """
    Whether or not the flaky test has failed for good.

    :param flaky:
        Dictionary of flaky attributes
    :type flaky:
        `dict` of `unicode` to varies
    :return:
        True if the flaky test should be marked as failure; False if
        it should be rerun.
    :rtype:
        `bool`
    """
    remaining_runs = flaky[FlakyNames.MAX_RUNS] - flaky[FlakyNames.CURRENT_RUNS]
    outstanding_passes = flaky[FlakyNames.MIN_PASSES] - flaky[FlakyNames.CURRENT_PASSES]
    # Out of retries once the passes still needed cannot fit into the
    # runs that are left; a test that already succeeded never fails.
    out_of_retries = outstanding_passes > remaining_runs
    return out_of_retries and not cls._has_flaky_test_succeeded(flaky)
@staticmethod
def _has_flaky_test_succeeded(flaky):
"""
Whether or not the flaky test has succeeded
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as success; False if
it should be rerun.
:rtype:
`bool`
"""
return flaky[FlakyNames.CURRENT_PASSES] >= flaky[FlakyNames.MIN_PASSES]
@classmethod
def _get_test_callable(cls, test):
"""
Get the test callable, from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The test declaration, callable and name that is being run
:rtype:
`callable`
"""
raise NotImplementedError # pragma: no cover
@staticmethod
def _get_test_callable_name(test):
"""
Get the name of the test callable from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The name of the test callable that is being run by the test
:rtype:
`unicode`
"""
raise NotImplementedError # pragma: no cover
@classmethod
def _make_test_flaky(cls, test, max_runs=None, min_passes=None, rerun_filter=None):
"""
Make a given test flaky.
:param test:
The test in question.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param max_runs:
The value of the FlakyNames.MAX_RUNS attribute to use.
:type max_runs:
`int`
:param min_passes:
The value of the FlakyNames.MIN_PASSES attribute to use.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`nose.case.Test` or :class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
"""
attrib_dict = defaults.default_flaky_attributes(max_runs, min_passes, rerun_filter)
for attr, value in attrib_dict.items():
cls._set_flaky_attribute(test, attr, value)
|
box/flaky
|
flaky/_flaky_plugin.py
|
_FlakyPlugin.add_force_flaky_options
|
python
|
def add_force_flaky_options(add_option):
add_option(
'--force-flaky',
action="store_true",
dest="force_flaky",
default=False,
help="If this option is specified, we will treat all tests as "
"flaky."
)
add_option(
'--max-runs',
action="store",
dest="max_runs",
type=int,
default=2,
help="If --force-flaky is specified, we will run each test at "
"most this many times (unless the test has its own flaky "
"decorator)."
)
add_option(
'--min-passes',
action="store",
dest="min_passes",
type=int,
default=1,
help="If --force-flaky is specified, we will run each test at "
"least this many times (unless the test has its own flaky "
"decorator)."
)
|
Add options to the test runner that force all tests to be flaky.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/_flaky_plugin.py#L329-L366
| null |
class _FlakyPlugin(object):
_retry_failure_message = ' failed ({0} runs remaining out of {1}).'
_failure_message = ' failed; it passed {0} out of the required {1} times.'
_not_rerun_message = ' failed and was not selected for rerun.'
def __init__(self):
super(_FlakyPlugin, self).__init__()
self._stream = StringIO()
self._flaky_success_report = True
@property
def stream(self):
"""
Returns the stream used for building the flaky report.
Anything written to this stream before the end of the test run
will be written to the flaky report.
:return:
The stream used for building the flaky report.
:rtype:
:class:`StringIO`
"""
return self._stream
def _log_test_failure(self, test_callable_name, err, message):
"""
Add messaging about a test failure to the stream, which will be
printed by the plugin's report method.

:param test_callable_name:
    The name of the test that failed.
:param err:
    Information about the test failure (from sys.exc_info()),
    i.e. a (type, value, traceback) triple.
:param message:
    Plugin-supplied text describing how the failure was handled
    (e.g. retry remaining / final failure / not selected for rerun).
"""
# Indent each traceback line with a tab so it nests under the test
# name in the report, and drop trailing whitespace.
formatted_exception_info = ''.join(format_exception(*err)).replace('\n', '\n\t').rstrip()
self._stream.writelines([
ensure_unicode_string(test_callable_name),
ensure_unicode_string(message),
ensure_unicode_string(formatted_exception_info),
'\n',
])
def _report_final_failure(self, err, flaky, name):
"""
Report that the test has failed too many times to pass at
least min_passes times.
By default, this means that the test has failed twice.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
min_passes = flaky[FlakyNames.MIN_PASSES]
current_passes = flaky[FlakyNames.CURRENT_PASSES]
message = self._failure_message.format(
current_passes,
min_passes,
)
self._log_test_failure(name, err, message)
def _log_intermediate_failure(self, err, flaky, name):
"""
Report that the test has failed, but still has reruns left.
Then rerun the test.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
max_runs = flaky[FlakyNames.MAX_RUNS]
runs_left = max_runs - flaky[FlakyNames.CURRENT_RUNS]
message = self._retry_failure_message.format(
runs_left,
max_runs,
)
self._log_test_failure(name, err, message)
def _should_handle_test_error_or_failure(self, test):
"""
Whether or not flaky should handle a test error or failure.
Only handle tests marked @flaky.
Count remaining retries and compare with number of required successes that have not yet been achieved.
This method may be called multiple times for the same test run, so it has no side effects.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test needs to be rerun; False, otherwise.
:rtype:
`bool`
"""
if not self._has_flaky_attributes(test):
return False
flaky_attributes = self._get_flaky_attributes(test)
flaky_attributes[FlakyNames.CURRENT_RUNS] += 1
has_failed = self._has_flaky_test_failed(flaky_attributes)
return not has_failed
def _will_handle_test_error_or_failure(self, test, name, err):
"""
Whether or not flaky will handle a test error or failure.
Returns True if the plugin should handle the test result, and
the `rerun_filter` returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The name of the test that has raised an error
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun by flaky; False, otherwise.
:rtype:
`bool`
"""
return self._should_handle_test_error_or_failure(test) and self._should_rerun_test(test, name, err)
def _handle_test_error_or_failure(self, test, err):
"""
Handle a flaky test error or failure.
Returning True from this method keeps the test runner from reporting
the test as a failure; this way we can retry and only report as a
failure if we are out of retries.
This method may only be called once per test run; it changes persisted flaky attributes.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun;
False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
if self._has_flaky_attributes(test):
self._add_flaky_test_failure(test, err)
should_handle = self._should_handle_test_error_or_failure(test)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if should_handle:
flaky_attributes = self._get_flaky_attributes(test)
if self._should_rerun_test(test, name, err):
self._log_intermediate_failure(err, flaky_attributes, name)
self._mark_test_for_rerun(test)
return True
self._log_test_failure(name, err, self._not_rerun_message)
return False
flaky_attributes = self._get_flaky_attributes(test)
self._report_final_failure(err, flaky_attributes, name)
return False
def _should_rerun_test(self, test, name, err):
"""
Whether or not a test should be rerun.
This is a pass-through to the test's rerun filter.
A flaky test will only be rerun if it hasn't failed too many
times to succeed at least min_passes times, and if
this method returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The test name
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
Whether flaky should rerun this test.
:rtype:
`bool`
"""
rerun_filter = self._get_flaky_attribute(test, FlakyNames.RERUN_FILTER)
return rerun_filter(err, name, test, self)
def _mark_test_for_rerun(self, test):
"""
Mark a flaky test for rerun.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`Function`
"""
raise NotImplementedError # pragma: no cover
def _should_handle_test_success(self, test):
"""
Whether or not flaky should handle a test success.

Only tests marked @flaky are handled. The pass is simulated on a
copy of the flaky attributes, so this method has no side effects
and may be called multiple times for the same run.

:param test:
    The test that has succeeded
:type test:
    :class:`nose.case.Test` or :class:`Function`
:return:
    True if the test still needs more passes (and so must be rerun);
    False otherwise.
:rtype:
    `bool`
"""
if not self._has_flaky_attributes(test):
return False
# _get_flaky_attributes builds a fresh dict, so these increments do
# not touch the persisted counters on the test itself.
flaky = self._get_flaky_attributes(test)
flaky[FlakyNames.CURRENT_PASSES] += 1
flaky[FlakyNames.CURRENT_RUNS] += 1
return not self._has_flaky_test_succeeded(flaky)
def _handle_test_success(self, test):
"""
Handle a flaky test success.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if the test has passed the required number of times.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test will be rerun; False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
need_reruns = self._should_handle_test_success(test)
if self._has_flaky_attributes(test):
flaky = self._get_flaky_attributes(test)
min_passes = flaky[FlakyNames.MIN_PASSES]
passes = flaky[FlakyNames.CURRENT_PASSES] + 1
self._set_flaky_attribute(test, FlakyNames.CURRENT_PASSES, passes)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if self._flaky_success_report:
self._stream.writelines([
ensure_unicode_string(name),
' passed {} out of the required {} times. '.format(
passes,
min_passes,
),
])
if need_reruns:
self._stream.write(
'Running test again until it passes {} times.\n'.format(
min_passes,
)
)
else:
self._stream.write('Success!\n')
if need_reruns:
self._mark_test_for_rerun(test)
return need_reruns
@staticmethod
def add_report_option(add_option):
"""
Add an option to the test runner to suppress the flaky report.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--no-flaky-report',
action='store_false',
dest='flaky_report',
default=True,
help="Suppress the report at the end of the "
"run detailing flaky test results.",
)
add_option(
'--no-success-flaky-report',
action='store_false',
dest='flaky_success_report',
default=True,
help="Suppress reporting flaky test successes"
"in the report at the end of the "
"run detailing flaky test results.",
)
@staticmethod
def _add_flaky_report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
value = self._stream.getvalue()
# If everything succeeded and --no-success-flaky-report is specified
# don't print anything.
if not self._flaky_success_report and not value:
return
stream.write('===Flaky Test Report===\n\n')
# Python 2 will write to the stderr stream as a byte string, whereas
# Python 3 will write to the stream as text. Only encode into a byte
# string if the write tries to encode it first and raises a
# UnicodeEncodeError.
try:
stream.write(value)
except UnicodeEncodeError:
stream.write(value.encode('utf-8', 'replace'))
stream.write('\n===End Flaky Test Report===\n')
@classmethod
def _copy_flaky_attributes(cls, test, test_class):
"""
Copy flaky attributes from the test callable or class to the test.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
test_callable = cls._get_test_callable(test)
if test_callable is None:
return
for attr, value in cls._get_flaky_attributes(test_class).items():
already_set = hasattr(test, attr)
if already_set:
continue
attr_on_callable = getattr(test_callable, attr, None)
if attr_on_callable is not None:
cls._set_flaky_attribute(test, attr, attr_on_callable)
elif value is not None:
cls._set_flaky_attribute(test, attr, value)
@staticmethod
def _get_flaky_attribute(test_item, flaky_attribute):
"""
Gets an attribute describing the flaky test.
:param test_item:
The test method from which to get the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to get
:type flaky_attribute:
`unicode`
:return:
The test callable's attribute, or None if the test
callable doesn't have that attribute.
:rtype:
varies
"""
return getattr(test_item, flaky_attribute, None)
@staticmethod
def _set_flaky_attribute(test_item, flaky_attribute, value):
"""
Sets an attribute on a flaky test. Uses magic __dict__ since setattr
doesn't work for bound methods.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
:param value:
The value to set the test callable's attribute to.
:type value:
varies
"""
test_item.__dict__[flaky_attribute] = value
@classmethod
def _increment_flaky_attribute(cls, test_item, flaky_attribute):
"""
Increments the value of an attribute on a flaky test.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
"""
cls._set_flaky_attribute(test_item, flaky_attribute, cls._get_flaky_attribute(test_item, flaky_attribute) + 1)
@classmethod
def _has_flaky_attributes(cls, test):
"""
Returns True if the test callable in question is marked as flaky.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`bool`
"""
current_runs = cls._get_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
return current_runs is not None
@classmethod
def _get_flaky_attributes(cls, test_item):
"""
Get all the flaky related attributes from the test.
:param test_item:
The test callable from which to get the flaky related attributes.
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`dict` of `unicode` to varies
"""
return {
attr: cls._get_flaky_attribute(
test_item,
attr,
) for attr in FlakyNames()
}
@classmethod
def _add_flaky_test_failure(cls, test, err):
"""
Store test error information on the test callable.
:param test:
The flaky test on which to update the flaky attributes.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
"""
errs = getattr(test, FlakyNames.CURRENT_ERRORS, None) or []
cls._set_flaky_attribute(test, FlakyNames.CURRENT_ERRORS, errs)
errs.append(err)
@classmethod
def _has_flaky_test_failed(cls, flaky):
"""
Whether or not the flaky test has failed
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as failure; False if
it should be rerun.
:rtype:
`bool`
"""
max_runs, current_runs, min_passes, current_passes = (
flaky[FlakyNames.MAX_RUNS],
flaky[FlakyNames.CURRENT_RUNS],
flaky[FlakyNames.MIN_PASSES],
flaky[FlakyNames.CURRENT_PASSES],
)
runs_left = max_runs - current_runs
passes_needed = min_passes - current_passes
no_retry = passes_needed > runs_left
return no_retry and not cls._has_flaky_test_succeeded(flaky)
@staticmethod
def _has_flaky_test_succeeded(flaky):
"""
Whether or not the flaky test has succeeded
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as success; False if
it should be rerun.
:rtype:
`bool`
"""
return flaky[FlakyNames.CURRENT_PASSES] >= flaky[FlakyNames.MIN_PASSES]
@classmethod
def _get_test_callable(cls, test):
"""
Get the test callable, from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The test declaration, callable and name that is being run
:rtype:
`callable`
"""
raise NotImplementedError # pragma: no cover
@staticmethod
def _get_test_callable_name(test):
"""
Get the name of the test callable from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The name of the test callable that is being run by the test
:rtype:
`unicode`
"""
raise NotImplementedError # pragma: no cover
@classmethod
def _make_test_flaky(cls, test, max_runs=None, min_passes=None, rerun_filter=None):
"""
Make a given test flaky.
:param test:
The test in question.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param max_runs:
The value of the FlakyNames.MAX_RUNS attribute to use.
:type max_runs:
`int`
:param min_passes:
The value of the FlakyNames.MIN_PASSES attribute to use.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`nose.case.Test` or :class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
"""
attrib_dict = defaults.default_flaky_attributes(max_runs, min_passes, rerun_filter)
for attr, value in attrib_dict.items():
cls._set_flaky_attribute(test, attr, value)
|
box/flaky
|
flaky/_flaky_plugin.py
|
_FlakyPlugin._add_flaky_report
|
python
|
def _add_flaky_report(self, stream):
value = self._stream.getvalue()
# If everything succeeded and --no-success-flaky-report is specified
# don't print anything.
if not self._flaky_success_report and not value:
return
stream.write('===Flaky Test Report===\n\n')
# Python 2 will write to the stderr stream as a byte string, whereas
# Python 3 will write to the stream as text. Only encode into a byte
# string if the write tries to encode it first and raises a
# UnicodeEncodeError.
try:
stream.write(value)
except UnicodeEncodeError:
stream.write(value.encode('utf-8', 'replace'))
stream.write('\n===End Flaky Test Report===\n')
|
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/_flaky_plugin.py#L368-L395
| null |
class _FlakyPlugin(object):
_retry_failure_message = ' failed ({0} runs remaining out of {1}).'
_failure_message = ' failed; it passed {0} out of the required {1} times.'
_not_rerun_message = ' failed and was not selected for rerun.'
def __init__(self):
super(_FlakyPlugin, self).__init__()
self._stream = StringIO()
self._flaky_success_report = True
@property
def stream(self):
"""
Returns the stream used for building the flaky report.
Anything written to this stream before the end of the test run
will be written to the flaky report.
:return:
The stream used for building the flaky report.
:rtype:
:class:`StringIO`
"""
return self._stream
def _log_test_failure(self, test_callable_name, err, message):
"""
Add messaging about a test failure to the stream, which will be
printed by the plugin's report method.
"""
formatted_exception_info = ''.join(format_exception(*err)).replace('\n', '\n\t').rstrip()
self._stream.writelines([
ensure_unicode_string(test_callable_name),
ensure_unicode_string(message),
ensure_unicode_string(formatted_exception_info),
'\n',
])
def _report_final_failure(self, err, flaky, name):
"""
Report that the test has failed too many times to pass at
least min_passes times.
By default, this means that the test has failed twice.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
min_passes = flaky[FlakyNames.MIN_PASSES]
current_passes = flaky[FlakyNames.CURRENT_PASSES]
message = self._failure_message.format(
current_passes,
min_passes,
)
self._log_test_failure(name, err, message)
def _log_intermediate_failure(self, err, flaky, name):
"""
Report that the test has failed, but still has reruns left.
Then rerun the test.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
max_runs = flaky[FlakyNames.MAX_RUNS]
runs_left = max_runs - flaky[FlakyNames.CURRENT_RUNS]
message = self._retry_failure_message.format(
runs_left,
max_runs,
)
self._log_test_failure(name, err, message)
def _should_handle_test_error_or_failure(self, test):
"""
Whether or not flaky should handle a test error or failure.
Only handle tests marked @flaky.
Count remaining retries and compare with number of required successes that have not yet been achieved.
This method may be called multiple times for the same test run, so it has no side effects.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test needs to be rerun; False, otherwise.
:rtype:
`bool`
"""
if not self._has_flaky_attributes(test):
return False
flaky_attributes = self._get_flaky_attributes(test)
flaky_attributes[FlakyNames.CURRENT_RUNS] += 1
has_failed = self._has_flaky_test_failed(flaky_attributes)
return not has_failed
def _will_handle_test_error_or_failure(self, test, name, err):
"""
Whether or not flaky will handle a test error or failure.
Returns True if the plugin should handle the test result, and
the `rerun_filter` returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The name of the test that has raised an error
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun by flaky; False, otherwise.
:rtype:
`bool`
"""
return self._should_handle_test_error_or_failure(test) and self._should_rerun_test(test, name, err)
def _handle_test_error_or_failure(self, test, err):
"""
Handle a flaky test error or failure.
Returning True from this method keeps the test runner from reporting
the test as a failure; this way we can retry and only report as a
failure if we are out of retries.
This method may only be called once per test run; it changes persisted flaky attributes.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun;
False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
if self._has_flaky_attributes(test):
self._add_flaky_test_failure(test, err)
should_handle = self._should_handle_test_error_or_failure(test)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if should_handle:
flaky_attributes = self._get_flaky_attributes(test)
if self._should_rerun_test(test, name, err):
self._log_intermediate_failure(err, flaky_attributes, name)
self._mark_test_for_rerun(test)
return True
self._log_test_failure(name, err, self._not_rerun_message)
return False
flaky_attributes = self._get_flaky_attributes(test)
self._report_final_failure(err, flaky_attributes, name)
return False
def _should_rerun_test(self, test, name, err):
"""
Whether or not a test should be rerun.
This is a pass-through to the test's rerun filter.
A flaky test will only be rerun if it hasn't failed too many
times to succeed at least min_passes times, and if
this method returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The test name
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
Whether flaky should rerun this test.
:rtype:
`bool`
"""
rerun_filter = self._get_flaky_attribute(test, FlakyNames.RERUN_FILTER)
return rerun_filter(err, name, test, self)
def _mark_test_for_rerun(self, test):
"""
Mark a flaky test for rerun.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`Function`
"""
raise NotImplementedError # pragma: no cover
def _should_handle_test_success(self, test):
if not self._has_flaky_attributes(test):
return False
flaky = self._get_flaky_attributes(test)
flaky[FlakyNames.CURRENT_PASSES] += 1
flaky[FlakyNames.CURRENT_RUNS] += 1
return not self._has_flaky_test_succeeded(flaky)
def _handle_test_success(self, test):
"""
Handle a flaky test success.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if the test has passed the required number of times.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test will be rerun; False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
need_reruns = self._should_handle_test_success(test)
if self._has_flaky_attributes(test):
flaky = self._get_flaky_attributes(test)
min_passes = flaky[FlakyNames.MIN_PASSES]
passes = flaky[FlakyNames.CURRENT_PASSES] + 1
self._set_flaky_attribute(test, FlakyNames.CURRENT_PASSES, passes)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if self._flaky_success_report:
self._stream.writelines([
ensure_unicode_string(name),
' passed {} out of the required {} times. '.format(
passes,
min_passes,
),
])
if need_reruns:
self._stream.write(
'Running test again until it passes {} times.\n'.format(
min_passes,
)
)
else:
self._stream.write('Success!\n')
if need_reruns:
self._mark_test_for_rerun(test)
return need_reruns
@staticmethod
def add_report_option(add_option):
"""
Add an option to the test runner to suppress the flaky report.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--no-flaky-report',
action='store_false',
dest='flaky_report',
default=True,
help="Suppress the report at the end of the "
"run detailing flaky test results.",
)
add_option(
'--no-success-flaky-report',
action='store_false',
dest='flaky_success_report',
default=True,
help="Suppress reporting flaky test successes"
"in the report at the end of the "
"run detailing flaky test results.",
)
@staticmethod
def add_force_flaky_options(add_option):
"""
Add options to the test runner that force all tests to be flaky.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--force-flaky',
action="store_true",
dest="force_flaky",
default=False,
help="If this option is specified, we will treat all tests as "
"flaky."
)
add_option(
'--max-runs',
action="store",
dest="max_runs",
type=int,
default=2,
help="If --force-flaky is specified, we will run each test at "
"most this many times (unless the test has its own flaky "
"decorator)."
)
add_option(
'--min-passes',
action="store",
dest="min_passes",
type=int,
default=1,
help="If --force-flaky is specified, we will run each test at "
"least this many times (unless the test has its own flaky "
"decorator)."
)
@classmethod
def _copy_flaky_attributes(cls, test, test_class):
"""
Copy flaky attributes from the test callable or class to the test.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
test_callable = cls._get_test_callable(test)
if test_callable is None:
return
for attr, value in cls._get_flaky_attributes(test_class).items():
already_set = hasattr(test, attr)
if already_set:
continue
attr_on_callable = getattr(test_callable, attr, None)
if attr_on_callable is not None:
cls._set_flaky_attribute(test, attr, attr_on_callable)
elif value is not None:
cls._set_flaky_attribute(test, attr, value)
@staticmethod
def _get_flaky_attribute(test_item, flaky_attribute):
"""
Gets an attribute describing the flaky test.
:param test_item:
The test method from which to get the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to get
:type flaky_attribute:
`unicode`
:return:
The test callable's attribute, or None if the test
callable doesn't have that attribute.
:rtype:
varies
"""
return getattr(test_item, flaky_attribute, None)
@staticmethod
def _set_flaky_attribute(test_item, flaky_attribute, value):
"""
Sets an attribute on a flaky test. Uses magic __dict__ since setattr
doesn't work for bound methods.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
:param value:
The value to set the test callable's attribute to.
:type value:
varies
"""
test_item.__dict__[flaky_attribute] = value
@classmethod
def _increment_flaky_attribute(cls, test_item, flaky_attribute):
"""
Increments the value of an attribute on a flaky test.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
"""
cls._set_flaky_attribute(test_item, flaky_attribute, cls._get_flaky_attribute(test_item, flaky_attribute) + 1)
@classmethod
def _has_flaky_attributes(cls, test):
"""
Returns True if the test callable in question is marked as flaky.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`bool`
"""
current_runs = cls._get_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
return current_runs is not None
@classmethod
def _get_flaky_attributes(cls, test_item):
"""
Get all the flaky related attributes from the test.
:param test_item:
The test callable from which to get the flaky related attributes.
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`dict` of `unicode` to varies
"""
return {
attr: cls._get_flaky_attribute(
test_item,
attr,
) for attr in FlakyNames()
}
@classmethod
def _add_flaky_test_failure(cls, test, err):
"""
Store test error information on the test callable.
:param test:
The flaky test on which to update the flaky attributes.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
"""
errs = getattr(test, FlakyNames.CURRENT_ERRORS, None) or []
cls._set_flaky_attribute(test, FlakyNames.CURRENT_ERRORS, errs)
errs.append(err)
@classmethod
def _has_flaky_test_failed(cls, flaky):
"""
Whether or not the flaky test has failed
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as failure; False if
it should be rerun.
:rtype:
`bool`
"""
max_runs, current_runs, min_passes, current_passes = (
flaky[FlakyNames.MAX_RUNS],
flaky[FlakyNames.CURRENT_RUNS],
flaky[FlakyNames.MIN_PASSES],
flaky[FlakyNames.CURRENT_PASSES],
)
runs_left = max_runs - current_runs
passes_needed = min_passes - current_passes
no_retry = passes_needed > runs_left
return no_retry and not cls._has_flaky_test_succeeded(flaky)
@staticmethod
def _has_flaky_test_succeeded(flaky):
"""
Whether or not the flaky test has succeeded
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as success; False if
it should be rerun.
:rtype:
`bool`
"""
return flaky[FlakyNames.CURRENT_PASSES] >= flaky[FlakyNames.MIN_PASSES]
@classmethod
def _get_test_callable(cls, test):
"""
Get the test callable, from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The test declaration, callable and name that is being run
:rtype:
`callable`
"""
raise NotImplementedError # pragma: no cover
@staticmethod
def _get_test_callable_name(test):
"""
Get the name of the test callable from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The name of the test callable that is being run by the test
:rtype:
`unicode`
"""
raise NotImplementedError # pragma: no cover
@classmethod
def _make_test_flaky(cls, test, max_runs=None, min_passes=None, rerun_filter=None):
"""
Make a given test flaky.
:param test:
The test in question.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param max_runs:
The value of the FlakyNames.MAX_RUNS attribute to use.
:type max_runs:
`int`
:param min_passes:
The value of the FlakyNames.MIN_PASSES attribute to use.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`nose.case.Test` or :class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
"""
attrib_dict = defaults.default_flaky_attributes(max_runs, min_passes, rerun_filter)
for attr, value in attrib_dict.items():
cls._set_flaky_attribute(test, attr, value)
|
box/flaky
|
flaky/_flaky_plugin.py
|
_FlakyPlugin._copy_flaky_attributes
|
python
|
def _copy_flaky_attributes(cls, test, test_class):
test_callable = cls._get_test_callable(test)
if test_callable is None:
return
for attr, value in cls._get_flaky_attributes(test_class).items():
already_set = hasattr(test, attr)
if already_set:
continue
attr_on_callable = getattr(test_callable, attr, None)
if attr_on_callable is not None:
cls._set_flaky_attribute(test, attr, attr_on_callable)
elif value is not None:
cls._set_flaky_attribute(test, attr, value)
|
Copy flaky attributes from the test callable or class to the test.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/_flaky_plugin.py#L398-L418
|
[
"def _set_flaky_attribute(test_item, flaky_attribute, value):\n \"\"\"\n Sets an attribute on a flaky test. Uses magic __dict__ since setattr\n doesn't work for bound methods.\n\n :param test_item:\n The test callable on which to set the attribute\n :type test_item:\n `callable` or :class:`nose.case.Test` or :class:`Function`\n :param flaky_attribute:\n The name of the attribute to set\n :type flaky_attribute:\n `unicode`\n :param value:\n The value to set the test callable's attribute to.\n :type value:\n varies\n \"\"\"\n test_item.__dict__[flaky_attribute] = value\n",
"def _get_flaky_attributes(cls, test_item):\n \"\"\"\n Get all the flaky related attributes from the test.\n\n :param test_item:\n The test callable from which to get the flaky related attributes.\n :type test_item:\n `callable` or :class:`nose.case.Test` or :class:`Function`\n :return:\n :rtype:\n `dict` of `unicode` to varies\n \"\"\"\n return {\n attr: cls._get_flaky_attribute(\n test_item,\n attr,\n ) for attr in FlakyNames()\n }\n",
"def _get_test_callable(cls, test):\n \"\"\"\n Base class override.\n\n :param test:\n The test that has raised an error or succeeded\n :type test:\n :class:`Function`\n :return:\n The test declaration, callable and name that is being run\n :rtype:\n `tuple` of `object`, `callable`, `unicode`\n \"\"\"\n callable_name = cls._get_test_callable_name(test)\n if callable_name.endswith(']') and '[' in callable_name:\n unparametrized_name = callable_name[:callable_name.index('[')]\n else:\n unparametrized_name = callable_name\n test_instance = cls._get_test_instance(test)\n if hasattr(test_instance, callable_name):\n # Test is a method of a class\n def_and_callable = getattr(test_instance, callable_name)\n return def_and_callable\n if hasattr(test_instance, unparametrized_name):\n # Test is a parametrized method of a class\n def_and_callable = getattr(test_instance, unparametrized_name)\n return def_and_callable\n if hasattr(test, 'module'):\n if hasattr(test.module, callable_name):\n # Test is a function in a module\n def_and_callable = getattr(test.module, callable_name)\n return def_and_callable\n if hasattr(test.module, unparametrized_name):\n # Test is a parametrized function in a module\n def_and_callable = getattr(test.module, unparametrized_name)\n return def_and_callable\n elif hasattr(test, 'runtest'):\n # Test is a doctest or other non-Function Item\n return test.runtest\n return None\n"
] |
class _FlakyPlugin(object):
_retry_failure_message = ' failed ({0} runs remaining out of {1}).'
_failure_message = ' failed; it passed {0} out of the required {1} times.'
_not_rerun_message = ' failed and was not selected for rerun.'
def __init__(self):
super(_FlakyPlugin, self).__init__()
self._stream = StringIO()
self._flaky_success_report = True
@property
def stream(self):
"""
Returns the stream used for building the flaky report.
Anything written to this stream before the end of the test run
will be written to the flaky report.
:return:
The stream used for building the flaky report.
:rtype:
:class:`StringIO`
"""
return self._stream
def _log_test_failure(self, test_callable_name, err, message):
"""
Add messaging about a test failure to the stream, which will be
printed by the plugin's report method.
"""
formatted_exception_info = ''.join(format_exception(*err)).replace('\n', '\n\t').rstrip()
self._stream.writelines([
ensure_unicode_string(test_callable_name),
ensure_unicode_string(message),
ensure_unicode_string(formatted_exception_info),
'\n',
])
def _report_final_failure(self, err, flaky, name):
"""
Report that the test has failed too many times to pass at
least min_passes times.
By default, this means that the test has failed twice.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
min_passes = flaky[FlakyNames.MIN_PASSES]
current_passes = flaky[FlakyNames.CURRENT_PASSES]
message = self._failure_message.format(
current_passes,
min_passes,
)
self._log_test_failure(name, err, message)
def _log_intermediate_failure(self, err, flaky, name):
"""
Report that the test has failed, but still has reruns left.
Then rerun the test.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
max_runs = flaky[FlakyNames.MAX_RUNS]
runs_left = max_runs - flaky[FlakyNames.CURRENT_RUNS]
message = self._retry_failure_message.format(
runs_left,
max_runs,
)
self._log_test_failure(name, err, message)
def _should_handle_test_error_or_failure(self, test):
"""
Whether or not flaky should handle a test error or failure.
Only handle tests marked @flaky.
Count remaining retries and compare with number of required successes that have not yet been achieved.
This method may be called multiple times for the same test run, so it has no side effects.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test needs to be rerun; False, otherwise.
:rtype:
`bool`
"""
if not self._has_flaky_attributes(test):
return False
flaky_attributes = self._get_flaky_attributes(test)
flaky_attributes[FlakyNames.CURRENT_RUNS] += 1
has_failed = self._has_flaky_test_failed(flaky_attributes)
return not has_failed
def _will_handle_test_error_or_failure(self, test, name, err):
"""
Whether or not flaky will handle a test error or failure.
Returns True if the plugin should handle the test result, and
the `rerun_filter` returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The name of the test that has raised an error
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun by flaky; False, otherwise.
:rtype:
`bool`
"""
return self._should_handle_test_error_or_failure(test) and self._should_rerun_test(test, name, err)
def _handle_test_error_or_failure(self, test, err):
"""
Handle a flaky test error or failure.
Returning True from this method keeps the test runner from reporting
the test as a failure; this way we can retry and only report as a
failure if we are out of retries.
This method may only be called once per test run; it changes persisted flaky attributes.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun;
False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
if self._has_flaky_attributes(test):
self._add_flaky_test_failure(test, err)
should_handle = self._should_handle_test_error_or_failure(test)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if should_handle:
flaky_attributes = self._get_flaky_attributes(test)
if self._should_rerun_test(test, name, err):
self._log_intermediate_failure(err, flaky_attributes, name)
self._mark_test_for_rerun(test)
return True
self._log_test_failure(name, err, self._not_rerun_message)
return False
flaky_attributes = self._get_flaky_attributes(test)
self._report_final_failure(err, flaky_attributes, name)
return False
def _should_rerun_test(self, test, name, err):
"""
Whether or not a test should be rerun.
This is a pass-through to the test's rerun filter.
A flaky test will only be rerun if it hasn't failed too many
times to succeed at least min_passes times, and if
this method returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The test name
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
Whether flaky should rerun this test.
:rtype:
`bool`
"""
rerun_filter = self._get_flaky_attribute(test, FlakyNames.RERUN_FILTER)
return rerun_filter(err, name, test, self)
def _mark_test_for_rerun(self, test):
"""
Mark a flaky test for rerun.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`Function`
"""
raise NotImplementedError # pragma: no cover
def _should_handle_test_success(self, test):
if not self._has_flaky_attributes(test):
return False
flaky = self._get_flaky_attributes(test)
flaky[FlakyNames.CURRENT_PASSES] += 1
flaky[FlakyNames.CURRENT_RUNS] += 1
return not self._has_flaky_test_succeeded(flaky)
def _handle_test_success(self, test):
"""
Handle a flaky test success.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if the test has passed the required number of times.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test will be rerun; False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
need_reruns = self._should_handle_test_success(test)
if self._has_flaky_attributes(test):
flaky = self._get_flaky_attributes(test)
min_passes = flaky[FlakyNames.MIN_PASSES]
passes = flaky[FlakyNames.CURRENT_PASSES] + 1
self._set_flaky_attribute(test, FlakyNames.CURRENT_PASSES, passes)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if self._flaky_success_report:
self._stream.writelines([
ensure_unicode_string(name),
' passed {} out of the required {} times. '.format(
passes,
min_passes,
),
])
if need_reruns:
self._stream.write(
'Running test again until it passes {} times.\n'.format(
min_passes,
)
)
else:
self._stream.write('Success!\n')
if need_reruns:
self._mark_test_for_rerun(test)
return need_reruns
@staticmethod
def add_report_option(add_option):
"""
Add an option to the test runner to suppress the flaky report.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--no-flaky-report',
action='store_false',
dest='flaky_report',
default=True,
help="Suppress the report at the end of the "
"run detailing flaky test results.",
)
add_option(
'--no-success-flaky-report',
action='store_false',
dest='flaky_success_report',
default=True,
help="Suppress reporting flaky test successes"
"in the report at the end of the "
"run detailing flaky test results.",
)
@staticmethod
def add_force_flaky_options(add_option):
"""
Add options to the test runner that force all tests to be flaky.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--force-flaky',
action="store_true",
dest="force_flaky",
default=False,
help="If this option is specified, we will treat all tests as "
"flaky."
)
add_option(
'--max-runs',
action="store",
dest="max_runs",
type=int,
default=2,
help="If --force-flaky is specified, we will run each test at "
"most this many times (unless the test has its own flaky "
"decorator)."
)
add_option(
'--min-passes',
action="store",
dest="min_passes",
type=int,
default=1,
help="If --force-flaky is specified, we will run each test at "
"least this many times (unless the test has its own flaky "
"decorator)."
)
def _add_flaky_report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
value = self._stream.getvalue()
# If everything succeeded and --no-success-flaky-report is specified
# don't print anything.
if not self._flaky_success_report and not value:
return
stream.write('===Flaky Test Report===\n\n')
# Python 2 will write to the stderr stream as a byte string, whereas
# Python 3 will write to the stream as text. Only encode into a byte
# string if the write tries to encode it first and raises a
# UnicodeEncodeError.
try:
stream.write(value)
except UnicodeEncodeError:
stream.write(value.encode('utf-8', 'replace'))
stream.write('\n===End Flaky Test Report===\n')
@classmethod
@staticmethod
def _get_flaky_attribute(test_item, flaky_attribute):
"""
Gets an attribute describing the flaky test.
:param test_item:
The test method from which to get the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to get
:type flaky_attribute:
`unicode`
:return:
The test callable's attribute, or None if the test
callable doesn't have that attribute.
:rtype:
varies
"""
return getattr(test_item, flaky_attribute, None)
@staticmethod
def _set_flaky_attribute(test_item, flaky_attribute, value):
"""
Sets an attribute on a flaky test. Uses magic __dict__ since setattr
doesn't work for bound methods.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
:param value:
The value to set the test callable's attribute to.
:type value:
varies
"""
test_item.__dict__[flaky_attribute] = value
@classmethod
def _increment_flaky_attribute(cls, test_item, flaky_attribute):
"""
Increments the value of an attribute on a flaky test.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
"""
cls._set_flaky_attribute(test_item, flaky_attribute, cls._get_flaky_attribute(test_item, flaky_attribute) + 1)
@classmethod
def _has_flaky_attributes(cls, test):
"""
Returns True if the test callable in question is marked as flaky.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`bool`
"""
current_runs = cls._get_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
return current_runs is not None
@classmethod
def _get_flaky_attributes(cls, test_item):
"""
Get all the flaky related attributes from the test.
:param test_item:
The test callable from which to get the flaky related attributes.
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`dict` of `unicode` to varies
"""
return {
attr: cls._get_flaky_attribute(
test_item,
attr,
) for attr in FlakyNames()
}
@classmethod
def _add_flaky_test_failure(cls, test, err):
"""
Store test error information on the test callable.
:param test:
The flaky test on which to update the flaky attributes.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
"""
errs = getattr(test, FlakyNames.CURRENT_ERRORS, None) or []
cls._set_flaky_attribute(test, FlakyNames.CURRENT_ERRORS, errs)
errs.append(err)
@classmethod
def _has_flaky_test_failed(cls, flaky):
"""
Whether or not the flaky test has failed
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as failure; False if
it should be rerun.
:rtype:
`bool`
"""
max_runs, current_runs, min_passes, current_passes = (
flaky[FlakyNames.MAX_RUNS],
flaky[FlakyNames.CURRENT_RUNS],
flaky[FlakyNames.MIN_PASSES],
flaky[FlakyNames.CURRENT_PASSES],
)
runs_left = max_runs - current_runs
passes_needed = min_passes - current_passes
no_retry = passes_needed > runs_left
return no_retry and not cls._has_flaky_test_succeeded(flaky)
@staticmethod
def _has_flaky_test_succeeded(flaky):
"""
Whether or not the flaky test has succeeded
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as success; False if
it should be rerun.
:rtype:
`bool`
"""
return flaky[FlakyNames.CURRENT_PASSES] >= flaky[FlakyNames.MIN_PASSES]
@classmethod
def _get_test_callable(cls, test):
"""
Get the test callable, from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The test declaration, callable and name that is being run
:rtype:
`callable`
"""
raise NotImplementedError # pragma: no cover
@staticmethod
def _get_test_callable_name(test):
"""
Get the name of the test callable from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The name of the test callable that is being run by the test
:rtype:
`unicode`
"""
raise NotImplementedError # pragma: no cover
@classmethod
def _make_test_flaky(cls, test, max_runs=None, min_passes=None, rerun_filter=None):
"""
Make a given test flaky.
:param test:
The test in question.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param max_runs:
The value of the FlakyNames.MAX_RUNS attribute to use.
:type max_runs:
`int`
:param min_passes:
The value of the FlakyNames.MIN_PASSES attribute to use.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`nose.case.Test` or :class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
"""
attrib_dict = defaults.default_flaky_attributes(max_runs, min_passes, rerun_filter)
for attr, value in attrib_dict.items():
cls._set_flaky_attribute(test, attr, value)
|
box/flaky
|
flaky/_flaky_plugin.py
|
_FlakyPlugin._increment_flaky_attribute
|
python
|
def _increment_flaky_attribute(cls, test_item, flaky_attribute):
cls._set_flaky_attribute(test_item, flaky_attribute, cls._get_flaky_attribute(test_item, flaky_attribute) + 1)
|
Increments the value of an attribute on a flaky test.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/_flaky_plugin.py#L463-L476
| null |
class _FlakyPlugin(object):
_retry_failure_message = ' failed ({0} runs remaining out of {1}).'
_failure_message = ' failed; it passed {0} out of the required {1} times.'
_not_rerun_message = ' failed and was not selected for rerun.'
def __init__(self):
super(_FlakyPlugin, self).__init__()
self._stream = StringIO()
self._flaky_success_report = True
@property
def stream(self):
"""
Returns the stream used for building the flaky report.
Anything written to this stream before the end of the test run
will be written to the flaky report.
:return:
The stream used for building the flaky report.
:rtype:
:class:`StringIO`
"""
return self._stream
def _log_test_failure(self, test_callable_name, err, message):
"""
Add messaging about a test failure to the stream, which will be
printed by the plugin's report method.
"""
formatted_exception_info = ''.join(format_exception(*err)).replace('\n', '\n\t').rstrip()
self._stream.writelines([
ensure_unicode_string(test_callable_name),
ensure_unicode_string(message),
ensure_unicode_string(formatted_exception_info),
'\n',
])
def _report_final_failure(self, err, flaky, name):
"""
Report that the test has failed too many times to pass at
least min_passes times.
By default, this means that the test has failed twice.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
min_passes = flaky[FlakyNames.MIN_PASSES]
current_passes = flaky[FlakyNames.CURRENT_PASSES]
message = self._failure_message.format(
current_passes,
min_passes,
)
self._log_test_failure(name, err, message)
def _log_intermediate_failure(self, err, flaky, name):
"""
Report that the test has failed, but still has reruns left.
Then rerun the test.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
max_runs = flaky[FlakyNames.MAX_RUNS]
runs_left = max_runs - flaky[FlakyNames.CURRENT_RUNS]
message = self._retry_failure_message.format(
runs_left,
max_runs,
)
self._log_test_failure(name, err, message)
def _should_handle_test_error_or_failure(self, test):
"""
Whether or not flaky should handle a test error or failure.
Only handle tests marked @flaky.
Count remaining retries and compare with number of required successes that have not yet been achieved.
This method may be called multiple times for the same test run, so it has no side effects.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test needs to be rerun; False, otherwise.
:rtype:
`bool`
"""
if not self._has_flaky_attributes(test):
return False
flaky_attributes = self._get_flaky_attributes(test)
flaky_attributes[FlakyNames.CURRENT_RUNS] += 1
has_failed = self._has_flaky_test_failed(flaky_attributes)
return not has_failed
def _will_handle_test_error_or_failure(self, test, name, err):
"""
Whether or not flaky will handle a test error or failure.
Returns True if the plugin should handle the test result, and
the `rerun_filter` returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The name of the test that has raised an error
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun by flaky; False, otherwise.
:rtype:
`bool`
"""
return self._should_handle_test_error_or_failure(test) and self._should_rerun_test(test, name, err)
def _handle_test_error_or_failure(self, test, err):
"""
Handle a flaky test error or failure.
Returning True from this method keeps the test runner from reporting
the test as a failure; this way we can retry and only report as a
failure if we are out of retries.
This method may only be called once per test run; it changes persisted flaky attributes.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun;
False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
if self._has_flaky_attributes(test):
self._add_flaky_test_failure(test, err)
should_handle = self._should_handle_test_error_or_failure(test)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if should_handle:
flaky_attributes = self._get_flaky_attributes(test)
if self._should_rerun_test(test, name, err):
self._log_intermediate_failure(err, flaky_attributes, name)
self._mark_test_for_rerun(test)
return True
self._log_test_failure(name, err, self._not_rerun_message)
return False
flaky_attributes = self._get_flaky_attributes(test)
self._report_final_failure(err, flaky_attributes, name)
return False
def _should_rerun_test(self, test, name, err):
"""
Whether or not a test should be rerun.
This is a pass-through to the test's rerun filter.
A flaky test will only be rerun if it hasn't failed too many
times to succeed at least min_passes times, and if
this method returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The test name
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
Whether flaky should rerun this test.
:rtype:
`bool`
"""
rerun_filter = self._get_flaky_attribute(test, FlakyNames.RERUN_FILTER)
return rerun_filter(err, name, test, self)
def _mark_test_for_rerun(self, test):
"""
Mark a flaky test for rerun.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`Function`
"""
raise NotImplementedError # pragma: no cover
def _should_handle_test_success(self, test):
if not self._has_flaky_attributes(test):
return False
flaky = self._get_flaky_attributes(test)
flaky[FlakyNames.CURRENT_PASSES] += 1
flaky[FlakyNames.CURRENT_RUNS] += 1
return not self._has_flaky_test_succeeded(flaky)
def _handle_test_success(self, test):
"""
Handle a flaky test success.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if the test has passed the required number of times.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test will be rerun; False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
need_reruns = self._should_handle_test_success(test)
if self._has_flaky_attributes(test):
flaky = self._get_flaky_attributes(test)
min_passes = flaky[FlakyNames.MIN_PASSES]
passes = flaky[FlakyNames.CURRENT_PASSES] + 1
self._set_flaky_attribute(test, FlakyNames.CURRENT_PASSES, passes)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if self._flaky_success_report:
self._stream.writelines([
ensure_unicode_string(name),
' passed {} out of the required {} times. '.format(
passes,
min_passes,
),
])
if need_reruns:
self._stream.write(
'Running test again until it passes {} times.\n'.format(
min_passes,
)
)
else:
self._stream.write('Success!\n')
if need_reruns:
self._mark_test_for_rerun(test)
return need_reruns
@staticmethod
def add_report_option(add_option):
"""
Add an option to the test runner to suppress the flaky report.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--no-flaky-report',
action='store_false',
dest='flaky_report',
default=True,
help="Suppress the report at the end of the "
"run detailing flaky test results.",
)
add_option(
'--no-success-flaky-report',
action='store_false',
dest='flaky_success_report',
default=True,
help="Suppress reporting flaky test successes"
"in the report at the end of the "
"run detailing flaky test results.",
)
@staticmethod
def add_force_flaky_options(add_option):
"""
Add options to the test runner that force all tests to be flaky.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--force-flaky',
action="store_true",
dest="force_flaky",
default=False,
help="If this option is specified, we will treat all tests as "
"flaky."
)
add_option(
'--max-runs',
action="store",
dest="max_runs",
type=int,
default=2,
help="If --force-flaky is specified, we will run each test at "
"most this many times (unless the test has its own flaky "
"decorator)."
)
add_option(
'--min-passes',
action="store",
dest="min_passes",
type=int,
default=1,
help="If --force-flaky is specified, we will run each test at "
"least this many times (unless the test has its own flaky "
"decorator)."
)
def _add_flaky_report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
value = self._stream.getvalue()
# If everything succeeded and --no-success-flaky-report is specified
# don't print anything.
if not self._flaky_success_report and not value:
return
stream.write('===Flaky Test Report===\n\n')
# Python 2 will write to the stderr stream as a byte string, whereas
# Python 3 will write to the stream as text. Only encode into a byte
# string if the write tries to encode it first and raises a
# UnicodeEncodeError.
try:
stream.write(value)
except UnicodeEncodeError:
stream.write(value.encode('utf-8', 'replace'))
stream.write('\n===End Flaky Test Report===\n')
@classmethod
def _copy_flaky_attributes(cls, test, test_class):
"""
Copy flaky attributes from the test callable or class to the test.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
test_callable = cls._get_test_callable(test)
if test_callable is None:
return
for attr, value in cls._get_flaky_attributes(test_class).items():
already_set = hasattr(test, attr)
if already_set:
continue
attr_on_callable = getattr(test_callable, attr, None)
if attr_on_callable is not None:
cls._set_flaky_attribute(test, attr, attr_on_callable)
elif value is not None:
cls._set_flaky_attribute(test, attr, value)
@staticmethod
def _get_flaky_attribute(test_item, flaky_attribute):
"""
Gets an attribute describing the flaky test.
:param test_item:
The test method from which to get the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to get
:type flaky_attribute:
`unicode`
:return:
The test callable's attribute, or None if the test
callable doesn't have that attribute.
:rtype:
varies
"""
return getattr(test_item, flaky_attribute, None)
@staticmethod
def _set_flaky_attribute(test_item, flaky_attribute, value):
"""
Sets an attribute on a flaky test. Uses magic __dict__ since setattr
doesn't work for bound methods.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
:param value:
The value to set the test callable's attribute to.
:type value:
varies
"""
test_item.__dict__[flaky_attribute] = value
@classmethod
def _has_flaky_attributes(cls, test):
    """
    Returns True if the test callable in question is marked as flaky.

    :param test:
        The test that is being prepared to run
    :type test:
        :class:`nose.case.Test` or :class:`Function`
    :return:
        True if the test carries flaky bookkeeping attributes.
    :rtype:
        `bool`
    """
    # Duplicated ``@classmethod`` decorator removed: classmethod(classmethod(f))
    # breaks argument binding. The presence of CURRENT_RUNS (even at 0) is
    # what marks a test as flaky, so compare against None, not truthiness.
    current_runs = cls._get_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
    return current_runs is not None
@classmethod
def _get_flaky_attributes(cls, test_item):
"""
Get all the flaky related attributes from the test.
:param test_item:
The test callable from which to get the flaky related attributes.
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`dict` of `unicode` to varies
"""
return {
attr: cls._get_flaky_attribute(
test_item,
attr,
) for attr in FlakyNames()
}
@classmethod
def _add_flaky_test_failure(cls, test, err):
"""
Store test error information on the test callable.
:param test:
The flaky test on which to update the flaky attributes.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
"""
errs = getattr(test, FlakyNames.CURRENT_ERRORS, None) or []
cls._set_flaky_attribute(test, FlakyNames.CURRENT_ERRORS, errs)
errs.append(err)
@classmethod
def _has_flaky_test_failed(cls, flaky):
    """
    Whether or not the flaky test has failed

    :param flaky:
        Dictionary of flaky attributes
    :type flaky:
        `dict` of `unicode` to varies
    :return:
        True if the flaky test should be marked as failure; False if
        it should be rerun.
    :rtype:
        `bool`
    """
    runs_remaining = flaky[FlakyNames.MAX_RUNS] - flaky[FlakyNames.CURRENT_RUNS]
    passes_still_needed = flaky[FlakyNames.MIN_PASSES] - flaky[FlakyNames.CURRENT_PASSES]
    if passes_still_needed <= runs_remaining:
        # A retry can still reach min_passes, so this is not a failure yet.
        return False
    # Out of retries: failed unless min_passes was already achieved.
    return not cls._has_flaky_test_succeeded(flaky)
@staticmethod
def _has_flaky_test_succeeded(flaky):
    """
    Whether or not the flaky test has succeeded

    :param flaky:
        Dictionary of flaky attributes
    :type flaky:
        `dict` of `unicode` to varies
    :return:
        True if the flaky test should be marked as success; False if
        it should be rerun.
    :rtype:
        `bool`
    """
    passes_required = flaky[FlakyNames.MIN_PASSES]
    passes_achieved = flaky[FlakyNames.CURRENT_PASSES]
    return passes_achieved >= passes_required
@classmethod
def _get_test_callable(cls, test):
"""
Get the test callable, from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The test declaration, callable and name that is being run
:rtype:
`callable`
"""
raise NotImplementedError # pragma: no cover
@staticmethod
def _get_test_callable_name(test):
"""
Get the name of the test callable from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The name of the test callable that is being run by the test
:rtype:
`unicode`
"""
raise NotImplementedError # pragma: no cover
@classmethod
def _make_test_flaky(cls, test, max_runs=None, min_passes=None, rerun_filter=None):
"""
Make a given test flaky.
:param test:
The test in question.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param max_runs:
The value of the FlakyNames.MAX_RUNS attribute to use.
:type max_runs:
`int`
:param min_passes:
The value of the FlakyNames.MIN_PASSES attribute to use.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`nose.case.Test` or :class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
"""
attrib_dict = defaults.default_flaky_attributes(max_runs, min_passes, rerun_filter)
for attr, value in attrib_dict.items():
cls._set_flaky_attribute(test, attr, value)
|
box/flaky
|
flaky/_flaky_plugin.py
|
_FlakyPlugin._has_flaky_attributes
|
python
|
def _has_flaky_attributes(cls, test):
current_runs = cls._get_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
return current_runs is not None
|
Returns True if the test callable in question is marked as flaky.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`bool`
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/_flaky_plugin.py#L479-L492
|
[
"def _get_flaky_attribute(test_item, flaky_attribute):\n \"\"\"\n Gets an attribute describing the flaky test.\n\n :param test_item:\n The test method from which to get the attribute\n :type test_item:\n `callable` or :class:`nose.case.Test` or :class:`Function`\n :param flaky_attribute:\n The name of the attribute to get\n :type flaky_attribute:\n `unicode`\n :return:\n The test callable's attribute, or None if the test\n callable doesn't have that attribute.\n :rtype:\n varies\n \"\"\"\n return getattr(test_item, flaky_attribute, None)\n"
] |
class _FlakyPlugin(object):
_retry_failure_message = ' failed ({0} runs remaining out of {1}).'
_failure_message = ' failed; it passed {0} out of the required {1} times.'
_not_rerun_message = ' failed and was not selected for rerun.'
def __init__(self):
super(_FlakyPlugin, self).__init__()
self._stream = StringIO()
self._flaky_success_report = True
@property
def stream(self):
"""
Returns the stream used for building the flaky report.
Anything written to this stream before the end of the test run
will be written to the flaky report.
:return:
The stream used for building the flaky report.
:rtype:
:class:`StringIO`
"""
return self._stream
def _log_test_failure(self, test_callable_name, err, message):
"""
Add messaging about a test failure to the stream, which will be
printed by the plugin's report method.
"""
formatted_exception_info = ''.join(format_exception(*err)).replace('\n', '\n\t').rstrip()
self._stream.writelines([
ensure_unicode_string(test_callable_name),
ensure_unicode_string(message),
ensure_unicode_string(formatted_exception_info),
'\n',
])
def _report_final_failure(self, err, flaky, name):
"""
Report that the test has failed too many times to pass at
least min_passes times.
By default, this means that the test has failed twice.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
min_passes = flaky[FlakyNames.MIN_PASSES]
current_passes = flaky[FlakyNames.CURRENT_PASSES]
message = self._failure_message.format(
current_passes,
min_passes,
)
self._log_test_failure(name, err, message)
def _log_intermediate_failure(self, err, flaky, name):
"""
Report that the test has failed, but still has reruns left.
Then rerun the test.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
max_runs = flaky[FlakyNames.MAX_RUNS]
runs_left = max_runs - flaky[FlakyNames.CURRENT_RUNS]
message = self._retry_failure_message.format(
runs_left,
max_runs,
)
self._log_test_failure(name, err, message)
def _should_handle_test_error_or_failure(self, test):
"""
Whether or not flaky should handle a test error or failure.
Only handle tests marked @flaky.
Count remaining retries and compare with number of required successes that have not yet been achieved.
This method may be called multiple times for the same test run, so it has no side effects.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test needs to be rerun; False, otherwise.
:rtype:
`bool`
"""
if not self._has_flaky_attributes(test):
return False
flaky_attributes = self._get_flaky_attributes(test)
flaky_attributes[FlakyNames.CURRENT_RUNS] += 1
has_failed = self._has_flaky_test_failed(flaky_attributes)
return not has_failed
def _will_handle_test_error_or_failure(self, test, name, err):
"""
Whether or not flaky will handle a test error or failure.
Returns True if the plugin should handle the test result, and
the `rerun_filter` returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The name of the test that has raised an error
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun by flaky; False, otherwise.
:rtype:
`bool`
"""
return self._should_handle_test_error_or_failure(test) and self._should_rerun_test(test, name, err)
def _handle_test_error_or_failure(self, test, err):
"""
Handle a flaky test error or failure.
Returning True from this method keeps the test runner from reporting
the test as a failure; this way we can retry and only report as a
failure if we are out of retries.
This method may only be called once per test run; it changes persisted flaky attributes.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun;
False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
if self._has_flaky_attributes(test):
self._add_flaky_test_failure(test, err)
should_handle = self._should_handle_test_error_or_failure(test)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if should_handle:
flaky_attributes = self._get_flaky_attributes(test)
if self._should_rerun_test(test, name, err):
self._log_intermediate_failure(err, flaky_attributes, name)
self._mark_test_for_rerun(test)
return True
self._log_test_failure(name, err, self._not_rerun_message)
return False
flaky_attributes = self._get_flaky_attributes(test)
self._report_final_failure(err, flaky_attributes, name)
return False
def _should_rerun_test(self, test, name, err):
"""
Whether or not a test should be rerun.
This is a pass-through to the test's rerun filter.
A flaky test will only be rerun if it hasn't failed too many
times to succeed at least min_passes times, and if
this method returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The test name
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
Whether flaky should rerun this test.
:rtype:
`bool`
"""
rerun_filter = self._get_flaky_attribute(test, FlakyNames.RERUN_FILTER)
return rerun_filter(err, name, test, self)
def _mark_test_for_rerun(self, test):
"""
Mark a flaky test for rerun.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`Function`
"""
raise NotImplementedError # pragma: no cover
def _should_handle_test_success(self, test):
if not self._has_flaky_attributes(test):
return False
flaky = self._get_flaky_attributes(test)
flaky[FlakyNames.CURRENT_PASSES] += 1
flaky[FlakyNames.CURRENT_RUNS] += 1
return not self._has_flaky_test_succeeded(flaky)
def _handle_test_success(self, test):
"""
Handle a flaky test success.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if the test has passed the required number of times.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test will be rerun; False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
need_reruns = self._should_handle_test_success(test)
if self._has_flaky_attributes(test):
flaky = self._get_flaky_attributes(test)
min_passes = flaky[FlakyNames.MIN_PASSES]
passes = flaky[FlakyNames.CURRENT_PASSES] + 1
self._set_flaky_attribute(test, FlakyNames.CURRENT_PASSES, passes)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if self._flaky_success_report:
self._stream.writelines([
ensure_unicode_string(name),
' passed {} out of the required {} times. '.format(
passes,
min_passes,
),
])
if need_reruns:
self._stream.write(
'Running test again until it passes {} times.\n'.format(
min_passes,
)
)
else:
self._stream.write('Success!\n')
if need_reruns:
self._mark_test_for_rerun(test)
return need_reruns
@staticmethod
def add_report_option(add_option):
"""
Add an option to the test runner to suppress the flaky report.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--no-flaky-report',
action='store_false',
dest='flaky_report',
default=True,
help="Suppress the report at the end of the "
"run detailing flaky test results.",
)
add_option(
'--no-success-flaky-report',
action='store_false',
dest='flaky_success_report',
default=True,
help="Suppress reporting flaky test successes"
"in the report at the end of the "
"run detailing flaky test results.",
)
@staticmethod
def add_force_flaky_options(add_option):
"""
Add options to the test runner that force all tests to be flaky.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--force-flaky',
action="store_true",
dest="force_flaky",
default=False,
help="If this option is specified, we will treat all tests as "
"flaky."
)
add_option(
'--max-runs',
action="store",
dest="max_runs",
type=int,
default=2,
help="If --force-flaky is specified, we will run each test at "
"most this many times (unless the test has its own flaky "
"decorator)."
)
add_option(
'--min-passes',
action="store",
dest="min_passes",
type=int,
default=1,
help="If --force-flaky is specified, we will run each test at "
"least this many times (unless the test has its own flaky "
"decorator)."
)
def _add_flaky_report(self, stream):
"""
Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file`
"""
value = self._stream.getvalue()
# If everything succeeded and --no-success-flaky-report is specified
# don't print anything.
if not self._flaky_success_report and not value:
return
stream.write('===Flaky Test Report===\n\n')
# Python 2 will write to the stderr stream as a byte string, whereas
# Python 3 will write to the stream as text. Only encode into a byte
# string if the write tries to encode it first and raises a
# UnicodeEncodeError.
try:
stream.write(value)
except UnicodeEncodeError:
stream.write(value.encode('utf-8', 'replace'))
stream.write('\n===End Flaky Test Report===\n')
@classmethod
def _copy_flaky_attributes(cls, test, test_class):
"""
Copy flaky attributes from the test callable or class to the test.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
test_callable = cls._get_test_callable(test)
if test_callable is None:
return
for attr, value in cls._get_flaky_attributes(test_class).items():
already_set = hasattr(test, attr)
if already_set:
continue
attr_on_callable = getattr(test_callable, attr, None)
if attr_on_callable is not None:
cls._set_flaky_attribute(test, attr, attr_on_callable)
elif value is not None:
cls._set_flaky_attribute(test, attr, value)
@staticmethod
def _get_flaky_attribute(test_item, flaky_attribute):
"""
Gets an attribute describing the flaky test.
:param test_item:
The test method from which to get the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to get
:type flaky_attribute:
`unicode`
:return:
The test callable's attribute, or None if the test
callable doesn't have that attribute.
:rtype:
varies
"""
return getattr(test_item, flaky_attribute, None)
@staticmethod
def _set_flaky_attribute(test_item, flaky_attribute, value):
"""
Sets an attribute on a flaky test. Uses magic __dict__ since setattr
doesn't work for bound methods.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
:param value:
The value to set the test callable's attribute to.
:type value:
varies
"""
test_item.__dict__[flaky_attribute] = value
@classmethod
def _increment_flaky_attribute(cls, test_item, flaky_attribute):
"""
Increments the value of an attribute on a flaky test.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode`
"""
cls._set_flaky_attribute(test_item, flaky_attribute, cls._get_flaky_attribute(test_item, flaky_attribute) + 1)
@classmethod
def _get_flaky_attributes(cls, test_item):
    """
    Get all the flaky related attributes from the test.

    :param test_item:
        The test callable from which to get the flaky related attributes.
    :type test_item:
        `callable` or :class:`nose.case.Test` or :class:`Function`
    :return:
        Mapping of every flaky attribute name to its value on the test
        (``None`` for attributes the test does not carry).
    :rtype:
        `dict` of `unicode` to varies
    """
    # BUG FIX: the @classmethod decorator was applied twice; wrapping a
    # classmethod object in another classmethod corrupts the descriptor.
    # It must be applied exactly once.
    return {
        attr: cls._get_flaky_attribute(
            test_item,
            attr,
        ) for attr in FlakyNames()
    }
@classmethod
def _add_flaky_test_failure(cls, test, err):
"""
Store test error information on the test callable.
:param test:
The flaky test on which to update the flaky attributes.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
"""
errs = getattr(test, FlakyNames.CURRENT_ERRORS, None) or []
cls._set_flaky_attribute(test, FlakyNames.CURRENT_ERRORS, errs)
errs.append(err)
@classmethod
def _has_flaky_test_failed(cls, flaky):
"""
Whether or not the flaky test has failed
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as failure; False if
it should be rerun.
:rtype:
`bool`
"""
max_runs, current_runs, min_passes, current_passes = (
flaky[FlakyNames.MAX_RUNS],
flaky[FlakyNames.CURRENT_RUNS],
flaky[FlakyNames.MIN_PASSES],
flaky[FlakyNames.CURRENT_PASSES],
)
runs_left = max_runs - current_runs
passes_needed = min_passes - current_passes
no_retry = passes_needed > runs_left
return no_retry and not cls._has_flaky_test_succeeded(flaky)
@staticmethod
def _has_flaky_test_succeeded(flaky):
"""
Whether or not the flaky test has succeeded
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:return:
True if the flaky test should be marked as success; False if
it should be rerun.
:rtype:
`bool`
"""
return flaky[FlakyNames.CURRENT_PASSES] >= flaky[FlakyNames.MIN_PASSES]
@classmethod
def _get_test_callable(cls, test):
"""
Get the test callable, from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The test declaration, callable and name that is being run
:rtype:
`callable`
"""
raise NotImplementedError # pragma: no cover
@staticmethod
def _get_test_callable_name(test):
"""
Get the name of the test callable from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The name of the test callable that is being run by the test
:rtype:
`unicode`
"""
raise NotImplementedError # pragma: no cover
@classmethod
def _make_test_flaky(cls, test, max_runs=None, min_passes=None, rerun_filter=None):
"""
Make a given test flaky.
:param test:
The test in question.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param max_runs:
The value of the FlakyNames.MAX_RUNS attribute to use.
:type max_runs:
`int`
:param min_passes:
The value of the FlakyNames.MIN_PASSES attribute to use.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`nose.case.Test` or :class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
"""
attrib_dict = defaults.default_flaky_attributes(max_runs, min_passes, rerun_filter)
for attr, value in attrib_dict.items():
cls._set_flaky_attribute(test, attr, value)
|
box/flaky
|
flaky/_flaky_plugin.py
|
_FlakyPlugin._get_flaky_attributes
|
python
|
def _get_flaky_attributes(cls, test_item):
return {
attr: cls._get_flaky_attribute(
test_item,
attr,
) for attr in FlakyNames()
}
|
Get all the flaky related attributes from the test.
:param test_item:
The test callable from which to get the flaky related attributes.
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`dict` of `unicode` to varies
|
train
|
https://github.com/box/flaky/blob/c23126f09b2cc5a4071cfa43a11272927e9c0fcd/flaky/_flaky_plugin.py#L495-L512
| null |
class _FlakyPlugin(object):
_retry_failure_message = ' failed ({0} runs remaining out of {1}).'
_failure_message = ' failed; it passed {0} out of the required {1} times.'
_not_rerun_message = ' failed and was not selected for rerun.'
def __init__(self):
super(_FlakyPlugin, self).__init__()
self._stream = StringIO()
self._flaky_success_report = True
@property
def stream(self):
"""
Returns the stream used for building the flaky report.
Anything written to this stream before the end of the test run
will be written to the flaky report.
:return:
The stream used for building the flaky report.
:rtype:
:class:`StringIO`
"""
return self._stream
def _log_test_failure(self, test_callable_name, err, message):
"""
Add messaging about a test failure to the stream, which will be
printed by the plugin's report method.
"""
formatted_exception_info = ''.join(format_exception(*err)).replace('\n', '\n\t').rstrip()
self._stream.writelines([
ensure_unicode_string(test_callable_name),
ensure_unicode_string(message),
ensure_unicode_string(formatted_exception_info),
'\n',
])
def _report_final_failure(self, err, flaky, name):
"""
Report that the test has failed too many times to pass at
least min_passes times.
By default, this means that the test has failed twice.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode`
"""
min_passes = flaky[FlakyNames.MIN_PASSES]
current_passes = flaky[FlakyNames.CURRENT_PASSES]
message = self._failure_message.format(
current_passes,
min_passes,
)
self._log_test_failure(name, err, message)
def _log_intermediate_failure(self, err, flaky, name):
    """
    Report that the test has failed, but still has reruns left.
    Then rerun the test.

    :param err:
        Information about the test failure (from sys.exc_info())
    :type err:
        `tuple` of `class`, :class:`Exception`, `traceback`
    :param flaky:
        Dictionary of flaky attributes
    :type flaky:
        `dict` of `unicode` to varies
    :param name:
        The test name
    :type name:
        `unicode`
    """
    total_runs = flaky[FlakyNames.MAX_RUNS]
    remaining = total_runs - flaky[FlakyNames.CURRENT_RUNS]
    self._log_test_failure(
        name,
        err,
        self._retry_failure_message.format(remaining, total_runs),
    )
def _should_handle_test_error_or_failure(self, test):
    """
    Whether or not flaky should handle a test error or failure.
    Only handle tests marked @flaky.
    Count remaining retries and compare with number of required successes
    that have not yet been achieved.
    This method may be called multiple times for the same test run, so it
    has no side effects.

    :param test:
        The test that has raised an error
    :type test:
        :class:`nose.case.Test` or :class:`Function`
    :return:
        True, if the test needs to be rerun; False, otherwise.
    :rtype:
        `bool`
    """
    if not self._has_flaky_attributes(test):
        return False
    # Simulate one more completed run on a *copy* of the attributes; the
    # persisted counters are only mutated in _handle_test_error_or_failure.
    attrs = self._get_flaky_attributes(test)
    attrs[FlakyNames.CURRENT_RUNS] = attrs[FlakyNames.CURRENT_RUNS] + 1
    return not self._has_flaky_test_failed(attrs)
def _will_handle_test_error_or_failure(self, test, name, err):
"""
Whether or not flaky will handle a test error or failure.
Returns True if the plugin should handle the test result, and
the `rerun_filter` returns True.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param name:
The name of the test that has raised an error
:type name:
`unicode`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun by flaky; False, otherwise.
:rtype:
`bool`
"""
return self._should_handle_test_error_or_failure(test) and self._should_rerun_test(test, name, err)
def _handle_test_error_or_failure(self, test, err):
"""
Handle a flaky test error or failure.
Returning True from this method keeps the test runner from reporting
the test as a failure; this way we can retry and only report as a
failure if we are out of retries.
This method may only be called once per test run; it changes persisted flaky attributes.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `type`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun;
False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
if self._has_flaky_attributes(test):
self._add_flaky_test_failure(test, err)
should_handle = self._should_handle_test_error_or_failure(test)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if should_handle:
flaky_attributes = self._get_flaky_attributes(test)
if self._should_rerun_test(test, name, err):
self._log_intermediate_failure(err, flaky_attributes, name)
self._mark_test_for_rerun(test)
return True
self._log_test_failure(name, err, self._not_rerun_message)
return False
flaky_attributes = self._get_flaky_attributes(test)
self._report_final_failure(err, flaky_attributes, name)
return False
def _should_rerun_test(self, test, name, err):
    """
    Whether or not a test should be rerun.
    This is a pass-through to the test's rerun filter.
    A flaky test will only be rerun if it hasn't failed too many
    times to succeed at least min_passes times, and if
    this method returns True.

    :param test:
        The test that has raised an error
    :type test:
        :class:`nose.case.Test` or :class:`Function`
    :param name:
        The test name
    :type name:
        `unicode`
    :param err:
        Information about the test failure (from sys.exc_info())
    :type err:
        `tuple` of `class`, :class:`Exception`, `traceback`
    :return:
        Whether flaky should rerun this test.
    :rtype:
        `bool`
    """
    # Delegate the decision entirely to the user-supplied rerun filter
    # stored on the test.
    filter_func = self._get_flaky_attribute(test, FlakyNames.RERUN_FILTER)
    return filter_func(err, name, test, self)
def _mark_test_for_rerun(self, test):
"""
Mark a flaky test for rerun.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`Function`
"""
raise NotImplementedError # pragma: no cover
def _should_handle_test_success(self, test):
if not self._has_flaky_attributes(test):
return False
flaky = self._get_flaky_attributes(test)
flaky[FlakyNames.CURRENT_PASSES] += 1
flaky[FlakyNames.CURRENT_RUNS] += 1
return not self._has_flaky_test_succeeded(flaky)
def _handle_test_success(self, test):
"""
Handle a flaky test success.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if the test has passed the required number of times.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
True, if the test will be rerun; False, if the test runner should handle it.
:rtype:
`bool`
"""
try:
name = self._get_test_callable_name(test)
except AttributeError:
return False
need_reruns = self._should_handle_test_success(test)
if self._has_flaky_attributes(test):
flaky = self._get_flaky_attributes(test)
min_passes = flaky[FlakyNames.MIN_PASSES]
passes = flaky[FlakyNames.CURRENT_PASSES] + 1
self._set_flaky_attribute(test, FlakyNames.CURRENT_PASSES, passes)
self._increment_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
if self._flaky_success_report:
self._stream.writelines([
ensure_unicode_string(name),
' passed {} out of the required {} times. '.format(
passes,
min_passes,
),
])
if need_reruns:
self._stream.write(
'Running test again until it passes {} times.\n'.format(
min_passes,
)
)
else:
self._stream.write('Success!\n')
if need_reruns:
self._mark_test_for_rerun(test)
return need_reruns
@staticmethod
def add_report_option(add_option):
"""
Add an option to the test runner to suppress the flaky report.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--no-flaky-report',
action='store_false',
dest='flaky_report',
default=True,
help="Suppress the report at the end of the "
"run detailing flaky test results.",
)
add_option(
'--no-success-flaky-report',
action='store_false',
dest='flaky_success_report',
default=True,
help="Suppress reporting flaky test successes"
"in the report at the end of the "
"run detailing flaky test results.",
)
@staticmethod
def add_force_flaky_options(add_option):
"""
Add options to the test runner that force all tests to be flaky.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable`
"""
add_option(
'--force-flaky',
action="store_true",
dest="force_flaky",
default=False,
help="If this option is specified, we will treat all tests as "
"flaky."
)
add_option(
'--max-runs',
action="store",
dest="max_runs",
type=int,
default=2,
help="If --force-flaky is specified, we will run each test at "
"most this many times (unless the test has its own flaky "
"decorator)."
)
add_option(
'--min-passes',
action="store",
dest="min_passes",
type=int,
default=1,
help="If --force-flaky is specified, we will run each test at "
"least this many times (unless the test has its own flaky "
"decorator)."
)
def _add_flaky_report(self, stream):
    """
    Baseclass override. Write details about flaky tests to the test report.

    :param stream:
        The test stream to which the report can be written.
    :type stream:
        `file`
    """
    report_body = self._stream.getvalue()
    # With --no-success-flaky-report and nothing recorded, stay silent.
    if not (report_body or self._flaky_success_report):
        return
    stream.write('===Flaky Test Report===\n\n')
    # Python 2 writes byte strings to stderr while Python 3 writes text;
    # only fall back to encoding when the text write itself fails.
    try:
        stream.write(report_body)
    except UnicodeEncodeError:
        stream.write(report_body.encode('utf-8', 'replace'))
    stream.write('\n===End Flaky Test Report===\n')
@classmethod
def _copy_flaky_attributes(cls, test, test_class):
"""
Copy flaky attributes from the test callable or class to the test.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test`
"""
test_callable = cls._get_test_callable(test)
if test_callable is None:
return
for attr, value in cls._get_flaky_attributes(test_class).items():
already_set = hasattr(test, attr)
if already_set:
continue
attr_on_callable = getattr(test_callable, attr, None)
if attr_on_callable is not None:
cls._set_flaky_attribute(test, attr, attr_on_callable)
elif value is not None:
cls._set_flaky_attribute(test, attr, value)
@staticmethod
def _get_flaky_attribute(test_item, flaky_attribute):
    """
    Gets an attribute describing the flaky test.

    :param test_item:
        The test method from which to get the attribute
    :type test_item:
        `callable` or :class:`nose.case.Test` or :class:`Function`
    :param flaky_attribute:
        The name of the attribute to get
    :type flaky_attribute:
        `unicode`
    :return:
        The test callable's attribute, or None if the test
        callable doesn't have that attribute.
    :rtype:
        varies
    """
    try:
        return getattr(test_item, flaky_attribute)
    except AttributeError:
        return None
@staticmethod
def _set_flaky_attribute(test_item, flaky_attribute, value):
    """
    Sets an attribute on a flaky test. Uses magic __dict__ since setattr
    doesn't work for bound methods.

    :param test_item:
        The test callable on which to set the attribute
    :type test_item:
        `callable` or :class:`nose.case.Test` or :class:`Function`
    :param flaky_attribute:
        The name of the attribute to set
    :type flaky_attribute:
        `unicode`
    :param value:
        The value to set the test callable's attribute to.
    :type value:
        varies
    """
    # vars() yields the instance __dict__, which lets us bypass setattr
    # (setattr fails on bound methods).
    vars(test_item)[flaky_attribute] = value
@classmethod
def _increment_flaky_attribute(cls, test_item, flaky_attribute):
    """
    Increments the value of an attribute on a flaky test.

    :param test_item:
        The test callable on which to set the attribute
    :type test_item:
        `callable` or :class:`nose.case.Test` or :class:`Function`
    :param flaky_attribute:
        The name of the attribute to set
    :type flaky_attribute:
        `unicode`
    """
    current = cls._get_flaky_attribute(test_item, flaky_attribute)
    cls._set_flaky_attribute(test_item, flaky_attribute, current + 1)
@classmethod
def _has_flaky_attributes(cls, test):
"""
Returns True if the test callable in question is marked as flaky.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`bool`
"""
current_runs = cls._get_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
return current_runs is not None
@classmethod
def _add_flaky_test_failure(cls, test, err):
    """
    Store test error information on the test callable.

    :param test:
        The flaky test on which to update the flaky attributes.
    :type test:
        :class:`nose.case.Test` or :class:`Function`
    :param err:
        Information about the test failure (from sys.exc_info())
    :type err:
        `tuple` of `class`, :class:`Exception`, `traceback`
    """
    # BUG FIX: the @classmethod decorator was stacked twice; wrapping a
    # classmethod object in another classmethod corrupts the descriptor.
    # It must be applied exactly once.
    errs = getattr(test, FlakyNames.CURRENT_ERRORS, None) or []
    # Attach the (possibly new) error list first, then append; both refer
    # to the same list object, so the order mirrors the original.
    cls._set_flaky_attribute(test, FlakyNames.CURRENT_ERRORS, errs)
    errs.append(err)
@classmethod
def _has_flaky_test_failed(cls, flaky):
    """
    Whether or not the flaky test has failed

    :param flaky:
        Dictionary of flaky attributes
    :type flaky:
        `dict` of `unicode` to varies
    :return:
        True if the flaky test should be marked as failure; False if
        it should be rerun.
    :rtype:
        `bool`
    """
    runs_left = flaky[FlakyNames.MAX_RUNS] - flaky[FlakyNames.CURRENT_RUNS]
    passes_needed = flaky[FlakyNames.MIN_PASSES] - flaky[FlakyNames.CURRENT_PASSES]
    # A retry is still worthwhile while the remaining runs can supply the
    # outstanding passes.
    if passes_needed <= runs_left:
        return False
    # Out of retries: failed unless the test has already succeeded.
    return not cls._has_flaky_test_succeeded(flaky)
@staticmethod
def _has_flaky_test_succeeded(flaky):
    """
    Whether or not the flaky test has succeeded

    :param flaky:
        Dictionary of flaky attributes
    :type flaky:
        `dict` of `unicode` to varies
    :return:
        True if the flaky test should be marked as success; False if
        it should be rerun.
    :rtype:
        `bool`
    """
    current_passes = flaky[FlakyNames.CURRENT_PASSES]
    required_passes = flaky[FlakyNames.MIN_PASSES]
    return current_passes >= required_passes
@classmethod
def _get_test_callable(cls, test):
"""
Get the test callable, from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The test declaration, callable and name that is being run
:rtype:
`callable`
"""
raise NotImplementedError # pragma: no cover
@staticmethod
def _get_test_callable_name(test):
"""
Get the name of the test callable from the test.
:param test:
The test that has raised an error or succeeded
:type test:
:class:`nose.case.Test` or :class:`pytest.Item`
:return:
The name of the test callable that is being run by the test
:rtype:
`unicode`
"""
raise NotImplementedError # pragma: no cover
@classmethod
def _make_test_flaky(cls, test, max_runs=None, min_passes=None, rerun_filter=None):
"""
Make a given test flaky.
:param test:
The test in question.
:type test:
:class:`nose.case.Test` or :class:`Function`
:param max_runs:
The value of the FlakyNames.MAX_RUNS attribute to use.
:type max_runs:
`int`
:param min_passes:
The value of the FlakyNames.MIN_PASSES attribute to use.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`nose.case.Test` or :class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
"""
attrib_dict = defaults.default_flaky_attributes(max_runs, min_passes, rerun_filter)
for attr, value in attrib_dict.items():
cls._set_flaky_attribute(test, attr, value)
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
_filehash
|
python
|
def _filehash(filepath, blocksize=4096):
sha = hashlib.sha256()
with open(filepath, 'rb') as fp:
while 1:
data = fp.read(blocksize)
if data:
sha.update(data)
else:
break
return sha
|
Return the hash object for the file `filepath', processing the file
by chunk of `blocksize'.
:type filepath: str
:param filepath: Path to file
:type blocksize: int
:param blocksize: Size of the chunk when processing the file
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L34-L53
| null |
# -*- coding: utf-8 -*-
# Source: https://github.com/tsileo/dirtools (copied here because pypi package is not updated)
import logging
import os
import hashlib
from contextlib import closing # for Python2.6 compatibility
import tarfile
import tempfile
from datetime import datetime
import json
from globster import Globster
log = logging.getLogger("dirtools")
# TODO abs=True args for .files(), .subdirs() ?
def load_patterns(exclude_file=".exclude"):
    """Load exclusion patterns from `exclude_file` and return them as a list.

    Empty lines are dropped.

    :type exclude_file: str
    :param exclude_file: File containing exclude patterns

    :rtype: list
    :return: List of patterns
    """
    # BUG FIX: the original leaked the file handle (open() with no close)
    # and, on Python 3, returned a one-shot lazy `filter` object even
    # though the documented return type is a list. Use a context manager
    # and materialize the result.
    with open(exclude_file) as fp:
        return [line for line in fp.read().split("\n") if line]
def filehash(filepath, blocksize=4096):
    """Return the hex digest of the file at `filepath`, read in
    `blocksize`-byte chunks.

    :type filepath: str
    :param filepath: Path to file

    :type blocksize: int
    :param blocksize: Size of the chunk when processing the file
    """
    # _filehash performs the chunked read and returns the hash object.
    return _filehash(filepath, blocksize).hexdigest()
class File(object):
    """Wrapper around a single file path with hashing and compression
    helpers."""

    def __init__(self, path):
        # Basename and absolute path of the wrapped file.
        self.file = os.path.basename(path)
        self.path = os.path.abspath(path)

    def _hash(self):
        """ Return the hash object. """
        return _filehash(self.path)

    def hash(self):
        """ Return the hash hexdigest. """
        return filehash(self.path)

    def compress_to(self, archive_path=None):
        """ Compress the file with gzip using tarfile.

        :type archive_path: str
        :param archive_path: Path to the archive; if None, a tempfile is
            created and its name is returned.
        """
        if archive_path is None:
            archive = tempfile.NamedTemporaryFile(delete=False)
            tar_args = ()
            tar_kwargs = {'fileobj': archive}
            _return = archive.name
        else:
            # BUG FIX: '(archive_path)' is not a tuple -- it is just the
            # string, so 'tarfile.open(*tar_args)' unpacked the path into
            # individual characters and crashed. A one-element tuple needs
            # a trailing comma.
            tar_args = (archive_path,)
            tar_kwargs = {}
            _return = archive_path
        tar_kwargs.update({'mode': 'w:gz'})
        with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
            tar.add(self.path, arcname=self.file)
        return _return
class Dir(object):
""" Wrapper for dirtools arround a path.
Try to load a .exclude file, ready to compute hashdir,
:type directory: str
:param directory: Root directory for initialization
:type exclude_file: str
:param exclude_file: File containing exclusion pattern,
.exclude by default, you can also load .gitignore files.
:type excludes: list
:param excludes: List of additionals patterns for exclusion,
by default: ['.git/', '.hg/', '.svn/']
"""
def __init__(self, directory=".", exclude_file=".exclude",
excludes=['.git/', '.hg/', '.svn/']):
if not os.path.isdir(directory):
raise TypeError("Directory must be a directory.")
self.directory = os.path.basename(directory)
self.path = os.path.abspath(directory)
self.parent = os.path.dirname(self.path)
self.exclude_file = os.path.join(self.path, exclude_file)
self.patterns = excludes
if os.path.isfile(self.exclude_file):
self.patterns.extend(load_patterns(self.exclude_file))
self.globster = Globster(self.patterns)
def hash(self, index_func=os.path.getmtime):
""" Hash for the entire directory (except excluded files) recursively.
Use mtime instead of sha256 by default for a faster hash.
>>> dir.hash(index_func=dirtools.filehash)
"""
# TODO alternative to filehash => mtime as a faster alternative
shadir = hashlib.sha256()
for f in self.files():
try:
shadir.update(str(index_func(os.path.join(self.path, f))))
except (IOError, OSError):
pass
return shadir.hexdigest()
def iterfiles(self, pattern=None, abspath=False):
""" Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for f in files:
if pattern is None or (pattern is not None and globster.match(f)):
if abspath:
yield os.path.join(root, f)
else:
yield self.relpath(os.path.join(root, f))
def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all files (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def get(self, pattern, sort_key=lambda k: k, sort_reverse=False, abspath=False):
res = self.files(pattern, sort_key=sort_key, sort_reverse=sort_reverse, abspath=abspath)
if res:
return res[0]
def itersubdirs(self, pattern=None, abspath=False):
""" Generator for all subdirs (except excluded).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for d in dirs:
if pattern is None or (pattern is not None and globster.match(d)):
if abspath:
yield os.path.join(root, d)
else:
yield self.relpath(os.path.join(root, d))
def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all subdirs (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def size(self):
""" Return directory size in bytes.
:rtype: int
:return: Total directory size in bytes.
"""
dir_size = 0
for f in self.iterfiles(abspath=True):
dir_size += os.path.getsize(f)
return dir_size
def is_excluded(self, path):
""" Return True if `path' should be excluded
given patterns in the `exclude_file'. """
match = self.globster.match(self.relpath(path))
if match:
log.debug("{0} matched {1} for exclusion".format(path, match))
return True
return False
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
# root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles
def find_projects(self, file_identifier=".project"):
""" Search all directory recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it.
"""
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects
def relpath(self, path):
""" Return a relative filepath to path from Dir path. """
return os.path.relpath(path, start=self.path)
def compress_to(self, archive_path=None):
""" Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
"""
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = []
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = [archive_path]
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname='', exclude=self.is_excluded)
return _return
class DirState(object):
""" Hold a directory state / snapshot meta-data for later comparison. """
def __init__(self, _dir=None, state=None, index_cmp=os.path.getmtime):
self._dir = _dir
self.index_cmp = index_cmp
self.state = state or self.compute_state()
def compute_state(self):
""" Generate the index. """
data = {}
data['directory'] = self._dir.path
data['files'] = list(self._dir.files())
data['subdirs'] = list(self._dir.subdirs())
data['index'] = self.index()
return data
def index(self):
index = {}
for f in self._dir.iterfiles():
try:
index[f] = self.index_cmp(os.path.join(self._dir.path, f))
except Exception as exc:
print(f, exc)
return index
def __sub__(self, other):
""" Compute diff with operator overloading.
>>> path = DirState(Dir('/path'))
>>> path_copy = DirState(Dir('/path_copy'))
>>> diff = path_copy - path
>>> # Equals to
>>> diff = compute_diff(path_copy.state, path.state)
"""
if self.index_cmp != other.index_cmp:
raise Exception('Both DirState instance must have the same index_cmp.')
return compute_diff(self.state, other.state)
def to_json(self, base_path='.', dt=None, fmt=None):
if fmt is None:
fmt = '{0}@{1}.json'
if dt is None:
dt = datetime.utcnow()
path = fmt.format(self._dir.path.strip('/').split('/')[-1],
dt.isoformat())
path = os.path.join(base_path, path)
with open(path, 'wb') as f:
f.write(json.dumps(self.state))
return path
@classmethod
def from_json(cls, path):
with open(path, 'rb') as f:
return cls(state=json.loads(f.read()))
def compute_diff(dir_base, dir_cmp):
""" Compare `dir_base' and `dir_cmp' and returns a list with
the following keys:
- deleted files `deleted'
- created files `created'
- updated files `updated'
- deleted directories `deleted_dirs'
"""
data = {}
data['deleted'] = list(set(dir_cmp['files']) - set(dir_base['files']))
data['created'] = list(set(dir_base['files']) - set(dir_cmp['files']))
data['updated'] = []
data['deleted_dirs'] = list(set(dir_cmp['subdirs']) - set(dir_base['subdirs']))
for f in set(dir_cmp['files']).intersection(set(dir_base['files'])):
if dir_base['index'][f] != dir_cmp['index'][f]:
data['updated'].append(f)
return data
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
compute_diff
|
python
|
def compute_diff(dir_base, dir_cmp):
data = {}
data['deleted'] = list(set(dir_cmp['files']) - set(dir_base['files']))
data['created'] = list(set(dir_base['files']) - set(dir_cmp['files']))
data['updated'] = []
data['deleted_dirs'] = list(set(dir_cmp['subdirs']) - set(dir_base['subdirs']))
for f in set(dir_cmp['files']).intersection(set(dir_base['files'])):
if dir_base['index'][f] != dir_cmp['index'][f]:
data['updated'].append(f)
return data
|
Compare `dir_base' and `dir_cmp' and returns a list with
the following keys:
- deleted files `deleted'
- created files `created'
- updated files `updated'
- deleted directories `deleted_dirs'
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L373-L392
| null |
# -*- coding: utf-8 -*-
# Source: https://github.com/tsileo/dirtools (copied here because pypi package is not updated)
import logging
import os
import hashlib
from contextlib import closing # for Python2.6 compatibility
import tarfile
import tempfile
from datetime import datetime
import json
from globster import Globster
log = logging.getLogger("dirtools")
# TODO abs=True args for .files(), .subdirs() ?
def load_patterns(exclude_file=".exclude"):
""" Load patterns to exclude file from `exclude_file',
and return a list of pattern.
:type exclude_file: str
:param exclude_file: File containing exclude patterns
:rtype: list
:return: List a patterns
"""
return filter(None, open(exclude_file).read().split("\n"))
def _filehash(filepath, blocksize=4096):
""" Return the hash object for the file `filepath', processing the file
by chunk of `blocksize'.
:type filepath: str
:param filepath: Path to file
:type blocksize: int
:param blocksize: Size of the chunk when processing the file
"""
sha = hashlib.sha256()
with open(filepath, 'rb') as fp:
while 1:
data = fp.read(blocksize)
if data:
sha.update(data)
else:
break
return sha
def filehash(filepath, blocksize=4096):
""" Return the hash hexdigest() for the file `filepath', processing the file
by chunk of `blocksize'.
:type filepath: str
:param filepath: Path to file
:type blocksize: int
:param blocksize: Size of the chunk when processing the file
"""
sha = _filehash(filepath, blocksize)
return sha.hexdigest()
class File(object):
def __init__(self, path):
self.file = os.path.basename(path)
self.path = os.path.abspath(path)
def _hash(self):
""" Return the hash object. """
return _filehash(self.path)
def hash(self):
""" Return the hash hexdigest. """
return filehash(self.path)
def compress_to(self, archive_path=None):
""" Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
"""
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = ()
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = (archive_path)
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname=self.file)
return _return
class Dir(object):
""" Wrapper for dirtools arround a path.
Try to load a .exclude file, ready to compute hashdir,
:type directory: str
:param directory: Root directory for initialization
:type exclude_file: str
:param exclude_file: File containing exclusion pattern,
.exclude by default, you can also load .gitignore files.
:type excludes: list
:param excludes: List of additionals patterns for exclusion,
by default: ['.git/', '.hg/', '.svn/']
"""
def __init__(self, directory=".", exclude_file=".exclude",
excludes=['.git/', '.hg/', '.svn/']):
if not os.path.isdir(directory):
raise TypeError("Directory must be a directory.")
self.directory = os.path.basename(directory)
self.path = os.path.abspath(directory)
self.parent = os.path.dirname(self.path)
self.exclude_file = os.path.join(self.path, exclude_file)
self.patterns = excludes
if os.path.isfile(self.exclude_file):
self.patterns.extend(load_patterns(self.exclude_file))
self.globster = Globster(self.patterns)
def hash(self, index_func=os.path.getmtime):
""" Hash for the entire directory (except excluded files) recursively.
Use mtime instead of sha256 by default for a faster hash.
>>> dir.hash(index_func=dirtools.filehash)
"""
# TODO alternative to filehash => mtime as a faster alternative
shadir = hashlib.sha256()
for f in self.files():
try:
shadir.update(str(index_func(os.path.join(self.path, f))))
except (IOError, OSError):
pass
return shadir.hexdigest()
def iterfiles(self, pattern=None, abspath=False):
""" Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for f in files:
if pattern is None or (pattern is not None and globster.match(f)):
if abspath:
yield os.path.join(root, f)
else:
yield self.relpath(os.path.join(root, f))
def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all files (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def get(self, pattern, sort_key=lambda k: k, sort_reverse=False, abspath=False):
res = self.files(pattern, sort_key=sort_key, sort_reverse=sort_reverse, abspath=abspath)
if res:
return res[0]
def itersubdirs(self, pattern=None, abspath=False):
""" Generator for all subdirs (except excluded).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for d in dirs:
if pattern is None or (pattern is not None and globster.match(d)):
if abspath:
yield os.path.join(root, d)
else:
yield self.relpath(os.path.join(root, d))
def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all subdirs (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def size(self):
""" Return directory size in bytes.
:rtype: int
:return: Total directory size in bytes.
"""
dir_size = 0
for f in self.iterfiles(abspath=True):
dir_size += os.path.getsize(f)
return dir_size
def is_excluded(self, path):
""" Return True if `path' should be excluded
given patterns in the `exclude_file'. """
match = self.globster.match(self.relpath(path))
if match:
log.debug("{0} matched {1} for exclusion".format(path, match))
return True
return False
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
# root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles
def find_projects(self, file_identifier=".project"):
""" Search all directory recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it.
"""
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects
def relpath(self, path):
""" Return a relative filepath to path from Dir path. """
return os.path.relpath(path, start=self.path)
def compress_to(self, archive_path=None):
""" Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
"""
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = []
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = [archive_path]
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname='', exclude=self.is_excluded)
return _return
class DirState(object):
""" Hold a directory state / snapshot meta-data for later comparison. """
def __init__(self, _dir=None, state=None, index_cmp=os.path.getmtime):
self._dir = _dir
self.index_cmp = index_cmp
self.state = state or self.compute_state()
def compute_state(self):
""" Generate the index. """
data = {}
data['directory'] = self._dir.path
data['files'] = list(self._dir.files())
data['subdirs'] = list(self._dir.subdirs())
data['index'] = self.index()
return data
def index(self):
index = {}
for f in self._dir.iterfiles():
try:
index[f] = self.index_cmp(os.path.join(self._dir.path, f))
except Exception as exc:
print(f, exc)
return index
def __sub__(self, other):
""" Compute diff with operator overloading.
>>> path = DirState(Dir('/path'))
>>> path_copy = DirState(Dir('/path_copy'))
>>> diff = path_copy - path
>>> # Equals to
>>> diff = compute_diff(path_copy.state, path.state)
"""
if self.index_cmp != other.index_cmp:
raise Exception('Both DirState instance must have the same index_cmp.')
return compute_diff(self.state, other.state)
def to_json(self, base_path='.', dt=None, fmt=None):
if fmt is None:
fmt = '{0}@{1}.json'
if dt is None:
dt = datetime.utcnow()
path = fmt.format(self._dir.path.strip('/').split('/')[-1],
dt.isoformat())
path = os.path.join(base_path, path)
with open(path, 'wb') as f:
f.write(json.dumps(self.state))
return path
@classmethod
def from_json(cls, path):
with open(path, 'rb') as f:
return cls(state=json.loads(f.read()))
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
File.compress_to
|
python
|
def compress_to(self, archive_path=None):
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = ()
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = (archive_path)
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname=self.file)
return _return
|
Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L84-L104
| null |
class File(object):
def __init__(self, path):
self.file = os.path.basename(path)
self.path = os.path.abspath(path)
def _hash(self):
""" Return the hash object. """
return _filehash(self.path)
def hash(self):
""" Return the hash hexdigest. """
return filehash(self.path)
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
Dir.hash
|
python
|
def hash(self, index_func=os.path.getmtime):
# TODO alternative to filehash => mtime as a faster alternative
shadir = hashlib.sha256()
for f in self.files():
try:
shadir.update(str(index_func(os.path.join(self.path, f))))
except (IOError, OSError):
pass
return shadir.hexdigest()
|
Hash for the entire directory (except excluded files) recursively.
Use mtime instead of sha256 by default for a faster hash.
>>> dir.hash(index_func=dirtools.filehash)
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L138-L153
|
[
"def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):\n \"\"\" Return a sorted list containing relative path of all files (recursively).\n\n :type pattern: str\n :param pattern: Unix style (glob like/gitignore like) pattern\n\n :param sort_key: key argument for sorted\n\n :param sort_reverse: reverse argument for sorted\n\n :rtype: list\n :return: List of all relative files paths.\n\n \"\"\"\n return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)\n"
] |
class Dir(object):
""" Wrapper for dirtools arround a path.
Try to load a .exclude file, ready to compute hashdir,
:type directory: str
:param directory: Root directory for initialization
:type exclude_file: str
:param exclude_file: File containing exclusion pattern,
.exclude by default, you can also load .gitignore files.
:type excludes: list
:param excludes: List of additionals patterns for exclusion,
by default: ['.git/', '.hg/', '.svn/']
"""
def __init__(self, directory=".", exclude_file=".exclude",
excludes=['.git/', '.hg/', '.svn/']):
if not os.path.isdir(directory):
raise TypeError("Directory must be a directory.")
self.directory = os.path.basename(directory)
self.path = os.path.abspath(directory)
self.parent = os.path.dirname(self.path)
self.exclude_file = os.path.join(self.path, exclude_file)
self.patterns = excludes
if os.path.isfile(self.exclude_file):
self.patterns.extend(load_patterns(self.exclude_file))
self.globster = Globster(self.patterns)
def iterfiles(self, pattern=None, abspath=False):
""" Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for f in files:
if pattern is None or (pattern is not None and globster.match(f)):
if abspath:
yield os.path.join(root, f)
else:
yield self.relpath(os.path.join(root, f))
def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all files (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def get(self, pattern, sort_key=lambda k: k, sort_reverse=False, abspath=False):
res = self.files(pattern, sort_key=sort_key, sort_reverse=sort_reverse, abspath=abspath)
if res:
return res[0]
def itersubdirs(self, pattern=None, abspath=False):
""" Generator for all subdirs (except excluded).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for d in dirs:
if pattern is None or (pattern is not None and globster.match(d)):
if abspath:
yield os.path.join(root, d)
else:
yield self.relpath(os.path.join(root, d))
def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all subdirs (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def size(self):
""" Return directory size in bytes.
:rtype: int
:return: Total directory size in bytes.
"""
dir_size = 0
for f in self.iterfiles(abspath=True):
dir_size += os.path.getsize(f)
return dir_size
def is_excluded(self, path):
""" Return True if `path' should be excluded
given patterns in the `exclude_file'. """
match = self.globster.match(self.relpath(path))
if match:
log.debug("{0} matched {1} for exclusion".format(path, match))
return True
return False
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
# root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles
def find_projects(self, file_identifier=".project"):
""" Search all directory recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it.
"""
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects
def relpath(self, path):
""" Return a relative filepath to path from Dir path. """
return os.path.relpath(path, start=self.path)
def compress_to(self, archive_path=None):
""" Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
"""
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = []
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = [archive_path]
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname='', exclude=self.is_excluded)
return _return
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
Dir.iterfiles
|
python
|
def iterfiles(self, pattern=None, abspath=False):
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for f in files:
if pattern is None or (pattern is not None and globster.match(f)):
if abspath:
yield os.path.join(root, f)
else:
yield self.relpath(os.path.join(root, f))
|
Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L155-L172
|
[
"def walk(self):\n \"\"\" Walk the directory like os.path\n (yields a 3-tuple (dirpath, dirnames, filenames)\n except it exclude all files/directories on the fly. \"\"\"\n for root, dirs, files in os.walk(self.path, topdown=True):\n # TODO relative walk, recursive call if root excluder found???\n # root_excluder = get_root_excluder(root)\n ndirs = []\n # First we exclude directories\n for d in list(dirs):\n if self.is_excluded(os.path.join(root, d)):\n dirs.remove(d)\n elif not os.path.islink(os.path.join(root, d)):\n ndirs.append(d)\n\n nfiles = []\n for fpath in (os.path.join(root, f) for f in files):\n if not self.is_excluded(fpath) and not os.path.islink(fpath):\n nfiles.append(os.path.relpath(fpath, root))\n\n yield root, ndirs, nfiles\n",
"def relpath(self, path):\n \"\"\" Return a relative filepath to path from Dir path. \"\"\"\n return os.path.relpath(path, start=self.path)\n"
] |
class Dir(object):
""" Wrapper for dirtools arround a path.
Try to load a .exclude file, ready to compute hashdir,
:type directory: str
:param directory: Root directory for initialization
:type exclude_file: str
:param exclude_file: File containing exclusion pattern,
.exclude by default, you can also load .gitignore files.
:type excludes: list
:param excludes: List of additionals patterns for exclusion,
by default: ['.git/', '.hg/', '.svn/']
"""
def __init__(self, directory=".", exclude_file=".exclude",
excludes=['.git/', '.hg/', '.svn/']):
if not os.path.isdir(directory):
raise TypeError("Directory must be a directory.")
self.directory = os.path.basename(directory)
self.path = os.path.abspath(directory)
self.parent = os.path.dirname(self.path)
self.exclude_file = os.path.join(self.path, exclude_file)
self.patterns = excludes
if os.path.isfile(self.exclude_file):
self.patterns.extend(load_patterns(self.exclude_file))
self.globster = Globster(self.patterns)
def hash(self, index_func=os.path.getmtime):
""" Hash for the entire directory (except excluded files) recursively.
Use mtime instead of sha256 by default for a faster hash.
>>> dir.hash(index_func=dirtools.filehash)
"""
# TODO alternative to filehash => mtime as a faster alternative
shadir = hashlib.sha256()
for f in self.files():
try:
shadir.update(str(index_func(os.path.join(self.path, f))))
except (IOError, OSError):
pass
return shadir.hexdigest()
def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all files (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def get(self, pattern, sort_key=lambda k: k, sort_reverse=False, abspath=False):
res = self.files(pattern, sort_key=sort_key, sort_reverse=sort_reverse, abspath=abspath)
if res:
return res[0]
def itersubdirs(self, pattern=None, abspath=False):
""" Generator for all subdirs (except excluded).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for d in dirs:
if pattern is None or (pattern is not None and globster.match(d)):
if abspath:
yield os.path.join(root, d)
else:
yield self.relpath(os.path.join(root, d))
def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all subdirs (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def size(self):
""" Return directory size in bytes.
:rtype: int
:return: Total directory size in bytes.
"""
dir_size = 0
for f in self.iterfiles(abspath=True):
dir_size += os.path.getsize(f)
return dir_size
def is_excluded(self, path):
""" Return True if `path' should be excluded
given patterns in the `exclude_file'. """
match = self.globster.match(self.relpath(path))
if match:
log.debug("{0} matched {1} for exclusion".format(path, match))
return True
return False
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
# root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles
def find_projects(self, file_identifier=".project"):
""" Search all directory recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it.
"""
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects
def relpath(self, path):
""" Return a relative filepath to path from Dir path. """
return os.path.relpath(path, start=self.path)
def compress_to(self, archive_path=None):
""" Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
"""
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = []
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = [archive_path]
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname='', exclude=self.is_excluded)
return _return
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
Dir.files
|
python
|
def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
|
Return a sorted list containing relative path of all files (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L174-L188
|
[
"def iterfiles(self, pattern=None, abspath=False):\n \"\"\" Generator for all the files not excluded recursively.\n\n Return relative path.\n\n :type pattern: str\n :param pattern: Unix style (glob like/gitignore like) pattern\n\n \"\"\"\n if pattern is not None:\n globster = Globster([pattern])\n for root, dirs, files in self.walk():\n for f in files:\n if pattern is None or (pattern is not None and globster.match(f)):\n if abspath:\n yield os.path.join(root, f)\n else:\n yield self.relpath(os.path.join(root, f))\n"
] |
class Dir(object):
""" Wrapper for dirtools arround a path.
Try to load a .exclude file, ready to compute hashdir,
:type directory: str
:param directory: Root directory for initialization
:type exclude_file: str
:param exclude_file: File containing exclusion pattern,
.exclude by default, you can also load .gitignore files.
:type excludes: list
:param excludes: List of additionals patterns for exclusion,
by default: ['.git/', '.hg/', '.svn/']
"""
def __init__(self, directory=".", exclude_file=".exclude",
excludes=['.git/', '.hg/', '.svn/']):
if not os.path.isdir(directory):
raise TypeError("Directory must be a directory.")
self.directory = os.path.basename(directory)
self.path = os.path.abspath(directory)
self.parent = os.path.dirname(self.path)
self.exclude_file = os.path.join(self.path, exclude_file)
self.patterns = excludes
if os.path.isfile(self.exclude_file):
self.patterns.extend(load_patterns(self.exclude_file))
self.globster = Globster(self.patterns)
def hash(self, index_func=os.path.getmtime):
""" Hash for the entire directory (except excluded files) recursively.
Use mtime instead of sha256 by default for a faster hash.
>>> dir.hash(index_func=dirtools.filehash)
"""
# TODO alternative to filehash => mtime as a faster alternative
shadir = hashlib.sha256()
for f in self.files():
try:
shadir.update(str(index_func(os.path.join(self.path, f))))
except (IOError, OSError):
pass
return shadir.hexdigest()
def iterfiles(self, pattern=None, abspath=False):
""" Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for f in files:
if pattern is None or (pattern is not None and globster.match(f)):
if abspath:
yield os.path.join(root, f)
else:
yield self.relpath(os.path.join(root, f))
def get(self, pattern, sort_key=lambda k: k, sort_reverse=False, abspath=False):
res = self.files(pattern, sort_key=sort_key, sort_reverse=sort_reverse, abspath=abspath)
if res:
return res[0]
def itersubdirs(self, pattern=None, abspath=False):
""" Generator for all subdirs (except excluded).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for d in dirs:
if pattern is None or (pattern is not None and globster.match(d)):
if abspath:
yield os.path.join(root, d)
else:
yield self.relpath(os.path.join(root, d))
def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all subdirs (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def size(self):
""" Return directory size in bytes.
:rtype: int
:return: Total directory size in bytes.
"""
dir_size = 0
for f in self.iterfiles(abspath=True):
dir_size += os.path.getsize(f)
return dir_size
def is_excluded(self, path):
""" Return True if `path' should be excluded
given patterns in the `exclude_file'. """
match = self.globster.match(self.relpath(path))
if match:
log.debug("{0} matched {1} for exclusion".format(path, match))
return True
return False
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
# root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles
def find_projects(self, file_identifier=".project"):
""" Search all directory recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it.
"""
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects
def relpath(self, path):
""" Return a relative filepath to path from Dir path. """
return os.path.relpath(path, start=self.path)
def compress_to(self, archive_path=None):
""" Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
"""
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = []
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = [archive_path]
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname='', exclude=self.is_excluded)
return _return
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
Dir.itersubdirs
|
python
|
def itersubdirs(self, pattern=None, abspath=False):
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for d in dirs:
if pattern is None or (pattern is not None and globster.match(d)):
if abspath:
yield os.path.join(root, d)
else:
yield self.relpath(os.path.join(root, d))
|
Generator for all subdirs (except excluded).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L195-L210
|
[
"def walk(self):\n \"\"\" Walk the directory like os.path\n (yields a 3-tuple (dirpath, dirnames, filenames)\n except it exclude all files/directories on the fly. \"\"\"\n for root, dirs, files in os.walk(self.path, topdown=True):\n # TODO relative walk, recursive call if root excluder found???\n # root_excluder = get_root_excluder(root)\n ndirs = []\n # First we exclude directories\n for d in list(dirs):\n if self.is_excluded(os.path.join(root, d)):\n dirs.remove(d)\n elif not os.path.islink(os.path.join(root, d)):\n ndirs.append(d)\n\n nfiles = []\n for fpath in (os.path.join(root, f) for f in files):\n if not self.is_excluded(fpath) and not os.path.islink(fpath):\n nfiles.append(os.path.relpath(fpath, root))\n\n yield root, ndirs, nfiles\n",
"def relpath(self, path):\n \"\"\" Return a relative filepath to path from Dir path. \"\"\"\n return os.path.relpath(path, start=self.path)\n"
] |
class Dir(object):
""" Wrapper for dirtools arround a path.
Try to load a .exclude file, ready to compute hashdir,
:type directory: str
:param directory: Root directory for initialization
:type exclude_file: str
:param exclude_file: File containing exclusion pattern,
.exclude by default, you can also load .gitignore files.
:type excludes: list
:param excludes: List of additionals patterns for exclusion,
by default: ['.git/', '.hg/', '.svn/']
"""
def __init__(self, directory=".", exclude_file=".exclude",
excludes=['.git/', '.hg/', '.svn/']):
if not os.path.isdir(directory):
raise TypeError("Directory must be a directory.")
self.directory = os.path.basename(directory)
self.path = os.path.abspath(directory)
self.parent = os.path.dirname(self.path)
self.exclude_file = os.path.join(self.path, exclude_file)
self.patterns = excludes
if os.path.isfile(self.exclude_file):
self.patterns.extend(load_patterns(self.exclude_file))
self.globster = Globster(self.patterns)
def hash(self, index_func=os.path.getmtime):
""" Hash for the entire directory (except excluded files) recursively.
Use mtime instead of sha256 by default for a faster hash.
>>> dir.hash(index_func=dirtools.filehash)
"""
# TODO alternative to filehash => mtime as a faster alternative
shadir = hashlib.sha256()
for f in self.files():
try:
shadir.update(str(index_func(os.path.join(self.path, f))))
except (IOError, OSError):
pass
return shadir.hexdigest()
def iterfiles(self, pattern=None, abspath=False):
""" Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for f in files:
if pattern is None or (pattern is not None and globster.match(f)):
if abspath:
yield os.path.join(root, f)
else:
yield self.relpath(os.path.join(root, f))
def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all files (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def get(self, pattern, sort_key=lambda k: k, sort_reverse=False, abspath=False):
res = self.files(pattern, sort_key=sort_key, sort_reverse=sort_reverse, abspath=abspath)
if res:
return res[0]
def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all subdirs (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def size(self):
""" Return directory size in bytes.
:rtype: int
:return: Total directory size in bytes.
"""
dir_size = 0
for f in self.iterfiles(abspath=True):
dir_size += os.path.getsize(f)
return dir_size
def is_excluded(self, path):
""" Return True if `path' should be excluded
given patterns in the `exclude_file'. """
match = self.globster.match(self.relpath(path))
if match:
log.debug("{0} matched {1} for exclusion".format(path, match))
return True
return False
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
# root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles
def find_projects(self, file_identifier=".project"):
""" Search all directory recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it.
"""
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects
def relpath(self, path):
""" Return a relative filepath to path from Dir path. """
return os.path.relpath(path, start=self.path)
def compress_to(self, archive_path=None):
""" Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
"""
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = []
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = [archive_path]
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname='', exclude=self.is_excluded)
return _return
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
Dir.subdirs
|
python
|
def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
|
Return a sorted list containing relative path of all subdirs (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L212-L225
|
[
"def itersubdirs(self, pattern=None, abspath=False):\n \"\"\" Generator for all subdirs (except excluded).\n\n :type pattern: str\n :param pattern: Unix style (glob like/gitignore like) pattern\n\n \"\"\"\n if pattern is not None:\n globster = Globster([pattern])\n for root, dirs, files in self.walk():\n for d in dirs:\n if pattern is None or (pattern is not None and globster.match(d)):\n if abspath:\n yield os.path.join(root, d)\n else:\n yield self.relpath(os.path.join(root, d))\n"
] |
class Dir(object):
""" Wrapper for dirtools arround a path.
Try to load a .exclude file, ready to compute hashdir,
:type directory: str
:param directory: Root directory for initialization
:type exclude_file: str
:param exclude_file: File containing exclusion pattern,
.exclude by default, you can also load .gitignore files.
:type excludes: list
:param excludes: List of additionals patterns for exclusion,
by default: ['.git/', '.hg/', '.svn/']
"""
def __init__(self, directory=".", exclude_file=".exclude",
excludes=['.git/', '.hg/', '.svn/']):
if not os.path.isdir(directory):
raise TypeError("Directory must be a directory.")
self.directory = os.path.basename(directory)
self.path = os.path.abspath(directory)
self.parent = os.path.dirname(self.path)
self.exclude_file = os.path.join(self.path, exclude_file)
self.patterns = excludes
if os.path.isfile(self.exclude_file):
self.patterns.extend(load_patterns(self.exclude_file))
self.globster = Globster(self.patterns)
def hash(self, index_func=os.path.getmtime):
""" Hash for the entire directory (except excluded files) recursively.
Use mtime instead of sha256 by default for a faster hash.
>>> dir.hash(index_func=dirtools.filehash)
"""
# TODO alternative to filehash => mtime as a faster alternative
shadir = hashlib.sha256()
for f in self.files():
try:
shadir.update(str(index_func(os.path.join(self.path, f))))
except (IOError, OSError):
pass
return shadir.hexdigest()
def iterfiles(self, pattern=None, abspath=False):
""" Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for f in files:
if pattern is None or (pattern is not None and globster.match(f)):
if abspath:
yield os.path.join(root, f)
else:
yield self.relpath(os.path.join(root, f))
def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all files (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def get(self, pattern, sort_key=lambda k: k, sort_reverse=False, abspath=False):
res = self.files(pattern, sort_key=sort_key, sort_reverse=sort_reverse, abspath=abspath)
if res:
return res[0]
def itersubdirs(self, pattern=None, abspath=False):
""" Generator for all subdirs (except excluded).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for d in dirs:
if pattern is None or (pattern is not None and globster.match(d)):
if abspath:
yield os.path.join(root, d)
else:
yield self.relpath(os.path.join(root, d))
def size(self):
""" Return directory size in bytes.
:rtype: int
:return: Total directory size in bytes.
"""
dir_size = 0
for f in self.iterfiles(abspath=True):
dir_size += os.path.getsize(f)
return dir_size
def is_excluded(self, path):
""" Return True if `path' should be excluded
given patterns in the `exclude_file'. """
match = self.globster.match(self.relpath(path))
if match:
log.debug("{0} matched {1} for exclusion".format(path, match))
return True
return False
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
# root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles
def find_projects(self, file_identifier=".project"):
""" Search all directory recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it.
"""
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects
def relpath(self, path):
""" Return a relative filepath to path from Dir path. """
return os.path.relpath(path, start=self.path)
def compress_to(self, archive_path=None):
""" Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
"""
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = []
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = [archive_path]
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname='', exclude=self.is_excluded)
return _return
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
Dir.size
|
python
|
def size(self):
dir_size = 0
for f in self.iterfiles(abspath=True):
dir_size += os.path.getsize(f)
return dir_size
|
Return directory size in bytes.
:rtype: int
:return: Total directory size in bytes.
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L227-L236
|
[
"def iterfiles(self, pattern=None, abspath=False):\n \"\"\" Generator for all the files not excluded recursively.\n\n Return relative path.\n\n :type pattern: str\n :param pattern: Unix style (glob like/gitignore like) pattern\n\n \"\"\"\n if pattern is not None:\n globster = Globster([pattern])\n for root, dirs, files in self.walk():\n for f in files:\n if pattern is None or (pattern is not None and globster.match(f)):\n if abspath:\n yield os.path.join(root, f)\n else:\n yield self.relpath(os.path.join(root, f))\n"
] |
class Dir(object):
""" Wrapper for dirtools arround a path.
Try to load a .exclude file, ready to compute hashdir,
:type directory: str
:param directory: Root directory for initialization
:type exclude_file: str
:param exclude_file: File containing exclusion pattern,
.exclude by default, you can also load .gitignore files.
:type excludes: list
:param excludes: List of additionals patterns for exclusion,
by default: ['.git/', '.hg/', '.svn/']
"""
def __init__(self, directory=".", exclude_file=".exclude",
excludes=['.git/', '.hg/', '.svn/']):
if not os.path.isdir(directory):
raise TypeError("Directory must be a directory.")
self.directory = os.path.basename(directory)
self.path = os.path.abspath(directory)
self.parent = os.path.dirname(self.path)
self.exclude_file = os.path.join(self.path, exclude_file)
self.patterns = excludes
if os.path.isfile(self.exclude_file):
self.patterns.extend(load_patterns(self.exclude_file))
self.globster = Globster(self.patterns)
def hash(self, index_func=os.path.getmtime):
""" Hash for the entire directory (except excluded files) recursively.
Use mtime instead of sha256 by default for a faster hash.
>>> dir.hash(index_func=dirtools.filehash)
"""
# TODO alternative to filehash => mtime as a faster alternative
shadir = hashlib.sha256()
for f in self.files():
try:
shadir.update(str(index_func(os.path.join(self.path, f))))
except (IOError, OSError):
pass
return shadir.hexdigest()
def iterfiles(self, pattern=None, abspath=False):
""" Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for f in files:
if pattern is None or (pattern is not None and globster.match(f)):
if abspath:
yield os.path.join(root, f)
else:
yield self.relpath(os.path.join(root, f))
def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all files (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def get(self, pattern, sort_key=lambda k: k, sort_reverse=False, abspath=False):
res = self.files(pattern, sort_key=sort_key, sort_reverse=sort_reverse, abspath=abspath)
if res:
return res[0]
def itersubdirs(self, pattern=None, abspath=False):
""" Generator for all subdirs (except excluded).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for d in dirs:
if pattern is None or (pattern is not None and globster.match(d)):
if abspath:
yield os.path.join(root, d)
else:
yield self.relpath(os.path.join(root, d))
def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all subdirs (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def is_excluded(self, path):
""" Return True if `path' should be excluded
given patterns in the `exclude_file'. """
match = self.globster.match(self.relpath(path))
if match:
log.debug("{0} matched {1} for exclusion".format(path, match))
return True
return False
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
# root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles
def find_projects(self, file_identifier=".project"):
""" Search all directory recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it.
"""
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects
def relpath(self, path):
""" Return a relative filepath to path from Dir path. """
return os.path.relpath(path, start=self.path)
def compress_to(self, archive_path=None):
""" Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
"""
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = []
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = [archive_path]
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname='', exclude=self.is_excluded)
return _return
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
Dir.is_excluded
|
python
|
def is_excluded(self, path):
match = self.globster.match(self.relpath(path))
if match:
log.debug("{0} matched {1} for exclusion".format(path, match))
return True
return False
|
Return True if `path' should be excluded
given patterns in the `exclude_file'.
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L238-L245
|
[
"def relpath(self, path):\n \"\"\" Return a relative filepath to path from Dir path. \"\"\"\n return os.path.relpath(path, start=self.path)\n"
] |
class Dir(object):
""" Wrapper for dirtools arround a path.
Try to load a .exclude file, ready to compute hashdir,
:type directory: str
:param directory: Root directory for initialization
:type exclude_file: str
:param exclude_file: File containing exclusion pattern,
.exclude by default, you can also load .gitignore files.
:type excludes: list
:param excludes: List of additionals patterns for exclusion,
by default: ['.git/', '.hg/', '.svn/']
"""
def __init__(self, directory=".", exclude_file=".exclude",
excludes=['.git/', '.hg/', '.svn/']):
if not os.path.isdir(directory):
raise TypeError("Directory must be a directory.")
self.directory = os.path.basename(directory)
self.path = os.path.abspath(directory)
self.parent = os.path.dirname(self.path)
self.exclude_file = os.path.join(self.path, exclude_file)
self.patterns = excludes
if os.path.isfile(self.exclude_file):
self.patterns.extend(load_patterns(self.exclude_file))
self.globster = Globster(self.patterns)
def hash(self, index_func=os.path.getmtime):
""" Hash for the entire directory (except excluded files) recursively.
Use mtime instead of sha256 by default for a faster hash.
>>> dir.hash(index_func=dirtools.filehash)
"""
# TODO alternative to filehash => mtime as a faster alternative
shadir = hashlib.sha256()
for f in self.files():
try:
shadir.update(str(index_func(os.path.join(self.path, f))))
except (IOError, OSError):
pass
return shadir.hexdigest()
def iterfiles(self, pattern=None, abspath=False):
""" Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for f in files:
if pattern is None or (pattern is not None and globster.match(f)):
if abspath:
yield os.path.join(root, f)
else:
yield self.relpath(os.path.join(root, f))
def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all files (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def get(self, pattern, sort_key=lambda k: k, sort_reverse=False, abspath=False):
res = self.files(pattern, sort_key=sort_key, sort_reverse=sort_reverse, abspath=abspath)
if res:
return res[0]
def itersubdirs(self, pattern=None, abspath=False):
""" Generator for all subdirs (except excluded).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for d in dirs:
if pattern is None or (pattern is not None and globster.match(d)):
if abspath:
yield os.path.join(root, d)
else:
yield self.relpath(os.path.join(root, d))
def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all subdirs (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def size(self):
""" Return directory size in bytes.
:rtype: int
:return: Total directory size in bytes.
"""
dir_size = 0
for f in self.iterfiles(abspath=True):
dir_size += os.path.getsize(f)
return dir_size
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
# root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles
def find_projects(self, file_identifier=".project"):
""" Search all directory recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it.
"""
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects
def relpath(self, path):
""" Return a relative filepath to path from Dir path. """
return os.path.relpath(path, start=self.path)
def compress_to(self, archive_path=None):
""" Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
"""
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = []
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = [archive_path]
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname='', exclude=self.is_excluded)
return _return
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
Dir.find_projects
|
python
|
def find_projects(self, file_identifier=".project"):
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects
|
Search all directory recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it.
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L269-L285
|
[
"def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):\n \"\"\" Return a sorted list containing relative path of all subdirs (recursively).\n\n :type pattern: str\n :param pattern: Unix style (glob like/gitignore like) pattern\n\n :param sort_key: key argument for sorted\n\n :param sort_reverse: reverse argument for sorted\n\n :rtype: list\n :return: List of all relative files paths.\n \"\"\"\n return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)\n"
] |
class Dir(object):
""" Wrapper for dirtools arround a path.
Try to load a .exclude file, ready to compute hashdir,
:type directory: str
:param directory: Root directory for initialization
:type exclude_file: str
:param exclude_file: File containing exclusion pattern,
.exclude by default, you can also load .gitignore files.
:type excludes: list
:param excludes: List of additionals patterns for exclusion,
by default: ['.git/', '.hg/', '.svn/']
"""
def __init__(self, directory=".", exclude_file=".exclude",
excludes=['.git/', '.hg/', '.svn/']):
if not os.path.isdir(directory):
raise TypeError("Directory must be a directory.")
self.directory = os.path.basename(directory)
self.path = os.path.abspath(directory)
self.parent = os.path.dirname(self.path)
self.exclude_file = os.path.join(self.path, exclude_file)
self.patterns = excludes
if os.path.isfile(self.exclude_file):
self.patterns.extend(load_patterns(self.exclude_file))
self.globster = Globster(self.patterns)
def hash(self, index_func=os.path.getmtime):
""" Hash for the entire directory (except excluded files) recursively.
Use mtime instead of sha256 by default for a faster hash.
>>> dir.hash(index_func=dirtools.filehash)
"""
# TODO alternative to filehash => mtime as a faster alternative
shadir = hashlib.sha256()
for f in self.files():
try:
shadir.update(str(index_func(os.path.join(self.path, f))))
except (IOError, OSError):
pass
return shadir.hexdigest()
def iterfiles(self, pattern=None, abspath=False):
""" Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for f in files:
if pattern is None or (pattern is not None and globster.match(f)):
if abspath:
yield os.path.join(root, f)
else:
yield self.relpath(os.path.join(root, f))
def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all files (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def get(self, pattern, sort_key=lambda k: k, sort_reverse=False, abspath=False):
res = self.files(pattern, sort_key=sort_key, sort_reverse=sort_reverse, abspath=abspath)
if res:
return res[0]
def itersubdirs(self, pattern=None, abspath=False):
""" Generator for all subdirs (except excluded).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for d in dirs:
if pattern is None or (pattern is not None and globster.match(d)):
if abspath:
yield os.path.join(root, d)
else:
yield self.relpath(os.path.join(root, d))
def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all subdirs (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def size(self):
""" Return directory size in bytes.
:rtype: int
:return: Total directory size in bytes.
"""
dir_size = 0
for f in self.iterfiles(abspath=True):
dir_size += os.path.getsize(f)
return dir_size
def is_excluded(self, path):
""" Return True if `path' should be excluded
given patterns in the `exclude_file'. """
match = self.globster.match(self.relpath(path))
if match:
log.debug("{0} matched {1} for exclusion".format(path, match))
return True
return False
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
# root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles
def relpath(self, path):
""" Return a relative filepath to path from Dir path. """
return os.path.relpath(path, start=self.path)
def compress_to(self, archive_path=None):
""" Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
"""
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = []
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = [archive_path]
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname='', exclude=self.is_excluded)
return _return
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
Dir.relpath
|
python
|
def relpath(self, path):
return os.path.relpath(path, start=self.path)
|
Return a relative filepath to path from Dir path.
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L287-L289
| null |
class Dir(object):
""" Wrapper for dirtools arround a path.
Try to load a .exclude file, ready to compute hashdir,
:type directory: str
:param directory: Root directory for initialization
:type exclude_file: str
:param exclude_file: File containing exclusion pattern,
.exclude by default, you can also load .gitignore files.
:type excludes: list
:param excludes: List of additionals patterns for exclusion,
by default: ['.git/', '.hg/', '.svn/']
"""
def __init__(self, directory=".", exclude_file=".exclude",
excludes=['.git/', '.hg/', '.svn/']):
if not os.path.isdir(directory):
raise TypeError("Directory must be a directory.")
self.directory = os.path.basename(directory)
self.path = os.path.abspath(directory)
self.parent = os.path.dirname(self.path)
self.exclude_file = os.path.join(self.path, exclude_file)
self.patterns = excludes
if os.path.isfile(self.exclude_file):
self.patterns.extend(load_patterns(self.exclude_file))
self.globster = Globster(self.patterns)
def hash(self, index_func=os.path.getmtime):
""" Hash for the entire directory (except excluded files) recursively.
Use mtime instead of sha256 by default for a faster hash.
>>> dir.hash(index_func=dirtools.filehash)
"""
# TODO alternative to filehash => mtime as a faster alternative
shadir = hashlib.sha256()
for f in self.files():
try:
shadir.update(str(index_func(os.path.join(self.path, f))))
except (IOError, OSError):
pass
return shadir.hexdigest()
def iterfiles(self, pattern=None, abspath=False):
""" Generator for all the files not excluded recursively.
Return relative path.
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for f in files:
if pattern is None or (pattern is not None and globster.match(f)):
if abspath:
yield os.path.join(root, f)
else:
yield self.relpath(os.path.join(root, f))
def files(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all files (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.iterfiles(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def get(self, pattern, sort_key=lambda k: k, sort_reverse=False, abspath=False):
res = self.files(pattern, sort_key=sort_key, sort_reverse=sort_reverse, abspath=abspath)
if res:
return res[0]
def itersubdirs(self, pattern=None, abspath=False):
""" Generator for all subdirs (except excluded).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
"""
if pattern is not None:
globster = Globster([pattern])
for root, dirs, files in self.walk():
for d in dirs:
if pattern is None or (pattern is not None and globster.match(d)):
if abspath:
yield os.path.join(root, d)
else:
yield self.relpath(os.path.join(root, d))
def subdirs(self, pattern=None, sort_key=lambda k: k, sort_reverse=False, abspath=False):
""" Return a sorted list containing relative path of all subdirs (recursively).
:type pattern: str
:param pattern: Unix style (glob like/gitignore like) pattern
:param sort_key: key argument for sorted
:param sort_reverse: reverse argument for sorted
:rtype: list
:return: List of all relative files paths.
"""
return sorted(self.itersubdirs(pattern, abspath=abspath), key=sort_key, reverse=sort_reverse)
def size(self):
""" Return directory size in bytes.
:rtype: int
:return: Total directory size in bytes.
"""
dir_size = 0
for f in self.iterfiles(abspath=True):
dir_size += os.path.getsize(f)
return dir_size
def is_excluded(self, path):
""" Return True if `path' should be excluded
given patterns in the `exclude_file'. """
match = self.globster.match(self.relpath(path))
if match:
log.debug("{0} matched {1} for exclusion".format(path, match))
return True
return False
def walk(self):
""" Walk the directory like os.path
(yields a 3-tuple (dirpath, dirnames, filenames)
except it exclude all files/directories on the fly. """
for root, dirs, files in os.walk(self.path, topdown=True):
# TODO relative walk, recursive call if root excluder found???
# root_excluder = get_root_excluder(root)
ndirs = []
# First we exclude directories
for d in list(dirs):
if self.is_excluded(os.path.join(root, d)):
dirs.remove(d)
elif not os.path.islink(os.path.join(root, d)):
ndirs.append(d)
nfiles = []
for fpath in (os.path.join(root, f) for f in files):
if not self.is_excluded(fpath) and not os.path.islink(fpath):
nfiles.append(os.path.relpath(fpath, root))
yield root, ndirs, nfiles
def find_projects(self, file_identifier=".project"):
""" Search all directory recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it.
"""
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects
def compress_to(self, archive_path=None):
""" Compress the directory with gzip using tarlib.
:type archive_path: str
:param archive_path: Path to the archive, if None, a tempfile is created
"""
if archive_path is None:
archive = tempfile.NamedTemporaryFile(delete=False)
tar_args = []
tar_kwargs = {'fileobj': archive}
_return = archive.name
else:
tar_args = [archive_path]
tar_kwargs = {}
_return = archive_path
tar_kwargs.update({'mode': 'w:gz'})
with closing(tarfile.open(*tar_args, **tar_kwargs)) as tar:
tar.add(self.path, arcname='', exclude=self.is_excluded)
return _return
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/utilities/dirtools.py
|
DirState.compute_state
|
python
|
def compute_state(self):
data = {}
data['directory'] = self._dir.path
data['files'] = list(self._dir.files())
data['subdirs'] = list(self._dir.subdirs())
data['index'] = self.index()
return data
|
Generate the index.
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/utilities/dirtools.py#L321-L328
|
[
"def index(self):\n index = {}\n for f in self._dir.iterfiles():\n try:\n index[f] = self.index_cmp(os.path.join(self._dir.path, f))\n except Exception as exc:\n print(f, exc)\n return index\n"
] |
class DirState(object):
""" Hold a directory state / snapshot meta-data for later comparison. """
def __init__(self, _dir=None, state=None, index_cmp=os.path.getmtime):
self._dir = _dir
self.index_cmp = index_cmp
self.state = state or self.compute_state()
def index(self):
index = {}
for f in self._dir.iterfiles():
try:
index[f] = self.index_cmp(os.path.join(self._dir.path, f))
except Exception as exc:
print(f, exc)
return index
def __sub__(self, other):
""" Compute diff with operator overloading.
>>> path = DirState(Dir('/path'))
>>> path_copy = DirState(Dir('/path_copy'))
>>> diff = path_copy - path
>>> # Equals to
>>> diff = compute_diff(path_copy.state, path.state)
"""
if self.index_cmp != other.index_cmp:
raise Exception('Both DirState instance must have the same index_cmp.')
return compute_diff(self.state, other.state)
def to_json(self, base_path='.', dt=None, fmt=None):
if fmt is None:
fmt = '{0}@{1}.json'
if dt is None:
dt = datetime.utcnow()
path = fmt.format(self._dir.path.strip('/').split('/')[-1],
dt.isoformat())
path = os.path.join(base_path, path)
with open(path, 'wb') as f:
f.write(json.dumps(self.state))
return path
@classmethod
def from_json(cls, path):
with open(path, 'rb') as f:
return cls(state=json.loads(f.read()))
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/cli.py
|
run
|
python
|
def run(pipeline_id, verbose, use_cache, dirty, force, concurrency, slave):
exitcode = 0
running = []
progress = {}
def progress_cb(report):
pid, count, success, *_, stats = report
print('\x1b[%sA' % (1+len(running)))
if pid not in progress:
running.append(pid)
progress[pid] = count, success
for pid in running:
count, success = progress[pid]
if success is None:
if count == 0:
print('\x1b[2K%s: \x1b[31m%s\x1b[0m' % (pid, 'WAITING FOR OUTPUT'))
else:
print('\x1b[2K%s: \x1b[33mRUNNING, processed %s rows\x1b[0m' % (pid, count))
else:
if success:
print('\x1b[2K%s: \x1b[32mSUCCESS, processed %s rows\x1b[0m' % (pid, count))
else:
print('\x1b[2K%s: \x1b[31mFAILURE, processed %s rows\x1b[0m' % (pid, count))
results = run_pipelines(pipeline_id, '.', use_cache,
dirty, force, concurrency,
verbose, progress_cb if not verbose else None,
slave)
if not slave:
logging.info('RESULTS:')
errd = False
for result in results:
stats = user_facing_stats(result.stats)
errd = errd or result.errors or not result.success
logging.info('%s: %s %s%s',
'SUCCESS' if result.success else 'FAILURE',
result.pipeline_id,
repr(stats) if stats is not None else '',
(
'\nERROR log from processor %s:\n+--------\n| ' % result.errors[0] +
'\n| '.join(result.errors[1:]) +
'\n+--------'
) if result.errors else '')
else:
result_obj = []
errd = False
for result in results:
errd = errd or result.errors or not result.success
stats = user_facing_stats(result.stats)
result_obj.append(dict(
success=result.success,
pipeline_id=result.pipeline_id,
stats=result.stats,
errors=result.errors
))
json.dump(result_obj, sys.stderr)
if errd:
exitcode = 1
exit(exitcode)
|
Run a pipeline by pipeline-id.
pipeline-id supports '%' wildcard for any-suffix matching,
'all' for running all pipelines and
comma-delimited list of pipeline ids
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/cli.py#L50-L117
|
[
"def run_pipelines(pipeline_id_pattern,\n root_dir,\n use_cache=True,\n dirty=False,\n force=False,\n concurrency=1,\n verbose_logs=True,\n progress_cb=None,\n slave=False):\n \"\"\"Run a pipeline by pipeline-id.\n pipeline-id supports the '%' wildcard for any-suffix matching.\n Use 'all' or '%' for running all pipelines\"\"\"\n with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency,\n thread_name_prefix='T') as executor:\n try:\n results = []\n pending_futures = set()\n done_futures = set()\n finished_futures = []\n progress_thread = None\n progress_queue = None\n status_manager = status_mgr(root_dir)\n\n if progress_cb is not None:\n progress_queue = Queue()\n progress_thread = threading.Thread(target=progress_report_handler, args=(progress_cb, progress_queue))\n progress_thread.start()\n\n all_specs = specs_to_execute(pipeline_id_pattern, root_dir, status_manager, force, dirty, results)\n\n while True:\n\n done = None\n if len(done_futures) > 0:\n done = done_futures.pop()\n finished_futures.append(done)\n done = done.result()[0]\n\n try:\n spec = all_specs.send(done)\n except StopIteration:\n spec = None\n\n if spec is None:\n # Wait for all runners to idle...\n if len(done_futures) == 0:\n if len(pending_futures) > 0:\n done_futures, pending_futures = \\\n concurrent.futures.wait(pending_futures,\n return_when=concurrent.futures.FIRST_COMPLETED)\n continue\n else:\n break\n else:\n continue\n\n if len(spec.validation_errors) > 0:\n results.append(\n ExecutionResult(spec.pipeline_id,\n False,\n {},\n ['init'] + list(map(str, spec.validation_errors)))\n )\n continue\n\n if slave:\n ps = status_manager.get(spec.pipeline_id)\n ps.init(spec.pipeline_details,\n spec.source_details,\n spec.validation_errors,\n spec.cache_hash)\n eid = gen_execution_id()\n if ps.queue_execution(eid, 'manual'):\n success, stats, errors = \\\n execute_pipeline(spec, eid,\n use_cache=use_cache)\n results.append(ExecutionResult(\n spec.pipeline_id,\n success,\n stats,\n 
errors\n ))\n else:\n results.append(\n ExecutionResult(spec.pipeline_id,\n False,\n None,\n ['Already Running'])\n )\n\n else:\n f = executor.submit(remote_execute_pipeline,\n spec,\n root_dir,\n use_cache,\n verbose_logs,\n progress_queue)\n pending_futures.add(f)\n\n for f in finished_futures:\n ret = f.result()\n results.append(ExecutionResult(*ret))\n\n except KeyboardInterrupt:\n pass\n finally:\n if slave:\n finalize()\n\n if progress_thread is not None:\n progress_queue.put(None)\n progress_thread.join()\n\n return results\n",
"def user_facing_stats(stats):\n if stats is not None and isinstance(stats, dict):\n return dict((k, v) for k, v in stats.items() if k != STATS_DPP_KEY)\n return None\n"
] |
import sys
import json
import os
import click
import requests
from .utilities.stat_utils import user_facing_stats
from .manager.logging_config import logging
from .specs import pipelines, PipelineSpec #noqa
from .status import status_mgr
from .manager import run_pipelines
@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
if ctx.invoked_subcommand is None:
click.echo('Available Pipelines:')
for spec in pipelines(): # type: PipelineSpec
ps = status_mgr().get(spec.pipeline_id)
click.echo('- {} {}{}'
.format(spec.pipeline_id,
'(*)' if ps.dirty() else '',
'(E)' if len(spec.validation_errors) > 0 else ''))
for error in spec.validation_errors:
click.echo('\t{}: {}'.format(error.short_msg,
error.long_msg))
@cli.command()
def serve():
"""Start the web server"""
from .web import app
app.run(host='0.0.0.0', debug=False, port=5000)
@cli.command()
@click.argument('pipeline_id')
@click.option('--verbose', default=False, is_flag=True)
@click.option('--use-cache/--no-use-cache', default=True,
help='Cache (or don\'t) intermediate results (if requested in the pipeline)')
@click.option('--dirty', default=False, is_flag=True,
help='Only run dirty pipelines')
@click.option('--force', default=False, is_flag=True)
@click.option('--concurrency', default=1)
@click.option('--slave', default=False, is_flag=True)
@cli.command()
def init():
"""Reset the status of all pipelines"""
status_mgr().initialize()
@cli.command()
def version():
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'VERSION')) as f:
installed = f.read().strip()
latest = requests.get('https://pypi.org/pypi/datapackage-pipelines/json').json()['info']['version']
print(f'Installed version: {installed}')
print(f'Latest version: {latest}\n')
if installed != latest:
print('Datapackage Pipelines upgrade is available, upgrade using pip:\n')
print(' python3 -m pip install -U datapackage-pipelines\n')
exit(1)
else:
exit(0)
if __name__ == "__main__":
sys.exit(cli())
# For Profiling:
# import cProfile
# sys.exit(cProfile.run('cli()', sort='cumulative'))
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/web/server.py
|
basic_auth_required
|
python
|
def basic_auth_required(view_func):
@wraps(view_func)
def wrapper(*args, **kwargs):
if app.config.get('BASIC_AUTH_ACTIVE', False):
if basic_auth.authenticate():
return view_func(*args, **kwargs)
else:
return basic_auth.challenge()
else:
return view_func(*args, **kwargs)
return wrapper
|
A decorator that can be used to protect specific views with HTTP basic
access authentication. Conditional on having BASIC_AUTH_USERNAME and
BASIC_AUTH_PASSWORD set as env vars.
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/web/server.py#L87-L102
| null |
import datetime
import os
from io import BytesIO
import logging
from functools import wraps
from copy import deepcopy
from collections import Counter
import slugify
import yaml
import mistune
import requests
from flask import \
Blueprint, Flask, render_template, abort, send_file, make_response
from flask_cors import CORS
from flask_jsonpify import jsonify
from flask_basicauth import BasicAuth
from datapackage_pipelines.status import status_mgr
from datapackage_pipelines.utilities.stat_utils import user_facing_stats
YAML_DUMPER = yaml.CDumper if 'CDumper' in yaml.__dict__ else yaml.Dumper
def datestr(x):
if x is None:
return ''
return str(datetime.datetime.fromtimestamp(x))
def yamlize(x):
ret = yaml.dump(x, default_flow_style=False, Dumper=YAML_DUMPER)
return ret
markdown = mistune.Markdown(hard_wrap=True)
status = status_mgr()
def make_hierarchies(statuses):
def group(l):
pipelines = list(filter(lambda x: len(x['id']) == 1, l))
children_ = list(filter(lambda x: len(x['id']) > 1, l))
groups_ = {}
for child in children_:
child_key = child['id'].pop(0)
groups_.setdefault(child_key, []).append(child)
children_ = dict(
(k, group(v))
for k, v in groups_.items()
)
for p in pipelines:
p['id'] = p['id'][0]
return {
'pipelines': pipelines,
'children': children_
}
def flatten(children_):
for k, v in children_.items():
v['children'] = flatten(v['children'])
child_keys = list(v['children'].keys())
if len(child_keys) == 1 and len(v['pipelines']) == 0:
child_key = child_keys[0]
children_['/'.join([k, child_key])] = v['children'][child_key]
del children_[k]
return children_
statuses = [
{
'id': st['id'].split('/'),
'title': st.get('title'),
'stats': st.get('stats'),
'slug': st.get('slug')
}
for st in statuses
]
groups = group(statuses)
children = groups.get('children', {})
groups['children'] = flatten(children)
return groups
blueprint = Blueprint('dpp', 'dpp')
@blueprint.route("")
@blueprint.route("<path:pipeline_path>")
@basic_auth_required
def main(pipeline_path=None):
pipeline_ids = sorted(status.all_pipeline_ids())
# If we have a pipeline_path, filter the pipeline ids.
if pipeline_path is not None:
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
pipeline_ids = [p for p in pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in pipeline_ids:
pipeline_status = status.get(pipeline_id)
ex = pipeline_status.get_last_execution()
success_ex = pipeline_status.get_last_successful_execution()
pipeline_obj = {
'id': pipeline_id.lstrip('./'),
'title': pipeline_status.pipeline_details.get('title'),
'stats': user_facing_stats(ex.stats) if ex else None,
'slug': slugify.slugify(pipeline_id),
'trigger': ex.trigger if ex else None,
'error_log': pipeline_status.errors(),
'state': pipeline_status.state(),
'pipeline': pipeline_status.pipeline_details,
'message': pipeline_status.state().capitalize(),
'dirty': pipeline_status.dirty(),
'runnable': pipeline_status.runnable(),
'class': {'INIT': 'primary',
'QUEUED': 'primary',
'INVALID': 'danger',
'RUNNING': 'warning',
'SUCCEEDED': 'success',
'FAILED': 'danger'
}[pipeline_status.state()],
'ended': datestr(ex.finish_time) if ex else None,
'started': datestr(ex.start_time) if ex else None,
'last_success':
datestr(success_ex.finish_time) if success_ex else None,
}
statuses.append(pipeline_obj)
def state_and_not_dirty(state, p):
return p.get('state') == state and not p.get('dirty')
def state_or_dirty(state, p):
return p.get('state') == state or p.get('dirty')
categories = [
['ALL', 'All Pipelines', lambda _, __: True],
['INVALID', "Can't start", lambda _, p: not p['runnable']],
['QUEUED', 'Waiting to run', lambda state, p: p['state'] == state],
['RUNNING', 'Running', state_and_not_dirty],
['FAILED', 'Failed Execution', state_and_not_dirty],
['SUCCEEDED', 'Successful Execution', state_and_not_dirty],
]
for item in categories:
item.append([p for p in deepcopy(statuses)
if item[2](item[0], p)])
item.append(len(item[-1]))
item.append(make_hierarchies(item[-2]))
return render_template('dashboard.html',
categories=categories,
yamlize=yamlize,
markdown=markdown)
@blueprint.route("api/raw/status")
@basic_auth_required
def pipeline_raw_api_status():
pipelines = sorted(status.all_statuses(), key=lambda x: x.get('id'))
for pipeline in pipelines:
# can get the full details from api/raw/<path:pipeline_id>
for attr in ["pipeline", "reason", "error_log"]:
if attr in pipeline:
del pipeline[attr]
return jsonify(pipelines)
@blueprint.route("api/raw/<path:pipeline_id>")
@basic_auth_required
def pipeline_raw_api(pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
last_execution = pipeline_status.get_last_execution()
last_successful_execution = pipeline_status.get_last_successful_execution()
ret = {
"id": pipeline_id,
"cache_hash": pipeline_status.cache_hash,
"dirty": pipeline_status.dirty(),
"queued": last_execution.queue_time if last_execution else None,
"started": last_execution.start_time if last_execution else None,
"ended": last_execution.finish_time if last_execution else None,
"reason": last_execution.log if last_execution else None,
"error_log": pipeline_status.errors(),
"stats": last_execution.stats if last_execution else None,
"success": last_execution.success if last_execution else None,
"last_success":
last_successful_execution.finish_time
if last_successful_execution else None,
"trigger": last_execution.trigger if last_execution else None,
"pipeline": pipeline_status.pipeline_details,
"source": pipeline_status.source_spec,
"message": pipeline_status.state().capitalize(),
"state": pipeline_status.state(),
}
return jsonify(ret)
@blueprint.route("api/<field>/<path:pipeline_id>")
@basic_auth_required
def pipeline_api(field, pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
ret = None
if field == 'pipeline':
ret = pipeline_status.pipeline_details
ret = yamlize(ret)
elif field == 'source':
ret = pipeline_status.source_spec
ret = yamlize(ret)
elif field == 'log':
ex = pipeline_status.get_last_execution()
ret = ex.log if ex else ''
else:
abort(400)
ret = ret.split('\n')
ret = {'text': ret}
return jsonify(ret)
def _make_badge_response(subject, text, colour):
image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format(
subject, text, colour)
r = requests.get(image_url)
buffer_image = BytesIO(r.content)
buffer_image.seek(0)
res = make_response(send_file(buffer_image, mimetype='image/svg+xml'))
res.headers['Cache-Control'] = \
'max-age=0, no-cache, no-store, must-revalidate'
res.headers['Expires'] = '0'
return res
@blueprint.route("badge/<path:pipeline_id>")
def badge(pipeline_id):
    """An individual pipeline status badge.

    Renders green with a record count on success, red on failure, and
    light gray when the pipeline is unknown or has no execution result.
    """
    if not pipeline_id.startswith('./'):
        pipeline_id = './' + pipeline_id
    pipeline_status = status.get(pipeline_id)
    status_color = 'lightgray'
    if pipeline_status.pipeline_details:
        status_text = pipeline_status.state().lower()
        last_execution = pipeline_status.get_last_execution()
        success = last_execution.success if last_execution else None
        if success is True:
            # last_execution exists when success is True, but its stats
            # may still be None -- guard before calling .get().
            stats = last_execution.stats or {}
            record_count = stats.get('count_of_rows')
            if record_count is not None:
                status_text += ' (%d records)' % record_count
            status_color = 'brightgreen'
        elif success is False:
            status_color = 'red'
    else:
        status_text = "not found"
    return _make_badge_response('pipeline', status_text, status_color)
@blueprint.route("badge/collection/<path:pipeline_path>")
def badge_collection(pipeline_path):
    '''Status badge for a collection of pipelines.'''
    if not pipeline_path.startswith('./'):
        pipeline_path = './' + pipeline_path
    # Keep only the pipelines living under the requested path prefix.
    matching_ids = [
        pid for pid in sorted(status.all_pipeline_ids())
        if pid.startswith(pipeline_path)
    ]
    states = []
    for pid in matching_ids:
        pipeline_status = status.get(pid)
        if pipeline_status is None:
            abort(404)
        states.append(pipeline_status.state().lower())
    status_color = 'lightgray'
    counts = Counter(states)
    if counts:
        # All green only when every pipeline succeeded; any failure is
        # red; otherwise (mixed, none failed) yellow.
        if len(counts) == 1 and counts['succeeded'] > 0:
            status_color = 'brightgreen'
        elif counts['failed'] > 0:
            status_color = 'red'
        elif counts['failed'] == 0:
            status_color = 'yellow'
        status_text = ', '.join(
            '{} {}'.format(n, state) for state, n in counts.items())
    else:
        status_text = "not found"
    return _make_badge_response('pipelines', status_text, status_color)
# Flask application setup (module level): pretty-print JSON responses and,
# when both basic-auth env vars are present, protect views with HTTP basic
# authentication (see basic_auth_required).
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
if os.environ.get('DPP_BASIC_AUTH_USERNAME', False) \
        and os.environ.get('DPP_BASIC_AUTH_PASSWORD', False):
    app.config['BASIC_AUTH_USERNAME'] = os.environ['DPP_BASIC_AUTH_USERNAME']
    app.config['BASIC_AUTH_PASSWORD'] = os.environ['DPP_BASIC_AUTH_PASSWORD']
    app.config['BASIC_AUTH_ACTIVE'] = True
basic_auth = BasicAuth(app)
CORS(app)
# Serve under a configurable base path; normalise to a trailing slash.
url_prefix = os.environ.get('DPP_BASE_PATH', '/')
if not url_prefix.endswith('/'):
    url_prefix += '/'
logging.info('Serving on path %s', url_prefix)
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/web/server.py
|
badge
|
python
|
def badge(pipeline_id):
'''An individual pipeline status'''
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
status_color = 'lightgray'
if pipeline_status.pipeline_details:
status_text = pipeline_status.state().lower()
last_execution = pipeline_status.get_last_execution()
success = last_execution.success if last_execution else None
if success is True:
stats = last_execution.stats if last_execution else None
record_count = stats.get('count_of_rows')
if record_count is not None:
status_text += ' (%d records)' % record_count
status_color = 'brightgreen'
elif success is False:
status_color = 'red'
else:
status_text = "not found"
return _make_badge_response('pipeline', status_text, status_color)
|
An individual pipeline status
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/web/server.py#L267-L288
|
[
"def _make_badge_response(subject, text, colour):\n image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format(\n subject, text, colour)\n r = requests.get(image_url)\n buffer_image = BytesIO(r.content)\n buffer_image.seek(0)\n res = make_response(send_file(buffer_image, mimetype='image/svg+xml'))\n res.headers['Cache-Control'] = \\\n 'max-age=0, no-cache, no-store, must-revalidate'\n res.headers['Expires'] = '0'\n return res\n",
"def get_last_execution(self) -> Optional[PipelineExecution]:\n return self.last_execution\n",
"def state(self):\n if not self.runnable():\n return 'INVALID'\n last_execution = self.get_last_execution()\n if last_execution is None:\n return 'INIT'\n if last_execution.success is None:\n if last_execution.start_time is None:\n return 'QUEUED'\n else:\n return 'RUNNING'\n if last_execution.success:\n return 'SUCCEEDED'\n else:\n return 'FAILED'\n",
"def get(self, _id) -> PipelineStatus:\n return PipelineStatus(self.backend, _id)\n"
] |
import datetime
import os
from io import BytesIO
import logging
from functools import wraps
from copy import deepcopy
from collections import Counter
import slugify
import yaml
import mistune
import requests
from flask import \
Blueprint, Flask, render_template, abort, send_file, make_response
from flask_cors import CORS
from flask_jsonpify import jsonify
from flask_basicauth import BasicAuth
from datapackage_pipelines.status import status_mgr
from datapackage_pipelines.utilities.stat_utils import user_facing_stats
YAML_DUMPER = yaml.CDumper if 'CDumper' in yaml.__dict__ else yaml.Dumper
def datestr(x):
    """Render a Unix timestamp as a human-readable local datetime string.

    Returns the empty string when *x* is None.
    """
    if x is None:
        return ''
    return '{}'.format(datetime.datetime.fromtimestamp(x))
def yamlize(x):
    """Serialize *x* to a block-style YAML string."""
    return yaml.dump(x, default_flow_style=False, Dumper=YAML_DUMPER)
markdown = mistune.Markdown(hard_wrap=True)
status = status_mgr()
def make_hierarchies(statuses):
    """Arrange flat pipeline statuses into a nested path hierarchy.

    Each status id is split on '/'; statuses sharing a leading path
    segment are grouped under that segment, and chains of groups with a
    single child and no pipelines of their own are collapsed into a
    combined 'a/b' key.

    :param statuses: iterable of dicts with an 'id' ('/'-separated path)
        and optional 'title', 'stats' and 'slug' keys.
    :return: dict with 'pipelines' (list) and 'children' (dict) keys.
    """

    def group(items):
        # Leaves (single-segment ids) stay at this level; deeper entries
        # are bucketed by their first segment and recursed on.
        pipelines = list(filter(lambda x: len(x['id']) == 1, items))
        children_ = list(filter(lambda x: len(x['id']) > 1, items))
        groups_ = {}
        for child in children_:
            child_key = child['id'].pop(0)
            groups_.setdefault(child_key, []).append(child)
        children_ = dict(
            (k, group(v))
            for k, v in groups_.items()
        )
        for p in pipelines:
            p['id'] = p['id'][0]
        return {
            'pipelines': pipelines,
            'children': children_
        }

    def flatten(children_):
        # Iterate over a snapshot: the body inserts the merged 'k/child'
        # key and deletes 'k', and mutating a dict while iterating it
        # directly is unsafe (size/order changes mid-iteration).
        for k, v in list(children_.items()):
            v['children'] = flatten(v['children'])
            child_keys = list(v['children'].keys())
            if len(child_keys) == 1 and len(v['pipelines']) == 0:
                child_key = child_keys[0]
                children_['/'.join([k, child_key])] = v['children'][child_key]
                del children_[k]
        return children_

    statuses = [
        {
            'id': st['id'].split('/'),
            'title': st.get('title'),
            'stats': st.get('stats'),
            'slug': st.get('slug')
        }
        for st in statuses
    ]
    groups = group(statuses)
    children = groups.get('children', {})
    groups['children'] = flatten(children)
    return groups
def basic_auth_required(view_func):
    """
    A decorator that can be used to protect specific views with HTTP basic
    access authentication. Conditional on having BASIC_AUTH_USERNAME and
    BASIC_AUTH_PASSWORD set as env vars.
    """
    @wraps(view_func)
    def wrapper(*args, **kwargs):
        # When auth is disabled, pass straight through.
        if not app.config.get('BASIC_AUTH_ACTIVE', False):
            return view_func(*args, **kwargs)
        if basic_auth.authenticate():
            return view_func(*args, **kwargs)
        return basic_auth.challenge()
    return wrapper
blueprint = Blueprint('dpp', 'dpp')
@blueprint.route("")
@blueprint.route("<path:pipeline_path>")
@basic_auth_required
def main(pipeline_path=None):
    """Dashboard view: render all (or a path-filtered subset of)
    pipelines, bucketed into status categories for the template."""
    pipeline_ids = sorted(status.all_pipeline_ids())

    # If we have a pipeline_path, filter the pipeline ids.
    if pipeline_path is not None:
        if not pipeline_path.startswith('./'):
            pipeline_path = './' + pipeline_path
        pipeline_ids = [p for p in pipeline_ids if p.startswith(pipeline_path)]

    statuses = []
    for pipeline_id in pipeline_ids:
        pipeline_status = status.get(pipeline_id)
        ex = pipeline_status.get_last_execution()
        success_ex = pipeline_status.get_last_successful_execution()
        # One template-facing record per pipeline; execution-derived
        # fields are None when the pipeline never ran.
        # NOTE(review): lstrip('./') strips any run of leading '.' and
        # '/' characters, not the literal './' prefix -- fine for ids
        # like './x' but would over-strip ids starting with '..'.
        pipeline_obj = {
            'id': pipeline_id.lstrip('./'),
            'title': pipeline_status.pipeline_details.get('title'),
            'stats': user_facing_stats(ex.stats) if ex else None,
            'slug': slugify.slugify(pipeline_id),
            'trigger': ex.trigger if ex else None,
            'error_log': pipeline_status.errors(),
            'state': pipeline_status.state(),
            'pipeline': pipeline_status.pipeline_details,
            'message': pipeline_status.state().capitalize(),
            'dirty': pipeline_status.dirty(),
            'runnable': pipeline_status.runnable(),
            # Bootstrap CSS class per pipeline state.
            'class': {'INIT': 'primary',
                      'QUEUED': 'primary',
                      'INVALID': 'danger',
                      'RUNNING': 'warning',
                      'SUCCEEDED': 'success',
                      'FAILED': 'danger'
                      }[pipeline_status.state()],
            'ended': datestr(ex.finish_time) if ex else None,
            'started': datestr(ex.start_time) if ex else None,
            'last_success':
                datestr(success_ex.finish_time) if success_ex else None,
        }
        statuses.append(pipeline_obj)

    # Category predicates: called as pred(category_key, pipeline_obj).
    def state_and_not_dirty(state, p):
        return p.get('state') == state and not p.get('dirty')

    def state_or_dirty(state, p):
        return p.get('state') == state or p.get('dirty')

    # Each category starts as [key, label, predicate]; the loop below
    # appends the matching statuses, their count, and a hierarchy tree.
    categories = [
        ['ALL', 'All Pipelines', lambda _, __: True],
        ['INVALID', "Can't start", lambda _, p: not p['runnable']],
        ['QUEUED', 'Waiting to run', lambda state, p: p['state'] == state],
        ['RUNNING', 'Running', state_and_not_dirty],
        ['FAILED', 'Failed Execution', state_and_not_dirty],
        ['SUCCEEDED', 'Successful Execution', state_and_not_dirty],
    ]
    for item in categories:
        item.append([p for p in deepcopy(statuses)
                     if item[2](item[0], p)])
        item.append(len(item[-1]))
        item.append(make_hierarchies(item[-2]))
    return render_template('dashboard.html',
                           categories=categories,
                           yamlize=yamlize,
                           markdown=markdown)
@blueprint.route("api/raw/status")
@basic_auth_required
def pipeline_raw_api_status():
    """JSON summary of all pipelines, sorted by id, with bulky fields
    removed (full details live at api/raw/<path:pipeline_id>)."""
    pipelines = sorted(status.all_statuses(), key=lambda x: x.get('id'))
    for pipeline in pipelines:
        # Strip the heavyweight fields from the listing.
        for attr in ("pipeline", "reason", "error_log"):
            pipeline.pop(attr, None)
    return jsonify(pipelines)
@blueprint.route("api/raw/<path:pipeline_id>")
@basic_auth_required
def pipeline_raw_api(pipeline_id):
    """Full raw JSON status for a single pipeline; 404 when unknown."""
    if not pipeline_id.startswith('./'):
        pipeline_id = './' + pipeline_id
    pipeline_status = status.get(pipeline_id)
    if not pipeline_status.pipeline_details:
        abort(404)
    last_execution = pipeline_status.get_last_execution()
    last_successful_execution = pipeline_status.get_last_successful_execution()
    # Execution-derived fields are None when the pipeline never ran.
    ret = {
        "id": pipeline_id,
        "cache_hash": pipeline_status.cache_hash,
        "dirty": pipeline_status.dirty(),

        "queued": last_execution.queue_time if last_execution else None,
        "started": last_execution.start_time if last_execution else None,
        "ended": last_execution.finish_time if last_execution else None,
        "reason": last_execution.log if last_execution else None,
        "error_log": pipeline_status.errors(),
        "stats": last_execution.stats if last_execution else None,
        "success": last_execution.success if last_execution else None,
        "last_success":
            last_successful_execution.finish_time
            if last_successful_execution else None,
        "trigger": last_execution.trigger if last_execution else None,

        "pipeline": pipeline_status.pipeline_details,
        "source": pipeline_status.source_spec,

        "message": pipeline_status.state().capitalize(),
        "state": pipeline_status.state(),
    }

    return jsonify(ret)
return jsonify(ret)
@blueprint.route("api/<field>/<path:pipeline_id>")
@basic_auth_required
def pipeline_api(field, pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
ret = None
if field == 'pipeline':
ret = pipeline_status.pipeline_details
ret = yamlize(ret)
elif field == 'source':
ret = pipeline_status.source_spec
ret = yamlize(ret)
elif field == 'log':
ex = pipeline_status.get_last_execution()
ret = ex.log if ex else ''
else:
abort(400)
ret = ret.split('\n')
ret = {'text': ret}
return jsonify(ret)
def _make_badge_response(subject, text, colour):
    """Proxy an SVG badge from img.shields.io and disable client caching.

    NOTE(review): subject/text are interpolated into the URL unescaped;
    shields.io treats '-', '_' and spaces specially -- confirm inputs
    never contain those characters, or escape them.
    """
    image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format(
        subject, text, colour)
    r = requests.get(image_url)
    buffer_image = BytesIO(r.content)
    buffer_image.seek(0)
    res = make_response(send_file(buffer_image, mimetype='image/svg+xml'))
    # Badges reflect live status -- forbid any caching.
    res.headers['Cache-Control'] = \
        'max-age=0, no-cache, no-store, must-revalidate'
    res.headers['Expires'] = '0'
    return res
@blueprint.route("badge/<path:pipeline_id>")
# NOTE(review): the route above looks like it belongs to the individual
# `badge` view (its argument name does not match this function's
# parameter) -- confirm whether it was stacked here intentionally.
@blueprint.route("badge/collection/<path:pipeline_path>")
def badge_collection(pipeline_path):
    '''Status badge for a collection of pipelines.'''
    all_pipeline_ids = sorted(status.all_pipeline_ids())
    if not pipeline_path.startswith('./'):
        pipeline_path = './' + pipeline_path
    # Filter pipeline ids to only include those that start with pipeline_path.
    path_pipeline_ids = \
        [p for p in all_pipeline_ids if p.startswith(pipeline_path)]
    statuses = []
    for pipeline_id in path_pipeline_ids:
        pipeline_status = status.get(pipeline_id)
        if pipeline_status is None:
            abort(404)
        status_text = pipeline_status.state().lower()
        statuses.append(status_text)
    status_color = 'lightgray'
    status_counter = Counter(statuses)
    if status_counter:
        # All green only when every pipeline succeeded; any failure wins
        # red; otherwise (mixed states, none failed) yellow.
        if len(status_counter) == 1 and status_counter['succeeded'] > 0:
            status_color = 'brightgreen'
        elif status_counter['failed'] > 0:
            status_color = 'red'
        elif status_counter['failed'] == 0:
            status_color = 'yellow'
        status_text = \
            ', '.join(['{} {}'.format(v, k)
                       for k, v in status_counter.items()])
    else:
        status_text = "not found"
    return _make_badge_response('pipelines', status_text, status_color)
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
if os.environ.get('DPP_BASIC_AUTH_USERNAME', False) \
and os.environ.get('DPP_BASIC_AUTH_PASSWORD', False):
app.config['BASIC_AUTH_USERNAME'] = os.environ['DPP_BASIC_AUTH_USERNAME']
app.config['BASIC_AUTH_PASSWORD'] = os.environ['DPP_BASIC_AUTH_PASSWORD']
app.config['BASIC_AUTH_ACTIVE'] = True
basic_auth = BasicAuth(app)
CORS(app)
url_prefix = os.environ.get('DPP_BASE_PATH', '/')
if not url_prefix.endswith('/'):
url_prefix += '/'
logging.info('Serving on path %s', url_prefix)
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/web/server.py
|
badge_collection
|
python
|
def badge_collection(pipeline_path):
'''Status badge for a collection of pipelines.'''
all_pipeline_ids = sorted(status.all_pipeline_ids())
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
# Filter pipeline ids to only include those that start with pipeline_path.
path_pipeline_ids = \
[p for p in all_pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in path_pipeline_ids:
pipeline_status = status.get(pipeline_id)
if pipeline_status is None:
abort(404)
status_text = pipeline_status.state().lower()
statuses.append(status_text)
status_color = 'lightgray'
status_counter = Counter(statuses)
if status_counter:
if len(status_counter) == 1 and status_counter['succeeded'] > 0:
status_color = 'brightgreen'
elif status_counter['failed'] > 0:
status_color = 'red'
elif status_counter['failed'] == 0:
status_color = 'yellow'
status_text = \
', '.join(['{} {}'.format(v, k)
for k, v in status_counter.items()])
else:
status_text = "not found"
return _make_badge_response('pipelines', status_text, status_color)
|
Status badge for a collection of pipelines.
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/web/server.py#L292-L326
|
[
"def _make_badge_response(subject, text, colour):\n image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format(\n subject, text, colour)\n r = requests.get(image_url)\n buffer_image = BytesIO(r.content)\n buffer_image.seek(0)\n res = make_response(send_file(buffer_image, mimetype='image/svg+xml'))\n res.headers['Cache-Control'] = \\\n 'max-age=0, no-cache, no-store, must-revalidate'\n res.headers['Expires'] = '0'\n return res\n",
"def state(self):\n if not self.runnable():\n return 'INVALID'\n last_execution = self.get_last_execution()\n if last_execution is None:\n return 'INIT'\n if last_execution.success is None:\n if last_execution.start_time is None:\n return 'QUEUED'\n else:\n return 'RUNNING'\n if last_execution.success:\n return 'SUCCEEDED'\n else:\n return 'FAILED'\n",
"def get(self, _id) -> PipelineStatus:\n return PipelineStatus(self.backend, _id)\n",
"def all_pipeline_ids(self):\n return self.backend.all_pipeline_ids()\n"
] |
import datetime
import os
from io import BytesIO
import logging
from functools import wraps
from copy import deepcopy
from collections import Counter
import slugify
import yaml
import mistune
import requests
from flask import \
Blueprint, Flask, render_template, abort, send_file, make_response
from flask_cors import CORS
from flask_jsonpify import jsonify
from flask_basicauth import BasicAuth
from datapackage_pipelines.status import status_mgr
from datapackage_pipelines.utilities.stat_utils import user_facing_stats
YAML_DUMPER = yaml.CDumper if 'CDumper' in yaml.__dict__ else yaml.Dumper
def datestr(x):
if x is None:
return ''
return str(datetime.datetime.fromtimestamp(x))
def yamlize(x):
ret = yaml.dump(x, default_flow_style=False, Dumper=YAML_DUMPER)
return ret
markdown = mistune.Markdown(hard_wrap=True)
status = status_mgr()
def make_hierarchies(statuses):
def group(l):
pipelines = list(filter(lambda x: len(x['id']) == 1, l))
children_ = list(filter(lambda x: len(x['id']) > 1, l))
groups_ = {}
for child in children_:
child_key = child['id'].pop(0)
groups_.setdefault(child_key, []).append(child)
children_ = dict(
(k, group(v))
for k, v in groups_.items()
)
for p in pipelines:
p['id'] = p['id'][0]
return {
'pipelines': pipelines,
'children': children_
}
def flatten(children_):
for k, v in children_.items():
v['children'] = flatten(v['children'])
child_keys = list(v['children'].keys())
if len(child_keys) == 1 and len(v['pipelines']) == 0:
child_key = child_keys[0]
children_['/'.join([k, child_key])] = v['children'][child_key]
del children_[k]
return children_
statuses = [
{
'id': st['id'].split('/'),
'title': st.get('title'),
'stats': st.get('stats'),
'slug': st.get('slug')
}
for st in statuses
]
groups = group(statuses)
children = groups.get('children', {})
groups['children'] = flatten(children)
return groups
def basic_auth_required(view_func):
"""
A decorator that can be used to protect specific views with HTTP basic
access authentication. Conditional on having BASIC_AUTH_USERNAME and
BASIC_AUTH_PASSWORD set as env vars.
"""
@wraps(view_func)
def wrapper(*args, **kwargs):
if app.config.get('BASIC_AUTH_ACTIVE', False):
if basic_auth.authenticate():
return view_func(*args, **kwargs)
else:
return basic_auth.challenge()
else:
return view_func(*args, **kwargs)
return wrapper
blueprint = Blueprint('dpp', 'dpp')
@blueprint.route("")
@blueprint.route("<path:pipeline_path>")
@basic_auth_required
def main(pipeline_path=None):
pipeline_ids = sorted(status.all_pipeline_ids())
# If we have a pipeline_path, filter the pipeline ids.
if pipeline_path is not None:
if not pipeline_path.startswith('./'):
pipeline_path = './' + pipeline_path
pipeline_ids = [p for p in pipeline_ids if p.startswith(pipeline_path)]
statuses = []
for pipeline_id in pipeline_ids:
pipeline_status = status.get(pipeline_id)
ex = pipeline_status.get_last_execution()
success_ex = pipeline_status.get_last_successful_execution()
pipeline_obj = {
'id': pipeline_id.lstrip('./'),
'title': pipeline_status.pipeline_details.get('title'),
'stats': user_facing_stats(ex.stats) if ex else None,
'slug': slugify.slugify(pipeline_id),
'trigger': ex.trigger if ex else None,
'error_log': pipeline_status.errors(),
'state': pipeline_status.state(),
'pipeline': pipeline_status.pipeline_details,
'message': pipeline_status.state().capitalize(),
'dirty': pipeline_status.dirty(),
'runnable': pipeline_status.runnable(),
'class': {'INIT': 'primary',
'QUEUED': 'primary',
'INVALID': 'danger',
'RUNNING': 'warning',
'SUCCEEDED': 'success',
'FAILED': 'danger'
}[pipeline_status.state()],
'ended': datestr(ex.finish_time) if ex else None,
'started': datestr(ex.start_time) if ex else None,
'last_success':
datestr(success_ex.finish_time) if success_ex else None,
}
statuses.append(pipeline_obj)
def state_and_not_dirty(state, p):
return p.get('state') == state and not p.get('dirty')
def state_or_dirty(state, p):
return p.get('state') == state or p.get('dirty')
categories = [
['ALL', 'All Pipelines', lambda _, __: True],
['INVALID', "Can't start", lambda _, p: not p['runnable']],
['QUEUED', 'Waiting to run', lambda state, p: p['state'] == state],
['RUNNING', 'Running', state_and_not_dirty],
['FAILED', 'Failed Execution', state_and_not_dirty],
['SUCCEEDED', 'Successful Execution', state_and_not_dirty],
]
for item in categories:
item.append([p for p in deepcopy(statuses)
if item[2](item[0], p)])
item.append(len(item[-1]))
item.append(make_hierarchies(item[-2]))
return render_template('dashboard.html',
categories=categories,
yamlize=yamlize,
markdown=markdown)
@blueprint.route("api/raw/status")
@basic_auth_required
def pipeline_raw_api_status():
pipelines = sorted(status.all_statuses(), key=lambda x: x.get('id'))
for pipeline in pipelines:
# can get the full details from api/raw/<path:pipeline_id>
for attr in ["pipeline", "reason", "error_log"]:
if attr in pipeline:
del pipeline[attr]
return jsonify(pipelines)
@blueprint.route("api/raw/<path:pipeline_id>")
@basic_auth_required
def pipeline_raw_api(pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
last_execution = pipeline_status.get_last_execution()
last_successful_execution = pipeline_status.get_last_successful_execution()
ret = {
"id": pipeline_id,
"cache_hash": pipeline_status.cache_hash,
"dirty": pipeline_status.dirty(),
"queued": last_execution.queue_time if last_execution else None,
"started": last_execution.start_time if last_execution else None,
"ended": last_execution.finish_time if last_execution else None,
"reason": last_execution.log if last_execution else None,
"error_log": pipeline_status.errors(),
"stats": last_execution.stats if last_execution else None,
"success": last_execution.success if last_execution else None,
"last_success":
last_successful_execution.finish_time
if last_successful_execution else None,
"trigger": last_execution.trigger if last_execution else None,
"pipeline": pipeline_status.pipeline_details,
"source": pipeline_status.source_spec,
"message": pipeline_status.state().capitalize(),
"state": pipeline_status.state(),
}
return jsonify(ret)
@blueprint.route("api/<field>/<path:pipeline_id>")
@basic_auth_required
def pipeline_api(field, pipeline_id):
if not pipeline_id.startswith('./'):
pipeline_id = './' + pipeline_id
pipeline_status = status.get(pipeline_id)
if not pipeline_status.pipeline_details:
abort(404)
ret = None
if field == 'pipeline':
ret = pipeline_status.pipeline_details
ret = yamlize(ret)
elif field == 'source':
ret = pipeline_status.source_spec
ret = yamlize(ret)
elif field == 'log':
ex = pipeline_status.get_last_execution()
ret = ex.log if ex else ''
else:
abort(400)
ret = ret.split('\n')
ret = {'text': ret}
return jsonify(ret)
def _make_badge_response(subject, text, colour):
image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format(
subject, text, colour)
r = requests.get(image_url)
buffer_image = BytesIO(r.content)
buffer_image.seek(0)
res = make_response(send_file(buffer_image, mimetype='image/svg+xml'))
res.headers['Cache-Control'] = \
'max-age=0, no-cache, no-store, must-revalidate'
res.headers['Expires'] = '0'
return res
@blueprint.route("badge/<path:pipeline_id>")
def badge(pipeline_id):
    '''An individual pipeline status'''
    if not pipeline_id.startswith('./'):
        pipeline_id = './' + pipeline_id
    pipeline_status = status.get(pipeline_id)
    status_color = 'lightgray'
    if pipeline_status.pipeline_details:
        status_text = pipeline_status.state().lower()
        last_execution = pipeline_status.get_last_execution()
        success = last_execution.success if last_execution else None
        if success is True:
            stats = last_execution.stats if last_execution else None
            # NOTE(review): stats may still be None here even when
            # success is True -- .get() would then raise AttributeError;
            # confirm stats is always populated on success.
            record_count = stats.get('count_of_rows')
            if record_count is not None:
                status_text += ' (%d records)' % record_count
            status_color = 'brightgreen'
        elif success is False:
            status_color = 'red'
    else:
        status_text = "not found"
    return _make_badge_response('pipeline', status_text, status_color)
@blueprint.route("badge/collection/<path:pipeline_path>")
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
if os.environ.get('DPP_BASIC_AUTH_USERNAME', False) \
and os.environ.get('DPP_BASIC_AUTH_PASSWORD', False):
app.config['BASIC_AUTH_USERNAME'] = os.environ['DPP_BASIC_AUTH_USERNAME']
app.config['BASIC_AUTH_PASSWORD'] = os.environ['DPP_BASIC_AUTH_PASSWORD']
app.config['BASIC_AUTH_ACTIVE'] = True
basic_auth = BasicAuth(app)
CORS(app)
url_prefix = os.environ.get('DPP_BASE_PATH', '/')
if not url_prefix.endswith('/'):
url_prefix += '/'
logging.info('Serving on path %s', url_prefix)
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
frictionlessdata/datapackage-pipelines
|
datapackage_pipelines/manager/runner.py
|
run_pipelines
|
python
|
def run_pipelines(pipeline_id_pattern,
root_dir,
use_cache=True,
dirty=False,
force=False,
concurrency=1,
verbose_logs=True,
progress_cb=None,
slave=False):
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency,
thread_name_prefix='T') as executor:
try:
results = []
pending_futures = set()
done_futures = set()
finished_futures = []
progress_thread = None
progress_queue = None
status_manager = status_mgr(root_dir)
if progress_cb is not None:
progress_queue = Queue()
progress_thread = threading.Thread(target=progress_report_handler, args=(progress_cb, progress_queue))
progress_thread.start()
all_specs = specs_to_execute(pipeline_id_pattern, root_dir, status_manager, force, dirty, results)
while True:
done = None
if len(done_futures) > 0:
done = done_futures.pop()
finished_futures.append(done)
done = done.result()[0]
try:
spec = all_specs.send(done)
except StopIteration:
spec = None
if spec is None:
# Wait for all runners to idle...
if len(done_futures) == 0:
if len(pending_futures) > 0:
done_futures, pending_futures = \
concurrent.futures.wait(pending_futures,
return_when=concurrent.futures.FIRST_COMPLETED)
continue
else:
break
else:
continue
if len(spec.validation_errors) > 0:
results.append(
ExecutionResult(spec.pipeline_id,
False,
{},
['init'] + list(map(str, spec.validation_errors)))
)
continue
if slave:
ps = status_manager.get(spec.pipeline_id)
ps.init(spec.pipeline_details,
spec.source_details,
spec.validation_errors,
spec.cache_hash)
eid = gen_execution_id()
if ps.queue_execution(eid, 'manual'):
success, stats, errors = \
execute_pipeline(spec, eid,
use_cache=use_cache)
results.append(ExecutionResult(
spec.pipeline_id,
success,
stats,
errors
))
else:
results.append(
ExecutionResult(spec.pipeline_id,
False,
None,
['Already Running'])
)
else:
f = executor.submit(remote_execute_pipeline,
spec,
root_dir,
use_cache,
verbose_logs,
progress_queue)
pending_futures.add(f)
for f in finished_futures:
ret = f.result()
results.append(ExecutionResult(*ret))
except KeyboardInterrupt:
pass
finally:
if slave:
finalize()
if progress_thread is not None:
progress_queue.put(None)
progress_thread.join()
return results
|
Run a pipeline by pipeline-id.
pipeline-id supports the '%' wildcard for any-suffix matching.
Use 'all' or '%' for running all pipelines
|
train
|
https://github.com/frictionlessdata/datapackage-pipelines/blob/3a34bbdf042d13c3bec5eef46ff360ee41403874/datapackage_pipelines/manager/runner.py#L161-L274
|
[
"def execute_pipeline(spec,\n execution_id,\n trigger='manual',\n use_cache=True):\n\n debug = trigger == 'manual' or os.environ.get('DPP_DEBUG')\n logging.info(\"%s RUNNING %s\", execution_id[:8], spec.pipeline_id)\n\n loop = asyncio.get_event_loop()\n\n if debug:\n logging.info(\"%s Collecting dependencies\", execution_id[:8])\n dependencies = {}\n for dep in spec.pipeline_details.get('dependencies', []):\n if 'pipeline' in dep:\n dep_pipeline_id = dep['pipeline']\n pipeline_execution = status_mgr().get(dep_pipeline_id).get_last_successful_execution()\n if pipeline_execution is not None:\n result_dp = pipeline_execution.stats.get(STATS_DPP_KEY, {}).get(STATS_OUT_DP_URL_KEY)\n if result_dp is not None:\n dependencies[dep_pipeline_id] = result_dp\n\n if debug:\n logging.info(\"%s Running async task\", execution_id[:8])\n\n pipeline_task = \\\n asyncio.ensure_future(async_execute_pipeline(spec.pipeline_id,\n spec.pipeline_details.get('pipeline', []),\n spec.path,\n trigger,\n execution_id,\n use_cache,\n dependencies,\n debug))\n try:\n if debug:\n logging.info(\"%s Waiting for completion\", execution_id[:8])\n return loop.run_until_complete(pipeline_task)\n except KeyboardInterrupt:\n logging.info(\"Caught keyboard interrupt. Cancelling tasks...\")\n pipeline_task.cancel()\n loop.run_forever()\n logging.info(\"Caught keyboard interrupt. DONE!\")\n raise KeyboardInterrupt()\n",
"def status_mgr(root_dir='.') -> StatusManager:\n global _status\n global _root_dir\n\n if _status is not None and _root_dir == root_dir:\n return _status\n _root_dir = root_dir\n _status = StatusManager(host=os.environ.get('DPP_REDIS_HOST'), root_dir=root_dir)\n return _status\n",
"def finalize():\n loop = asyncio.get_event_loop()\n loop.close()\n",
"def gen_execution_id():\n return str(uuid.uuid4())\n",
"def specs_to_execute(argument, root_dir, status_manager, ignore_missing_deps, dirty, results):\n\n pending = set()\n executed = set()\n completed = set()\n\n for spec in pipelines(ignore_missing_deps=ignore_missing_deps,\n root_dir=root_dir, status_manager=status_manager):\n if match_pipeline_id(argument, spec.pipeline_id):\n\n # If only dirty was requested\n if dirty:\n ps = status_manager.get(spec.pipeline_id)\n if not ps.dirty():\n continue\n\n pending.add(spec.pipeline_id)\n\n while len(pending) > 0:\n to_yield = None\n for spec in pipelines(ignore_missing_deps=ignore_missing_deps,\n root_dir=root_dir, status_manager=status_manager):\n pipeline_id = spec.pipeline_id\n if pipeline_id not in pending:\n continue\n\n unresolved = set(spec.dependencies) - completed\n if len(unresolved) == 0:\n to_yield = spec\n break\n\n unresolved = unresolved - executed - pending\n if len(unresolved) > 0:\n # No point in waiting for dependencies that will never be resolved\n to_yield = spec\n break\n\n if to_yield is not None:\n executed.add(to_yield.pipeline_id)\n pending.remove(to_yield.pipeline_id)\n completed_pipeline_id = yield(to_yield)\n if completed_pipeline_id is not None:\n completed.add(completed_pipeline_id)\n\n yield None\n"
] |
import sys
import os
import json
import concurrent
import subprocess
import threading
from queue import Queue
from collections import namedtuple
from ..utilities.execution_id import gen_execution_id
from ..specs import pipelines, PipelineSpec #noqa
from ..status import status_mgr
from ..lib.internal.sink import SINK_MAGIC
from .tasks import execute_pipeline, finalize
ExecutionResult = namedtuple('ExecutionResult',
['pipeline_id', 'success', 'stats', 'errors'])
ProgressReport = namedtuple('ProgressReport',
['pipeline_id', 'row_count', 'success', 'errors', 'stats'])
MAGIC = 'INFO :(sink): '+SINK_MAGIC
def remote_execute_pipeline(spec, root_dir, use_cache, verbose, progress_report_queue):
    """Run one pipeline in a slave `dpp run` subprocess and collect its
    result.

    The child writes its log to stderr; lines starting with MAGIC carry
    a row-count progress number, and the final stderr line is a JSON
    results document. Progress and the final outcome are mirrored onto
    *progress_report_queue* (when given) as ProgressReport tuples.

    :return: tuple (pipeline_id, success, stats, errors).
    """
    args = ['dpp',
            'run',
            '--slave',
            '--use-cache' if use_cache else '--no-use-cache',
            spec.pipeline_id]
    popen = subprocess.Popen(args, encoding='utf8',
                             stderr=subprocess.PIPE,
                             stdout=subprocess.DEVNULL,
                             env=os.environ.copy(),
                             cwd=root_dir)
    progress = 0
    lines = []
    for line in popen.stderr:
        if len(line) == 0:
            continue
        if line.startswith(MAGIC):
            # Progress marker line -- report it, don't log it.
            if progress_report_queue is not None:
                progress = int(line[len(MAGIC):].strip())
                progress_report_queue.put(ProgressReport(spec.pipeline_id, progress, None, None, None))
            continue
        # Echo every buffered line except the most recent one, so the
        # last stderr line (the JSON results) is withheld from the log.
        while len(lines) > 0:
            log = lines.pop(0)
            if verbose:
                sys.stderr.write('[%s:%s] >>> %s' %
                                 (spec.pipeline_id, threading.current_thread().name, log))
        lines.append(line)
    if len(lines) > 0:
        results = lines.pop(0)
    else:
        # Child produced no output at all.
        if progress_report_queue is not None:
            progress_report_queue.put(ProgressReport(spec.pipeline_id,
                                                     progress,
                                                     False,
                                                     ['Empty'],
                                                     None
                                                     ))
        return (spec.pipeline_id, False, {}, ['Empty'])
    try:
        results = json.loads(results)
        if progress_report_queue is not None:
            progress_report_queue.put(ProgressReport(spec.pipeline_id,
                                                     progress,
                                                     results[0]['success'],
                                                     results[0]['errors'],
                                                     results[0]['stats']
                                                     ))
    except json.decoder.JSONDecodeError:
        # Last line was not JSON -- the child most likely crashed.
        if verbose:
            sys.stderr.write('[%s:%s] >>> %s' % (spec.pipeline_id, threading.current_thread().name, results))
        if progress_report_queue is not None:
            progress_report_queue.put(ProgressReport(spec.pipeline_id,
                                                     progress,
                                                     False,
                                                     ['Crashed', results],
                                                     None
                                                     ))
        return (spec.pipeline_id, False, {}, [results])
    results = results[0]
    return (spec.pipeline_id,
            results['success'],
            results['stats'],
            results['errors'])
def progress_report_handler(callback, queue):
    """Drain *queue*, invoking *callback* on each report.

    Runs until a ``None`` sentinel is received, which terminates the loop.
    Intended to run in a dedicated worker thread.
    """
    while True:
        item = queue.get()
        if item is None:
            break
        callback(item)
def match_pipeline_id(arg, pipeline_id):
    """Return True when *pipeline_id* matches the selector *arg*.

    Supported selector forms: ``'all'`` (match everything), a comma-separated
    list of selectors (match any), a ``'prefix%'`` wildcard, or an exact id.
    """
    if arg == 'all':
        return True
    if ',' in arg:
        parts = arg.split(',')
        return any(match_pipeline_id(part, pipeline_id) for part in parts)
    if arg.endswith('%'):
        prefix = arg[:-1]
        return pipeline_id.startswith(prefix)
    return pipeline_id == arg
def specs_to_execute(argument, root_dir, status_manager, ignore_missing_deps, dirty, results):
    """Generator yielding pipeline specs to run, in dependency order.

    The consumer drives this generator with ``gen.send(completed_pipeline_id)``
    after finishing a pipeline, which marks that id as completed so specs
    depending on it become eligible. Yields ``None`` once nothing is pending.

    # NOTE(review): the ``results`` parameter is unused in this body — confirm
    # whether it is vestigial or consumed by a caller-side contract.
    """
    pending = set()     # ids selected but not yet yielded
    executed = set()    # ids already yielded
    completed = set()   # ids reported back as finished via send()
    # Select all pipelines matching the selector (optionally only dirty ones).
    for spec in pipelines(ignore_missing_deps=ignore_missing_deps,
                          root_dir=root_dir, status_manager=status_manager):
        if match_pipeline_id(argument, spec.pipeline_id):
            # If only dirty was requested
            if dirty:
                ps = status_manager.get(spec.pipeline_id)
                if not ps.dirty():
                    continue
            pending.add(spec.pipeline_id)
    while len(pending) > 0:
        to_yield = None
        # Re-enumerate specs each round to find one whose dependencies are met.
        for spec in pipelines(ignore_missing_deps=ignore_missing_deps,
                              root_dir=root_dir, status_manager=status_manager):
            pipeline_id = spec.pipeline_id
            if pipeline_id not in pending:
                continue
            unresolved = set(spec.dependencies) - completed
            if len(unresolved) == 0:
                # All dependencies finished — this spec can run now.
                to_yield = spec
                break
            unresolved = unresolved - executed - pending
            if len(unresolved) > 0:
                # No point in waiting for dependencies that will never be resolved
                to_yield = spec
                break
        # NOTE(review): if no spec was selected this round, the loop spins
        # until a send() grows ``completed`` — confirm callers always send.
        if to_yield is not None:
            executed.add(to_yield.pipeline_id)
            pending.remove(to_yield.pipeline_id)
            # Suspend here; the caller sends back the id of a finished pipeline.
            completed_pipeline_id = yield(to_yield)
            if completed_pipeline_id is not None:
                completed.add(completed_pipeline_id)
    # Sentinel: no more work.
    yield None
|
kennethreitz/records
|
records.py
|
isexception
|
python
|
def isexception(obj):
    """Return True when *obj* is an Exception instance or Exception subclass."""
    if isinstance(obj, Exception):
        return True
    return bool(isclass(obj) and issubclass(obj, Exception))
|
Given an object, return a boolean indicating whether it is an instance
or subclass of :py:class:`Exception`.
|
train
|
https://github.com/kennethreitz/records/blob/ecd857266c5e7830d657cbe0196816314790563b/records.py#L16-L24
| null |
# -*- coding: utf-8 -*-
import os
from sys import stdout
from collections import OrderedDict
from contextlib import contextmanager
from inspect import isclass
import tablib
from docopt import docopt
from sqlalchemy import create_engine, exc, inspect, text
DATABASE_URL = os.environ.get('DATABASE_URL')
class Record(object):
    """A single row produced by a database query."""
    __slots__ = ('_keys', '_values')

    def __init__(self, keys, values):
        self._keys = keys
        self._values = values

        # A row must carry exactly one value per column name.
        assert len(self._keys) == len(self._values)

    def keys(self):
        """Return the list of column names from the query."""
        return self._keys

    def values(self):
        """Return the list of values from the query."""
        return self._values

    def __repr__(self):
        return '<Record {}>'.format(self.export('json')[1:-1])

    def __getitem__(self, key):
        # Integer keys index positionally into the row.
        if isinstance(key, int):
            return self.values()[key]

        # String keys look up by column name; an ambiguous name is an error.
        keys = self.keys()
        if key in keys:
            if keys.count(key) > 1:
                raise KeyError("Record contains multiple '{}' fields.".format(key))
            return self.values()[keys.index(key)]

        raise KeyError("Record contains no '{}' field.".format(key))

    def __getattr__(self, key):
        # Column names double as attributes; missing columns surface as
        # AttributeError so normal attribute protocols keep working.
        try:
            return self[key]
        except KeyError as e:
            raise AttributeError(e)

    def __dir__(self):
        # Standard attributes plus one generated entry per column name.
        standard = dir(super(Record, self))
        return sorted(standard + [str(k) for k in self.keys()])

    def get(self, key, default=None):
        """Return the value for *key*, or *default* when the column is absent."""
        try:
            return self[key]
        except KeyError:
            return default

    def as_dict(self, ordered=False):
        """Return the row as a dictionary (insertion-ordered when requested)."""
        pairs = zip(self.keys(), self.values())
        if ordered:
            return OrderedDict(pairs)
        return dict(pairs)

    @property
    def dataset(self):
        """A Tablib Dataset containing just this row."""
        data = tablib.Dataset()
        data.headers = self.keys()
        data.append(_reduce_datetimes(self.values()))
        return data

    def export(self, format, **kwargs):
        """Export the row to the given format (via Tablib)."""
        return self.dataset.export(format, **kwargs)
class RecordCollection(object):
    """A set of excellent Records from a query.

    Rows are pulled lazily from the underlying row generator and cached in
    ``_all_rows``, so the collection can be indexed, sliced and iterated
    multiple times.
    """

    def __init__(self, rows):
        self._rows = rows        # underlying (lazy) row generator
        self._all_rows = []      # rows fetched so far
        self.pending = True      # False once the generator is exhausted

    def __repr__(self):
        return '<RecordCollection size={} pending={}>'.format(len(self), self.pending)

    def __iter__(self):
        """Iterate over all rows, consuming the underlying generator
        only when necessary."""
        i = 0
        while True:
            # Other code may have iterated between yields,
            # so always check the cache.
            if i < len(self):
                yield self[i]
            else:
                # Prevent StopIteration bubbling from the generator,
                # following https://www.python.org/dev/peps/pep-0479/
                try:
                    yield next(self)
                except StopIteration:
                    return
            i += 1

    def next(self):
        # Python 2 style alias.
        return self.__next__()

    def __next__(self):
        """Fetch one more row from the generator and cache it."""
        try:
            nextrow = next(self._rows)
            self._all_rows.append(nextrow)
            return nextrow
        except StopIteration:
            self.pending = False
            raise StopIteration('RecordCollection contains no more rows.')

    def __getitem__(self, key):
        is_int = isinstance(key, int)

        # Convert RecordCollection[1] into slice.
        if is_int:
            key = slice(key, key + 1)

        # Fetch rows until the slice can be satisfied. BUGFIX: test
        # ``key.stop is None`` first — the original evaluated
        # ``len(self) < key.stop`` before the None check, which raises
        # TypeError on Python 3 for open-ended slices such as rc[1:].
        while key.stop is None or len(self) < key.stop:
            try:
                next(self)
            except StopIteration:
                break

        rows = self._all_rows[key]
        if is_int:
            return rows[0]
        else:
            return RecordCollection(iter(rows))

    def __len__(self):
        return len(self._all_rows)

    def export(self, format, **kwargs):
        """Export the RecordCollection to a given format (courtesy of Tablib)."""
        return self.dataset.export(format, **kwargs)

    @property
    def dataset(self):
        """A Tablib Dataset representation of the RecordCollection."""
        # Create a new Tablib Dataset.
        data = tablib.Dataset()

        # If the RecordCollection is empty, just return the empty set.
        # Check number of rows by typecasting to list.
        if len(list(self)) == 0:
            return data

        # Set the column names as headers on Tablib Dataset.
        first = self[0]
        data.headers = first.keys()

        for row in self.all():
            row = _reduce_datetimes(row.values())
            data.append(row)

        return data

    def all(self, as_dict=False, as_ordereddict=False):
        """Returns a list of all rows for the RecordCollection. If they haven't
        been fetched yet, consume the iterator and cache the results."""
        # By calling list it calls the __iter__ method.
        rows = list(self)

        if as_dict:
            return [r.as_dict() for r in rows]
        elif as_ordereddict:
            return [r.as_dict(ordered=True) for r in rows]

        return rows

    def as_dict(self, ordered=False):
        """Return all rows as (optionally ordered) dictionaries."""
        return self.all(as_dict=not(ordered), as_ordereddict=ordered)

    def first(self, default=None, as_dict=False, as_ordereddict=False):
        """Returns a single record for the RecordCollection, or `default`. If
        `default` is an instance or subclass of Exception, then raise it
        instead of returning it."""
        # Try to get a record, or return/raise default.
        try:
            record = self[0]
        except IndexError:
            if isexception(default):
                raise default
            return default

        # Cast and return.
        if as_dict:
            return record.as_dict()
        elif as_ordereddict:
            return record.as_dict(ordered=True)
        else:
            return record

    def one(self, default=None, as_dict=False, as_ordereddict=False):
        """Returns a single record for the RecordCollection, ensuring that it
        is the only record, or returns `default`. If `default` is an instance
        or subclass of Exception, then raise it instead of returning it."""
        # Ensure that we don't have more than one row.
        try:
            self[1]
        except IndexError:
            return self.first(default=default, as_dict=as_dict, as_ordereddict=as_ordereddict)
        else:
            raise ValueError('RecordCollection contained more than one row. '
                             'Expects only one row when using '
                             'RecordCollection.one')

    def scalar(self, default=None):
        """Returns the first column of the first row, or `default`."""
        row = self.one()
        return row[0] if row else default
class Database(object):
    """A Database. Encapsulates a url and an SQLAlchemy engine with a pool of
    connections.
    """

    def __init__(self, db_url=None, **kwargs):
        # If no db_url was provided, fallback to $DATABASE_URL.
        self.db_url = db_url or DATABASE_URL

        if not self.db_url:
            raise ValueError('You must provide a db_url.')

        # Create an engine.
        self._engine = create_engine(self.db_url, **kwargs)
        self.open = True

    def close(self):
        """Closes the Database."""
        self._engine.dispose()
        self.open = False

    def __enter__(self):
        return self

    def __exit__(self, exc, val, traceback):
        self.close()

    def __repr__(self):
        return '<Database open={}>'.format(self.open)

    def get_table_names(self, internal=False):
        """Returns a list of table names for the connected database."""
        # Setup SQLAlchemy for Database inspection.
        # NOTE(review): the ``internal`` flag is currently unused — confirm intent.
        return inspect(self._engine).get_table_names()

    def get_connection(self):
        """Get a connection to this Database. Connections are retrieved from a
        pool.

        :raises exc.ResourceClosedError: when the Database has been closed.
        """
        if not self.open:
            raise exc.ResourceClosedError('Database closed.')

        return Connection(self._engine.connect())

    def query(self, query, fetchall=False, **params):
        """Executes the given SQL query against the Database. Parameters can,
        optionally, be provided. Returns a RecordCollection, which can be
        iterated over to get result rows as dictionaries.
        """
        with self.get_connection() as conn:
            return conn.query(query, fetchall, **params)

    def bulk_query(self, query, *multiparams):
        """Bulk insert or update."""
        with self.get_connection() as conn:
            conn.bulk_query(query, *multiparams)

    def query_file(self, path, fetchall=False, **params):
        """Like Database.query, but takes a filename to load a query from."""
        with self.get_connection() as conn:
            return conn.query_file(path, fetchall, **params)

    def bulk_query_file(self, path, *multiparams):
        """Like Database.bulk_query, but takes a filename to load a query from."""
        with self.get_connection() as conn:
            conn.bulk_query_file(path, *multiparams)

    @contextmanager
    def transaction(self):
        """A context manager for executing a transaction on this Database.

        Commits on success; on failure rolls back and re-raises. BUGFIX:
        the original used a bare ``except:`` and did not re-raise, so any
        error (including KeyboardInterrupt) was silently swallowed and
        callers believed the transaction succeeded. The connection is
        always returned to the pool.
        """
        conn = self.get_connection()
        tx = conn.transaction()
        try:
            yield conn
            tx.commit()
        except Exception:
            tx.rollback()
            raise
        finally:
            conn.close()
class Connection(object):
    """A Database connection."""

    def __init__(self, connection):
        self._conn = connection
        self.open = not connection.closed

    def close(self):
        """Close the underlying connection."""
        self._conn.close()
        self.open = False

    def __enter__(self):
        return self

    def __exit__(self, exc, val, traceback):
        self.close()

    def __repr__(self):
        return '<Connection open={}>'.format(self.open)

    def query(self, query, fetchall=False, **params):
        """Executes the given SQL query against the connected Database.
        Parameters can, optionally, be provided. Returns a RecordCollection,
        which can be iterated over to get result rows as dictionaries.
        """
        # Execute the given query.
        cursor = self._conn.execute(text(query), **params)  # TODO: PARAMS GO HERE

        # Row-by-row Record generator.
        row_gen = (Record(cursor.keys(), row) for row in cursor)

        # Convert psycopg2 results to RecordCollection.
        results = RecordCollection(row_gen)

        # Fetch all results if desired.
        if fetchall:
            results.all()

        return results

    def bulk_query(self, query, *multiparams):
        """Bulk insert or update."""
        self._conn.execute(text(query), *multiparams)

    @staticmethod
    def _read_sql_file(path):
        """Validate *path* and return the SQL text it contains.

        Raises IOError when the path is missing or is a directory.
        (Extracted: query_file and bulk_query_file duplicated this logic,
        and bulk_query_file's not-found message had a stray quote typo.)
        """
        # If path doesn't exist.
        if not os.path.exists(path):
            raise IOError("File '{}' not found!".format(path))

        # If it's a directory.
        if os.path.isdir(path):
            raise IOError("'{}' is a directory!".format(path))

        # Read the given .sql file into memory.
        with open(path) as f:
            return f.read()

    def query_file(self, path, fetchall=False, **params):
        """Like Connection.query, but takes a filename to load a query from."""
        query = self._read_sql_file(path)

        # Defer processing to self.query method.
        return self.query(query=query, fetchall=fetchall, **params)

    def bulk_query_file(self, path, *multiparams):
        """Like Connection.bulk_query, but takes a filename to load a query
        from.
        """
        query = self._read_sql_file(path)
        self._conn.execute(text(query), *multiparams)

    def transaction(self):
        """Returns a transaction object. Call ``commit`` or ``rollback``
        on the returned object as appropriate."""
        return self._conn.begin()
def _reduce_datetimes(row):
"""Receives a row, converts datetimes to strings."""
row = list(row)
for i in range(len(row)):
if hasattr(row[i], 'isoformat'):
row[i] = row[i].isoformat()
return tuple(row)
def cli():
    """Command-line entry point: run a SQL query (file path or literal
    string) against a database and print or export the results."""
    supported_formats = 'csv tsv json yaml html xls xlsx dbf latex ods'.split()
    formats_lst = ", ".join(supported_formats)
    # docopt parses this usage text to produce the arguments dict below.
    cli_docs = """Records: SQL for Humans™
A Kenneth Reitz project.
Usage:
records <query> [<format>] [<params>...] [--url=<url>]
records (-h | --help)
Options:
-h --help Show this screen.
--url=<url> The database URL to use. Defaults to $DATABASE_URL.
Supported Formats:
%(formats_lst)s
Note: xls, xlsx, dbf, and ods formats are binary, and should only be
used with redirected output e.g. '$ records sql xls > sql.xls'.
Query Parameters:
Query parameters can be specified in key=value format, and injected
into your query in :key format e.g.:
$ records 'select * from repos where language ~= :lang' lang=python
Notes:
- While you may specify a database connection string with --url, records
will automatically default to the value of $DATABASE_URL, if available.
- Query is intended to be the path of a SQL file, however a query string
can be provided instead. Use this feature discernfully; it's dangerous.
- Records is intended for report-style exports of database queries, and
has not yet been optimized for extremely large data dumps.
""" % dict(formats_lst=formats_lst)

    # Parse the command-line arguments.
    arguments = docopt(cli_docs)

    query = arguments['<query>']
    params = arguments['<params>']
    format = arguments.get('<format>')
    # docopt can mis-assign the first key=value parameter to <format>;
    # detect that and move it back into the params list.
    if format and "=" in format:
        del arguments['<format>']
        arguments['<params>'].append(format)
        format = None
    if format and format not in supported_formats:
        print('%s format not supported.' % format)
        print('Supported formats are %s.' % formats_lst)
        exit(62)

    # Can't send an empty list if params aren't expected.
    try:
        params = dict([i.split('=') for i in params])
    except ValueError:
        print('Parameters must be given in key=value format.')
        exit(64)

    # Be ready to fail on missing packages.
    try:
        # Create the Database.
        db = Database(arguments['--url'])

        # Execute the query, if it is a found file.
        if os.path.isfile(query):
            rows = db.query_file(query, **params)

        # Execute the query, if it appears to be a query string.
        elif len(query.split()) > 2:
            rows = db.query(query, **params)

        # Otherwise, say the file wasn't found.
        else:
            print('The given query could not be found.')
            exit(66)

        # Print results in desired format.
        if format:
            content = rows.export(format)
            if isinstance(content, bytes):
                # Binary formats go straight to the stdout buffer.
                print_bytes(content)
            else:
                print(content)
        else:
            print(rows.dataset)
    except ImportError as impexc:
        # A driver or export backend is not installed.
        print(impexc.msg)
        print("Used database or format require a package, which is missing.")
        print("Try to install missing packages.")
        exit(60)
def print_bytes(content):
    """Write binary *content* to stdout.

    Uses the underlying binary buffer when available; falls back to a plain
    write for streams without a ``buffer`` attribute.
    """
    try:
        out = stdout.buffer
    except AttributeError:
        stdout.write(content)
    else:
        out.write(content)
# Run the CLI when executed directly.
if __name__ == '__main__':
cli()
|
kennethreitz/records
|
records.py
|
_reduce_datetimes
|
python
|
def _reduce_datetimes(row):
row = list(row)
for i in range(len(row)):
if hasattr(row[i], 'isoformat'):
row[i] = row[i].isoformat()
return tuple(row)
|
Receives a row, converts datetimes to strings.
|
train
|
https://github.com/kennethreitz/records/blob/ecd857266c5e7830d657cbe0196816314790563b/records.py#L424-L432
| null |
# -*- coding: utf-8 -*-
import os
from sys import stdout
from collections import OrderedDict
from contextlib import contextmanager
from inspect import isclass
import tablib
from docopt import docopt
from sqlalchemy import create_engine, exc, inspect, text
DATABASE_URL = os.environ.get('DATABASE_URL')
def isexception(obj):
    """Given an object, return a boolean indicating whether it is an instance
    or subclass of :py:class:`Exception`.
    """
    if isinstance(obj, Exception):
        return True
    return bool(isclass(obj) and issubclass(obj, Exception))
class Record(object):
    """A single row produced by a database query."""
    __slots__ = ('_keys', '_values')

    def __init__(self, keys, values):
        self._keys = keys
        self._values = values

        # A row must carry exactly one value per column name.
        assert len(self._keys) == len(self._values)

    def keys(self):
        """Return the list of column names from the query."""
        return self._keys

    def values(self):
        """Return the list of values from the query."""
        return self._values

    def __repr__(self):
        return '<Record {}>'.format(self.export('json')[1:-1])

    def __getitem__(self, key):
        # Integer keys index positionally into the row.
        if isinstance(key, int):
            return self.values()[key]

        # String keys look up by column name; an ambiguous name is an error.
        keys = self.keys()
        if key in keys:
            if keys.count(key) > 1:
                raise KeyError("Record contains multiple '{}' fields.".format(key))
            return self.values()[keys.index(key)]

        raise KeyError("Record contains no '{}' field.".format(key))

    def __getattr__(self, key):
        # Column names double as attributes; missing columns surface as
        # AttributeError so normal attribute protocols keep working.
        try:
            return self[key]
        except KeyError as e:
            raise AttributeError(e)

    def __dir__(self):
        # Standard attributes plus one generated entry per column name.
        standard = dir(super(Record, self))
        return sorted(standard + [str(k) for k in self.keys()])

    def get(self, key, default=None):
        """Return the value for *key*, or *default* when the column is absent."""
        try:
            return self[key]
        except KeyError:
            return default

    def as_dict(self, ordered=False):
        """Return the row as a dictionary (insertion-ordered when requested)."""
        pairs = zip(self.keys(), self.values())
        if ordered:
            return OrderedDict(pairs)
        return dict(pairs)

    @property
    def dataset(self):
        """A Tablib Dataset containing just this row."""
        data = tablib.Dataset()
        data.headers = self.keys()
        data.append(_reduce_datetimes(self.values()))
        return data

    def export(self, format, **kwargs):
        """Export the row to the given format (via Tablib)."""
        return self.dataset.export(format, **kwargs)
class RecordCollection(object):
    """A set of excellent Records from a query.

    Rows are pulled lazily from the underlying row generator and cached in
    ``_all_rows``, so the collection can be indexed, sliced and iterated
    multiple times.
    """

    def __init__(self, rows):
        self._rows = rows        # underlying (lazy) row generator
        self._all_rows = []      # rows fetched so far
        self.pending = True      # False once the generator is exhausted

    def __repr__(self):
        return '<RecordCollection size={} pending={}>'.format(len(self), self.pending)

    def __iter__(self):
        """Iterate over all rows, consuming the underlying generator
        only when necessary."""
        i = 0
        while True:
            # Other code may have iterated between yields,
            # so always check the cache.
            if i < len(self):
                yield self[i]
            else:
                # Prevent StopIteration bubbling from the generator,
                # following https://www.python.org/dev/peps/pep-0479/
                try:
                    yield next(self)
                except StopIteration:
                    return
            i += 1

    def next(self):
        # Python 2 style alias.
        return self.__next__()

    def __next__(self):
        """Fetch one more row from the generator and cache it."""
        try:
            nextrow = next(self._rows)
            self._all_rows.append(nextrow)
            return nextrow
        except StopIteration:
            self.pending = False
            raise StopIteration('RecordCollection contains no more rows.')

    def __getitem__(self, key):
        is_int = isinstance(key, int)

        # Convert RecordCollection[1] into slice.
        if is_int:
            key = slice(key, key + 1)

        # Fetch rows until the slice can be satisfied. BUGFIX: test
        # ``key.stop is None`` first — the original evaluated
        # ``len(self) < key.stop`` before the None check, which raises
        # TypeError on Python 3 for open-ended slices such as rc[1:].
        while key.stop is None or len(self) < key.stop:
            try:
                next(self)
            except StopIteration:
                break

        rows = self._all_rows[key]
        if is_int:
            return rows[0]
        else:
            return RecordCollection(iter(rows))

    def __len__(self):
        return len(self._all_rows)

    def export(self, format, **kwargs):
        """Export the RecordCollection to a given format (courtesy of Tablib)."""
        return self.dataset.export(format, **kwargs)

    @property
    def dataset(self):
        """A Tablib Dataset representation of the RecordCollection."""
        # Create a new Tablib Dataset.
        data = tablib.Dataset()

        # If the RecordCollection is empty, just return the empty set.
        # Check number of rows by typecasting to list.
        if len(list(self)) == 0:
            return data

        # Set the column names as headers on Tablib Dataset.
        first = self[0]
        data.headers = first.keys()

        for row in self.all():
            row = _reduce_datetimes(row.values())
            data.append(row)

        return data

    def all(self, as_dict=False, as_ordereddict=False):
        """Returns a list of all rows for the RecordCollection. If they haven't
        been fetched yet, consume the iterator and cache the results."""
        # By calling list it calls the __iter__ method.
        rows = list(self)

        if as_dict:
            return [r.as_dict() for r in rows]
        elif as_ordereddict:
            return [r.as_dict(ordered=True) for r in rows]

        return rows

    def as_dict(self, ordered=False):
        """Return all rows as (optionally ordered) dictionaries."""
        return self.all(as_dict=not(ordered), as_ordereddict=ordered)

    def first(self, default=None, as_dict=False, as_ordereddict=False):
        """Returns a single record for the RecordCollection, or `default`. If
        `default` is an instance or subclass of Exception, then raise it
        instead of returning it."""
        # Try to get a record, or return/raise default.
        try:
            record = self[0]
        except IndexError:
            if isexception(default):
                raise default
            return default

        # Cast and return.
        if as_dict:
            return record.as_dict()
        elif as_ordereddict:
            return record.as_dict(ordered=True)
        else:
            return record

    def one(self, default=None, as_dict=False, as_ordereddict=False):
        """Returns a single record for the RecordCollection, ensuring that it
        is the only record, or returns `default`. If `default` is an instance
        or subclass of Exception, then raise it instead of returning it."""
        # Ensure that we don't have more than one row.
        try:
            self[1]
        except IndexError:
            return self.first(default=default, as_dict=as_dict, as_ordereddict=as_ordereddict)
        else:
            raise ValueError('RecordCollection contained more than one row. '
                             'Expects only one row when using '
                             'RecordCollection.one')

    def scalar(self, default=None):
        """Returns the first column of the first row, or `default`."""
        row = self.one()
        return row[0] if row else default
class Database(object):
    """A Database. Encapsulates a url and an SQLAlchemy engine with a pool of
    connections.
    """

    def __init__(self, db_url=None, **kwargs):
        # If no db_url was provided, fallback to $DATABASE_URL.
        self.db_url = db_url or DATABASE_URL

        if not self.db_url:
            raise ValueError('You must provide a db_url.')

        # Create an engine.
        self._engine = create_engine(self.db_url, **kwargs)
        self.open = True

    def close(self):
        """Closes the Database."""
        self._engine.dispose()
        self.open = False

    def __enter__(self):
        return self

    def __exit__(self, exc, val, traceback):
        self.close()

    def __repr__(self):
        return '<Database open={}>'.format(self.open)

    def get_table_names(self, internal=False):
        """Returns a list of table names for the connected database."""
        # Setup SQLAlchemy for Database inspection.
        # NOTE(review): the ``internal`` flag is currently unused — confirm intent.
        return inspect(self._engine).get_table_names()

    def get_connection(self):
        """Get a connection to this Database. Connections are retrieved from a
        pool.

        :raises exc.ResourceClosedError: when the Database has been closed.
        """
        if not self.open:
            raise exc.ResourceClosedError('Database closed.')

        return Connection(self._engine.connect())

    def query(self, query, fetchall=False, **params):
        """Executes the given SQL query against the Database. Parameters can,
        optionally, be provided. Returns a RecordCollection, which can be
        iterated over to get result rows as dictionaries.
        """
        with self.get_connection() as conn:
            return conn.query(query, fetchall, **params)

    def bulk_query(self, query, *multiparams):
        """Bulk insert or update."""
        with self.get_connection() as conn:
            conn.bulk_query(query, *multiparams)

    def query_file(self, path, fetchall=False, **params):
        """Like Database.query, but takes a filename to load a query from."""
        with self.get_connection() as conn:
            return conn.query_file(path, fetchall, **params)

    def bulk_query_file(self, path, *multiparams):
        """Like Database.bulk_query, but takes a filename to load a query from."""
        with self.get_connection() as conn:
            conn.bulk_query_file(path, *multiparams)

    @contextmanager
    def transaction(self):
        """A context manager for executing a transaction on this Database.

        Commits on success; on failure rolls back and re-raises. BUGFIX:
        the original used a bare ``except:`` and did not re-raise, so any
        error (including KeyboardInterrupt) was silently swallowed and
        callers believed the transaction succeeded. The connection is
        always returned to the pool.
        """
        conn = self.get_connection()
        tx = conn.transaction()
        try:
            yield conn
            tx.commit()
        except Exception:
            tx.rollback()
            raise
        finally:
            conn.close()
class Connection(object):
    """A Database connection."""

    def __init__(self, connection):
        self._conn = connection
        self.open = not connection.closed

    def close(self):
        """Close the underlying connection."""
        self._conn.close()
        self.open = False

    def __enter__(self):
        return self

    def __exit__(self, exc, val, traceback):
        self.close()

    def __repr__(self):
        return '<Connection open={}>'.format(self.open)

    def query(self, query, fetchall=False, **params):
        """Executes the given SQL query against the connected Database.
        Parameters can, optionally, be provided. Returns a RecordCollection,
        which can be iterated over to get result rows as dictionaries.
        """
        # Execute the given query.
        cursor = self._conn.execute(text(query), **params)  # TODO: PARAMS GO HERE

        # Row-by-row Record generator.
        row_gen = (Record(cursor.keys(), row) for row in cursor)

        # Convert psycopg2 results to RecordCollection.
        results = RecordCollection(row_gen)

        # Fetch all results if desired.
        if fetchall:
            results.all()

        return results

    def bulk_query(self, query, *multiparams):
        """Bulk insert or update."""
        self._conn.execute(text(query), *multiparams)

    @staticmethod
    def _read_sql_file(path):
        """Validate *path* and return the SQL text it contains.

        Raises IOError when the path is missing or is a directory.
        (Extracted: query_file and bulk_query_file duplicated this logic,
        and bulk_query_file's not-found message had a stray quote typo.)
        """
        # If path doesn't exist.
        if not os.path.exists(path):
            raise IOError("File '{}' not found!".format(path))

        # If it's a directory.
        if os.path.isdir(path):
            raise IOError("'{}' is a directory!".format(path))

        # Read the given .sql file into memory.
        with open(path) as f:
            return f.read()

    def query_file(self, path, fetchall=False, **params):
        """Like Connection.query, but takes a filename to load a query from."""
        query = self._read_sql_file(path)

        # Defer processing to self.query method.
        return self.query(query=query, fetchall=fetchall, **params)

    def bulk_query_file(self, path, *multiparams):
        """Like Connection.bulk_query, but takes a filename to load a query
        from.
        """
        query = self._read_sql_file(path)
        self._conn.execute(text(query), *multiparams)

    def transaction(self):
        """Returns a transaction object. Call ``commit`` or ``rollback``
        on the returned object as appropriate."""
        return self._conn.begin()
def cli():
    """Command-line entry point: run a SQL query (file path or literal
    string) against a database and print or export the results."""
    supported_formats = 'csv tsv json yaml html xls xlsx dbf latex ods'.split()
    formats_lst = ", ".join(supported_formats)
    # docopt parses this usage text to produce the arguments dict below.
    cli_docs = """Records: SQL for Humans™
A Kenneth Reitz project.
Usage:
records <query> [<format>] [<params>...] [--url=<url>]
records (-h | --help)
Options:
-h --help Show this screen.
--url=<url> The database URL to use. Defaults to $DATABASE_URL.
Supported Formats:
%(formats_lst)s
Note: xls, xlsx, dbf, and ods formats are binary, and should only be
used with redirected output e.g. '$ records sql xls > sql.xls'.
Query Parameters:
Query parameters can be specified in key=value format, and injected
into your query in :key format e.g.:
$ records 'select * from repos where language ~= :lang' lang=python
Notes:
- While you may specify a database connection string with --url, records
will automatically default to the value of $DATABASE_URL, if available.
- Query is intended to be the path of a SQL file, however a query string
can be provided instead. Use this feature discernfully; it's dangerous.
- Records is intended for report-style exports of database queries, and
has not yet been optimized for extremely large data dumps.
""" % dict(formats_lst=formats_lst)

    # Parse the command-line arguments.
    arguments = docopt(cli_docs)

    query = arguments['<query>']
    params = arguments['<params>']
    format = arguments.get('<format>')
    # docopt can mis-assign the first key=value parameter to <format>;
    # detect that and move it back into the params list.
    if format and "=" in format:
        del arguments['<format>']
        arguments['<params>'].append(format)
        format = None
    if format and format not in supported_formats:
        print('%s format not supported.' % format)
        print('Supported formats are %s.' % formats_lst)
        exit(62)

    # Can't send an empty list if params aren't expected.
    try:
        params = dict([i.split('=') for i in params])
    except ValueError:
        print('Parameters must be given in key=value format.')
        exit(64)

    # Be ready to fail on missing packages.
    try:
        # Create the Database.
        db = Database(arguments['--url'])

        # Execute the query, if it is a found file.
        if os.path.isfile(query):
            rows = db.query_file(query, **params)

        # Execute the query, if it appears to be a query string.
        elif len(query.split()) > 2:
            rows = db.query(query, **params)

        # Otherwise, say the file wasn't found.
        else:
            print('The given query could not be found.')
            exit(66)

        # Print results in desired format.
        if format:
            content = rows.export(format)
            if isinstance(content, bytes):
                # Binary formats go straight to the stdout buffer.
                print_bytes(content)
            else:
                print(content)
        else:
            print(rows.dataset)
    except ImportError as impexc:
        # A driver or export backend is not installed.
        print(impexc.msg)
        print("Used database or format require a package, which is missing.")
        print("Try to install missing packages.")
        exit(60)
def print_bytes(content):
    """Write binary *content* to stdout.

    Uses the underlying binary buffer when available; falls back to a plain
    write for streams without a ``buffer`` attribute.
    """
    try:
        out = stdout.buffer
    except AttributeError:
        stdout.write(content)
    else:
        out.write(content)
# Run the CLI when executed directly.
if __name__ == '__main__':
cli()
|
kennethreitz/records
|
records.py
|
Record.as_dict
|
python
|
def as_dict(self, ordered=False):
    """Return the row as a dict of column name -> value (OrderedDict when
    *ordered* is True)."""
    items = zip(self.keys(), self.values())
    return OrderedDict(items) if ordered else dict(items)
|
Returns the row as a dictionary, as ordered.
|
train
|
https://github.com/kennethreitz/records/blob/ecd857266c5e7830d657cbe0196816314790563b/records.py#L81-L85
|
[
"def keys(self):\n \"\"\"Returns the list of column names from the query.\"\"\"\n return self._keys\n",
"def values(self):\n \"\"\"Returns the list of values from the query.\"\"\"\n return self._values\n"
] |
class Record(object):
    """A single row produced by a database query."""
    __slots__ = ('_keys', '_values')

    def __init__(self, keys, values):
        self._keys = keys
        self._values = values

        # A row must carry exactly one value per column name.
        assert len(self._keys) == len(self._values)

    def keys(self):
        """Return the list of column names from the query."""
        return self._keys

    def values(self):
        """Return the list of values from the query."""
        return self._values

    def __repr__(self):
        return '<Record {}>'.format(self.export('json')[1:-1])

    def __getitem__(self, key):
        # Integer keys index positionally into the row.
        if isinstance(key, int):
            return self.values()[key]

        # String keys look up by column name; an ambiguous name is an error.
        keys = self.keys()
        if key in keys:
            if keys.count(key) > 1:
                raise KeyError("Record contains multiple '{}' fields.".format(key))
            return self.values()[keys.index(key)]

        raise KeyError("Record contains no '{}' field.".format(key))

    def __getattr__(self, key):
        # Column names double as attributes; missing columns surface as
        # AttributeError so normal attribute protocols keep working.
        try:
            return self[key]
        except KeyError as e:
            raise AttributeError(e)

    def __dir__(self):
        # Standard attributes plus one generated entry per column name.
        standard = dir(super(Record, self))
        return sorted(standard + [str(k) for k in self.keys()])

    def get(self, key, default=None):
        """Return the value for *key*, or *default* when the column is absent."""
        try:
            return self[key]
        except KeyError:
            return default

    @property
    def dataset(self):
        """A Tablib Dataset containing just this row."""
        data = tablib.Dataset()
        data.headers = self.keys()
        data.append(_reduce_datetimes(self.values()))
        return data

    def export(self, format, **kwargs):
        """Export the row to the given format (via Tablib)."""
        return self.dataset.export(format, **kwargs)
|
kennethreitz/records
|
records.py
|
Record.dataset
|
python
|
def dataset(self):
    """Build and return a Tablib Dataset containing just this row, with
    datetime values reduced to ISO strings."""
    data = tablib.Dataset()
    data.headers = self.keys()
    row = _reduce_datetimes(self.values())
    data.append(row)
    return data
|
A Tablib Dataset containing the row.
|
train
|
https://github.com/kennethreitz/records/blob/ecd857266c5e7830d657cbe0196816314790563b/records.py#L88-L96
|
[
"def _reduce_datetimes(row):\n \"\"\"Receives a row, converts datetimes to strings.\"\"\"\n\n row = list(row)\n\n for i in range(len(row)):\n if hasattr(row[i], 'isoformat'):\n row[i] = row[i].isoformat()\n return tuple(row)\n",
"def keys(self):\n \"\"\"Returns the list of column names from the query.\"\"\"\n return self._keys\n",
"def values(self):\n \"\"\"Returns the list of values from the query.\"\"\"\n return self._values\n"
] |
class Record(object):
    """A row, from a query, from a database."""
    __slots__ = ('_keys', '_values')

    def __init__(self, keys, values):
        self._keys = keys
        self._values = values

        # Ensure that lengths match properly.
        assert len(self._keys) == len(self._values)

    def keys(self):
        """Returns the list of column names from the query."""
        return self._keys

    def values(self):
        """Returns the list of values from the query."""
        return self._values

    def __repr__(self):
        return '<Record {}>'.format(self.export('json')[1:-1])

    def __getitem__(self, key):
        # Support for index-based lookup.
        if isinstance(key, int):
            return self.values()[key]

        # Support for string-based lookup; duplicate column names are an error.
        if key in self.keys():
            i = self.keys().index(key)
            if self.keys().count(key) > 1:
                raise KeyError("Record contains multiple '{}' fields.".format(key))
            return self.values()[i]

        raise KeyError("Record contains no '{}' field.".format(key))

    def __getattr__(self, key):
        # Column names double as attributes (KeyError -> AttributeError).
        try:
            return self[key]
        except KeyError as e:
            raise AttributeError(e)

    def __dir__(self):
        standard = dir(super(Record, self))
        # Merge standard attrs with generated ones (from column names).
        return sorted(standard + [str(k) for k in self.keys()])

    def get(self, key, default=None):
        """Returns the value for a given key, or default."""
        try:
            return self[key]
        except KeyError:
            return default

    def as_dict(self, ordered=False):
        """Returns the row as a dictionary, as ordered."""
        items = zip(self.keys(), self.values())
        return OrderedDict(items) if ordered else dict(items)

    # NOTE(review): this '@property' looks like a stray leftover — the
    # 'dataset' property it presumably decorated is missing from this copy,
    # so it turns 'export' into a non-callable attribute. Confirm intent.
    @property
    def export(self, format, **kwargs):
        """Exports the row to the given format."""
        return self.dataset.export(format, **kwargs)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.