repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/drivers.py | ASCII_RS232.send_commands | python | def send_commands(self, commands, timeout=1.0,
max_retries=1, eor=('\n', '\n- ')):
# If eor is not a list, make a list of it replicated enough for
# every command.
if not isinstance(eor, list):
eor = [eor]*len(commands)
# Do every command one by one, collecting the responses and
# stuffing them in a list. Commands that failed are retried, and
# we stop if the last retry is exhausted.
responses = []
for i, command in enumerate(commands):
rsp = self.send_command(command, timeout=timeout,
max_retries=max_retries,
eor=eor[i])
responses.append(rsp)
if self.command_error(rsp):
break
# Put in a slight pause so the drive has a bit of breathing
# time between commands.
time.sleep(0.25)
return responses | Send a sequence of commands to the drive and collect output.
Takes a sequence of many commands and executes them one by one
till either all are executed or one runs out of retries
(`max_retries`). Retries are optionally performed if a command's
repsonse indicates that there was an error. Remaining commands
are not executed. The processed output of the final execution
(last try or retry) of each command that was actually executed
is returned.
This function basically feeds commands one by one to
``send_command`` and collates the outputs.
Parameters
----------
commands : iterable of str
Iterable of commands to send to the drive. Each command must
be an ``str``.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
End Of Resonse. An EOR is either a ``str`` or an iterable
of ``str`` that denote the possible endings of a response.
'eor' can be a single EOR, in which case it is used for all
commands, or it can be an iterable of EOR to use for each
individual command. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
outputs : list of lists
``list`` composed of the processed responses of each command
in the order that they were done up to and including the
last command executed. See ``send_command`` for the format
of processed responses.
See Also
--------
send_command : Send a single command.
Examples
--------
A sequence of commands to energize the motor, move it a bit away
from the starting position, and then do 4 forward/reverse
cycles, and de-energize the motor. **DO NOT** try these specific
movement distances without checking that the motion won't damage
something (very motor and application specific).
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ra = ASCII_RS232('/dev/ttyS1')
>>> ra.send_commands(['DRIVE1', 'D-10000', 'GO']
... + ['D-10000','GO','D10000','GO']*4
... + [ 'DRIVE0'])
[['DRIVE1', 'DRIVE1\\r', 'DRIVE1', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['DRIVE0', 'DRIVE0\\r', 'DRIVE0', None, []]] | train | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/drivers.py#L529-L631 | [
"def command_error(self, response):\n \"\"\" Checks whether a command produced an error.\n\n Checks whether a command procuded an error based on its\n processed response. The two types of errors are an error\n returned by the drive and the command that the drive received\n being different than the one that was sent (error in\n transmission).\n\n Parameters\n ----------\n response : processed response (list)\n The processed response ``list`` for the command that was\n executed.\n\n Returns\n -------\n error : bool\n ``True`` if there was an error and ``False`` otherwise.\n\n \"\"\"\n # The command should be echoed back accurately (might be\n # preceeded by a '- ' if it is part of a program definition) and\n # no errors should be returned, if it has no errors.\n return (response[2] not in [response[0], '- ' + response[0]]\n or response[3] is not None)\n",
"def send_command(self, command, immediate=False, timeout=1.0,\n max_retries=0, eor=('\\n', '\\n- ')):\n \"\"\" Sends a single command to the drive and returns output.\n\n Takes a single given `command`, sanitizes it, sends it to the\n drive, reads the response, and returns the processed response.\n The command is first sanitized by removing comments, extra\n whitespace, and newline characters. If `immediate` is set, the\n command is made to be an immediate command. Note, the command is\n **NOT** checked for validity. If the drive returns an error, the\n command is re-executed up to `max_tries` more times. The\n response from the final execution is processed and returned. The\n response from the drive is broken down into the echoed command\n (drive echoes it back), any error returned by the drive (leading\n '*' is stripped), and the different lines of the response; which\n are all returned.\n\n Parameters\n ----------\n command : str\n The command to send to the Gemini drive.\n immediate : bool, optional\n Whether to make it so the command is executed immediately or\n not.\n timeout : float or None, optional\n Optional timeout in seconds to use when reading the\n response. A negative value or ``None`` indicates that the\n an infinite timeout should be used.\n max_retries : int, optional\n Maximum number of retries to do per command in the case of\n errors.\n eor : str or iterable of str, optional\n ``str`` or an iterable of ``str`` that denote the allowed\n End Of Response. For most commands, it should be\n ``('\\\\n', '\\\\n- ')``, but for running a program, it should\n be ``'*END\\\\n'``. The default is ``('\\\\n', '\\\\n- ')``.\n\n Returns\n -------\n output : list\n A 5-element ``list``. 
The elements, in order, are the\n sanitized command (``str``), the full response (``str``),\n the echoed command (``str``), any error response (``None``\n if none, or the ``str`` of the error), and the lines of the\n response that are not the echo or error line (``list`` of\n ``str`` with newlines stripped).\n\n See Also\n --------\n send_commands : Send multiple commands.\n\n Examples\n --------\n\n Simple command energizing the motor with no response and no\n errors.\n\n >>> from GeminiMotorDrive.drivers import ASCII_RS232\n >>> ar = ASCII_RS232('/dev/ttyS1')\n >>> ar.send_command('DRIVE1', immediate=False, timeout=1.0)\n ['DRIVE1', 'DRIVE1\\\\r\\\\r\\\\n', 'DRIVE1', None, []]\n\n Same command but made immediate.\n\n >>> from GeminiMotorDrive.drivers import ASCII_RS232\n >>> ar = ASCII_RS232('/dev/ttyS1')\n >>> ar.send_command('DRIVE1', immediate=True, timeout=1.0)\n ['!DRIVE1', '!DRIVE1\\\\r\\\\r\\\\n', '!DRIVE1', None, []]\n\n Same command with a typo.\n\n >>> from GeminiMotorDrive.drivers import ASCII_RS232\n >>> ar = ASCII_RS232('/dev/ttyS1')\n >>> ar.send_command('DRIV1', immediate=False, timeout=1.0)\n ['DRIV1', 'DRIV1\\\\r*UNDEFINED_LABEL\\\\r\\\\r\\\\n', 'DRIV1',\n 'UNDEFINED_LABEL', []]\n\n Simple command asking whether the motor is energized or not.\n\n >>> from GeminiMotorDrive.drivers import ASCII_RS232\n >>> ar = ASCII_RS232('/dev/ttyS1')\n >>> ar.send_command('DRIVE', immediate=False, timeout=1.0)\n ['DRIVE', 'DRIVE\\\\r*DRIVE1\\\\r\\\\r\\\\n', 'DRIVE', None,\n ['*DRIVE1']]\n\n \"\"\"\n # Execute the command till it either doesn't have an error or\n # the maximum number of retries is exceeded.\n for i in range(0, max_retries+1):\n # Send the command and stuff the sanitized version in a\n # list. 
Then process the response and add it to the list.\n response = [self._send_command(command,\n immediate=immediate)]\n output = self._get_response(timeout=timeout, eor=eor)\n # If echo checking was done, the echo was already grabbed,\n # is identical to the command, and needs to be placed back\n # in front of the output so that it can be processed\n # properly.\n if self._check_echo:\n output = response[0] + output\n response.extend(self._process_response(output))\n # We are done if there is no error.\n if not self.command_error(response):\n break\n # Put in a slight pause so the drive has a bit of breathing\n # time between retries.\n time.sleep(0.25)\n return response\n"
] | class ASCII_RS232(object):
""" ASCII RS232 comm. driver for a Parker Motion Gemini drive.
Communications driver to talk to a Parker Motion Gemini drive in
ASCII mode over RS232.
Parameters
----------
port : serial port string
The serial port (RS232) that the Gemini drive is connected to.
check_echo : bool, optional
Whether the echoing of the commands as they are being written
to the drive should be used to correct mistakes in what the
drive is seeing or not as the default.
writeTimout : float, optional
The write timeout for the RS232 port. See ``serial.Serial``.
interCharTimeout : float or None, optional
The inter-character timeout for writing on the RS232 port.
``None`` disables. See ``serial.Serial``.
Raises
------
serial.SerialException
If `port` does not correspond to an available RS232 port or
can't be opened.
Notes
-----
The ASCII communications settings of the Gemini drive are changed
while this object is connected and are returned to the default
values when this object is deleted. Thus, the values of the
communications settings before this object is created are lost.
See Also
--------
serial.Serial
"""
def __init__(self, port, check_echo=True, writeTimeout=1.0,
interCharTimeout=0.002):
# Set private variable holding the echo parameters.
self._check_echo = check_echo
# Initialize the serial port to connect to the Gemini drive. The
# only timeout being explicitly set right now is the write
# timeout. Read timeouts are handled in a more manual fasion.
self._ser = serial.Serial(port, baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=None,
writeTimeout=writeTimeout,
interCharTimeout=interCharTimeout,
xonxoff=True, rtscts=False,
dsrdtr=False)
# It is convenient to have a text wrapper around the serial
# port for reading and writing.
self._sio = io.TextIOWrapper(io.BufferedRWPair(self._ser,
self._ser, 1), newline='\n',
encoding='ASCII')
# Change the communications parameters so that commands are
# echoed, on error level 4, no characters are used to preceed
# each response, carriage returns are used for newlines in
# responses, responses are terminated by a '\n', and there are
# no prompts (there are separate prompts depending on whether
# the previous command had an error or not). The echo command is
# the one command that echo checking cannot be done on since
# echo may not be enabled yet.
self._send_command('ECHO1', check_echo=False, immediate=True)
self._send_command('ERRLVL4', immediate=True)
self._send_command('BOT0,0,0', immediate=True)
self._send_command('EOT10,0,0', immediate=True)
self._send_command('EOL13,0,0', immediate=True)
self._send_command('ERRBAD0,0,0,0', immediate=True)
self._send_command('ERROK0,0,0,0', immediate=True)
# Wait a little while for the commands to be processed and then
# discard all the responses.
time.sleep(2)
self._ser.read(self._ser.inWaiting())
def __del__(self):
""" Returns all communications settings to their defaults.
"""
# Return all communicatsions parameters to their default values
# (from the manual).
self._send_command('ECHO1', immediate=True)
self._send_command('ERRLVL4', immediate=True)
self._send_command('BOT0,0,0', immediate=True)
self._send_command('EOT13,0,0', immediate=True)
self._send_command('EOL13,10,0', immediate=True)
self._send_command('ERRBAD13,10,63,32', immediate=True)
self._send_command('ERROK13,10,62,32', immediate=True)
# Wait a little while for the commands to be processed and then
# discard all the responses.
time.sleep(2)
self._ser.read(self._ser.inWaiting())
def _send_command(self, command, immediate=False, timeout=1.0,
check_echo=None):
""" Send a single command to the drive after sanitizing it.
Takes a single given `command`, sanitizes it (strips out
comments, extra whitespace, and newlines), sends the command to
the drive, and returns the sanitized command. The validity of
the command is **NOT** checked.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : number, optional
Optional timeout in seconds to use to get the command right
when we are doing echo checking. A negative value or
``None`` indicates that the an infinite timeout should be
used.
check_echo : bool or None, optional
Whether the echoing of the command as it is being written to
the drive should be used to correct mistakes in what the
drive is seeing, or whether the default set when the
instance of this class was created should be used
(``None``).
Returns
-------
sanitized_command : str
The sanitized command that was sent to the drive.
"""
# Use the default echo checking if None was given.
if check_echo is None:
check_echo = self._check_echo
# Convert to bytes and then strip comments, whitespace, and
# newlines.
if sys.hexversion >= 0x03000000:
c = bytes(command, encoding='ASCII')
else:
c = command
c = c.split(b';')[0].strip()
# If the command is supposed to be immediate, insure that it
# starts with an '!'.
if immediate and not c.startswith(b'!'):
c = b'!' + c
# Read out any junk on the serial port before we start.
self._ser.read(self._ser.inWaiting())
# The command needs to be written a character at a time with
# pauses between them to make sure nothing gets lost or
# corrupted. This is a simple loop if we are not checking the
# echo. If we are, it is more complicated.
if not check_echo:
for i in range(0, len(c)):
self._ser.write(bytes([c[i]]))
time.sleep(0.01)
else:
# Infinite timeouts need to be converted to None. Finite
# ones need to be checked to make sure they are not too big,
# which is threading.TIMEOUT_MAX on Python 3.x and not
# specified on Python 2.x (lets use a week).
if timeout is None or timeout <= 0:
timeout = None
else:
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. Then, the
# timer is started.
tm = threading.Timer(timeout, lambda : None)
tm.start()
# Each character needs to be written one by one while the
# echo is collected. If any mistakes occur, they need to be
# corrected with backspaces b'\x08'. The echo starts out
# empty. We go until either the echo is identical to the
# command or the timeout is exceeded.
echo = b''
while c != echo and tm.is_alive():
# If there are no mistakes, then echo will be the
# beginning of c meaning the next character can be
# written. Otherwise, there is a mistake and a backspace
# needs to be written.
if c.startswith(echo):
self._ser.write(bytes([c[len(echo)]]))
else:
self._ser.write(b'\x08')
# Pause for a bit to make sure nothing gets lost. Then
# read the drive's output add it to the echo.
time.sleep(0.01)
echo += self._ser.read(self._ser.inWaiting())
# All backspaces in echo need to be processed. Each
# backspace deletes itself and the character before it
# (if any).
while b'\x08' in echo:
index = echo.index(b'\x08')
if index == 0:
echo = echo[1:]
else:
echo = echo[0:(index-1)] + echo[(index+1):]
# Turn off the timer in the case that it is still running
# (command completely written before timeout).
tm.cancel()
# Write the carriage return to enter the command and then return
# the sanitized command.
self._ser.write(b'\r')
if sys.hexversion >= 0x03000000:
return c.decode(errors='replace')
else:
return c
def _get_response(self, timeout=1.0, eor=('\n', '\n- ')):
""" Reads a response from the drive.
Reads the response returned by the drive with an optional
timeout. All carriage returns and linefeeds are kept.
Parameters
----------
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
eor : str or iterable of str, optional
``str`` or iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
response : str
The response obtained from the drive. Carriage returns and
linefeeds are preserved.
"""
# If no timeout is given or it is invalid and we are using '\n'
# as the eor, use the wrapper to read a line with an infinite
# timeout. Otherwise, the reading and timeout must be
# implemented manually.
if (timeout is None or timeout < 0) and eor == '\n':
return self._sio.readline()
else:
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. They need to
# be checked to make sure they are not too big, which is
# threading.TIMEOUT_MAX on Python 3.x and not specified on
# Python 2.x (lets use a week). Then, the timer is started.
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
tm = threading.Timer(timeout, lambda : None)
tm.start()
# eor needs to be converted to bytes. If it is just an str,
# it needs to be wrapped in a tuple.
if isinstance(eor, str):
eor = tuple([eor])
if sys.hexversion >= 0x03000000:
eor = [s.encode(encoding='ASCII') for s in eor]
# Read from the serial port into buf until the EOR is found
# or the timer has stopped. A small pause is done each time
# so that this thread doesn't hog the CPU.
buf = b''
while not any([(x in buf) for x in eor]) and tm.is_alive():
time.sleep(0.001)
buf += self._ser.read(self._ser.inWaiting())
# Just in case the timer has not stopped (EOR was found),
# stop it.
tm.cancel()
# Remove anything after the EOR if there is one. First, a
# set of matches (index, eor_str) for each string in eor
# needs to be constructed. Sorting the matches by their
# index puts all the ones that were not found (index of -1)
# at the front. Then a list of bools that are True for each
# index that isn't -1 is made, converted to a bytes (True
# goes to b'\x01' and False goes to b'\x00'), and then the
# index of the first True value found. If it is not -1, then
# there was a successful match and all the characters are
# dropped after that eor_str.
matches = [(buf.find(x), x) for x in eor]
matches.sort(key=lambda x: x[0])
index = bytes([x[0] != -1 for x in matches]).find(b'\x01')
if index != -1:
buf = buf[:(matches[index][0] + len(matches[index][1]))]
# Convert to an str before returning.
if sys.hexversion >= 0x03000000:
return buf.decode(errors='replace')
else:
return buf
def _process_response(self, response):
""" Processes a response from the drive.
Processes the response returned from the drive. It is broken
down into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response.
Parameters
----------
response : str
The response returned by the drive.
Returns
-------
processed_response : list
A 4-element ``list``. The elements, in order, are `response`
(``str``), the echoed command (``str``), any error response
(``None`` if none, or the ``str`` of the error), and the
lines of the response that are not the echo or error line
(``list`` of ``str`` with newlines stripped).
"""
# Strip the trailing newline and split the response into lines
# by carriage returns.
rsp_lines = response.rstrip('\r\n').split('\r')
# If we have at least one line, the first one is the echoed
# command. If available, it needs to be grabbed and that line
# removed from rsp_lines since it is just the echoing, not the
# actual response to the command. None will be used to denote a
# non-existent echo.
if len(rsp_lines) > 0:
echoed_command = rsp_lines[0]
del rsp_lines[0]
else:
echoed_command = None
# If the next line is one of the different possible error
# strings, then there was an error that must be grabbed (leading
# '*' is stripped). If there was an error, remove that line from
# the response. None will be used to denote the lack of an error.
if len(rsp_lines) > 0 and \
rsp_lines[0] in ('*INVALID_ADDRESS', '*INVALID_DATA', \
'*INVALID_DATA_HIGH', '*INVALID_DATA_LOW', \
'*UNDEFINED_LABEL'):
err = rsp_lines[0][1:]
del rsp_lines[0]
else:
err = None
return [response, echoed_command, err, rsp_lines]
def command_error(self, response):
""" Checks whether a command produced an error.
Checks whether a command procuded an error based on its
processed response. The two types of errors are an error
returned by the drive and the command that the drive received
being different than the one that was sent (error in
transmission).
Parameters
----------
response : processed response (list)
The processed response ``list`` for the command that was
executed.
Returns
-------
error : bool
``True`` if there was an error and ``False`` otherwise.
"""
# The command should be echoed back accurately (might be
# preceeded by a '- ' if it is part of a program definition) and
# no errors should be returned, if it has no errors.
return (response[2] not in [response[0], '- ' + response[0]]
or response[3] is not None)
def send_command(self, command, immediate=False, timeout=1.0,
max_retries=0, eor=('\n', '\n- ')):
""" Sends a single command to the drive and returns output.
Takes a single given `command`, sanitizes it, sends it to the
drive, reads the response, and returns the processed response.
The command is first sanitized by removing comments, extra
whitespace, and newline characters. If `immediate` is set, the
command is made to be an immediate command. Note, the command is
**NOT** checked for validity. If the drive returns an error, the
command is re-executed up to `max_tries` more times. The
response from the final execution is processed and returned. The
response from the drive is broken down into the echoed command
(drive echoes it back), any error returned by the drive (leading
'*' is stripped), and the different lines of the response; which
are all returned.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
``str`` or an iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
output : list
A 5-element ``list``. The elements, in order, are the
sanitized command (``str``), the full response (``str``),
the echoed command (``str``), any error response (``None``
if none, or the ``str`` of the error), and the lines of the
response that are not the echo or error line (``list`` of
``str`` with newlines stripped).
See Also
--------
send_commands : Send multiple commands.
Examples
--------
Simple command energizing the motor with no response and no
errors.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE1', immediate=False, timeout=1.0)
['DRIVE1', 'DRIVE1\\r\\r\\n', 'DRIVE1', None, []]
Same command but made immediate.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE1', immediate=True, timeout=1.0)
['!DRIVE1', '!DRIVE1\\r\\r\\n', '!DRIVE1', None, []]
Same command with a typo.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIV1', immediate=False, timeout=1.0)
['DRIV1', 'DRIV1\\r*UNDEFINED_LABEL\\r\\r\\n', 'DRIV1',
'UNDEFINED_LABEL', []]
Simple command asking whether the motor is energized or not.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE', immediate=False, timeout=1.0)
['DRIVE', 'DRIVE\\r*DRIVE1\\r\\r\\n', 'DRIVE', None,
['*DRIVE1']]
"""
# Execute the command till it either doesn't have an error or
# the maximum number of retries is exceeded.
for i in range(0, max_retries+1):
# Send the command and stuff the sanitized version in a
# list. Then process the response and add it to the list.
response = [self._send_command(command,
immediate=immediate)]
output = self._get_response(timeout=timeout, eor=eor)
# If echo checking was done, the echo was already grabbed,
# is identical to the command, and needs to be placed back
# in front of the output so that it can be processed
# properly.
if self._check_echo:
output = response[0] + output
response.extend(self._process_response(output))
# We are done if there is no error.
if not self.command_error(response):
break
# Put in a slight pause so the drive has a bit of breathing
# time between retries.
time.sleep(0.25)
return response
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | query_yes_no | python | def query_yes_no(question, default="yes"):
res = _query_yes_no(question, default)
if res == "yes":
return True
else:
return False | Just prompt the user for a yes/no question | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L1324-L1330 | [
"def _query_yes_no(question, default=\"yes\"):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is one of \"yes\" or \"no\".\n \"\"\"\n valid = {\"yes\": \"yes\", \"y\": \"yes\", \"ye\": \"yes\",\n \"no\": \"no\", \"n\": \"no\"}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n try:\n choice = raw_input().lower()\n except KeyboardInterrupt:\n print\n return \"no\"\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n"
] | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# PYTHON_ARGCOMPLETE_OK
# This file is part of argtoolbox.
#
# argtoolbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# argtoolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LinShare user cli. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014 Frédéric MARTIN
#
# Contributors list:
#
# Frédéric MARTIN frederic.martin.fma@gmail.com
#
from __future__ import unicode_literals
import os
import sys
import logging
import base64
import copy
from ordereddict import OrderedDict
import ConfigParser
import argparse
import types
import locale
# -----------------------------------------------------------------------------
# global logger variable
#log = logging.getLogger('argtoolbox')
#log.setLevel(logging.INFO)
#log.setLevel(logging.DEBUG)
# logger formats
DEFAULT_LOGGING_FORMAT = logging.Formatter(
"%(asctime)s %(levelname)-8s: %(message)s", "%H:%M:%S")
DEBUG_LOGGING_FORMAT = logging.Formatter(
"%(asctime)s %(levelname)-8s %(module)s:%(name)s:%(funcName)s:%(lineno)d:%(message)s",
"%H:%M:%S")
# logger handlers
# pylint: disable-msg=C0103
streamHandler = logging.StreamHandler(sys.stdout)
streamHandler.setFormatter(DEFAULT_LOGGING_FORMAT)
# debug mode
# if you need debug during class construction, file config loading, ...,
# you need to modify the logger level here.
#log.addHandler(streamHandler)
#log.setLevel(logging.DEBUG)
#streamHandler.setFormatter(DEBUG_LOGGING_FORMAT)
# -----------------------------------------------------------------------------
class DefaultHook(object):
"""
This class does nothing. This is the default class for creating your own
hook. After reading an option from the config file, you can apply a
postprocessing, like the base64 decoding or every thing you want.
"""
def __init__(self):
pass
def __call__(self, elt):
pass
# -----------------------------------------------------------------------------
class Base64ElementHook(DefaultHook):
"""This hook is used as a post reading processing in order to convert
base64 data stored into the config file into plain text data."""
def __init__(self, warning=False):
super(Base64ElementHook, self).__init__()
self.warning = warning
def __call__(self, elt):
if elt.value:
try:
data = base64.b64decode(elt.value)
elt.value = data
except TypeError as ex:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
if self.warning:
log.warn("current field '%(name)s' is not \
stored in the configuration file with \
base64 encoding",
{"name": getattr(elt, "_name")})
else:
log.error("current field '%(name)s' is not stored in the \
configuration file with base64 encoding", {"name":
getattr(elt, "_name")})
raise ex
# -----------------------------------------------------------------------------
class SectionHook(object):
    """Hook applied after the first CLI parsing pass, during the
    configuration reloading process.

    It looks up the CLI argument named ``opt_name`` on the parsed ``args``
    namespace and, when a value is present, stores it into the attribute
    ``attribute`` of the given ``section`` object.
    """

    def __init__(self, section, attribute, opt_name):
        """Validate and record the target section, attribute name and CLI
        option name."""
        if not issubclass(section.__class__, _AbstractSection):
            raise TypeError("First argument should be a subclass of _Section.")
        self.section = section
        if not isinstance(attribute, types.UnicodeType):
            raise TypeError(
                "Second argument should be a string, attribute name.")
        self.attribute = attribute
        if not isinstance(opt_name, types.UnicodeType):
            raise TypeError("Third argument should be a string, option name.")
        self.opt_name = opt_name

    def __call__(self, args):
        """Copy the CLI value (when defined) onto the configured section
        attribute."""
        cli_value = getattr(args, self.opt_name)
        if cli_value is not None:
            setattr(self.section, self.attribute, cli_value)
# -----------------------------------------------------------------------------
class Config(object):
    # pylint: disable-msg=R0902
    """This is the entry point, this class will contains all Section and
    Elements."""
    def __init__(self, prog_name, config_file=None, desc=None,
                 mandatory=False, use_config_file=True):
        """Keyword Arguments:
        - prog_name -- program name; also used to build the default config
          file names (<prog_name>.cfg, ~/.<prog_name>.cfg, /etc/...).
        - config_file -- explicit configuration file name, or an already
          opened file descriptor.
        - desc -- program description, used by argparse and by the sample
          config file generator.
        - mandatory -- when True, a missing config file raises
          EnvironmentError at load time.
        - use_config_file -- when False, config file handling is skipped.
        """
        self.prog_name = prog_name
        self.config_file = config_file
        self.use_config_file = use_config_file
        self._desc = desc
        self.mandatory = mandatory
        self.sections = OrderedDict()
        self._default_section = self.add_section(SimpleSection("DEFAULT"))
        self.parser = None
        self.file_parser = ConfigParser.SafeConfigParser()
    def add_section(self, section):
        """Add a new Section object to the config. Should be a subclass of
        _AbstractSection."""
        if not issubclass(section.__class__, _AbstractSection):
            raise TypeError("argument should be a subclass of Section")
        self.sections[section.get_key_name()] = section
        return section
    def get_section(self, name):
        """Return the section registered under ``name``; the name
        "default" (case-insensitive) returns the default section."""
        if name.lower() == "default":
            return self._default_section
        return self.sections.get(name)
    def get_default_section(self):
        """This method will return default section object"""
        return self._default_section
    def load(self, exit_on_failure=False):
        """One you have added all your configuration data (Section, Element,
        ...) you need to load data from the config file."""
        if self.use_config_file:
            self._load(exit_on_failure)
    def _load(self, exit_on_failure):
        """Read the configuration file(s) then load every declared section.

        Raises EnvironmentError when the config file is mandatory but
        missing. When ``exit_on_failure`` is True, a section load error
        calls sys.exit(1) instead of propagating ValueError.
        """
        # pylint: disable-msg=W0621
        log = logging.getLogger('argtoolbox')
        discoveredFileList = []
        # Always populated so the mandatory-check error message below can
        # reference it. (The previous code left it undefined when an
        # explicit config_file was given, raising NameError while building
        # the error message.)
        defaultFileList = []
        if self.config_file:
            if isinstance(self.config_file, types.UnicodeType):
                defaultFileList = [self.config_file]
                discoveredFileList = self.file_parser.read(self.config_file)
            else:
                # readfp returns None; record a pseudo file name so that
                # len(discoveredFileList) stays meaningful below.
                self.file_parser.readfp(self.config_file, "file descriptor")
                discoveredFileList = ["file descriptor"]
                defaultFileList = discoveredFileList
        else:
            defaultFileList.append(self.prog_name + ".cfg")
            defaultFileList.append(
                os.path.expanduser('~/.' + self.prog_name + '.cfg'))
            defaultFileList.append('/etc/' + self.prog_name + '.cfg')
            log.debug("defaultFileList: " + str(defaultFileList))
            discoveredFileList = self.file_parser.read(defaultFileList)
        log.debug("discoveredFileList: " + str(discoveredFileList))
        if self.mandatory and len(discoveredFileList) < 1:
            msg = "The required config file was missing."
            msg += " Default config files : " + str(defaultFileList)
            log.error(msg)
            raise EnvironmentError(msg)
        log.debug("loading configuration ...")
        if exit_on_failure:
            for s in self.sections.values():
                log.debug("loading section : " + s.get_section_name())
                try:
                    s.load(self.file_parser)
                except ValueError:
                    sys.exit(1)
        else:
            for s in self.sections.values():
                log.debug("loading section : " + s.get_section_name())
                s.load(self.file_parser)
        log.debug("configuration loaded.")
    def get_parser(self, **kwargs):
        """This method will create and return a new parser with prog_name,
        description, and a config file argument.
        """
        self.parser = argparse.ArgumentParser(prog=self.prog_name,
                                              description=self._desc,
                                              add_help=False, **kwargs)
        # help is removed because parser.parse_known_args() show help,
        # often partial help. help action will be added during
        # reloading step for parser.parse_args()
        if self.use_config_file:
            self.parser.add_argument('--config-file',
                                     action="store",
                                     help="Other configuration file.")
        return self.parser
    def reload(self, hooks=None):
        """This method will reload the configuration using input argument
        from the command line interface.
        1. pasing arguments
        2. applying hooks
        3. addding help argument
        4. reloading configuration using cli argument like a configuration
        file name.
        """
        #from argcomplete import debug
        # Parsing the command line looking for the previous options like
        # configuration file name or server section. Extra arguments
        # will be store into argv.
        args = None
        if os.environ.get('_ARGCOMPLETE'):
            # During argcomplete completion, parse_known_args will return an
            # empty Namespace. In this case, we feed the previous function with
            # data comming from the input completion data
            compline = os.environ.get('COMP_LINE')
            args = self.parser.parse_known_args(compline.split()[1:])[0]
        else:
            args = self.parser.parse_known_args()[0]
        if hooks is not None:
            if isinstance(hooks, list):
                for h in hooks:
                    if isinstance(h, SectionHook):
                        h(args)
            else:
                if isinstance(hooks, SectionHook):
                    hooks(args)
        # After the first argument parsing, for configuration reloading,
        # we can add the help action.
        self.parser.add_argument('-h', '--help', action='help',
                                 default=argparse.SUPPRESS,
                                 help='show this help message and exit')
        # Reloading
        if self.use_config_file:
            # pylint: disable-msg=W0621
            log = logging.getLogger('argtoolbox')
            log.debug("reloading configuration ...")
            if args.config_file:
                self.file_parser = ConfigParser.SafeConfigParser()
                discoveredFileList = self.file_parser.read(args.config_file)
                log.debug("discoveredFileList: " + str(discoveredFileList))
            for s in self.sections.values():
                log.debug("loading section : " + s.get_section_name())
                s.reset()
                s.load(self.file_parser)
            log.debug("configuration reloaded.")
    def __getattr__(self, name):
        # Attribute-style access to sections: config.mysection
        if name.lower() == "default":
            return self._default_section
        s = self.sections.get(name)
        if s is not None:
            return s
        else:
            raise AttributeError("'%(class)s' object has no attribute \
                '%(name)s'" % {"name": name, "class": self.__class__.__name__})
    def __str__(self):
        res = []
        res.append("Configuration of %(prog_name)s : " % self.__dict__)
        for s in self.sections.values():
            res.append("".join(s.get_representation("\t")))
        return "\n".join(res)
    def write_default_config_file(self, output, comments=True):
        """This method write a sample file, with attributes, descriptions,
        sample values, required flags, using the configuration object
        properties.
        """
        if self.use_config_file:
            # pylint: disable-msg=W0621
            log = logging.getLogger('argtoolbox')
            with open(output, 'w') as f:
                if comments:
                    f.write("#####################################\n")
                    f.write("# Description :\n")
                    # The per-line loop below adds its own "# " prefix, so
                    # no trailing "# " here (the previous code doubled the
                    # prefix on the first line), and a missing description
                    # no longer crashes on None.split().
                    f.write("# -------------\n")
                    if self._desc:
                        for i in self._desc.split('\n'):
                            f.write("# ")
                            f.write(i)
                            f.write("\n")
                    f.write("\n\n")
                for s in self.sections.values():
                    log.debug("loading section : " + s.get_section_name())
                    s.write_config_file(f, comments)
            log.debug("config file generation completed : " + str(output))
# -----------------------------------------------------------------------------
class _AbstractSection(object):
"""This class is the parent class of all Section classes. You can not use
it, you must implement abstract methods.
"""
def __init__(self, desc=None, prefix=None,
suffix=None, required=False):
self._name = None
self._desc = desc
self._prefix = prefix
self._suffix = suffix
self._required = required
def get_key_name(self):
"""This method return the name of the section, it Should be unique
because it is used as a key or identifier."""
return self._name
def get_section_name(self):
"""This method build the current section name that the program will
looking for into the configuration file.
The format is [<prefix>-]<name>[-<suffix>].
"""
a = []
if self._prefix:
a.append(self._prefix)
a.append(str(self._name))
if self._suffix:
a.append(self._suffix)
return "-".join(a)
# pylint: disable-msg=W0613
# pylint: disable-msg=R0201
def load(self, file_parser):
""" This method must be implemented by the subclass. This method should
read and load all section elements.
"""
raise NotImplementedError("You must implement this method.")
def get_representation(self, prefix="", suffix="\n"):
"""return the string representation of the current object."""
res = prefix + "Section " + self.get_section_name().upper() + suffix
return res
def __str__(self):
return "".join(self.get_representation())
def write_config_file(self, f, comments):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if comments:
f.write("#####################################\n")
f.write("# Section : ")
f.write("#".join(self.get_representation()) + "\n")
f.write("#####################################\n")
f.write("[" + self._name + "]\n")
if self._desc and comments:
f.write("# Description : ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n")
# -----------------------------------------------------------------------------
class _Section(_AbstractSection):
    """Simple abstract section object, container for Elements"""
    def __init__(self, *args, **kwargs):
        """Elements are kept in declaration order, keyed by element name."""
        super(_Section, self).__init__(*args, **kwargs)
        self.elements = OrderedDict()
    def add_element(self, elt):
        """Helper to add a element to the current section. The Element name
        will be used as an identifier."""
        if not isinstance(elt, Element):
            raise TypeError("argument should be a subclass of Element")
        self.elements[elt.get_name()] = elt
        return elt
    def add_element_list(self, elt_list, **kwargs):
        """Helper to add a list of similar elements to the current section.
        Element names will be used as an identifier."""
        for e in elt_list:
            self.add_element(Element(e, **kwargs))
    def count(self):
        """This method will return the number of Element in the current
        Section"""
        return len(self.elements)
    def reset(self):
        """Clear the loaded value of every element (used before reload)."""
        for e in self.elements.values():
            e.reset()
    def load(self, file_parser):
        """Load every element of this section from the config file.
        A missing section raises ValueError only when the section is
        required; otherwise it is just logged at debug level."""
        section = self.get_section_name()
        try:
            for e in self.elements.values():
                e.load(file_parser, section)
        except ConfigParser.NoSectionError as e:
            # pylint: disable-msg=W0621
            log = logging.getLogger('argtoolbox')
            if self._required:
                log.error("Required section : " + section)
                raise ValueError(e)
            else:
                log.debug("Missing section : " + section)
    def __getattr__(self, name):
        # Attribute-style access to elements: section.myoption
        e = self.elements.get(name)
        if e is not None:
            return e
        else:
            raise AttributeError("'%(class)s' object has no attribute \
                '%(name)s'" % {"name": name, "class": self.__class__.__name__})
    def get_element(self, name):
        """Return the Element stored under ``name`` (None when absent)."""
        return self.elements.get(name)
    def write_config_file(self, f, comments):
        """This method write a sample file, with attributes, descriptions,
        sample values, required flags, using the configuration object
        properties.
        """
        if len(self.elements) < 1:
            return
        super(_Section, self).write_config_file(f, comments)
        for e in self.elements.values():
            e.write_config_file(f, comments)
        f.write("\n")
# -----------------------------------------------------------------------------
class SimpleSection(_Section):
    """A plain named section expected to exist in the configuration file;
    fill it with Elements after creation, e.g.::

        mysection = SimpleSection("section_1")
    """

    def __init__(self, name, *args, **kwargs):
        """Store the section name and forward everything else to _Section."""
        super(SimpleSection, self).__init__(*args, **kwargs)
        self._name = name

    def get_representation(self, prefix="", suffix="\n"):
        """Build the printable lines for this section and its elements;
        an empty section yields an empty list."""
        if self.count() <= 0:
            return []
        lines = [prefix + "Section "
                 + self.get_section_name().upper() + suffix]
        for item in self.elements.values():
            lines.append("".join(item.get_representation(prefix)))
        return lines
# -----------------------------------------------------------------------------
class SubSection(_Section):
    """A section meant to be nested inside an Element (see
    ElementWithSubSections / ElementWithRelativeSubSection). It behaves
    like a plain _Section but supports copy/deepcopy so it can be used as
    a template."""
    def get_representation(self, prefix="", suffix="\n"):
        """Return printable lines for this sub-section and its elements."""
        res = []
        if self.count() > 0:
            res.append(prefix + "SubSection : "
                       + self.get_section_name().upper() + suffix)
            for elt in self.elements.values():
                res.append("".join(elt.get_representation(prefix)))
        return res
    def __copy__(self):
        """Shallow copy: the clone starts with an empty element table."""
        newone = type(self)()
        newone.__dict__.update(self.__dict__)
        # Bug fix: the fresh element table must go on the *copy*; the
        # previous code emptied ``self.elements`` instead, destroying the
        # source object's content.
        newone.elements = OrderedDict()
        return newone
    # pylint: disable-msg=W0613
    def __deepcopy__(self, *args):
        """Deep copy: the clone gets deep copies of every element."""
        newone = type(self)()
        newone.__dict__.update(self.__dict__)
        newone.elements = OrderedDict()
        for e in self.elements.values():
            newone.add_element(copy.deepcopy(e))
        return newone
# -----------------------------------------------------------------------------
class ListSection(_AbstractSection):
    """A section whose options are loaded as a raw key/value mapping
    (plain strings) instead of pre-declared Element objects."""
    def __init__(self, name, *args, **kwargs):
        super(ListSection, self).__init__(*args, **kwargs)
        self.elements = OrderedDict()
        self._name = name
    def load(self, file_parser):
        """Read every option of the section (excluding DEFAULT entries)
        into ``self.elements``. A missing section raises ValueError only
        when the section is required."""
        section = self.get_section_name()
        try:
            # TODO : ? : data = data.decode(locale.getpreferredencoding())
            for key in [item for item in file_parser.options(section)
                        if item not in file_parser.defaults().keys()]:
                self.elements[key] = file_parser.get(section, key)
        except ConfigParser.NoSectionError as e:
            # pylint: disable-msg=W0621
            log = logging.getLogger('argtoolbox')
            if self._required:
                log.error("Required section : " + section)
                raise ValueError(e)
            else:
                log.debug("Missing section : " + section)
    def reset(self):
        """Forget previously loaded values.
        Bug fix: the stored values are plain strings (they have no
        ``reset`` method), so the table is cleared instead of calling
        ``reset`` on each value (which raised AttributeError on reload)."""
        self.elements = OrderedDict()
    def get_representation(self, prefix="", suffix="\n"):
        """Return printable ``key : value`` lines for this section."""
        res = []
        res.append(prefix + "Section " + self._name + suffix)
        for key, val in self.elements.items():
            a = []
            a.append(prefix)
            a.append(" - " + str(key) + " : " + str(val))
            a.append(suffix)
            res.append("".join(a))
        return res
# -----------------------------------------------------------------------------
# warning| [R0902, Element] Too many instance attributes (13/7)
# pylint: disable-msg=R0902
class Element(object):
    """
    An Element could represent a option into the configuration file, this
    class lets you configure many requirements like default value, data
    type, if the option is mandatory, etc.
    You can also defined if element could be supply by the command line
    interface, default options for the cli, etc.
    """
    # pylint: disable-msg=R0913
    def __init__(self, name, e_type=str, required=False, default=None,
                 conf_hidden=False, conf_required=False, desc=None,
                 hooks=None, hidden=False, e_type_exclude=False):
        """Information about how to declare a element which will be load from a
        configuration file.
        Keyword Arguments:
        - name -- name of the attribute store into the configuration file.
        - e_type -- Data type of the attribute.
        - e_type_exclude -- Do not export data type to argparse. Example there is a
        conflict between type=int and action=count for argparse parameters.
        - conf_required -- The current attribute must be present in the
        configuration file.
        - required -- The current attribute must be present into command line
        arguments except if it is present into configuration file.
        - default -- Default value used if the attribute is not set in
        configuration file.
        This value is also used during configuration file generation.
        ex: 'attribute = $default_value' or ';attribute = $default_value'
        if this attribute is mandatory.
        - desc -- Description used into the configuration file and argparse.
        - conf_hidden -- The current attribute will not be used during
        configuration file generation.
        - hidden -- The current attribute will not be print on console
        (ex password)
        - hooks -- one hook or a list of hook. Should be an instance of
        DefaultHook. The hook will be apply to the element value once read
        from config file.
        """
        self._name = name
        self.e_type = e_type
        self.e_type_exclude = e_type_exclude
        self._required = required
        self.default = default
        self._desc = desc
        self.conf_hidden = conf_hidden
        self.conf_required = conf_required
        self._desc_for_config = None
        self._desc_for_argparse = None
        self.value = None
        self.hidden = hidden
        # Normalise hooks to a list; a single DefaultHook is wrapped.
        if hooks is None:
            hooks = []
        if isinstance(hooks, list):
            for h in hooks:
                if not isinstance(h, DefaultHook):
                    raise TypeError("Hook argument should be a subclass"
                                    + " of DefaultHook")
            self.hooks = hooks
        else:
            if isinstance(hooks, DefaultHook):
                self.hooks = [hooks]
            else:
                raise TypeError(
                    "Hook argument should be a subclass of DefaultHook")
    def get_name(self):
        """This method will return the name of the current element"""
        return self._name
    def get_representation(self, prefix="", suffix="\n"):
        """This method build a array that will contain the string
        representation of the current object. Every lines could be
        prefixed and suffixed.
        """
        res = []
        if self.hidden:
            # hidden elements (e.g. passwords) are masked on the console
            res.append(prefix + " - " + str(self._name)
                       + " : xxxxxxxx" + suffix)
        else:
            default = self.default
            if default is None:
                default = " - "
            a = prefix + " - "
            a += str(self._name) + " : "
            if isinstance(default, types.UnicodeType):
                a += default
            else:
                a += str(default)
            a += suffix
            res.append(a)
        return res
    def __str__(self):
        return "".join(self.get_representation())
    def __copy__(self):
        """Shallow copy; shares the element table (see commented line)."""
        newone = type(self)(self._name)
        newone.__dict__.update(self.__dict__)
        #self.elements = OrderedDict()
        return newone
    def post_load(self):
        """Every element hooks are applied by this method, just after the
        loading process.
        """
        for h in self.hooks:
            h(self)
    def load(self, file_parser, section_name):
        """The current element is loaded from the configuration file,
        all constraints and requirements are checked.
        Then element hooks are applied.
        """
        self._load(file_parser, section_name)
        self.post_load()
    def _load(self, file_parser, section_name):
        """The current element is loaded from the configuration file,
        all constraints and requirements are checked.
        """
        # pylint: disable-msg=W0621
        log = logging.getLogger('argtoolbox')
        try:
            log.debug("looking for field (section=" + section_name
                      + ") : " + self._name)
            data = None
            try:
                # Dispatch on the declared data type; ConfigParser does
                # the int/float/bool conversions itself.
                if self.e_type == int:
                    data = file_parser.getint(section_name, self._name)
                elif self.e_type == float:
                    data = file_parser.getfloat(section_name, self._name)
                elif self.e_type == bool:
                    data = file_parser.getboolean(section_name, self._name)
                elif self.e_type == list:
                    data = file_parser.get(section_name, self._name)
                    data = data.strip()
                    # Python 2: decode raw bytes using the platform encoding.
                    data = data.decode(locale.getpreferredencoding())
                    data = data.split()
                    if not data:
                        msg = "The optional field '%(name)s' was present, \
type is list, but the current value is an empty \
list." % {"name": self._name}
                        log.error(msg)
                        raise ValueError(msg)
                elif self.e_type == str:
                    data = file_parser.get(section_name, self._name)
                    # happens only when the current field is present,
                    # type is string, but value is ''
                    if not data:
                        msg = "The optional field '%(name)s' was present, \
type is string, but the current value is an empty \
string." % {"name": self._name}
                        log.error(msg)
                        raise ValueError(msg)
                    # Python 2: decode raw bytes using the platform encoding.
                    data = data.decode(locale.getpreferredencoding())
                else:
                    msg = "Data type not supported : %(type)s " % {
                        "type": self.e_type}
                    log.error(msg)
                    raise TypeError(msg)
            except ValueError as ex:
                msg = "The current field '%(name)s' was present, but the \
required type is : %(e_type)s." % {
                    "name": self._name,
                    "e_type": self.e_type
                }
                log.error(msg)
                log.error(str(ex))
                raise ValueError(str(ex))
            log_data = {"name": self._name, "data": data,
                        "e_type": self.e_type}
            if self.hidden:
                log_data['data'] = "xxxxxxxx"
            log.debug("field found : '%(name)s', value : '%(data)s', \
type : '%(e_type)s'", log_data)
            self.value = data
        except ConfigParser.NoOptionError:
            # Option absent from the file: error if conf_required,
            # otherwise fall back on the declared default (if any).
            if self.conf_required:
                msg = "The required field '%(name)s' was missing from the \
config file." % {"name": self._name}
                log.error(msg)
                raise ValueError(msg)
            if self.default is not None:
                self.value = self.default
                log_data = {"name": self._name, "data": self.default,
                            "e_type": self.e_type}
                if self.hidden:
                    log_data['data'] = "xxxxxxxx"
                log.debug("Field not found : '%(name)s', default value : \
'%(data)s', type : '%(e_type)s'", log_data)
            else:
                log.debug("Field not found : '" + self._name + "'")
    def reset(self):
        """Forget the previously loaded value (used before a reload)."""
        self.value = None
    def get_arg_parse_arguments(self):
        """
        During the element declaration, all configuration file requirements
        and all cli requirements have been described once.
        This method will build a dict containing all argparse options.
        It can be used to feed argparse.ArgumentParser.
        You do not need to have multiple declarations.
        """
        ret = dict()
        if self._required:
            # a value loaded from the config file satisfies the CLI
            # requirement, so it becomes the default instead.
            if self.value is not None:
                ret["default"] = self.value
            else:
                ret["required"] = True
        ret["dest"] = self._name
        if not self.e_type_exclude:
            if self.e_type == int or self.e_type == float:
                # Just override argparse.add_argument 'type' parameter for int or float.
                ret["type"] = self.e_type
        if self.value is not None:
            ret["default"] = self.value
        if self._desc:
            ret["help"] = self._desc
        return ret
    def write_config_file(self, f, comments):
        """This method write a sample file, with attributes, descriptions,
        sample values, required flags, using the configuration object
        properties.
        """
        if self.conf_hidden:
            return False
        if comments:
            f.write("\n")
            f.write("# Attribute (")
            f.write(str(self.e_type.__name__))
            f.write(") : ")
            f.write(self._name.upper())
            f.write("\n")
            if self._desc and self._desc != argparse.SUPPRESS:
                f.write("# Description : ")
                for i in self._desc.split('\n'):
                    f.write("# ")
                    f.write(i)
                    f.write("\n")
                f.write("\n")
        # optional attributes are written commented out (leading ';')
        if not self.conf_required:
            f.write(";")
        f.write(self._name)
        f.write("=")
        if self.default is not None and not self.hidden:
            f.write(str(self.default))
        f.write("\n")
# -----------------------------------------------------------------------------
class ElementWithSubSections(Element):
    """ This class extends the default class Element. It offers you the power
    to add sections (SubSection) inside a element.
    The simple case is one section containing some elements.
    But in some situation you may represent your data like a tree.
    Section :
    Element1 : name = port, value = 389
    Element2 : name = address, value = 127.0.0.1
    ElementWithSubSections
    SubSection
    Element1
    Element2
    """
    def __init__(self, *args, **kwargs):
        super(ElementWithSubSections, self).__init__(*args, **kwargs)
        # the element value (a string) names the sub-section to load
        self.e_type = str
        self.sections = OrderedDict()
    def get_representation(self, prefix="", suffix="\n"):
        """Return printable lines for this element and its sub-sections."""
        res = ['\n']
        temp_line = prefix + " - " + str(self._name) + " : "
        if self.hidden:
            temp_line += "xxxxxxxx" + suffix
        else:
            temp_line += str(self.value) + suffix
        res.append(temp_line)
        if len(self.sections) > 0:
            for elt in self.sections.values():
                res.append("".join(elt.get_representation(prefix + "\t")))
        return res
    def add_section(self, section):
        """You can add section inside a Element, the section must be a
        subclass of SubSection. You can use this class to represent a tree.
        """
        if not issubclass(section.__class__, SubSection):
            raise TypeError("Argument should be a subclass of SubSection, \
                not :" + str(section.__class__))
        # Bug fix: key by the section identifier (_name, via get_key_name)
        # like Config.add_section does; the previous code read
        # ``section.name`` which is normally unset on a SubSection and
        # raised AttributeError through _Section.__getattr__.
        self.sections[section.get_key_name()] = section
        return section
    def load(self, file_parser, section_name):
        """Load this element's value, then load every sub-section using
        that value as the sub-section name."""
        self._load(file_parser, section_name)
        if len(self.sections) > 0:
            for sec in self.sections.values():
                # Bug fix: set '_name' (the attribute get_section_name
                # actually reads), consistent with
                # ElementWithRelativeSubSection.load; the previous code set
                # ``sec.name`` which had no effect on section resolution.
                setattr(sec, '_name', self.value)
                sec.load(file_parser)
        self.post_load()
# -----------------------------------------------------------------------------
class ElementWithRelativeSubSection(ElementWithSubSections):
    """ This class extends the default class Element. It offers you the power
    to add sections (SubSection) inside a element.
    The simple case is one section containing some elements.
    But in some situation you may represent your data like a tree and, you want
    to search a section dynamically.
    Section :
    Element1 : name = port, value = 389
    Element2 : name = address, value = 127.0.0.1
    ElementWithRelativeSubSection :
    name = list_of_section_name, value = sec_test1 sec_test2 sec_test3
    This class will look for sections named sec_test1, sec_test2 and sec_test3.
    The structure used to load the previous sections is second constructor
    argument name "rss". It is a SubSection that will be used as a template.
    ex :
    SubSection :
    - Element4
    - Element5
    - Element5
    This class could be used to store, inside a element, a list of complex
    structures.
    At the end, when ElementWithRelativeSubSection will be loaded,
    you should have the following struture :
    Section :
    Element1 : name = port, value = 389
    Element2 : name = address, value = 127.0.0.1
    ElementWithRelativeSubSection :
    SubSection : sec_test1
    - Element4
    - Element5
    - Element5
    SubSection : sec_test2
    - Element4
    - Element5
    - Element5
    SubSection : sec_test3
    - Element4
    - Element5
    - Element5
    """
    def __init__(self, name, rss, **kwargs):
        """Keyword Arguments:
        - name -- element name (its value lists the section names).
        - rss -- SubSection used as a template for each relative section.
        """
        super(ElementWithRelativeSubSection, self).__init__(name, **kwargs)
        # the element value is a whitespace-separated list of section names
        self.e_type = list
        if not issubclass(rss.__class__, SubSection):
            # Bug fix: report the class of the offending argument
            # (previously printed str(_Section.__class__), the metaclass of
            # an unrelated class).
            raise TypeError("Argument should be a subclass of SubSection, \
                not :" + str(rss.__class__))
        self.rss = rss
    def load(self, file_parser, section_name):
        """Load the list of section names, then deep-copy the template
        SubSection once per name and load it from the config file."""
        self._load(file_parser, section_name)
        if isinstance(self.value, list):
            for sec_name in self.value:
                try:
                    sec = copy.deepcopy(self.rss, None)
                    setattr(sec, '_name', sec_name)
                    self.sections[sec_name] = sec
                    sec.load(file_parser)
                except ValueError as e:
                    # pylint: disable-msg=W0621
                    log = logging.getLogger('argtoolbox')
                    error = []
                    error.append("Missing relative section, attribute : ")
                    error.append("'[" + section_name + "]." + self._name)
                    error.append("', value : " + str(self.value))
                    log.error("".join(error))
                    raise ValueError(e)
        self.post_load()
    def get_representation(self, prefix="", suffix="\n"):
        """Return printable lines: the name list, the (commented) template
        and every loaded relative section."""
        res = ['\n']
        res.append('#')
        temp_line = prefix + " - " + str(self._name) + " : "
        if self.hidden:
            temp_line += "xxxxxxxx" + suffix
        else:
            temp_line += str(self.value) + suffix
        res.append(temp_line)
        res.append("".join(self.rss.get_representation(prefix + "#\t")))
        res.append('\n')
        if len(self.sections) > 0:
            for elt in self.sections.values():
                res.append('\n')
                res.append("".join(elt.get_representation(prefix + "\t")))
                res.append('\n')
        return res
# -----------------------------------------------------------------------------
class DefaultCommand(object):
    """Base class for command line actions. By itself it only dumps the
    parsed namespace at debug level; derive from it and override
    ``__call__`` (and optionally ``complete``) to implement a real
    command."""

    def __init__(self, config=None):
        """Keep a reference to the configuration object and create a
        per-command logger named after the concrete subclass."""
        logger_name = 'argtoolbox.' + str(self.__class__.__name__.lower())
        self.log = logging.getLogger(logger_name)
        self.config = config
        # attribute names listed here are masked in the debug dump below
        self.protected_args = ['password']

    def __call__(self, args):
        """Log every attribute of the parsed namespace at debug level,
        masking the attributes listed in ``protected_args``."""
        masked = copy.copy(args)
        #delattr(masked, "__func__")
        for field in getattr(self, 'protected_args', []):
            if hasattr(masked, field):
                setattr(masked, field, "xxxxxxxx")
        self.log.debug("Namespace : begin :")
        for key in masked.__dict__:
            val = getattr(masked, key)
            if isinstance(val, types.UnicodeType):
                self.log.debug(key + " : " + val + " : <type 'unicode'>")
            else:
                self.log.debug(key + " : " + str(val) + " : " + str(type(val)))
        self.log.debug("Namespace : end.")

    # pylint: disable-msg=W0613
    # pylint: disable-msg=R0201
    def complete(self, args, prefix):
        """Auto complete method, args is comming from argparse and prefix is
        the input data from command line.
        You must return a list."""
        return []
# -----------------------------------------------------------------------------
class TestCommand(DefaultCommand):
    """Just a simple command, using the default command class."""
    def __call__(self, args):
        # Let the base class dump the namespace at debug level first.
        super(TestCommand, self).__call__(args)
        # NOTE: Python 2 print statements and unicode(); this module
        # targets Python 2 only.
        print ""
        print "This is the beginning of the TestCommand class."
        print ""
        print "The loaded configuration : "
        print "---------------------------"
        print unicode(self.config)
        print ""
        print "The command line arguments (argv) : "
        print "------------------------------------"
        print args
        print ""
        print "This is the end of the TestCommand class."
        print ""
# -----------------------------------------------------------------------------
class DefaultCompleter(object):
    """Generic argcomplete completer: it delegates completion to a method
    (named ``func_name``, "complete" by default) looked up on the command
    object attached to the parsed arguments (``args.__func__``)."""

    def __init__(self, func_name="complete"):
        """Record the name of the completion method to look up."""
        self.func_name = func_name

    def __call__(self, prefix, **kwargs):
        """Called by argcomplete with the partial word ``prefix`` plus
        keyword context (parsed_args, parser, ...)."""
        from argcomplete import debug
        try:
            debug("\n------------ DefaultCompleter -----------------")
            debug("Kwargs content :")
            for key, val in kwargs.items():
                debug("key : " + str(key))
                debug("\t - " + str(val))
            debug("\n------------ DefaultCompleter -----------------\n")
            args = kwargs.get('parsed_args')
            # pylint: disable-msg=W0612
            parser = kwargs.get('parser')
            #a = parser.parse_known_args()
            # The current command object is stored on args.__func__; look
            # up its completion method (self.func_name) and delegate to it
            # when present.
            handler = getattr(args.__func__, self.func_name, None)
            if handler:
                return handler(args, prefix)
        # pylint: disable-msg=W0703
        except Exception as e:
            from argcomplete import warn
            warn("\nERROR::COMPLETE:An exception was caught :" + str(e) + "\n")
            import traceback
            traceback.print_exc()
            debug("\n------\n")
            return ["comlete-error"]
# -----------------------------------------------------------------------------
class DefaultProgram(object):
""" TODO """
def __init__(self, parser, config=None, force_debug=False):
self.parser = parser
self.config = config
self.force_debug = force_debug
def __call__(self):
def patch(self, parser, namespace, values, option_string=None):
"""patch original __call__ method for argparse 1.1 (fix)"""
from argparse import _UNRECOGNIZED_ARGS_ATTR
from argparse import SUPPRESS
from argparse import _
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser %r (choices: %s)') % tup
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings,
namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
# integration with argcomplete python module (bash completion)
try:
# Very ugly dirty hook/fix
if argparse.__version__ == "1.1":
# pylint: disable=protected-access
argparse._SubParsersAction.__call__ = patch
import argcomplete
argcomplete.autocomplete(self.parser)
except ImportError:
pass
# parse cli arguments
args = self.parse_args()
if getattr(args, 'debug', False) or self.force_debug:
llog = logging.getLogger()
llog.setLevel(logging.DEBUG)
streamHandler.setFormatter(DEBUG_LOGGING_FORMAT)
if self.config:
print "debug>>>----------- config -------------------"
print unicode(self.config)
print "debug----------- processing --------------<<<<"
# run command
return args.__func__(args)
else:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
from argparse import ArgumentError
try:
# run command
return args.__func__(args)
except ValueError as a:
log.error("ValueError : " + str(a))
except KeyboardInterrupt as a:
log.warn("Keyboard interruption detected.")
except ArgumentError as a:
self.parser.error(a.message)
except Exception as a:
log.error("unexcepted error : " + str(a))
return False
def parse_args(self):
args = self.parser.parse_args()
for attr in args.__dict__:
data = getattr(args, attr)
if isinstance(data, str):
data = data.decode(locale.getpreferredencoding())
setattr(args, attr, data)
if isinstance(data, list):
res = []
for val in data:
if isinstance(val, str):
res.append(val.decode(locale.getpreferredencoding()))
else:
res.append(val)
setattr(args, attr, res)
return args
# -----------------------------------------------------------------------------
class BasicProgram(object):
    """High-level program skeleton: builds a Config, a parser, loads and
    reloads the configuration, then runs the selected command through
    DefaultProgram. Override the ``add_*`` hooks to customise it."""
    def __init__(self, name, config_file=None, desc=None,
                 mandatory=False, use_config_file=True, version="0.1-alpha",
                 force_debug=False, force_debug_to_file=False):
        # create configuration
        self.config = Config(name, config_file=config_file, desc=desc,
                             mandatory=mandatory,
                             use_config_file=use_config_file)
        self.prog_name = name
        self.parser = None
        self.version = version
        self.formatter_class = None
        self.force_debug = force_debug
        self.force_debug_to_file = force_debug_to_file
        self.log = self.init_logger()
    def init_logger(self):
        """Configure and return the root logger.
        NOTE(review): relies on module-level ``streamHandler`` and
        ``DEBUG_LOGGING_FORMAT`` defined elsewhere in this file."""
        # logger
        log = logging.getLogger()
        log.setLevel(logging.INFO)
        # logger handlers
        log.addHandler(streamHandler)
        # debug mode
        # if you want to enable debug during class construction, file
        # configuration loading, ..., you need to modify the logger level here.
        if self.force_debug:
            log.setLevel(logging.DEBUG)
            streamHandler.setFormatter(DEBUG_LOGGING_FORMAT)
        if self.force_debug_to_file:
            dest = self.prog_name + ".log"
            FILEHA = logging.FileHandler(dest, 'w', 'utf-8')
            FILEHA.setFormatter(DEBUG_LOGGING_FORMAT)
            log.setLevel(logging.DEBUG)
            log.addHandler(FILEHA)
            streamHandler.setFormatter(DEBUG_LOGGING_FORMAT)
            log.warning("output log file : " + dest)
        return log
    def add_config_options(self):
        """ You can override this method in order to add your options to the
        config object."""
        # default section
        default = self.config.get_default_section()
        default.add_element(
            Element('debug',
                    e_type=int,
                    e_type_exclude=True,
                    default=0,
                    desc="""debug level : default : 0."""))
    def load(self):
        """Load the configuration from the config file."""
        # loading default configuration from the file
        self.config.load()
    def init_parser(self):
        """Create the argparse parser and add the common arguments
        (--verbose, --version)."""
        # arguments parser
        self.parser = self.config.get_parser()
        if self.formatter_class:
            self.parser.formatter_class = self.formatter_class
        self.parser.add_argument('-v', '--verbose',
                                 action="store_true",
                                 default=False)
        self.parser.add_argument('--version',
                                 action="version",
                                 version="%(prog)s " + self.version)
    def add_pre_commands(self):
        """ You can override this method in order to add your command line
        arguments to the argparse parser. The configuration file is already
        loaded at this time."""
        pass
    def reload(self):
        """Reload the configuration using CLI arguments (e.g. an alternate
        config file name)."""
        # reloading configuration with previous optional arguments
        # (example : config file name from argv, ...)
        self.config.reload()
    def add_commands(self):
        """ You can override this method in order to add your command line
        arguments to the argparse parser. The configuration file was
        reloaded at this time."""
        self.parser.add_argument(
            '-d',
            action="count",
            **self.config.default.debug.get_arg_parse_arguments())
    def __call__(self):
        """Run the whole program life cycle and exit with 0 on success,
        1 on failure."""
        # adding some user options to the config object
        self.add_config_options()
        # loading default configuration from the file
        self.load()
        # initialisation of the cli parser,
        # some default arguments are also added.
        self.init_parser()
        # adding some user arguments
        self.add_pre_commands()
        # reloading configuration with previous optional arguments
        # (example : config file name from argv, ...)
        self.reload()
        # adding all commands
        self.add_commands()
        # run
        run = DefaultProgram(self.parser, self.config,
                             force_debug=self.force_debug)
        if run():
            sys.exit(0)
        else:
            sys.exit(1)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def _query_yes_no(question, default="yes"):
    """Ask a yes/no question via raw_input() and return their answer.
    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).
    The "answer" return value is one of "yes" or "no".
    """
    answers = {"yes": "yes", "y": "yes", "ye": "yes",
               "no": "no", "n": "no"}
    # Prompt suffix depends on which answer (if any) is the default.
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]
    while True:
        sys.stdout.write(question + prompt)
        try:
            choice = raw_input().lower()
        except KeyboardInterrupt:
            # Ctrl-C is treated as a refusal.
            print
            return "no"
        if choice == '' and default is not None:
            return default
        if choice in answers:
            return answers[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | _query_yes_no | python | def _query_yes_no(question, default="yes"):
valid = {"yes": "yes", "y": "yes", "ye": "yes",
"no": "no", "n": "no"}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
try:
choice = raw_input().lower()
except KeyboardInterrupt:
print
return "no"
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n") | Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no". | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L1334-L1368 | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# PYTHON_ARGCOMPLETE_OK
# This file is part of argtoolbox.
#
# argtoolbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# argtoolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LinShare user cli. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014 Frédéric MARTIN
#
# Contributors list:
#
# Frédéric MARTIN frederic.martin.fma@gmail.com
#
from __future__ import unicode_literals
import os
import sys
import logging
import base64
import copy
from ordereddict import OrderedDict
import ConfigParser
import argparse
import types
import locale
# -----------------------------------------------------------------------------
# global logger variable
#log = logging.getLogger('argtoolbox')
#log.setLevel(logging.INFO)
#log.setLevel(logging.DEBUG)
# logger formats
# Default console format: timestamp + level + message.
DEFAULT_LOGGING_FORMAT = logging.Formatter(
    "%(asctime)s %(levelname)-8s: %(message)s", "%H:%M:%S")
# Verbose format used in debug mode: adds module/logger/function/line.
DEBUG_LOGGING_FORMAT = logging.Formatter(
    "%(asctime)s %(levelname)-8s %(module)s:%(name)s:%(funcName)s:%(lineno)d:%(message)s",
    "%H:%M:%S")
# logger handlers
# pylint: disable-msg=C0103
# Shared stdout handler; its formatter is swapped when debug is enabled.
streamHandler = logging.StreamHandler(sys.stdout)
streamHandler.setFormatter(DEFAULT_LOGGING_FORMAT)
# debug mode
# if you need debug during class construction, file config loading, ...,
# you need to modify the logger level here.
#log.addHandler(streamHandler)
#log.setLevel(logging.DEBUG)
#streamHandler.setFormatter(DEBUG_LOGGING_FORMAT)
# -----------------------------------------------------------------------------
class DefaultHook(object):
    """No-op post-load hook and base class for custom hooks.

    Subclass it and override :meth:`__call__` to post-process an
    element right after its value has been read from the configuration
    file (base64 decoding, or any other transformation you need).
    """

    def __init__(self):
        # Nothing to configure for the default hook.
        pass

    def __call__(self, elt):
        # Default behaviour: leave the element untouched.
        pass
# -----------------------------------------------------------------------------
class Base64ElementHook(DefaultHook):
    """This hook is used as a post reading processing in order to convert
    base64 data stored into the config file into plain text data."""
    def __init__(self, warning=False):
        # warning=True: a badly-encoded value is only logged, not fatal.
        super(Base64ElementHook, self).__init__()
        self.warning = warning
    def __call__(self, elt):
        # Decode the element value in place; falsy values are ignored.
        if elt.value:
            try:
                data = base64.b64decode(elt.value)
                elt.value = data
            except TypeError as ex:
                # pylint: disable-msg=W0621
                log = logging.getLogger('argtoolbox')
                if self.warning:
                    log.warn("current field '%(name)s' is not \
stored in the configuration file with \
base64 encoding",
                             {"name": getattr(elt, "_name")})
                else:
                    # NOTE(review): re-raised only in non-warning mode,
                    # matching the `warning` flag semantics -- confirm.
                    log.error("current field '%(name)s' is not stored in the \
configuration file with base64 encoding", {"name":
                              getattr(elt, "_name")})
                    raise ex
# -----------------------------------------------------------------------------
class SectionHook(object):
    """Hook applied after CLI parsing, during configuration reload.

    It looks up the CLI argument named ``opt_name`` in the parsed
    ``args`` namespace and, when that argument carries a value, stores
    the value into ``section.<attribute>``. Typical use: let the user
    pick which configuration section to load from the command line.
    """

    def __init__(self, section, attribute, opt_name):
        # Validate all three arguments up front.
        if not issubclass(section.__class__, _AbstractSection):
            raise TypeError("First argument should be a subclass of _Section.")
        if not isinstance(attribute, types.UnicodeType):
            raise TypeError("Second argument should be a string, "
                            + "attribute name.")
        if not isinstance(opt_name, types.UnicodeType):
            raise TypeError("Third argument should be a string, option name.")
        self.section = section
        self.attribute = attribute
        self.opt_name = opt_name

    def __call__(self, args):
        # Fetch the matching CLI option and, when it was supplied,
        # push its value onto the configured section attribute.
        value = getattr(args, self.opt_name)
        if value is not None:
            setattr(self.section, self.attribute, value)
# -----------------------------------------------------------------------------
class Config(object):
    # pylint: disable-msg=R0902
    """Entry point of the configuration framework.

    Holds every Section (and through them every Element), loads them
    from an INI-style configuration file and bridges them to argparse.
    """
    def __init__(self, prog_name, config_file=None, desc=None,
                 mandatory=False, use_config_file=True):
        """Keyword arguments:
        - prog_name -- program name; also drives default file names.
        - config_file -- explicit config file name or file descriptor.
        - desc -- program description (argparse / sample file header).
        - mandatory -- when True, a missing config file raises.
        - use_config_file -- when False, no file is ever read.
        """
        self.prog_name = prog_name
        self.config_file = config_file
        self.use_config_file = use_config_file
        self._desc = desc
        self.mandatory = mandatory
        self.sections = OrderedDict()
        self._default_section = self.add_section(SimpleSection("DEFAULT"))
        self.parser = None
        self.file_parser = ConfigParser.SafeConfigParser()
    def add_section(self, section):
        """Add a new Section object to the config. Should be a subclass of
        _AbstractSection."""
        if not issubclass(section.__class__, _AbstractSection):
            raise TypeError("argument should be a subclass of Section")
        self.sections[section.get_key_name()] = section
        return section
    def get_section(self, name):
        """Return the section registered under `name`; "default" (any
        case) is an alias for the DEFAULT section."""
        if name.lower() == "default":
            return self._default_section
        return self.sections.get(name)
    def get_default_section(self):
        """This method will return default section object"""
        return self._default_section
    def load(self, exit_on_failure=False):
        """One you have added all your configuration data (Section, Element,
        ...) you need to load data from the config file."""
        if self.use_config_file:
            self._load(exit_on_failure)
    def _load(self, exit_on_failure):
        """Discover and parse the configuration file(s), then load every
        registered section. Raises EnvironmentError when `mandatory`
        is set and no file could be read."""
        # pylint: disable-msg=W0621
        log = logging.getLogger('argtoolbox')
        discoveredFileList = []
        # BUG FIX: defaultFileList was previously defined only in the
        # `else` branch below, so the `mandatory` error path raised a
        # NameError whenever an explicit config_file was given but not
        # found. Define it unconditionally.
        defaultFileList = []
        if self.config_file:
            if isinstance(self.config_file, types.UnicodeType):
                discoveredFileList = self.file_parser.read(self.config_file)
            else:
                # NOTE(review): readfp() returns None, so combining a
                # file descriptor with mandatory=True would crash on the
                # len() check below -- confirm intended usage.
                discoveredFileList = self.file_parser.readfp(
                    self.config_file,
                    "file descriptor")
        else:
            # no explicit file: probe cwd, then $HOME, then /etc.
            defaultFileList.append(self.prog_name + ".cfg")
            defaultFileList.append(
                os.path.expanduser('~/.' + self.prog_name + '.cfg'))
            defaultFileList.append('/etc/' + self.prog_name + '.cfg')
            log.debug("defaultFileList: " + str(defaultFileList))
            discoveredFileList = self.file_parser.read(defaultFileList)
        log.debug("discoveredFileList: " + str(discoveredFileList))
        if self.mandatory and len(discoveredFileList) < 1:
            msg = "The required config file was missing."
            msg += " Default config files : " + str(defaultFileList)
            log.error(msg)
            raise EnvironmentError(msg)
        log.debug("loading configuration ...")
        if exit_on_failure:
            for s in self.sections.values():
                log.debug("loading section : " + s.get_section_name())
                try:
                    s.load(self.file_parser)
                except ValueError:
                    sys.exit(1)
        else:
            for s in self.sections.values():
                log.debug("loading section : " + s.get_section_name())
                s.load(self.file_parser)
        log.debug("configuration loaded.")
    def get_parser(self, **kwargs):
        """This method will create and return a new parser with prog_name,
        description, and a config file argument.
        """
        self.parser = argparse.ArgumentParser(prog=self.prog_name,
                                              description=self._desc,
                                              add_help=False, **kwargs)
        # help is removed because parser.parse_known_args() show help,
        # often partial help. help action will be added during
        # reloading step for parser.parse_args()
        if self.use_config_file:
            self.parser.add_argument('--config-file',
                                     action="store",
                                     help="Other configuration file.")
        return self.parser
    def reload(self, hooks=None):
        """This method will reload the configuration using input argument
        from the command line interface.
        1. parsing arguments
        2. applying hooks
        3. adding help argument
        4. reloading configuration using cli argument like a
           configuration file name.
        """
        # Parsing the command line looking for the previous options like
        # configuration file name or server section. Extra arguments
        # will be store into argv.
        args = None
        if os.environ.get('_ARGCOMPLETE'):
            # During argcomplete completion, parse_known_args will return an
            # empty Namespace. In this case, we feed the previous function with
            # data comming from the input completion data
            compline = os.environ.get('COMP_LINE')
            args = self.parser.parse_known_args(compline.split()[1:])[0]
        else:
            args = self.parser.parse_known_args()[0]
        if hooks is not None:
            # apply one hook, or a list of hooks, to the parsed args.
            if isinstance(hooks, list):
                for h in hooks:
                    if isinstance(h, SectionHook):
                        h(args)
            else:
                if isinstance(hooks, SectionHook):
                    hooks(args)
        # After the first argument parsing, for configuration reloading,
        # we can add the help action.
        self.parser.add_argument('-h', '--help', action='help',
                                 default=argparse.SUPPRESS,
                                 help='show this help message and exit')
        # Reloading
        if self.use_config_file:
            # pylint: disable-msg=W0621
            log = logging.getLogger('argtoolbox')
            log.debug("reloading configuration ...")
            if args.config_file:
                # a new config file was given on the command line.
                self.file_parser = ConfigParser.SafeConfigParser()
                discoveredFileList = self.file_parser.read(args.config_file)
                log.debug("discoveredFileList: " + str(discoveredFileList))
            for s in self.sections.values():
                log.debug("loading section : " + s.get_section_name())
                s.reset()
                s.load(self.file_parser)
            log.debug("configuration reloaded.")
    def __getattr__(self, name):
        # attribute-style access to sections: config.my_section
        if name.lower() == "default":
            return self._default_section
        s = self.sections.get(name)
        if s is not None:
            return s
        else:
            raise AttributeError("'%(class)s' object has no attribute \
'%(name)s'" % {"name": name, "class": self.__class__.__name__})
    def __str__(self):
        res = []
        res.append("Configuration of %(prog_name)s : " % self.__dict__)
        for s in self.sections.values():
            res.append("".join(s.get_representation("\t")))
        return "\n".join(res)
    def write_default_config_file(self, output, comments=True):
        """This method write a sample file, with attributes, descriptions,
        sample values, required flags, using the configuration object
        properties.
        """
        if self.use_config_file:
            # pylint: disable-msg=W0621
            log = logging.getLogger('argtoolbox')
            with open(output, 'w') as f:
                if comments:
                    f.write("#####################################\n")
                    f.write("# Description :\n")
                    f.write("# -------------\n# ")
                    for i in self._desc.split('\n'):
                        f.write("# ")
                        f.write(i)
                        f.write("\n")
                    f.write("\n\n")
                for s in self.sections.values():
                    log.debug("loading section : " + s.get_section_name())
                    s.write_config_file(f, comments)
                log.debug("config file generation completed : " + str(output))
# -----------------------------------------------------------------------------
class _AbstractSection(object):
"""This class is the parent class of all Section classes. You can not use
it, you must implement abstract methods.
"""
def __init__(self, desc=None, prefix=None,
suffix=None, required=False):
self._name = None
self._desc = desc
self._prefix = prefix
self._suffix = suffix
self._required = required
def get_key_name(self):
"""This method return the name of the section, it Should be unique
because it is used as a key or identifier."""
return self._name
def get_section_name(self):
"""This method build the current section name that the program will
looking for into the configuration file.
The format is [<prefix>-]<name>[-<suffix>].
"""
a = []
if self._prefix:
a.append(self._prefix)
a.append(str(self._name))
if self._suffix:
a.append(self._suffix)
return "-".join(a)
# pylint: disable-msg=W0613
# pylint: disable-msg=R0201
def load(self, file_parser):
""" This method must be implemented by the subclass. This method should
read and load all section elements.
"""
raise NotImplementedError("You must implement this method.")
def get_representation(self, prefix="", suffix="\n"):
"""return the string representation of the current object."""
res = prefix + "Section " + self.get_section_name().upper() + suffix
return res
def __str__(self):
return "".join(self.get_representation())
def write_config_file(self, f, comments):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if comments:
f.write("#####################################\n")
f.write("# Section : ")
f.write("#".join(self.get_representation()) + "\n")
f.write("#####################################\n")
f.write("[" + self._name + "]\n")
if self._desc and comments:
f.write("# Description : ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n")
# -----------------------------------------------------------------------------
class _Section(_AbstractSection):
    """Simple abstract section object, container for Elements"""
    def __init__(self, *args, **kwargs):
        super(_Section, self).__init__(*args, **kwargs)
        # ordered mapping: element name -> Element instance.
        self.elements = OrderedDict()
    def add_element(self, elt):
        """Helper to add a element to the current section. The Element name
        will be used as an identifier."""
        if not isinstance(elt, Element):
            raise TypeError("argument should be a subclass of Element")
        self.elements[elt.get_name()] = elt
        return elt
    def add_element_list(self, elt_list, **kwargs):
        """Helper to add a list of similar elements to the current section.
        Element names will be used as an identifier."""
        for e in elt_list:
            self.add_element(Element(e, **kwargs))
    def count(self):
        """This method will return the number of Element in the current
        Section"""
        return len(self.elements)
    def reset(self):
        # Forget every previously loaded value (used before a reload).
        for e in self.elements.values():
            e.reset()
    def load(self, file_parser):
        # Load every element; a missing section is fatal only when this
        # section was declared required.
        section = self.get_section_name()
        try:
            for e in self.elements.values():
                e.load(file_parser, section)
        except ConfigParser.NoSectionError as e:
            # pylint: disable-msg=W0621
            log = logging.getLogger('argtoolbox')
            if self._required:
                log.error("Required section : " + section)
                raise ValueError(e)
            else:
                log.debug("Missing section : " + section)
    def __getattr__(self, name):
        # attribute-style access to elements: section.my_option
        e = self.elements.get(name)
        if e is not None:
            return e
        else:
            raise AttributeError("'%(class)s' object has no attribute \
'%(name)s'" % {"name": name, "class": self.__class__.__name__})
    def get_element(self, name):
        # dict-style access to a contained Element (or None).
        return self.elements.get(name)
    def write_config_file(self, f, comments):
        """This method write a sample file, with attributes, descriptions,
        sample values, required flags, using the configuration object
        properties.
        """
        if len(self.elements) < 1:
            return
        super(_Section, self).write_config_file(f, comments)
        for e in self.elements.values():
            e.write_config_file(f, comments)
        f.write("\n")
# -----------------------------------------------------------------------------
class SimpleSection(_Section):
    """Concrete section holding a flat set of elements.

    Declare it, then add Element objects to it::

        mysection = SimpleSection("section_1")
    """

    def __init__(self, name, *args, **kwargs):
        super(SimpleSection, self).__init__(*args, **kwargs)
        self._name = name

    def get_representation(self, prefix="", suffix="\n"):
        """Return printable lines (header plus one line per element);
        an empty list when the section holds no element."""
        lines = []
        if self.count() > 0:
            lines.append(prefix + "Section "
                         + self.get_section_name().upper() + suffix)
            for elt in self.elements.values():
                lines.append("".join(elt.get_representation(prefix)))
        return lines
# -----------------------------------------------------------------------------
class SubSection(_Section):
    """Section nested inside an element; also used as a clonable
    template by ElementWithRelativeSubSection."""
    def get_representation(self, prefix="", suffix="\n"):
        res = []
        if self.count() > 0:
            res.append(prefix + "SubSection : "
                       + self.get_section_name().upper() + suffix)
            for elt in self.elements.values():
                res.append("".join(elt.get_representation(prefix)))
        return res
    def __copy__(self):
        # Shallow copy with an empty element table.
        # BUG FIX: the previous code assigned ``self.elements = OrderedDict()``,
        # which emptied the *original* object's element table while the
        # copy kept a reference to the old one. The fresh table must go
        # on the copy (same pattern as __deepcopy__ below).
        newone = type(self)()
        newone.__dict__.update(self.__dict__)
        newone.elements = OrderedDict()
        return newone
    # pylint: disable-msg=W0613
    def __deepcopy__(self, *args):
        # Deep copy: duplicate every contained element into a new table.
        newone = type(self)()
        newone.__dict__.update(self.__dict__)
        newone.elements = OrderedDict()
        for e in self.elements.values():
            newone.add_element(copy.deepcopy(e))
        return newone
# -----------------------------------------------------------------------------
class ListSection(_AbstractSection):
    """Section whose options are not declared in advance: every key
    found in the configuration file section is loaded as a raw string
    into ``self.elements``."""
    def __init__(self, name, *args, **kwargs):
        super(ListSection, self).__init__(*args, **kwargs)
        # ordered mapping: option name -> raw string value.
        self.elements = OrderedDict()
        self._name = name
    def load(self, file_parser):
        """Load every non-default option of the section as a key/value
        string pair. Raises ValueError when the section is required
        but missing from the file."""
        section = self.get_section_name()
        try:
            # TODO : ? : data = data.decode(locale.getpreferredencoding())
            for key in [item for item in file_parser.options(section)
                        if item not in file_parser.defaults().keys()]:
                self.elements[key] = file_parser.get(section, key)
        except ConfigParser.NoSectionError as e:
            # pylint: disable-msg=W0621
            log = logging.getLogger('argtoolbox')
            if self._required:
                log.error("Required section : " + section)
                raise ValueError(e)
            else:
                log.debug("Missing section : " + section)
    def reset(self):
        # BUG FIX: values stored in this section are plain strings
        # (see load()), which have no reset() method -- the previous
        # ``for e in self.elements.values(): e.reset()`` loop raised
        # AttributeError on every Config.reload(). Simply drop the
        # previously loaded values; load() repopulates them.
        self.elements.clear()
    def get_representation(self, prefix="", suffix="\n"):
        res = []
        res.append(prefix + "Section " + self._name + suffix)
        for key, val in self.elements.items():
            a = []
            a.append(prefix)
            a.append(" - " + str(key) + " : " + str(val))
            a.append(suffix)
            res.append("".join(a))
        return res
# -----------------------------------------------------------------------------
# warning| [R0902, Element] Too many instance attributes (13/7)
# pylint: disable-msg=R0902
class Element(object):
    """
    An Element could represent a option into the configuration file, this
    class lets you configure many requirements like default value, data
    type, if the option is mandatory, etc.
    You can also defined if element could be supply by the command line
    interface, default options for the cli, etc.
    """
    # pylint: disable-msg=R0913
    def __init__(self, name, e_type=str, required=False, default=None,
                 conf_hidden=False, conf_required=False, desc=None,
                 hooks=None, hidden=False, e_type_exclude=False):
        """Information about how to declare a element which will be load from a
        configuration file.
        Keyword Arguments:
        - name -- name of the attribute store into the configuration file.
        - e_type -- Data type of the attribute.
        - e_type_exclude -- Do not export data type to argparse. Example there is a
        conflict between type=int and action=count for argparse parameters.
        - conf_required -- The current attribute must be present in the
        configuration file.
        - required -- The current attribute must be present into command line
        arguments except if it is present into configuration file.
        - default -- Default value used if the attribute is not set in
        configuration file.
        This value is also used during configuration file generation.
        ex: 'attribute = $default_value' or ';attribute = $default_value'
        if this attribute is mandatory.
        - desc -- Description used into the configuration file and argparse.
        - conf_hidden -- The current attribute will not be used during
        configuration file generation.
        - hidden -- The current attribute will not be print on console
        (ex password)
        - hooks -- one hook or a list of hook. Should be an instance of
        DefaultHook. The hook will be apply to the element value once read
        from config file.
        """
        self._name = name
        self.e_type = e_type
        self.e_type_exclude = e_type_exclude
        self._required = required
        self.default = default
        self._desc = desc
        self.conf_hidden = conf_hidden
        self.conf_required = conf_required
        self._desc_for_config = None
        self._desc_for_argparse = None
        # loaded value; None until load() succeeds or a default applies.
        self.value = None
        self.hidden = hidden
        # normalise `hooks` to a validated list of DefaultHook instances.
        if hooks is None:
            hooks = []
        if isinstance(hooks, list):
            for h in hooks:
                if not isinstance(h, DefaultHook):
                    raise TypeError("Hook argument should be a subclass"
                                    + " of DefaultHook")
            self.hooks = hooks
        else:
            if isinstance(hooks, DefaultHook):
                self.hooks = [hooks]
            else:
                raise TypeError(
                    "Hook argument should be a subclass of DefaultHook")
    def get_name(self):
        """This method will return the name of the current element"""
        return self._name
    def get_representation(self, prefix="", suffix="\n"):
        """This method build a array that will contain the string
        representation of the current object. Every lines could be
        prefixed and suffixed.
        """
        res = []
        if self.hidden:
            # mask the value of sensitive elements (e.g. passwords).
            res.append(prefix + " - " + str(self._name)
                       + " : xxxxxxxx" + suffix)
        else:
            default = self.default
            if default is None:
                default = " - "
            a = prefix + " - "
            a += str(self._name) + " : "
            if isinstance(default, types.UnicodeType):
                a += default
            else:
                a += str(default)
            a += suffix
            res.append(a)
        return res
    def __str__(self):
        return "".join(self.get_representation())
    def __copy__(self):
        newone = type(self)(self._name)
        newone.__dict__.update(self.__dict__)
        #self.elements = OrderedDict()
        return newone
    def post_load(self):
        """Every element hooks are applied by this method, just after the
        loading process.
        """
        for h in self.hooks:
            h(self)
    def load(self, file_parser, section_name):
        """The current element is loaded from the configuration file,
        all constraints and requirements are checked.
        Then element hooks are applied.
        """
        self._load(file_parser, section_name)
        self.post_load()
    def _load(self, file_parser, section_name):
        """The current element is loaded from the configuration file,
        all constraints and requirements are checked.
        """
        # pylint: disable-msg=W0621
        log = logging.getLogger('argtoolbox')
        try:
            log.debug("looking for field (section=" + section_name
                      + ") : " + self._name)
            data = None
            try:
                # dispatch on the declared data type; ConfigParser has a
                # dedicated getter for int/float/bool.
                if self.e_type == int:
                    data = file_parser.getint(section_name, self._name)
                elif self.e_type == float:
                    data = file_parser.getfloat(section_name, self._name)
                elif self.e_type == bool:
                    data = file_parser.getboolean(section_name, self._name)
                elif self.e_type == list:
                    # whitespace-separated list, decoded to unicode.
                    data = file_parser.get(section_name, self._name)
                    data = data.strip()
                    data = data.decode(locale.getpreferredencoding())
                    data = data.split()
                    if not data:
                        msg = "The optional field '%(name)s' was present, \
type is list, but the current value is an empty \
list." % {"name": self._name}
                        log.error(msg)
                        raise ValueError(msg)
                elif self.e_type == str:
                    data = file_parser.get(section_name, self._name)
                    # happens only when the current field is present,
                    # type is string, but value is ''
                    if not data:
                        msg = "The optional field '%(name)s' was present, \
type is string, but the current value is an empty \
string." % {"name": self._name}
                        log.error(msg)
                        raise ValueError(msg)
                    data = data.decode(locale.getpreferredencoding())
                else:
                    msg = "Data type not supported : %(type)s " % {
                        "type": self.e_type}
                    log.error(msg)
                    raise TypeError(msg)
            except ValueError as ex:
                # wrong type in the file (or the empty-value errors above).
                msg = "The current field '%(name)s' was present, but the \
required type is : %(e_type)s." % {
                    "name": self._name,
                    "e_type": self.e_type
                }
                log.error(msg)
                log.error(str(ex))
                raise ValueError(str(ex))
            log_data = {"name": self._name, "data": data,
                        "e_type": self.e_type}
            if self.hidden:
                # never log the real value of hidden elements.
                log_data['data'] = "xxxxxxxx"
            log.debug("field found : '%(name)s', value : '%(data)s', \
type : '%(e_type)s'", log_data)
            self.value = data
        except ConfigParser.NoOptionError:
            # the option was absent: fatal if conf_required, otherwise
            # fall back to the default value (when one is defined).
            if self.conf_required:
                msg = "The required field '%(name)s' was missing from the \
config file." % {"name": self._name}
                log.error(msg)
                raise ValueError(msg)
            if self.default is not None:
                self.value = self.default
                log_data = {"name": self._name, "data": self.default,
                            "e_type": self.e_type}
                if self.hidden:
                    log_data['data'] = "xxxxxxxx"
                log.debug("Field not found : '%(name)s', default value : \
'%(data)s', type : '%(e_type)s'", log_data)
            else:
                log.debug("Field not found : '" + self._name + "'")
    def reset(self):
        # Discard the loaded value (used before a configuration reload).
        self.value = None
    def get_arg_parse_arguments(self):
        """
        During the element declaration, all configuration file requirements
        and all cli requirements have been described once.
        This method will build a dict containing all argparse options.
        It can be used to feed argparse.ArgumentParser.
        You does not need to have multiple declarations.
        """
        ret = dict()
        if self._required:
            # a value loaded from the file satisfies the requirement.
            if self.value is not None:
                ret["default"] = self.value
            else:
                ret["required"] = True
        ret["dest"] = self._name
        if not self.e_type_exclude:
            if self.e_type == int or self.e_type == float:
                # Just override argparse.add_argument 'type' parameter for int or float.
                ret["type"] = self.e_type
        if self.value is not None:
            ret["default"] = self.value
        if self._desc:
            ret["help"] = self._desc
        return ret
    def write_config_file(self, f, comments):
        """This method write a sample file, with attributes, descriptions,
        sample values, required flags, using the configuration object
        properties.
        """
        if self.conf_hidden:
            return False
        if comments:
            f.write("\n")
            f.write("# Attribute (")
            f.write(str(self.e_type.__name__))
            f.write(") : ")
            f.write(self._name.upper())
            f.write("\n")
            if self._desc and self._desc != argparse.SUPPRESS:
                f.write("# Description : ")
                for i in self._desc.split('\n'):
                    f.write("# ")
                    f.write(i)
                    f.write("\n")
                f.write("\n")
        if not self.conf_required:
            # optional attributes are written commented out.
            f.write(";")
        f.write(self._name)
        f.write("=")
        if self.default is not None and not self.hidden:
            f.write(str(self.default))
        f.write("\n")
# -----------------------------------------------------------------------------
class ElementWithSubSections(Element):
    """ This class extends the default class Element. It offers you the power
    to add sections (SubSection) inside a element.
    The simple case is one section containing some elements.
    But in some situation you may represent your data like a tree.
    Section :
        Element1 : name = port, value = 389
        Element2 : name = address, value = 127.0.0.1
        ElementWithSubSections
            SubSection
                Element1
                Element2
    """
    def __init__(self, *args, **kwargs):
        super(ElementWithSubSections, self).__init__(*args, **kwargs)
        # the element value itself is always a string.
        self.e_type = str
        # ordered mapping: sub section name -> SubSection.
        self.sections = OrderedDict()
    def get_representation(self, prefix="", suffix="\n"):
        res = ['\n']
        temp_line = prefix + " - " + str(self._name) + " : "
        if self.hidden:
            temp_line += "xxxxxxxx" + suffix
        else:
            temp_line += str(self.value) + suffix
        res.append(temp_line)
        if len(self.sections) > 0:
            for elt in self.sections.values():
                res.append("".join(elt.get_representation(prefix + "\t")))
        return res
    def add_section(self, section):
        """You can add section inside a Element, the section must be a
        subclass of SubSection. You can use this class to represent a tree.
        """
        if not issubclass(section.__class__, SubSection):
            raise TypeError("Argument should be a subclass of SubSection, \
not :" + str(section.__class__))
        # NOTE(review): SubSection stores its name in `_name`, but this
        # class reads/writes `section.name` (here and in load below),
        # while ElementWithRelativeSubSection sets `_name` -- looks
        # inconsistent; confirm which attribute is intended.
        self.sections[section.name] = section
        return section
    def load(self, file_parser, section_name):
        # load the element value first, then each attached sub section,
        # then apply hooks.
        self._load(file_parser, section_name)
        if len(self.sections) > 0:
            for sec in self.sections.values():
                sec.name = self.value
                sec.load(file_parser)
        self.post_load()
# -----------------------------------------------------------------------------
class ElementWithRelativeSubSection(ElementWithSubSections):
    """ This class extends the default class Element. It offers you the power
    to add sections (SubSection) inside a element, looked up dynamically.
    The element value is a list of section names, e.g.::

        ElementWithRelativeSubSection :
        name = list_of_section_name, value = sec_test1 sec_test2 sec_test3

    This class will then look for sections named sec_test1, sec_test2
    and sec_test3 in the configuration file. The structure used to load
    each of them is the second constructor argument "rss": a SubSection
    used as a template and deep-copied once per listed name. This lets
    you store, inside one element, a list of complex structures::

        ElementWithRelativeSubSection :
            SubSection : sec_test1
                - Element4
                - Element5
            SubSection : sec_test2
                - Element4
                - Element5
    """
    def __init__(self, name, rss, **kwargs):
        super(ElementWithRelativeSubSection, self).__init__(name, **kwargs)
        # the element value is the whitespace-separated list of names.
        self.e_type = list
        if not issubclass(rss.__class__, SubSection):
            # BUG FIX: the message previously interpolated
            # str(_Section.__class__) -- i.e. the metaclass, always
            # "<type 'type'>" -- instead of the class of the offending
            # argument.
            raise TypeError("Argument should be a subclass of SubSection, \
not :" + str(rss.__class__))
        self.rss = rss
    def load(self, file_parser, section_name):
        """Load the element value (a list of section names), then clone
        the template once per name and load each resulting sub section.
        Raises ValueError when a listed section is missing."""
        self._load(file_parser, section_name)
        if isinstance(self.value, list):
            for sec_name in self.value:
                try:
                    sec = copy.deepcopy(self.rss, None)
                    setattr(sec, '_name', sec_name)
                    self.sections[sec_name] = sec
                    sec.load(file_parser)
                except ValueError as e:
                    # pylint: disable-msg=W0621
                    log = logging.getLogger('argtoolbox')
                    error = []
                    error.append("Missing relative section, attribute : ")
                    error.append("'[" + section_name + "]." + self._name)
                    error.append("', value : " + str(self.value))
                    log.error("".join(error))
                    raise ValueError(e)
        self.post_load()
    def get_representation(self, prefix="", suffix="\n"):
        """Render the (commented) template followed by every loaded
        relative sub section."""
        res = ['\n']
        res.append('#')
        temp_line = prefix + " - " + str(self._name) + " : "
        if self.hidden:
            temp_line += "xxxxxxxx" + suffix
        else:
            temp_line += str(self.value) + suffix
        res.append(temp_line)
        res.append("".join(self.rss.get_representation(prefix + "#\t")))
        res.append('\n')
        if len(self.sections) > 0:
            for elt in self.sections.values():
                res.append('\n')
                res.append("".join(elt.get_representation(prefix + "\t")))
            res.append('\n')
        return res
# -----------------------------------------------------------------------------
class DefaultCommand(object):
    """Base class for every command line action.

    Subclass it and override ``__call__`` to implement the real work;
    this default implementation only logs the received argparse
    namespace, masking the attributes listed in ``protected_args``.
    """

    def __init__(self, config=None):
        self.log = logging.getLogger(
            'argtoolbox.' + str(self.__class__.__name__.lower()))
        self.config = config
        # Attribute names whose value must never reach the logs.
        self.protected_args = ['password']

    def __call__(self, args):
        # Work on a shallow copy so the caller's namespace keeps the
        # real (unmasked) values.
        masked = copy.copy(args)
        for name in getattr(self, 'protected_args', []):
            if hasattr(masked, name):
                setattr(masked, name, "xxxxxxxx")
        self.log.debug("Namespace : begin :")
        for key in masked.__dict__:
            value = getattr(masked, key)
            if isinstance(value, types.UnicodeType):
                self.log.debug(key + " : " + value + " : <type 'unicode'>")
            else:
                self.log.debug(
                    key + " : " + str(value) + " : " + str(type(value)))
        self.log.debug("Namespace : end.")

    # pylint: disable-msg=W0613
    # pylint: disable-msg=R0201
    def complete(self, args, prefix):
        """Auto complete hook: *args* comes from argparse and *prefix* is
        the partial input typed on the command line.

        Implementations must return a list of candidates."""
        return []
# -----------------------------------------------------------------------------
class TestCommand(DefaultCommand):
    """Just a simple demo command, using the default command class.

    It prints the loaded configuration and the parsed command line
    arguments (the argparse namespace) to stdout."""
    def __call__(self, args):
        # Let the parent log the (masked) namespace first.
        super(TestCommand, self).__call__(args)
        print ""
        print "This is the beginning of the TestCommand class."
        print ""
        print "The loaded configuration : "
        print "---------------------------"
        print unicode(self.config)
        print ""
        print "The command line arguments (argv) : "
        print "------------------------------------"
        print args
        print ""
        print "This is the end of the TestCommand class."
        print ""
# -----------------------------------------------------------------------------
class DefaultCompleter(object):
    """argcomplete completer that delegates to the current command.

    It looks up, on the command object stored in ``parsed_args.__func__``,
    a method named ``self.func_name`` (``complete`` by default) and
    returns that method's result as the completion candidate list.
    """

    def __init__(self, func_name="complete"):
        # Name of the method looked up on the command object.
        self.func_name = func_name

    def __call__(self, prefix, **kwargs):
        # Imported here (not at module level) so the library stays usable
        # without argcomplete installed; imported before the try block so
        # both names are guaranteed bound in the except handler below.
        from argcomplete import debug, warn
        try:
            debug("\n------------ DefaultCompleter -----------------")
            debug("Kwargs content :")
            for i, j in kwargs.items():
                debug("key : " + str(i))
                debug("\t - " + str(j))
            debug("\n------------ DefaultCompleter -----------------\n")
            args = kwargs.get('parsed_args')
            # pylint: disable-msg=W0612
            parser = kwargs.get('parser')
            #a = parser.parse_known_args()
            # getting form args the current Command and looking for a method
            # called by default 'complete'. See __init__ method. The method
            # name is store in the class member called self.func_name
            fn = getattr(args.__func__, self.func_name, None)
            if fn:
                return fn(args, prefix)
            # Robustness fix: completers must return a list, never None.
            return []
        # pylint: disable-msg=W0703
        except Exception as e:
            warn("\nERROR::COMPLETE:An exception was caught :" + str(e) + "\n")
            import traceback
            traceback.print_exc()
            debug("\n------\n")
            # Typo fixed: was "comlete-error".
            return ["complete-error"]
# -----------------------------------------------------------------------------
class DefaultProgram(object):
""" TODO """
def __init__(self, parser, config=None, force_debug=False):
self.parser = parser
self.config = config
self.force_debug = force_debug
def __call__(self):
def patch(self, parser, namespace, values, option_string=None):
"""patch original __call__ method for argparse 1.1 (fix)"""
from argparse import _UNRECOGNIZED_ARGS_ATTR
from argparse import SUPPRESS
from argparse import _
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser %r (choices: %s)') % tup
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings,
namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
# integration with argcomplete python module (bash completion)
try:
# Very ugly dirty hook/fix
if argparse.__version__ == "1.1":
# pylint: disable=protected-access
argparse._SubParsersAction.__call__ = patch
import argcomplete
argcomplete.autocomplete(self.parser)
except ImportError:
pass
# parse cli arguments
args = self.parse_args()
if getattr(args, 'debug', False) or self.force_debug:
llog = logging.getLogger()
llog.setLevel(logging.DEBUG)
streamHandler.setFormatter(DEBUG_LOGGING_FORMAT)
if self.config:
print "debug>>>----------- config -------------------"
print unicode(self.config)
print "debug----------- processing --------------<<<<"
# run command
return args.__func__(args)
else:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
from argparse import ArgumentError
try:
# run command
return args.__func__(args)
except ValueError as a:
log.error("ValueError : " + str(a))
except KeyboardInterrupt as a:
log.warn("Keyboard interruption detected.")
except ArgumentError as a:
self.parser.error(a.message)
except Exception as a:
log.error("unexcepted error : " + str(a))
return False
def parse_args(self):
args = self.parser.parse_args()
for attr in args.__dict__:
data = getattr(args, attr)
if isinstance(data, str):
data = data.decode(locale.getpreferredencoding())
setattr(args, attr, data)
if isinstance(data, list):
res = []
for val in data:
if isinstance(val, str):
res.append(val.decode(locale.getpreferredencoding()))
else:
res.append(val)
setattr(args, attr, res)
return args
# -----------------------------------------------------------------------------
class BasicProgram(object):
    """Program skeleton: builds a Config object and a cli parser, loads
    the configuration file, then runs the selected command through
    DefaultProgram. Override the add_* hooks to customise behaviour."""
    def __init__(self, name, config_file=None, desc=None,
                 mandatory=False, use_config_file=True, version="0.1-alpha",
                 force_debug=False, force_debug_to_file=False):
        # create configuration
        self.config = Config(name, config_file=config_file, desc=desc,
                             mandatory=mandatory,
                             use_config_file=use_config_file)
        self.prog_name = name
        # The argparse parser; created later by init_parser().
        self.parser = None
        self.version = version
        # Optional argparse formatter class applied in init_parser().
        self.formatter_class = None
        self.force_debug = force_debug
        self.force_debug_to_file = force_debug_to_file
        self.log = self.init_logger()
    def init_logger(self):
        """Configure and return the root logger, optionally switching to
        DEBUG level and adding a per-program log file."""
        # logger
        log = logging.getLogger()
        log.setLevel(logging.INFO)
        # logger handlers
        log.addHandler(streamHandler)
        # debug mode
        # if you want to enable debug during class construction, file
        # configuration loading, ..., you need to modify the logger level here.
        if self.force_debug:
            log.setLevel(logging.DEBUG)
            streamHandler.setFormatter(DEBUG_LOGGING_FORMAT)
        if self.force_debug_to_file:
            dest = self.prog_name + ".log"
            FILEHA = logging.FileHandler(dest, 'w', 'utf-8')
            FILEHA.setFormatter(DEBUG_LOGGING_FORMAT)
            log.setLevel(logging.DEBUG)
            log.addHandler(FILEHA)
            streamHandler.setFormatter(DEBUG_LOGGING_FORMAT)
            log.warning("output log file : " + dest)
        return log
    def add_config_options(self):
        """ You can override this method in order to add your options to the
        config object."""
        # default section
        default = self.config.get_default_section()
        default.add_element(
            Element('debug',
                    e_type=int,
                    e_type_exclude=True,
                    default=0,
                    desc="""debug level : default : 0."""))
    def load(self):
        """Load the configuration from the default config file(s)."""
        # loading default configuration from the file
        self.config.load()
    def init_parser(self):
        """Create the cli parser from the config and add the common
        --verbose / --version arguments."""
        # arguments parser
        self.parser = self.config.get_parser()
        if self.formatter_class:
            self.parser.formatter_class = self.formatter_class
        self.parser.add_argument('-v', '--verbose',
                                 action="store_true",
                                 default=False)
        self.parser.add_argument('--version',
                                 action="version",
                                 version="%(prog)s " + self.version)
    def add_pre_commands(self):
        """ You can override this method in order to add your command line
        arguments to the argparse parser. The configuration file is already
        loaded at this time."""
        pass
    def reload(self):
        """Reload the configuration, taking already-parsed optional
        arguments into account."""
        # reloading configuration with previous optional arguments
        # (example : config file name from argv, ...)
        self.config.reload()
    def add_commands(self):
        """ You can override this method in order to add your command line
        arguments to the argparse parser. The configuration file was
        reloaded at this time."""
        self.parser.add_argument(
            '-d',
            action="count",
            **self.config.default.debug.get_arg_parse_arguments())
    def __call__(self):
        """Run the whole program life cycle, then exit(0) on success or
        exit(1) on failure."""
        # adding some user options to the config object
        self.add_config_options()
        # loading default configuration from the file
        self.load()
        # initialisation of the cli parser,
        # some default arguments are also added.
        self.init_parser()
        # adding some user arguments
        self.add_pre_commands()
        # reloading configuration with previous optional arguments
        # (example : config file name from argv, ...)
        self.reload()
        # adding all commands
        self.add_commands()
        # run
        run = DefaultProgram(self.parser, self.config,
                             force_debug=self.force_debug)
        if run():
            sys.exit(0)
        else:
            sys.exit(1)
# -----------------------------------------------------------------------------
def query_yes_no(question, default="yes"):
    """Prompt the user with a yes/no *question*; True when they answer yes."""
    return _query_yes_no(question, default) == "yes"
# -----------------------------------------------------------------------------
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | Config.add_section | python | def add_section(self, section):
if not issubclass(section.__class__, _AbstractSection):
raise TypeError("argument should be a subclass of Section")
self.sections[section.get_key_name()] = section
return section | Add a new Section object to the config. Should be a subclass of
_AbstractSection. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L160-L166 | [
"def get_key_name(self):\n \"\"\"This method return the name of the section, it Should be unique\n because it is used as a key or identifier.\"\"\"\n return self._name\n"
] | class Config(object):
# pylint: disable-msg=R0902
"""This is the entry point, this class will contains all Section and
Elements."""
def __init__(self, prog_name, config_file=None, desc=None,
mandatory=False, use_config_file=True):
self.prog_name = prog_name
self.config_file = config_file
self.use_config_file = use_config_file
self._desc = desc
self.mandatory = mandatory
self.sections = OrderedDict()
self._default_section = self.add_section(SimpleSection("DEFAULT"))
self.parser = None
self.file_parser = ConfigParser.SafeConfigParser()
def get_section(self, name):
if name.lower() == "default":
return self._default_section
return self.sections.get(name)
def get_default_section(self):
"""This method will return default section object"""
return self._default_section
def load(self, exit_on_failure=False):
"""One you have added all your configuration data (Section, Element,
...) you need to load data from the config file."""
if self.use_config_file:
self._load(exit_on_failure)
def _load(self, exit_on_failure):
"""One you have added all your configuration data (Section, Element,
...) you need to load data from the config file."""
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
discoveredFileList = []
if self.config_file:
if isinstance(self.config_file, types.UnicodeType):
discoveredFileList = self.file_parser.read(self.config_file)
else:
discoveredFileList = self.file_parser.readfp(
self.config_file,
"file descriptor")
else:
defaultFileList = []
defaultFileList.append(self.prog_name + ".cfg")
defaultFileList.append(
os.path.expanduser('~/.' + self.prog_name + '.cfg'))
defaultFileList.append('/etc/' + self.prog_name + '.cfg')
log.debug("defaultFileList: " + str(defaultFileList))
discoveredFileList = self.file_parser.read(defaultFileList)
log.debug("discoveredFileList: " + str(discoveredFileList))
if self.mandatory and len(discoveredFileList) < 1:
msg = "The required config file was missing."
msg += " Default config files : " + str(defaultFileList)
log.error(msg)
raise EnvironmentError(msg)
log.debug("loading configuration ...")
if exit_on_failure:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
try:
s.load(self.file_parser)
except ValueError:
sys.exit(1)
else:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.load(self.file_parser)
log.debug("configuration loaded.")
def get_parser(self, **kwargs):
"""This method will create and return a new parser with prog_name,
description, and a config file argument.
"""
self.parser = argparse.ArgumentParser(prog=self.prog_name,
description=self._desc,
add_help=False, **kwargs)
# help is removed because parser.parse_known_args() show help,
# often partial help. help action will be added during
# reloading step for parser.parse_args()
if self.use_config_file:
self.parser.add_argument('--config-file',
action="store",
help="Other configuration file.")
return self.parser
def reload(self, hooks=None):
"""This method will reload the configuration using input argument
from the command line interface.
1. pasing arguments
2. applying hooks
3. addding help argument
4. reloading configuration using cli argument like a configuration
file name.
"""
#from argcomplete import debug
# Parsing the command line looking for the previous options like
# configuration file name or server section. Extra arguments
# will be store into argv.
args = None
if os.environ.get('_ARGCOMPLETE'):
# During argcomplete completion, parse_known_args will return an
# empty Namespace. In this case, we feed the previous function with
# data comming from the input completion data
compline = os.environ.get('COMP_LINE')
args = self.parser.parse_known_args(compline.split()[1:])[0]
else:
args = self.parser.parse_known_args()[0]
if hooks is not None:
if isinstance(hooks, list):
for h in hooks:
if isinstance(h, SectionHook):
h(args)
else:
if isinstance(hooks, SectionHook):
hooks(args)
# After the first argument parsing, for configuration reloading,
# we can add the help action.
self.parser.add_argument('-h', '--help', action='help',
default=argparse.SUPPRESS,
help='show this help message and exit')
# Reloading
if self.use_config_file:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
log.debug("reloading configuration ...")
if args.config_file:
self.file_parser = ConfigParser.SafeConfigParser()
discoveredFileList = self.file_parser.read(args.config_file)
log.debug("discoveredFileList: " + str(discoveredFileList))
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.reset()
s.load(self.file_parser)
log.debug("configuration reloaded.")
def __getattr__(self, name):
if name.lower() == "default":
return self._default_section
s = self.sections.get(name)
if s is not None:
return s
else:
raise AttributeError("'%(class)s' object has no attribute \
'%(name)s'" % {"name": name, "class": self.__class__.__name__})
def __str__(self):
res = []
res.append("Configuration of %(prog_name)s : " % self.__dict__)
for s in self.sections.values():
res.append("".join(s.get_representation("\t")))
return "\n".join(res)
def write_default_config_file(self, output, comments=True):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if self.use_config_file:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
with open(output, 'w') as f:
if comments:
f.write("#####################################\n")
f.write("# Description :\n")
f.write("# -------------\n# ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n\n")
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.write_config_file(f, comments)
log.debug("config file generation completed : " + str(output))
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | Config._load | python | def _load(self, exit_on_failure):
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
discoveredFileList = []
if self.config_file:
if isinstance(self.config_file, types.UnicodeType):
discoveredFileList = self.file_parser.read(self.config_file)
else:
discoveredFileList = self.file_parser.readfp(
self.config_file,
"file descriptor")
else:
defaultFileList = []
defaultFileList.append(self.prog_name + ".cfg")
defaultFileList.append(
os.path.expanduser('~/.' + self.prog_name + '.cfg'))
defaultFileList.append('/etc/' + self.prog_name + '.cfg')
log.debug("defaultFileList: " + str(defaultFileList))
discoveredFileList = self.file_parser.read(defaultFileList)
log.debug("discoveredFileList: " + str(discoveredFileList))
if self.mandatory and len(discoveredFileList) < 1:
msg = "The required config file was missing."
msg += " Default config files : " + str(defaultFileList)
log.error(msg)
raise EnvironmentError(msg)
log.debug("loading configuration ...")
if exit_on_failure:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
try:
s.load(self.file_parser)
except ValueError:
sys.exit(1)
else:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.load(self.file_parser)
log.debug("configuration loaded.") | One you have added all your configuration data (Section, Element,
...) you need to load data from the config file. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L183-L226 | null | class Config(object):
# pylint: disable-msg=R0902
"""This is the entry point, this class will contains all Section and
Elements."""
def __init__(self, prog_name, config_file=None, desc=None,
mandatory=False, use_config_file=True):
self.prog_name = prog_name
self.config_file = config_file
self.use_config_file = use_config_file
self._desc = desc
self.mandatory = mandatory
self.sections = OrderedDict()
self._default_section = self.add_section(SimpleSection("DEFAULT"))
self.parser = None
self.file_parser = ConfigParser.SafeConfigParser()
def add_section(self, section):
"""Add a new Section object to the config. Should be a subclass of
_AbstractSection."""
if not issubclass(section.__class__, _AbstractSection):
raise TypeError("argument should be a subclass of Section")
self.sections[section.get_key_name()] = section
return section
def get_section(self, name):
if name.lower() == "default":
return self._default_section
return self.sections.get(name)
def get_default_section(self):
"""This method will return default section object"""
return self._default_section
def load(self, exit_on_failure=False):
"""One you have added all your configuration data (Section, Element,
...) you need to load data from the config file."""
if self.use_config_file:
self._load(exit_on_failure)
def get_parser(self, **kwargs):
"""This method will create and return a new parser with prog_name,
description, and a config file argument.
"""
self.parser = argparse.ArgumentParser(prog=self.prog_name,
description=self._desc,
add_help=False, **kwargs)
# help is removed because parser.parse_known_args() show help,
# often partial help. help action will be added during
# reloading step for parser.parse_args()
if self.use_config_file:
self.parser.add_argument('--config-file',
action="store",
help="Other configuration file.")
return self.parser
def reload(self, hooks=None):
"""This method will reload the configuration using input argument
from the command line interface.
1. pasing arguments
2. applying hooks
3. addding help argument
4. reloading configuration using cli argument like a configuration
file name.
"""
#from argcomplete import debug
# Parsing the command line looking for the previous options like
# configuration file name or server section. Extra arguments
# will be store into argv.
args = None
if os.environ.get('_ARGCOMPLETE'):
# During argcomplete completion, parse_known_args will return an
# empty Namespace. In this case, we feed the previous function with
# data comming from the input completion data
compline = os.environ.get('COMP_LINE')
args = self.parser.parse_known_args(compline.split()[1:])[0]
else:
args = self.parser.parse_known_args()[0]
if hooks is not None:
if isinstance(hooks, list):
for h in hooks:
if isinstance(h, SectionHook):
h(args)
else:
if isinstance(hooks, SectionHook):
hooks(args)
# After the first argument parsing, for configuration reloading,
# we can add the help action.
self.parser.add_argument('-h', '--help', action='help',
default=argparse.SUPPRESS,
help='show this help message and exit')
# Reloading
if self.use_config_file:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
log.debug("reloading configuration ...")
if args.config_file:
self.file_parser = ConfigParser.SafeConfigParser()
discoveredFileList = self.file_parser.read(args.config_file)
log.debug("discoveredFileList: " + str(discoveredFileList))
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.reset()
s.load(self.file_parser)
log.debug("configuration reloaded.")
def __getattr__(self, name):
if name.lower() == "default":
return self._default_section
s = self.sections.get(name)
if s is not None:
return s
else:
raise AttributeError("'%(class)s' object has no attribute \
'%(name)s'" % {"name": name, "class": self.__class__.__name__})
def __str__(self):
res = []
res.append("Configuration of %(prog_name)s : " % self.__dict__)
for s in self.sections.values():
res.append("".join(s.get_representation("\t")))
return "\n".join(res)
def write_default_config_file(self, output, comments=True):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if self.use_config_file:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
with open(output, 'w') as f:
if comments:
f.write("#####################################\n")
f.write("# Description :\n")
f.write("# -------------\n# ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n\n")
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.write_config_file(f, comments)
log.debug("config file generation completed : " + str(output))
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | Config.get_parser | python | def get_parser(self, **kwargs):
self.parser = argparse.ArgumentParser(prog=self.prog_name,
description=self._desc,
add_help=False, **kwargs)
# help is removed because parser.parse_known_args() show help,
# often partial help. help action will be added during
# reloading step for parser.parse_args()
if self.use_config_file:
self.parser.add_argument('--config-file',
action="store",
help="Other configuration file.")
return self.parser | This method will create and return a new parser with prog_name,
description, and a config file argument. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L228-L242 | null | class Config(object):
# pylint: disable-msg=R0902
"""This is the entry point, this class will contains all Section and
Elements."""
def __init__(self, prog_name, config_file=None, desc=None,
mandatory=False, use_config_file=True):
self.prog_name = prog_name
self.config_file = config_file
self.use_config_file = use_config_file
self._desc = desc
self.mandatory = mandatory
self.sections = OrderedDict()
self._default_section = self.add_section(SimpleSection("DEFAULT"))
self.parser = None
self.file_parser = ConfigParser.SafeConfigParser()
def add_section(self, section):
"""Add a new Section object to the config. Should be a subclass of
_AbstractSection."""
if not issubclass(section.__class__, _AbstractSection):
raise TypeError("argument should be a subclass of Section")
self.sections[section.get_key_name()] = section
return section
def get_section(self, name):
if name.lower() == "default":
return self._default_section
return self.sections.get(name)
def get_default_section(self):
"""This method will return default section object"""
return self._default_section
def load(self, exit_on_failure=False):
"""One you have added all your configuration data (Section, Element,
...) you need to load data from the config file."""
if self.use_config_file:
self._load(exit_on_failure)
def _load(self, exit_on_failure):
"""One you have added all your configuration data (Section, Element,
...) you need to load data from the config file."""
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
discoveredFileList = []
if self.config_file:
if isinstance(self.config_file, types.UnicodeType):
discoveredFileList = self.file_parser.read(self.config_file)
else:
discoveredFileList = self.file_parser.readfp(
self.config_file,
"file descriptor")
else:
defaultFileList = []
defaultFileList.append(self.prog_name + ".cfg")
defaultFileList.append(
os.path.expanduser('~/.' + self.prog_name + '.cfg'))
defaultFileList.append('/etc/' + self.prog_name + '.cfg')
log.debug("defaultFileList: " + str(defaultFileList))
discoveredFileList = self.file_parser.read(defaultFileList)
log.debug("discoveredFileList: " + str(discoveredFileList))
if self.mandatory and len(discoveredFileList) < 1:
msg = "The required config file was missing."
msg += " Default config files : " + str(defaultFileList)
log.error(msg)
raise EnvironmentError(msg)
log.debug("loading configuration ...")
if exit_on_failure:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
try:
s.load(self.file_parser)
except ValueError:
sys.exit(1)
else:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.load(self.file_parser)
log.debug("configuration loaded.")
def reload(self, hooks=None):
"""This method will reload the configuration using input argument
from the command line interface.
1. pasing arguments
2. applying hooks
3. addding help argument
4. reloading configuration using cli argument like a configuration
file name.
"""
#from argcomplete import debug
# Parsing the command line looking for the previous options like
# configuration file name or server section. Extra arguments
# will be store into argv.
args = None
if os.environ.get('_ARGCOMPLETE'):
# During argcomplete completion, parse_known_args will return an
# empty Namespace. In this case, we feed the previous function with
# data comming from the input completion data
compline = os.environ.get('COMP_LINE')
args = self.parser.parse_known_args(compline.split()[1:])[0]
else:
args = self.parser.parse_known_args()[0]
if hooks is not None:
if isinstance(hooks, list):
for h in hooks:
if isinstance(h, SectionHook):
h(args)
else:
if isinstance(hooks, SectionHook):
hooks(args)
# After the first argument parsing, for configuration reloading,
# we can add the help action.
self.parser.add_argument('-h', '--help', action='help',
default=argparse.SUPPRESS,
help='show this help message and exit')
# Reloading
if self.use_config_file:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
log.debug("reloading configuration ...")
if args.config_file:
self.file_parser = ConfigParser.SafeConfigParser()
discoveredFileList = self.file_parser.read(args.config_file)
log.debug("discoveredFileList: " + str(discoveredFileList))
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.reset()
s.load(self.file_parser)
log.debug("configuration reloaded.")
def __getattr__(self, name):
if name.lower() == "default":
return self._default_section
s = self.sections.get(name)
if s is not None:
return s
else:
raise AttributeError("'%(class)s' object has no attribute \
'%(name)s'" % {"name": name, "class": self.__class__.__name__})
def __str__(self):
res = []
res.append("Configuration of %(prog_name)s : " % self.__dict__)
for s in self.sections.values():
res.append("".join(s.get_representation("\t")))
return "\n".join(res)
def write_default_config_file(self, output, comments=True):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if self.use_config_file:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
with open(output, 'w') as f:
if comments:
f.write("#####################################\n")
f.write("# Description :\n")
f.write("# -------------\n# ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n\n")
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.write_config_file(f, comments)
log.debug("config file generation completed : " + str(output))
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | Config.reload | python | def reload(self, hooks=None):
#from argcomplete import debug
# Parsing the command line looking for the previous options like
# configuration file name or server section. Extra arguments
# will be store into argv.
args = None
if os.environ.get('_ARGCOMPLETE'):
# During argcomplete completion, parse_known_args will return an
# empty Namespace. In this case, we feed the previous function with
# data comming from the input completion data
compline = os.environ.get('COMP_LINE')
args = self.parser.parse_known_args(compline.split()[1:])[0]
else:
args = self.parser.parse_known_args()[0]
if hooks is not None:
if isinstance(hooks, list):
for h in hooks:
if isinstance(h, SectionHook):
h(args)
else:
if isinstance(hooks, SectionHook):
hooks(args)
# After the first argument parsing, for configuration reloading,
# we can add the help action.
self.parser.add_argument('-h', '--help', action='help',
default=argparse.SUPPRESS,
help='show this help message and exit')
# Reloading
if self.use_config_file:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
log.debug("reloading configuration ...")
if args.config_file:
self.file_parser = ConfigParser.SafeConfigParser()
discoveredFileList = self.file_parser.read(args.config_file)
log.debug("discoveredFileList: " + str(discoveredFileList))
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.reset()
s.load(self.file_parser)
log.debug("configuration reloaded.") | This method will reload the configuration using input argument
from the command line interface.
1. pasing arguments
2. applying hooks
3. addding help argument
4. reloading configuration using cli argument like a configuration
file name. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L244-L295 | null | class Config(object):
# pylint: disable-msg=R0902
"""This is the entry point, this class will contains all Section and
Elements."""
def __init__(self, prog_name, config_file=None, desc=None,
mandatory=False, use_config_file=True):
self.prog_name = prog_name
self.config_file = config_file
self.use_config_file = use_config_file
self._desc = desc
self.mandatory = mandatory
self.sections = OrderedDict()
self._default_section = self.add_section(SimpleSection("DEFAULT"))
self.parser = None
self.file_parser = ConfigParser.SafeConfigParser()
def add_section(self, section):
"""Add a new Section object to the config. Should be a subclass of
_AbstractSection."""
if not issubclass(section.__class__, _AbstractSection):
raise TypeError("argument should be a subclass of Section")
self.sections[section.get_key_name()] = section
return section
def get_section(self, name):
if name.lower() == "default":
return self._default_section
return self.sections.get(name)
def get_default_section(self):
"""This method will return default section object"""
return self._default_section
def load(self, exit_on_failure=False):
"""One you have added all your configuration data (Section, Element,
...) you need to load data from the config file."""
if self.use_config_file:
self._load(exit_on_failure)
def _load(self, exit_on_failure):
"""One you have added all your configuration data (Section, Element,
...) you need to load data from the config file."""
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
discoveredFileList = []
if self.config_file:
if isinstance(self.config_file, types.UnicodeType):
discoveredFileList = self.file_parser.read(self.config_file)
else:
discoveredFileList = self.file_parser.readfp(
self.config_file,
"file descriptor")
else:
defaultFileList = []
defaultFileList.append(self.prog_name + ".cfg")
defaultFileList.append(
os.path.expanduser('~/.' + self.prog_name + '.cfg'))
defaultFileList.append('/etc/' + self.prog_name + '.cfg')
log.debug("defaultFileList: " + str(defaultFileList))
discoveredFileList = self.file_parser.read(defaultFileList)
log.debug("discoveredFileList: " + str(discoveredFileList))
if self.mandatory and len(discoveredFileList) < 1:
msg = "The required config file was missing."
msg += " Default config files : " + str(defaultFileList)
log.error(msg)
raise EnvironmentError(msg)
log.debug("loading configuration ...")
if exit_on_failure:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
try:
s.load(self.file_parser)
except ValueError:
sys.exit(1)
else:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.load(self.file_parser)
log.debug("configuration loaded.")
def get_parser(self, **kwargs):
"""This method will create and return a new parser with prog_name,
description, and a config file argument.
"""
self.parser = argparse.ArgumentParser(prog=self.prog_name,
description=self._desc,
add_help=False, **kwargs)
# help is removed because parser.parse_known_args() show help,
# often partial help. help action will be added during
# reloading step for parser.parse_args()
if self.use_config_file:
self.parser.add_argument('--config-file',
action="store",
help="Other configuration file.")
return self.parser
def __getattr__(self, name):
if name.lower() == "default":
return self._default_section
s = self.sections.get(name)
if s is not None:
return s
else:
raise AttributeError("'%(class)s' object has no attribute \
'%(name)s'" % {"name": name, "class": self.__class__.__name__})
def __str__(self):
res = []
res.append("Configuration of %(prog_name)s : " % self.__dict__)
for s in self.sections.values():
res.append("".join(s.get_representation("\t")))
return "\n".join(res)
def write_default_config_file(self, output, comments=True):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if self.use_config_file:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
with open(output, 'w') as f:
if comments:
f.write("#####################################\n")
f.write("# Description :\n")
f.write("# -------------\n# ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n\n")
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.write_config_file(f, comments)
log.debug("config file generation completed : " + str(output))
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | Config.write_default_config_file | python | def write_default_config_file(self, output, comments=True):
if self.use_config_file:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
with open(output, 'w') as f:
if comments:
f.write("#####################################\n")
f.write("# Description :\n")
f.write("# -------------\n# ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n\n")
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.write_config_file(f, comments)
log.debug("config file generation completed : " + str(output)) | This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L314-L336 | null | class Config(object):
# pylint: disable-msg=R0902
"""This is the entry point, this class will contains all Section and
Elements."""
def __init__(self, prog_name, config_file=None, desc=None,
mandatory=False, use_config_file=True):
self.prog_name = prog_name
self.config_file = config_file
self.use_config_file = use_config_file
self._desc = desc
self.mandatory = mandatory
self.sections = OrderedDict()
self._default_section = self.add_section(SimpleSection("DEFAULT"))
self.parser = None
self.file_parser = ConfigParser.SafeConfigParser()
def add_section(self, section):
"""Add a new Section object to the config. Should be a subclass of
_AbstractSection."""
if not issubclass(section.__class__, _AbstractSection):
raise TypeError("argument should be a subclass of Section")
self.sections[section.get_key_name()] = section
return section
def get_section(self, name):
if name.lower() == "default":
return self._default_section
return self.sections.get(name)
def get_default_section(self):
"""This method will return default section object"""
return self._default_section
def load(self, exit_on_failure=False):
"""One you have added all your configuration data (Section, Element,
...) you need to load data from the config file."""
if self.use_config_file:
self._load(exit_on_failure)
def _load(self, exit_on_failure):
"""One you have added all your configuration data (Section, Element,
...) you need to load data from the config file."""
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
discoveredFileList = []
if self.config_file:
if isinstance(self.config_file, types.UnicodeType):
discoveredFileList = self.file_parser.read(self.config_file)
else:
discoveredFileList = self.file_parser.readfp(
self.config_file,
"file descriptor")
else:
defaultFileList = []
defaultFileList.append(self.prog_name + ".cfg")
defaultFileList.append(
os.path.expanduser('~/.' + self.prog_name + '.cfg'))
defaultFileList.append('/etc/' + self.prog_name + '.cfg')
log.debug("defaultFileList: " + str(defaultFileList))
discoveredFileList = self.file_parser.read(defaultFileList)
log.debug("discoveredFileList: " + str(discoveredFileList))
if self.mandatory and len(discoveredFileList) < 1:
msg = "The required config file was missing."
msg += " Default config files : " + str(defaultFileList)
log.error(msg)
raise EnvironmentError(msg)
log.debug("loading configuration ...")
if exit_on_failure:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
try:
s.load(self.file_parser)
except ValueError:
sys.exit(1)
else:
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.load(self.file_parser)
log.debug("configuration loaded.")
def get_parser(self, **kwargs):
"""This method will create and return a new parser with prog_name,
description, and a config file argument.
"""
self.parser = argparse.ArgumentParser(prog=self.prog_name,
description=self._desc,
add_help=False, **kwargs)
# help is removed because parser.parse_known_args() show help,
# often partial help. help action will be added during
# reloading step for parser.parse_args()
if self.use_config_file:
self.parser.add_argument('--config-file',
action="store",
help="Other configuration file.")
return self.parser
def reload(self, hooks=None):
"""This method will reload the configuration using input argument
from the command line interface.
1. pasing arguments
2. applying hooks
3. addding help argument
4. reloading configuration using cli argument like a configuration
file name.
"""
#from argcomplete import debug
# Parsing the command line looking for the previous options like
# configuration file name or server section. Extra arguments
# will be store into argv.
args = None
if os.environ.get('_ARGCOMPLETE'):
# During argcomplete completion, parse_known_args will return an
# empty Namespace. In this case, we feed the previous function with
# data comming from the input completion data
compline = os.environ.get('COMP_LINE')
args = self.parser.parse_known_args(compline.split()[1:])[0]
else:
args = self.parser.parse_known_args()[0]
if hooks is not None:
if isinstance(hooks, list):
for h in hooks:
if isinstance(h, SectionHook):
h(args)
else:
if isinstance(hooks, SectionHook):
hooks(args)
# After the first argument parsing, for configuration reloading,
# we can add the help action.
self.parser.add_argument('-h', '--help', action='help',
default=argparse.SUPPRESS,
help='show this help message and exit')
# Reloading
if self.use_config_file:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
log.debug("reloading configuration ...")
if args.config_file:
self.file_parser = ConfigParser.SafeConfigParser()
discoveredFileList = self.file_parser.read(args.config_file)
log.debug("discoveredFileList: " + str(discoveredFileList))
for s in self.sections.values():
log.debug("loading section : " + s.get_section_name())
s.reset()
s.load(self.file_parser)
log.debug("configuration reloaded.")
def __getattr__(self, name):
if name.lower() == "default":
return self._default_section
s = self.sections.get(name)
if s is not None:
return s
else:
raise AttributeError("'%(class)s' object has no attribute \
'%(name)s'" % {"name": name, "class": self.__class__.__name__})
def __str__(self):
res = []
res.append("Configuration of %(prog_name)s : " % self.__dict__)
for s in self.sections.values():
res.append("".join(s.get_representation("\t")))
return "\n".join(res)
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | _AbstractSection.get_section_name | python | def get_section_name(self):
a = []
if self._prefix:
a.append(self._prefix)
a.append(str(self._name))
if self._suffix:
a.append(self._suffix)
return "-".join(a) | This method build the current section name that the program will
looking for into the configuration file.
The format is [<prefix>-]<name>[-<suffix>]. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L358-L369 | null | class _AbstractSection(object):
"""This class is the parent class of all Section classes. You can not use
it, you must implement abstract methods.
"""
def __init__(self, desc=None, prefix=None,
suffix=None, required=False):
self._name = None
self._desc = desc
self._prefix = prefix
self._suffix = suffix
self._required = required
def get_key_name(self):
"""This method return the name of the section, it Should be unique
because it is used as a key or identifier."""
return self._name
# pylint: disable-msg=W0613
# pylint: disable-msg=R0201
def load(self, file_parser):
""" This method must be implemented by the subclass. This method should
read and load all section elements.
"""
raise NotImplementedError("You must implement this method.")
def get_representation(self, prefix="", suffix="\n"):
"""return the string representation of the current object."""
res = prefix + "Section " + self.get_section_name().upper() + suffix
return res
def __str__(self):
return "".join(self.get_representation())
def write_config_file(self, f, comments):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if comments:
f.write("#####################################\n")
f.write("# Section : ")
f.write("#".join(self.get_representation()) + "\n")
f.write("#####################################\n")
f.write("[" + self._name + "]\n")
if self._desc and comments:
f.write("# Description : ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n")
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | _AbstractSection.get_representation | python | def get_representation(self, prefix="", suffix="\n"):
res = prefix + "Section " + self.get_section_name().upper() + suffix
return res | return the string representation of the current object. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L379-L382 | [
"def get_section_name(self):\n \"\"\"This method build the current section name that the program will\n looking for into the configuration file.\n The format is [<prefix>-]<name>[-<suffix>].\n \"\"\"\n a = []\n if self._prefix:\n a.append(self._prefix)\n a.append(str(self._name))\n if self._suffix:\n a.append(self._suffix)\n return \"-\".join(a)\n"
] | class _AbstractSection(object):
"""This class is the parent class of all Section classes. You can not use
it, you must implement abstract methods.
"""
def __init__(self, desc=None, prefix=None,
suffix=None, required=False):
self._name = None
self._desc = desc
self._prefix = prefix
self._suffix = suffix
self._required = required
def get_key_name(self):
"""This method return the name of the section, it Should be unique
because it is used as a key or identifier."""
return self._name
def get_section_name(self):
"""This method build the current section name that the program will
looking for into the configuration file.
The format is [<prefix>-]<name>[-<suffix>].
"""
a = []
if self._prefix:
a.append(self._prefix)
a.append(str(self._name))
if self._suffix:
a.append(self._suffix)
return "-".join(a)
# pylint: disable-msg=W0613
# pylint: disable-msg=R0201
def load(self, file_parser):
""" This method must be implemented by the subclass. This method should
read and load all section elements.
"""
raise NotImplementedError("You must implement this method.")
def __str__(self):
return "".join(self.get_representation())
def write_config_file(self, f, comments):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if comments:
f.write("#####################################\n")
f.write("# Section : ")
f.write("#".join(self.get_representation()) + "\n")
f.write("#####################################\n")
f.write("[" + self._name + "]\n")
if self._desc and comments:
f.write("# Description : ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n")
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | _AbstractSection.write_config_file | python | def write_config_file(self, f, comments):
if comments:
f.write("#####################################\n")
f.write("# Section : ")
f.write("#".join(self.get_representation()) + "\n")
f.write("#####################################\n")
f.write("[" + self._name + "]\n")
if self._desc and comments:
f.write("# Description : ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n") | This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L387-L404 | [
"def get_representation(self, prefix=\"\", suffix=\"\\n\"):\n \"\"\"return the string representation of the current object.\"\"\"\n res = prefix + \"Section \" + self.get_section_name().upper() + suffix\n return res\n"
] | class _AbstractSection(object):
"""This class is the parent class of all Section classes. You can not use
it, you must implement abstract methods.
"""
def __init__(self, desc=None, prefix=None,
suffix=None, required=False):
self._name = None
self._desc = desc
self._prefix = prefix
self._suffix = suffix
self._required = required
def get_key_name(self):
"""This method return the name of the section, it Should be unique
because it is used as a key or identifier."""
return self._name
def get_section_name(self):
"""This method build the current section name that the program will
looking for into the configuration file.
The format is [<prefix>-]<name>[-<suffix>].
"""
a = []
if self._prefix:
a.append(self._prefix)
a.append(str(self._name))
if self._suffix:
a.append(self._suffix)
return "-".join(a)
# pylint: disable-msg=W0613
# pylint: disable-msg=R0201
def load(self, file_parser):
""" This method must be implemented by the subclass. This method should
read and load all section elements.
"""
raise NotImplementedError("You must implement this method.")
def get_representation(self, prefix="", suffix="\n"):
"""return the string representation of the current object."""
res = prefix + "Section " + self.get_section_name().upper() + suffix
return res
def __str__(self):
return "".join(self.get_representation())
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | _Section.add_element | python | def add_element(self, elt):
if not isinstance(elt, Element):
raise TypeError("argument should be a subclass of Element")
self.elements[elt.get_name()] = elt
return elt | Helper to add a element to the current section. The Element name
will be used as an identifier. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L414-L420 | [
"def get_name(self):\n \"\"\"This method will return the name of the current element\"\"\"\n return self._name\n"
] | class _Section(_AbstractSection):
"""Simple abstract section object, container for Elements"""
def __init__(self, *args, **kwargs):
super(_Section, self).__init__(*args, **kwargs)
self.elements = OrderedDict()
def add_element_list(self, elt_list, **kwargs):
"""Helper to add a list of similar elements to the current section.
Element names will be used as an identifier."""
for e in elt_list:
self.add_element(Element(e, **kwargs))
def count(self):
"""This method will return the number of Element in the current
Section"""
return len(self.elements)
def reset(self):
for e in self.elements.values():
e.reset()
def load(self, file_parser):
section = self.get_section_name()
try:
for e in self.elements.values():
e.load(file_parser, section)
except ConfigParser.NoSectionError as e:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
if self._required:
log.error("Required section : " + section)
raise ValueError(e)
else:
log.debug("Missing section : " + section)
def __getattr__(self, name):
e = self.elements.get(name)
if e is not None:
return e
else:
raise AttributeError("'%(class)s' object has no attribute \
'%(name)s'" % {"name": name, "class": self.__class__.__name__})
def get_element(self, name):
return self.elements.get(name)
def write_config_file(self, f, comments):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if len(self.elements) < 1:
return
super(_Section, self).write_config_file(f, comments)
for e in self.elements.values():
e.write_config_file(f, comments)
f.write("\n")
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | _Section.add_element_list | python | def add_element_list(self, elt_list, **kwargs):
for e in elt_list:
self.add_element(Element(e, **kwargs)) | Helper to add a list of similar elements to the current section.
Element names will be used as an identifier. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L422-L426 | [
"def add_element(self, elt):\n \"\"\"Helper to add a element to the current section. The Element name\n will be used as an identifier.\"\"\"\n if not isinstance(elt, Element):\n raise TypeError(\"argument should be a subclass of Element\")\n self.elements[elt.get_name()] = elt\n return elt\n"
] | class _Section(_AbstractSection):
"""Simple abstract section object, container for Elements"""
def __init__(self, *args, **kwargs):
super(_Section, self).__init__(*args, **kwargs)
self.elements = OrderedDict()
def add_element(self, elt):
"""Helper to add a element to the current section. The Element name
will be used as an identifier."""
if not isinstance(elt, Element):
raise TypeError("argument should be a subclass of Element")
self.elements[elt.get_name()] = elt
return elt
def count(self):
"""This method will return the number of Element in the current
Section"""
return len(self.elements)
def reset(self):
for e in self.elements.values():
e.reset()
def load(self, file_parser):
section = self.get_section_name()
try:
for e in self.elements.values():
e.load(file_parser, section)
except ConfigParser.NoSectionError as e:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
if self._required:
log.error("Required section : " + section)
raise ValueError(e)
else:
log.debug("Missing section : " + section)
def __getattr__(self, name):
e = self.elements.get(name)
if e is not None:
return e
else:
raise AttributeError("'%(class)s' object has no attribute \
'%(name)s'" % {"name": name, "class": self.__class__.__name__})
def get_element(self, name):
return self.elements.get(name)
def write_config_file(self, f, comments):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if len(self.elements) < 1:
return
super(_Section, self).write_config_file(f, comments)
for e in self.elements.values():
e.write_config_file(f, comments)
f.write("\n")
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | _Section.write_config_file | python | def write_config_file(self, f, comments):
if len(self.elements) < 1:
return
super(_Section, self).write_config_file(f, comments)
for e in self.elements.values():
e.write_config_file(f, comments)
f.write("\n") | This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L462-L473 | [
"def write_config_file(self, f, comments):\n \"\"\"This method write a sample file, with attributes, descriptions,\n sample values, required flags, using the configuration object\n properties.\n \"\"\"\n if comments:\n f.write(\"#####################################\\n\")\n f.write(\"# Section : \")\n f.write(\"#\".join(self.get_representation()) + \"\\n\")\n f.write(\"#####################################\\n\")\n f.write(\"[\" + self._name + \"]\\n\")\n if self._desc and comments:\n f.write(\"# Description : \")\n for i in self._desc.split('\\n'):\n f.write(\"# \")\n f.write(i)\n f.write(\"\\n\")\n f.write(\"\\n\")\n"
] | class _Section(_AbstractSection):
"""Simple abstract section object, container for Elements"""
def __init__(self, *args, **kwargs):
super(_Section, self).__init__(*args, **kwargs)
self.elements = OrderedDict()
def add_element(self, elt):
"""Helper to add a element to the current section. The Element name
will be used as an identifier."""
if not isinstance(elt, Element):
raise TypeError("argument should be a subclass of Element")
self.elements[elt.get_name()] = elt
return elt
def add_element_list(self, elt_list, **kwargs):
"""Helper to add a list of similar elements to the current section.
Element names will be used as an identifier."""
for e in elt_list:
self.add_element(Element(e, **kwargs))
def count(self):
"""This method will return the number of Element in the current
Section"""
return len(self.elements)
def reset(self):
for e in self.elements.values():
e.reset()
def load(self, file_parser):
section = self.get_section_name()
try:
for e in self.elements.values():
e.load(file_parser, section)
except ConfigParser.NoSectionError as e:
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
if self._required:
log.error("Required section : " + section)
raise ValueError(e)
else:
log.debug("Missing section : " + section)
def __getattr__(self, name):
e = self.elements.get(name)
if e is not None:
return e
else:
raise AttributeError("'%(class)s' object has no attribute \
'%(name)s'" % {"name": name, "class": self.__class__.__name__})
def get_element(self, name):
return self.elements.get(name)
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | Element.get_representation | python | def get_representation(self, prefix="", suffix="\n"):
res = []
if self.hidden:
res.append(prefix + " - " + str(self._name)
+ " : xxxxxxxx" + suffix)
else:
default = self.default
if default is None:
default = " - "
a = prefix + " - "
a += str(self._name) + " : "
if isinstance(default, types.UnicodeType):
a += default
else:
a += str(default)
a += suffix
res.append(a)
return res | This method build a array that will contain the string
representation of the current object. Every lines could be
prefixed and suffixed. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L658-L679 | null | class Element(object):
"""
An Element could represent a option into the configuration file, this
class lets you configure many requirements like default value, data
type, if the option is mandatory, etc.
You can also defined if element could be supply by the command line
interface, default options for the cli, etc.
"""
# pylint: disable-msg=R0913
def __init__(self, name, e_type=str, required=False, default=None,
conf_hidden=False, conf_required=False, desc=None,
hooks=None, hidden=False, e_type_exclude=False):
"""Information about how to declare a element which will be load from a
configuration file.
Keyword Arguments:
- name -- name of the attribute store into the configuration file.
- e_type -- Data type of the attribute.
- e_type_exclude -- Do not export data type to argparse. Example there is a
conflict between type=int and action=count for argparse parameters.
- conf_required -- The current attribute must be present in the
configuration file.
- required -- The current attribute must be present into command line
arguments except if it is present into configuration file.
- default -- Default value used if the attribute is not set in
configuration file.
This value is also used during configuration file generation.
ex: 'attribute = $default_value' or ';attribute = $default_value'
if this attribute is mandatory.
- desc -- Description used into the configuration file and argparse.
- conf_hidden -- The current attribute will not be used during
configuration file generation.
- hidden -- The current attribute will not be print on console
(ex password)
- hooks -- one hook or a list of hook. Should be an instance of
DefaultHook. The hook will be apply to the element value once read
from config file.
"""
self._name = name
self.e_type = e_type
self.e_type_exclude = e_type_exclude
self._required = required
self.default = default
self._desc = desc
self.conf_hidden = conf_hidden
self.conf_required = conf_required
self._desc_for_config = None
self._desc_for_argparse = None
self.value = None
self.hidden = hidden
if hooks is None:
hooks = []
if isinstance(hooks, list):
for h in hooks:
if not isinstance(h, DefaultHook):
raise TypeError("Hook argument should be a subclass"
+ " of DefaultHook")
self.hooks = hooks
else:
if isinstance(hooks, DefaultHook):
self.hooks = [hooks]
else:
raise TypeError(
"Hook argument should be a subclass of DefaultHook")
def get_name(self):
"""This method will return the name of the current element"""
return self._name
def __str__(self):
return "".join(self.get_representation())
def __copy__(self):
newone = type(self)(self._name)
newone.__dict__.update(self.__dict__)
#self.elements = OrderedDict()
return newone
def post_load(self):
"""Every element hooks are applied by this method, just after the
loading process.
"""
for h in self.hooks:
h(self)
def load(self, file_parser, section_name):
"""The current element is loaded from the configuration file,
all constraints and requirements are checked.
Then element hooks are applied.
"""
self._load(file_parser, section_name)
self.post_load()
def _load(self, file_parser, section_name):
"""The current element is loaded from the configuration file,
all constraints and requirements are checked.
"""
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
try:
log.debug("looking for field (section=" + section_name
+ ") : " + self._name)
data = None
try:
if self.e_type == int:
data = file_parser.getint(section_name, self._name)
elif self.e_type == float:
data = file_parser.getfloat(section_name, self._name)
elif self.e_type == bool:
data = file_parser.getboolean(section_name, self._name)
elif self.e_type == list:
data = file_parser.get(section_name, self._name)
data = data.strip()
data = data.decode(locale.getpreferredencoding())
data = data.split()
if not data:
msg = "The optional field '%(name)s' was present, \
type is list, but the current value is an empty \
list." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
elif self.e_type == str:
data = file_parser.get(section_name, self._name)
# happens only when the current field is present,
# type is string, but value is ''
if not data:
msg = "The optional field '%(name)s' was present, \
type is string, but the current value is an empty \
string." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
data = data.decode(locale.getpreferredencoding())
else:
msg = "Data type not supported : %(type)s " % {
"type": self.e_type}
log.error(msg)
raise TypeError(msg)
except ValueError as ex:
msg = "The current field '%(name)s' was present, but the \
required type is : %(e_type)s." % {
"name": self._name,
"e_type": self.e_type
}
log.error(msg)
log.error(str(ex))
raise ValueError(str(ex))
log_data = {"name": self._name, "data": data,
"e_type": self.e_type}
if self.hidden:
log_data['data'] = "xxxxxxxx"
log.debug("field found : '%(name)s', value : '%(data)s', \
type : '%(e_type)s'", log_data)
self.value = data
except ConfigParser.NoOptionError:
if self.conf_required:
msg = "The required field '%(name)s' was missing from the \
config file." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
if self.default is not None:
self.value = self.default
log_data = {"name": self._name, "data": self.default,
"e_type": self.e_type}
if self.hidden:
log_data['data'] = "xxxxxxxx"
log.debug("Field not found : '%(name)s', default value : \
'%(data)s', type : '%(e_type)s'", log_data)
else:
log.debug("Field not found : '" + self._name + "'")
def reset(self):
self.value = None
def get_arg_parse_arguments(self):
"""
During the element declaration, all configuration file requirements
and all cli requirements have been described once.
This method will build a dict containing all argparse options.
It can be used to feed argparse.ArgumentParser.
You does not need to have multiple declarations.
"""
ret = dict()
if self._required:
if self.value is not None:
ret["default"] = self.value
else:
ret["required"] = True
ret["dest"] = self._name
if not self.e_type_exclude:
if self.e_type == int or self.e_type == float:
# Just override argparse.add_argument 'type' parameter for int or float.
ret["type"] = self.e_type
if self.value is not None:
ret["default"] = self.value
if self._desc:
ret["help"] = self._desc
return ret
def write_config_file(self, f, comments):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if self.conf_hidden:
return False
if comments:
f.write("\n")
f.write("# Attribute (")
f.write(str(self.e_type.__name__))
f.write(") : ")
f.write(self._name.upper())
f.write("\n")
if self._desc and self._desc != argparse.SUPPRESS:
f.write("# Description : ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n")
if not self.conf_required:
f.write(";")
f.write(self._name)
f.write("=")
if self.default is not None and not self.hidden:
f.write(str(self.default))
f.write("\n")
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | Element.load | python | def load(self, file_parser, section_name):
self._load(file_parser, section_name)
self.post_load() | The current element is loaded from the configuration file,
all constraints and requirements are checked.
Then element hooks are applied. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L697-L703 | [
"def post_load(self):\n \"\"\"Every element hooks are applied by this method, just after the\n loading process.\n \"\"\"\n for h in self.hooks:\n h(self)\n",
" def _load(self, file_parser, section_name):\n \"\"\"The current element is loaded from the configuration file,\n all constraints and requirements are checked.\n \"\"\"\n # pylint: disable-msg=W0621\n log = logging.getLogger('argtoolbox')\n try:\n log.debug(\"looking for field (section=\" + section_name\n + \") : \" + self._name)\n data = None\n try:\n if self.e_type == int:\n data = file_parser.getint(section_name, self._name)\n elif self.e_type == float:\n data = file_parser.getfloat(section_name, self._name)\n elif self.e_type == bool:\n data = file_parser.getboolean(section_name, self._name)\n elif self.e_type == list:\n data = file_parser.get(section_name, self._name)\n data = data.strip()\n data = data.decode(locale.getpreferredencoding())\n data = data.split()\n if not data:\n msg = \"The optional field '%(name)s' was present, \\\ntype is list, but the current value is an empty \\\nlist.\" % {\"name\": self._name}\n log.error(msg)\n raise ValueError(msg)\n elif self.e_type == str:\n data = file_parser.get(section_name, self._name)\n # happens only when the current field is present,\n # type is string, but value is ''\n if not data:\n msg = \"The optional field '%(name)s' was present, \\\n type is string, but the current value is an empty \\\n string.\" % {\"name\": self._name}\n log.error(msg)\n raise ValueError(msg)\n data = data.decode(locale.getpreferredencoding())\n else:\n msg = \"Data type not supported : %(type)s \" % {\n \"type\": self.e_type}\n log.error(msg)\n raise TypeError(msg)\n\n except ValueError as ex:\n msg = \"The current field '%(name)s' was present, but the \\\nrequired type is : %(e_type)s.\" % {\n \"name\": self._name,\n \"e_type\": self.e_type\n }\n log.error(msg)\n log.error(str(ex))\n raise ValueError(str(ex))\n\n log_data = {\"name\": self._name, \"data\": data,\n \"e_type\": self.e_type}\n if self.hidden:\n log_data['data'] = \"xxxxxxxx\"\n log.debug(\"field found : '%(name)s', value : '%(data)s', \\\ntype : '%(e_type)s'\", 
log_data)\n self.value = data\n\n except ConfigParser.NoOptionError:\n if self.conf_required:\n msg = \"The required field '%(name)s' was missing from the \\\nconfig file.\" % {\"name\": self._name}\n log.error(msg)\n raise ValueError(msg)\n\n if self.default is not None:\n self.value = self.default\n log_data = {\"name\": self._name, \"data\": self.default,\n \"e_type\": self.e_type}\n if self.hidden:\n log_data['data'] = \"xxxxxxxx\"\n log.debug(\"Field not found : '%(name)s', default value : \\\n'%(data)s', type : '%(e_type)s'\", log_data)\n else:\n log.debug(\"Field not found : '\" + self._name + \"'\")\n"
] | class Element(object):
"""
An Element could represent a option into the configuration file, this
class lets you configure many requirements like default value, data
type, if the option is mandatory, etc.
You can also defined if element could be supply by the command line
interface, default options for the cli, etc.
"""
# pylint: disable-msg=R0913
def __init__(self, name, e_type=str, required=False, default=None,
conf_hidden=False, conf_required=False, desc=None,
hooks=None, hidden=False, e_type_exclude=False):
"""Information about how to declare a element which will be load from a
configuration file.
Keyword Arguments:
- name -- name of the attribute store into the configuration file.
- e_type -- Data type of the attribute.
- e_type_exclude -- Do not export data type to argparse. Example there is a
conflict between type=int and action=count for argparse parameters.
- conf_required -- The current attribute must be present in the
configuration file.
- required -- The current attribute must be present into command line
arguments except if it is present into configuration file.
- default -- Default value used if the attribute is not set in
configuration file.
This value is also used during configuration file generation.
ex: 'attribute = $default_value' or ';attribute = $default_value'
if this attribute is mandatory.
- desc -- Description used into the configuration file and argparse.
- conf_hidden -- The current attribute will not be used during
configuration file generation.
- hidden -- The current attribute will not be print on console
(ex password)
- hooks -- one hook or a list of hook. Should be an instance of
DefaultHook. The hook will be apply to the element value once read
from config file.
"""
self._name = name
self.e_type = e_type
self.e_type_exclude = e_type_exclude
self._required = required
self.default = default
self._desc = desc
self.conf_hidden = conf_hidden
self.conf_required = conf_required
self._desc_for_config = None
self._desc_for_argparse = None
self.value = None
self.hidden = hidden
if hooks is None:
hooks = []
if isinstance(hooks, list):
for h in hooks:
if not isinstance(h, DefaultHook):
raise TypeError("Hook argument should be a subclass"
+ " of DefaultHook")
self.hooks = hooks
else:
if isinstance(hooks, DefaultHook):
self.hooks = [hooks]
else:
raise TypeError(
"Hook argument should be a subclass of DefaultHook")
def get_name(self):
"""This method will return the name of the current element"""
return self._name
def get_representation(self, prefix="", suffix="\n"):
"""This method build a array that will contain the string
representation of the current object. Every lines could be
prefixed and suffixed.
"""
res = []
if self.hidden:
res.append(prefix + " - " + str(self._name)
+ " : xxxxxxxx" + suffix)
else:
default = self.default
if default is None:
default = " - "
a = prefix + " - "
a += str(self._name) + " : "
if isinstance(default, types.UnicodeType):
a += default
else:
a += str(default)
a += suffix
res.append(a)
return res
def __str__(self):
return "".join(self.get_representation())
def __copy__(self):
newone = type(self)(self._name)
newone.__dict__.update(self.__dict__)
#self.elements = OrderedDict()
return newone
def post_load(self):
"""Every element hooks are applied by this method, just after the
loading process.
"""
for h in self.hooks:
h(self)
def _load(self, file_parser, section_name):
"""The current element is loaded from the configuration file,
all constraints and requirements are checked.
"""
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
try:
log.debug("looking for field (section=" + section_name
+ ") : " + self._name)
data = None
try:
if self.e_type == int:
data = file_parser.getint(section_name, self._name)
elif self.e_type == float:
data = file_parser.getfloat(section_name, self._name)
elif self.e_type == bool:
data = file_parser.getboolean(section_name, self._name)
elif self.e_type == list:
data = file_parser.get(section_name, self._name)
data = data.strip()
data = data.decode(locale.getpreferredencoding())
data = data.split()
if not data:
msg = "The optional field '%(name)s' was present, \
type is list, but the current value is an empty \
list." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
elif self.e_type == str:
data = file_parser.get(section_name, self._name)
# happens only when the current field is present,
# type is string, but value is ''
if not data:
msg = "The optional field '%(name)s' was present, \
type is string, but the current value is an empty \
string." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
data = data.decode(locale.getpreferredencoding())
else:
msg = "Data type not supported : %(type)s " % {
"type": self.e_type}
log.error(msg)
raise TypeError(msg)
except ValueError as ex:
msg = "The current field '%(name)s' was present, but the \
required type is : %(e_type)s." % {
"name": self._name,
"e_type": self.e_type
}
log.error(msg)
log.error(str(ex))
raise ValueError(str(ex))
log_data = {"name": self._name, "data": data,
"e_type": self.e_type}
if self.hidden:
log_data['data'] = "xxxxxxxx"
log.debug("field found : '%(name)s', value : '%(data)s', \
type : '%(e_type)s'", log_data)
self.value = data
except ConfigParser.NoOptionError:
if self.conf_required:
msg = "The required field '%(name)s' was missing from the \
config file." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
if self.default is not None:
self.value = self.default
log_data = {"name": self._name, "data": self.default,
"e_type": self.e_type}
if self.hidden:
log_data['data'] = "xxxxxxxx"
log.debug("Field not found : '%(name)s', default value : \
'%(data)s', type : '%(e_type)s'", log_data)
else:
log.debug("Field not found : '" + self._name + "'")
def reset(self):
self.value = None
def get_arg_parse_arguments(self):
"""
During the element declaration, all configuration file requirements
and all cli requirements have been described once.
This method will build a dict containing all argparse options.
It can be used to feed argparse.ArgumentParser.
You does not need to have multiple declarations.
"""
ret = dict()
if self._required:
if self.value is not None:
ret["default"] = self.value
else:
ret["required"] = True
ret["dest"] = self._name
if not self.e_type_exclude:
if self.e_type == int or self.e_type == float:
# Just override argparse.add_argument 'type' parameter for int or float.
ret["type"] = self.e_type
if self.value is not None:
ret["default"] = self.value
if self._desc:
ret["help"] = self._desc
return ret
def write_config_file(self, f, comments):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if self.conf_hidden:
return False
if comments:
f.write("\n")
f.write("# Attribute (")
f.write(str(self.e_type.__name__))
f.write(") : ")
f.write(self._name.upper())
f.write("\n")
if self._desc and self._desc != argparse.SUPPRESS:
f.write("# Description : ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n")
if not self.conf_required:
f.write(";")
f.write(self._name)
f.write("=")
if self.default is not None and not self.hidden:
f.write(str(self.default))
f.write("\n")
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | Element._load | python | def _load(self, file_parser, section_name):
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
try:
log.debug("looking for field (section=" + section_name
+ ") : " + self._name)
data = None
try:
if self.e_type == int:
data = file_parser.getint(section_name, self._name)
elif self.e_type == float:
data = file_parser.getfloat(section_name, self._name)
elif self.e_type == bool:
data = file_parser.getboolean(section_name, self._name)
elif self.e_type == list:
data = file_parser.get(section_name, self._name)
data = data.strip()
data = data.decode(locale.getpreferredencoding())
data = data.split()
if not data:
msg = "The optional field '%(name)s' was present, \
type is list, but the current value is an empty \
list." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
elif self.e_type == str:
data = file_parser.get(section_name, self._name)
# happens only when the current field is present,
# type is string, but value is ''
if not data:
msg = "The optional field '%(name)s' was present, \
type is string, but the current value is an empty \
string." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
data = data.decode(locale.getpreferredencoding())
else:
msg = "Data type not supported : %(type)s " % {
"type": self.e_type}
log.error(msg)
raise TypeError(msg)
except ValueError as ex:
msg = "The current field '%(name)s' was present, but the \
required type is : %(e_type)s." % {
"name": self._name,
"e_type": self.e_type
}
log.error(msg)
log.error(str(ex))
raise ValueError(str(ex))
log_data = {"name": self._name, "data": data,
"e_type": self.e_type}
if self.hidden:
log_data['data'] = "xxxxxxxx"
log.debug("field found : '%(name)s', value : '%(data)s', \
type : '%(e_type)s'", log_data)
self.value = data
except ConfigParser.NoOptionError:
if self.conf_required:
msg = "The required field '%(name)s' was missing from the \
config file." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
if self.default is not None:
self.value = self.default
log_data = {"name": self._name, "data": self.default,
"e_type": self.e_type}
if self.hidden:
log_data['data'] = "xxxxxxxx"
log.debug("Field not found : '%(name)s', default value : \
'%(data)s', type : '%(e_type)s'", log_data)
else:
log.debug("Field not found : '" + self._name + "'") | The current element is loaded from the configuration file,
all constraints and requirements are checked. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L705-L784 | null | class Element(object):
"""
An Element could represent a option into the configuration file, this
class lets you configure many requirements like default value, data
type, if the option is mandatory, etc.
You can also defined if element could be supply by the command line
interface, default options for the cli, etc.
"""
# pylint: disable-msg=R0913
def __init__(self, name, e_type=str, required=False, default=None,
conf_hidden=False, conf_required=False, desc=None,
hooks=None, hidden=False, e_type_exclude=False):
"""Information about how to declare a element which will be load from a
configuration file.
Keyword Arguments:
- name -- name of the attribute store into the configuration file.
- e_type -- Data type of the attribute.
- e_type_exclude -- Do not export data type to argparse. Example there is a
conflict between type=int and action=count for argparse parameters.
- conf_required -- The current attribute must be present in the
configuration file.
- required -- The current attribute must be present into command line
arguments except if it is present into configuration file.
- default -- Default value used if the attribute is not set in
configuration file.
This value is also used during configuration file generation.
ex: 'attribute = $default_value' or ';attribute = $default_value'
if this attribute is mandatory.
- desc -- Description used into the configuration file and argparse.
- conf_hidden -- The current attribute will not be used during
configuration file generation.
- hidden -- The current attribute will not be print on console
(ex password)
- hooks -- one hook or a list of hook. Should be an instance of
DefaultHook. The hook will be apply to the element value once read
from config file.
"""
self._name = name
self.e_type = e_type
self.e_type_exclude = e_type_exclude
self._required = required
self.default = default
self._desc = desc
self.conf_hidden = conf_hidden
self.conf_required = conf_required
self._desc_for_config = None
self._desc_for_argparse = None
self.value = None
self.hidden = hidden
if hooks is None:
hooks = []
if isinstance(hooks, list):
for h in hooks:
if not isinstance(h, DefaultHook):
raise TypeError("Hook argument should be a subclass"
+ " of DefaultHook")
self.hooks = hooks
else:
if isinstance(hooks, DefaultHook):
self.hooks = [hooks]
else:
raise TypeError(
"Hook argument should be a subclass of DefaultHook")
def get_name(self):
"""This method will return the name of the current element"""
return self._name
def get_representation(self, prefix="", suffix="\n"):
"""This method build a array that will contain the string
representation of the current object. Every lines could be
prefixed and suffixed.
"""
res = []
if self.hidden:
res.append(prefix + " - " + str(self._name)
+ " : xxxxxxxx" + suffix)
else:
default = self.default
if default is None:
default = " - "
a = prefix + " - "
a += str(self._name) + " : "
if isinstance(default, types.UnicodeType):
a += default
else:
a += str(default)
a += suffix
res.append(a)
return res
def __str__(self):
return "".join(self.get_representation())
def __copy__(self):
newone = type(self)(self._name)
newone.__dict__.update(self.__dict__)
#self.elements = OrderedDict()
return newone
def post_load(self):
"""Every element hooks are applied by this method, just after the
loading process.
"""
for h in self.hooks:
h(self)
def load(self, file_parser, section_name):
"""The current element is loaded from the configuration file,
all constraints and requirements are checked.
Then element hooks are applied.
"""
self._load(file_parser, section_name)
self.post_load()
def reset(self):
self.value = None
def get_arg_parse_arguments(self):
"""
During the element declaration, all configuration file requirements
and all cli requirements have been described once.
This method will build a dict containing all argparse options.
It can be used to feed argparse.ArgumentParser.
You does not need to have multiple declarations.
"""
ret = dict()
if self._required:
if self.value is not None:
ret["default"] = self.value
else:
ret["required"] = True
ret["dest"] = self._name
if not self.e_type_exclude:
if self.e_type == int or self.e_type == float:
# Just override argparse.add_argument 'type' parameter for int or float.
ret["type"] = self.e_type
if self.value is not None:
ret["default"] = self.value
if self._desc:
ret["help"] = self._desc
return ret
def write_config_file(self, f, comments):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if self.conf_hidden:
return False
if comments:
f.write("\n")
f.write("# Attribute (")
f.write(str(self.e_type.__name__))
f.write(") : ")
f.write(self._name.upper())
f.write("\n")
if self._desc and self._desc != argparse.SUPPRESS:
f.write("# Description : ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n")
if not self.conf_required:
f.write(";")
f.write(self._name)
f.write("=")
if self.default is not None and not self.hidden:
f.write(str(self.default))
f.write("\n")
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | Element.get_arg_parse_arguments | python | def get_arg_parse_arguments(self):
ret = dict()
if self._required:
if self.value is not None:
ret["default"] = self.value
else:
ret["required"] = True
ret["dest"] = self._name
if not self.e_type_exclude:
if self.e_type == int or self.e_type == float:
# Just override argparse.add_argument 'type' parameter for int or float.
ret["type"] = self.e_type
if self.value is not None:
ret["default"] = self.value
if self._desc:
ret["help"] = self._desc
return ret | During the element declaration, all configuration file requirements
and all cli requirements have been described once.
This method will build a dict containing all argparse options.
It can be used to feed argparse.ArgumentParser.
You does not need to have multiple declarations. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L789-L813 | null | class Element(object):
"""
An Element could represent a option into the configuration file, this
class lets you configure many requirements like default value, data
type, if the option is mandatory, etc.
You can also defined if element could be supply by the command line
interface, default options for the cli, etc.
"""
# pylint: disable-msg=R0913
def __init__(self, name, e_type=str, required=False, default=None,
conf_hidden=False, conf_required=False, desc=None,
hooks=None, hidden=False, e_type_exclude=False):
"""Information about how to declare a element which will be load from a
configuration file.
Keyword Arguments:
- name -- name of the attribute store into the configuration file.
- e_type -- Data type of the attribute.
- e_type_exclude -- Do not export data type to argparse. Example there is a
conflict between type=int and action=count for argparse parameters.
- conf_required -- The current attribute must be present in the
configuration file.
- required -- The current attribute must be present into command line
arguments except if it is present into configuration file.
- default -- Default value used if the attribute is not set in
configuration file.
This value is also used during configuration file generation.
ex: 'attribute = $default_value' or ';attribute = $default_value'
if this attribute is mandatory.
- desc -- Description used into the configuration file and argparse.
- conf_hidden -- The current attribute will not be used during
configuration file generation.
- hidden -- The current attribute will not be print on console
(ex password)
- hooks -- one hook or a list of hook. Should be an instance of
DefaultHook. The hook will be apply to the element value once read
from config file.
"""
self._name = name
self.e_type = e_type
self.e_type_exclude = e_type_exclude
self._required = required
self.default = default
self._desc = desc
self.conf_hidden = conf_hidden
self.conf_required = conf_required
self._desc_for_config = None
self._desc_for_argparse = None
self.value = None
self.hidden = hidden
if hooks is None:
hooks = []
if isinstance(hooks, list):
for h in hooks:
if not isinstance(h, DefaultHook):
raise TypeError("Hook argument should be a subclass"
+ " of DefaultHook")
self.hooks = hooks
else:
if isinstance(hooks, DefaultHook):
self.hooks = [hooks]
else:
raise TypeError(
"Hook argument should be a subclass of DefaultHook")
def get_name(self):
"""This method will return the name of the current element"""
return self._name
def get_representation(self, prefix="", suffix="\n"):
"""This method build a array that will contain the string
representation of the current object. Every lines could be
prefixed and suffixed.
"""
res = []
if self.hidden:
res.append(prefix + " - " + str(self._name)
+ " : xxxxxxxx" + suffix)
else:
default = self.default
if default is None:
default = " - "
a = prefix + " - "
a += str(self._name) + " : "
if isinstance(default, types.UnicodeType):
a += default
else:
a += str(default)
a += suffix
res.append(a)
return res
def __str__(self):
return "".join(self.get_representation())
def __copy__(self):
newone = type(self)(self._name)
newone.__dict__.update(self.__dict__)
#self.elements = OrderedDict()
return newone
def post_load(self):
"""Every element hooks are applied by this method, just after the
loading process.
"""
for h in self.hooks:
h(self)
def load(self, file_parser, section_name):
"""The current element is loaded from the configuration file,
all constraints and requirements are checked.
Then element hooks are applied.
"""
self._load(file_parser, section_name)
self.post_load()
def _load(self, file_parser, section_name):
"""The current element is loaded from the configuration file,
all constraints and requirements are checked.
"""
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
try:
log.debug("looking for field (section=" + section_name
+ ") : " + self._name)
data = None
try:
if self.e_type == int:
data = file_parser.getint(section_name, self._name)
elif self.e_type == float:
data = file_parser.getfloat(section_name, self._name)
elif self.e_type == bool:
data = file_parser.getboolean(section_name, self._name)
elif self.e_type == list:
data = file_parser.get(section_name, self._name)
data = data.strip()
data = data.decode(locale.getpreferredencoding())
data = data.split()
if not data:
msg = "The optional field '%(name)s' was present, \
type is list, but the current value is an empty \
list." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
elif self.e_type == str:
data = file_parser.get(section_name, self._name)
# happens only when the current field is present,
# type is string, but value is ''
if not data:
msg = "The optional field '%(name)s' was present, \
type is string, but the current value is an empty \
string." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
data = data.decode(locale.getpreferredencoding())
else:
msg = "Data type not supported : %(type)s " % {
"type": self.e_type}
log.error(msg)
raise TypeError(msg)
except ValueError as ex:
msg = "The current field '%(name)s' was present, but the \
required type is : %(e_type)s." % {
"name": self._name,
"e_type": self.e_type
}
log.error(msg)
log.error(str(ex))
raise ValueError(str(ex))
log_data = {"name": self._name, "data": data,
"e_type": self.e_type}
if self.hidden:
log_data['data'] = "xxxxxxxx"
log.debug("field found : '%(name)s', value : '%(data)s', \
type : '%(e_type)s'", log_data)
self.value = data
except ConfigParser.NoOptionError:
if self.conf_required:
msg = "The required field '%(name)s' was missing from the \
config file." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
if self.default is not None:
self.value = self.default
log_data = {"name": self._name, "data": self.default,
"e_type": self.e_type}
if self.hidden:
log_data['data'] = "xxxxxxxx"
log.debug("Field not found : '%(name)s', default value : \
'%(data)s', type : '%(e_type)s'", log_data)
else:
log.debug("Field not found : '" + self._name + "'")
def reset(self):
self.value = None
def write_config_file(self, f, comments):
"""This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties.
"""
if self.conf_hidden:
return False
if comments:
f.write("\n")
f.write("# Attribute (")
f.write(str(self.e_type.__name__))
f.write(") : ")
f.write(self._name.upper())
f.write("\n")
if self._desc and self._desc != argparse.SUPPRESS:
f.write("# Description : ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n")
if not self.conf_required:
f.write(";")
f.write(self._name)
f.write("=")
if self.default is not None and not self.hidden:
f.write(str(self.default))
f.write("\n")
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | Element.write_config_file | python | def write_config_file(self, f, comments):
if self.conf_hidden:
return False
if comments:
f.write("\n")
f.write("# Attribute (")
f.write(str(self.e_type.__name__))
f.write(") : ")
f.write(self._name.upper())
f.write("\n")
if self._desc and self._desc != argparse.SUPPRESS:
f.write("# Description : ")
for i in self._desc.split('\n'):
f.write("# ")
f.write(i)
f.write("\n")
f.write("\n")
if not self.conf_required:
f.write(";")
f.write(self._name)
f.write("=")
if self.default is not None and not self.hidden:
f.write(str(self.default))
f.write("\n") | This method write a sample file, with attributes, descriptions,
sample values, required flags, using the configuration object
properties. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L815-L844 | null | class Element(object):
"""
An Element could represent a option into the configuration file, this
class lets you configure many requirements like default value, data
type, if the option is mandatory, etc.
You can also defined if element could be supply by the command line
interface, default options for the cli, etc.
"""
# pylint: disable-msg=R0913
def __init__(self, name, e_type=str, required=False, default=None,
conf_hidden=False, conf_required=False, desc=None,
hooks=None, hidden=False, e_type_exclude=False):
"""Information about how to declare a element which will be load from a
configuration file.
Keyword Arguments:
- name -- name of the attribute store into the configuration file.
- e_type -- Data type of the attribute.
- e_type_exclude -- Do not export data type to argparse. Example there is a
conflict between type=int and action=count for argparse parameters.
- conf_required -- The current attribute must be present in the
configuration file.
- required -- The current attribute must be present into command line
arguments except if it is present into configuration file.
- default -- Default value used if the attribute is not set in
configuration file.
This value is also used during configuration file generation.
ex: 'attribute = $default_value' or ';attribute = $default_value'
if this attribute is mandatory.
- desc -- Description used into the configuration file and argparse.
- conf_hidden -- The current attribute will not be used during
configuration file generation.
- hidden -- The current attribute will not be print on console
(ex password)
- hooks -- one hook or a list of hook. Should be an instance of
DefaultHook. The hook will be apply to the element value once read
from config file.
"""
self._name = name
self.e_type = e_type
self.e_type_exclude = e_type_exclude
self._required = required
self.default = default
self._desc = desc
self.conf_hidden = conf_hidden
self.conf_required = conf_required
self._desc_for_config = None
self._desc_for_argparse = None
self.value = None
self.hidden = hidden
if hooks is None:
hooks = []
if isinstance(hooks, list):
for h in hooks:
if not isinstance(h, DefaultHook):
raise TypeError("Hook argument should be a subclass"
+ " of DefaultHook")
self.hooks = hooks
else:
if isinstance(hooks, DefaultHook):
self.hooks = [hooks]
else:
raise TypeError(
"Hook argument should be a subclass of DefaultHook")
def get_name(self):
"""This method will return the name of the current element"""
return self._name
def get_representation(self, prefix="", suffix="\n"):
"""This method build a array that will contain the string
representation of the current object. Every lines could be
prefixed and suffixed.
"""
res = []
if self.hidden:
res.append(prefix + " - " + str(self._name)
+ " : xxxxxxxx" + suffix)
else:
default = self.default
if default is None:
default = " - "
a = prefix + " - "
a += str(self._name) + " : "
if isinstance(default, types.UnicodeType):
a += default
else:
a += str(default)
a += suffix
res.append(a)
return res
def __str__(self):
return "".join(self.get_representation())
def __copy__(self):
newone = type(self)(self._name)
newone.__dict__.update(self.__dict__)
#self.elements = OrderedDict()
return newone
def post_load(self):
"""Every element hooks are applied by this method, just after the
loading process.
"""
for h in self.hooks:
h(self)
def load(self, file_parser, section_name):
"""The current element is loaded from the configuration file,
all constraints and requirements are checked.
Then element hooks are applied.
"""
self._load(file_parser, section_name)
self.post_load()
def _load(self, file_parser, section_name):
"""The current element is loaded from the configuration file,
all constraints and requirements are checked.
"""
# pylint: disable-msg=W0621
log = logging.getLogger('argtoolbox')
try:
log.debug("looking for field (section=" + section_name
+ ") : " + self._name)
data = None
try:
if self.e_type == int:
data = file_parser.getint(section_name, self._name)
elif self.e_type == float:
data = file_parser.getfloat(section_name, self._name)
elif self.e_type == bool:
data = file_parser.getboolean(section_name, self._name)
elif self.e_type == list:
data = file_parser.get(section_name, self._name)
data = data.strip()
data = data.decode(locale.getpreferredencoding())
data = data.split()
if not data:
msg = "The optional field '%(name)s' was present, \
type is list, but the current value is an empty \
list." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
elif self.e_type == str:
data = file_parser.get(section_name, self._name)
# happens only when the current field is present,
# type is string, but value is ''
if not data:
msg = "The optional field '%(name)s' was present, \
type is string, but the current value is an empty \
string." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
data = data.decode(locale.getpreferredencoding())
else:
msg = "Data type not supported : %(type)s " % {
"type": self.e_type}
log.error(msg)
raise TypeError(msg)
except ValueError as ex:
msg = "The current field '%(name)s' was present, but the \
required type is : %(e_type)s." % {
"name": self._name,
"e_type": self.e_type
}
log.error(msg)
log.error(str(ex))
raise ValueError(str(ex))
log_data = {"name": self._name, "data": data,
"e_type": self.e_type}
if self.hidden:
log_data['data'] = "xxxxxxxx"
log.debug("field found : '%(name)s', value : '%(data)s', \
type : '%(e_type)s'", log_data)
self.value = data
except ConfigParser.NoOptionError:
if self.conf_required:
msg = "The required field '%(name)s' was missing from the \
config file." % {"name": self._name}
log.error(msg)
raise ValueError(msg)
if self.default is not None:
self.value = self.default
log_data = {"name": self._name, "data": self.default,
"e_type": self.e_type}
if self.hidden:
log_data['data'] = "xxxxxxxx"
log.debug("Field not found : '%(name)s', default value : \
'%(data)s', type : '%(e_type)s'", log_data)
else:
log.debug("Field not found : '" + self._name + "'")
def reset(self):
self.value = None
def get_arg_parse_arguments(self):
"""
During the element declaration, all configuration file requirements
and all cli requirements have been described once.
This method will build a dict containing all argparse options.
It can be used to feed argparse.ArgumentParser.
You does not need to have multiple declarations.
"""
ret = dict()
if self._required:
if self.value is not None:
ret["default"] = self.value
else:
ret["required"] = True
ret["dest"] = self._name
if not self.e_type_exclude:
if self.e_type == int or self.e_type == float:
# Just override argparse.add_argument 'type' parameter for int or float.
ret["type"] = self.e_type
if self.value is not None:
ret["default"] = self.value
if self._desc:
ret["help"] = self._desc
return ret
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | ElementWithSubSections.add_section | python | def add_section(self, section):
if not issubclass(section.__class__, SubSection):
raise TypeError("Argument should be a subclass of SubSection, \
not :" + str(section.__class__))
self.sections[section.name] = section
return section | You can add section inside a Element, the section must be a
subclass of SubSection. You can use this class to represent a tree. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L881-L890 | null | class ElementWithSubSections(Element):
""" This class extends the default class Element. It offers you the power
to add sections (SubSection) inside a element.
The simple case is one section containing some elements.
But in some situation you may represent your data like a tree.
Section :
Element1 : name = port, value = 389
Element2 : name = address, value = 127.0.0.1
ElementWithSubSections
SubSection
Element1
Element2
"""
def __init__(self, *args, **kwargs):
super(ElementWithSubSections, self).__init__(*args, **kwargs)
self.e_type = str
self.sections = OrderedDict()
def get_representation(self, prefix="", suffix="\n"):
res = ['\n']
temp_line = prefix + " - " + str(self._name) + " : "
if self.hidden:
temp_line += "xxxxxxxx" + suffix
else:
temp_line += str(self.value) + suffix
res.append(temp_line)
if len(self.sections) > 0:
for elt in self.sections.values():
res.append("".join(elt.get_representation(prefix + "\t")))
return res
def load(self, file_parser, section_name):
self._load(file_parser, section_name)
if len(self.sections) > 0:
for sec in self.sections.values():
sec.name = self.value
sec.load(file_parser)
self.post_load()
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | BasicProgram.add_config_options | python | def add_config_options(self):
# default section
default = self.config.get_default_section()
default.add_element(
Element('debug',
e_type=int,
e_type_exclude=True,
default=0,
desc="""debug level : default : 0.""")) | You can override this method in order to add your options to the
config object. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L1244-L1254 | null | class BasicProgram(object):
""" TODO """
def __init__(self, name, config_file=None, desc=None,
mandatory=False, use_config_file=True, version="0.1-alpha",
force_debug=False, force_debug_to_file=False):
# create configuration
self.config = Config(name, config_file=config_file, desc=desc,
mandatory=mandatory,
use_config_file=use_config_file)
self.prog_name = name
self.parser = None
self.version = version
self.formatter_class = None
self.force_debug = force_debug
self.force_debug_to_file = force_debug_to_file
self.log = self.init_logger()
def init_logger(self):
# logger
log = logging.getLogger()
log.setLevel(logging.INFO)
# logger handlers
log.addHandler(streamHandler)
# debug mode
# if you want to enable debug during class construction, file
# configuration loading, ..., you need to modify the logger level here.
if self.force_debug:
log.setLevel(logging.DEBUG)
streamHandler.setFormatter(DEBUG_LOGGING_FORMAT)
if self.force_debug_to_file:
dest = self.prog_name + ".log"
FILEHA = logging.FileHandler(dest, 'w', 'utf-8')
FILEHA.setFormatter(DEBUG_LOGGING_FORMAT)
log.setLevel(logging.DEBUG)
log.addHandler(FILEHA)
streamHandler.setFormatter(DEBUG_LOGGING_FORMAT)
log.warning("output log file : " + dest)
return log
def load(self):
# loading default configuration from the file
self.config.load()
def init_parser(self):
# arguments parser
self.parser = self.config.get_parser()
if self.formatter_class:
self.parser.formatter_class = self.formatter_class
self.parser.add_argument('-v', '--verbose',
action="store_true",
default=False)
self.parser.add_argument('--version',
action="version",
version="%(prog)s " + self.version)
def add_pre_commands(self):
""" You can override this method in order to add your command line
arguments to the argparse parser. The configuration file is already
loaded at this time."""
pass
def reload(self):
# reloading configuration with previous optional arguments
# (example : config file name from argv, ...)
self.config.reload()
def add_commands(self):
""" You can override this method in order to add your command line
arguments to the argparse parser. The configuration file was
reloaded at this time."""
self.parser.add_argument(
'-d',
action="count",
**self.config.default.debug.get_arg_parse_arguments())
def __call__(self):
# adding some user options to the config object
self.add_config_options()
# loading default configuration from the file
self.load()
# initialisation of the cli parser,
# some default arguments are also added.
self.init_parser()
# adding some user arguments
self.add_pre_commands()
# reloading configuration with previous optional arguments
# (example : config file name from argv, ...)
self.reload()
# adding all commands
self.add_commands()
# run
run = DefaultProgram(self.parser, self.config,
force_debug=self.force_debug)
if run():
sys.exit(0)
else:
sys.exit(1)
|
fred49/argtoolbox | argtoolbox/argtoolbox.py | BasicProgram.add_commands | python | def add_commands(self):
self.parser.add_argument(
'-d',
action="count",
**self.config.default.debug.get_arg_parse_arguments()) | You can override this method in order to add your command line
arguments to the argparse parser. The configuration file was
reloaded at this time. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L1283-L1290 | null | class BasicProgram(object):
""" TODO """
def __init__(self, name, config_file=None, desc=None,
mandatory=False, use_config_file=True, version="0.1-alpha",
force_debug=False, force_debug_to_file=False):
# create configuration
self.config = Config(name, config_file=config_file, desc=desc,
mandatory=mandatory,
use_config_file=use_config_file)
self.prog_name = name
self.parser = None
self.version = version
self.formatter_class = None
self.force_debug = force_debug
self.force_debug_to_file = force_debug_to_file
self.log = self.init_logger()
def init_logger(self):
# logger
log = logging.getLogger()
log.setLevel(logging.INFO)
# logger handlers
log.addHandler(streamHandler)
# debug mode
# if you want to enable debug during class construction, file
# configuration loading, ..., you need to modify the logger level here.
if self.force_debug:
log.setLevel(logging.DEBUG)
streamHandler.setFormatter(DEBUG_LOGGING_FORMAT)
if self.force_debug_to_file:
dest = self.prog_name + ".log"
FILEHA = logging.FileHandler(dest, 'w', 'utf-8')
FILEHA.setFormatter(DEBUG_LOGGING_FORMAT)
log.setLevel(logging.DEBUG)
log.addHandler(FILEHA)
streamHandler.setFormatter(DEBUG_LOGGING_FORMAT)
log.warning("output log file : " + dest)
return log
def add_config_options(self):
""" You can override this method in order to add your options to the
config object."""
# default section
default = self.config.get_default_section()
default.add_element(
Element('debug',
e_type=int,
e_type_exclude=True,
default=0,
desc="""debug level : default : 0."""))
def load(self):
# loading default configuration from the file
self.config.load()
def init_parser(self):
# arguments parser
self.parser = self.config.get_parser()
if self.formatter_class:
self.parser.formatter_class = self.formatter_class
self.parser.add_argument('-v', '--verbose',
action="store_true",
default=False)
self.parser.add_argument('--version',
action="version",
version="%(prog)s " + self.version)
def add_pre_commands(self):
""" You can override this method in order to add your command line
arguments to the argparse parser. The configuration file is already
loaded at this time."""
pass
def reload(self):
# reloading configuration with previous optional arguments
# (example : config file name from argv, ...)
self.config.reload()
def __call__(self):
# adding some user options to the config object
self.add_config_options()
# loading default configuration from the file
self.load()
# initialisation of the cli parser,
# some default arguments are also added.
self.init_parser()
# adding some user arguments
self.add_pre_commands()
# reloading configuration with previous optional arguments
# (example : config file name from argv, ...)
self.reload()
# adding all commands
self.add_commands()
# run
run = DefaultProgram(self.parser, self.config,
force_debug=self.force_debug)
if run():
sys.exit(0)
else:
sys.exit(1)
|
fred49/argtoolbox | setup.py | is_archlinux | python | def is_archlinux():
if platform.system().lower() == 'linux':
if platform.linux_distribution() == ('', '', ''):
# undefined distribution. Fixed in python 3.
if os.path.exists('/etc/arch-release'):
return True
return False | return True if the current distribution is running on debian like OS. | train | https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/setup.py#L46-L53 | null | #!/usr/bin/env python
import glob
from setuptools import setup, find_packages
from setuptools.command.install import install
import codecs
import os
import re
import shlex
import platform
from subprocess import call
import subprocess
here = os.path.abspath(os.path.dirname(__file__))
# Read the version number from a source file.
# Why read it, and not import?
# see https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion
def find_version(*file_paths):
# Open in Latin-1 so that we avoid encoding errors.
# Use codecs.open for Python 2 compatibility
with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
version_file = f.read()
# The version line must have the form
# __version__ = 'ver'
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# Get the long description from the relevant file
with codecs.open('README.rst', encoding='utf-8') as f:
long_description = f.read()
def is_debian_like():
"""return True if the current distribution is running on debian like OS."""
if platform.system().lower() == 'linux':
if platform.linux_distribution()[0].lower() in ['ubuntu', 'debian']:
return True
return False
def bash_version_greater_than_4_3():
if is_debian_like():
cmd = "/usr/bin/dpkg-query -W -f '${version}' bash"
status, version = run_command(cmd, True)
if status:
cmd = "dpkg --compare-versions %(version)s ge 4.3" % {
'version': version,
}
return run_command(cmd)
elif is_archlinux():
# TODO : support postinst for archlinux
pass
return False
def run_command(cmd, return_stdout=False):
dpkg_process = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = dpkg_process.communicate()
status = False
if dpkg_process.wait() == 0:
status = True
#print stderr
if return_stdout:
return (status, stdout.strip('\n'))
return status
class CustomInstallCommand(install):
"""Customized setuptools install command - prints a friendly
greeting."""
def run(self):
install.run(self)
# argcomplete completion is automatic if bash version is greater or
# equal than 4.3
if bash_version_greater_than_4_3():
# Detection of virtual env installation.
if os.getenv('VIRTUAL_ENV', False):
self.install_virtual_env()
else:
self.install_etc()
else:
print "\n/!\\ You need to register manually every script which \
support argcomplete to enable bash completion.\n"
def install_virtual_env(self):
print "\nINFO: Registering argcomplete support in the current \
virtualenv for auto activation."
directory = os.getenv('VIRTUAL_ENV')
cmd = "activate-global-python-argcomplete --dest "
cmd += os.path.join(directory, 'bin')
call(shlex.split(cmd))
f = open(os.path.join(directory, 'bin', 'activate'), 'a')
data = "\nsource "
data += os.path.join(directory, 'bin', 'python-argcomplete.sh')
data += '\n'
f.write(data)
f.close()
print "INFO: You may need to launch a new install of bash for the auto \
completion to be active.\n"
def install_etc(self):
print "\nINFO: Registering argcomplete support in /etc/ for global \
activation."
cmd = "activate-global-python-argcomplete --global"
status = run_command(cmd)
if not status:
print "WARN: Global activation for argcomplete failed !!!"
print "WARN: See 'argtoolboxtool register -h'.\n"
setup(
cmdclass={'install': CustomInstallCommand},
name = 'argtoolbox',
version = find_version('argtoolbox', '__init__.py'),
description = 'The easy way to create a short program with file options and command line options.',
long_description=long_description,
# The project URL.
url = 'https://github.com/fred49/argtoolbox',
# Author details
author = 'Frederic MARTIN',
author_email = 'frederic.martin.fma@gmail.com',
# Choose your license
license = "GPL3",
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Environment :: Console',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='argparse ConfigFile command line interface',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=find_packages(exclude=["contrib", "docs", "tests*"]),
package_data = {
'argtoolbox': ['templates/*.tml'],
},
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ['argparse',
'argcomplete',
'ConfigParser',
'ordereddict'],
scripts = glob.glob('bin/*'),
)
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Auth.connect | python | def connect(self):
if time.time() < self._login_cooldown:
raise FailedToConnect("login on cooldown")
resp = yield from self.session.post("https://connect.ubi.com/ubiservices/v2/profiles/sessions", headers = {
"Content-Type": "application/json",
"Ubi-AppId": self.appid,
"Authorization": "Basic " + self.token
}, data=json.dumps({"rememberMe": True}))
data = yield from resp.json()
if "ticket" in data:
self.key = data.get("ticket")
self.sessionid = data.get("sessionId")
self.uncertain_spaceid = data.get("spaceId")
else:
raise FailedToConnect | |coro|
Connect to ubisoft, automatically called when needed | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L378-L398 | null | class Auth:
"""Holds your authentication information. Used to retrieve Player objects
Parameters
----------
email : Optional[str]
Your Ubisoft email
password : Optional[str]
Your Ubisoft password
token : Optional[str]
Your Ubisoft auth token, either supply this OR email/password
appid : Optional[str]
Your Ubisoft appid, not required
cachetime : Optional[float]
How long players are cached for (in seconds)
max_connect_retries : Optional[int]
How many times the auth client will automatically try to reconnect, high numbers can get you temporarily banned
Attributes
----------
session
aiohttp client session
token : str
your token
appid : str
your appid
sessionid : str
the current connections session id (will change upon attaining new key)
key : str
your current auth key (will change every time you connect)
spaceids : dict
contains the spaceid for each platform
profileid : str
your profileid (corresponds to your appid)
userid : str
your userid (corresponds to your appid)
cachetime : float
the time players are cached for
cache : dict
the current player cache
"""
@staticmethod
def get_basic_token(email, password):
return base64.b64encode((email + ":" + password).encode("utf-8")).decode("utf-8")
def __init__(self, email=None, password=None, token=None, appid=None,
cachetime=120, max_connect_retries=1, session=None):
if session is not None:
self.session = session
else:
self.session = aiohttp.ClientSession()
self.max_connect_retries = max_connect_retries
if email is not None and password is not None:
self.token = Auth.get_basic_token(email, password)
elif token is not None:
self.token = token
else:
raise TypeError("Argument error, requires either email/password or token to be set, neither given")
if appid is not None:
self.appid = appid
else:
self.appid = "39baebad-39e5-4552-8c25-2c9b919064e2"
self.sessionid = ""
self.key = ""
self.uncertain_spaceid = ""
self.spaceids = {
"uplay": "5172a557-50b5-4665-b7db-e3f2e8c5041d",
"psn": "05bfb3f7-6c21-4c42-be1f-97a33fb5cf66",
"xbl": "98a601e5-ca91-4440-b1c5-753f601a2c90"
}
self.profileid = ""
self.userid = ""
self.genome = ""
self.cachetime = cachetime
self.cache={}
self._definitions = None
self._op_definitions = None
self._login_cooldown = 0
def __del__(self):
self.session.close()
@asyncio.coroutine
@asyncio.coroutine
def get(self, *args, retries=0, referer=None, json=True, **kwargs):
if not self.key:
for i in range(self.max_connect_retries):
try:
yield from self.connect()
break
except FailedToConnect:
pass
else:
raise FailedToConnect
if "headers" not in kwargs: kwargs["headers"] = {}
kwargs["headers"]["Authorization"] = "Ubi_v1 t=" + self.key
kwargs["headers"]["Ubi-AppId"] = self.appid
kwargs["headers"]["Ubi-SessionId"] = self.sessionid
kwargs["headers"]["Connection"] = "keep-alive"
if referer is not None:
if isinstance(referer, Player):
referer = "https://game-rainbow6.ubi.com/en-gb/uplay/player-statistics/%s/multiplayer" % referer.id
kwargs["headers"]["Referer"] = str(referer)
resp = yield from self.session.get(*args, **kwargs)
if json:
try:
data = yield from resp.json()
except:
text = yield from resp.text()
message = text.split("h1>")
if len(message) > 1:
message = message[1][:-2]
code = 0
if "502" in message: code = 502
else:
message = text
raise InvalidRequest("Received a text response, expected JSON response. Message: %s" % message, code=code)
if "httpCode" in data:
if data["httpCode"] == 401:
if retries >= self.max_connect_retries:
# wait 30 seconds before sending another request
self._login_cooldown = time.time() + 60
raise FailedToConnect
yield from self.connect()
result = yield from self.get(*args, retries=retries+1, **kwargs)
return result
else:
msg = data.get("message", "")
if data["httpCode"] == 404: msg = "missing resource %s" % data.get("resource", args[0])
raise InvalidRequest("HTTP Code: %s, Message: %s" % (data["httpCode"], msg), code=data["httpCode"])
return data
else:
text = yield from resp.text()
return text
@asyncio.coroutine
def get_players(self, name=None, platform=None, uid=None):
"""|coro|
get a list of players matching the term on that platform,
exactly one of uid and name must be given, platform must be given,
this list almost always has only 1 element, so it's easier to use get_player
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
list[:class:`Player`]
list of found players"""
if name is None and uid is None:
raise TypeError("name and uid are both None, exactly one must be given")
if name is not None and uid is not None:
raise TypeError("cannot search by uid and name at the same time, please give one or the other")
if platform is None:
raise TypeError("platform cannot be None")
if "platform" not in self.cache: self.cache[platform] = {}
if name:
cache_key = "NAME:%s" % name
else:
cache_key = "UID:%s" % uid
if cache_key in self.cache[platform]:
if self.cachetime > 0 and self.cache[platform][cache_key][0] < time.time():
del self.cache[platform][cache_key]
else:
return self.cache[platform][cache_key][1]
if name:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/profiles?nameOnPlatform=%s&platformType=%s" % (parse.quote(name), parse.quote(platform)))
else:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/users/%s/profiles?platformType=%s" % (uid, parse.quote(platform)))
if "profiles" in data:
results = [Player(self, x) for x in data["profiles"] if x.get("platformType", "") == platform]
if len(results) == 0: raise InvalidRequest("No results")
if self.cachetime != 0:
self.cache[platform][cache_key] = [time.time() + self.cachetime, results]
return results
else:
raise InvalidRequest("Missing key profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_player(self, name=None, platform=None, uid=None):
"""|coro|
Calls get_players and returns the first element,
exactly one of uid and name must be given, platform must be given
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
:class:`Player`
player found"""
results = yield from self.get_players(name=name, platform=platform, uid=uid)
return results[0]
@asyncio.coroutine
def get_operator_definitions(self):
"""|coro|
Retrieves a list of information about operators - their badge, unique statistic, etc.
Returns
-------
dict
operators"""
if self._op_definitions is not None:
return self._op_definitions
resp = yield from self.session.get("https://game-rainbow6.ubi.com/assets/data/operators.24b865895.json")
data = yield from resp.json()
self._op_definitions = data
return data
@asyncio.coroutine
def get_operator_index(self, name):
"""|coro|
Gets the operators index from the operator definitions dict
Returns
-------
str
the operator index"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
return opdefs[name]["index"]
@asyncio.coroutine
def get_operator_statistic(self, name):
"""|coro|
Gets the operator unique statistic from the operator definitions dict
Returns
-------
str
the name of the operator unique statistic"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
# some operators (e.g. Kaid and Nomad) don't have a unique statistic sectoin for some reason...
if "uniqueStatistic" not in opdefs[name] or "pvp" not in opdefs[name]["uniqueStatistic"]:
return None
return opdefs[name]["uniqueStatistic"]["pvp"]["statisticId"]
@asyncio.coroutine
def get_operator_badge(self, name):
"""|coro|
Gets the operator badge URL
Returns
-------
str
the operators badge URL"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
badge = opdefs[name]["badge"]
if not badge.startswith("http"):
badge = "https://game-rainbow6.ubi.com/" + badge
return badge
@asyncio.coroutine
def get_definitions(self):
"""|coro|
Retrieves the list of api definitions, downloading it from Ubisoft if it hasn't been fetched all ready
Primarily for internal use, but could contain useful information.
Returns
-------
dict
definitions"""
if self._definitions is not None:
return self._definitions
resp = yield from self.session.get("https://ubistatic-a.akamaihd.net/0058/prod/assets/data/statistics.definitions.eb165e13.json")
data = yield from resp.json()
self._definitions = data
return data
@asyncio.coroutine
def get_object_index(self, key):
"""|coro|
Mainly for internal use with get_operator,
returns the "location" index for the key in the definitions
Returns
-------
str
the object's location index"""
defns = yield from self.get_definitions()
for x in defns:
if key in x and "objectIndex" in defns[x]:
return defns[x]["objectIndex"]
return None
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Auth.get_players | python | def get_players(self, name=None, platform=None, uid=None):
if name is None and uid is None:
raise TypeError("name and uid are both None, exactly one must be given")
if name is not None and uid is not None:
raise TypeError("cannot search by uid and name at the same time, please give one or the other")
if platform is None:
raise TypeError("platform cannot be None")
if "platform" not in self.cache: self.cache[platform] = {}
if name:
cache_key = "NAME:%s" % name
else:
cache_key = "UID:%s" % uid
if cache_key in self.cache[platform]:
if self.cachetime > 0 and self.cache[platform][cache_key][0] < time.time():
del self.cache[platform][cache_key]
else:
return self.cache[platform][cache_key][1]
if name:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/profiles?nameOnPlatform=%s&platformType=%s" % (parse.quote(name), parse.quote(platform)))
else:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/users/%s/profiles?platformType=%s" % (uid, parse.quote(platform)))
if "profiles" in data:
results = [Player(self, x) for x in data["profiles"] if x.get("platformType", "") == platform]
if len(results) == 0: raise InvalidRequest("No results")
if self.cachetime != 0:
self.cache[platform][cache_key] = [time.time() + self.cachetime, results]
return results
else:
raise InvalidRequest("Missing key profiles in returned JSON object %s" % str(data)) | |coro|
get a list of players matching the term on that platform,
exactly one of uid and name must be given, platform must be given,
this list almost always has only 1 element, so it's easier to use get_player
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
list[:class:`Player`]
list of found players | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L460-L515 | null | class Auth:
"""Holds your authentication information. Used to retrieve Player objects
Parameters
----------
email : Optional[str]
Your Ubisoft email
password : Optional[str]
Your Ubisoft password
token : Optional[str]
Your Ubisoft auth token, either supply this OR email/password
appid : Optional[str]
Your Ubisoft appid, not required
cachetime : Optional[float]
How long players are cached for (in seconds)
max_connect_retries : Optional[int]
How many times the auth client will automatically try to reconnect, high numbers can get you temporarily banned
Attributes
----------
session
aiohttp client session
token : str
your token
appid : str
your appid
sessionid : str
the current connections session id (will change upon attaining new key)
key : str
your current auth key (will change every time you connect)
spaceids : dict
contains the spaceid for each platform
profileid : str
your profileid (corresponds to your appid)
userid : str
your userid (corresponds to your appid)
cachetime : float
the time players are cached for
cache : dict
the current player cache
"""
@staticmethod
def get_basic_token(email, password):
return base64.b64encode((email + ":" + password).encode("utf-8")).decode("utf-8")
def __init__(self, email=None, password=None, token=None, appid=None,
cachetime=120, max_connect_retries=1, session=None):
if session is not None:
self.session = session
else:
self.session = aiohttp.ClientSession()
self.max_connect_retries = max_connect_retries
if email is not None and password is not None:
self.token = Auth.get_basic_token(email, password)
elif token is not None:
self.token = token
else:
raise TypeError("Argument error, requires either email/password or token to be set, neither given")
if appid is not None:
self.appid = appid
else:
self.appid = "39baebad-39e5-4552-8c25-2c9b919064e2"
self.sessionid = ""
self.key = ""
self.uncertain_spaceid = ""
self.spaceids = {
"uplay": "5172a557-50b5-4665-b7db-e3f2e8c5041d",
"psn": "05bfb3f7-6c21-4c42-be1f-97a33fb5cf66",
"xbl": "98a601e5-ca91-4440-b1c5-753f601a2c90"
}
self.profileid = ""
self.userid = ""
self.genome = ""
self.cachetime = cachetime
self.cache={}
self._definitions = None
self._op_definitions = None
self._login_cooldown = 0
def __del__(self):
self.session.close()
@asyncio.coroutine
def connect(self):
"""|coro|
Connect to ubisoft, automatically called when needed"""
if time.time() < self._login_cooldown:
raise FailedToConnect("login on cooldown")
resp = yield from self.session.post("https://connect.ubi.com/ubiservices/v2/profiles/sessions", headers = {
"Content-Type": "application/json",
"Ubi-AppId": self.appid,
"Authorization": "Basic " + self.token
}, data=json.dumps({"rememberMe": True}))
data = yield from resp.json()
if "ticket" in data:
self.key = data.get("ticket")
self.sessionid = data.get("sessionId")
self.uncertain_spaceid = data.get("spaceId")
else:
raise FailedToConnect
@asyncio.coroutine
def get(self, *args, retries=0, referer=None, json=True, **kwargs):
if not self.key:
for i in range(self.max_connect_retries):
try:
yield from self.connect()
break
except FailedToConnect:
pass
else:
raise FailedToConnect
if "headers" not in kwargs: kwargs["headers"] = {}
kwargs["headers"]["Authorization"] = "Ubi_v1 t=" + self.key
kwargs["headers"]["Ubi-AppId"] = self.appid
kwargs["headers"]["Ubi-SessionId"] = self.sessionid
kwargs["headers"]["Connection"] = "keep-alive"
if referer is not None:
if isinstance(referer, Player):
referer = "https://game-rainbow6.ubi.com/en-gb/uplay/player-statistics/%s/multiplayer" % referer.id
kwargs["headers"]["Referer"] = str(referer)
resp = yield from self.session.get(*args, **kwargs)
if json:
try:
data = yield from resp.json()
except:
text = yield from resp.text()
message = text.split("h1>")
if len(message) > 1:
message = message[1][:-2]
code = 0
if "502" in message: code = 502
else:
message = text
raise InvalidRequest("Received a text response, expected JSON response. Message: %s" % message, code=code)
if "httpCode" in data:
if data["httpCode"] == 401:
if retries >= self.max_connect_retries:
# wait 30 seconds before sending another request
self._login_cooldown = time.time() + 60
raise FailedToConnect
yield from self.connect()
result = yield from self.get(*args, retries=retries+1, **kwargs)
return result
else:
msg = data.get("message", "")
if data["httpCode"] == 404: msg = "missing resource %s" % data.get("resource", args[0])
raise InvalidRequest("HTTP Code: %s, Message: %s" % (data["httpCode"], msg), code=data["httpCode"])
return data
else:
text = yield from resp.text()
return text
@asyncio.coroutine
@asyncio.coroutine
def get_player(self, name=None, platform=None, uid=None):
"""|coro|
Calls get_players and returns the first element,
exactly one of uid and name must be given, platform must be given
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
:class:`Player`
player found"""
results = yield from self.get_players(name=name, platform=platform, uid=uid)
return results[0]
@asyncio.coroutine
def get_operator_definitions(self):
"""|coro|
Retrieves a list of information about operators - their badge, unique statistic, etc.
Returns
-------
dict
operators"""
if self._op_definitions is not None:
return self._op_definitions
resp = yield from self.session.get("https://game-rainbow6.ubi.com/assets/data/operators.24b865895.json")
data = yield from resp.json()
self._op_definitions = data
return data
@asyncio.coroutine
def get_operator_index(self, name):
"""|coro|
Gets the operators index from the operator definitions dict
Returns
-------
str
the operator index"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
return opdefs[name]["index"]
@asyncio.coroutine
def get_operator_statistic(self, name):
"""|coro|
Gets the operator unique statistic from the operator definitions dict
Returns
-------
str
the name of the operator unique statistic"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
# some operators (e.g. Kaid and Nomad) don't have a unique statistic sectoin for some reason...
if "uniqueStatistic" not in opdefs[name] or "pvp" not in opdefs[name]["uniqueStatistic"]:
return None
return opdefs[name]["uniqueStatistic"]["pvp"]["statisticId"]
@asyncio.coroutine
def get_operator_badge(self, name):
"""|coro|
Gets the operator badge URL
Returns
-------
str
the operators badge URL"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
badge = opdefs[name]["badge"]
if not badge.startswith("http"):
badge = "https://game-rainbow6.ubi.com/" + badge
return badge
@asyncio.coroutine
def get_definitions(self):
"""|coro|
Retrieves the list of api definitions, downloading it from Ubisoft if it hasn't been fetched all ready
Primarily for internal use, but could contain useful information.
Returns
-------
dict
definitions"""
if self._definitions is not None:
return self._definitions
resp = yield from self.session.get("https://ubistatic-a.akamaihd.net/0058/prod/assets/data/statistics.definitions.eb165e13.json")
data = yield from resp.json()
self._definitions = data
return data
@asyncio.coroutine
def get_object_index(self, key):
"""|coro|
Mainly for internal use with get_operator,
returns the "location" index for the key in the definitions
Returns
-------
str
the object's location index"""
defns = yield from self.get_definitions()
for x in defns:
if key in x and "objectIndex" in defns[x]:
return defns[x]["objectIndex"]
return None
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Auth.get_player | python | def get_player(self, name=None, platform=None, uid=None):
results = yield from self.get_players(name=name, platform=platform, uid=uid)
return results[0] | |coro|
Calls get_players and returns the first element,
exactly one of uid and name must be given, platform must be given
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
:class:`Player`
player found | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L518-L539 | null | class Auth:
"""Holds your authentication information. Used to retrieve Player objects
Parameters
----------
email : Optional[str]
Your Ubisoft email
password : Optional[str]
Your Ubisoft password
token : Optional[str]
Your Ubisoft auth token, either supply this OR email/password
appid : Optional[str]
Your Ubisoft appid, not required
cachetime : Optional[float]
How long players are cached for (in seconds)
max_connect_retries : Optional[int]
How many times the auth client will automatically try to reconnect, high numbers can get you temporarily banned
Attributes
----------
session
aiohttp client session
token : str
your token
appid : str
your appid
sessionid : str
the current connections session id (will change upon attaining new key)
key : str
your current auth key (will change every time you connect)
spaceids : dict
contains the spaceid for each platform
profileid : str
your profileid (corresponds to your appid)
userid : str
your userid (corresponds to your appid)
cachetime : float
the time players are cached for
cache : dict
the current player cache
"""
@staticmethod
def get_basic_token(email, password):
return base64.b64encode((email + ":" + password).encode("utf-8")).decode("utf-8")
def __init__(self, email=None, password=None, token=None, appid=None,
cachetime=120, max_connect_retries=1, session=None):
if session is not None:
self.session = session
else:
self.session = aiohttp.ClientSession()
self.max_connect_retries = max_connect_retries
if email is not None and password is not None:
self.token = Auth.get_basic_token(email, password)
elif token is not None:
self.token = token
else:
raise TypeError("Argument error, requires either email/password or token to be set, neither given")
if appid is not None:
self.appid = appid
else:
self.appid = "39baebad-39e5-4552-8c25-2c9b919064e2"
self.sessionid = ""
self.key = ""
self.uncertain_spaceid = ""
self.spaceids = {
"uplay": "5172a557-50b5-4665-b7db-e3f2e8c5041d",
"psn": "05bfb3f7-6c21-4c42-be1f-97a33fb5cf66",
"xbl": "98a601e5-ca91-4440-b1c5-753f601a2c90"
}
self.profileid = ""
self.userid = ""
self.genome = ""
self.cachetime = cachetime
self.cache={}
self._definitions = None
self._op_definitions = None
self._login_cooldown = 0
def __del__(self):
self.session.close()
@asyncio.coroutine
def connect(self):
"""|coro|
Connect to ubisoft, automatically called when needed"""
if time.time() < self._login_cooldown:
raise FailedToConnect("login on cooldown")
resp = yield from self.session.post("https://connect.ubi.com/ubiservices/v2/profiles/sessions", headers = {
"Content-Type": "application/json",
"Ubi-AppId": self.appid,
"Authorization": "Basic " + self.token
}, data=json.dumps({"rememberMe": True}))
data = yield from resp.json()
if "ticket" in data:
self.key = data.get("ticket")
self.sessionid = data.get("sessionId")
self.uncertain_spaceid = data.get("spaceId")
else:
raise FailedToConnect
@asyncio.coroutine
def get(self, *args, retries=0, referer=None, json=True, **kwargs):
if not self.key:
for i in range(self.max_connect_retries):
try:
yield from self.connect()
break
except FailedToConnect:
pass
else:
raise FailedToConnect
if "headers" not in kwargs: kwargs["headers"] = {}
kwargs["headers"]["Authorization"] = "Ubi_v1 t=" + self.key
kwargs["headers"]["Ubi-AppId"] = self.appid
kwargs["headers"]["Ubi-SessionId"] = self.sessionid
kwargs["headers"]["Connection"] = "keep-alive"
if referer is not None:
if isinstance(referer, Player):
referer = "https://game-rainbow6.ubi.com/en-gb/uplay/player-statistics/%s/multiplayer" % referer.id
kwargs["headers"]["Referer"] = str(referer)
resp = yield from self.session.get(*args, **kwargs)
if json:
try:
data = yield from resp.json()
except:
text = yield from resp.text()
message = text.split("h1>")
if len(message) > 1:
message = message[1][:-2]
code = 0
if "502" in message: code = 502
else:
message = text
raise InvalidRequest("Received a text response, expected JSON response. Message: %s" % message, code=code)
if "httpCode" in data:
if data["httpCode"] == 401:
if retries >= self.max_connect_retries:
# wait 30 seconds before sending another request
self._login_cooldown = time.time() + 60
raise FailedToConnect
yield from self.connect()
result = yield from self.get(*args, retries=retries+1, **kwargs)
return result
else:
msg = data.get("message", "")
if data["httpCode"] == 404: msg = "missing resource %s" % data.get("resource", args[0])
raise InvalidRequest("HTTP Code: %s, Message: %s" % (data["httpCode"], msg), code=data["httpCode"])
return data
else:
text = yield from resp.text()
return text
@asyncio.coroutine
def get_players(self, name=None, platform=None, uid=None):
"""|coro|
get a list of players matching the term on that platform,
exactly one of uid and name must be given, platform must be given,
this list almost always has only 1 element, so it's easier to use get_player
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
list[:class:`Player`]
list of found players"""
if name is None and uid is None:
raise TypeError("name and uid are both None, exactly one must be given")
if name is not None and uid is not None:
raise TypeError("cannot search by uid and name at the same time, please give one or the other")
if platform is None:
raise TypeError("platform cannot be None")
if "platform" not in self.cache: self.cache[platform] = {}
if name:
cache_key = "NAME:%s" % name
else:
cache_key = "UID:%s" % uid
if cache_key in self.cache[platform]:
if self.cachetime > 0 and self.cache[platform][cache_key][0] < time.time():
del self.cache[platform][cache_key]
else:
return self.cache[platform][cache_key][1]
if name:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/profiles?nameOnPlatform=%s&platformType=%s" % (parse.quote(name), parse.quote(platform)))
else:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/users/%s/profiles?platformType=%s" % (uid, parse.quote(platform)))
if "profiles" in data:
results = [Player(self, x) for x in data["profiles"] if x.get("platformType", "") == platform]
if len(results) == 0: raise InvalidRequest("No results")
if self.cachetime != 0:
self.cache[platform][cache_key] = [time.time() + self.cachetime, results]
return results
else:
raise InvalidRequest("Missing key profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
@asyncio.coroutine
def get_operator_definitions(self):
"""|coro|
Retrieves a list of information about operators - their badge, unique statistic, etc.
Returns
-------
dict
operators"""
if self._op_definitions is not None:
return self._op_definitions
resp = yield from self.session.get("https://game-rainbow6.ubi.com/assets/data/operators.24b865895.json")
data = yield from resp.json()
self._op_definitions = data
return data
@asyncio.coroutine
def get_operator_index(self, name):
"""|coro|
Gets the operators index from the operator definitions dict
Returns
-------
str
the operator index"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
return opdefs[name]["index"]
@asyncio.coroutine
def get_operator_statistic(self, name):
"""|coro|
Gets the operator unique statistic from the operator definitions dict
Returns
-------
str
the name of the operator unique statistic"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
# some operators (e.g. Kaid and Nomad) don't have a unique statistic sectoin for some reason...
if "uniqueStatistic" not in opdefs[name] or "pvp" not in opdefs[name]["uniqueStatistic"]:
return None
return opdefs[name]["uniqueStatistic"]["pvp"]["statisticId"]
@asyncio.coroutine
def get_operator_badge(self, name):
"""|coro|
Gets the operator badge URL
Returns
-------
str
the operators badge URL"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
badge = opdefs[name]["badge"]
if not badge.startswith("http"):
badge = "https://game-rainbow6.ubi.com/" + badge
return badge
@asyncio.coroutine
def get_definitions(self):
"""|coro|
Retrieves the list of api definitions, downloading it from Ubisoft if it hasn't been fetched all ready
Primarily for internal use, but could contain useful information.
Returns
-------
dict
definitions"""
if self._definitions is not None:
return self._definitions
resp = yield from self.session.get("https://ubistatic-a.akamaihd.net/0058/prod/assets/data/statistics.definitions.eb165e13.json")
data = yield from resp.json()
self._definitions = data
return data
@asyncio.coroutine
def get_object_index(self, key):
"""|coro|
Mainly for internal use with get_operator,
returns the "location" index for the key in the definitions
Returns
-------
str
the object's location index"""
defns = yield from self.get_definitions()
for x in defns:
if key in x and "objectIndex" in defns[x]:
return defns[x]["objectIndex"]
return None
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Auth.get_operator_definitions | python | def get_operator_definitions(self):
if self._op_definitions is not None:
return self._op_definitions
resp = yield from self.session.get("https://game-rainbow6.ubi.com/assets/data/operators.24b865895.json")
data = yield from resp.json()
self._op_definitions = data
return data | |coro|
Retrieves a list of information about operators - their badge, unique statistic, etc.
Returns
-------
dict
operators | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L542-L558 | null | class Auth:
"""Holds your authentication information. Used to retrieve Player objects
Parameters
----------
email : Optional[str]
Your Ubisoft email
password : Optional[str]
Your Ubisoft password
token : Optional[str]
Your Ubisoft auth token, either supply this OR email/password
appid : Optional[str]
Your Ubisoft appid, not required
cachetime : Optional[float]
How long players are cached for (in seconds)
max_connect_retries : Optional[int]
How many times the auth client will automatically try to reconnect, high numbers can get you temporarily banned
Attributes
----------
session
aiohttp client session
token : str
your token
appid : str
your appid
sessionid : str
the current connections session id (will change upon attaining new key)
key : str
your current auth key (will change every time you connect)
spaceids : dict
contains the spaceid for each platform
profileid : str
your profileid (corresponds to your appid)
userid : str
your userid (corresponds to your appid)
cachetime : float
the time players are cached for
cache : dict
the current player cache
"""
@staticmethod
def get_basic_token(email, password):
return base64.b64encode((email + ":" + password).encode("utf-8")).decode("utf-8")
def __init__(self, email=None, password=None, token=None, appid=None,
cachetime=120, max_connect_retries=1, session=None):
if session is not None:
self.session = session
else:
self.session = aiohttp.ClientSession()
self.max_connect_retries = max_connect_retries
if email is not None and password is not None:
self.token = Auth.get_basic_token(email, password)
elif token is not None:
self.token = token
else:
raise TypeError("Argument error, requires either email/password or token to be set, neither given")
if appid is not None:
self.appid = appid
else:
self.appid = "39baebad-39e5-4552-8c25-2c9b919064e2"
self.sessionid = ""
self.key = ""
self.uncertain_spaceid = ""
self.spaceids = {
"uplay": "5172a557-50b5-4665-b7db-e3f2e8c5041d",
"psn": "05bfb3f7-6c21-4c42-be1f-97a33fb5cf66",
"xbl": "98a601e5-ca91-4440-b1c5-753f601a2c90"
}
self.profileid = ""
self.userid = ""
self.genome = ""
self.cachetime = cachetime
self.cache={}
self._definitions = None
self._op_definitions = None
self._login_cooldown = 0
def __del__(self):
self.session.close()
@asyncio.coroutine
def connect(self):
"""|coro|
Connect to ubisoft, automatically called when needed"""
if time.time() < self._login_cooldown:
raise FailedToConnect("login on cooldown")
resp = yield from self.session.post("https://connect.ubi.com/ubiservices/v2/profiles/sessions", headers = {
"Content-Type": "application/json",
"Ubi-AppId": self.appid,
"Authorization": "Basic " + self.token
}, data=json.dumps({"rememberMe": True}))
data = yield from resp.json()
if "ticket" in data:
self.key = data.get("ticket")
self.sessionid = data.get("sessionId")
self.uncertain_spaceid = data.get("spaceId")
else:
raise FailedToConnect
@asyncio.coroutine
def get(self, *args, retries=0, referer=None, json=True, **kwargs):
if not self.key:
for i in range(self.max_connect_retries):
try:
yield from self.connect()
break
except FailedToConnect:
pass
else:
raise FailedToConnect
if "headers" not in kwargs: kwargs["headers"] = {}
kwargs["headers"]["Authorization"] = "Ubi_v1 t=" + self.key
kwargs["headers"]["Ubi-AppId"] = self.appid
kwargs["headers"]["Ubi-SessionId"] = self.sessionid
kwargs["headers"]["Connection"] = "keep-alive"
if referer is not None:
if isinstance(referer, Player):
referer = "https://game-rainbow6.ubi.com/en-gb/uplay/player-statistics/%s/multiplayer" % referer.id
kwargs["headers"]["Referer"] = str(referer)
resp = yield from self.session.get(*args, **kwargs)
if json:
try:
data = yield from resp.json()
except:
text = yield from resp.text()
message = text.split("h1>")
if len(message) > 1:
message = message[1][:-2]
code = 0
if "502" in message: code = 502
else:
message = text
raise InvalidRequest("Received a text response, expected JSON response. Message: %s" % message, code=code)
if "httpCode" in data:
if data["httpCode"] == 401:
if retries >= self.max_connect_retries:
# wait 30 seconds before sending another request
self._login_cooldown = time.time() + 60
raise FailedToConnect
yield from self.connect()
result = yield from self.get(*args, retries=retries+1, **kwargs)
return result
else:
msg = data.get("message", "")
if data["httpCode"] == 404: msg = "missing resource %s" % data.get("resource", args[0])
raise InvalidRequest("HTTP Code: %s, Message: %s" % (data["httpCode"], msg), code=data["httpCode"])
return data
else:
text = yield from resp.text()
return text
@asyncio.coroutine
def get_players(self, name=None, platform=None, uid=None):
"""|coro|
get a list of players matching the term on that platform,
exactly one of uid and name must be given, platform must be given,
this list almost always has only 1 element, so it's easier to use get_player
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
list[:class:`Player`]
list of found players"""
if name is None and uid is None:
raise TypeError("name and uid are both None, exactly one must be given")
if name is not None and uid is not None:
raise TypeError("cannot search by uid and name at the same time, please give one or the other")
if platform is None:
raise TypeError("platform cannot be None")
if "platform" not in self.cache: self.cache[platform] = {}
if name:
cache_key = "NAME:%s" % name
else:
cache_key = "UID:%s" % uid
if cache_key in self.cache[platform]:
if self.cachetime > 0 and self.cache[platform][cache_key][0] < time.time():
del self.cache[platform][cache_key]
else:
return self.cache[platform][cache_key][1]
if name:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/profiles?nameOnPlatform=%s&platformType=%s" % (parse.quote(name), parse.quote(platform)))
else:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/users/%s/profiles?platformType=%s" % (uid, parse.quote(platform)))
if "profiles" in data:
results = [Player(self, x) for x in data["profiles"] if x.get("platformType", "") == platform]
if len(results) == 0: raise InvalidRequest("No results")
if self.cachetime != 0:
self.cache[platform][cache_key] = [time.time() + self.cachetime, results]
return results
else:
raise InvalidRequest("Missing key profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_player(self, name=None, platform=None, uid=None):
"""|coro|
Calls get_players and returns the first element,
exactly one of uid and name must be given, platform must be given
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
:class:`Player`
player found"""
results = yield from self.get_players(name=name, platform=platform, uid=uid)
return results[0]
@asyncio.coroutine
@asyncio.coroutine
def get_operator_index(self, name):
"""|coro|
Gets the operators index from the operator definitions dict
Returns
-------
str
the operator index"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
return opdefs[name]["index"]
@asyncio.coroutine
def get_operator_statistic(self, name):
"""|coro|
Gets the operator unique statistic from the operator definitions dict
Returns
-------
str
the name of the operator unique statistic"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
# some operators (e.g. Kaid and Nomad) don't have a unique statistic sectoin for some reason...
if "uniqueStatistic" not in opdefs[name] or "pvp" not in opdefs[name]["uniqueStatistic"]:
return None
return opdefs[name]["uniqueStatistic"]["pvp"]["statisticId"]
@asyncio.coroutine
def get_operator_badge(self, name):
"""|coro|
Gets the operator badge URL
Returns
-------
str
the operators badge URL"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
badge = opdefs[name]["badge"]
if not badge.startswith("http"):
badge = "https://game-rainbow6.ubi.com/" + badge
return badge
@asyncio.coroutine
def get_definitions(self):
"""|coro|
Retrieves the list of api definitions, downloading it from Ubisoft if it hasn't been fetched all ready
Primarily for internal use, but could contain useful information.
Returns
-------
dict
definitions"""
if self._definitions is not None:
return self._definitions
resp = yield from self.session.get("https://ubistatic-a.akamaihd.net/0058/prod/assets/data/statistics.definitions.eb165e13.json")
data = yield from resp.json()
self._definitions = data
return data
@asyncio.coroutine
def get_object_index(self, key):
"""|coro|
Mainly for internal use with get_operator,
returns the "location" index for the key in the definitions
Returns
-------
str
the object's location index"""
defns = yield from self.get_definitions()
for x in defns:
if key in x and "objectIndex" in defns[x]:
return defns[x]["objectIndex"]
return None
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Auth.get_operator_index | python | def get_operator_index(self, name):
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
return opdefs[name]["index"] | |coro|
Gets the operators index from the operator definitions dict
Returns
-------
str
the operator index | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L561-L576 | null | class Auth:
"""Holds your authentication information. Used to retrieve Player objects
Parameters
----------
email : Optional[str]
Your Ubisoft email
password : Optional[str]
Your Ubisoft password
token : Optional[str]
Your Ubisoft auth token, either supply this OR email/password
appid : Optional[str]
Your Ubisoft appid, not required
cachetime : Optional[float]
How long players are cached for (in seconds)
max_connect_retries : Optional[int]
How many times the auth client will automatically try to reconnect, high numbers can get you temporarily banned
Attributes
----------
session
aiohttp client session
token : str
your token
appid : str
your appid
sessionid : str
the current connections session id (will change upon attaining new key)
key : str
your current auth key (will change every time you connect)
spaceids : dict
contains the spaceid for each platform
profileid : str
your profileid (corresponds to your appid)
userid : str
your userid (corresponds to your appid)
cachetime : float
the time players are cached for
cache : dict
the current player cache
"""
@staticmethod
def get_basic_token(email, password):
return base64.b64encode((email + ":" + password).encode("utf-8")).decode("utf-8")
def __init__(self, email=None, password=None, token=None, appid=None,
cachetime=120, max_connect_retries=1, session=None):
if session is not None:
self.session = session
else:
self.session = aiohttp.ClientSession()
self.max_connect_retries = max_connect_retries
if email is not None and password is not None:
self.token = Auth.get_basic_token(email, password)
elif token is not None:
self.token = token
else:
raise TypeError("Argument error, requires either email/password or token to be set, neither given")
if appid is not None:
self.appid = appid
else:
self.appid = "39baebad-39e5-4552-8c25-2c9b919064e2"
self.sessionid = ""
self.key = ""
self.uncertain_spaceid = ""
self.spaceids = {
"uplay": "5172a557-50b5-4665-b7db-e3f2e8c5041d",
"psn": "05bfb3f7-6c21-4c42-be1f-97a33fb5cf66",
"xbl": "98a601e5-ca91-4440-b1c5-753f601a2c90"
}
self.profileid = ""
self.userid = ""
self.genome = ""
self.cachetime = cachetime
self.cache={}
self._definitions = None
self._op_definitions = None
self._login_cooldown = 0
def __del__(self):
self.session.close()
@asyncio.coroutine
def connect(self):
"""|coro|
Connect to ubisoft, automatically called when needed"""
if time.time() < self._login_cooldown:
raise FailedToConnect("login on cooldown")
resp = yield from self.session.post("https://connect.ubi.com/ubiservices/v2/profiles/sessions", headers = {
"Content-Type": "application/json",
"Ubi-AppId": self.appid,
"Authorization": "Basic " + self.token
}, data=json.dumps({"rememberMe": True}))
data = yield from resp.json()
if "ticket" in data:
self.key = data.get("ticket")
self.sessionid = data.get("sessionId")
self.uncertain_spaceid = data.get("spaceId")
else:
raise FailedToConnect
@asyncio.coroutine
def get(self, *args, retries=0, referer=None, json=True, **kwargs):
if not self.key:
for i in range(self.max_connect_retries):
try:
yield from self.connect()
break
except FailedToConnect:
pass
else:
raise FailedToConnect
if "headers" not in kwargs: kwargs["headers"] = {}
kwargs["headers"]["Authorization"] = "Ubi_v1 t=" + self.key
kwargs["headers"]["Ubi-AppId"] = self.appid
kwargs["headers"]["Ubi-SessionId"] = self.sessionid
kwargs["headers"]["Connection"] = "keep-alive"
if referer is not None:
if isinstance(referer, Player):
referer = "https://game-rainbow6.ubi.com/en-gb/uplay/player-statistics/%s/multiplayer" % referer.id
kwargs["headers"]["Referer"] = str(referer)
resp = yield from self.session.get(*args, **kwargs)
if json:
try:
data = yield from resp.json()
except:
text = yield from resp.text()
message = text.split("h1>")
if len(message) > 1:
message = message[1][:-2]
code = 0
if "502" in message: code = 502
else:
message = text
raise InvalidRequest("Received a text response, expected JSON response. Message: %s" % message, code=code)
if "httpCode" in data:
if data["httpCode"] == 401:
if retries >= self.max_connect_retries:
# wait 30 seconds before sending another request
self._login_cooldown = time.time() + 60
raise FailedToConnect
yield from self.connect()
result = yield from self.get(*args, retries=retries+1, **kwargs)
return result
else:
msg = data.get("message", "")
if data["httpCode"] == 404: msg = "missing resource %s" % data.get("resource", args[0])
raise InvalidRequest("HTTP Code: %s, Message: %s" % (data["httpCode"], msg), code=data["httpCode"])
return data
else:
text = yield from resp.text()
return text
@asyncio.coroutine
def get_players(self, name=None, platform=None, uid=None):
"""|coro|
get a list of players matching the term on that platform,
exactly one of uid and name must be given, platform must be given,
this list almost always has only 1 element, so it's easier to use get_player
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
list[:class:`Player`]
list of found players"""
if name is None and uid is None:
raise TypeError("name and uid are both None, exactly one must be given")
if name is not None and uid is not None:
raise TypeError("cannot search by uid and name at the same time, please give one or the other")
if platform is None:
raise TypeError("platform cannot be None")
if "platform" not in self.cache: self.cache[platform] = {}
if name:
cache_key = "NAME:%s" % name
else:
cache_key = "UID:%s" % uid
if cache_key in self.cache[platform]:
if self.cachetime > 0 and self.cache[platform][cache_key][0] < time.time():
del self.cache[platform][cache_key]
else:
return self.cache[platform][cache_key][1]
if name:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/profiles?nameOnPlatform=%s&platformType=%s" % (parse.quote(name), parse.quote(platform)))
else:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/users/%s/profiles?platformType=%s" % (uid, parse.quote(platform)))
if "profiles" in data:
results = [Player(self, x) for x in data["profiles"] if x.get("platformType", "") == platform]
if len(results) == 0: raise InvalidRequest("No results")
if self.cachetime != 0:
self.cache[platform][cache_key] = [time.time() + self.cachetime, results]
return results
else:
raise InvalidRequest("Missing key profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_player(self, name=None, platform=None, uid=None):
"""|coro|
Calls get_players and returns the first element,
exactly one of uid and name must be given, platform must be given
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
:class:`Player`
player found"""
results = yield from self.get_players(name=name, platform=platform, uid=uid)
return results[0]
@asyncio.coroutine
def get_operator_definitions(self):
"""|coro|
Retrieves a list of information about operators - their badge, unique statistic, etc.
Returns
-------
dict
operators"""
if self._op_definitions is not None:
return self._op_definitions
resp = yield from self.session.get("https://game-rainbow6.ubi.com/assets/data/operators.24b865895.json")
data = yield from resp.json()
self._op_definitions = data
return data
@asyncio.coroutine
@asyncio.coroutine
def get_operator_statistic(self, name):
"""|coro|
Gets the operator unique statistic from the operator definitions dict
Returns
-------
str
the name of the operator unique statistic"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
# some operators (e.g. Kaid and Nomad) don't have a unique statistic sectoin for some reason...
if "uniqueStatistic" not in opdefs[name] or "pvp" not in opdefs[name]["uniqueStatistic"]:
return None
return opdefs[name]["uniqueStatistic"]["pvp"]["statisticId"]
@asyncio.coroutine
def get_operator_badge(self, name):
"""|coro|
Gets the operator badge URL
Returns
-------
str
the operators badge URL"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
badge = opdefs[name]["badge"]
if not badge.startswith("http"):
badge = "https://game-rainbow6.ubi.com/" + badge
return badge
@asyncio.coroutine
def get_definitions(self):
"""|coro|
Retrieves the list of api definitions, downloading it from Ubisoft if it hasn't been fetched all ready
Primarily for internal use, but could contain useful information.
Returns
-------
dict
definitions"""
if self._definitions is not None:
return self._definitions
resp = yield from self.session.get("https://ubistatic-a.akamaihd.net/0058/prod/assets/data/statistics.definitions.eb165e13.json")
data = yield from resp.json()
self._definitions = data
return data
@asyncio.coroutine
def get_object_index(self, key):
"""|coro|
Mainly for internal use with get_operator,
returns the "location" index for the key in the definitions
Returns
-------
str
the object's location index"""
defns = yield from self.get_definitions()
for x in defns:
if key in x and "objectIndex" in defns[x]:
return defns[x]["objectIndex"]
return None
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Auth.get_operator_statistic | python | def get_operator_statistic(self, name):
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
# some operators (e.g. Kaid and Nomad) don't have a unique statistic sectoin for some reason...
if "uniqueStatistic" not in opdefs[name] or "pvp" not in opdefs[name]["uniqueStatistic"]:
return None
return opdefs[name]["uniqueStatistic"]["pvp"]["statisticId"] | |coro|
Gets the operator unique statistic from the operator definitions dict
Returns
-------
str
the name of the operator unique statistic | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L579-L598 | null | class Auth:
"""Holds your authentication information. Used to retrieve Player objects
Parameters
----------
email : Optional[str]
Your Ubisoft email
password : Optional[str]
Your Ubisoft password
token : Optional[str]
Your Ubisoft auth token, either supply this OR email/password
appid : Optional[str]
Your Ubisoft appid, not required
cachetime : Optional[float]
How long players are cached for (in seconds)
max_connect_retries : Optional[int]
How many times the auth client will automatically try to reconnect, high numbers can get you temporarily banned
Attributes
----------
session
aiohttp client session
token : str
your token
appid : str
your appid
sessionid : str
the current connections session id (will change upon attaining new key)
key : str
your current auth key (will change every time you connect)
spaceids : dict
contains the spaceid for each platform
profileid : str
your profileid (corresponds to your appid)
userid : str
your userid (corresponds to your appid)
cachetime : float
the time players are cached for
cache : dict
the current player cache
"""
@staticmethod
def get_basic_token(email, password):
return base64.b64encode((email + ":" + password).encode("utf-8")).decode("utf-8")
def __init__(self, email=None, password=None, token=None, appid=None,
cachetime=120, max_connect_retries=1, session=None):
if session is not None:
self.session = session
else:
self.session = aiohttp.ClientSession()
self.max_connect_retries = max_connect_retries
if email is not None and password is not None:
self.token = Auth.get_basic_token(email, password)
elif token is not None:
self.token = token
else:
raise TypeError("Argument error, requires either email/password or token to be set, neither given")
if appid is not None:
self.appid = appid
else:
self.appid = "39baebad-39e5-4552-8c25-2c9b919064e2"
self.sessionid = ""
self.key = ""
self.uncertain_spaceid = ""
self.spaceids = {
"uplay": "5172a557-50b5-4665-b7db-e3f2e8c5041d",
"psn": "05bfb3f7-6c21-4c42-be1f-97a33fb5cf66",
"xbl": "98a601e5-ca91-4440-b1c5-753f601a2c90"
}
self.profileid = ""
self.userid = ""
self.genome = ""
self.cachetime = cachetime
self.cache={}
self._definitions = None
self._op_definitions = None
self._login_cooldown = 0
def __del__(self):
self.session.close()
@asyncio.coroutine
def connect(self):
"""|coro|
Connect to ubisoft, automatically called when needed"""
if time.time() < self._login_cooldown:
raise FailedToConnect("login on cooldown")
resp = yield from self.session.post("https://connect.ubi.com/ubiservices/v2/profiles/sessions", headers = {
"Content-Type": "application/json",
"Ubi-AppId": self.appid,
"Authorization": "Basic " + self.token
}, data=json.dumps({"rememberMe": True}))
data = yield from resp.json()
if "ticket" in data:
self.key = data.get("ticket")
self.sessionid = data.get("sessionId")
self.uncertain_spaceid = data.get("spaceId")
else:
raise FailedToConnect
@asyncio.coroutine
def get(self, *args, retries=0, referer=None, json=True, **kwargs):
if not self.key:
for i in range(self.max_connect_retries):
try:
yield from self.connect()
break
except FailedToConnect:
pass
else:
raise FailedToConnect
if "headers" not in kwargs: kwargs["headers"] = {}
kwargs["headers"]["Authorization"] = "Ubi_v1 t=" + self.key
kwargs["headers"]["Ubi-AppId"] = self.appid
kwargs["headers"]["Ubi-SessionId"] = self.sessionid
kwargs["headers"]["Connection"] = "keep-alive"
if referer is not None:
if isinstance(referer, Player):
referer = "https://game-rainbow6.ubi.com/en-gb/uplay/player-statistics/%s/multiplayer" % referer.id
kwargs["headers"]["Referer"] = str(referer)
resp = yield from self.session.get(*args, **kwargs)
if json:
try:
data = yield from resp.json()
except:
text = yield from resp.text()
message = text.split("h1>")
if len(message) > 1:
message = message[1][:-2]
code = 0
if "502" in message: code = 502
else:
message = text
raise InvalidRequest("Received a text response, expected JSON response. Message: %s" % message, code=code)
if "httpCode" in data:
if data["httpCode"] == 401:
if retries >= self.max_connect_retries:
# wait 30 seconds before sending another request
self._login_cooldown = time.time() + 60
raise FailedToConnect
yield from self.connect()
result = yield from self.get(*args, retries=retries+1, **kwargs)
return result
else:
msg = data.get("message", "")
if data["httpCode"] == 404: msg = "missing resource %s" % data.get("resource", args[0])
raise InvalidRequest("HTTP Code: %s, Message: %s" % (data["httpCode"], msg), code=data["httpCode"])
return data
else:
text = yield from resp.text()
return text
@asyncio.coroutine
def get_players(self, name=None, platform=None, uid=None):
"""|coro|
get a list of players matching the term on that platform,
exactly one of uid and name must be given, platform must be given,
this list almost always has only 1 element, so it's easier to use get_player
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
list[:class:`Player`]
list of found players"""
if name is None and uid is None:
raise TypeError("name and uid are both None, exactly one must be given")
if name is not None and uid is not None:
raise TypeError("cannot search by uid and name at the same time, please give one or the other")
if platform is None:
raise TypeError("platform cannot be None")
if "platform" not in self.cache: self.cache[platform] = {}
if name:
cache_key = "NAME:%s" % name
else:
cache_key = "UID:%s" % uid
if cache_key in self.cache[platform]:
if self.cachetime > 0 and self.cache[platform][cache_key][0] < time.time():
del self.cache[platform][cache_key]
else:
return self.cache[platform][cache_key][1]
if name:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/profiles?nameOnPlatform=%s&platformType=%s" % (parse.quote(name), parse.quote(platform)))
else:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/users/%s/profiles?platformType=%s" % (uid, parse.quote(platform)))
if "profiles" in data:
results = [Player(self, x) for x in data["profiles"] if x.get("platformType", "") == platform]
if len(results) == 0: raise InvalidRequest("No results")
if self.cachetime != 0:
self.cache[platform][cache_key] = [time.time() + self.cachetime, results]
return results
else:
raise InvalidRequest("Missing key profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_player(self, name=None, platform=None, uid=None):
"""|coro|
Calls get_players and returns the first element,
exactly one of uid and name must be given, platform must be given
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
:class:`Player`
player found"""
results = yield from self.get_players(name=name, platform=platform, uid=uid)
return results[0]
@asyncio.coroutine
def get_operator_definitions(self):
"""|coro|
Retrieves a list of information about operators - their badge, unique statistic, etc.
Returns
-------
dict
operators"""
if self._op_definitions is not None:
return self._op_definitions
resp = yield from self.session.get("https://game-rainbow6.ubi.com/assets/data/operators.24b865895.json")
data = yield from resp.json()
self._op_definitions = data
return data
@asyncio.coroutine
def get_operator_index(self, name):
"""|coro|
Gets the operators index from the operator definitions dict
Returns
-------
str
the operator index"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
return opdefs[name]["index"]
@asyncio.coroutine
@asyncio.coroutine
def get_operator_badge(self, name):
"""|coro|
Gets the operator badge URL
Returns
-------
str
the operators badge URL"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
badge = opdefs[name]["badge"]
if not badge.startswith("http"):
badge = "https://game-rainbow6.ubi.com/" + badge
return badge
@asyncio.coroutine
def get_definitions(self):
"""|coro|
Retrieves the list of api definitions, downloading it from Ubisoft if it hasn't been fetched all ready
Primarily for internal use, but could contain useful information.
Returns
-------
dict
definitions"""
if self._definitions is not None:
return self._definitions
resp = yield from self.session.get("https://ubistatic-a.akamaihd.net/0058/prod/assets/data/statistics.definitions.eb165e13.json")
data = yield from resp.json()
self._definitions = data
return data
@asyncio.coroutine
def get_object_index(self, key):
"""|coro|
Mainly for internal use with get_operator,
returns the "location" index for the key in the definitions
Returns
-------
str
the object's location index"""
defns = yield from self.get_definitions()
for x in defns:
if key in x and "objectIndex" in defns[x]:
return defns[x]["objectIndex"]
return None
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Auth.get_operator_badge | python | def get_operator_badge(self, name):
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
badge = opdefs[name]["badge"]
if not badge.startswith("http"):
badge = "https://game-rainbow6.ubi.com/" + badge
return badge | |coro|
Gets the operator badge URL
Returns
-------
str
the operators badge URL | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L601-L621 | null | class Auth:
"""Holds your authentication information. Used to retrieve Player objects
Parameters
----------
email : Optional[str]
Your Ubisoft email
password : Optional[str]
Your Ubisoft password
token : Optional[str]
Your Ubisoft auth token, either supply this OR email/password
appid : Optional[str]
Your Ubisoft appid, not required
cachetime : Optional[float]
How long players are cached for (in seconds)
max_connect_retries : Optional[int]
How many times the auth client will automatically try to reconnect, high numbers can get you temporarily banned
Attributes
----------
session
aiohttp client session
token : str
your token
appid : str
your appid
sessionid : str
the current connections session id (will change upon attaining new key)
key : str
your current auth key (will change every time you connect)
spaceids : dict
contains the spaceid for each platform
profileid : str
your profileid (corresponds to your appid)
userid : str
your userid (corresponds to your appid)
cachetime : float
the time players are cached for
cache : dict
the current player cache
"""
@staticmethod
def get_basic_token(email, password):
return base64.b64encode((email + ":" + password).encode("utf-8")).decode("utf-8")
def __init__(self, email=None, password=None, token=None, appid=None,
cachetime=120, max_connect_retries=1, session=None):
if session is not None:
self.session = session
else:
self.session = aiohttp.ClientSession()
self.max_connect_retries = max_connect_retries
if email is not None and password is not None:
self.token = Auth.get_basic_token(email, password)
elif token is not None:
self.token = token
else:
raise TypeError("Argument error, requires either email/password or token to be set, neither given")
if appid is not None:
self.appid = appid
else:
self.appid = "39baebad-39e5-4552-8c25-2c9b919064e2"
self.sessionid = ""
self.key = ""
self.uncertain_spaceid = ""
self.spaceids = {
"uplay": "5172a557-50b5-4665-b7db-e3f2e8c5041d",
"psn": "05bfb3f7-6c21-4c42-be1f-97a33fb5cf66",
"xbl": "98a601e5-ca91-4440-b1c5-753f601a2c90"
}
self.profileid = ""
self.userid = ""
self.genome = ""
self.cachetime = cachetime
self.cache={}
self._definitions = None
self._op_definitions = None
self._login_cooldown = 0
def __del__(self):
self.session.close()
@asyncio.coroutine
def connect(self):
"""|coro|
Connect to ubisoft, automatically called when needed"""
if time.time() < self._login_cooldown:
raise FailedToConnect("login on cooldown")
resp = yield from self.session.post("https://connect.ubi.com/ubiservices/v2/profiles/sessions", headers = {
"Content-Type": "application/json",
"Ubi-AppId": self.appid,
"Authorization": "Basic " + self.token
}, data=json.dumps({"rememberMe": True}))
data = yield from resp.json()
if "ticket" in data:
self.key = data.get("ticket")
self.sessionid = data.get("sessionId")
self.uncertain_spaceid = data.get("spaceId")
else:
raise FailedToConnect
@asyncio.coroutine
def get(self, *args, retries=0, referer=None, json=True, **kwargs):
if not self.key:
for i in range(self.max_connect_retries):
try:
yield from self.connect()
break
except FailedToConnect:
pass
else:
raise FailedToConnect
if "headers" not in kwargs: kwargs["headers"] = {}
kwargs["headers"]["Authorization"] = "Ubi_v1 t=" + self.key
kwargs["headers"]["Ubi-AppId"] = self.appid
kwargs["headers"]["Ubi-SessionId"] = self.sessionid
kwargs["headers"]["Connection"] = "keep-alive"
if referer is not None:
if isinstance(referer, Player):
referer = "https://game-rainbow6.ubi.com/en-gb/uplay/player-statistics/%s/multiplayer" % referer.id
kwargs["headers"]["Referer"] = str(referer)
resp = yield from self.session.get(*args, **kwargs)
if json:
try:
data = yield from resp.json()
except:
text = yield from resp.text()
message = text.split("h1>")
if len(message) > 1:
message = message[1][:-2]
code = 0
if "502" in message: code = 502
else:
message = text
raise InvalidRequest("Received a text response, expected JSON response. Message: %s" % message, code=code)
if "httpCode" in data:
if data["httpCode"] == 401:
if retries >= self.max_connect_retries:
# wait 30 seconds before sending another request
self._login_cooldown = time.time() + 60
raise FailedToConnect
yield from self.connect()
result = yield from self.get(*args, retries=retries+1, **kwargs)
return result
else:
msg = data.get("message", "")
if data["httpCode"] == 404: msg = "missing resource %s" % data.get("resource", args[0])
raise InvalidRequest("HTTP Code: %s, Message: %s" % (data["httpCode"], msg), code=data["httpCode"])
return data
else:
text = yield from resp.text()
return text
@asyncio.coroutine
def get_players(self, name=None, platform=None, uid=None):
"""|coro|
get a list of players matching the term on that platform,
exactly one of uid and name must be given, platform must be given,
this list almost always has only 1 element, so it's easier to use get_player
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
list[:class:`Player`]
list of found players"""
if name is None and uid is None:
raise TypeError("name and uid are both None, exactly one must be given")
if name is not None and uid is not None:
raise TypeError("cannot search by uid and name at the same time, please give one or the other")
if platform is None:
raise TypeError("platform cannot be None")
if "platform" not in self.cache: self.cache[platform] = {}
if name:
cache_key = "NAME:%s" % name
else:
cache_key = "UID:%s" % uid
if cache_key in self.cache[platform]:
if self.cachetime > 0 and self.cache[platform][cache_key][0] < time.time():
del self.cache[platform][cache_key]
else:
return self.cache[platform][cache_key][1]
if name:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/profiles?nameOnPlatform=%s&platformType=%s" % (parse.quote(name), parse.quote(platform)))
else:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/users/%s/profiles?platformType=%s" % (uid, parse.quote(platform)))
if "profiles" in data:
results = [Player(self, x) for x in data["profiles"] if x.get("platformType", "") == platform]
if len(results) == 0: raise InvalidRequest("No results")
if self.cachetime != 0:
self.cache[platform][cache_key] = [time.time() + self.cachetime, results]
return results
else:
raise InvalidRequest("Missing key profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_player(self, name=None, platform=None, uid=None):
"""|coro|
Calls get_players and returns the first element,
exactly one of uid and name must be given, platform must be given
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
:class:`Player`
player found"""
results = yield from self.get_players(name=name, platform=platform, uid=uid)
return results[0]
@asyncio.coroutine
def get_operator_definitions(self):
"""|coro|
Retrieves a list of information about operators - their badge, unique statistic, etc.
Returns
-------
dict
operators"""
if self._op_definitions is not None:
return self._op_definitions
resp = yield from self.session.get("https://game-rainbow6.ubi.com/assets/data/operators.24b865895.json")
data = yield from resp.json()
self._op_definitions = data
return data
@asyncio.coroutine
def get_operator_index(self, name):
"""|coro|
Gets the operators index from the operator definitions dict
Returns
-------
str
the operator index"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
return opdefs[name]["index"]
@asyncio.coroutine
def get_operator_statistic(self, name):
"""|coro|
Gets the operator unique statistic from the operator definitions dict
Returns
-------
str
the name of the operator unique statistic"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
# some operators (e.g. Kaid and Nomad) don't have a unique statistic sectoin for some reason...
if "uniqueStatistic" not in opdefs[name] or "pvp" not in opdefs[name]["uniqueStatistic"]:
return None
return opdefs[name]["uniqueStatistic"]["pvp"]["statisticId"]
@asyncio.coroutine
@asyncio.coroutine
def get_definitions(self):
"""|coro|
Retrieves the list of api definitions, downloading it from Ubisoft if it hasn't been fetched all ready
Primarily for internal use, but could contain useful information.
Returns
-------
dict
definitions"""
if self._definitions is not None:
return self._definitions
resp = yield from self.session.get("https://ubistatic-a.akamaihd.net/0058/prod/assets/data/statistics.definitions.eb165e13.json")
data = yield from resp.json()
self._definitions = data
return data
@asyncio.coroutine
def get_object_index(self, key):
"""|coro|
Mainly for internal use with get_operator,
returns the "location" index for the key in the definitions
Returns
-------
str
the object's location index"""
defns = yield from self.get_definitions()
for x in defns:
if key in x and "objectIndex" in defns[x]:
return defns[x]["objectIndex"]
return None
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Auth.get_definitions | python | def get_definitions(self):
if self._definitions is not None:
return self._definitions
resp = yield from self.session.get("https://ubistatic-a.akamaihd.net/0058/prod/assets/data/statistics.definitions.eb165e13.json")
data = yield from resp.json()
self._definitions = data
return data | |coro|
Retrieves the list of api definitions, downloading it from Ubisoft if it hasn't been fetched all ready
Primarily for internal use, but could contain useful information.
Returns
-------
dict
definitions | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L625-L642 | null | class Auth:
"""Holds your authentication information. Used to retrieve Player objects
Parameters
----------
email : Optional[str]
Your Ubisoft email
password : Optional[str]
Your Ubisoft password
token : Optional[str]
Your Ubisoft auth token, either supply this OR email/password
appid : Optional[str]
Your Ubisoft appid, not required
cachetime : Optional[float]
How long players are cached for (in seconds)
max_connect_retries : Optional[int]
How many times the auth client will automatically try to reconnect, high numbers can get you temporarily banned
Attributes
----------
session
aiohttp client session
token : str
your token
appid : str
your appid
sessionid : str
the current connections session id (will change upon attaining new key)
key : str
your current auth key (will change every time you connect)
spaceids : dict
contains the spaceid for each platform
profileid : str
your profileid (corresponds to your appid)
userid : str
your userid (corresponds to your appid)
cachetime : float
the time players are cached for
cache : dict
the current player cache
"""
@staticmethod
def get_basic_token(email, password):
return base64.b64encode((email + ":" + password).encode("utf-8")).decode("utf-8")
def __init__(self, email=None, password=None, token=None, appid=None,
cachetime=120, max_connect_retries=1, session=None):
if session is not None:
self.session = session
else:
self.session = aiohttp.ClientSession()
self.max_connect_retries = max_connect_retries
if email is not None and password is not None:
self.token = Auth.get_basic_token(email, password)
elif token is not None:
self.token = token
else:
raise TypeError("Argument error, requires either email/password or token to be set, neither given")
if appid is not None:
self.appid = appid
else:
self.appid = "39baebad-39e5-4552-8c25-2c9b919064e2"
self.sessionid = ""
self.key = ""
self.uncertain_spaceid = ""
self.spaceids = {
"uplay": "5172a557-50b5-4665-b7db-e3f2e8c5041d",
"psn": "05bfb3f7-6c21-4c42-be1f-97a33fb5cf66",
"xbl": "98a601e5-ca91-4440-b1c5-753f601a2c90"
}
self.profileid = ""
self.userid = ""
self.genome = ""
self.cachetime = cachetime
self.cache={}
self._definitions = None
self._op_definitions = None
self._login_cooldown = 0
def __del__(self):
self.session.close()
@asyncio.coroutine
def connect(self):
"""|coro|
Connect to ubisoft, automatically called when needed"""
if time.time() < self._login_cooldown:
raise FailedToConnect("login on cooldown")
resp = yield from self.session.post("https://connect.ubi.com/ubiservices/v2/profiles/sessions", headers = {
"Content-Type": "application/json",
"Ubi-AppId": self.appid,
"Authorization": "Basic " + self.token
}, data=json.dumps({"rememberMe": True}))
data = yield from resp.json()
if "ticket" in data:
self.key = data.get("ticket")
self.sessionid = data.get("sessionId")
self.uncertain_spaceid = data.get("spaceId")
else:
raise FailedToConnect
@asyncio.coroutine
def get(self, *args, retries=0, referer=None, json=True, **kwargs):
if not self.key:
for i in range(self.max_connect_retries):
try:
yield from self.connect()
break
except FailedToConnect:
pass
else:
raise FailedToConnect
if "headers" not in kwargs: kwargs["headers"] = {}
kwargs["headers"]["Authorization"] = "Ubi_v1 t=" + self.key
kwargs["headers"]["Ubi-AppId"] = self.appid
kwargs["headers"]["Ubi-SessionId"] = self.sessionid
kwargs["headers"]["Connection"] = "keep-alive"
if referer is not None:
if isinstance(referer, Player):
referer = "https://game-rainbow6.ubi.com/en-gb/uplay/player-statistics/%s/multiplayer" % referer.id
kwargs["headers"]["Referer"] = str(referer)
resp = yield from self.session.get(*args, **kwargs)
if json:
try:
data = yield from resp.json()
except:
text = yield from resp.text()
message = text.split("h1>")
if len(message) > 1:
message = message[1][:-2]
code = 0
if "502" in message: code = 502
else:
message = text
raise InvalidRequest("Received a text response, expected JSON response. Message: %s" % message, code=code)
if "httpCode" in data:
if data["httpCode"] == 401:
if retries >= self.max_connect_retries:
# wait 30 seconds before sending another request
self._login_cooldown = time.time() + 60
raise FailedToConnect
yield from self.connect()
result = yield from self.get(*args, retries=retries+1, **kwargs)
return result
else:
msg = data.get("message", "")
if data["httpCode"] == 404: msg = "missing resource %s" % data.get("resource", args[0])
raise InvalidRequest("HTTP Code: %s, Message: %s" % (data["httpCode"], msg), code=data["httpCode"])
return data
else:
text = yield from resp.text()
return text
@asyncio.coroutine
def get_players(self, name=None, platform=None, uid=None):
"""|coro|
get a list of players matching the term on that platform,
exactly one of uid and name must be given, platform must be given,
this list almost always has only 1 element, so it's easier to use get_player
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
list[:class:`Player`]
list of found players"""
if name is None and uid is None:
raise TypeError("name and uid are both None, exactly one must be given")
if name is not None and uid is not None:
raise TypeError("cannot search by uid and name at the same time, please give one or the other")
if platform is None:
raise TypeError("platform cannot be None")
if "platform" not in self.cache: self.cache[platform] = {}
if name:
cache_key = "NAME:%s" % name
else:
cache_key = "UID:%s" % uid
if cache_key in self.cache[platform]:
if self.cachetime > 0 and self.cache[platform][cache_key][0] < time.time():
del self.cache[platform][cache_key]
else:
return self.cache[platform][cache_key][1]
if name:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/profiles?nameOnPlatform=%s&platformType=%s" % (parse.quote(name), parse.quote(platform)))
else:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/users/%s/profiles?platformType=%s" % (uid, parse.quote(platform)))
if "profiles" in data:
results = [Player(self, x) for x in data["profiles"] if x.get("platformType", "") == platform]
if len(results) == 0: raise InvalidRequest("No results")
if self.cachetime != 0:
self.cache[platform][cache_key] = [time.time() + self.cachetime, results]
return results
else:
raise InvalidRequest("Missing key profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_player(self, name=None, platform=None, uid=None):
"""|coro|
Calls get_players and returns the first element,
exactly one of uid and name must be given, platform must be given
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
:class:`Player`
player found"""
results = yield from self.get_players(name=name, platform=platform, uid=uid)
return results[0]
@asyncio.coroutine
def get_operator_definitions(self):
"""|coro|
Retrieves a list of information about operators - their badge, unique statistic, etc.
Returns
-------
dict
operators"""
if self._op_definitions is not None:
return self._op_definitions
resp = yield from self.session.get("https://game-rainbow6.ubi.com/assets/data/operators.24b865895.json")
data = yield from resp.json()
self._op_definitions = data
return data
@asyncio.coroutine
def get_operator_index(self, name):
"""|coro|
Gets the operators index from the operator definitions dict
Returns
-------
str
the operator index"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
return opdefs[name]["index"]
@asyncio.coroutine
def get_operator_statistic(self, name):
"""|coro|
Gets the operator unique statistic from the operator definitions dict
Returns
-------
str
the name of the operator unique statistic"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
# some operators (e.g. Kaid and Nomad) don't have a unique statistic sectoin for some reason...
if "uniqueStatistic" not in opdefs[name] or "pvp" not in opdefs[name]["uniqueStatistic"]:
return None
return opdefs[name]["uniqueStatistic"]["pvp"]["statisticId"]
@asyncio.coroutine
def get_operator_badge(self, name):
"""|coro|
Gets the operator badge URL
Returns
-------
str
the operators badge URL"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
badge = opdefs[name]["badge"]
if not badge.startswith("http"):
badge = "https://game-rainbow6.ubi.com/" + badge
return badge
@asyncio.coroutine
@asyncio.coroutine
def get_object_index(self, key):
"""|coro|
Mainly for internal use with get_operator,
returns the "location" index for the key in the definitions
Returns
-------
str
the object's location index"""
defns = yield from self.get_definitions()
for x in defns:
if key in x and "objectIndex" in defns[x]:
return defns[x]["objectIndex"]
return None
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Auth.get_object_index | python | def get_object_index(self, key):
defns = yield from self.get_definitions()
for x in defns:
if key in x and "objectIndex" in defns[x]:
return defns[x]["objectIndex"]
return None | |coro|
Mainly for internal use with get_operator,
returns the "location" index for the key in the definitions
Returns
-------
str
the object's location index | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L645-L661 | null | class Auth:
"""Holds your authentication information. Used to retrieve Player objects
Parameters
----------
email : Optional[str]
Your Ubisoft email
password : Optional[str]
Your Ubisoft password
token : Optional[str]
Your Ubisoft auth token, either supply this OR email/password
appid : Optional[str]
Your Ubisoft appid, not required
cachetime : Optional[float]
How long players are cached for (in seconds)
max_connect_retries : Optional[int]
How many times the auth client will automatically try to reconnect, high numbers can get you temporarily banned
Attributes
----------
session
aiohttp client session
token : str
your token
appid : str
your appid
sessionid : str
the current connections session id (will change upon attaining new key)
key : str
your current auth key (will change every time you connect)
spaceids : dict
contains the spaceid for each platform
profileid : str
your profileid (corresponds to your appid)
userid : str
your userid (corresponds to your appid)
cachetime : float
the time players are cached for
cache : dict
the current player cache
"""
@staticmethod
def get_basic_token(email, password):
return base64.b64encode((email + ":" + password).encode("utf-8")).decode("utf-8")
def __init__(self, email=None, password=None, token=None, appid=None,
cachetime=120, max_connect_retries=1, session=None):
if session is not None:
self.session = session
else:
self.session = aiohttp.ClientSession()
self.max_connect_retries = max_connect_retries
if email is not None and password is not None:
self.token = Auth.get_basic_token(email, password)
elif token is not None:
self.token = token
else:
raise TypeError("Argument error, requires either email/password or token to be set, neither given")
if appid is not None:
self.appid = appid
else:
self.appid = "39baebad-39e5-4552-8c25-2c9b919064e2"
self.sessionid = ""
self.key = ""
self.uncertain_spaceid = ""
self.spaceids = {
"uplay": "5172a557-50b5-4665-b7db-e3f2e8c5041d",
"psn": "05bfb3f7-6c21-4c42-be1f-97a33fb5cf66",
"xbl": "98a601e5-ca91-4440-b1c5-753f601a2c90"
}
self.profileid = ""
self.userid = ""
self.genome = ""
self.cachetime = cachetime
self.cache={}
self._definitions = None
self._op_definitions = None
self._login_cooldown = 0
def __del__(self):
self.session.close()
@asyncio.coroutine
def connect(self):
"""|coro|
Connect to ubisoft, automatically called when needed"""
if time.time() < self._login_cooldown:
raise FailedToConnect("login on cooldown")
resp = yield from self.session.post("https://connect.ubi.com/ubiservices/v2/profiles/sessions", headers = {
"Content-Type": "application/json",
"Ubi-AppId": self.appid,
"Authorization": "Basic " + self.token
}, data=json.dumps({"rememberMe": True}))
data = yield from resp.json()
if "ticket" in data:
self.key = data.get("ticket")
self.sessionid = data.get("sessionId")
self.uncertain_spaceid = data.get("spaceId")
else:
raise FailedToConnect
@asyncio.coroutine
def get(self, *args, retries=0, referer=None, json=True, **kwargs):
if not self.key:
for i in range(self.max_connect_retries):
try:
yield from self.connect()
break
except FailedToConnect:
pass
else:
raise FailedToConnect
if "headers" not in kwargs: kwargs["headers"] = {}
kwargs["headers"]["Authorization"] = "Ubi_v1 t=" + self.key
kwargs["headers"]["Ubi-AppId"] = self.appid
kwargs["headers"]["Ubi-SessionId"] = self.sessionid
kwargs["headers"]["Connection"] = "keep-alive"
if referer is not None:
if isinstance(referer, Player):
referer = "https://game-rainbow6.ubi.com/en-gb/uplay/player-statistics/%s/multiplayer" % referer.id
kwargs["headers"]["Referer"] = str(referer)
resp = yield from self.session.get(*args, **kwargs)
if json:
try:
data = yield from resp.json()
except:
text = yield from resp.text()
message = text.split("h1>")
if len(message) > 1:
message = message[1][:-2]
code = 0
if "502" in message: code = 502
else:
message = text
raise InvalidRequest("Received a text response, expected JSON response. Message: %s" % message, code=code)
if "httpCode" in data:
if data["httpCode"] == 401:
if retries >= self.max_connect_retries:
# wait 30 seconds before sending another request
self._login_cooldown = time.time() + 60
raise FailedToConnect
yield from self.connect()
result = yield from self.get(*args, retries=retries+1, **kwargs)
return result
else:
msg = data.get("message", "")
if data["httpCode"] == 404: msg = "missing resource %s" % data.get("resource", args[0])
raise InvalidRequest("HTTP Code: %s, Message: %s" % (data["httpCode"], msg), code=data["httpCode"])
return data
else:
text = yield from resp.text()
return text
@asyncio.coroutine
def get_players(self, name=None, platform=None, uid=None):
"""|coro|
get a list of players matching the term on that platform,
exactly one of uid and name must be given, platform must be given,
this list almost always has only 1 element, so it's easier to use get_player
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
list[:class:`Player`]
list of found players"""
if name is None and uid is None:
raise TypeError("name and uid are both None, exactly one must be given")
if name is not None and uid is not None:
raise TypeError("cannot search by uid and name at the same time, please give one or the other")
if platform is None:
raise TypeError("platform cannot be None")
if "platform" not in self.cache: self.cache[platform] = {}
if name:
cache_key = "NAME:%s" % name
else:
cache_key = "UID:%s" % uid
if cache_key in self.cache[platform]:
if self.cachetime > 0 and self.cache[platform][cache_key][0] < time.time():
del self.cache[platform][cache_key]
else:
return self.cache[platform][cache_key][1]
if name:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/profiles?nameOnPlatform=%s&platformType=%s" % (parse.quote(name), parse.quote(platform)))
else:
data = yield from self.get("https://public-ubiservices.ubi.com/v2/users/%s/profiles?platformType=%s" % (uid, parse.quote(platform)))
if "profiles" in data:
results = [Player(self, x) for x in data["profiles"] if x.get("platformType", "") == platform]
if len(results) == 0: raise InvalidRequest("No results")
if self.cachetime != 0:
self.cache[platform][cache_key] = [time.time() + self.cachetime, results]
return results
else:
raise InvalidRequest("Missing key profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_player(self, name=None, platform=None, uid=None):
"""|coro|
Calls get_players and returns the first element,
exactly one of uid and name must be given, platform must be given
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
:class:`Player`
player found"""
results = yield from self.get_players(name=name, platform=platform, uid=uid)
return results[0]
@asyncio.coroutine
def get_operator_definitions(self):
"""|coro|
Retrieves a list of information about operators - their badge, unique statistic, etc.
Returns
-------
dict
operators"""
if self._op_definitions is not None:
return self._op_definitions
resp = yield from self.session.get("https://game-rainbow6.ubi.com/assets/data/operators.24b865895.json")
data = yield from resp.json()
self._op_definitions = data
return data
@asyncio.coroutine
def get_operator_index(self, name):
"""|coro|
Gets the operators index from the operator definitions dict
Returns
-------
str
the operator index"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
return opdefs[name]["index"]
@asyncio.coroutine
def get_operator_statistic(self, name):
"""|coro|
Gets the operator unique statistic from the operator definitions dict
Returns
-------
str
the name of the operator unique statistic"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
# some operators (e.g. Kaid and Nomad) don't have a unique statistic sectoin for some reason...
if "uniqueStatistic" not in opdefs[name] or "pvp" not in opdefs[name]["uniqueStatistic"]:
return None
return opdefs[name]["uniqueStatistic"]["pvp"]["statisticId"]
@asyncio.coroutine
def get_operator_badge(self, name):
"""|coro|
Gets the operator badge URL
Returns
-------
str
the operators badge URL"""
opdefs = yield from self.get_operator_definitions()
name = name.lower()
if name not in opdefs:
return None
badge = opdefs[name]["badge"]
if not badge.startswith("http"):
badge = "https://game-rainbow6.ubi.com/" + badge
return badge
@asyncio.coroutine
def get_definitions(self):
"""|coro|
Retrieves the list of api definitions, downloading it from Ubisoft if it hasn't been fetched all ready
Primarily for internal use, but could contain useful information.
Returns
-------
dict
definitions"""
if self._definitions is not None:
return self._definitions
resp = yield from self.session.get("https://ubistatic-a.akamaihd.net/0058/prod/assets/data/statistics.definitions.eb165e13.json")
data = yield from resp.json()
self._definitions = data
return data
@asyncio.coroutine
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Rank.get_charm_url | python | def get_charm_url(self):
if self.rank_id <= 4: return self.RANK_CHARMS[0]
if self.rank_id <= 8: return self.RANK_CHARMS[1]
if self.rank_id <= 12: return self.RANK_CHARMS[2]
if self.rank_id <= 16: return self.RANK_CHARMS[3]
if self.rank_id <= 19: return self.RANK_CHARMS[4]
return self.RANK_CHARMS[5] | Get charm URL for the bracket this rank is in
Returns
-------
:class:`str`
the URL for the charm | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L808-L822 | null | class Rank:
"""Contains information about your rank
Attributes
----------
RANKS : list[str]
Names of the ranks
RANK_CHARMS : list[str]
URLs for the rank charms
UNRANKED : int
the unranked bracket id
COPPER : int
the copper bracket id
BRONZE : int
the bronze bracket id
SILVER : int
the silver bracket id
GOLD : int
the gold bracket id
PLATINUM : int
the platinum bracket id
DIAMOND : int
the diamond bracket id
max_mmr : int
the maximum MMR the player has achieved
mmr : int
the MMR the player currently has
wins : int
the number of wins this player has this season
losses : int
the number of losses this player has this season
abandons : int
the number of abandons this player has this season
rank_id : int
the id of the players current rank
rank : str
the name of the players current rank
max_rank : int
the id of the players max rank
next_rank_mmr : int
the mmr required for the player to achieve their next rank
season : int
the season this rank is for
region : str
the region this rank is for
skill_mean : float
the mean for this persons skill
skill_stdev : float
the standard deviation for this persons skill
"""
RANKS = ["Unranked",
"Copper 4", "Copper 3", "Copper 2", "Copper 1",
"Bronze 4", "Bronze 3", "Bronze 2", "Bronze 1",
"Silver 4", "Silver 3", "Silver 2", "Silver 1",
"Gold 4", "Gold 3", "Gold 2", "Gold 1",
"Platinum 3", "Platinum 2", "Platinum 1", "Diamond"]
RANK_CHARMS = [
"https://ubistatic-a.akamaihd.net/0058/prod/assets/images/season02%20-%20copper%20charm.44c1ede2.png",
"https://ubistatic-a.akamaihd.net/0058/prod/assets/images/season02%20-%20bronze%20charm.5edcf1c6.png",
"https://ubistatic-a.akamaihd.net/0058/prod/assets/images/season02%20-%20silver%20charm.adde1d01.png",
"https://ubistatic-a.akamaihd.net/0058/prod/assets/images/season02%20-%20gold%20charm.1667669d.png",
"https://ubistatic-a.akamaihd.net/0058/prod/assets/images/season02%20-%20platinum%20charm.d7f950d5.png",
"https://ubistatic-a.akamaihd.net/0058/prod/assets/images/season02%20-%20diamond%20charm.e66cad88.png"
]
RANK_ICONS = [
"https://i.imgur.com/sB11BIz.png", # unranked
"https://i.imgur.com/ehILQ3i.jpg", # copper 4
"https://i.imgur.com/6CxJoMn.jpg", # copper 3
"https://i.imgur.com/eI11lah.jpg", # copper 2
"https://i.imgur.com/0J0jSWB.jpg", # copper 1
"https://i.imgur.com/42AC7RD.jpg", # bronze 4
"https://i.imgur.com/QD5LYD7.jpg", # bronze 3
"https://i.imgur.com/9AORiNm.jpg", # bronze 2
"https://i.imgur.com/hmPhPBj.jpg", # bronze 1
"https://i.imgur.com/D36ZfuR.jpg", # silver 4
"https://i.imgur.com/m8GToyF.jpg", # silver 3
"https://i.imgur.com/EswGcx1.jpg", # silver 2
"https://i.imgur.com/KmFpkNc.jpg", # silver 1
"https://i.imgur.com/6Qg6aaH.jpg", # gold 4
"https://i.imgur.com/B0s1o1h.jpg", # gold 3
"https://i.imgur.com/ELbGMc7.jpg", # gold 2
"https://i.imgur.com/ffDmiPk.jpg", # gold 1
"https://i.imgur.com/Sv3PQQE.jpg", # plat 3
"https://i.imgur.com/Uq3WhzZ.jpg", # plat 2
"https://i.imgur.com/xx03Pc5.jpg", # plat 1
"https://i.imgur.com/nODE0QI.jpg" # diamond
]
@staticmethod
def bracket_from_rank(rank_id):
if rank_id == 0: return 0
elif rank_id <= 4: return 1
elif rank_id <= 8: return 2
elif rank_id <= 12: return 3
elif rank_id <= 16: return 4
elif rank_id <= 19: return 5
else: return 6
@staticmethod
def bracket_name(bracket):
if bracket == 0: return "Unranked"
elif bracket == 1: return "Copper"
elif bracket == 2: return "Bronze"
elif bracket == 3: return "Silver"
elif bracket == 4: return "Gold"
elif bracket == 5: return "Platinum"
else: return "Diamond"
UNRANKED = 0
COPPER = 1
BRONZE = 2
SILVER = 3
GOLD = 4
PLATINUM = 5
DIAMOND = 6
def __init__(self, data):
self.max_mmr = data.get("max_mmr")
self.mmr = data.get("mmr")
self.wins = data.get("wins")
self.losses = data.get("losses")
self.rank_id = data.get("rank", 0)
self.rank = Rank.RANKS[self.rank_id]
self.max_rank = data.get("max_rank")
self.next_rank_mmr = data.get("next_rank_mmr")
self.season = data.get("season")
self.region = data.get("region")
self.abandons = data.get("abandons")
self.skill_mean = data.get("skill_mean")
self.skill_stdev = data.get("skill_stdev")
def get_icon_url(self):
"""Get URL for this rank's icon
Returns
-------
:class:`str`
the URL for the rank icon"""
return self.RANK_ICONS[self.rank_id]
def get_bracket(self):
"""Get rank bracket
Returns
-------
:class:`int`
the id for the rank bracket this rank is in
"""
return Rank.bracket_from_rank(self.rank_id)
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.load_level | python | def load_level(self):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
if "player_profiles" in data and len(data["player_profiles"]) > 0:
self.xp = data["player_profiles"][0].get("xp", 0)
self.level = data["player_profiles"][0].get("level", 0)
else:
raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data)) | |coro|
Load the players XP and level | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1093-L1103 | null | class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
weapons : dict
dict containing already found weapons (weapon_id: :class:`Weapon`)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
def __init__(self, auth, data):
self.auth = auth
self.id = data.get("profileId")
self.userid = data.get("userId")
self.platform = data.get("platformType")
self.platform_url = PlatformURLNames[self.platform]
self.id_on_platform = data.get("idOnPlatform")
self.name = data.get("nameOnPlatform")
self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
self.ranks = {}
self.operators = {}
self.gamemodes = {}
self.weapons = []
self.casual = None
self.ranked = None
self.terrorist_hunt = None
    @property
    def spaceid(self):
        # Ubisoft "space id" for this player's platform, looked up on the auth object.
        return self.auth.spaceids[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statsitics):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(statsitics)))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
stats = {}
for x in data:
statistic = x.split(":")[0]
if statistic in statsitics:
stats[statistic] = data[x]
return stats
    # NOTE(review): the decorator is applied twice here - this looks like residue
    # of an edit; a single @asyncio.coroutine should suffice. Confirm before removing.
    @asyncio.coroutine
    @asyncio.coroutine
    def check_level(self):
        """|coro|
        Check the players XP and level, only loading it if it hasn't been loaded yet"""
        # "level" is only ever set by load_level, so its absence means nothing was loaded.
        if not hasattr(self, "level"):
            yield from self.load_level()
@asyncio.coroutine
def load_rank(self, region, season=-1):
"""|coro|
Loads the players rank for this region and season
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s®ion_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
if "players" in data and self.id in data["players"]:
regionkey = "%s:%s" % (region, season)
self.ranks[regionkey] = Rank(data["players"][self.id])
return self.ranks[regionkey]
else:
raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_rank(self, region, season=-1):
"""|coro|
Checks the players rank for this region, only loading it if it hasn't already been found
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
cache_key = "%s:%s" % (region, season)
if cache_key in self.ranks:
return self.ranks[cache_key]
result = yield from self.load_rank(region, season)
return result
    @asyncio.coroutine
    def load_all_operators(self):
        """|coro|
        Loads the player stats for all operators
        Returns
        -------
        dict[:class:`Operator`]
            the dictionary of all operators found"""
        # Base statistics requested for every operator; each operator's unique
        # statistic (when it has one) is appended to the comma-separated list.
        statistics = "operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno"
        for operator in OperatorStatisticNames:
            operator_key = yield from self.auth.get_operator_statistic(operator)
            if operator_key:
                statistics += "," + operator_key
        data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, statistics))
        if "results" not in data or not self.id in data["results"]:
            raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
        data = data["results"][self.id]
        for operator in OperatorStatisticNames:
            # The operator's index (e.g. "2:1") appears inside the response keys;
            # keys look like "operatorpvp_<stat>:<index>", so strip down to <stat>.
            location = yield from self.auth.get_operator_index(operator.lower())
            op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
            operator_key = yield from self.auth.get_operator_statistic(operator)
            if operator_key:
                op_data["__statistic_name"] = operator_key.split("_")[1]
            self.operators[operator.lower()] = Operator(operator.lower(), op_data)
        return self.operators
@asyncio.coroutine
def get_all_operators(self):
"""|coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
if len(self.operators) >= len(OperatorStatisticNames):
return self.operators
result = yield from self.load_all_operators()
return result
    @asyncio.coroutine
    def load_operator(self, operator):
        """|coro|
        Loads the players stats for the operator
        Parameters
        ----------
        operator : str
            the name of the operator
        Returns
        -------
        :class:`Operator`
            the operator object found

        Raises
        ------
        ValueError
            if the operator name is not in the definitions
        InvalidRequest
            if the response has no results entry for this player"""
        location = yield from self.auth.get_operator_index(operator)
        if location is None:
            raise ValueError("invalid operator %s" % operator)
        # The operator's unique statistic (if any) is appended to the query;
        # operator_key keeps the leading "," so it can be spliced into the URL.
        operator_key = yield from self.auth.get_operator_statistic(operator)
        if operator_key is not None:
            operator_key = "," + operator_key
        else:
            operator_key = ""
        data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
        if not "results" in data or not self.id in data["results"]:
            raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
        data = data["results"][self.id]
        # Keys look like "operatorpvp_<stat>:<index>"; keep only this operator's
        # entries (matched by index) and strip the keys down to <stat>.
        data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
        if operator_key:
            data["__statistic_name"] = operator_key.split("_")[1]
        #if len(data) < 5:
        #    raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
        oper = Operator(operator, data)
        self.operators[operator] = oper
        return oper
@asyncio.coroutine
def get_operator(self, operator):
"""|coro|
Checks the players stats for this operator, only loading them if they haven't already been found
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
if operator in self.operators:
return self.operators[operator]
result = yield from self.load_operator(operator)
return result
    @asyncio.coroutine
    def load_weapons(self):
        """|coro|
        Load the players weapon stats
        Returns
        -------
        list[:class:`Weapon`]
            list of all the weapon objects found"""
        data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
        if not "results" in data or not self.id in data["results"]:
            raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
        data = data["results"][self.id]
        # One Weapon slot per weapon-type id; the API uses 1-based type ids,
        # hence the "- 1" below.
        self.weapons = [Weapon(i) for i in range(7)]
        for x in data:
            # Keys look like "weapontypepvp_<category>:<weapontype>".
            spl = x.split(":")
            category = spl[0].split("_")[1]
            try:
                weapontype = int(spl[1]) - 1
                weapon = self.weapons[weapontype]
                if category == "kills": weapon.kills = data[x]
                elif category == "headshot": weapon.headshots = data[x]
                elif category == "bulletfired": weapon.shots = data[x]
                elif category == "bullethit": weapon.hits = data[x]
            # Malformed or out-of-range weapon-type ids are skipped silently.
            except (ValueError, TypeError, IndexError):
                pass
        return self.weapons
@asyncio.coroutine
def check_weapons(self):
"""|coro|
Check the players weapon stats, only loading them if they haven't already been found
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
if len(self.weapons) == 0:
yield from self.load_weapons()
return self.weapons
    @asyncio.coroutine
    def load_gamemodes(self):
        """|coro|
        Loads the players gamemode stats
        Returns
        -------
        dict
            dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
        stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
                                                  "secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
                                                  "rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
                                                  "plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
                                                  "generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
                                                  "generalpvp_hostagerescue", "generalpvp_hostagedefense")
        self.gamemodes = {x: Gamemode(x) for x in GamemodeNames}
        for name in self.gamemodes:
            # Common stats use keys of the form "<gamemode>pvp_<stat>".
            statname, gamemode = name + "pvp_", self.gamemodes[name]
            gamemode.best_score = stats.get(statname + "bestscore", 0)
            gamemode.lost = stats.get(statname + "matchlost", 0)
            gamemode.won = stats.get(statname + "matchwon", 0)
            gamemode.played = stats.get(statname + "matchplayed", 0)
            # Mode-specific extras live under the "generalpvp_" prefix instead.
            if name == "securearea":
                gamemode.areas_secured = stats.get("generalpvp_servershacked", 0)
                gamemode.areas_defended = stats.get("generalpvp_serverdefender", 0)
                gamemode.areas_contested = stats.get("generalpvp_serveraggression", 0)
            elif name == "rescuehostage":
                gamemode.hostages_rescued = stats.get("generalpvp_hostagerescue", 0)
                gamemode.hostages_defended = stats.get("generalpvp_hostagedefense", 0)
        return self.gamemodes
@asyncio.coroutine
def check_gamemodes(self):
"""|coro|
Checks the players gamemode stats, only loading them if they haven't already been found
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
if len(self.gamemodes) == 0:
yield from self.load_gamemodes()
return self.gamemodes
@asyncio.coroutine
def load_general(self):
"""|coro|
Loads the players general stats"""
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills")
@asyncio.coroutine
def check_general(self):
"""|coro|
Checks the players general stats, only loading them if they haven't already been found"""
if not hasattr(self, "kills"):
yield from self.load_general()
    @asyncio.coroutine
    def load_queues(self):
        """|coro|
        Loads the players game queues"""
        stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
                                                  "casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
                                                  "rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
                                                  "rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death")
        self.ranked = GameQueue("ranked")
        self.casual = GameQueue("casual")
        # Both queues share the same key layout: "<queue>pvp_<stat>".
        for gq in (self.ranked, self.casual):
            statname = gq.name + "pvp_"
            gq.won = stats.get(statname + "matchwon", 0)
            gq.lost = stats.get(statname + "matchlost", 0)
            gq.time_played = stats.get(statname + "timeplayed", 0)
            gq.played = stats.get(statname + "matchplayed", 0)
            gq.kills = stats.get(statname + "kills", 0)
            gq.deaths = stats.get(statname + "death", 0)
@asyncio.coroutine
def check_queues(self):
"""|coro|
Checks the players game queues, only loading them if they haven't already been found"""
if self.casual is None:
yield from self.load_queues()
    @asyncio.coroutine
    def load_terrohunt(self):
        """|coro|
        Loads the player's general stats for terrorist hunt"""
        # Same statistic names as load_general but under the PvE prefix.
        # NOTE(review): "generalpve_accuracy" is requested but never read below.
        stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
                                                  "generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
                                                  "generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
                                                  "generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
                                                  "generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
                                                  "generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
                                                  "generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
                                                  "generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
                                                  "generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
                                                  "generalpve_gadgetdestroy", "generalpve_accuracy")
        self.terrorist_hunt = GameQueue("terrohunt")
        statname = "generalpve_"
        # Every statistic defaults to 0 when absent from the response.
        self.terrorist_hunt.deaths = stats.get(statname + "death", 0)
        self.terrorist_hunt.penetration_kills = stats.get(statname + "penetrationkills", 0)
        self.terrorist_hunt.matches_won = stats.get(statname + "matchwon", 0)
        self.terrorist_hunt.bullets_hit = stats.get(statname + "bullethit", 0)
        self.terrorist_hunt.melee_kills = stats.get(statname + "meleekills", 0)
        self.terrorist_hunt.bullets_fired = stats.get(statname + "bulletfired", 0)
        self.terrorist_hunt.matches_played = stats.get(statname + "matchplayed", 0)
        self.terrorist_hunt.kill_assists = stats.get(statname + "killassists", 0)
        self.terrorist_hunt.time_played = stats.get(statname + "timeplayed", 0)
        self.terrorist_hunt.revives = stats.get(statname + "revive", 0)
        self.terrorist_hunt.kills = stats.get(statname + "kills", 0)
        self.terrorist_hunt.headshots = stats.get(statname + "headshot", 0)
        self.terrorist_hunt.matches_lost = stats.get(statname + "matchlost", 0)
        self.terrorist_hunt.dbno_assists = stats.get(statname + "dbnoassists", 0)
        self.terrorist_hunt.suicides = stats.get(statname + "suicide", 0)
        self.terrorist_hunt.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
        self.terrorist_hunt.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
        self.terrorist_hunt.total_xp = stats.get(statname + "totalxp", 0)
        self.terrorist_hunt.rappel_breaches = stats.get(statname + "rappelbreach", 0)
        self.terrorist_hunt.distance_travelled = stats.get(statname + "distancetravelled", 0)
        self.terrorist_hunt.revives_denied = stats.get(statname + "revivedenied", 0)
        self.terrorist_hunt.dbnos = stats.get(statname + "dbno", 0)
        self.terrorist_hunt.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
        self.terrorist_hunt.areas_secured = stats.get(statname + "servershacked", 0)
        self.terrorist_hunt.areas_defended = stats.get(statname + "serverdefender", 0)
        self.terrorist_hunt.areas_contested = stats.get(statname + "serveraggression", 0)
        self.terrorist_hunt.hostages_rescued = stats.get(statname + "hostagerescue", 0)
        self.terrorist_hunt.hostages_defended = stats.get(statname + "hostagedefense", 0)
        self.terrorist_hunt.blind_kills = stats.get(statname + "blindkills", 0)
        return self.terrorist_hunt
@asyncio.coroutine
def check_terrohunt(self):
"""|coro|
Checks the players general stats for terrorist hunt, only loading them if they haven't been loaded already"""
if self.terrorist_hunt is None:
yield from self.load_terrohunt()
return self.terrorist_hunt
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.load_rank | python | def load_rank(self, region, season=-1):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s®ion_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
if "players" in data and self.id in data["players"]:
regionkey = "%s:%s" % (region, season)
self.ranks[regionkey] = Rank(data["players"][self.id])
return self.ranks[regionkey]
else:
raise InvalidRequest("Missing players key in returned JSON object %s" % str(data)) | |coro|
Loads the players rank for this region and season
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1114-L1136 | null | class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
weapons : dict
dict containing already found weapons (weapon_id: :class:`Weapon`)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
    def __init__(self, auth, data):
        """Build a Player from a raw profile dict returned by the API."""
        self.auth = auth
        # Raw profile payload fields.
        self.id = data.get("profileId")
        self.userid = data.get("userId")
        self.platform = data.get("platformType")
        self.platform_url = PlatformURLNames[self.platform]
        self.id_on_platform = data.get("idOnPlatform")
        self.name = data.get("nameOnPlatform")
        self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
        self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
        # Lazily-populated stat caches, filled in by the load_*/check_* methods.
        self.ranks = {}
        self.operators = {}
        self.gamemodes = {}
        self.weapons = []
        self.casual = None
        self.ranked = None
        self.terrorist_hunt = None
    @property
    def spaceid(self):
        # Ubisoft "space id" for this player's platform, looked up on the auth object.
        return self.auth.spaceids[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statsitics):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(statsitics)))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
stats = {}
for x in data:
statistic = x.split(":")[0]
if statistic in statsitics:
stats[statistic] = data[x]
return stats
@asyncio.coroutine
def load_level(self):
"""|coro|
Load the players XP and level"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
if "player_profiles" in data and len(data["player_profiles"]) > 0:
self.xp = data["player_profiles"][0].get("xp", 0)
self.level = data["player_profiles"][0].get("level", 0)
else:
raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def check_level(self):
"""|coro|
Check the players XP and level, only loading it if it hasn't been loaded yet"""
if not hasattr(self, "level"):
yield from self.load_level()
@asyncio.coroutine
@asyncio.coroutine
def get_rank(self, region, season=-1):
"""|coro|
Checks the players rank for this region, only loading it if it hasn't already been found
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
cache_key = "%s:%s" % (region, season)
if cache_key in self.ranks:
return self.ranks[cache_key]
result = yield from self.load_rank(region, season)
return result
@asyncio.coroutine
def load_all_operators(self):
"""|coro|
Loads the player stats for all operators
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
statistics = "operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno"
for operator in OperatorStatisticNames:
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
statistics += "," + operator_key
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, statistics))
if "results" not in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
for operator in OperatorStatisticNames:
location = yield from self.auth.get_operator_index(operator.lower())
op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
op_data["__statistic_name"] = operator_key.split("_")[1]
self.operators[operator.lower()] = Operator(operator.lower(), op_data)
return self.operators
@asyncio.coroutine
def get_all_operators(self):
"""|coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
if len(self.operators) >= len(OperatorStatisticNames):
return self.operators
result = yield from self.load_all_operators()
return result
@asyncio.coroutine
def load_operator(self, operator):
"""|coro|
Loads the players stats for the operator
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
location = yield from self.auth.get_operator_index(operator)
if location is None:
raise ValueError("invalid operator %s" % operator)
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key is not None:
operator_key = "," + operator_key
else:
operator_key = ""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
if operator_key:
data["__statistic_name"] = operator_key.split("_")[1]
#if len(data) < 5:
# raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
oper = Operator(operator, data)
self.operators[operator] = oper
return oper
@asyncio.coroutine
def get_operator(self, operator):
"""|coro|
Checks the players stats for this operator, only loading them if they haven't already been found
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
if operator in self.operators:
return self.operators[operator]
result = yield from self.load_operator(operator)
return result
@asyncio.coroutine
def load_weapons(self):
"""|coro|
Load the players weapon stats
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
data = data["results"][self.id]
self.weapons = [Weapon(i) for i in range(7)]
for x in data:
spl = x.split(":")
category = spl[0].split("_")[1]
try:
weapontype = int(spl[1]) - 1
weapon = self.weapons[weapontype]
if category == "kills": weapon.kills = data[x]
elif category == "headshot": weapon.headshots = data[x]
elif category == "bulletfired": weapon.shots = data[x]
elif category == "bullethit": weapon.hits = data[x]
except (ValueError, TypeError, IndexError):
pass
return self.weapons
@asyncio.coroutine
def check_weapons(self):
"""|coro|
Check the players weapon stats, only loading them if they haven't already been found
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
if len(self.weapons) == 0:
yield from self.load_weapons()
return self.weapons
@asyncio.coroutine
def load_gamemodes(self):
"""|coro|
Loads the players gamemode stats
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
"secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
"rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
"plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
"generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
"generalpvp_hostagerescue", "generalpvp_hostagedefense")
self.gamemodes = {x: Gamemode(x) for x in GamemodeNames}
for name in self.gamemodes:
statname, gamemode = name + "pvp_", self.gamemodes[name]
gamemode.best_score = stats.get(statname + "bestscore", 0)
gamemode.lost = stats.get(statname + "matchlost", 0)
gamemode.won = stats.get(statname + "matchwon", 0)
gamemode.played = stats.get(statname + "matchplayed", 0)
if name == "securearea":
gamemode.areas_secured = stats.get("generalpvp_servershacked", 0)
gamemode.areas_defended = stats.get("generalpvp_serverdefender", 0)
gamemode.areas_contested = stats.get("generalpvp_serveraggression", 0)
elif name == "rescuehostage":
gamemode.hostages_rescued = stats.get("generalpvp_hostagerescue", 0)
gamemode.hostages_defended = stats.get("generalpvp_hostagedefense", 0)
return self.gamemodes
@asyncio.coroutine
def check_gamemodes(self):
"""|coro|
Checks the players gamemode stats, only loading them if they haven't already been found
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
if len(self.gamemodes) == 0:
yield from self.load_gamemodes()
return self.gamemodes
@asyncio.coroutine
def load_general(self):
"""|coro|
Loads the players general stats"""
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills")
@asyncio.coroutine
def check_general(self):
"""|coro|
Checks the players general stats, only loading them if they haven't already been found"""
if not hasattr(self, "kills"):
yield from self.load_general()
@asyncio.coroutine
def load_queues(self):
"""|coro|
Loads the players game queues"""
stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
"casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
"rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
"rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death")
self.ranked = GameQueue("ranked")
self.casual = GameQueue("casual")
for gq in (self.ranked, self.casual):
statname = gq.name + "pvp_"
gq.won = stats.get(statname + "matchwon", 0)
gq.lost = stats.get(statname + "matchlost", 0)
gq.time_played = stats.get(statname + "timeplayed", 0)
gq.played = stats.get(statname + "matchplayed", 0)
gq.kills = stats.get(statname + "kills", 0)
gq.deaths = stats.get(statname + "death", 0)
@asyncio.coroutine
def check_queues(self):
"""|coro|
Checks the players game queues, only loading them if they haven't already been found"""
if self.casual is None:
yield from self.load_queues()
@asyncio.coroutine
def load_terrohunt(self):
"""|coro|
Loads the player's general stats for terrorist hunt"""
stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
"generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
"generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
"generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
"generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
"generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
"generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
"generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
"generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
"generalpve_gadgetdestroy", "generalpve_accuracy")
self.terrorist_hunt = GameQueue("terrohunt")
statname = "generalpve_"
self.terrorist_hunt.deaths = stats.get(statname + "death", 0)
self.terrorist_hunt.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.terrorist_hunt.matches_won = stats.get(statname + "matchwon", 0)
self.terrorist_hunt.bullets_hit = stats.get(statname + "bullethit", 0)
self.terrorist_hunt.melee_kills = stats.get(statname + "meleekills", 0)
self.terrorist_hunt.bullets_fired = stats.get(statname + "bulletfired", 0)
self.terrorist_hunt.matches_played = stats.get(statname + "matchplayed", 0)
self.terrorist_hunt.kill_assists = stats.get(statname + "killassists", 0)
self.terrorist_hunt.time_played = stats.get(statname + "timeplayed", 0)
self.terrorist_hunt.revives = stats.get(statname + "revive", 0)
self.terrorist_hunt.kills = stats.get(statname + "kills", 0)
self.terrorist_hunt.headshots = stats.get(statname + "headshot", 0)
self.terrorist_hunt.matches_lost = stats.get(statname + "matchlost", 0)
self.terrorist_hunt.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.terrorist_hunt.suicides = stats.get(statname + "suicide", 0)
self.terrorist_hunt.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.terrorist_hunt.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.terrorist_hunt.total_xp = stats.get(statname + "totalxp", 0)
self.terrorist_hunt.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.terrorist_hunt.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.terrorist_hunt.revives_denied = stats.get(statname + "revivedenied", 0)
self.terrorist_hunt.dbnos = stats.get(statname + "dbno", 0)
self.terrorist_hunt.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.terrorist_hunt.areas_secured = stats.get(statname + "servershacked", 0)
self.terrorist_hunt.areas_defended = stats.get(statname + "serverdefender", 0)
self.terrorist_hunt.areas_contested = stats.get(statname + "serveraggression", 0)
self.terrorist_hunt.hostages_rescued = stats.get(statname + "hostagerescue", 0)
self.terrorist_hunt.hostages_defended = stats.get(statname + "hostagedefense", 0)
self.terrorist_hunt.blind_kills = stats.get(statname + "blindkills", 0)
return self.terrorist_hunt
@asyncio.coroutine
def check_terrohunt(self):
"""|coro|
Checks the players general stats for terrorist hunt, only loading them if they haven't been loaded already"""
if self.terrorist_hunt is None:
yield from self.load_terrohunt()
return self.terrorist_hunt
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.get_rank | python | def get_rank(self, region, season=-1):
cache_key = "%s:%s" % (region, season)
if cache_key in self.ranks:
return self.ranks[cache_key]
result = yield from self.load_rank(region, season)
return result | |coro|
Checks the players rank for this region, only loading it if it hasn't already been found
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1139-L1160 | null | class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
weapons : dict
dict containing already found weapons (weapon_id: :class:`Weapon`)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
def __init__(self, auth, data):
self.auth = auth
self.id = data.get("profileId")
self.userid = data.get("userId")
self.platform = data.get("platformType")
self.platform_url = PlatformURLNames[self.platform]
self.id_on_platform = data.get("idOnPlatform")
self.name = data.get("nameOnPlatform")
self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
self.ranks = {}
self.operators = {}
self.gamemodes = {}
self.weapons = []
self.casual = None
self.ranked = None
self.terrorist_hunt = None
@property
def spaceid(self):
return self.auth.spaceids[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statsitics):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(statsitics)))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
stats = {}
for x in data:
statistic = x.split(":")[0]
if statistic in statsitics:
stats[statistic] = data[x]
return stats
@asyncio.coroutine
def load_level(self):
"""|coro|
Load the players XP and level"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
if "player_profiles" in data and len(data["player_profiles"]) > 0:
self.xp = data["player_profiles"][0].get("xp", 0)
self.level = data["player_profiles"][0].get("level", 0)
else:
raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def check_level(self):
"""|coro|
Check the players XP and level, only loading it if it hasn't been loaded yet"""
if not hasattr(self, "level"):
yield from self.load_level()
@asyncio.coroutine
def load_rank(self, region, season=-1):
"""|coro|
Loads the players rank for this region and season
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s®ion_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
if "players" in data and self.id in data["players"]:
regionkey = "%s:%s" % (region, season)
self.ranks[regionkey] = Rank(data["players"][self.id])
return self.ranks[regionkey]
else:
raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
@asyncio.coroutine
@asyncio.coroutine
def load_all_operators(self):
"""|coro|
Loads the player stats for all operators
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
statistics = "operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno"
for operator in OperatorStatisticNames:
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
statistics += "," + operator_key
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, statistics))
if "results" not in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
for operator in OperatorStatisticNames:
location = yield from self.auth.get_operator_index(operator.lower())
op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
op_data["__statistic_name"] = operator_key.split("_")[1]
self.operators[operator.lower()] = Operator(operator.lower(), op_data)
return self.operators
@asyncio.coroutine
def get_all_operators(self):
"""|coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
if len(self.operators) >= len(OperatorStatisticNames):
return self.operators
result = yield from self.load_all_operators()
return result
@asyncio.coroutine
def load_operator(self, operator):
"""|coro|
Loads the players stats for the operator
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
location = yield from self.auth.get_operator_index(operator)
if location is None:
raise ValueError("invalid operator %s" % operator)
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key is not None:
operator_key = "," + operator_key
else:
operator_key = ""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
if operator_key:
data["__statistic_name"] = operator_key.split("_")[1]
#if len(data) < 5:
# raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
oper = Operator(operator, data)
self.operators[operator] = oper
return oper
@asyncio.coroutine
def get_operator(self, operator):
"""|coro|
Checks the players stats for this operator, only loading them if they haven't already been found
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
if operator in self.operators:
return self.operators[operator]
result = yield from self.load_operator(operator)
return result
@asyncio.coroutine
def load_weapons(self):
"""|coro|
Load the players weapon stats
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
data = data["results"][self.id]
self.weapons = [Weapon(i) for i in range(7)]
for x in data:
spl = x.split(":")
category = spl[0].split("_")[1]
try:
weapontype = int(spl[1]) - 1
weapon = self.weapons[weapontype]
if category == "kills": weapon.kills = data[x]
elif category == "headshot": weapon.headshots = data[x]
elif category == "bulletfired": weapon.shots = data[x]
elif category == "bullethit": weapon.hits = data[x]
except (ValueError, TypeError, IndexError):
pass
return self.weapons
@asyncio.coroutine
def check_weapons(self):
"""|coro|
Check the players weapon stats, only loading them if they haven't already been found
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
if len(self.weapons) == 0:
yield from self.load_weapons()
return self.weapons
@asyncio.coroutine
def load_gamemodes(self):
"""|coro|
Loads the players gamemode stats
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
"secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
"rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
"plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
"generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
"generalpvp_hostagerescue", "generalpvp_hostagedefense")
self.gamemodes = {x: Gamemode(x) for x in GamemodeNames}
for name in self.gamemodes:
statname, gamemode = name + "pvp_", self.gamemodes[name]
gamemode.best_score = stats.get(statname + "bestscore", 0)
gamemode.lost = stats.get(statname + "matchlost", 0)
gamemode.won = stats.get(statname + "matchwon", 0)
gamemode.played = stats.get(statname + "matchplayed", 0)
if name == "securearea":
gamemode.areas_secured = stats.get("generalpvp_servershacked", 0)
gamemode.areas_defended = stats.get("generalpvp_serverdefender", 0)
gamemode.areas_contested = stats.get("generalpvp_serveraggression", 0)
elif name == "rescuehostage":
gamemode.hostages_rescued = stats.get("generalpvp_hostagerescue", 0)
gamemode.hostages_defended = stats.get("generalpvp_hostagedefense", 0)
return self.gamemodes
@asyncio.coroutine
def check_gamemodes(self):
"""|coro|
Checks the players gamemode stats, only loading them if they haven't already been found
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
if len(self.gamemodes) == 0:
yield from self.load_gamemodes()
return self.gamemodes
@asyncio.coroutine
def load_general(self):
"""|coro|
Loads the players general stats"""
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills")
@asyncio.coroutine
def check_general(self):
"""|coro|
Checks the players general stats, only loading them if they haven't already been found"""
if not hasattr(self, "kills"):
yield from self.load_general()
@asyncio.coroutine
def load_queues(self):
"""|coro|
Loads the players game queues"""
stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
"casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
"rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
"rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death")
self.ranked = GameQueue("ranked")
self.casual = GameQueue("casual")
for gq in (self.ranked, self.casual):
statname = gq.name + "pvp_"
gq.won = stats.get(statname + "matchwon", 0)
gq.lost = stats.get(statname + "matchlost", 0)
gq.time_played = stats.get(statname + "timeplayed", 0)
gq.played = stats.get(statname + "matchplayed", 0)
gq.kills = stats.get(statname + "kills", 0)
gq.deaths = stats.get(statname + "death", 0)
@asyncio.coroutine
def check_queues(self):
"""|coro|
Checks the players game queues, only loading them if they haven't already been found"""
if self.casual is None:
yield from self.load_queues()
@asyncio.coroutine
def load_terrohunt(self):
    """|coro|
    Fetches the player's aggregate terrorist hunt (PvE) statistics and
    stores them on ``self.terrorist_hunt``.

    Returns
    -------
    :class:`GameQueue`
        the populated terrorist hunt stats object"""
    # Statistic names requested from the API, in the established order.
    # NOTE: "generalpve_accuracy" is requested but never read back.
    requested = ("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
                 "generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
                 "generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
                 "generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
                 "generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
                 "generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
                 "generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
                 "generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
                 "generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
                 "generalpve_gadgetdestroy", "generalpve_accuracy")
    stats = yield from self._fetch_statistics(*requested)
    # GameQueue attribute name -> statistic suffix in the API response.
    attribute_map = (
        ("deaths", "death"),
        ("penetration_kills", "penetrationkills"),
        ("matches_won", "matchwon"),
        ("bullets_hit", "bullethit"),
        ("melee_kills", "meleekills"),
        ("bullets_fired", "bulletfired"),
        ("matches_played", "matchplayed"),
        ("kill_assists", "killassists"),
        ("time_played", "timeplayed"),
        ("revives", "revive"),
        ("kills", "kills"),
        ("headshots", "headshot"),
        ("matches_lost", "matchlost"),
        ("dbno_assists", "dbnoassists"),
        ("suicides", "suicide"),
        ("barricades_deployed", "barricadedeployed"),
        ("reinforcements_deployed", "reinforcementdeploy"),
        ("total_xp", "totalxp"),
        ("rappel_breaches", "rappelbreach"),
        ("distance_travelled", "distancetravelled"),
        ("revives_denied", "revivedenied"),
        ("dbnos", "dbno"),
        ("gadgets_destroyed", "gadgetdestroy"),
        ("areas_secured", "servershacked"),
        ("areas_defended", "serverdefender"),
        ("areas_contested", "serveraggression"),
        ("hostages_rescued", "hostagerescue"),
        ("hostages_defended", "hostagedefense"),
        ("blind_kills", "blindkills"),
    )
    queue = GameQueue("terrohunt")
    for attribute, suffix in attribute_map:
        # Missing statistics default to 0, as everywhere else in this class.
        setattr(queue, attribute, stats.get("generalpve_" + suffix, 0))
    self.terrorist_hunt = queue
    return self.terrorist_hunt
@asyncio.coroutine
def check_terrohunt(self):
    """|coro|
    Returns the player's terrorist hunt stats, loading them only on the
    first access."""
    cached = self.terrorist_hunt
    if cached is None:
        yield from self.load_terrohunt()
        cached = self.terrorist_hunt
    return cached
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.load_all_operators | python | def load_all_operators(self):
statistics = "operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno"
for operator in OperatorStatisticNames:
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
statistics += "," + operator_key
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, statistics))
if "results" not in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
for operator in OperatorStatisticNames:
location = yield from self.auth.get_operator_index(operator.lower())
op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
op_data["__statistic_name"] = operator_key.split("_")[1]
self.operators[operator.lower()] = Operator(operator.lower(), op_data)
return self.operators | |coro|
Loads the player stats for all operators
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1163-L1195 | null | class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
weapons : dict
dict containing already found weapons (weapon_id: :class:`Weapon`)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
def __init__(self, auth, data):
self.auth = auth
self.id = data.get("profileId")
self.userid = data.get("userId")
self.platform = data.get("platformType")
self.platform_url = PlatformURLNames[self.platform]
self.id_on_platform = data.get("idOnPlatform")
self.name = data.get("nameOnPlatform")
self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
self.ranks = {}
self.operators = {}
self.gamemodes = {}
self.weapons = []
self.casual = None
self.ranked = None
self.terrorist_hunt = None
@property
def spaceid(self):
return self.auth.spaceids[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statistics):
    """|coro|
    Fetches the named statistics for this player from the playerstats2
    endpoint.

    Parameters
    ----------
    *statistics : str
        the statistic names to request (e.g. ``"generalpvp_kills"``)

    Returns
    -------
    dict
        mapping of statistic name -> value, containing only the names
        that were requested and present in the response

    Raises
    ------
    InvalidRequest
        if the response has no results entry for this player"""
    # Fixed the misspelled parameter name ("statsitics") and the
    # non-idiomatic membership tests; behavior is unchanged.
    data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(statistics)))
    if "results" not in data or self.id not in data["results"]:
        raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
    data = data["results"][self.id]
    # Response keys look like "<statistic>:<qualifier>"; keep only the
    # statistics that were actually asked for.
    stats = {}
    for key in data:
        name = key.split(":")[0]
        if name in statistics:
            stats[name] = data[key]
    return stats
@asyncio.coroutine
def load_level(self):
"""|coro|
Load the players XP and level"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
if "player_profiles" in data and len(data["player_profiles"]) > 0:
self.xp = data["player_profiles"][0].get("xp", 0)
self.level = data["player_profiles"][0].get("level", 0)
else:
raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def check_level(self):
"""|coro|
Check the players XP and level, only loading it if it hasn't been loaded yet"""
if not hasattr(self, "level"):
yield from self.load_level()
@asyncio.coroutine
def load_rank(self, region, season=-1):
"""|coro|
Loads the players rank for this region and season
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s®ion_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
if "players" in data and self.id in data["players"]:
regionkey = "%s:%s" % (region, season)
self.ranks[regionkey] = Rank(data["players"][self.id])
return self.ranks[regionkey]
else:
raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_rank(self, region, season=-1):
"""|coro|
Checks the players rank for this region, only loading it if it hasn't already been found
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
cache_key = "%s:%s" % (region, season)
if cache_key in self.ranks:
return self.ranks[cache_key]
result = yield from self.load_rank(region, season)
return result
@asyncio.coroutine
def get_all_operators(self):
    """|coro|
    Checks the player stats for all operators, loading them all again if any aren't found
    This is significantly more efficient than calling get_operator for every operator name.

    Returns
    -------
    dict[:class:`Operator`]
        the dictionary of all operators found"""
    # BUG FIX: the decorator was applied twice here (a leftover from an
    # edit of the neighbouring method); a single application suffices.
    # Only refetch when at least one operator is missing from the cache.
    if len(self.operators) >= len(OperatorStatisticNames):
        return self.operators
    result = yield from self.load_all_operators()
    return result
@asyncio.coroutine
def load_operator(self, operator):
"""|coro|
Loads the players stats for the operator
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
location = yield from self.auth.get_operator_index(operator)
if location is None:
raise ValueError("invalid operator %s" % operator)
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key is not None:
operator_key = "," + operator_key
else:
operator_key = ""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
if operator_key:
data["__statistic_name"] = operator_key.split("_")[1]
#if len(data) < 5:
# raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
oper = Operator(operator, data)
self.operators[operator] = oper
return oper
@asyncio.coroutine
def get_operator(self, operator):
"""|coro|
Checks the players stats for this operator, only loading them if they haven't already been found
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
if operator in self.operators:
return self.operators[operator]
result = yield from self.load_operator(operator)
return result
@asyncio.coroutine
def load_weapons(self):
"""|coro|
Load the players weapon stats
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
data = data["results"][self.id]
self.weapons = [Weapon(i) for i in range(7)]
for x in data:
spl = x.split(":")
category = spl[0].split("_")[1]
try:
weapontype = int(spl[1]) - 1
weapon = self.weapons[weapontype]
if category == "kills": weapon.kills = data[x]
elif category == "headshot": weapon.headshots = data[x]
elif category == "bulletfired": weapon.shots = data[x]
elif category == "bullethit": weapon.hits = data[x]
except (ValueError, TypeError, IndexError):
pass
return self.weapons
@asyncio.coroutine
def check_weapons(self):
"""|coro|
Check the players weapon stats, only loading them if they haven't already been found
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
if len(self.weapons) == 0:
yield from self.load_weapons()
return self.weapons
@asyncio.coroutine
def load_gamemodes(self):
"""|coro|
Loads the players gamemode stats
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
"secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
"rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
"plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
"generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
"generalpvp_hostagerescue", "generalpvp_hostagedefense")
self.gamemodes = {x: Gamemode(x) for x in GamemodeNames}
for name in self.gamemodes:
statname, gamemode = name + "pvp_", self.gamemodes[name]
gamemode.best_score = stats.get(statname + "bestscore", 0)
gamemode.lost = stats.get(statname + "matchlost", 0)
gamemode.won = stats.get(statname + "matchwon", 0)
gamemode.played = stats.get(statname + "matchplayed", 0)
if name == "securearea":
gamemode.areas_secured = stats.get("generalpvp_servershacked", 0)
gamemode.areas_defended = stats.get("generalpvp_serverdefender", 0)
gamemode.areas_contested = stats.get("generalpvp_serveraggression", 0)
elif name == "rescuehostage":
gamemode.hostages_rescued = stats.get("generalpvp_hostagerescue", 0)
gamemode.hostages_defended = stats.get("generalpvp_hostagedefense", 0)
return self.gamemodes
@asyncio.coroutine
def check_gamemodes(self):
"""|coro|
Checks the players gamemode stats, only loading them if they haven't already been found
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
if len(self.gamemodes) == 0:
yield from self.load_gamemodes()
return self.gamemodes
@asyncio.coroutine
def load_general(self):
    """|coro|
    Loads the player's general PvP stats onto this object.

    Raises
    ------
    InvalidRequest
        if the API response contains no results for this player (raised
        by :meth:`_fetch_statistics`)."""
    stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
                                              "generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
                                              "generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
                                              "generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
                                              "generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
                                              "generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
                                              "generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
                                              "generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
    statname = "generalpvp_"
    # Any statistic missing from the response defaults to 0.
    self.deaths = stats.get(statname + "death", 0)
    self.penetration_kills = stats.get(statname + "penetrationkills", 0)
    self.matches_won = stats.get(statname + "matchwon", 0)
    self.bullets_hit = stats.get(statname + "bullethit", 0)
    self.melee_kills = stats.get(statname + "meleekills", 0)
    self.bullets_fired = stats.get(statname + "bulletfired", 0)
    self.matches_played = stats.get(statname + "matchplayed", 0)
    self.kill_assists = stats.get(statname + "killassists", 0)
    self.time_played = stats.get(statname + "timeplayed", 0)
    self.revives = stats.get(statname + "revive", 0)
    self.kills = stats.get(statname + "kills", 0)
    self.headshots = stats.get(statname + "headshot", 0)
    self.matches_lost = stats.get(statname + "matchlost", 0)
    self.dbno_assists = stats.get(statname + "dbnoassists", 0)
    self.suicides = stats.get(statname + "suicide", 0)
    self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
    self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
    self.total_xp = stats.get(statname + "totalxp", 0)
    self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
    self.distance_travelled = stats.get(statname + "distancetravelled", 0)
    self.revives_denied = stats.get(statname + "revivedenied", 0)
    self.dbnos = stats.get(statname + "dbno", 0)
    self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
    # BUG FIX: the 0 default was missing here, so an absent statistic left
    # blind_kills as None instead of 0 like every other counter above.
    self.blind_kills = stats.get(statname + "blindkills", 0)
@asyncio.coroutine
def check_general(self):
"""|coro|
Checks the players general stats, only loading them if they haven't already been found"""
if not hasattr(self, "kills"):
yield from self.load_general()
@asyncio.coroutine
def load_queues(self):
"""|coro|
Loads the players game queues"""
stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
"casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
"rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
"rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death")
self.ranked = GameQueue("ranked")
self.casual = GameQueue("casual")
for gq in (self.ranked, self.casual):
statname = gq.name + "pvp_"
gq.won = stats.get(statname + "matchwon", 0)
gq.lost = stats.get(statname + "matchlost", 0)
gq.time_played = stats.get(statname + "timeplayed", 0)
gq.played = stats.get(statname + "matchplayed", 0)
gq.kills = stats.get(statname + "kills", 0)
gq.deaths = stats.get(statname + "death", 0)
@asyncio.coroutine
def check_queues(self):
"""|coro|
Checks the players game queues, only loading them if they haven't already been found"""
if self.casual is None:
yield from self.load_queues()
@asyncio.coroutine
def load_terrohunt(self):
"""|coro|
Loads the player's general stats for terrorist hunt"""
stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
"generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
"generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
"generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
"generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
"generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
"generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
"generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
"generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
"generalpve_gadgetdestroy", "generalpve_accuracy")
self.terrorist_hunt = GameQueue("terrohunt")
statname = "generalpve_"
self.terrorist_hunt.deaths = stats.get(statname + "death", 0)
self.terrorist_hunt.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.terrorist_hunt.matches_won = stats.get(statname + "matchwon", 0)
self.terrorist_hunt.bullets_hit = stats.get(statname + "bullethit", 0)
self.terrorist_hunt.melee_kills = stats.get(statname + "meleekills", 0)
self.terrorist_hunt.bullets_fired = stats.get(statname + "bulletfired", 0)
self.terrorist_hunt.matches_played = stats.get(statname + "matchplayed", 0)
self.terrorist_hunt.kill_assists = stats.get(statname + "killassists", 0)
self.terrorist_hunt.time_played = stats.get(statname + "timeplayed", 0)
self.terrorist_hunt.revives = stats.get(statname + "revive", 0)
self.terrorist_hunt.kills = stats.get(statname + "kills", 0)
self.terrorist_hunt.headshots = stats.get(statname + "headshot", 0)
self.terrorist_hunt.matches_lost = stats.get(statname + "matchlost", 0)
self.terrorist_hunt.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.terrorist_hunt.suicides = stats.get(statname + "suicide", 0)
self.terrorist_hunt.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.terrorist_hunt.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.terrorist_hunt.total_xp = stats.get(statname + "totalxp", 0)
self.terrorist_hunt.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.terrorist_hunt.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.terrorist_hunt.revives_denied = stats.get(statname + "revivedenied", 0)
self.terrorist_hunt.dbnos = stats.get(statname + "dbno", 0)
self.terrorist_hunt.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.terrorist_hunt.areas_secured = stats.get(statname + "servershacked", 0)
self.terrorist_hunt.areas_defended = stats.get(statname + "serverdefender", 0)
self.terrorist_hunt.areas_contested = stats.get(statname + "serveraggression", 0)
self.terrorist_hunt.hostages_rescued = stats.get(statname + "hostagerescue", 0)
self.terrorist_hunt.hostages_defended = stats.get(statname + "hostagedefense", 0)
self.terrorist_hunt.blind_kills = stats.get(statname + "blindkills", 0)
return self.terrorist_hunt
@asyncio.coroutine
def check_terrohunt(self):
"""|coro|
Checks the players general stats for terrorist hunt, only loading them if they haven't been loaded already"""
if self.terrorist_hunt is None:
yield from self.load_terrohunt()
return self.terrorist_hunt
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.get_all_operators | python | def get_all_operators(self):
if len(self.operators) >= len(OperatorStatisticNames):
return self.operators
result = yield from self.load_all_operators()
return result | |coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1198-L1212 | null | class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
weapons : dict
dict containing already found weapons (weapon_id: :class:`Weapon`)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
def __init__(self, auth, data):
self.auth = auth
self.id = data.get("profileId")
self.userid = data.get("userId")
self.platform = data.get("platformType")
self.platform_url = PlatformURLNames[self.platform]
self.id_on_platform = data.get("idOnPlatform")
self.name = data.get("nameOnPlatform")
self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
self.ranks = {}
self.operators = {}
self.gamemodes = {}
self.weapons = []
self.casual = None
self.ranked = None
self.terrorist_hunt = None
@property
def spaceid(self):
return self.auth.spaceids[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statsitics):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(statsitics)))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
stats = {}
for x in data:
statistic = x.split(":")[0]
if statistic in statsitics:
stats[statistic] = data[x]
return stats
@asyncio.coroutine
def load_level(self):
"""|coro|
Load the players XP and level"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
if "player_profiles" in data and len(data["player_profiles"]) > 0:
self.xp = data["player_profiles"][0].get("xp", 0)
self.level = data["player_profiles"][0].get("level", 0)
else:
raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def check_level(self):
"""|coro|
Check the players XP and level, only loading it if it hasn't been loaded yet"""
if not hasattr(self, "level"):
yield from self.load_level()
@asyncio.coroutine
def load_rank(self, region, season=-1):
"""|coro|
Loads the players rank for this region and season
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s®ion_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
if "players" in data and self.id in data["players"]:
regionkey = "%s:%s" % (region, season)
self.ranks[regionkey] = Rank(data["players"][self.id])
return self.ranks[regionkey]
else:
raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_rank(self, region, season=-1):
"""|coro|
Checks the players rank for this region, only loading it if it hasn't already been found
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
cache_key = "%s:%s" % (region, season)
if cache_key in self.ranks:
return self.ranks[cache_key]
result = yield from self.load_rank(region, season)
return result
@asyncio.coroutine
def load_all_operators(self):
"""|coro|
Loads the player stats for all operators
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
statistics = "operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno"
for operator in OperatorStatisticNames:
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
statistics += "," + operator_key
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, statistics))
if "results" not in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
for operator in OperatorStatisticNames:
location = yield from self.auth.get_operator_index(operator.lower())
op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
op_data["__statistic_name"] = operator_key.split("_")[1]
self.operators[operator.lower()] = Operator(operator.lower(), op_data)
return self.operators
@asyncio.coroutine
def load_operator(self, operator):
    """|coro|

    Loads the players stats for the operator

    Parameters
    ----------
    operator : str
        the name of the operator

    Returns
    -------
    :class:`Operator`
        the operator object found

    Raises
    ------
    ValueError
        if the operator name is not recognised
    InvalidRequest
        if the API response is missing the expected results"""
    # Fix: the decorator was applied twice here; once is enough.
    location = yield from self.auth.get_operator_index(operator)
    if location is None:
        raise ValueError("invalid operator %s" % operator)
    # Append the operator's unique statistic key (if known) to the query.
    operator_key = yield from self.auth.get_operator_statistic(operator)
    if operator_key is not None:
        operator_key = "," + operator_key
    else:
        operator_key = ""
    data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
    if "results" not in data or self.id not in data["results"]:
        raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
    data = data["results"][self.id]
    # Keep only this operator's entries (keys containing its index string)
    # and reduce each key to its statistic category name.
    data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
    if operator_key:
        # NOTE(review): second "_" field of ",operatorpvp_..." -- presumably
        # the unique statistic identifier; confirm against key format.
        data["__statistic_name"] = operator_key.split("_")[1]
    oper = Operator(operator, data)
    self.operators[operator] = oper
    return oper
@asyncio.coroutine
def get_operator(self, operator):
    """|coro|

    Return the players stats for this operator, loading them from the
    API only if they are not cached yet.

    Parameters
    ----------
    operator : str
        the name of the operator

    Returns
    -------
    :class:`Operator`
        the operator object found"""
    # EAFP cache lookup; load on a miss.
    try:
        return self.operators[operator]
    except KeyError:
        op = yield from self.load_operator(operator)
        return op
@asyncio.coroutine
def load_weapons(self):
    """|coro|

    Load the players weapon stats

    Returns
    -------
    list[:class:`Weapon`]
        list of all the weapon objects found"""
    data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
    if not "results" in data or not self.id in data["results"]:
        raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
    data = data["results"][self.id]
    # One Weapon slot per weapon type (7 types, 0-indexed).
    self.weapons = [Weapon(i) for i in range(7)]
    for x in data:
        # Keys look like "weapontypepvp_<category>:<weapontype>:..." --
        # presumably; verify against the live API response.
        spl = x.split(":")
        category = spl[0].split("_")[1]
        try:
            # API weapon types appear to be 1-based; our list is 0-based.
            weapontype = int(spl[1]) - 1
            weapon = self.weapons[weapontype]
            if category == "kills": weapon.kills = data[x]
            elif category == "headshot": weapon.headshots = data[x]
            elif category == "bulletfired": weapon.shots = data[x]
            elif category == "bullethit": weapon.hits = data[x]
        except (ValueError, TypeError, IndexError):
            # Ignore malformed keys and weapon types outside the known range.
            pass
    return self.weapons
@asyncio.coroutine
def check_weapons(self):
    """|coro|

    Check the players weapon stats, only loading them if they haven't already been found

    Returns
    -------
    list[:class:`Weapon`]
        list of all the weapon objects found"""
    # An empty list means nothing has been fetched yet.
    if not self.weapons:
        yield from self.load_weapons()
    return self.weapons
@asyncio.coroutine
def load_gamemodes(self):
    """|coro|

    Loads the players gamemode stats

    Returns
    -------
    dict
        dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
    stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
                                              "secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
                                              "rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
                                              "plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
                                              "generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
                                              "generalpvp_hostagerescue", "generalpvp_hostagedefense")
    self.gamemodes = {x: Gamemode(x) for x in GamemodeNames}
    for name in self.gamemodes:
        # Per-gamemode statistics are keyed "<gamemode>pvp_<stat>";
        # missing statistics default to 0.
        statname, gamemode = name + "pvp_", self.gamemodes[name]
        gamemode.best_score = stats.get(statname + "bestscore", 0)
        gamemode.lost = stats.get(statname + "matchlost", 0)
        gamemode.won = stats.get(statname + "matchwon", 0)
        gamemode.played = stats.get(statname + "matchplayed", 0)
        if name == "securearea":
            # Mode-specific counters live under the shared "generalpvp_" keys.
            gamemode.areas_secured = stats.get("generalpvp_servershacked", 0)
            gamemode.areas_defended = stats.get("generalpvp_serverdefender", 0)
            gamemode.areas_contested = stats.get("generalpvp_serveraggression", 0)
        elif name == "rescuehostage":
            gamemode.hostages_rescued = stats.get("generalpvp_hostagerescue", 0)
            gamemode.hostages_defended = stats.get("generalpvp_hostagedefense", 0)
    return self.gamemodes
@asyncio.coroutine
def check_gamemodes(self):
    """|coro|

    Checks the players gamemode stats, only loading them if they haven't already been found

    Returns
    -------
    dict
        dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
    # An empty dict means the gamemodes were never fetched.
    if not self.gamemodes:
        yield from self.load_gamemodes()
    return self.gamemodes
@asyncio.coroutine
def load_general(self):
    """|coro|

    Loads the players general stats.

    Fetches the "generalpvp_*" statistics and stores each one as an
    attribute on this player; any statistic missing from the response
    defaults to 0."""
    stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
                                              "generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
                                              "generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
                                              "generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
                                              "generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
                                              "generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
                                              "generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
                                              "generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
    statname = "generalpvp_"
    # attribute name -> statistic suffix.  Every value uniformly defaults
    # to 0 when absent; this also fixes blind_kills, which previously
    # became None because its stats.get() call was missing the default.
    mapping = (
        ("deaths", "death"),
        ("penetration_kills", "penetrationkills"),
        ("matches_won", "matchwon"),
        ("bullets_hit", "bullethit"),
        ("melee_kills", "meleekills"),
        ("bullets_fired", "bulletfired"),
        ("matches_played", "matchplayed"),
        ("kill_assists", "killassists"),
        ("time_played", "timeplayed"),
        ("revives", "revive"),
        ("kills", "kills"),
        ("headshots", "headshot"),
        ("matches_lost", "matchlost"),
        ("dbno_assists", "dbnoassists"),
        ("suicides", "suicide"),
        ("barricades_deployed", "barricadedeployed"),
        ("reinforcements_deployed", "reinforcementdeploy"),
        ("total_xp", "totalxp"),
        ("rappel_breaches", "rappelbreach"),
        ("distance_travelled", "distancetravelled"),
        ("revives_denied", "revivedenied"),
        ("dbnos", "dbno"),
        ("gadgets_destroyed", "gadgetdestroy"),
        ("blind_kills", "blindkills"),
    )
    for attr, suffix in mapping:
        setattr(self, attr, stats.get(statname + suffix, 0))
@asyncio.coroutine
def check_general(self):
    """|coro|

    Checks the players general stats, only loading them if they haven't already been found"""
    # The "kills" attribute only exists after load_general() has run.
    if hasattr(self, "kills"):
        return
    yield from self.load_general()
@asyncio.coroutine
def load_queues(self):
    """|coro|

    Loads the players game queues (ranked and casual)."""
    stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
                                              "casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
                                              "rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
                                              "rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death")
    self.ranked = GameQueue("ranked")
    self.casual = GameQueue("casual")
    # attribute name -> statistic suffix shared by both queues;
    # missing statistics default to 0.
    fields = (("won", "matchwon"), ("lost", "matchlost"),
              ("time_played", "timeplayed"), ("played", "matchplayed"),
              ("kills", "kills"), ("deaths", "death"))
    for queue in (self.ranked, self.casual):
        prefix = queue.name + "pvp_"
        for attr, suffix in fields:
            setattr(queue, attr, stats.get(prefix + suffix, 0))
@asyncio.coroutine
def check_queues(self):
    """|coro|

    Checks the players game queues, only loading them if they haven't already been found"""
    # Both queues are populated together, so checking one suffices.
    if self.casual is not None:
        return
    yield from self.load_queues()
@asyncio.coroutine
def load_terrohunt(self):
    """|coro|

    Loads the player's general stats for terrorist hunt.

    Returns
    -------
    :class:`GameQueue`
        the populated terrorist-hunt queue (also stored on
        ``self.terrorist_hunt``)"""
    stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
                                              "generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
                                              "generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
                                              "generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
                                              "generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
                                              "generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
                                              "generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
                                              "generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
                                              "generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
                                              "generalpve_gadgetdestroy", "generalpve_accuracy")
    self.terrorist_hunt = hunt = GameQueue("terrohunt")
    prefix = "generalpve_"
    # attribute name -> statistic suffix; missing statistics default to 0.
    for attr, suffix in (
        ("deaths", "death"),
        ("penetration_kills", "penetrationkills"),
        ("matches_won", "matchwon"),
        ("bullets_hit", "bullethit"),
        ("melee_kills", "meleekills"),
        ("bullets_fired", "bulletfired"),
        ("matches_played", "matchplayed"),
        ("kill_assists", "killassists"),
        ("time_played", "timeplayed"),
        ("revives", "revive"),
        ("kills", "kills"),
        ("headshots", "headshot"),
        ("matches_lost", "matchlost"),
        ("dbno_assists", "dbnoassists"),
        ("suicides", "suicide"),
        ("barricades_deployed", "barricadedeployed"),
        ("reinforcements_deployed", "reinforcementdeploy"),
        ("total_xp", "totalxp"),
        ("rappel_breaches", "rappelbreach"),
        ("distance_travelled", "distancetravelled"),
        ("revives_denied", "revivedenied"),
        ("dbnos", "dbno"),
        ("gadgets_destroyed", "gadgetdestroy"),
        ("areas_secured", "servershacked"),
        ("areas_defended", "serverdefender"),
        ("areas_contested", "serveraggression"),
        ("hostages_rescued", "hostagerescue"),
        ("hostages_defended", "hostagedefense"),
        ("blind_kills", "blindkills"),
    ):
        setattr(hunt, attr, stats.get(prefix + suffix, 0))
    return hunt
@asyncio.coroutine
def check_terrohunt(self):
    """|coro|

    Checks the players general stats for terrorist hunt, only loading them if they haven't been loaded already"""
    hunt = self.terrorist_hunt
    if hunt is None:
        # load_terrohunt() stores and returns the populated queue.
        hunt = yield from self.load_terrohunt()
    return hunt
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.load_operator | python | def load_operator(self, operator):
location = yield from self.auth.get_operator_index(operator)
if location is None:
raise ValueError("invalid operator %s" % operator)
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key is not None:
operator_key = "," + operator_key
else:
operator_key = ""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
if operator_key:
data["__statistic_name"] = operator_key.split("_")[1]
#if len(data) < 5:
# raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
oper = Operator(operator, data)
self.operators[operator] = oper
return oper | |coro|
Loads the players stats for the operator
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1215-L1256 | null | class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
weapons : dict
dict containing already found weapons (weapon_id: :class:`Weapon`)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
def __init__(self, auth, data):
self.auth = auth
self.id = data.get("profileId")
self.userid = data.get("userId")
self.platform = data.get("platformType")
self.platform_url = PlatformURLNames[self.platform]
self.id_on_platform = data.get("idOnPlatform")
self.name = data.get("nameOnPlatform")
self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
self.ranks = {}
self.operators = {}
self.gamemodes = {}
self.weapons = []
self.casual = None
self.ranked = None
self.terrorist_hunt = None
@property
def spaceid(self):
return self.auth.spaceids[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statsitics):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(statsitics)))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
stats = {}
for x in data:
statistic = x.split(":")[0]
if statistic in statsitics:
stats[statistic] = data[x]
return stats
@asyncio.coroutine
def load_level(self):
"""|coro|
Load the players XP and level"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
if "player_profiles" in data and len(data["player_profiles"]) > 0:
self.xp = data["player_profiles"][0].get("xp", 0)
self.level = data["player_profiles"][0].get("level", 0)
else:
raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def check_level(self):
"""|coro|
Check the players XP and level, only loading it if it hasn't been loaded yet"""
if not hasattr(self, "level"):
yield from self.load_level()
@asyncio.coroutine
def load_rank(self, region, season=-1):
"""|coro|
Loads the players rank for this region and season
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s®ion_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
if "players" in data and self.id in data["players"]:
regionkey = "%s:%s" % (region, season)
self.ranks[regionkey] = Rank(data["players"][self.id])
return self.ranks[regionkey]
else:
raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_rank(self, region, season=-1):
"""|coro|
Checks the players rank for this region, only loading it if it hasn't already been found
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
cache_key = "%s:%s" % (region, season)
if cache_key in self.ranks:
return self.ranks[cache_key]
result = yield from self.load_rank(region, season)
return result
@asyncio.coroutine
def load_all_operators(self):
"""|coro|
Loads the player stats for all operators
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
statistics = "operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno"
for operator in OperatorStatisticNames:
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
statistics += "," + operator_key
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, statistics))
if "results" not in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
for operator in OperatorStatisticNames:
location = yield from self.auth.get_operator_index(operator.lower())
op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
op_data["__statistic_name"] = operator_key.split("_")[1]
self.operators[operator.lower()] = Operator(operator.lower(), op_data)
return self.operators
@asyncio.coroutine
def get_all_operators(self):
"""|coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
if len(self.operators) >= len(OperatorStatisticNames):
return self.operators
result = yield from self.load_all_operators()
return result
@asyncio.coroutine
@asyncio.coroutine
def get_operator(self, operator):
"""|coro|
Checks the players stats for this operator, only loading them if they haven't already been found
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
if operator in self.operators:
return self.operators[operator]
result = yield from self.load_operator(operator)
return result
@asyncio.coroutine
def load_weapons(self):
"""|coro|
Load the players weapon stats
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
data = data["results"][self.id]
self.weapons = [Weapon(i) for i in range(7)]
for x in data:
spl = x.split(":")
category = spl[0].split("_")[1]
try:
weapontype = int(spl[1]) - 1
weapon = self.weapons[weapontype]
if category == "kills": weapon.kills = data[x]
elif category == "headshot": weapon.headshots = data[x]
elif category == "bulletfired": weapon.shots = data[x]
elif category == "bullethit": weapon.hits = data[x]
except (ValueError, TypeError, IndexError):
pass
return self.weapons
@asyncio.coroutine
def check_weapons(self):
"""|coro|
Check the players weapon stats, only loading them if they haven't already been found
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
if len(self.weapons) == 0:
yield from self.load_weapons()
return self.weapons
@asyncio.coroutine
def load_gamemodes(self):
"""|coro|
Loads the players gamemode stats
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
"secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
"rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
"plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
"generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
"generalpvp_hostagerescue", "generalpvp_hostagedefense")
self.gamemodes = {x: Gamemode(x) for x in GamemodeNames}
for name in self.gamemodes:
statname, gamemode = name + "pvp_", self.gamemodes[name]
gamemode.best_score = stats.get(statname + "bestscore", 0)
gamemode.lost = stats.get(statname + "matchlost", 0)
gamemode.won = stats.get(statname + "matchwon", 0)
gamemode.played = stats.get(statname + "matchplayed", 0)
if name == "securearea":
gamemode.areas_secured = stats.get("generalpvp_servershacked", 0)
gamemode.areas_defended = stats.get("generalpvp_serverdefender", 0)
gamemode.areas_contested = stats.get("generalpvp_serveraggression", 0)
elif name == "rescuehostage":
gamemode.hostages_rescued = stats.get("generalpvp_hostagerescue", 0)
gamemode.hostages_defended = stats.get("generalpvp_hostagedefense", 0)
return self.gamemodes
@asyncio.coroutine
def check_gamemodes(self):
"""|coro|
Checks the players gamemode stats, only loading them if they haven't already been found
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
if len(self.gamemodes) == 0:
yield from self.load_gamemodes()
return self.gamemodes
@asyncio.coroutine
def load_general(self):
"""|coro|
Loads the players general stats"""
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills")
@asyncio.coroutine
def check_general(self):
"""|coro|
Checks the players general stats, only loading them if they haven't already been found"""
if not hasattr(self, "kills"):
yield from self.load_general()
@asyncio.coroutine
def load_queues(self):
"""|coro|
Loads the players game queues"""
stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
"casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
"rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
"rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death")
self.ranked = GameQueue("ranked")
self.casual = GameQueue("casual")
for gq in (self.ranked, self.casual):
statname = gq.name + "pvp_"
gq.won = stats.get(statname + "matchwon", 0)
gq.lost = stats.get(statname + "matchlost", 0)
gq.time_played = stats.get(statname + "timeplayed", 0)
gq.played = stats.get(statname + "matchplayed", 0)
gq.kills = stats.get(statname + "kills", 0)
gq.deaths = stats.get(statname + "death", 0)
@asyncio.coroutine
def check_queues(self):
"""|coro|
Checks the players game queues, only loading them if they haven't already been found"""
if self.casual is None:
yield from self.load_queues()
@asyncio.coroutine
def load_terrohunt(self):
"""|coro|
Loads the player's general stats for terrorist hunt"""
stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
"generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
"generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
"generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
"generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
"generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
"generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
"generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
"generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
"generalpve_gadgetdestroy", "generalpve_accuracy")
self.terrorist_hunt = GameQueue("terrohunt")
statname = "generalpve_"
self.terrorist_hunt.deaths = stats.get(statname + "death", 0)
self.terrorist_hunt.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.terrorist_hunt.matches_won = stats.get(statname + "matchwon", 0)
self.terrorist_hunt.bullets_hit = stats.get(statname + "bullethit", 0)
self.terrorist_hunt.melee_kills = stats.get(statname + "meleekills", 0)
self.terrorist_hunt.bullets_fired = stats.get(statname + "bulletfired", 0)
self.terrorist_hunt.matches_played = stats.get(statname + "matchplayed", 0)
self.terrorist_hunt.kill_assists = stats.get(statname + "killassists", 0)
self.terrorist_hunt.time_played = stats.get(statname + "timeplayed", 0)
self.terrorist_hunt.revives = stats.get(statname + "revive", 0)
self.terrorist_hunt.kills = stats.get(statname + "kills", 0)
self.terrorist_hunt.headshots = stats.get(statname + "headshot", 0)
self.terrorist_hunt.matches_lost = stats.get(statname + "matchlost", 0)
self.terrorist_hunt.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.terrorist_hunt.suicides = stats.get(statname + "suicide", 0)
self.terrorist_hunt.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.terrorist_hunt.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.terrorist_hunt.total_xp = stats.get(statname + "totalxp", 0)
self.terrorist_hunt.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.terrorist_hunt.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.terrorist_hunt.revives_denied = stats.get(statname + "revivedenied", 0)
self.terrorist_hunt.dbnos = stats.get(statname + "dbno", 0)
self.terrorist_hunt.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.terrorist_hunt.areas_secured = stats.get(statname + "servershacked", 0)
self.terrorist_hunt.areas_defended = stats.get(statname + "serverdefender", 0)
self.terrorist_hunt.areas_contested = stats.get(statname + "serveraggression", 0)
self.terrorist_hunt.hostages_rescued = stats.get(statname + "hostagerescue", 0)
self.terrorist_hunt.hostages_defended = stats.get(statname + "hostagedefense", 0)
self.terrorist_hunt.blind_kills = stats.get(statname + "blindkills", 0)
return self.terrorist_hunt
@asyncio.coroutine
def check_terrohunt(self):
"""|coro|
Checks the players general stats for terrorist hunt, only loading them if they haven't been loaded already"""
if self.terrorist_hunt is None:
yield from self.load_terrohunt()
return self.terrorist_hunt
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.get_operator | python | def get_operator(self, operator):
if operator in self.operators:
return self.operators[operator]
result = yield from self.load_operator(operator)
return result | |coro|
Checks the players stats for this operator, only loading them if they haven't already been found
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1259-L1277 | null | class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
weapons : dict
dict containing already found weapons (weapon_id: :class:`Weapon`)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
def __init__(self, auth, data):
self.auth = auth
self.id = data.get("profileId")
self.userid = data.get("userId")
self.platform = data.get("platformType")
self.platform_url = PlatformURLNames[self.platform]
self.id_on_platform = data.get("idOnPlatform")
self.name = data.get("nameOnPlatform")
self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
self.ranks = {}
self.operators = {}
self.gamemodes = {}
self.weapons = []
self.casual = None
self.ranked = None
self.terrorist_hunt = None
@property
def spaceid(self):
return self.auth.spaceids[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statsitics):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(statsitics)))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
stats = {}
for x in data:
statistic = x.split(":")[0]
if statistic in statsitics:
stats[statistic] = data[x]
return stats
@asyncio.coroutine
def load_level(self):
"""|coro|
Load the players XP and level"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
if "player_profiles" in data and len(data["player_profiles"]) > 0:
self.xp = data["player_profiles"][0].get("xp", 0)
self.level = data["player_profiles"][0].get("level", 0)
else:
raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def check_level(self):
"""|coro|
Check the players XP and level, only loading it if it hasn't been loaded yet"""
if not hasattr(self, "level"):
yield from self.load_level()
@asyncio.coroutine
def load_rank(self, region, season=-1):
"""|coro|
Loads the players rank for this region and season
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s®ion_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
if "players" in data and self.id in data["players"]:
regionkey = "%s:%s" % (region, season)
self.ranks[regionkey] = Rank(data["players"][self.id])
return self.ranks[regionkey]
else:
raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_rank(self, region, season=-1):
"""|coro|
Checks the players rank for this region, only loading it if it hasn't already been found
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
cache_key = "%s:%s" % (region, season)
if cache_key in self.ranks:
return self.ranks[cache_key]
result = yield from self.load_rank(region, season)
return result
@asyncio.coroutine
def load_all_operators(self):
"""|coro|
Loads the player stats for all operators
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
statistics = "operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno"
for operator in OperatorStatisticNames:
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
statistics += "," + operator_key
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, statistics))
if "results" not in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
for operator in OperatorStatisticNames:
location = yield from self.auth.get_operator_index(operator.lower())
op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
op_data["__statistic_name"] = operator_key.split("_")[1]
self.operators[operator.lower()] = Operator(operator.lower(), op_data)
return self.operators
@asyncio.coroutine
def get_all_operators(self):
"""|coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
if len(self.operators) >= len(OperatorStatisticNames):
return self.operators
result = yield from self.load_all_operators()
return result
@asyncio.coroutine
def load_operator(self, operator):
"""|coro|
Loads the players stats for the operator
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
location = yield from self.auth.get_operator_index(operator)
if location is None:
raise ValueError("invalid operator %s" % operator)
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key is not None:
operator_key = "," + operator_key
else:
operator_key = ""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
if operator_key:
data["__statistic_name"] = operator_key.split("_")[1]
#if len(data) < 5:
# raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
oper = Operator(operator, data)
self.operators[operator] = oper
return oper
@asyncio.coroutine
@asyncio.coroutine
def load_weapons(self):
"""|coro|
Load the players weapon stats
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
data = data["results"][self.id]
self.weapons = [Weapon(i) for i in range(7)]
for x in data:
spl = x.split(":")
category = spl[0].split("_")[1]
try:
weapontype = int(spl[1]) - 1
weapon = self.weapons[weapontype]
if category == "kills": weapon.kills = data[x]
elif category == "headshot": weapon.headshots = data[x]
elif category == "bulletfired": weapon.shots = data[x]
elif category == "bullethit": weapon.hits = data[x]
except (ValueError, TypeError, IndexError):
pass
return self.weapons
@asyncio.coroutine
def check_weapons(self):
"""|coro|
Check the players weapon stats, only loading them if they haven't already been found
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
if len(self.weapons) == 0:
yield from self.load_weapons()
return self.weapons
@asyncio.coroutine
def load_gamemodes(self):
"""|coro|
Loads the players gamemode stats
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
"secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
"rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
"plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
"generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
"generalpvp_hostagerescue", "generalpvp_hostagedefense")
self.gamemodes = {x: Gamemode(x) for x in GamemodeNames}
for name in self.gamemodes:
statname, gamemode = name + "pvp_", self.gamemodes[name]
gamemode.best_score = stats.get(statname + "bestscore", 0)
gamemode.lost = stats.get(statname + "matchlost", 0)
gamemode.won = stats.get(statname + "matchwon", 0)
gamemode.played = stats.get(statname + "matchplayed", 0)
if name == "securearea":
gamemode.areas_secured = stats.get("generalpvp_servershacked", 0)
gamemode.areas_defended = stats.get("generalpvp_serverdefender", 0)
gamemode.areas_contested = stats.get("generalpvp_serveraggression", 0)
elif name == "rescuehostage":
gamemode.hostages_rescued = stats.get("generalpvp_hostagerescue", 0)
gamemode.hostages_defended = stats.get("generalpvp_hostagedefense", 0)
return self.gamemodes
@asyncio.coroutine
def check_gamemodes(self):
"""|coro|
Checks the players gamemode stats, only loading them if they haven't already been found
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
if len(self.gamemodes) == 0:
yield from self.load_gamemodes()
return self.gamemodes
@asyncio.coroutine
def load_general(self):
"""|coro|
Loads the players general stats"""
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills")
@asyncio.coroutine
def check_general(self):
"""|coro|
Checks the players general stats, only loading them if they haven't already been found"""
if not hasattr(self, "kills"):
yield from self.load_general()
@asyncio.coroutine
def load_queues(self):
"""|coro|
Loads the players game queues"""
stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
"casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
"rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
"rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death")
self.ranked = GameQueue("ranked")
self.casual = GameQueue("casual")
for gq in (self.ranked, self.casual):
statname = gq.name + "pvp_"
gq.won = stats.get(statname + "matchwon", 0)
gq.lost = stats.get(statname + "matchlost", 0)
gq.time_played = stats.get(statname + "timeplayed", 0)
gq.played = stats.get(statname + "matchplayed", 0)
gq.kills = stats.get(statname + "kills", 0)
gq.deaths = stats.get(statname + "death", 0)
@asyncio.coroutine
def check_queues(self):
"""|coro|
Checks the players game queues, only loading them if they haven't already been found"""
if self.casual is None:
yield from self.load_queues()
@asyncio.coroutine
def load_terrohunt(self):
"""|coro|
Loads the player's general stats for terrorist hunt"""
stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
"generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
"generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
"generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
"generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
"generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
"generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
"generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
"generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
"generalpve_gadgetdestroy", "generalpve_accuracy")
self.terrorist_hunt = GameQueue("terrohunt")
statname = "generalpve_"
self.terrorist_hunt.deaths = stats.get(statname + "death", 0)
self.terrorist_hunt.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.terrorist_hunt.matches_won = stats.get(statname + "matchwon", 0)
self.terrorist_hunt.bullets_hit = stats.get(statname + "bullethit", 0)
self.terrorist_hunt.melee_kills = stats.get(statname + "meleekills", 0)
self.terrorist_hunt.bullets_fired = stats.get(statname + "bulletfired", 0)
self.terrorist_hunt.matches_played = stats.get(statname + "matchplayed", 0)
self.terrorist_hunt.kill_assists = stats.get(statname + "killassists", 0)
self.terrorist_hunt.time_played = stats.get(statname + "timeplayed", 0)
self.terrorist_hunt.revives = stats.get(statname + "revive", 0)
self.terrorist_hunt.kills = stats.get(statname + "kills", 0)
self.terrorist_hunt.headshots = stats.get(statname + "headshot", 0)
self.terrorist_hunt.matches_lost = stats.get(statname + "matchlost", 0)
self.terrorist_hunt.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.terrorist_hunt.suicides = stats.get(statname + "suicide", 0)
self.terrorist_hunt.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.terrorist_hunt.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.terrorist_hunt.total_xp = stats.get(statname + "totalxp", 0)
self.terrorist_hunt.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.terrorist_hunt.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.terrorist_hunt.revives_denied = stats.get(statname + "revivedenied", 0)
self.terrorist_hunt.dbnos = stats.get(statname + "dbno", 0)
self.terrorist_hunt.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.terrorist_hunt.areas_secured = stats.get(statname + "servershacked", 0)
self.terrorist_hunt.areas_defended = stats.get(statname + "serverdefender", 0)
self.terrorist_hunt.areas_contested = stats.get(statname + "serveraggression", 0)
self.terrorist_hunt.hostages_rescued = stats.get(statname + "hostagerescue", 0)
self.terrorist_hunt.hostages_defended = stats.get(statname + "hostagedefense", 0)
self.terrorist_hunt.blind_kills = stats.get(statname + "blindkills", 0)
return self.terrorist_hunt
@asyncio.coroutine
def check_terrohunt(self):
"""|coro|
Checks the players general stats for terrorist hunt, only loading them if they haven't been loaded already"""
if self.terrorist_hunt is None:
yield from self.load_terrohunt()
return self.terrorist_hunt
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.load_weapons | python | def load_weapons(self):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
data = data["results"][self.id]
self.weapons = [Weapon(i) for i in range(7)]
for x in data:
spl = x.split(":")
category = spl[0].split("_")[1]
try:
weapontype = int(spl[1]) - 1
weapon = self.weapons[weapontype]
if category == "kills": weapon.kills = data[x]
elif category == "headshot": weapon.headshots = data[x]
elif category == "bulletfired": weapon.shots = data[x]
elif category == "bullethit": weapon.hits = data[x]
except (ValueError, TypeError, IndexError):
pass
return self.weapons | |coro|
Load the players weapon stats
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1280-L1310 | null | class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
weapons : dict
dict containing already found weapons (weapon_id: :class:`Weapon`)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
def __init__(self, auth, data):
self.auth = auth
self.id = data.get("profileId")
self.userid = data.get("userId")
self.platform = data.get("platformType")
self.platform_url = PlatformURLNames[self.platform]
self.id_on_platform = data.get("idOnPlatform")
self.name = data.get("nameOnPlatform")
self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
self.ranks = {}
self.operators = {}
self.gamemodes = {}
self.weapons = []
self.casual = None
self.ranked = None
self.terrorist_hunt = None
@property
def spaceid(self):
return self.auth.spaceids[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statsitics):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(statsitics)))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
stats = {}
for x in data:
statistic = x.split(":")[0]
if statistic in statsitics:
stats[statistic] = data[x]
return stats
@asyncio.coroutine
def load_level(self):
"""|coro|
Load the players XP and level"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
if "player_profiles" in data and len(data["player_profiles"]) > 0:
self.xp = data["player_profiles"][0].get("xp", 0)
self.level = data["player_profiles"][0].get("level", 0)
else:
raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def check_level(self):
"""|coro|
Check the players XP and level, only loading it if it hasn't been loaded yet"""
if not hasattr(self, "level"):
yield from self.load_level()
@asyncio.coroutine
def load_rank(self, region, season=-1):
"""|coro|
Loads the players rank for this region and season
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s®ion_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
if "players" in data and self.id in data["players"]:
regionkey = "%s:%s" % (region, season)
self.ranks[regionkey] = Rank(data["players"][self.id])
return self.ranks[regionkey]
else:
raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_rank(self, region, season=-1):
"""|coro|
Checks the players rank for this region, only loading it if it hasn't already been found
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
cache_key = "%s:%s" % (region, season)
if cache_key in self.ranks:
return self.ranks[cache_key]
result = yield from self.load_rank(region, season)
return result
@asyncio.coroutine
def load_all_operators(self):
"""|coro|
Loads the player stats for all operators
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
statistics = "operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno"
for operator in OperatorStatisticNames:
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
statistics += "," + operator_key
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, statistics))
if "results" not in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
for operator in OperatorStatisticNames:
location = yield from self.auth.get_operator_index(operator.lower())
op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
op_data["__statistic_name"] = operator_key.split("_")[1]
self.operators[operator.lower()] = Operator(operator.lower(), op_data)
return self.operators
@asyncio.coroutine
def get_all_operators(self):
"""|coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
if len(self.operators) >= len(OperatorStatisticNames):
return self.operators
result = yield from self.load_all_operators()
return result
@asyncio.coroutine
def load_operator(self, operator):
"""|coro|
Loads the players stats for the operator
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
location = yield from self.auth.get_operator_index(operator)
if location is None:
raise ValueError("invalid operator %s" % operator)
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key is not None:
operator_key = "," + operator_key
else:
operator_key = ""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
if operator_key:
data["__statistic_name"] = operator_key.split("_")[1]
#if len(data) < 5:
# raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
oper = Operator(operator, data)
self.operators[operator] = oper
return oper
@asyncio.coroutine
def get_operator(self, operator):
"""|coro|
Checks the players stats for this operator, only loading them if they haven't already been found
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
if operator in self.operators:
return self.operators[operator]
result = yield from self.load_operator(operator)
return result
@asyncio.coroutine
@asyncio.coroutine
def check_weapons(self):
"""|coro|
Check the players weapon stats, only loading them if they haven't already been found
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
if len(self.weapons) == 0:
yield from self.load_weapons()
return self.weapons
@asyncio.coroutine
def load_gamemodes(self):
"""|coro|
Loads the players gamemode stats
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
"secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
"rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
"plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
"generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
"generalpvp_hostagerescue", "generalpvp_hostagedefense")
self.gamemodes = {x: Gamemode(x) for x in GamemodeNames}
for name in self.gamemodes:
statname, gamemode = name + "pvp_", self.gamemodes[name]
gamemode.best_score = stats.get(statname + "bestscore", 0)
gamemode.lost = stats.get(statname + "matchlost", 0)
gamemode.won = stats.get(statname + "matchwon", 0)
gamemode.played = stats.get(statname + "matchplayed", 0)
if name == "securearea":
gamemode.areas_secured = stats.get("generalpvp_servershacked", 0)
gamemode.areas_defended = stats.get("generalpvp_serverdefender", 0)
gamemode.areas_contested = stats.get("generalpvp_serveraggression", 0)
elif name == "rescuehostage":
gamemode.hostages_rescued = stats.get("generalpvp_hostagerescue", 0)
gamemode.hostages_defended = stats.get("generalpvp_hostagedefense", 0)
return self.gamemodes
@asyncio.coroutine
def check_gamemodes(self):
"""|coro|
Checks the players gamemode stats, only loading them if they haven't already been found
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
if len(self.gamemodes) == 0:
yield from self.load_gamemodes()
return self.gamemodes
@asyncio.coroutine
def load_general(self):
"""|coro|
Loads the players general stats"""
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills")
@asyncio.coroutine
def check_general(self):
"""|coro|
Checks the players general stats, only loading them if they haven't already been found"""
if not hasattr(self, "kills"):
yield from self.load_general()
@asyncio.coroutine
def load_queues(self):
"""|coro|
Loads the players game queues"""
stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
"casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
"rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
"rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death")
self.ranked = GameQueue("ranked")
self.casual = GameQueue("casual")
for gq in (self.ranked, self.casual):
statname = gq.name + "pvp_"
gq.won = stats.get(statname + "matchwon", 0)
gq.lost = stats.get(statname + "matchlost", 0)
gq.time_played = stats.get(statname + "timeplayed", 0)
gq.played = stats.get(statname + "matchplayed", 0)
gq.kills = stats.get(statname + "kills", 0)
gq.deaths = stats.get(statname + "death", 0)
@asyncio.coroutine
def check_queues(self):
"""|coro|
Checks the players game queues, only loading them if they haven't already been found"""
if self.casual is None:
yield from self.load_queues()
@asyncio.coroutine
def load_terrohunt(self):
"""|coro|
Loads the player's general stats for terrorist hunt"""
stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
"generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
"generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
"generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
"generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
"generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
"generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
"generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
"generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
"generalpve_gadgetdestroy", "generalpve_accuracy")
self.terrorist_hunt = GameQueue("terrohunt")
statname = "generalpve_"
self.terrorist_hunt.deaths = stats.get(statname + "death", 0)
self.terrorist_hunt.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.terrorist_hunt.matches_won = stats.get(statname + "matchwon", 0)
self.terrorist_hunt.bullets_hit = stats.get(statname + "bullethit", 0)
self.terrorist_hunt.melee_kills = stats.get(statname + "meleekills", 0)
self.terrorist_hunt.bullets_fired = stats.get(statname + "bulletfired", 0)
self.terrorist_hunt.matches_played = stats.get(statname + "matchplayed", 0)
self.terrorist_hunt.kill_assists = stats.get(statname + "killassists", 0)
self.terrorist_hunt.time_played = stats.get(statname + "timeplayed", 0)
self.terrorist_hunt.revives = stats.get(statname + "revive", 0)
self.terrorist_hunt.kills = stats.get(statname + "kills", 0)
self.terrorist_hunt.headshots = stats.get(statname + "headshot", 0)
self.terrorist_hunt.matches_lost = stats.get(statname + "matchlost", 0)
self.terrorist_hunt.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.terrorist_hunt.suicides = stats.get(statname + "suicide", 0)
self.terrorist_hunt.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.terrorist_hunt.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.terrorist_hunt.total_xp = stats.get(statname + "totalxp", 0)
self.terrorist_hunt.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.terrorist_hunt.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.terrorist_hunt.revives_denied = stats.get(statname + "revivedenied", 0)
self.terrorist_hunt.dbnos = stats.get(statname + "dbno", 0)
self.terrorist_hunt.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.terrorist_hunt.areas_secured = stats.get(statname + "servershacked", 0)
self.terrorist_hunt.areas_defended = stats.get(statname + "serverdefender", 0)
self.terrorist_hunt.areas_contested = stats.get(statname + "serveraggression", 0)
self.terrorist_hunt.hostages_rescued = stats.get(statname + "hostagerescue", 0)
self.terrorist_hunt.hostages_defended = stats.get(statname + "hostagedefense", 0)
self.terrorist_hunt.blind_kills = stats.get(statname + "blindkills", 0)
return self.terrorist_hunt
@asyncio.coroutine
def check_terrohunt(self):
"""|coro|
Checks the players general stats for terrorist hunt, only loading them if they haven't been loaded already"""
if self.terrorist_hunt is None:
yield from self.load_terrohunt()
return self.terrorist_hunt
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.load_gamemodes | python | def load_gamemodes(self):
stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
"secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
"rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
"plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
"generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
"generalpvp_hostagerescue", "generalpvp_hostagedefense")
self.gamemodes = {x: Gamemode(x) for x in GamemodeNames}
for name in self.gamemodes:
statname, gamemode = name + "pvp_", self.gamemodes[name]
gamemode.best_score = stats.get(statname + "bestscore", 0)
gamemode.lost = stats.get(statname + "matchlost", 0)
gamemode.won = stats.get(statname + "matchwon", 0)
gamemode.played = stats.get(statname + "matchplayed", 0)
if name == "securearea":
gamemode.areas_secured = stats.get("generalpvp_servershacked", 0)
gamemode.areas_defended = stats.get("generalpvp_serverdefender", 0)
gamemode.areas_contested = stats.get("generalpvp_serveraggression", 0)
elif name == "rescuehostage":
gamemode.hostages_rescued = stats.get("generalpvp_hostagerescue", 0)
gamemode.hostages_defended = stats.get("generalpvp_hostagedefense", 0)
return self.gamemodes | |coro|
Loads the players gamemode stats
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`) | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1327-L1363 | null | class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
weapons : dict
dict containing already found weapons (weapon_id: :class:`Weapon`)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
def __init__(self, auth, data):
self.auth = auth
self.id = data.get("profileId")
self.userid = data.get("userId")
self.platform = data.get("platformType")
self.platform_url = PlatformURLNames[self.platform]
self.id_on_platform = data.get("idOnPlatform")
self.name = data.get("nameOnPlatform")
self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
self.ranks = {}
self.operators = {}
self.gamemodes = {}
self.weapons = []
self.casual = None
self.ranked = None
self.terrorist_hunt = None
@property
def spaceid(self):
return self.auth.spaceids[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statsitics):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(statsitics)))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
stats = {}
for x in data:
statistic = x.split(":")[0]
if statistic in statsitics:
stats[statistic] = data[x]
return stats
@asyncio.coroutine
def load_level(self):
"""|coro|
Load the players XP and level"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
if "player_profiles" in data and len(data["player_profiles"]) > 0:
self.xp = data["player_profiles"][0].get("xp", 0)
self.level = data["player_profiles"][0].get("level", 0)
else:
raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def check_level(self):
"""|coro|
Check the players XP and level, only loading it if it hasn't been loaded yet"""
if not hasattr(self, "level"):
yield from self.load_level()
@asyncio.coroutine
def load_rank(self, region, season=-1):
"""|coro|
Loads the players rank for this region and season
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s®ion_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
if "players" in data and self.id in data["players"]:
regionkey = "%s:%s" % (region, season)
self.ranks[regionkey] = Rank(data["players"][self.id])
return self.ranks[regionkey]
else:
raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_rank(self, region, season=-1):
"""|coro|
Checks the players rank for this region, only loading it if it hasn't already been found
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
cache_key = "%s:%s" % (region, season)
if cache_key in self.ranks:
return self.ranks[cache_key]
result = yield from self.load_rank(region, season)
return result
@asyncio.coroutine
def load_all_operators(self):
"""|coro|
Loads the player stats for all operators
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
statistics = "operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno"
for operator in OperatorStatisticNames:
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
statistics += "," + operator_key
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, statistics))
if "results" not in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
for operator in OperatorStatisticNames:
location = yield from self.auth.get_operator_index(operator.lower())
op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
op_data["__statistic_name"] = operator_key.split("_")[1]
self.operators[operator.lower()] = Operator(operator.lower(), op_data)
return self.operators
@asyncio.coroutine
def get_all_operators(self):
"""|coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
if len(self.operators) >= len(OperatorStatisticNames):
return self.operators
result = yield from self.load_all_operators()
return result
@asyncio.coroutine
def load_operator(self, operator):
"""|coro|
Loads the players stats for the operator
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
location = yield from self.auth.get_operator_index(operator)
if location is None:
raise ValueError("invalid operator %s" % operator)
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key is not None:
operator_key = "," + operator_key
else:
operator_key = ""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
if operator_key:
data["__statistic_name"] = operator_key.split("_")[1]
#if len(data) < 5:
# raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
oper = Operator(operator, data)
self.operators[operator] = oper
return oper
@asyncio.coroutine
def get_operator(self, operator):
"""|coro|
Checks the players stats for this operator, only loading them if they haven't already been found
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
if operator in self.operators:
return self.operators[operator]
result = yield from self.load_operator(operator)
return result
@asyncio.coroutine
def load_weapons(self):
"""|coro|
Load the players weapon stats
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
data = data["results"][self.id]
self.weapons = [Weapon(i) for i in range(7)]
for x in data:
spl = x.split(":")
category = spl[0].split("_")[1]
try:
weapontype = int(spl[1]) - 1
weapon = self.weapons[weapontype]
if category == "kills": weapon.kills = data[x]
elif category == "headshot": weapon.headshots = data[x]
elif category == "bulletfired": weapon.shots = data[x]
elif category == "bullethit": weapon.hits = data[x]
except (ValueError, TypeError, IndexError):
pass
return self.weapons
@asyncio.coroutine
def check_weapons(self):
"""|coro|
Check the players weapon stats, only loading them if they haven't already been found
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
if len(self.weapons) == 0:
yield from self.load_weapons()
return self.weapons
@asyncio.coroutine
@asyncio.coroutine
def check_gamemodes(self):
"""|coro|
Checks the players gamemode stats, only loading them if they haven't already been found
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
if len(self.gamemodes) == 0:
yield from self.load_gamemodes()
return self.gamemodes
@asyncio.coroutine
def load_general(self):
"""|coro|
Loads the players general stats"""
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills")
@asyncio.coroutine
def check_general(self):
"""|coro|
Checks the players general stats, only loading them if they haven't already been found"""
if not hasattr(self, "kills"):
yield from self.load_general()
@asyncio.coroutine
def load_queues(self):
"""|coro|
Loads the players game queues"""
stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
"casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
"rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
"rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death")
self.ranked = GameQueue("ranked")
self.casual = GameQueue("casual")
for gq in (self.ranked, self.casual):
statname = gq.name + "pvp_"
gq.won = stats.get(statname + "matchwon", 0)
gq.lost = stats.get(statname + "matchlost", 0)
gq.time_played = stats.get(statname + "timeplayed", 0)
gq.played = stats.get(statname + "matchplayed", 0)
gq.kills = stats.get(statname + "kills", 0)
gq.deaths = stats.get(statname + "death", 0)
@asyncio.coroutine
def check_queues(self):
"""|coro|
Checks the players game queues, only loading them if they haven't already been found"""
if self.casual is None:
yield from self.load_queues()
@asyncio.coroutine
def load_terrohunt(self):
"""|coro|
Loads the player's general stats for terrorist hunt"""
stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
"generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
"generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
"generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
"generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
"generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
"generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
"generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
"generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
"generalpve_gadgetdestroy", "generalpve_accuracy")
self.terrorist_hunt = GameQueue("terrohunt")
statname = "generalpve_"
self.terrorist_hunt.deaths = stats.get(statname + "death", 0)
self.terrorist_hunt.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.terrorist_hunt.matches_won = stats.get(statname + "matchwon", 0)
self.terrorist_hunt.bullets_hit = stats.get(statname + "bullethit", 0)
self.terrorist_hunt.melee_kills = stats.get(statname + "meleekills", 0)
self.terrorist_hunt.bullets_fired = stats.get(statname + "bulletfired", 0)
self.terrorist_hunt.matches_played = stats.get(statname + "matchplayed", 0)
self.terrorist_hunt.kill_assists = stats.get(statname + "killassists", 0)
self.terrorist_hunt.time_played = stats.get(statname + "timeplayed", 0)
self.terrorist_hunt.revives = stats.get(statname + "revive", 0)
self.terrorist_hunt.kills = stats.get(statname + "kills", 0)
self.terrorist_hunt.headshots = stats.get(statname + "headshot", 0)
self.terrorist_hunt.matches_lost = stats.get(statname + "matchlost", 0)
self.terrorist_hunt.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.terrorist_hunt.suicides = stats.get(statname + "suicide", 0)
self.terrorist_hunt.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.terrorist_hunt.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.terrorist_hunt.total_xp = stats.get(statname + "totalxp", 0)
self.terrorist_hunt.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.terrorist_hunt.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.terrorist_hunt.revives_denied = stats.get(statname + "revivedenied", 0)
self.terrorist_hunt.dbnos = stats.get(statname + "dbno", 0)
self.terrorist_hunt.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.terrorist_hunt.areas_secured = stats.get(statname + "servershacked", 0)
self.terrorist_hunt.areas_defended = stats.get(statname + "serverdefender", 0)
self.terrorist_hunt.areas_contested = stats.get(statname + "serveraggression", 0)
self.terrorist_hunt.hostages_rescued = stats.get(statname + "hostagerescue", 0)
self.terrorist_hunt.hostages_defended = stats.get(statname + "hostagedefense", 0)
self.terrorist_hunt.blind_kills = stats.get(statname + "blindkills", 0)
return self.terrorist_hunt
@asyncio.coroutine
def check_terrohunt(self):
"""|coro|
Checks the players general stats for terrorist hunt, only loading them if they haven't been loaded already"""
if self.terrorist_hunt is None:
yield from self.load_terrohunt()
return self.terrorist_hunt
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.load_general | python | def load_general(self):
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills") | |coro|
Loads the players general stats | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1380-L1418 | null | class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
weapons : dict
dict containing already found weapons (weapon_id: :class:`Weapon`)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
def __init__(self, auth, data):
    """Build a Player from a raw ubiservices profile payload.

    ``auth`` is retained for all later API calls; ``data`` is the profile
    dict returned by the profile lookup endpoint.
    """
    self.auth = auth
    # Plain copies of the profile fields this class exposes.
    for attr, key in (("id", "profileId"),
                      ("userid", "userId"),
                      ("platform", "platformType"),
                      ("id_on_platform", "idOnPlatform"),
                      ("name", "nameOnPlatform")):
        setattr(self, attr, data.get(key))
    self.platform_url = PlatformURLNames[self.platform]
    self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
    self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
    # Lazily-populated stat caches, filled by the load_*/check_* coroutines.
    self.ranks = {}
    self.operators = {}
    self.gamemodes = {}
    self.weapons = []
    self.casual = None
    self.ranked = None
    self.terrorist_hunt = None
@property
def spaceid(self):
    """The ubiservices space id matching this player's platform."""
    platform_spaces = self.auth.spaceids
    return platform_spaces[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statistics):
    """|coro|
    Fetch the requested raw statistics for this player and return them
    keyed by bare statistic name (the ":qualifier" suffix stripped)."""
    url = ("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics"
           "?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(statistics)))
    data = yield from self.auth.get(url)
    if "results" not in data or self.id not in data["results"]:
        raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
    raw = data["results"][self.id]
    # Response keys look like "<statistic>:<qualifier>"; keep only the ones asked for.
    return {key.split(":")[0]: raw[key] for key in raw if key.split(":")[0] in statistics}
@asyncio.coroutine
def load_level(self):
    """|coro|
    Load the players XP and level from the progression endpoint."""
    data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
    # Guard clause: bail out early on a malformed response.
    if "player_profiles" not in data or len(data["player_profiles"]) == 0:
        raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
    profile = data["player_profiles"][0]
    self.xp = profile.get("xp", 0)
    self.level = profile.get("level", 0)
@asyncio.coroutine
def check_level(self):
    """|coro|
    Ensure the players XP and level are loaded, fetching them only once."""
    if hasattr(self, "level"):
        return
    yield from self.load_level()
@asyncio.coroutine
def load_rank(self, region, season=-1):
    """|coro|
    Loads the players rank for this region and season and caches it
    under the "region:season" key in ``self.ranks``.

    Parameters
    ----------
    region : str
        the name of the region you want to get the rank for
    season : Optional[int]
        the season you want to get the rank for (defaults to -1, latest season)

    Returns
    -------
    :class:`Rank`
        the players rank for this region and season

    Raises
    ------
    InvalidRequest
        if the response does not contain this player's rank data"""
    # BUG FIX: the query string previously contained the "\xae" character
    # ("®", an HTML-entity-mangled "&reg"), so "&region_id=" was corrupted
    # and the region parameter was never transmitted correctly.
    data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s&region_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
    if "players" not in data or self.id not in data["players"]:
        raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
    regionkey = "%s:%s" % (region, season)
    self.ranks[regionkey] = Rank(data["players"][self.id])
    return self.ranks[regionkey]
@asyncio.coroutine
def get_rank(self, region, season=-1):
    """|coro|
    Return the cached rank for this region and season, fetching it from
    the API only on a cache miss.

    Parameters
    ----------
    region : str
        the name of the region you want to get the rank for
    season : Optional[int]
        the season you want to get the rank for (defaults to -1, latest season)

    Returns
    -------
    :class:`Rank`
        the players rank for this region and season"""
    key = "%s:%s" % (region, season)
    if key in self.ranks:
        return self.ranks[key]
    rank = yield from self.load_rank(region, season)
    return rank
@asyncio.coroutine
def load_all_operators(self):
    """|coro|
    Load the stats for every known operator with a single bulk request.

    Returns
    -------
    dict[:class:`Operator`]
        the dictionary of all operators found"""
    base_stats = ("operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,"
                  "operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,"
                  "operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno")
    # Collect each operator's unique statistic key (if it has one) before the request.
    requested = [base_stats]
    for operator in OperatorStatisticNames:
        extra_key = yield from self.auth.get_operator_statistic(operator)
        if extra_key:
            requested.append(extra_key)
    data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(requested)))
    if "results" not in data or self.id not in data["results"]:
        raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
    results = data["results"][self.id]
    for operator in OperatorStatisticNames:
        lowered = operator.lower()
        # The index string identifies this operator's entries inside the response keys.
        location = yield from self.auth.get_operator_index(lowered)
        op_stats = {key.split(":")[0].split("_")[1]: results[key]
                    for key in results if key is not None and location in key}
        extra_key = yield from self.auth.get_operator_statistic(operator)
        if extra_key:
            op_stats["__statistic_name"] = extra_key.split("_")[1]
        self.operators[lowered] = Operator(lowered, op_stats)
    return self.operators
@asyncio.coroutine
def get_all_operators(self):
    """|coro|
    Return the full operator dictionary, reloading everything in one bulk
    call if any operator is missing. Much cheaper than calling
    get_operator once per operator name.

    Returns
    -------
    dict[:class:`Operator`]
        the dictionary of all operators found"""
    if len(self.operators) < len(OperatorStatisticNames):
        loaded = yield from self.load_all_operators()
        return loaded
    return self.operators
@asyncio.coroutine
def load_operator(self, operator):
    """|coro|
    Loads the players stats for the operator and caches them in
    ``self.operators``.

    Parameters
    ----------
    operator : str
        the name of the operator

    Returns
    -------
    :class:`Operator`
        the operator object found

    Raises
    ------
    ValueError
        if the operator name is unknown
    InvalidRequest
        if the response does not contain this player's results"""
    # Index string used to pick this operator's entries out of the response keys.
    location = yield from self.auth.get_operator_index(operator)
    if location is None:
        raise ValueError("invalid operator %s" % operator)
    # Operator-specific statistic key (e.g. a gadget counter); not every operator has one.
    operator_key = yield from self.auth.get_operator_statistic(operator)
    if operator_key is not None:
        # Prefix with "," so it can be appended directly to the statistics list below.
        operator_key = "," + operator_key
    else:
        operator_key = ""
    data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
    if not "results" in data or not self.id in data["results"]:
        raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
    data = data["results"][self.id]
    # Keep only this operator's keys; "<group>_<stat>:<index>..." -> "<stat>".
    data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
    if operator_key:
        # NOTE(review): operator_key still carries the leading "," here; index 1 of
        # split("_") happens to be unaffected by it, but anything after a second
        # underscore in the statistic name is dropped — confirm that is intended.
        data["__statistic_name"] = operator_key.split("_")[1]
    #if len(data) < 5:
    #    raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
    oper = Operator(operator, data)
    self.operators[operator] = oper
    return oper
@asyncio.coroutine
def get_operator(self, operator):
    """|coro|
    Return cached stats for this operator, loading them on first access.

    Parameters
    ----------
    operator : str
        the name of the operator

    Returns
    -------
    :class:`Operator`
        the operator object found"""
    try:
        return self.operators[operator]
    except KeyError:
        pass
    loaded = yield from self.load_operator(operator)
    return loaded
@asyncio.coroutine
def load_weapons(self):
    """|coro|
    Load the players weapon stats.

    Returns
    -------
    list[:class:`Weapon`]
        list of all the weapon objects found"""
    data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
    if "results" not in data or self.id not in data["results"]:
        raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
    stats = data["results"][self.id]
    self.weapons = [Weapon(index) for index in range(7)]
    # Response category -> Weapon attribute name.
    attr_for_category = {"kills": "kills", "headshot": "headshots",
                         "bulletfired": "shots", "bullethit": "hits"}
    # Keys look like "weapontypepvp_<category>:<weapontype>:...".
    for key in stats:
        parts = key.split(":")
        category = parts[0].split("_")[1]
        try:
            slot = int(parts[1]) - 1
            target = self.weapons[slot]
            attr = attr_for_category.get(category)
            if attr is not None:
                setattr(target, attr, stats[key])
        except (ValueError, TypeError, IndexError):
            # Malformed key or out-of-range weapon type: skip, as before.
            pass
    return self.weapons
@asyncio.coroutine
def check_weapons(self):
    """|coro|
    Ensure the players weapon stats are loaded, fetching them only once.

    Returns
    -------
    list[:class:`Weapon`]
        list of all the weapon objects found"""
    if not self.weapons:
        yield from self.load_weapons()
    return self.weapons
@asyncio.coroutine
def load_gamemodes(self):
    """|coro|
    Load the players per-gamemode stats.

    Returns
    -------
    dict
        dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
    stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
                                              "secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
                                              "rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
                                              "plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
                                              "generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
                                              "generalpvp_hostagerescue", "generalpvp_hostagedefense")
    self.gamemodes = {name: Gamemode(name) for name in GamemodeNames}
    for name, gamemode in self.gamemodes.items():
        prefix = name + "pvp_"
        gamemode.best_score = stats.get(prefix + "bestscore", 0)
        gamemode.lost = stats.get(prefix + "matchlost", 0)
        gamemode.won = stats.get(prefix + "matchwon", 0)
        gamemode.played = stats.get(prefix + "matchplayed", 0)
        # Two modes carry extra counters that live under the "generalpvp" group.
        if name == "securearea":
            gamemode.areas_secured = stats.get("generalpvp_servershacked", 0)
            gamemode.areas_defended = stats.get("generalpvp_serverdefender", 0)
            gamemode.areas_contested = stats.get("generalpvp_serveraggression", 0)
        elif name == "rescuehostage":
            gamemode.hostages_rescued = stats.get("generalpvp_hostagerescue", 0)
            gamemode.hostages_defended = stats.get("generalpvp_hostagedefense", 0)
    return self.gamemodes
@asyncio.coroutine
def check_gamemodes(self):
    """|coro|
    Ensure the players gamemode stats are loaded, fetching them only once.

    Returns
    -------
    dict
        dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
    if not self.gamemodes:
        yield from self.load_gamemodes()
    return self.gamemodes
# FIX: this region previously contained a doubled @asyncio.coroutine decorator on
# check_general and was missing load_general entirely (check_general calls
# self.load_general(), which would have raised AttributeError). load_general is
# restored here; blind_kills now gets the same 0 default as every other field.
@asyncio.coroutine
def load_general(self):
    """|coro|
    Loads the players general (PvP) stats onto this Player instance."""
    stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
                                              "generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
                                              "generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
                                              "generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
                                              "generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
                                              "generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
                                              "generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
                                              "generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
    statname = "generalpvp_"
    self.deaths = stats.get(statname + "death", 0)
    self.penetration_kills = stats.get(statname + "penetrationkills", 0)
    self.matches_won = stats.get(statname + "matchwon", 0)
    self.bullets_hit = stats.get(statname + "bullethit", 0)
    self.melee_kills = stats.get(statname + "meleekills", 0)
    self.bullets_fired = stats.get(statname + "bulletfired", 0)
    self.matches_played = stats.get(statname + "matchplayed", 0)
    self.kill_assists = stats.get(statname + "killassists", 0)
    self.time_played = stats.get(statname + "timeplayed", 0)
    self.revives = stats.get(statname + "revive", 0)
    self.kills = stats.get(statname + "kills", 0)
    self.headshots = stats.get(statname + "headshot", 0)
    self.matches_lost = stats.get(statname + "matchlost", 0)
    self.dbno_assists = stats.get(statname + "dbnoassists", 0)
    self.suicides = stats.get(statname + "suicide", 0)
    self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
    self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
    self.total_xp = stats.get(statname + "totalxp", 0)
    self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
    self.distance_travelled = stats.get(statname + "distancetravelled", 0)
    self.revives_denied = stats.get(statname + "revivedenied", 0)
    self.dbnos = stats.get(statname + "dbno", 0)
    self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
    # Default 0 for consistency with the other fields (was a bare .get()).
    self.blind_kills = stats.get(statname + "blindkills", 0)
@asyncio.coroutine
def check_general(self):
    """|coro|
    Checks the players general stats, only loading them if they haven't already been found"""
    # "kills" is set by load_general, so its presence marks the stats as loaded.
    if not hasattr(self, "kills"):
        yield from self.load_general()
@asyncio.coroutine
def load_queues(self):
    """|coro|
    Load the players ranked and casual queue stats."""
    stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
                                              "casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
                                              "rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
                                              "rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death")
    self.ranked = GameQueue("ranked")
    self.casual = GameQueue("casual")
    # GameQueue attribute -> statistic suffix in the response.
    fields = (("won", "matchwon"), ("lost", "matchlost"),
              ("time_played", "timeplayed"), ("played", "matchplayed"),
              ("kills", "kills"), ("deaths", "death"))
    for queue in (self.ranked, self.casual):
        prefix = queue.name + "pvp_"
        for attr, suffix in fields:
            setattr(queue, attr, stats.get(prefix + suffix, 0))
@asyncio.coroutine
def check_queues(self):
    """|coro|
    Ensure the players game queues are loaded, fetching them only once."""
    if self.casual is not None:
        return
    yield from self.load_queues()
@asyncio.coroutine
def load_terrohunt(self):
    """|coro|
    Load the player's general stats for terrorist hunt into a
    :class:`GameQueue` stored on ``self.terrorist_hunt``."""
    stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
                                              "generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
                                              "generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
                                              "generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
                                              "generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
                                              "generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
                                              "generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
                                              "generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
                                              "generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
                                              "generalpve_gadgetdestroy", "generalpve_accuracy")
    queue = GameQueue("terrohunt")
    prefix = "generalpve_"
    # GameQueue attribute -> statistic suffix in the response.
    field_map = (
        ("deaths", "death"),
        ("penetration_kills", "penetrationkills"),
        ("matches_won", "matchwon"),
        ("bullets_hit", "bullethit"),
        ("melee_kills", "meleekills"),
        ("bullets_fired", "bulletfired"),
        ("matches_played", "matchplayed"),
        ("kill_assists", "killassists"),
        ("time_played", "timeplayed"),
        ("revives", "revive"),
        ("kills", "kills"),
        ("headshots", "headshot"),
        ("matches_lost", "matchlost"),
        ("dbno_assists", "dbnoassists"),
        ("suicides", "suicide"),
        ("barricades_deployed", "barricadedeployed"),
        ("reinforcements_deployed", "reinforcementdeploy"),
        ("total_xp", "totalxp"),
        ("rappel_breaches", "rappelbreach"),
        ("distance_travelled", "distancetravelled"),
        ("revives_denied", "revivedenied"),
        ("dbnos", "dbno"),
        ("gadgets_destroyed", "gadgetdestroy"),
        ("areas_secured", "servershacked"),
        ("areas_defended", "serverdefender"),
        ("areas_contested", "serveraggression"),
        ("hostages_rescued", "hostagerescue"),
        ("hostages_defended", "hostagedefense"),
        ("blind_kills", "blindkills"),
    )
    for attr, suffix in field_map:
        setattr(queue, attr, stats.get(prefix + suffix, 0))
    self.terrorist_hunt = queue
    return self.terrorist_hunt
@asyncio.coroutine
def check_terrohunt(self):
    """|coro|
    Ensure the players terrorist hunt stats are loaded, fetching them only once."""
    if self.terrorist_hunt is not None:
        return self.terrorist_hunt
    result = yield from self.load_terrohunt()
    return result
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.load_queues | python | def load_queues(self):
stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
"casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
"rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
"rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death")
self.ranked = GameQueue("ranked")
self.casual = GameQueue("casual")
for gq in (self.ranked, self.casual):
statname = gq.name + "pvp_"
gq.won = stats.get(statname + "matchwon", 0)
gq.lost = stats.get(statname + "matchlost", 0)
gq.time_played = stats.get(statname + "timeplayed", 0)
gq.played = stats.get(statname + "matchplayed", 0)
gq.kills = stats.get(statname + "kills", 0)
gq.deaths = stats.get(statname + "death", 0) | |coro|
Loads the players game queues | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1430-L1451 | null | class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
weapons : dict
dict containing already found weapons (weapon_id: :class:`Weapon`)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
def __init__(self, auth, data):
self.auth = auth
self.id = data.get("profileId")
self.userid = data.get("userId")
self.platform = data.get("platformType")
self.platform_url = PlatformURLNames[self.platform]
self.id_on_platform = data.get("idOnPlatform")
self.name = data.get("nameOnPlatform")
self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
self.ranks = {}
self.operators = {}
self.gamemodes = {}
self.weapons = []
self.casual = None
self.ranked = None
self.terrorist_hunt = None
@property
def spaceid(self):
return self.auth.spaceids[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statsitics):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(statsitics)))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
stats = {}
for x in data:
statistic = x.split(":")[0]
if statistic in statsitics:
stats[statistic] = data[x]
return stats
@asyncio.coroutine
def load_level(self):
"""|coro|
Load the players XP and level"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
if "player_profiles" in data and len(data["player_profiles"]) > 0:
self.xp = data["player_profiles"][0].get("xp", 0)
self.level = data["player_profiles"][0].get("level", 0)
else:
raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def check_level(self):
"""|coro|
Check the players XP and level, only loading it if it hasn't been loaded yet"""
if not hasattr(self, "level"):
yield from self.load_level()
@asyncio.coroutine
def load_rank(self, region, season=-1):
"""|coro|
Loads the players rank for this region and season
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s®ion_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
if "players" in data and self.id in data["players"]:
regionkey = "%s:%s" % (region, season)
self.ranks[regionkey] = Rank(data["players"][self.id])
return self.ranks[regionkey]
else:
raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_rank(self, region, season=-1):
"""|coro|
Checks the players rank for this region, only loading it if it hasn't already been found
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
cache_key = "%s:%s" % (region, season)
if cache_key in self.ranks:
return self.ranks[cache_key]
result = yield from self.load_rank(region, season)
return result
@asyncio.coroutine
def load_all_operators(self):
"""|coro|
Loads the player stats for all operators
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
statistics = "operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno"
for operator in OperatorStatisticNames:
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
statistics += "," + operator_key
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, statistics))
if "results" not in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
for operator in OperatorStatisticNames:
location = yield from self.auth.get_operator_index(operator.lower())
op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
op_data["__statistic_name"] = operator_key.split("_")[1]
self.operators[operator.lower()] = Operator(operator.lower(), op_data)
return self.operators
@asyncio.coroutine
def get_all_operators(self):
"""|coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
if len(self.operators) >= len(OperatorStatisticNames):
return self.operators
result = yield from self.load_all_operators()
return result
@asyncio.coroutine
def load_operator(self, operator):
"""|coro|
Loads the players stats for the operator
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
location = yield from self.auth.get_operator_index(operator)
if location is None:
raise ValueError("invalid operator %s" % operator)
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key is not None:
operator_key = "," + operator_key
else:
operator_key = ""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
if operator_key:
data["__statistic_name"] = operator_key.split("_")[1]
#if len(data) < 5:
# raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
oper = Operator(operator, data)
self.operators[operator] = oper
return oper
@asyncio.coroutine
def get_operator(self, operator):
"""|coro|
Checks the players stats for this operator, only loading them if they haven't already been found
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
if operator in self.operators:
return self.operators[operator]
result = yield from self.load_operator(operator)
return result
@asyncio.coroutine
def load_weapons(self):
"""|coro|
Load the players weapon stats
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
data = data["results"][self.id]
self.weapons = [Weapon(i) for i in range(7)]
for x in data:
spl = x.split(":")
category = spl[0].split("_")[1]
try:
weapontype = int(spl[1]) - 1
weapon = self.weapons[weapontype]
if category == "kills": weapon.kills = data[x]
elif category == "headshot": weapon.headshots = data[x]
elif category == "bulletfired": weapon.shots = data[x]
elif category == "bullethit": weapon.hits = data[x]
except (ValueError, TypeError, IndexError):
pass
return self.weapons
@asyncio.coroutine
def check_weapons(self):
"""|coro|
Check the players weapon stats, only loading them if they haven't already been found
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
if len(self.weapons) == 0:
yield from self.load_weapons()
return self.weapons
@asyncio.coroutine
def load_gamemodes(self):
"""|coro|
Loads the players gamemode stats
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
"secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
"rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
"plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
"generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
"generalpvp_hostagerescue", "generalpvp_hostagedefense")
self.gamemodes = {x: Gamemode(x) for x in GamemodeNames}
for name in self.gamemodes:
statname, gamemode = name + "pvp_", self.gamemodes[name]
gamemode.best_score = stats.get(statname + "bestscore", 0)
gamemode.lost = stats.get(statname + "matchlost", 0)
gamemode.won = stats.get(statname + "matchwon", 0)
gamemode.played = stats.get(statname + "matchplayed", 0)
if name == "securearea":
gamemode.areas_secured = stats.get("generalpvp_servershacked", 0)
gamemode.areas_defended = stats.get("generalpvp_serverdefender", 0)
gamemode.areas_contested = stats.get("generalpvp_serveraggression", 0)
elif name == "rescuehostage":
gamemode.hostages_rescued = stats.get("generalpvp_hostagerescue", 0)
gamemode.hostages_defended = stats.get("generalpvp_hostagedefense", 0)
return self.gamemodes
@asyncio.coroutine
def check_gamemodes(self):
"""|coro|
Checks the players gamemode stats, only loading them if they haven't already been found
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
if len(self.gamemodes) == 0:
yield from self.load_gamemodes()
return self.gamemodes
@asyncio.coroutine
def load_general(self):
"""|coro|
Loads the players general stats"""
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills")
@asyncio.coroutine
def check_general(self):
"""|coro|
Checks the players general stats, only loading them if they haven't already been found"""
if not hasattr(self, "kills"):
yield from self.load_general()
@asyncio.coroutine
@asyncio.coroutine
def check_queues(self):
"""|coro|
Checks the players game queues, only loading them if they haven't already been found"""
if self.casual is None:
yield from self.load_queues()
@asyncio.coroutine
def load_terrohunt(self):
"""|coro|
Loads the player's general stats for terrorist hunt"""
stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
"generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
"generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
"generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
"generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
"generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
"generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
"generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
"generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
"generalpve_gadgetdestroy", "generalpve_accuracy")
self.terrorist_hunt = GameQueue("terrohunt")
statname = "generalpve_"
self.terrorist_hunt.deaths = stats.get(statname + "death", 0)
self.terrorist_hunt.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.terrorist_hunt.matches_won = stats.get(statname + "matchwon", 0)
self.terrorist_hunt.bullets_hit = stats.get(statname + "bullethit", 0)
self.terrorist_hunt.melee_kills = stats.get(statname + "meleekills", 0)
self.terrorist_hunt.bullets_fired = stats.get(statname + "bulletfired", 0)
self.terrorist_hunt.matches_played = stats.get(statname + "matchplayed", 0)
self.terrorist_hunt.kill_assists = stats.get(statname + "killassists", 0)
self.terrorist_hunt.time_played = stats.get(statname + "timeplayed", 0)
self.terrorist_hunt.revives = stats.get(statname + "revive", 0)
self.terrorist_hunt.kills = stats.get(statname + "kills", 0)
self.terrorist_hunt.headshots = stats.get(statname + "headshot", 0)
self.terrorist_hunt.matches_lost = stats.get(statname + "matchlost", 0)
self.terrorist_hunt.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.terrorist_hunt.suicides = stats.get(statname + "suicide", 0)
self.terrorist_hunt.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.terrorist_hunt.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.terrorist_hunt.total_xp = stats.get(statname + "totalxp", 0)
self.terrorist_hunt.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.terrorist_hunt.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.terrorist_hunt.revives_denied = stats.get(statname + "revivedenied", 0)
self.terrorist_hunt.dbnos = stats.get(statname + "dbno", 0)
self.terrorist_hunt.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.terrorist_hunt.areas_secured = stats.get(statname + "servershacked", 0)
self.terrorist_hunt.areas_defended = stats.get(statname + "serverdefender", 0)
self.terrorist_hunt.areas_contested = stats.get(statname + "serveraggression", 0)
self.terrorist_hunt.hostages_rescued = stats.get(statname + "hostagerescue", 0)
self.terrorist_hunt.hostages_defended = stats.get(statname + "hostagedefense", 0)
self.terrorist_hunt.blind_kills = stats.get(statname + "blindkills", 0)
return self.terrorist_hunt
@asyncio.coroutine
def check_terrohunt(self):
"""|coro|
Checks the players general stats for terrorist hunt, only loading them if they haven't been loaded already"""
if self.terrorist_hunt is None:
yield from self.load_terrohunt()
return self.terrorist_hunt
|
billy-yoyo/RainbowSixSiege-Python-API | r6sapi/r6sapi.py | Player.load_terrohunt | python | def load_terrohunt(self):
stats = yield from self._fetch_statistics("generalpve_dbnoassists", "generalpve_death", "generalpve_revive",
"generalpve_matchwon", "generalpve_suicide", "generalpve_servershacked",
"generalpve_serverdefender", "generalpve_barricadedeployed", "generalpve_reinforcementdeploy",
"generalpve_kills", "generalpve_hostagedefense", "generalpve_bulletfired",
"generalpve_matchlost", "generalpve_killassists", "generalpve_totalxp",
"generalpve_hostagerescue", "generalpve_penetrationkills", "generalpve_meleekills",
"generalpve_rappelbreach", "generalpve_distancetravelled", "generalpve_matchplayed",
"generalpve_serveraggression", "generalpve_timeplayed", "generalpve_revivedenied",
"generalpve_dbno", "generalpve_bullethit", "generalpve_blindkills", "generalpve_headshot",
"generalpve_gadgetdestroy", "generalpve_accuracy")
self.terrorist_hunt = GameQueue("terrohunt")
statname = "generalpve_"
self.terrorist_hunt.deaths = stats.get(statname + "death", 0)
self.terrorist_hunt.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.terrorist_hunt.matches_won = stats.get(statname + "matchwon", 0)
self.terrorist_hunt.bullets_hit = stats.get(statname + "bullethit", 0)
self.terrorist_hunt.melee_kills = stats.get(statname + "meleekills", 0)
self.terrorist_hunt.bullets_fired = stats.get(statname + "bulletfired", 0)
self.terrorist_hunt.matches_played = stats.get(statname + "matchplayed", 0)
self.terrorist_hunt.kill_assists = stats.get(statname + "killassists", 0)
self.terrorist_hunt.time_played = stats.get(statname + "timeplayed", 0)
self.terrorist_hunt.revives = stats.get(statname + "revive", 0)
self.terrorist_hunt.kills = stats.get(statname + "kills", 0)
self.terrorist_hunt.headshots = stats.get(statname + "headshot", 0)
self.terrorist_hunt.matches_lost = stats.get(statname + "matchlost", 0)
self.terrorist_hunt.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.terrorist_hunt.suicides = stats.get(statname + "suicide", 0)
self.terrorist_hunt.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.terrorist_hunt.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.terrorist_hunt.total_xp = stats.get(statname + "totalxp", 0)
self.terrorist_hunt.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.terrorist_hunt.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.terrorist_hunt.revives_denied = stats.get(statname + "revivedenied", 0)
self.terrorist_hunt.dbnos = stats.get(statname + "dbno", 0)
self.terrorist_hunt.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.terrorist_hunt.areas_secured = stats.get(statname + "servershacked", 0)
self.terrorist_hunt.areas_defended = stats.get(statname + "serverdefender", 0)
self.terrorist_hunt.areas_contested = stats.get(statname + "serveraggression", 0)
self.terrorist_hunt.hostages_rescued = stats.get(statname + "hostagerescue", 0)
self.terrorist_hunt.hostages_defended = stats.get(statname + "hostagedefense", 0)
self.terrorist_hunt.blind_kills = stats.get(statname + "blindkills", 0)
return self.terrorist_hunt | |coro|
Loads the player's general stats for terrorist hunt | train | https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1463-L1511 | null | class Player:
"""Contains information about a specific player
Attributes
----------
auth : :class:`Auth`
the auth object used to find this player
id : str
the players profile id
userid : str
the players user id
platform : str
the platform this player is on
platform_url : str
the URL name for this platform (used internally)
id_on_platform : str
the players ID on the platform
name : str
the players name on the platform
url : str
a link to the players profile
icon_url : str
a link to the players avatar
xp : int
the amount of xp the player has, must call check_level or load_level first
level : int
the level of the player, must call check_level or load_level first
ranks : dict
dict containing already found ranks ("region_name:season": :class:`Rank`)
operators : dict
dict containing already found operators (operator_name: :class:`Operator`)
gamemodes : dict
dict containing already found gamemodes (gamemode_id: :class:`Gamemode`)
weapons : dict
dict containing already found weapons (weapon_id: :class:`Weapon`)
casual : :class:`GameQueue`
stats for the casual queue, must call load_queues or check_queues first
ranked : :class:`GameQueue`
stats for the ranked queue, must call load_queues or check_queues first
deaths : int
the number of deaths the player has (must call load_general or check_general first)
kills : int
the number of kills the player has (must call load_general or check_general first)
kill_assists : int
the number of kill assists the player has (must call load_general or check_general first)
penetration_kills : int
the number of penetration kills the player has (must call load_general or check_general first)
melee_kills : int
the number of melee kills the player has (must call load_general or check_general first)
revives : int
the number of revives the player has (must call load_general or check_general first)
matches_won : int
the number of matches the player has won (must call load_general or check_general first)
matches_lost : int
the number of matches the player has lost (must call load_general or check_general first)
matches_played : int
the number of matches the player has played (must call load_general or check_general first)
time_played : int
the amount of time in seconds the player has played for (must call load_general or check_general first)
bullets_fired : int
the amount of bullets the player has fired (must call load_general or check_general first)
bullets_hit : int
the amount of bullets the player has hit (must call load_general or check_general first)
headshots : int
the amount of headshots the player has hit (must call load_general or check_general first)
terrorist_hunt : :class:`GameQueue`
contains all of the above state (from deaths to headshots) inside a gamequeue object.
"""
def __init__(self, auth, data):
self.auth = auth
self.id = data.get("profileId")
self.userid = data.get("userId")
self.platform = data.get("platformType")
self.platform_url = PlatformURLNames[self.platform]
self.id_on_platform = data.get("idOnPlatform")
self.name = data.get("nameOnPlatform")
self.url = "https://game-rainbow6.ubi.com/en-us/%s/player-statistics/%s/multiplayer" % (self.platform, self.id)
self.icon_url = "https://ubisoft-avatars.akamaized.net/%s/default_146_146.png" % (self.id)
self.ranks = {}
self.operators = {}
self.gamemodes = {}
self.weapons = []
self.casual = None
self.ranked = None
self.terrorist_hunt = None
@property
def spaceid(self):
return self.auth.spaceids[self.platform]
@asyncio.coroutine
def _fetch_statistics(self, *statsitics):
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, ",".join(statsitics)))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
stats = {}
for x in data:
statistic = x.split(":")[0]
if statistic in statsitics:
stats[statistic] = data[x]
return stats
@asyncio.coroutine
def load_level(self):
"""|coro|
Load the players XP and level"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
if "player_profiles" in data and len(data["player_profiles"]) > 0:
self.xp = data["player_profiles"][0].get("xp", 0)
self.level = data["player_profiles"][0].get("level", 0)
else:
raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
@asyncio.coroutine
def check_level(self):
"""|coro|
Check the players XP and level, only loading it if it hasn't been loaded yet"""
if not hasattr(self, "level"):
yield from self.load_level()
@asyncio.coroutine
def load_rank(self, region, season=-1):
"""|coro|
Loads the players rank for this region and season
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s®ion_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
if "players" in data and self.id in data["players"]:
regionkey = "%s:%s" % (region, season)
self.ranks[regionkey] = Rank(data["players"][self.id])
return self.ranks[regionkey]
else:
raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
@asyncio.coroutine
def get_rank(self, region, season=-1):
"""|coro|
Checks the players rank for this region, only loading it if it hasn't already been found
Parameters
----------
region : str
the name of the region you want to get the rank for
season : Optional[int]
the season you want to get the rank for (defaults to -1, latest season)
Returns
-------
:class:`Rank`
the players rank for this region and season"""
cache_key = "%s:%s" % (region, season)
if cache_key in self.ranks:
return self.ranks[cache_key]
result = yield from self.load_rank(region, season)
return result
@asyncio.coroutine
def load_all_operators(self):
"""|coro|
Loads the player stats for all operators
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
statistics = "operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno"
for operator in OperatorStatisticNames:
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
statistics += "," + operator_key
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=%s" % (self.spaceid, self.platform_url, self.id, statistics))
if "results" not in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
for operator in OperatorStatisticNames:
location = yield from self.auth.get_operator_index(operator.lower())
op_data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key:
op_data["__statistic_name"] = operator_key.split("_")[1]
self.operators[operator.lower()] = Operator(operator.lower(), op_data)
return self.operators
@asyncio.coroutine
def get_all_operators(self):
"""|coro|
Checks the player stats for all operators, loading them all again if any aren't found
This is significantly more efficient than calling get_operator for every operator name.
Returns
-------
dict[:class:`Operator`]
the dictionary of all operators found"""
if len(self.operators) >= len(OperatorStatisticNames):
return self.operators
result = yield from self.load_all_operators()
return result
@asyncio.coroutine
def load_operator(self, operator):
"""|coro|
Loads the players stats for the operator
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
location = yield from self.auth.get_operator_index(operator)
if location is None:
raise ValueError("invalid operator %s" % operator)
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key is not None:
operator_key = "," + operator_key
else:
operator_key = ""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
if operator_key:
data["__statistic_name"] = operator_key.split("_")[1]
#if len(data) < 5:
# raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
oper = Operator(operator, data)
self.operators[operator] = oper
return oper
@asyncio.coroutine
def get_operator(self, operator):
"""|coro|
Checks the players stats for this operator, only loading them if they haven't already been found
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
if operator in self.operators:
return self.operators[operator]
result = yield from self.load_operator(operator)
return result
@asyncio.coroutine
def load_weapons(self):
"""|coro|
Load the players weapon stats
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=weapontypepvp_kills,weapontypepvp_headshot,weapontypepvp_bulletfired,weapontypepvp_bullethit" % (self.spaceid, self.platform_url, self.id))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing key results in returned JSON object %s" % str(data))
data = data["results"][self.id]
self.weapons = [Weapon(i) for i in range(7)]
for x in data:
spl = x.split(":")
category = spl[0].split("_")[1]
try:
weapontype = int(spl[1]) - 1
weapon = self.weapons[weapontype]
if category == "kills": weapon.kills = data[x]
elif category == "headshot": weapon.headshots = data[x]
elif category == "bulletfired": weapon.shots = data[x]
elif category == "bullethit": weapon.hits = data[x]
except (ValueError, TypeError, IndexError):
pass
return self.weapons
@asyncio.coroutine
def check_weapons(self):
"""|coro|
Check the players weapon stats, only loading them if they haven't already been found
Returns
-------
list[:class:`Weapon`]
list of all the weapon objects found"""
if len(self.weapons) == 0:
yield from self.load_weapons()
return self.weapons
@asyncio.coroutine
def load_gamemodes(self):
"""|coro|
Loads the players gamemode stats
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
stats = yield from self._fetch_statistics("secureareapvp_matchwon", "secureareapvp_matchlost", "secureareapvp_matchplayed",
"secureareapvp_bestscore", "rescuehostagepvp_matchwon", "rescuehostagepvp_matchlost",
"rescuehostagepvp_matchplayed", "rescuehostagepvp_bestscore", "plantbombpvp_matchwon",
"plantbombpvp_matchlost", "plantbombpvp_matchplayed", "plantbombpvp_bestscore",
"generalpvp_servershacked", "generalpvp_serverdefender", "generalpvp_serveraggression",
"generalpvp_hostagerescue", "generalpvp_hostagedefense")
self.gamemodes = {x: Gamemode(x) for x in GamemodeNames}
for name in self.gamemodes:
statname, gamemode = name + "pvp_", self.gamemodes[name]
gamemode.best_score = stats.get(statname + "bestscore", 0)
gamemode.lost = stats.get(statname + "matchlost", 0)
gamemode.won = stats.get(statname + "matchwon", 0)
gamemode.played = stats.get(statname + "matchplayed", 0)
if name == "securearea":
gamemode.areas_secured = stats.get("generalpvp_servershacked", 0)
gamemode.areas_defended = stats.get("generalpvp_serverdefender", 0)
gamemode.areas_contested = stats.get("generalpvp_serveraggression", 0)
elif name == "rescuehostage":
gamemode.hostages_rescued = stats.get("generalpvp_hostagerescue", 0)
gamemode.hostages_defended = stats.get("generalpvp_hostagedefense", 0)
return self.gamemodes
@asyncio.coroutine
def check_gamemodes(self):
"""|coro|
Checks the players gamemode stats, only loading them if they haven't already been found
Returns
-------
dict
dict of all the gamemodes found (gamemode_name: :class:`Gamemode`)"""
if len(self.gamemodes) == 0:
yield from self.load_gamemodes()
return self.gamemodes
@asyncio.coroutine
def load_general(self):
"""|coro|
Loads the players general stats"""
stats = yield from self._fetch_statistics("generalpvp_timeplayed", "generalpvp_matchplayed", "generalpvp_matchwon",
"generalpvp_matchlost", "generalpvp_kills", "generalpvp_death",
"generalpvp_bullethit", "generalpvp_bulletfired", "generalpvp_killassists",
"generalpvp_revive", "generalpvp_headshot", "generalpvp_penetrationkills",
"generalpvp_meleekills", "generalpvp_dbnoassists", "generalpvp_suicide",
"generalpvp_barricadedeployed", "generalpvp_reinforcementdeploy", "generalpvp_totalxp",
"generalpvp_rappelbreach", "generalpvp_distancetravelled", "generalpvp_revivedenied",
"generalpvp_dbno", "generalpvp_gadgetdestroy", "generalpvp_blindkills")
statname = "generalpvp_"
self.deaths = stats.get(statname + "death", 0)
self.penetration_kills = stats.get(statname + "penetrationkills", 0)
self.matches_won = stats.get(statname + "matchwon", 0)
self.bullets_hit = stats.get(statname + "bullethit", 0)
self.melee_kills = stats.get(statname + "meleekills", 0)
self.bullets_fired = stats.get(statname + "bulletfired", 0)
self.matches_played = stats.get(statname + "matchplayed", 0)
self.kill_assists = stats.get(statname + "killassists", 0)
self.time_played = stats.get(statname + "timeplayed", 0)
self.revives = stats.get(statname + "revive", 0)
self.kills = stats.get(statname + "kills", 0)
self.headshots = stats.get(statname + "headshot", 0)
self.matches_lost = stats.get(statname + "matchlost", 0)
self.dbno_assists = stats.get(statname + "dbnoassists", 0)
self.suicides = stats.get(statname + "suicide", 0)
self.barricades_deployed = stats.get(statname + "barricadedeployed", 0)
self.reinforcements_deployed = stats.get(statname + "reinforcementdeploy", 0)
self.total_xp = stats.get(statname + "totalxp", 0)
self.rappel_breaches = stats.get(statname + "rappelbreach", 0)
self.distance_travelled = stats.get(statname + "distancetravelled", 0)
self.revives_denied = stats.get(statname + "revivedenied", 0)
self.dbnos = stats.get(statname + "dbno", 0)
self.gadgets_destroyed = stats.get(statname + "gadgetdestroy", 0)
self.blind_kills = stats.get(statname + "blindkills")
@asyncio.coroutine
def check_general(self):
"""|coro|
Checks the players general stats, only loading them if they haven't already been found"""
if not hasattr(self, "kills"):
yield from self.load_general()
@asyncio.coroutine
def load_queues(self):
"""|coro|
Loads the players game queues"""
stats = yield from self._fetch_statistics("casualpvp_matchwon", "casualpvp_matchlost", "casualpvp_timeplayed",
"casualpvp_matchplayed", "casualpvp_kills", "casualpvp_death",
"rankedpvp_matchwon", "rankedpvp_matchlost", "rankedpvp_timeplayed",
"rankedpvp_matchplayed", "rankedpvp_kills", "rankedpvp_death")
self.ranked = GameQueue("ranked")
self.casual = GameQueue("casual")
for gq in (self.ranked, self.casual):
statname = gq.name + "pvp_"
gq.won = stats.get(statname + "matchwon", 0)
gq.lost = stats.get(statname + "matchlost", 0)
gq.time_played = stats.get(statname + "timeplayed", 0)
gq.played = stats.get(statname + "matchplayed", 0)
gq.kills = stats.get(statname + "kills", 0)
gq.deaths = stats.get(statname + "death", 0)
@asyncio.coroutine
def check_queues(self):
"""|coro|
Checks the players game queues, only loading them if they haven't already been found"""
if self.casual is None:
yield from self.load_queues()
@asyncio.coroutine
@asyncio.coroutine
def check_terrohunt(self):
"""|coro|
Checks the players general stats for terrorist hunt, only loading them if they haven't been loaded already"""
if self.terrorist_hunt is None:
yield from self.load_terrohunt()
return self.terrorist_hunt
|
lipoja/URLExtract | urlextract/cachefile.py | CacheFile._get_default_cache_dir | python | def _get_default_cache_dir(self):
return os.path.join(os.path.dirname(__file__), self._DATA_DIR) | Returns default cache directory (data directory)
:raises: CacheFileError when default cached file does not is exist
:return: path to default cache directory
:rtype: str | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L66-L75 | null | class CacheFile:
"""
Class for working with cached TLDs in file.
"""
# file name of cached list of TLDs downloaded from IANA
_CACHE_FILE_NAME = 'tlds-alpha-by-domain.txt'
_DATA_DIR = 'data'
# name used in appdir
_URLEXTRACT_NAME = "urlextract"
def __init__(self, cache_dir=None):
"""
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached file is not readable for user
"""
self._logger = logging.getLogger(self._URLEXTRACT_NAME)
# True if user specified path to cache directory
self._user_defined_cache = bool(cache_dir)
self._default_cache_file = False
# full path for cached file with list of TLDs
self._tld_list_path = self._get_cache_file_path(cache_dir)
if not os.access(self._tld_list_path, os.F_OK):
self._logger.info(
"Cache file not found in '%s'. "
"Use URLExtract.update() to download newest version.",
self._tld_list_path)
self._logger.info(
"Using default list of TLDs provided in urlextract package."
)
self._tld_list_path = self._get_default_cache_file_path()
self._default_cache_file = True
def _get_default_cache_file_path(self):
"""
Returns default cache file path
:return: default cache file path (to data directory)
:rtype: str
"""
default_list_path = os.path.join(
self._get_default_cache_dir(), self._CACHE_FILE_NAME)
if not os.access(default_list_path, os.F_OK):
raise CacheFileError(
"Default cache file does not exist "
"'{}'!".format(default_list_path)
)
return default_list_path
def _get_writable_cache_dir(self):
"""
Get writable cache directory with fallback to user's cache directory
and global temp directory
:raises: CacheFileError when cached directory is not writable for user
:return: path to cache directory
:rtype: str
"""
dir_path_data = self._get_default_cache_dir()
if os.access(dir_path_data, os.W_OK):
self._default_cache_file = True
return dir_path_data
dir_path_user = user_cache_dir(self._URLEXTRACT_NAME)
if not os.path.exists(dir_path_user):
os.makedirs(dir_path_user, exist_ok=True)
if os.access(dir_path_user, os.W_OK):
return dir_path_user
dir_path_temp = tempfile.gettempdir()
if os.access(dir_path_temp, os.W_OK):
return dir_path_temp
raise CacheFileError("Cache directories are not writable.")
def _get_cache_file_path(self, cache_dir=None):
"""
Get path for cache file
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached directory is not writable for user
:return: Full path to cached file with TLDs
:rtype: str
"""
if cache_dir is None:
# Tries to get writable cache dir with fallback to users data dir
# and temp directory
cache_dir = self._get_writable_cache_dir()
else:
if not os.access(cache_dir, os.W_OK):
raise CacheFileError("None of cache directories is writable.")
# get directory for cached file
return os.path.join(cache_dir, self._CACHE_FILE_NAME)
def _download_tlds_list(self):
"""
Function downloads list of TLDs from IANA.
LINK: https://data.iana.org/TLD/tlds-alpha-by-domain.txt
:return: True if list was downloaded, False in case of an error
:rtype: bool
"""
url_list = 'https://data.iana.org/TLD/tlds-alpha-by-domain.txt'
# Default cache file exist (set by _default_cache_file)
# and we want to write permission
if self._default_cache_file and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.info("Default cache file is not writable.")
self._tld_list_path = self._get_cache_file_path()
self._logger.info(
"Changed path of cache file to: %s",
self._tld_list_path
)
if os.access(self._tld_list_path, os.F_OK) and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.error("ERROR: Cache file is not writable for current "
"user. ({})".format(self._tld_list_path))
return False
req = urllib.request.Request(url_list)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.0; '
'WOW64; rv:24.0) Gecko/20100101 '
'Firefox/24.0')
with open(self._tld_list_path, 'w') as ftld:
try:
with urllib.request.urlopen(req) as f:
page = f.read().decode('utf-8')
ftld.write(page)
except HTTPError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(HTTPError: {})".format(e.reason))
return False
except URLError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(URLError: {})".format(e.reason))
return False
return True
def _load_cached_tlds(self):
"""
Loads TLDs from cached file to set.
:return: Set of current TLDs
:rtype: set
"""
# check if cached file is readable
if not os.access(self._tld_list_path, os.R_OK):
self._logger.error("Cached file is not readable for current "
"user. ({})".format(self._tld_list_path))
raise CacheFileError(
"Cached file is not readable for current user."
)
set_of_tlds = set()
with open(self._tld_list_path, 'r') as f_cache_tld:
for line in f_cache_tld:
tld = line.strip().lower()
# skip empty lines
if not tld:
continue
# skip comments
if tld[0] == '#':
continue
set_of_tlds.add("." + tld)
set_of_tlds.add("." + idna.decode(tld))
return set_of_tlds
def _get_last_cachefile_modification(self):
"""
Get last modification of cache file with TLDs.
:return: Date and time of last modification or
None when file does not exist
:rtype: datetime|None
"""
try:
mtime = os.path.getmtime(self._tld_list_path)
except OSError:
return None
return datetime.fromtimestamp(mtime)
|
lipoja/URLExtract | urlextract/cachefile.py | CacheFile._get_default_cache_file_path | python | def _get_default_cache_file_path(self):
default_list_path = os.path.join(
self._get_default_cache_dir(), self._CACHE_FILE_NAME)
if not os.access(default_list_path, os.F_OK):
raise CacheFileError(
"Default cache file does not exist "
"'{}'!".format(default_list_path)
)
return default_list_path | Returns default cache file path
:return: default cache file path (to data directory)
:rtype: str | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L77-L94 | [
"def _get_default_cache_dir(self):\n \"\"\"\n Returns default cache directory (data directory)\n\n :raises: CacheFileError when default cached file does not is exist\n :return: path to default cache directory\n :rtype: str\n \"\"\"\n\n return os.path.join(os.path.dirname(__file__), self._DATA_DIR)\n"
] | class CacheFile:
"""
Class for working with cached TLDs in file.
"""
# file name of cached list of TLDs downloaded from IANA
_CACHE_FILE_NAME = 'tlds-alpha-by-domain.txt'
_DATA_DIR = 'data'
# name used in appdir
_URLEXTRACT_NAME = "urlextract"
def __init__(self, cache_dir=None):
"""
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached file is not readable for user
"""
self._logger = logging.getLogger(self._URLEXTRACT_NAME)
# True if user specified path to cache directory
self._user_defined_cache = bool(cache_dir)
self._default_cache_file = False
# full path for cached file with list of TLDs
self._tld_list_path = self._get_cache_file_path(cache_dir)
if not os.access(self._tld_list_path, os.F_OK):
self._logger.info(
"Cache file not found in '%s'. "
"Use URLExtract.update() to download newest version.",
self._tld_list_path)
self._logger.info(
"Using default list of TLDs provided in urlextract package."
)
self._tld_list_path = self._get_default_cache_file_path()
self._default_cache_file = True
def _get_default_cache_dir(self):
"""
Returns default cache directory (data directory)
:raises: CacheFileError when default cached file does not is exist
:return: path to default cache directory
:rtype: str
"""
return os.path.join(os.path.dirname(__file__), self._DATA_DIR)
def _get_writable_cache_dir(self):
"""
Get writable cache directory with fallback to user's cache directory
and global temp directory
:raises: CacheFileError when cached directory is not writable for user
:return: path to cache directory
:rtype: str
"""
dir_path_data = self._get_default_cache_dir()
if os.access(dir_path_data, os.W_OK):
self._default_cache_file = True
return dir_path_data
dir_path_user = user_cache_dir(self._URLEXTRACT_NAME)
if not os.path.exists(dir_path_user):
os.makedirs(dir_path_user, exist_ok=True)
if os.access(dir_path_user, os.W_OK):
return dir_path_user
dir_path_temp = tempfile.gettempdir()
if os.access(dir_path_temp, os.W_OK):
return dir_path_temp
raise CacheFileError("Cache directories are not writable.")
def _get_cache_file_path(self, cache_dir=None):
"""
Get path for cache file
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached directory is not writable for user
:return: Full path to cached file with TLDs
:rtype: str
"""
if cache_dir is None:
# Tries to get writable cache dir with fallback to users data dir
# and temp directory
cache_dir = self._get_writable_cache_dir()
else:
if not os.access(cache_dir, os.W_OK):
raise CacheFileError("None of cache directories is writable.")
# get directory for cached file
return os.path.join(cache_dir, self._CACHE_FILE_NAME)
def _download_tlds_list(self):
"""
Function downloads list of TLDs from IANA.
LINK: https://data.iana.org/TLD/tlds-alpha-by-domain.txt
:return: True if list was downloaded, False in case of an error
:rtype: bool
"""
url_list = 'https://data.iana.org/TLD/tlds-alpha-by-domain.txt'
# Default cache file exist (set by _default_cache_file)
# and we want to write permission
if self._default_cache_file and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.info("Default cache file is not writable.")
self._tld_list_path = self._get_cache_file_path()
self._logger.info(
"Changed path of cache file to: %s",
self._tld_list_path
)
if os.access(self._tld_list_path, os.F_OK) and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.error("ERROR: Cache file is not writable for current "
"user. ({})".format(self._tld_list_path))
return False
req = urllib.request.Request(url_list)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.0; '
'WOW64; rv:24.0) Gecko/20100101 '
'Firefox/24.0')
with open(self._tld_list_path, 'w') as ftld:
try:
with urllib.request.urlopen(req) as f:
page = f.read().decode('utf-8')
ftld.write(page)
except HTTPError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(HTTPError: {})".format(e.reason))
return False
except URLError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(URLError: {})".format(e.reason))
return False
return True
def _load_cached_tlds(self):
"""
Loads TLDs from cached file to set.
:return: Set of current TLDs
:rtype: set
"""
# check if cached file is readable
if not os.access(self._tld_list_path, os.R_OK):
self._logger.error("Cached file is not readable for current "
"user. ({})".format(self._tld_list_path))
raise CacheFileError(
"Cached file is not readable for current user."
)
set_of_tlds = set()
with open(self._tld_list_path, 'r') as f_cache_tld:
for line in f_cache_tld:
tld = line.strip().lower()
# skip empty lines
if not tld:
continue
# skip comments
if tld[0] == '#':
continue
set_of_tlds.add("." + tld)
set_of_tlds.add("." + idna.decode(tld))
return set_of_tlds
def _get_last_cachefile_modification(self):
"""
Get last modification of cache file with TLDs.
:return: Date and time of last modification or
None when file does not exist
:rtype: datetime|None
"""
try:
mtime = os.path.getmtime(self._tld_list_path)
except OSError:
return None
return datetime.fromtimestamp(mtime)
|
lipoja/URLExtract | urlextract/cachefile.py | CacheFile._get_writable_cache_dir | python | def _get_writable_cache_dir(self):
dir_path_data = self._get_default_cache_dir()
if os.access(dir_path_data, os.W_OK):
self._default_cache_file = True
return dir_path_data
dir_path_user = user_cache_dir(self._URLEXTRACT_NAME)
if not os.path.exists(dir_path_user):
os.makedirs(dir_path_user, exist_ok=True)
if os.access(dir_path_user, os.W_OK):
return dir_path_user
dir_path_temp = tempfile.gettempdir()
if os.access(dir_path_temp, os.W_OK):
return dir_path_temp
raise CacheFileError("Cache directories are not writable.") | Get writable cache directory with fallback to user's cache directory
and global temp directory
:raises: CacheFileError when cached directory is not writable for user
:return: path to cache directory
:rtype: str | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L96-L122 | null | class CacheFile:
"""
Class for working with cached TLDs in file.
"""
# file name of cached list of TLDs downloaded from IANA
_CACHE_FILE_NAME = 'tlds-alpha-by-domain.txt'
_DATA_DIR = 'data'
# name used in appdir
_URLEXTRACT_NAME = "urlextract"
def __init__(self, cache_dir=None):
"""
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached file is not readable for user
"""
self._logger = logging.getLogger(self._URLEXTRACT_NAME)
# True if user specified path to cache directory
self._user_defined_cache = bool(cache_dir)
self._default_cache_file = False
# full path for cached file with list of TLDs
self._tld_list_path = self._get_cache_file_path(cache_dir)
if not os.access(self._tld_list_path, os.F_OK):
self._logger.info(
"Cache file not found in '%s'. "
"Use URLExtract.update() to download newest version.",
self._tld_list_path)
self._logger.info(
"Using default list of TLDs provided in urlextract package."
)
self._tld_list_path = self._get_default_cache_file_path()
self._default_cache_file = True
def _get_default_cache_dir(self):
"""
Returns default cache directory (data directory)
:raises: CacheFileError when default cached file does not is exist
:return: path to default cache directory
:rtype: str
"""
return os.path.join(os.path.dirname(__file__), self._DATA_DIR)
def _get_default_cache_file_path(self):
"""
Returns default cache file path
:return: default cache file path (to data directory)
:rtype: str
"""
default_list_path = os.path.join(
self._get_default_cache_dir(), self._CACHE_FILE_NAME)
if not os.access(default_list_path, os.F_OK):
raise CacheFileError(
"Default cache file does not exist "
"'{}'!".format(default_list_path)
)
return default_list_path
def _get_cache_file_path(self, cache_dir=None):
"""
Get path for cache file
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached directory is not writable for user
:return: Full path to cached file with TLDs
:rtype: str
"""
if cache_dir is None:
# Tries to get writable cache dir with fallback to users data dir
# and temp directory
cache_dir = self._get_writable_cache_dir()
else:
if not os.access(cache_dir, os.W_OK):
raise CacheFileError("None of cache directories is writable.")
# get directory for cached file
return os.path.join(cache_dir, self._CACHE_FILE_NAME)
def _download_tlds_list(self):
"""
Function downloads list of TLDs from IANA.
LINK: https://data.iana.org/TLD/tlds-alpha-by-domain.txt
:return: True if list was downloaded, False in case of an error
:rtype: bool
"""
url_list = 'https://data.iana.org/TLD/tlds-alpha-by-domain.txt'
# Default cache file exist (set by _default_cache_file)
# and we want to write permission
if self._default_cache_file and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.info("Default cache file is not writable.")
self._tld_list_path = self._get_cache_file_path()
self._logger.info(
"Changed path of cache file to: %s",
self._tld_list_path
)
if os.access(self._tld_list_path, os.F_OK) and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.error("ERROR: Cache file is not writable for current "
"user. ({})".format(self._tld_list_path))
return False
req = urllib.request.Request(url_list)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.0; '
'WOW64; rv:24.0) Gecko/20100101 '
'Firefox/24.0')
with open(self._tld_list_path, 'w') as ftld:
try:
with urllib.request.urlopen(req) as f:
page = f.read().decode('utf-8')
ftld.write(page)
except HTTPError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(HTTPError: {})".format(e.reason))
return False
except URLError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(URLError: {})".format(e.reason))
return False
return True
def _load_cached_tlds(self):
"""
Loads TLDs from cached file to set.
:return: Set of current TLDs
:rtype: set
"""
# check if cached file is readable
if not os.access(self._tld_list_path, os.R_OK):
self._logger.error("Cached file is not readable for current "
"user. ({})".format(self._tld_list_path))
raise CacheFileError(
"Cached file is not readable for current user."
)
set_of_tlds = set()
with open(self._tld_list_path, 'r') as f_cache_tld:
for line in f_cache_tld:
tld = line.strip().lower()
# skip empty lines
if not tld:
continue
# skip comments
if tld[0] == '#':
continue
set_of_tlds.add("." + tld)
set_of_tlds.add("." + idna.decode(tld))
return set_of_tlds
def _get_last_cachefile_modification(self):
"""
Get last modification of cache file with TLDs.
:return: Date and time of last modification or
None when file does not exist
:rtype: datetime|None
"""
try:
mtime = os.path.getmtime(self._tld_list_path)
except OSError:
return None
return datetime.fromtimestamp(mtime)
|
lipoja/URLExtract | urlextract/cachefile.py | CacheFile._get_cache_file_path | python | def _get_cache_file_path(self, cache_dir=None):
if cache_dir is None:
# Tries to get writable cache dir with fallback to users data dir
# and temp directory
cache_dir = self._get_writable_cache_dir()
else:
if not os.access(cache_dir, os.W_OK):
raise CacheFileError("None of cache directories is writable.")
# get directory for cached file
return os.path.join(cache_dir, self._CACHE_FILE_NAME) | Get path for cache file
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached directory is not writable for user
:return: Full path to cached file with TLDs
:rtype: str | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L124-L142 | [
"def _get_writable_cache_dir(self):\n \"\"\"\n Get writable cache directory with fallback to user's cache directory\n and global temp directory\n\n :raises: CacheFileError when cached directory is not writable for user\n :return: path to cache directory\n :rtype: str\n \"\"\"\n dir_path_data = self._get_default_cache_dir()\n\n if os.access(dir_path_data, os.W_OK):\n self._default_cache_file = True\n return dir_path_data\n\n dir_path_user = user_cache_dir(self._URLEXTRACT_NAME)\n if not os.path.exists(dir_path_user):\n os.makedirs(dir_path_user, exist_ok=True)\n\n if os.access(dir_path_user, os.W_OK):\n return dir_path_user\n\n dir_path_temp = tempfile.gettempdir()\n if os.access(dir_path_temp, os.W_OK):\n return dir_path_temp\n\n raise CacheFileError(\"Cache directories are not writable.\")\n"
] | class CacheFile:
"""
Class for working with cached TLDs in file.
"""
# file name of cached list of TLDs downloaded from IANA
_CACHE_FILE_NAME = 'tlds-alpha-by-domain.txt'
_DATA_DIR = 'data'
# name used in appdir
_URLEXTRACT_NAME = "urlextract"
def __init__(self, cache_dir=None):
"""
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached file is not readable for user
"""
self._logger = logging.getLogger(self._URLEXTRACT_NAME)
# True if user specified path to cache directory
self._user_defined_cache = bool(cache_dir)
self._default_cache_file = False
# full path for cached file with list of TLDs
self._tld_list_path = self._get_cache_file_path(cache_dir)
if not os.access(self._tld_list_path, os.F_OK):
self._logger.info(
"Cache file not found in '%s'. "
"Use URLExtract.update() to download newest version.",
self._tld_list_path)
self._logger.info(
"Using default list of TLDs provided in urlextract package."
)
self._tld_list_path = self._get_default_cache_file_path()
self._default_cache_file = True
def _get_default_cache_dir(self):
"""
Returns default cache directory (data directory)
:raises: CacheFileError when default cached file does not is exist
:return: path to default cache directory
:rtype: str
"""
return os.path.join(os.path.dirname(__file__), self._DATA_DIR)
def _get_default_cache_file_path(self):
"""
Returns default cache file path
:return: default cache file path (to data directory)
:rtype: str
"""
default_list_path = os.path.join(
self._get_default_cache_dir(), self._CACHE_FILE_NAME)
if not os.access(default_list_path, os.F_OK):
raise CacheFileError(
"Default cache file does not exist "
"'{}'!".format(default_list_path)
)
return default_list_path
def _get_writable_cache_dir(self):
"""
Get writable cache directory with fallback to user's cache directory
and global temp directory
:raises: CacheFileError when cached directory is not writable for user
:return: path to cache directory
:rtype: str
"""
dir_path_data = self._get_default_cache_dir()
if os.access(dir_path_data, os.W_OK):
self._default_cache_file = True
return dir_path_data
dir_path_user = user_cache_dir(self._URLEXTRACT_NAME)
if not os.path.exists(dir_path_user):
os.makedirs(dir_path_user, exist_ok=True)
if os.access(dir_path_user, os.W_OK):
return dir_path_user
dir_path_temp = tempfile.gettempdir()
if os.access(dir_path_temp, os.W_OK):
return dir_path_temp
raise CacheFileError("Cache directories are not writable.")
def _download_tlds_list(self):
"""
Function downloads list of TLDs from IANA.
LINK: https://data.iana.org/TLD/tlds-alpha-by-domain.txt
:return: True if list was downloaded, False in case of an error
:rtype: bool
"""
url_list = 'https://data.iana.org/TLD/tlds-alpha-by-domain.txt'
# Default cache file exist (set by _default_cache_file)
# and we want to write permission
if self._default_cache_file and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.info("Default cache file is not writable.")
self._tld_list_path = self._get_cache_file_path()
self._logger.info(
"Changed path of cache file to: %s",
self._tld_list_path
)
if os.access(self._tld_list_path, os.F_OK) and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.error("ERROR: Cache file is not writable for current "
"user. ({})".format(self._tld_list_path))
return False
req = urllib.request.Request(url_list)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.0; '
'WOW64; rv:24.0) Gecko/20100101 '
'Firefox/24.0')
with open(self._tld_list_path, 'w') as ftld:
try:
with urllib.request.urlopen(req) as f:
page = f.read().decode('utf-8')
ftld.write(page)
except HTTPError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(HTTPError: {})".format(e.reason))
return False
except URLError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(URLError: {})".format(e.reason))
return False
return True
def _load_cached_tlds(self):
"""
Loads TLDs from cached file to set.
:return: Set of current TLDs
:rtype: set
"""
# check if cached file is readable
if not os.access(self._tld_list_path, os.R_OK):
self._logger.error("Cached file is not readable for current "
"user. ({})".format(self._tld_list_path))
raise CacheFileError(
"Cached file is not readable for current user."
)
set_of_tlds = set()
with open(self._tld_list_path, 'r') as f_cache_tld:
for line in f_cache_tld:
tld = line.strip().lower()
# skip empty lines
if not tld:
continue
# skip comments
if tld[0] == '#':
continue
set_of_tlds.add("." + tld)
set_of_tlds.add("." + idna.decode(tld))
return set_of_tlds
def _get_last_cachefile_modification(self):
"""
Get last modification of cache file with TLDs.
:return: Date and time of last modification or
None when file does not exist
:rtype: datetime|None
"""
try:
mtime = os.path.getmtime(self._tld_list_path)
except OSError:
return None
return datetime.fromtimestamp(mtime)
|
lipoja/URLExtract | urlextract/cachefile.py | CacheFile._download_tlds_list | python | def _download_tlds_list(self):
url_list = 'https://data.iana.org/TLD/tlds-alpha-by-domain.txt'
# Default cache file exist (set by _default_cache_file)
# and we want to write permission
if self._default_cache_file and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.info("Default cache file is not writable.")
self._tld_list_path = self._get_cache_file_path()
self._logger.info(
"Changed path of cache file to: %s",
self._tld_list_path
)
if os.access(self._tld_list_path, os.F_OK) and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.error("ERROR: Cache file is not writable for current "
"user. ({})".format(self._tld_list_path))
return False
req = urllib.request.Request(url_list)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.0; '
'WOW64; rv:24.0) Gecko/20100101 '
'Firefox/24.0')
with open(self._tld_list_path, 'w') as ftld:
try:
with urllib.request.urlopen(req) as f:
page = f.read().decode('utf-8')
ftld.write(page)
except HTTPError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(HTTPError: {})".format(e.reason))
return False
except URLError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(URLError: {})".format(e.reason))
return False
return True | Function downloads list of TLDs from IANA.
LINK: https://data.iana.org/TLD/tlds-alpha-by-domain.txt
:return: True if list was downloaded, False in case of an error
:rtype: bool | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L144-L188 | [
"def _get_cache_file_path(self, cache_dir=None):\n \"\"\"\n Get path for cache file\n\n :param str cache_dir: base path for TLD cache, defaults to data dir\n :raises: CacheFileError when cached directory is not writable for user\n :return: Full path to cached file with TLDs\n :rtype: str\n \"\"\"\n if cache_dir is None:\n # Tries to get writable cache dir with fallback to users data dir\n # and temp directory\n cache_dir = self._get_writable_cache_dir()\n else:\n if not os.access(cache_dir, os.W_OK):\n raise CacheFileError(\"None of cache directories is writable.\")\n\n # get directory for cached file\n return os.path.join(cache_dir, self._CACHE_FILE_NAME)\n"
] | class CacheFile:
"""
Class for working with cached TLDs in file.
"""
# file name of cached list of TLDs downloaded from IANA
_CACHE_FILE_NAME = 'tlds-alpha-by-domain.txt'
_DATA_DIR = 'data'
# name used in appdir
_URLEXTRACT_NAME = "urlextract"
def __init__(self, cache_dir=None):
"""
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached file is not readable for user
"""
self._logger = logging.getLogger(self._URLEXTRACT_NAME)
# True if user specified path to cache directory
self._user_defined_cache = bool(cache_dir)
self._default_cache_file = False
# full path for cached file with list of TLDs
self._tld_list_path = self._get_cache_file_path(cache_dir)
if not os.access(self._tld_list_path, os.F_OK):
self._logger.info(
"Cache file not found in '%s'. "
"Use URLExtract.update() to download newest version.",
self._tld_list_path)
self._logger.info(
"Using default list of TLDs provided in urlextract package."
)
self._tld_list_path = self._get_default_cache_file_path()
self._default_cache_file = True
def _get_default_cache_dir(self):
"""
Returns default cache directory (data directory)
:raises: CacheFileError when default cached file does not is exist
:return: path to default cache directory
:rtype: str
"""
return os.path.join(os.path.dirname(__file__), self._DATA_DIR)
def _get_default_cache_file_path(self):
"""
Returns default cache file path
:return: default cache file path (to data directory)
:rtype: str
"""
default_list_path = os.path.join(
self._get_default_cache_dir(), self._CACHE_FILE_NAME)
if not os.access(default_list_path, os.F_OK):
raise CacheFileError(
"Default cache file does not exist "
"'{}'!".format(default_list_path)
)
return default_list_path
def _get_writable_cache_dir(self):
"""
Get writable cache directory with fallback to user's cache directory
and global temp directory
:raises: CacheFileError when cached directory is not writable for user
:return: path to cache directory
:rtype: str
"""
dir_path_data = self._get_default_cache_dir()
if os.access(dir_path_data, os.W_OK):
self._default_cache_file = True
return dir_path_data
dir_path_user = user_cache_dir(self._URLEXTRACT_NAME)
if not os.path.exists(dir_path_user):
os.makedirs(dir_path_user, exist_ok=True)
if os.access(dir_path_user, os.W_OK):
return dir_path_user
dir_path_temp = tempfile.gettempdir()
if os.access(dir_path_temp, os.W_OK):
return dir_path_temp
raise CacheFileError("Cache directories are not writable.")
def _get_cache_file_path(self, cache_dir=None):
"""
Get path for cache file
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached directory is not writable for user
:return: Full path to cached file with TLDs
:rtype: str
"""
if cache_dir is None:
# Tries to get writable cache dir with fallback to users data dir
# and temp directory
cache_dir = self._get_writable_cache_dir()
else:
if not os.access(cache_dir, os.W_OK):
raise CacheFileError("None of cache directories is writable.")
# get directory for cached file
return os.path.join(cache_dir, self._CACHE_FILE_NAME)
def _load_cached_tlds(self):
"""
Loads TLDs from cached file to set.
:return: Set of current TLDs
:rtype: set
"""
# check if cached file is readable
if not os.access(self._tld_list_path, os.R_OK):
self._logger.error("Cached file is not readable for current "
"user. ({})".format(self._tld_list_path))
raise CacheFileError(
"Cached file is not readable for current user."
)
set_of_tlds = set()
with open(self._tld_list_path, 'r') as f_cache_tld:
for line in f_cache_tld:
tld = line.strip().lower()
# skip empty lines
if not tld:
continue
# skip comments
if tld[0] == '#':
continue
set_of_tlds.add("." + tld)
set_of_tlds.add("." + idna.decode(tld))
return set_of_tlds
def _get_last_cachefile_modification(self):
"""
Get last modification of cache file with TLDs.
:return: Date and time of last modification or
None when file does not exist
:rtype: datetime|None
"""
try:
mtime = os.path.getmtime(self._tld_list_path)
except OSError:
return None
return datetime.fromtimestamp(mtime)
|
lipoja/URLExtract | urlextract/cachefile.py | CacheFile._load_cached_tlds | python | def _load_cached_tlds(self):
# check if cached file is readable
if not os.access(self._tld_list_path, os.R_OK):
self._logger.error("Cached file is not readable for current "
"user. ({})".format(self._tld_list_path))
raise CacheFileError(
"Cached file is not readable for current user."
)
set_of_tlds = set()
with open(self._tld_list_path, 'r') as f_cache_tld:
for line in f_cache_tld:
tld = line.strip().lower()
# skip empty lines
if not tld:
continue
# skip comments
if tld[0] == '#':
continue
set_of_tlds.add("." + tld)
set_of_tlds.add("." + idna.decode(tld))
return set_of_tlds | Loads TLDs from cached file to set.
:return: Set of current TLDs
:rtype: set | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L190-L220 | null | class CacheFile:
"""
Class for working with cached TLDs in file.
"""
# file name of cached list of TLDs downloaded from IANA
_CACHE_FILE_NAME = 'tlds-alpha-by-domain.txt'
_DATA_DIR = 'data'
# name used in appdir
_URLEXTRACT_NAME = "urlextract"
def __init__(self, cache_dir=None):
"""
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached file is not readable for user
"""
self._logger = logging.getLogger(self._URLEXTRACT_NAME)
# True if user specified path to cache directory
self._user_defined_cache = bool(cache_dir)
self._default_cache_file = False
# full path for cached file with list of TLDs
self._tld_list_path = self._get_cache_file_path(cache_dir)
if not os.access(self._tld_list_path, os.F_OK):
self._logger.info(
"Cache file not found in '%s'. "
"Use URLExtract.update() to download newest version.",
self._tld_list_path)
self._logger.info(
"Using default list of TLDs provided in urlextract package."
)
self._tld_list_path = self._get_default_cache_file_path()
self._default_cache_file = True
def _get_default_cache_dir(self):
"""
Returns default cache directory (data directory)
:raises: CacheFileError when default cached file does not is exist
:return: path to default cache directory
:rtype: str
"""
return os.path.join(os.path.dirname(__file__), self._DATA_DIR)
def _get_default_cache_file_path(self):
"""
Returns default cache file path
:return: default cache file path (to data directory)
:rtype: str
"""
default_list_path = os.path.join(
self._get_default_cache_dir(), self._CACHE_FILE_NAME)
if not os.access(default_list_path, os.F_OK):
raise CacheFileError(
"Default cache file does not exist "
"'{}'!".format(default_list_path)
)
return default_list_path
def _get_writable_cache_dir(self):
"""
Get writable cache directory with fallback to user's cache directory
and global temp directory
:raises: CacheFileError when cached directory is not writable for user
:return: path to cache directory
:rtype: str
"""
dir_path_data = self._get_default_cache_dir()
if os.access(dir_path_data, os.W_OK):
self._default_cache_file = True
return dir_path_data
dir_path_user = user_cache_dir(self._URLEXTRACT_NAME)
if not os.path.exists(dir_path_user):
os.makedirs(dir_path_user, exist_ok=True)
if os.access(dir_path_user, os.W_OK):
return dir_path_user
dir_path_temp = tempfile.gettempdir()
if os.access(dir_path_temp, os.W_OK):
return dir_path_temp
raise CacheFileError("Cache directories are not writable.")
def _get_cache_file_path(self, cache_dir=None):
"""
Get path for cache file
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached directory is not writable for user
:return: Full path to cached file with TLDs
:rtype: str
"""
if cache_dir is None:
# Tries to get writable cache dir with fallback to users data dir
# and temp directory
cache_dir = self._get_writable_cache_dir()
else:
if not os.access(cache_dir, os.W_OK):
raise CacheFileError("None of cache directories is writable.")
# get directory for cached file
return os.path.join(cache_dir, self._CACHE_FILE_NAME)
def _download_tlds_list(self):
"""
Function downloads list of TLDs from IANA.
LINK: https://data.iana.org/TLD/tlds-alpha-by-domain.txt
:return: True if list was downloaded, False in case of an error
:rtype: bool
"""
url_list = 'https://data.iana.org/TLD/tlds-alpha-by-domain.txt'
# Default cache file exist (set by _default_cache_file)
# and we want to write permission
if self._default_cache_file and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.info("Default cache file is not writable.")
self._tld_list_path = self._get_cache_file_path()
self._logger.info(
"Changed path of cache file to: %s",
self._tld_list_path
)
if os.access(self._tld_list_path, os.F_OK) and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.error("ERROR: Cache file is not writable for current "
"user. ({})".format(self._tld_list_path))
return False
req = urllib.request.Request(url_list)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.0; '
'WOW64; rv:24.0) Gecko/20100101 '
'Firefox/24.0')
with open(self._tld_list_path, 'w') as ftld:
try:
with urllib.request.urlopen(req) as f:
page = f.read().decode('utf-8')
ftld.write(page)
except HTTPError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(HTTPError: {})".format(e.reason))
return False
except URLError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(URLError: {})".format(e.reason))
return False
return True
def _get_last_cachefile_modification(self):
"""
Get last modification of cache file with TLDs.
:return: Date and time of last modification or
None when file does not exist
:rtype: datetime|None
"""
try:
mtime = os.path.getmtime(self._tld_list_path)
except OSError:
return None
return datetime.fromtimestamp(mtime)
|
lipoja/URLExtract | urlextract/cachefile.py | CacheFile._get_last_cachefile_modification | python | def _get_last_cachefile_modification(self):
try:
mtime = os.path.getmtime(self._tld_list_path)
except OSError:
return None
return datetime.fromtimestamp(mtime) | Get last modification of cache file with TLDs.
:return: Date and time of last modification or
None when file does not exist
:rtype: datetime|None | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L222-L236 | null | class CacheFile:
"""
Class for working with cached TLDs in file.
"""
# file name of cached list of TLDs downloaded from IANA
_CACHE_FILE_NAME = 'tlds-alpha-by-domain.txt'
_DATA_DIR = 'data'
# name used in appdir
_URLEXTRACT_NAME = "urlextract"
def __init__(self, cache_dir=None):
"""
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached file is not readable for user
"""
self._logger = logging.getLogger(self._URLEXTRACT_NAME)
# True if user specified path to cache directory
self._user_defined_cache = bool(cache_dir)
self._default_cache_file = False
# full path for cached file with list of TLDs
self._tld_list_path = self._get_cache_file_path(cache_dir)
if not os.access(self._tld_list_path, os.F_OK):
self._logger.info(
"Cache file not found in '%s'. "
"Use URLExtract.update() to download newest version.",
self._tld_list_path)
self._logger.info(
"Using default list of TLDs provided in urlextract package."
)
self._tld_list_path = self._get_default_cache_file_path()
self._default_cache_file = True
def _get_default_cache_dir(self):
"""
Returns default cache directory (data directory)
:raises: CacheFileError when default cached file does not is exist
:return: path to default cache directory
:rtype: str
"""
return os.path.join(os.path.dirname(__file__), self._DATA_DIR)
def _get_default_cache_file_path(self):
"""
Returns default cache file path
:return: default cache file path (to data directory)
:rtype: str
"""
default_list_path = os.path.join(
self._get_default_cache_dir(), self._CACHE_FILE_NAME)
if not os.access(default_list_path, os.F_OK):
raise CacheFileError(
"Default cache file does not exist "
"'{}'!".format(default_list_path)
)
return default_list_path
def _get_writable_cache_dir(self):
"""
Get writable cache directory with fallback to user's cache directory
and global temp directory
:raises: CacheFileError when cached directory is not writable for user
:return: path to cache directory
:rtype: str
"""
dir_path_data = self._get_default_cache_dir()
if os.access(dir_path_data, os.W_OK):
self._default_cache_file = True
return dir_path_data
dir_path_user = user_cache_dir(self._URLEXTRACT_NAME)
if not os.path.exists(dir_path_user):
os.makedirs(dir_path_user, exist_ok=True)
if os.access(dir_path_user, os.W_OK):
return dir_path_user
dir_path_temp = tempfile.gettempdir()
if os.access(dir_path_temp, os.W_OK):
return dir_path_temp
raise CacheFileError("Cache directories are not writable.")
def _get_cache_file_path(self, cache_dir=None):
"""
Get path for cache file
:param str cache_dir: base path for TLD cache, defaults to data dir
:raises: CacheFileError when cached directory is not writable for user
:return: Full path to cached file with TLDs
:rtype: str
"""
if cache_dir is None:
# Tries to get writable cache dir with fallback to users data dir
# and temp directory
cache_dir = self._get_writable_cache_dir()
else:
if not os.access(cache_dir, os.W_OK):
raise CacheFileError("None of cache directories is writable.")
# get directory for cached file
return os.path.join(cache_dir, self._CACHE_FILE_NAME)
def _download_tlds_list(self):
"""
Function downloads list of TLDs from IANA.
LINK: https://data.iana.org/TLD/tlds-alpha-by-domain.txt
:return: True if list was downloaded, False in case of an error
:rtype: bool
"""
url_list = 'https://data.iana.org/TLD/tlds-alpha-by-domain.txt'
# Default cache file exist (set by _default_cache_file)
# and we want to write permission
if self._default_cache_file and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.info("Default cache file is not writable.")
self._tld_list_path = self._get_cache_file_path()
self._logger.info(
"Changed path of cache file to: %s",
self._tld_list_path
)
if os.access(self._tld_list_path, os.F_OK) and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.error("ERROR: Cache file is not writable for current "
"user. ({})".format(self._tld_list_path))
return False
req = urllib.request.Request(url_list)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.0; '
'WOW64; rv:24.0) Gecko/20100101 '
'Firefox/24.0')
with open(self._tld_list_path, 'w') as ftld:
try:
with urllib.request.urlopen(req) as f:
page = f.read().decode('utf-8')
ftld.write(page)
except HTTPError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(HTTPError: {})".format(e.reason))
return False
except URLError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(URLError: {})".format(e.reason))
return False
return True
def _load_cached_tlds(self):
    """
    Load the set of TLDs from the cache file.

    :raises CacheFileError: when the cached file is not readable
    :return: set of current TLDs, each prefixed with a dot; every entry
        is stored both as-is and in its IDNA-decoded (unicode) form
    :rtype: set
    """
    # Bail out early when the cache file cannot be read at all.
    if not os.access(self._tld_list_path, os.R_OK):
        self._logger.error("Cached file is not readable for current "
                           "user. ({})".format(self._tld_list_path))
        raise CacheFileError(
            "Cached file is not readable for current user."
        )

    tlds = set()
    with open(self._tld_list_path, 'r') as f_cache_tld:
        for line in f_cache_tld:
            tld = line.strip().lower()
            # skip blank lines and '#' comments
            if not tld or tld.startswith('#'):
                continue
            tlds.add("." + tld)
            # also keep the unicode form of punycode (xn--) TLDs
            tlds.add("." + idna.decode(tld))
    return tlds
|
def _urlextract_cli():
    """
    urlextract - command line program that will print all URLs to stdout

    Usage: urlextract [input_file] [-u] [-v]

    input_file - text file with URLs to extract
    """
    import argparse

    def get_args():
        """Parse program arguments."""
        parser = argparse.ArgumentParser(
            description='urlextract - prints out all URLs that were '
                        'found in input file or stdin based on locating '
                        'their TLDs')

        ver = URLExtract.get_version()
        parser.add_argument("-v", "--version", action="version",
                            version='%(prog)s - version {}'.format(ver))
        parser.add_argument(
            "-u", "--unique", dest='unique', action='store_true',
            help='print out only unique URLs found in file.')
        parser.add_argument(
            'input_file', nargs='?', metavar='<input_file>',
            type=argparse.FileType(encoding="UTF-8"), default=sys.stdin,
            help='input text file with URLs to extract. [UTF-8]')

        return parser.parse_args()

    args = get_args()

    # Log to stderr so extracted URLs on stdout stay machine-readable.
    logging.basicConfig(
        level=logging.INFO, stream=sys.stderr,
        format='%(asctime)s - %(levelname)s (%(name)s): %(message)s')
    logger = logging.getLogger('urlextract')

    try:
        urlextract = URLExtract()
        # refresh the TLD cache when it is more than 30 days old
        urlextract.update_when_older(30)
        content = args.input_file.read()
        for url in urlextract.find_urls(content, args.unique):
            print(url)
    except CacheFileError as e:
        logger.error(str(e))
        sys.exit(-1)
    finally:
        args.input_file.close()
Usage: urlextract [input_file] [-u] [-v]
input_file - text file with URLs to extract | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L590-L640 | [
"def get_args():\n \"\"\"\n Parse programs arguments\n \"\"\"\n parser = argparse.ArgumentParser(\n description='urlextract - prints out all URLs that were '\n 'found in input file or stdin based on locating '\n 'their TLDs')\n\n ver = URLExtract.get_version()\n parser.add_argument(\"-v\", \"--version\", action=\"version\",\n version='%(prog)s - version {}'.format(ver))\n\n parser.add_argument(\n \"-u\", \"--unique\", dest='unique', action='store_true',\n help='print out only unique URLs found in file.')\n\n parser.add_argument(\n 'input_file', nargs='?', metavar='<input_file>',\n type=argparse.FileType(encoding=\"UTF-8\"), default=sys.stdin,\n help='input text file with URLs to extract. [UTF-8]')\n\n parsed_args = parser.parse_args()\n return parsed_args\n",
"def update_when_older(self, days):\n \"\"\"\n Update TLD list cache file if the list is older than\n number of days given in parameter `days` or if does not exist.\n\n :param int days: number of days from last change\n :return: True if update was successful, False otherwise\n :rtype: bool\n \"\"\"\n\n last_cache = self._get_last_cachefile_modification()\n if last_cache is None:\n return self.update()\n\n time_to_update = last_cache + timedelta(days=days)\n\n if datetime.now() >= time_to_update:\n return self.update()\n\n return True\n",
"def find_urls(self, text, only_unique=False):\n \"\"\"\n Find all URLs in given text.\n\n :param str text: text where we want to find URLs\n :param bool only_unique: return only unique URLs\n :return: list of URLs found in text\n :rtype: list\n \"\"\"\n urls = self.gen_urls(text)\n urls = OrderedDict.fromkeys(urls) if only_unique else urls\n return list(urls)\n"
] | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
urlextract_core.py - file with definition of URLExtract class and urlextract cli
.. Created on 2016-07-29
.. Licence MIT
.. codeauthor:: Jan Lipovský <janlipovsky@gmail.com>, janlipovsky.cz
.. contributors: https://github.com/lipoja/URLExtract/graphs/contributors
"""
import logging
import re
import string
import sys
import warnings
from collections import OrderedDict
from datetime import datetime, timedelta
import uritools
from urlextract.cachefile import CacheFileError, CacheFile
# version of URLExtract (do not forget to change it in setup.py as well)
__version__ = '0.10'
class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
    """
    Initialize function for URLExtract class.

    Tries to get cached TLDs, if cached file does not exist it will try
    to download new list from IANA and save it to cache file.
    """
    super(URLExtract, self).__init__(**kwargs)

    self._tlds_re = None
    self._reload_tlds_from_file()

    # stop characters shared by both sides of a URL candidate
    general_stop_chars = {'\"', '\'', '<', '>', ';'}
    # defining default stop chars left
    self._stop_chars_left = set(string.whitespace)
    self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}

    # defining default stop chars right
    self._stop_chars_right = set(string.whitespace)
    self._stop_chars_right |= general_stop_chars

    # preprocessed union _stop_chars is used in _validate_tld_match
    self._stop_chars = self._stop_chars_left | self._stop_chars_right

    # characters that are allowed to be right after TLD
    self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
    """
    Initialize the set of characters that may validly follow a TLD.

    :return: set of characters allowed right after a TLD
    :rtype: set
    """
    after_tld_chars = set(string.whitespace)
    after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
    # get right enclosure characters
    _, right_enclosure = zip(*self._enclosure)
    # add right enclosure characters to be valid after TLD
    # for correct parsing of URL e.g. (example.com)
    after_tld_chars |= set(right_enclosure)

    return after_tld_chars
def _reload_tlds_from_file(self):
    """
    Reload TLDs from the cache file and compile the matching regexp.

    :raises CacheFileError: when cached file is not readable for user
    """
    # Regex alternation matches the first alternative that fits, so the
    # TLDs are sorted longest-first to prefer the longest possible match.
    tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
    escaped = (re.escape(str(tld)) for tld in tlds)
    self._tlds_re = re.compile('|'.join(escaped))
def update(self):
    """
    Update the TLD list cache file.

    :return: True if update was successful, False otherwise
    :rtype: bool
    """
    # Only recompile the TLD regexp after a successful download.
    if self._download_tlds_list():
        self._reload_tlds_from_file()
        return True
    return False
def update_when_older(self, days):
    """
    Update the TLD cache file if it is older than `days` days,
    or if it does not exist at all.

    :param int days: maximum allowed age of the cache in days
    :return: True if update was successful, False otherwise
    :rtype: bool
    """
    last_cache = self._get_last_cachefile_modification()
    # no cache file yet -> always try to download a fresh list
    if last_cache is None:
        return self.update()

    # refresh only once the cache has expired
    if datetime.now() >= last_cache + timedelta(days=days):
        return self.update()
    return True
@staticmethod
def get_version():
    """
    Returns version number.

    :return: version number (module-level ``__version__``)
    :rtype: str
    """
    return __version__
def get_after_tld_chars(self):
    """
    Returns list of chars that are allowed after TLD.

    Returns a new list (copy); mutating it does not affect matching.

    :return: list of chars that are allowed after TLD
    :rtype: list
    """
    return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
    """
    Set chars that are allowed after TLD.

    The given iterable replaces the current set entirely.

    :param list after_tld_chars: list of characters
    """
    self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
    """
    Returns list of stop chars.

    .. deprecated:: 0.7
        Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
        instead.

    :return: list of stop chars
    :rtype: list
    """
    warnings.warn("Method get_stop_chars is deprecated, "
                  "use `get_stop_chars_left` or "
                  "`get_stop_chars_right` instead", DeprecationWarning)
    # _stop_chars is the precomputed union of left and right stop chars
    return list(self._stop_chars)
def set_stop_chars(self, stop_chars):
    """
    Set stop characters used when determining end of URL.

    .. deprecated:: 0.7
        Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
        instead.

    :param list stop_chars: list of characters
    """
    warnings.warn("Method set_stop_chars is deprecated, "
                  "use `set_stop_chars_left` or "
                  "`set_stop_chars_right` instead", DeprecationWarning)
    self._stop_chars = set(stop_chars)
    # NOTE: left and right now alias the SAME set object, so a later
    # mutation of one is visible through the other.
    self._stop_chars_left = self._stop_chars
    self._stop_chars_right = self._stop_chars
def get_stop_chars_left(self):
    """
    Returns set of stop chars for text on left from TLD.

    NOTE(review): returns the internal set itself, not a copy — mutating
    it bypasses the union recomputation done by the setters.

    :return: set of stop chars
    :rtype: set
    """
    return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
    """
    Set stop characters for text on left from TLD.
    Stop characters are used when determining end of URL.

    :param set stop_chars: set of characters
    :raises TypeError: when `stop_chars` is not a set
    """
    if not isinstance(stop_chars, set):
        raise TypeError("stop_chars should be type set "
                        "but {} was given".format(type(stop_chars)))

    self._stop_chars_left = stop_chars
    # keep the precomputed union in sync for _validate_tld_match
    self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
    """
    Returns set of stop chars for text on right from TLD.

    NOTE(review): returns the internal set itself, not a copy — mutating
    it bypasses the union recomputation done by the setters.

    :return: set of stop chars
    :rtype: set
    """
    return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
    """
    Set stop characters for text on right from TLD.
    Stop characters are used when determining end of URL.

    :param set stop_chars: set of characters
    :raises TypeError: when `stop_chars` is not a set
    """
    if not isinstance(stop_chars, set):
        raise TypeError("stop_chars should be type set "
                        "but {} was given".format(type(stop_chars)))

    self._stop_chars_right = stop_chars
    # keep the precomputed union in sync for _validate_tld_match
    self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
    """
    Returns set of enclosure pairs that might be used to enclosure URL.

    For example brackets (example.com), [example.com], {example.com}

    NOTE(review): returns the internal class-level set itself, not a
    copy — mutations affect all instances; confirm that is intended.

    :return: set of tuple of enclosure characters
    :rtype: set(tuple(str,str))
    """
    return self._enclosure
def add_enclosure(self, left_char, right_char):
    """
    Add a new enclosure pair of characters, to be removed when their
    presence is detected at the beginning and end of a found URL.

    NOTE(review): ``_enclosure`` is a class attribute, so this mutates
    the set shared by all instances — confirm that is intended.

    :param str left_char: left character of enclosure pair - e.g. "("
    :param str right_char: right character of enclosure pair - e.g. ")"
    """
    assert len(left_char) == 1, \
        "Parameter left_char must be character not string"
    assert len(right_char) == 1, \
        "Parameter right_char must be character not string"
    self._enclosure.add((left_char, right_char))

    # right-enclosure chars are valid after a TLD -> refresh the cache
    self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
    """
    Remove enclosure pair from set of enclosures.

    NOTE(review): ``_enclosure`` is a class attribute, so this mutates
    the set shared by all instances — confirm that is intended.

    :param str left_char: left character of enclosure pair - e.g. "("
    :param str right_char: right character of enclosure pair - e.g. ")"
    """
    assert len(left_char) == 1, \
        "Parameter left_char must be character not string"
    assert len(right_char) == 1, \
        "Parameter right_char must be character not string"
    rm_enclosure = (left_char, right_char)
    if rm_enclosure in self._enclosure:
        self._enclosure.remove(rm_enclosure)

    # refresh the cached set of characters allowed after a TLD
    self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
    """
    Expand string in both sides to match whole URL.

    :param str text: text where we want to find URL
    :param int tld_pos: position of TLD
    :param str tld: matched TLD which should be in text
    :return: the complete URL, or "" when the candidate does not have a
        valid domain
    :rtype: str
    """
    left_ok = True
    right_ok = True
    max_len = len(text) - 1
    end_pos = tld_pos
    start_pos = tld_pos
    # Grow the [start_pos, end_pos] window outwards from the TLD until a
    # stop character (or the text boundary) is hit on each side.
    while left_ok or right_ok:
        if left_ok:
            if start_pos <= 0:
                left_ok = False
            else:
                if text[start_pos - 1] not in self._stop_chars_left:
                    start_pos -= 1
                else:
                    left_ok = False
        if right_ok:
            if end_pos >= max_len:
                right_ok = False
            else:
                if text[end_pos + 1] not in self._stop_chars_right:
                    end_pos += 1
                else:
                    right_ok = False

    complete_url = text[start_pos:end_pos + 1].lstrip('/')
    # remove last character from url
    # when it is allowed character right after TLD (e.g. dot, comma)
    temp_tlds = {tld + c for c in self._after_tld_chars}
    # get only dot+tld+one_char and compare
    if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
        complete_url = complete_url[:-1]

    # strip markdown wrapping and enclosure characters, then validate
    complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
    complete_url = self._remove_enclosure_from_url(
        complete_url, tld_pos-start_pos, tld)
    if not self._is_domain_valid(complete_url, tld):
        return ""

    return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
    """
    Validate TLD match - tells if at found position is really TLD.

    :param str text: text where we want to find URLs
    :param str matched_tld: matched TLD
    :param int tld_pos: position of matched TLD
    :return: True if match is valid, False otherwise
    :rtype: bool
    """
    if tld_pos > len(text):
        return False

    # A TLD can never start the text, and the character right before it
    # must not be a left-side stop character.
    if tld_pos <= 0 or text[tld_pos - 1] in self._stop_chars_left:
        return False

    # When the TLD is not at the very end of the text, the character
    # right after it must be one of the allowed after-TLD characters.
    right_tld_pos = tld_pos + len(matched_tld)
    if right_tld_pos < len(text) and \
            text[right_tld_pos] not in self._after_tld_chars:
        return False

    return True
def _is_domain_valid(self, url, tld):
    """
    Checks if given URL has valid domain name (ignores subdomains).

    :param str url: complete URL that we want to check
    :param str tld: TLD that should be found at the end of URL (hostname)
    :return: True if URL is valid, False otherwise
    :rtype: bool

    >>> extractor = URLExtract()
    >>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
    True

    >>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
    True

    >>> extractor._is_domain_valid("invalid.cz.", ".cz")
    False

    >>> extractor._is_domain_valid("invalid.cz,", ".cz")
    False

    >>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
    False

    >>> extractor._is_domain_valid("-is.valid.cz", ".cz")
    True

    >>> extractor._is_domain_valid("not.valid-.cz", ".cz")
    False

    >>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
    False
    """
    if not url:
        return False

    scheme_pos = url.find('://')
    if scheme_pos == -1:
        # no scheme present -> prepend one so uritools can locate
        # the authority part
        url = 'http://' + url

    url_parts = uritools.urisplit(url)
    # <scheme>://<authority>/<path>?<query>#<fragment>
    try:
        host = url_parts.gethost()
    except ValueError:
        self._logger.info(
            "Invalid host '%s'. "
            "If the host is valid report a bug.", url
        )
        return False

    if not host:
        return False

    host_parts = host.split('.')
    if len(host_parts) <= 1:
        # hostname without a dot (e.g. "blog") is not a domain
        return False

    # hostname must end exactly with the matched TLD
    host_tld = '.'+host_parts[-1]
    if host_tld != tld:
        return False

    # naive validation of the label directly below the TLD
    top = host_parts[-2]

    if self._hostname_re.match(top) is None:
        return False

    return True
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
    """
    Removes enclosure characters from URL given in text_url.
    For example: (example.com) -> example.com

    :param str text_url: text with URL that we want to extract from
        enclosure of two characters
    :param int tld_pos: position of TLD in text_url
    :param str tld: matched TLD which should be in text
    :return: URL that has removed enclosure
    :rtype: str
    """
    enclosure_map = {
        left_char: right_char
        for left_char, right_char in self._enclosure
    }
    # get position of most right left_char of enclosure pairs
    # (rfind returns -1 when the char is absent, so max() yields -1
    # only when no left enclosure char precedes the TLD)
    left_pos = max([
        text_url.rfind(left_char, 0, tld_pos)
        for left_char in enclosure_map.keys()
    ])
    left_char = text_url[left_pos] if left_pos >= 0 else ''
    right_char = enclosure_map.get(left_char, '')
    right_pos = text_url.rfind(right_char) if right_char else len(text_url)
    if right_pos < 0 or right_pos < tld_pos:
        # matching right char missing or before the TLD -> keep the tail
        right_pos = len(text_url)

    new_url = text_url[left_pos + 1:right_pos]
    # positions shift after stripping the left enclosure prefix
    tld_pos -= left_pos + 1

    # Get valid domain when we have input as: example.com)/path
    # we assume that if there is enclosure character after TLD it is
    # the end URL it self therefore we remove the rest
    after_tld_pos = tld_pos + len(tld)
    if after_tld_pos < len(new_url):
        if new_url[after_tld_pos] in enclosure_map.values():
            new_url_tmp = new_url[:after_tld_pos]
            # recurse: the truncated URL may still carry enclosures
            return self._remove_enclosure_from_url(
                new_url_tmp, tld_pos, tld)

    return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
    """
    Split markdown URL. There is an issue when a Markdown URL is found.
    Parsing of the URL does not stop at the right place so the wrongly
    found URL has to be split.

    :param str text_url: URL that we want to extract from enclosure
    :param int tld_pos: position of TLD
    :return: URL that has removed enclosure
    :rtype: str
    """
    # Markdown url can look like:
    # [http://example.com/](http://example.com/status/210)

    left_bracket_pos = text_url.find('[')
    # subtract 3 because URL is never shorter than 3 characters
    if left_bracket_pos > tld_pos-3:
        # '[' appears at/after the TLD -> not a markdown wrapper
        return text_url

    right_bracket_pos = text_url.find(')')
    if right_bracket_pos < tld_pos:
        # no closing ')' after the TLD -> not a markdown wrapper
        return text_url

    middle_pos = text_url.rfind("](")
    if middle_pos > tld_pos:
        # TLD belongs to the link-text part -> return only that part
        return text_url[left_bracket_pos+1:middle_pos]
    return text_url
def gen_urls(self, text):
    """
    Creates generator over found URLs in given text.

    :param str text: text where we want to find URLs
    :yields: URL found in text or empty string if no found
    :rtype: str
    """
    # tld_pos acts as a cursor into `text`; it is advanced past every
    # processed TLD/URL so later occurrences are searched after it.
    tld_pos = 0
    matched_tlds = self._tlds_re.findall(text)

    for tld in matched_tlds:
        tmp_text = text[tld_pos:]
        offset = tld_pos
        # find() may return -1 when this tld occurrence was already
        # consumed as part of a previously extracted URL
        tld_pos = tmp_text.find(tld)
        validated = self._validate_tld_match(text, tld, offset + tld_pos)
        if tld_pos != -1 and validated:
            tmp_url = self._complete_url(text, offset + tld_pos, tld)
            if tmp_url:
                yield tmp_url

                # do not search for TLD in already extracted URL
                tld_pos_url = tmp_url.find(tld)
                # move cursor right after found TLD
                tld_pos += len(tld) + offset
                # move cursor after end of found URL
                tld_pos += len(tmp_url[tld_pos_url+len(tld):])
                continue

        # move cursor right after found TLD
        tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
    """
    Find all URLs in given text.

    :param str text: text where we want to find URLs
    :param bool only_unique: return only unique URLs
    :return: list of URLs found in text
    :rtype: list
    """
    found = self.gen_urls(text)
    if only_unique:
        # OrderedDict deduplicates while keeping first-seen order
        return list(OrderedDict.fromkeys(found))
    return list(found)
def has_urls(self, text):
    """
    Checks if text contains any valid URL.
    Returns True if text contains at least one URL.

    >>> extractor = URLExtract()
    >>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
    True

    >>> extractor.has_urls("Clean text")
    False

    :param text: text where we want to find URLs
    :return: True if at least one URL was found, False otherwise
    :rtype: bool
    """
    # any() stops at the first generated URL, so the whole text is not
    # necessarily scanned
    return any(self.gen_urls(text))
# Run the command-line interface when executed as a script.
if __name__ == '__main__':
    _urlextract_cli()
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract._get_after_tld_chars | python | def _get_after_tld_chars(self):
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get left enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars | Initialize after tld characters | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L91-L103 | null | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _reload_tlds_from_file(self):
"""
Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user
"""
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped))
def update(self):
"""
Update TLD list cache file.
:return: True if update was successful False otherwise
:rtype: bool
"""
if not self._download_tlds_list():
return False
self._reload_tlds_from_file()
return True
def update_when_older(self, days):
"""
Update TLD list cache file if the list is older than
number of days given in parameter `days` or if does not exist.
:param int days: number of days from last change
:return: True if update was successful, False otherwise
:rtype: bool
"""
last_cache = self._get_last_cachefile_modification()
if last_cache is None:
return self.update()
time_to_update = last_cache + timedelta(days=days)
if datetime.now() >= time_to_update:
return self.update()
return True
@staticmethod
def get_version():
"""
Returns version number.
:return: version number
:rtype: str
"""
return __version__
def get_after_tld_chars(self):
"""
Returns list of chars that are allowed after TLD
:return: list of chars that are allowed after TLD
:rtype: list
"""
return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
"""
Set chars that are allowed after TLD.
:param list after_tld_chars: list of characters
"""
self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
"""
Returns list of stop chars.
.. deprecated:: 0.7
Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
instead.
:return: list of stop chars
:rtype: list
"""
warnings.warn("Method get_stop_chars is deprecated, "
"use `get_stop_chars_left` or "
"`get_stop_chars_right` instead", DeprecationWarning)
return list(self._stop_chars)
def set_stop_chars(self, stop_chars):
"""
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
"""
warnings.warn("Method set_stop_chars is deprecated, "
"use `set_stop_chars_left` or "
"`set_stop_chars_right` instead", DeprecationWarning)
self._stop_chars = set(stop_chars)
self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars
def get_stop_chars_left(self):
"""
Returns set of stop chars for text on left from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
"""
Set stop characters for text on left from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_left = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
"""
Returns set of stop chars for text on right from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
"""
Set stop characters for text on right from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_right = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
"""
Returns set of enclosure pairs that might be used to enclosure URL.
For example brackets (example.com), [example.com], {example.com}
:return: set of tuple of enclosure characters
:rtype: set(tuple(str,str))
"""
return self._enclosure
def add_enclosure(self, left_char, right_char):
"""
Add new enclosure pair of characters. That and should be removed
when their presence is detected at beginning and end of found URL
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
self._enclosure.add((left_char, right_char))
self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
"""
Remove enclosure pair from set of enclosures.
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
rm_enclosure = (left_char, right_char)
if rm_enclosure in self._enclosure:
self._enclosure.remove(rm_enclosure)
self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
"""
Expand string in both sides to match whole URL.
:param str text: text where we want to find URL
:param int tld_pos: position of TLD
:param str tld: matched TLD which should be in text
:return: returns URL
:rtype: str
"""
left_ok = True
right_ok = True
max_len = len(text) - 1
end_pos = tld_pos
start_pos = tld_pos
while left_ok or right_ok:
if left_ok:
if start_pos <= 0:
left_ok = False
else:
if text[start_pos - 1] not in self._stop_chars_left:
start_pos -= 1
else:
left_ok = False
if right_ok:
if end_pos >= max_len:
right_ok = False
else:
if text[end_pos + 1] not in self._stop_chars_right:
end_pos += 1
else:
right_ok = False
complete_url = text[start_pos:end_pos + 1].lstrip('/')
# remove last character from url
# when it is allowed character right after TLD (e.g. dot, comma)
temp_tlds = {tld + c for c in self._after_tld_chars}
# get only dot+tld+one_char and compare
if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
complete_url = complete_url[:-1]
complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
complete_url = self._remove_enclosure_from_url(
complete_url, tld_pos-start_pos, tld)
if not self._is_domain_valid(complete_url, tld):
return ""
return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
"""
Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool
"""
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False
def _is_domain_valid(self, url, tld):
"""
Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False
"""
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
"""
Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str
"""
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
# get position of most right left_char of enclosure pairs
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
tld_pos -= left_pos + 1
# Get valid domain when we have input as: example.com)/path
# we assume that if there is enclosure character after TLD it is
# the end URL it self therefore we remove the rest
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# Markdown url can looks like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url
def gen_urls(self, text):
"""
Creates generator over found URLs in given text.
:param str text: text where we want to find URLs
:yields: URL found in text or empty string if no found
:rtype: str
"""
tld_pos = 0
matched_tlds = self._tlds_re.findall(text)
for tld in matched_tlds:
tmp_text = text[tld_pos:]
offset = tld_pos
tld_pos = tmp_text.find(tld)
validated = self._validate_tld_match(text, tld, offset + tld_pos)
if tld_pos != -1 and validated:
tmp_url = self._complete_url(text, offset + tld_pos, tld)
if tmp_url:
yield tmp_url
# do not search for TLD in already extracted URL
tld_pos_url = tmp_url.find(tld)
# move cursor right after found TLD
tld_pos += len(tld) + offset
# move cursor after end of found URL
tld_pos += len(tmp_url[tld_pos_url+len(tld):])
continue
# move cursor right after found TLD
tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
"""
Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list
"""
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls)
def has_urls(self, text):
"""
Checks if text contains any valid URL.
Returns True if text contains at least one URL.
>>> extractor = URLExtract()
>>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
True
>>> extractor.has_urls("Clean text")
False
:param text: text where we want to find URLs
:return: True if et least one URL was found, False otherwise
:rtype: bool
"""
return any(self.gen_urls(text))
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract._reload_tlds_from_file | python | def _reload_tlds_from_file(self):
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped)) | Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L105-L113 | [
"def _load_cached_tlds(self):\n \"\"\"\n Loads TLDs from cached file to set.\n\n :return: Set of current TLDs\n :rtype: set\n \"\"\"\n\n # check if cached file is readable\n if not os.access(self._tld_list_path, os.R_OK):\n self._logger.error(\"Cached file is not readable for current \"\n \"user. ({})\".format(self._tld_list_path))\n raise CacheFileError(\n \"Cached file is not readable for current user.\"\n )\n\n set_of_tlds = set()\n with open(self._tld_list_path, 'r') as f_cache_tld:\n for line in f_cache_tld:\n tld = line.strip().lower()\n # skip empty lines\n if not tld:\n continue\n # skip comments\n if tld[0] == '#':\n continue\n\n set_of_tlds.add(\".\" + tld)\n set_of_tlds.add(\".\" + idna.decode(tld))\n\n return set_of_tlds\n"
] | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
"""
Initialize after tld characters
"""
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get left enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars
def update(self):
"""
Update TLD list cache file.
:return: True if update was successful False otherwise
:rtype: bool
"""
if not self._download_tlds_list():
return False
self._reload_tlds_from_file()
return True
def update_when_older(self, days):
"""
Update TLD list cache file if the list is older than
number of days given in parameter `days` or if does not exist.
:param int days: number of days from last change
:return: True if update was successful, False otherwise
:rtype: bool
"""
last_cache = self._get_last_cachefile_modification()
if last_cache is None:
return self.update()
time_to_update = last_cache + timedelta(days=days)
if datetime.now() >= time_to_update:
return self.update()
return True
@staticmethod
def get_version():
"""
Returns version number.
:return: version number
:rtype: str
"""
return __version__
def get_after_tld_chars(self):
"""
Returns list of chars that are allowed after TLD
:return: list of chars that are allowed after TLD
:rtype: list
"""
return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
"""
Set chars that are allowed after TLD.
:param list after_tld_chars: list of characters
"""
self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
"""
Returns list of stop chars.
.. deprecated:: 0.7
Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
instead.
:return: list of stop chars
:rtype: list
"""
warnings.warn("Method get_stop_chars is deprecated, "
"use `get_stop_chars_left` or "
"`get_stop_chars_right` instead", DeprecationWarning)
return list(self._stop_chars)
def set_stop_chars(self, stop_chars):
"""
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
"""
warnings.warn("Method set_stop_chars is deprecated, "
"use `set_stop_chars_left` or "
"`set_stop_chars_right` instead", DeprecationWarning)
self._stop_chars = set(stop_chars)
self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars
def get_stop_chars_left(self):
"""
Returns set of stop chars for text on left from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
"""
Set stop characters for text on left from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_left = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
"""
Returns set of stop chars for text on right from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
"""
Set stop characters for text on right from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_right = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
"""
Returns set of enclosure pairs that might be used to enclosure URL.
For example brackets (example.com), [example.com], {example.com}
:return: set of tuple of enclosure characters
:rtype: set(tuple(str,str))
"""
return self._enclosure
def add_enclosure(self, left_char, right_char):
"""
Add new enclosure pair of characters. That and should be removed
when their presence is detected at beginning and end of found URL
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
self._enclosure.add((left_char, right_char))
self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
"""
Remove enclosure pair from set of enclosures.
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
rm_enclosure = (left_char, right_char)
if rm_enclosure in self._enclosure:
self._enclosure.remove(rm_enclosure)
self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
"""
Expand string in both sides to match whole URL.
:param str text: text where we want to find URL
:param int tld_pos: position of TLD
:param str tld: matched TLD which should be in text
:return: returns URL
:rtype: str
"""
left_ok = True
right_ok = True
max_len = len(text) - 1
end_pos = tld_pos
start_pos = tld_pos
while left_ok or right_ok:
if left_ok:
if start_pos <= 0:
left_ok = False
else:
if text[start_pos - 1] not in self._stop_chars_left:
start_pos -= 1
else:
left_ok = False
if right_ok:
if end_pos >= max_len:
right_ok = False
else:
if text[end_pos + 1] not in self._stop_chars_right:
end_pos += 1
else:
right_ok = False
complete_url = text[start_pos:end_pos + 1].lstrip('/')
# remove last character from url
# when it is allowed character right after TLD (e.g. dot, comma)
temp_tlds = {tld + c for c in self._after_tld_chars}
# get only dot+tld+one_char and compare
if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
complete_url = complete_url[:-1]
complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
complete_url = self._remove_enclosure_from_url(
complete_url, tld_pos-start_pos, tld)
if not self._is_domain_valid(complete_url, tld):
return ""
return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
"""
Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool
"""
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False
def _is_domain_valid(self, url, tld):
"""
Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False
"""
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
"""
Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str
"""
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
# get position of most right left_char of enclosure pairs
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
tld_pos -= left_pos + 1
# Get valid domain when we have input as: example.com)/path
# we assume that if there is enclosure character after TLD it is
# the end URL it self therefore we remove the rest
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# Markdown url can looks like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url
def gen_urls(self, text):
"""
Creates generator over found URLs in given text.
:param str text: text where we want to find URLs
:yields: URL found in text or empty string if no found
:rtype: str
"""
tld_pos = 0
matched_tlds = self._tlds_re.findall(text)
for tld in matched_tlds:
tmp_text = text[tld_pos:]
offset = tld_pos
tld_pos = tmp_text.find(tld)
validated = self._validate_tld_match(text, tld, offset + tld_pos)
if tld_pos != -1 and validated:
tmp_url = self._complete_url(text, offset + tld_pos, tld)
if tmp_url:
yield tmp_url
# do not search for TLD in already extracted URL
tld_pos_url = tmp_url.find(tld)
# move cursor right after found TLD
tld_pos += len(tld) + offset
# move cursor after end of found URL
tld_pos += len(tmp_url[tld_pos_url+len(tld):])
continue
# move cursor right after found TLD
tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
"""
Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list
"""
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls)
def has_urls(self, text):
"""
Checks if text contains any valid URL.
Returns True if text contains at least one URL.
>>> extractor = URLExtract()
>>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
True
>>> extractor.has_urls("Clean text")
False
:param text: text where we want to find URLs
:return: True if et least one URL was found, False otherwise
:rtype: bool
"""
return any(self.gen_urls(text))
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract.update_when_older | python | def update_when_older(self, days):
last_cache = self._get_last_cachefile_modification()
if last_cache is None:
return self.update()
time_to_update = last_cache + timedelta(days=days)
if datetime.now() >= time_to_update:
return self.update()
return True | Update TLD list cache file if the list is older than
number of days given in parameter `days` or if does not exist.
:param int days: number of days from last change
:return: True if update was successful, False otherwise
:rtype: bool | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L129-L148 | [
"def _get_last_cachefile_modification(self):\n \"\"\"\n Get last modification of cache file with TLDs.\n\n :return: Date and time of last modification or\n None when file does not exist\n :rtype: datetime|None\n \"\"\"\n\n try:\n mtime = os.path.getmtime(self._tld_list_path)\n except OSError:\n return None\n\n return datetime.fromtimestamp(mtime)\n",
"def update(self):\n \"\"\"\n Update TLD list cache file.\n\n :return: True if update was successful False otherwise\n :rtype: bool\n \"\"\"\n if not self._download_tlds_list():\n return False\n\n self._reload_tlds_from_file()\n\n return True\n"
] | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
"""
Initialize after tld characters
"""
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get left enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars
def _reload_tlds_from_file(self):
"""
Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user
"""
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped))
def update(self):
"""
Update TLD list cache file.
:return: True if update was successful False otherwise
:rtype: bool
"""
if not self._download_tlds_list():
return False
self._reload_tlds_from_file()
return True
@staticmethod
def get_version():
"""
Returns version number.
:return: version number
:rtype: str
"""
return __version__
def get_after_tld_chars(self):
"""
Returns list of chars that are allowed after TLD
:return: list of chars that are allowed after TLD
:rtype: list
"""
return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
"""
Set chars that are allowed after TLD.
:param list after_tld_chars: list of characters
"""
self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
"""
Returns list of stop chars.
.. deprecated:: 0.7
Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
instead.
:return: list of stop chars
:rtype: list
"""
warnings.warn("Method get_stop_chars is deprecated, "
"use `get_stop_chars_left` or "
"`get_stop_chars_right` instead", DeprecationWarning)
return list(self._stop_chars)
def set_stop_chars(self, stop_chars):
"""
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
"""
warnings.warn("Method set_stop_chars is deprecated, "
"use `set_stop_chars_left` or "
"`set_stop_chars_right` instead", DeprecationWarning)
self._stop_chars = set(stop_chars)
self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars
def get_stop_chars_left(self):
"""
Returns set of stop chars for text on left from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
"""
Set stop characters for text on left from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_left = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
"""
Returns set of stop chars for text on right from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
"""
Set stop characters for text on right from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_right = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
"""
Returns set of enclosure pairs that might be used to enclosure URL.
For example brackets (example.com), [example.com], {example.com}
:return: set of tuple of enclosure characters
:rtype: set(tuple(str,str))
"""
return self._enclosure
def add_enclosure(self, left_char, right_char):
"""
Add new enclosure pair of characters. That and should be removed
when their presence is detected at beginning and end of found URL
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
self._enclosure.add((left_char, right_char))
self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
"""
Remove enclosure pair from set of enclosures.
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
rm_enclosure = (left_char, right_char)
if rm_enclosure in self._enclosure:
self._enclosure.remove(rm_enclosure)
self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
"""
Expand string in both sides to match whole URL.
:param str text: text where we want to find URL
:param int tld_pos: position of TLD
:param str tld: matched TLD which should be in text
:return: returns URL
:rtype: str
"""
left_ok = True
right_ok = True
max_len = len(text) - 1
end_pos = tld_pos
start_pos = tld_pos
while left_ok or right_ok:
if left_ok:
if start_pos <= 0:
left_ok = False
else:
if text[start_pos - 1] not in self._stop_chars_left:
start_pos -= 1
else:
left_ok = False
if right_ok:
if end_pos >= max_len:
right_ok = False
else:
if text[end_pos + 1] not in self._stop_chars_right:
end_pos += 1
else:
right_ok = False
complete_url = text[start_pos:end_pos + 1].lstrip('/')
# remove last character from url
# when it is allowed character right after TLD (e.g. dot, comma)
temp_tlds = {tld + c for c in self._after_tld_chars}
# get only dot+tld+one_char and compare
if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
complete_url = complete_url[:-1]
complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
complete_url = self._remove_enclosure_from_url(
complete_url, tld_pos-start_pos, tld)
if not self._is_domain_valid(complete_url, tld):
return ""
return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
"""
Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool
"""
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False
def _is_domain_valid(self, url, tld):
"""
Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False
"""
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
"""
Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str
"""
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
# get position of most right left_char of enclosure pairs
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
tld_pos -= left_pos + 1
# Get valid domain when we have input as: example.com)/path
# we assume that if there is enclosure character after TLD it is
# the end URL it self therefore we remove the rest
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# Markdown url can looks like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url
def gen_urls(self, text):
"""
Creates generator over found URLs in given text.
:param str text: text where we want to find URLs
:yields: URL found in text or empty string if no found
:rtype: str
"""
tld_pos = 0
matched_tlds = self._tlds_re.findall(text)
for tld in matched_tlds:
tmp_text = text[tld_pos:]
offset = tld_pos
tld_pos = tmp_text.find(tld)
validated = self._validate_tld_match(text, tld, offset + tld_pos)
if tld_pos != -1 and validated:
tmp_url = self._complete_url(text, offset + tld_pos, tld)
if tmp_url:
yield tmp_url
# do not search for TLD in already extracted URL
tld_pos_url = tmp_url.find(tld)
# move cursor right after found TLD
tld_pos += len(tld) + offset
# move cursor after end of found URL
tld_pos += len(tmp_url[tld_pos_url+len(tld):])
continue
# move cursor right after found TLD
tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
"""
Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list
"""
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls)
def has_urls(self, text):
"""
Checks if text contains any valid URL.
Returns True if text contains at least one URL.
>>> extractor = URLExtract()
>>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
True
>>> extractor.has_urls("Clean text")
False
:param text: text where we want to find URLs
:return: True if et least one URL was found, False otherwise
:rtype: bool
"""
return any(self.gen_urls(text))
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract.set_stop_chars | python | def set_stop_chars(self, stop_chars):
warnings.warn("Method set_stop_chars is deprecated, "
"use `set_stop_chars_left` or "
"`set_stop_chars_right` instead", DeprecationWarning)
self._stop_chars = set(stop_chars)
self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars | Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L196-L212 | null | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
    """
    Build the set of characters that may legally follow a TLD.

    :return: set of characters allowed right after a TLD
    :rtype: set
    """
    punctuation = {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
    # Closing enclosure characters (e.g. ')' for '(') are valid after a
    # TLD so that URLs wrapped like (example.com) parse correctly.
    # NOTE: a set comprehension is used instead of zip(*self._enclosure)
    # because zip-unpacking raises ValueError when _enclosure is empty
    # (possible after remove_enclosure() removed every pair).
    closers = {right_char for _, right_char in self._enclosure}
    return set(string.whitespace) | punctuation | closers
def _reload_tlds_from_file(self):
"""
Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user
"""
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped))
def update(self):
"""
Update TLD list cache file.
:return: True if update was successful False otherwise
:rtype: bool
"""
if not self._download_tlds_list():
return False
self._reload_tlds_from_file()
return True
def update_when_older(self, days):
    """
    Update the cached TLD list if it is older than *days* days or missing.

    :param int days: number of days from last change
    :return: True if update was successful, False otherwise
    :rtype: bool
    """
    last_cache = self._get_last_cachefile_modification()
    if last_cache is None:
        # No cache file yet -> always attempt a fresh download.
        return self.update()
    if datetime.now() >= last_cache + timedelta(days=days):
        return self.update()
    return True
@staticmethod
def get_version():
"""
Returns version number.
:return: version number
:rtype: str
"""
return __version__
def get_after_tld_chars(self):
"""
Returns list of chars that are allowed after TLD
:return: list of chars that are allowed after TLD
:rtype: list
"""
return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
"""
Set chars that are allowed after TLD.
:param list after_tld_chars: list of characters
"""
self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
"""
Returns list of stop chars.
.. deprecated:: 0.7
Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
instead.
:return: list of stop chars
:rtype: list
"""
warnings.warn("Method get_stop_chars is deprecated, "
"use `get_stop_chars_left` or "
"`get_stop_chars_right` instead", DeprecationWarning)
return list(self._stop_chars)
def get_stop_chars_left(self):
"""
Returns set of stop chars for text on left from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
"""
Set stop characters for text on left from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_left = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
"""
Returns set of stop chars for text on right from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
"""
Set stop characters for text on right from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_right = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
"""
Returns set of enclosure pairs that might be used to enclosure URL.
For example brackets (example.com), [example.com], {example.com}
:return: set of tuple of enclosure characters
:rtype: set(tuple(str,str))
"""
return self._enclosure
def add_enclosure(self, left_char, right_char):
"""
Add new enclosure pair of characters. That and should be removed
when their presence is detected at beginning and end of found URL
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
self._enclosure.add((left_char, right_char))
self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
"""
Remove enclosure pair from set of enclosures.
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
rm_enclosure = (left_char, right_char)
if rm_enclosure in self._enclosure:
self._enclosure.remove(rm_enclosure)
self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
    """
    Expand string in both sides to match whole URL.

    :param str text: text where we want to find URL
    :param int tld_pos: position of TLD
    :param str tld: matched TLD which should be in text
    :return: matched URL, or empty string when the candidate is invalid
    :rtype: str
    """
    left_ok = True
    right_ok = True
    max_len = len(text) - 1
    end_pos = tld_pos
    start_pos = tld_pos
    # Grow the [start_pos, end_pos] window outwards from the TLD, one
    # character per side per iteration, until a stop character or the
    # text boundary is reached on each side.
    while left_ok or right_ok:
        if left_ok:
            if start_pos <= 0:
                left_ok = False
            else:
                if text[start_pos - 1] not in self._stop_chars_left:
                    start_pos -= 1
                else:
                    left_ok = False
        if right_ok:
            if end_pos >= max_len:
                right_ok = False
            else:
                if text[end_pos + 1] not in self._stop_chars_right:
                    end_pos += 1
                else:
                    right_ok = False
    complete_url = text[start_pos:end_pos + 1].lstrip('/')
    # remove last character from url
    # when it is allowed character right after TLD (e.g. dot, comma)
    temp_tlds = {tld + c for c in self._after_tld_chars}
    # get only dot+tld+one_char and compare
    if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
        complete_url = complete_url[:-1]
    # tld_pos - start_pos is the TLD position relative to complete_url.
    complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
    complete_url = self._remove_enclosure_from_url(
        complete_url, tld_pos-start_pos, tld)
    # Empty string signals to the caller (gen_urls) that no URL was found.
    if not self._is_domain_valid(complete_url, tld):
        return ""
    return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
    """
    Validate TLD match - tells if at found position is really TLD.

    :param str text: text where we want to find URLs
    :param str matched_tld: matched TLD
    :param int tld_pos: position of matched TLD
    :return: True if match is valid, False otherwise
    :rtype: bool
    """
    if tld_pos > len(text):
        return False
    right_tld_pos = tld_pos + len(matched_tld)
    # When a character follows the TLD, it must be one of the characters
    # allowed right after a TLD; otherwise this is not a real TLD match.
    if len(text) > right_tld_pos and \
            text[right_tld_pos] not in self._after_tld_chars:
        return False
    # Valid only when the TLD is preceded by a non-stop character, i.e.
    # there is some hostname text directly before it.
    return tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left
def _is_domain_valid(self, url, tld):
"""
Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False
"""
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
    """
    Removes enclosure characters from URL given in text_url.

    For example: (example.com) -> example.com

    :param str text_url: text with URL that we want to extract from
        enclosure of two characters
    :param int tld_pos: position of TLD in text_url
    :param str tld: matched TLD which should be in text
    :return: URL that has removed enclosure
    :rtype: str
    """
    # Map each opening enclosure character to its closing counterpart.
    enclosure_map = {
        left_char: right_char
        for left_char, right_char in self._enclosure
    }
    # get position of most right left_char of enclosure pairs
    # (rfind returns -1 for absent characters, so left_pos is -1 when no
    # opening enclosure occurs before the TLD)
    left_pos = max([
        text_url.rfind(left_char, 0, tld_pos)
        for left_char in enclosure_map.keys()
    ])
    left_char = text_url[left_pos] if left_pos >= 0 else ''
    right_char = enclosure_map.get(left_char, '')
    # Position of the matching closing character; with no opening
    # character found, keep everything up to the end of the string.
    right_pos = text_url.rfind(right_char) if right_char else len(text_url)
    if right_pos < 0 or right_pos < tld_pos:
        right_pos = len(text_url)
    new_url = text_url[left_pos + 1:right_pos]
    # TLD position shifts left by the number of stripped characters.
    tld_pos -= left_pos + 1
    # Get valid domain when we have input as: example.com)/path
    # we assume that if there is enclosure character after TLD it is
    # the end URL it self therefore we remove the rest
    after_tld_pos = tld_pos + len(tld)
    if after_tld_pos < len(new_url):
        if new_url[after_tld_pos] in enclosure_map.values():
            new_url_tmp = new_url[:after_tld_pos]
            # Recurse to strip any remaining nested enclosure pairs.
            return self._remove_enclosure_from_url(
                new_url_tmp, tld_pos, tld)
    return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
    """
    Split a Markdown-style link that was matched as one URL.

    Parsing does not stop at the right place for Markdown links such as
    ``[http://example.com/](http://example.com/status/210)``, so the
    wrongly matched URL has to be split here.

    :param str text_url: URL that we want to extract from enclosure
    :param int tld_pos: position of TLD
    :return: URL with the Markdown wrapper removed
    :rtype: str
    """
    left_bracket = text_url.find('[')
    # subtract 3 because URL is never shorter than 3 characters
    if left_bracket > tld_pos - 3:
        return text_url
    right_bracket = text_url.find(')')
    if right_bracket < tld_pos:
        return text_url
    separator = text_url.rfind("](")
    if separator > tld_pos:
        return text_url[left_bracket + 1:separator]
    return text_url
def gen_urls(self, text):
    """
    Creates generator over found URLs in given text.

    :param str text: text where we want to find URLs
    :yields: URL found in text or empty string if no found
    :rtype: str
    """
    # tld_pos acts as a cursor into text; everything before it has
    # already been consumed by a previous match.
    tld_pos = 0
    matched_tlds = self._tlds_re.findall(text)
    for tld in matched_tlds:
        # Search for this TLD only in the not-yet-consumed tail.
        tmp_text = text[tld_pos:]
        offset = tld_pos
        tld_pos = tmp_text.find(tld)
        validated = self._validate_tld_match(text, tld, offset + tld_pos)
        if tld_pos != -1 and validated:
            tmp_url = self._complete_url(text, offset + tld_pos, tld)
            if tmp_url:
                yield tmp_url
                # do not search for TLD in already extracted URL
                tld_pos_url = tmp_url.find(tld)
                # move cursor right after found TLD
                tld_pos += len(tld) + offset
                # move cursor after end of found URL
                tld_pos += len(tmp_url[tld_pos_url+len(tld):])
                continue
        # move cursor right after found TLD
        # NOTE(review): when tld_pos is -1 (TLD not found in the tail)
        # this still advances the cursor by offset + len(tld) - 1 —
        # looks like an accepted off-by-one; confirm against upstream.
        tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
"""
Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list
"""
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls)
def has_urls(self, text):
"""
Checks if text contains any valid URL.
Returns True if text contains at least one URL.
>>> extractor = URLExtract()
>>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
True
>>> extractor.has_urls("Clean text")
False
:param text: text where we want to find URLs
:return: True if et least one URL was found, False otherwise
:rtype: bool
"""
return any(self.gen_urls(text))
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract.set_stop_chars_left | python | def set_stop_chars_left(self, stop_chars):
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_left = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right | Set stop characters for text on left from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L223-L236 | null | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
"""
Initialize after tld characters
"""
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get left enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars
def _reload_tlds_from_file(self):
"""
Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user
"""
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped))
def update(self):
"""
Update TLD list cache file.
:return: True if update was successful False otherwise
:rtype: bool
"""
if not self._download_tlds_list():
return False
self._reload_tlds_from_file()
return True
def update_when_older(self, days):
"""
Update TLD list cache file if the list is older than
number of days given in parameter `days` or if does not exist.
:param int days: number of days from last change
:return: True if update was successful, False otherwise
:rtype: bool
"""
last_cache = self._get_last_cachefile_modification()
if last_cache is None:
return self.update()
time_to_update = last_cache + timedelta(days=days)
if datetime.now() >= time_to_update:
return self.update()
return True
@staticmethod
def get_version():
"""
Returns version number.
:return: version number
:rtype: str
"""
return __version__
def get_after_tld_chars(self):
"""
Returns list of chars that are allowed after TLD
:return: list of chars that are allowed after TLD
:rtype: list
"""
return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
"""
Set chars that are allowed after TLD.
:param list after_tld_chars: list of characters
"""
self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
"""
Returns list of stop chars.
.. deprecated:: 0.7
Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
instead.
:return: list of stop chars
:rtype: list
"""
warnings.warn("Method get_stop_chars is deprecated, "
"use `get_stop_chars_left` or "
"`get_stop_chars_right` instead", DeprecationWarning)
return list(self._stop_chars)
def set_stop_chars(self, stop_chars):
    """
    Set stop characters used when determining end of URL.

    .. deprecated:: 0.7
        Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
        instead.

    :param list stop_chars: list of characters
    """
    message = ("Method set_stop_chars is deprecated, "
               "use `set_stop_chars_left` or "
               "`set_stop_chars_right` instead")
    warnings.warn(message, DeprecationWarning)
    # Deprecated behaviour: one set drives both directions.
    chars = set(stop_chars)
    self._stop_chars = chars
    self._stop_chars_left = chars
    self._stop_chars_right = chars
def get_stop_chars_left(self):
"""
Returns set of stop chars for text on left from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_left
def get_stop_chars_right(self):
"""
Returns set of stop chars for text on right from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
"""
Set stop characters for text on right from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_right = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
"""
Returns set of enclosure pairs that might be used to enclosure URL.
For example brackets (example.com), [example.com], {example.com}
:return: set of tuple of enclosure characters
:rtype: set(tuple(str,str))
"""
return self._enclosure
def add_enclosure(self, left_char, right_char):
"""
Add new enclosure pair of characters. That and should be removed
when their presence is detected at beginning and end of found URL
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
self._enclosure.add((left_char, right_char))
self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
"""
Remove enclosure pair from set of enclosures.
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
rm_enclosure = (left_char, right_char)
if rm_enclosure in self._enclosure:
self._enclosure.remove(rm_enclosure)
self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
"""
Expand string in both sides to match whole URL.
:param str text: text where we want to find URL
:param int tld_pos: position of TLD
:param str tld: matched TLD which should be in text
:return: returns URL
:rtype: str
"""
left_ok = True
right_ok = True
max_len = len(text) - 1
end_pos = tld_pos
start_pos = tld_pos
while left_ok or right_ok:
if left_ok:
if start_pos <= 0:
left_ok = False
else:
if text[start_pos - 1] not in self._stop_chars_left:
start_pos -= 1
else:
left_ok = False
if right_ok:
if end_pos >= max_len:
right_ok = False
else:
if text[end_pos + 1] not in self._stop_chars_right:
end_pos += 1
else:
right_ok = False
complete_url = text[start_pos:end_pos + 1].lstrip('/')
# remove last character from url
# when it is allowed character right after TLD (e.g. dot, comma)
temp_tlds = {tld + c for c in self._after_tld_chars}
# get only dot+tld+one_char and compare
if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
complete_url = complete_url[:-1]
complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
complete_url = self._remove_enclosure_from_url(
complete_url, tld_pos-start_pos, tld)
if not self._is_domain_valid(complete_url, tld):
return ""
return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
"""
Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool
"""
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False
def _is_domain_valid(self, url, tld):
"""
Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False
"""
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
"""
Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str
"""
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
# get position of most right left_char of enclosure pairs
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
tld_pos -= left_pos + 1
# Get valid domain when we have input as: example.com)/path
# we assume that if there is enclosure character after TLD it is
# the end URL it self therefore we remove the rest
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# Markdown url can looks like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url
def gen_urls(self, text):
"""
Creates generator over found URLs in given text.
:param str text: text where we want to find URLs
:yields: URL found in text or empty string if no found
:rtype: str
"""
tld_pos = 0
matched_tlds = self._tlds_re.findall(text)
for tld in matched_tlds:
tmp_text = text[tld_pos:]
offset = tld_pos
tld_pos = tmp_text.find(tld)
validated = self._validate_tld_match(text, tld, offset + tld_pos)
if tld_pos != -1 and validated:
tmp_url = self._complete_url(text, offset + tld_pos, tld)
if tmp_url:
yield tmp_url
# do not search for TLD in already extracted URL
tld_pos_url = tmp_url.find(tld)
# move cursor right after found TLD
tld_pos += len(tld) + offset
# move cursor after end of found URL
tld_pos += len(tmp_url[tld_pos_url+len(tld):])
continue
# move cursor right after found TLD
tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
"""
Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list
"""
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls)
def has_urls(self, text):
"""
Checks if text contains any valid URL.
Returns True if text contains at least one URL.
>>> extractor = URLExtract()
>>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
True
>>> extractor.has_urls("Clean text")
False
:param text: text where we want to find URLs
:return: True if et least one URL was found, False otherwise
:rtype: bool
"""
return any(self.gen_urls(text))
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract.add_enclosure | python | def add_enclosure(self, left_char, right_char):
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
self._enclosure.add((left_char, right_char))
self._after_tld_chars = self._get_after_tld_chars() | Add new enclosure pair of characters. That and should be removed
when their presence is detected at beginning and end of found URL
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")" | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L272-L286 | [
"def _get_after_tld_chars(self):\n \"\"\"\n Initialize after tld characters\n \"\"\"\n after_tld_chars = set(string.whitespace)\n after_tld_chars |= {'/', '\\\"', '\\'', '<', '>', '?', ':', '.', ','}\n # get left enclosure characters\n _, right_enclosure = zip(*self._enclosure)\n # add right enclosure characters to be valid after TLD\n # for correct parsing of URL e.g. (example.com)\n after_tld_chars |= set(right_enclosure)\n\n return after_tld_chars\n"
] | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
"""
Initialize after tld characters
"""
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get left enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars
def _reload_tlds_from_file(self):
"""
Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user
"""
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped))
def update(self):
    """
    Update TLD list cache file.

    :return: True if update was successful False otherwise
    :rtype: bool
    """
    if self._download_tlds_list():
        # only recompile the regexp when a fresh list was fetched
        self._reload_tlds_from_file()
        return True
    return False
def update_when_older(self, days):
    """
    Update TLD list cache file if the list is older than
    number of days given in parameter `days` or if does not exist.

    :param int days: number of days from last change
    :return: True if update was successful, False otherwise
    :rtype: bool
    """
    last_cache = self._get_last_cachefile_modification()
    # no cache file yet -> always fetch
    if last_cache is None:
        return self.update()

    # cache is still fresh: nothing to do
    if datetime.now() < last_cache + timedelta(days=days):
        return True
    return self.update()
@staticmethod
def get_version():
    """
    Return the module-level ``__version__`` string.

    :return: version number
    :rtype: str
    """
    return __version__
def get_after_tld_chars(self):
    """
    Returns list of chars that are allowed after TLD

    :return: list of chars that are allowed after TLD
    :rtype: list
    """
    # expose a copy so callers cannot mutate internal state
    return [char for char in self._after_tld_chars]
def set_after_tld_chars(self, after_tld_chars):
    """
    Set chars that are allowed after TLD.

    :param list after_tld_chars: list of characters
    """
    # stored as a set for O(1) membership tests during extraction
    self._after_tld_chars = {char for char in after_tld_chars}
def get_stop_chars(self):
    """
    Returns list of stop chars (union of left and right sets).

    .. deprecated:: 0.7
        Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
        instead.

    :return: list of stop chars
    :rtype: list
    """
    warnings.warn("Method get_stop_chars is deprecated, "
                  "use `get_stop_chars_left` or "
                  "`get_stop_chars_right` instead", DeprecationWarning)
    return list(self._stop_chars)
def set_stop_chars(self, stop_chars):
    """
    Set stop characters used when determining end of URL.

    .. deprecated:: 0.7
        Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
        instead.

    :param list stop_chars: list of characters
    """
    warnings.warn("Method set_stop_chars is deprecated, "
                  "use `set_stop_chars_left` or "
                  "`set_stop_chars_right` instead", DeprecationWarning)
    self._stop_chars = set(stop_chars)
    # NOTE(review): all three attributes become aliases of the same set
    # object here; in-place mutation of one would affect the others.
    self._stop_chars_left = self._stop_chars
    self._stop_chars_right = self._stop_chars
def get_stop_chars_left(self):
    """
    Returns set of stop chars for text on left from TLD.

    Note: returns the internal set itself, not a copy.

    :return: set of stop chars
    :rtype: set
    """
    return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
    """
    Set stop characters for text on left from TLD.
    Stop characters are used when determining end of URL.

    :param set stop_chars: set of characters
    :raises: TypeError when *stop_chars* is not a set
    """
    if not isinstance(stop_chars, set):
        raise TypeError(
            "stop_chars should be type set but {} was given".format(
                type(stop_chars)))

    self._stop_chars_left = stop_chars
    # keep the combined set (used by _validate_tld_match) in sync
    self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
    """
    Returns set of stop chars for text on right from TLD.

    Note: returns the internal set itself, not a copy.

    :return: set of stop chars
    :rtype: set
    """
    return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
    """
    Set stop characters for text on right from TLD.
    Stop characters are used when determining end of URL.

    :param set stop_chars: set of characters
    :raises: TypeError when *stop_chars* is not a set
    """
    if not isinstance(stop_chars, set):
        raise TypeError(
            "stop_chars should be type set but {} was given".format(
                type(stop_chars)))

    self._stop_chars_right = stop_chars
    # keep the combined set (used by _validate_tld_match) in sync
    self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
    """
    Returns set of enclosure pairs that might be used to enclosure URL.

    For example brackets (example.com), [example.com], {example.com}

    Note: returns the internal set itself, not a copy.

    :return: set of tuple of enclosure characters
    :rtype: set(tuple(str,str))
    """
    return self._enclosure
def remove_enclosure(self, left_char, right_char):
    """
    Remove enclosure pair from set of enclosures.

    Unknown pairs are ignored silently.

    :param str left_char: left character of enclosure pair - e.g. "("
    :param str right_char: right character of enclosure pair - e.g. ")"
    """
    assert len(left_char) == 1, \
        "Parameter left_char must be character not string"
    assert len(right_char) == 1, \
        "Parameter right_char must be character not string"

    # discard() is a no-op when the pair is not registered
    self._enclosure.discard((left_char, right_char))

    # after-TLD characters depend on the enclosure set -> rebuild them
    self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
    """
    Expand string in both sides to match whole URL.

    :param str text: text where we want to find URL
    :param int tld_pos: position of TLD
    :param str tld: matched TLD which should be in text
    :return: the complete URL, or an empty string when validation fails
    :rtype: str
    """
    left_ok = True
    right_ok = True
    max_len = len(text) - 1
    end_pos = tld_pos
    start_pos = tld_pos
    # Grow the candidate span outwards from the TLD until a stop
    # character (or the text boundary) is hit on each side.
    while left_ok or right_ok:
        if left_ok:
            if start_pos <= 0:
                left_ok = False
            else:
                if text[start_pos - 1] not in self._stop_chars_left:
                    start_pos -= 1
                else:
                    left_ok = False
        if right_ok:
            if end_pos >= max_len:
                right_ok = False
            else:
                if text[end_pos + 1] not in self._stop_chars_right:
                    end_pos += 1
                else:
                    right_ok = False

    complete_url = text[start_pos:end_pos + 1].lstrip('/')
    # remove last character from url
    # when it is allowed character right after TLD (e.g. dot, comma)
    temp_tlds = {tld + c for c in self._after_tld_chars}
    # get only dot+tld+one_char and compare
    if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
        complete_url = complete_url[:-1]

    # trim markdown syntax and surrounding enclosure characters, then
    # validate the host name of what remains
    complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
    complete_url = self._remove_enclosure_from_url(
        complete_url, tld_pos-start_pos, tld)
    if not self._is_domain_valid(complete_url, tld):
        return ""

    return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
"""
Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool
"""
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False
def _is_domain_valid(self, url, tld):
    """
    Checks if given URL has valid domain name (ignores subdomains)

    :param str url: complete URL that we want to check
    :param str tld: TLD that should be found at the end of URL (hostname)
    :return: True if URL is valid, False otherwise
    :rtype: bool

    >>> extractor = URLExtract()
    >>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
    True

    >>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
    True

    >>> extractor._is_domain_valid("invalid.cz.", ".cz")
    False

    >>> extractor._is_domain_valid("invalid.cz,", ".cz")
    False

    >>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
    False

    >>> extractor._is_domain_valid("-is.valid.cz", ".cz")
    True

    >>> extractor._is_domain_valid("not.valid-.cz", ".cz")
    False

    >>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
    False
    """

    if not url:
        return False

    # uritools needs a scheme to parse the authority part correctly
    scheme_pos = url.find('://')
    if scheme_pos == -1:
        url = 'http://' + url

    url_parts = uritools.urisplit(url)
    # <scheme>://<authority>/<path>?<query>#<fragment>

    try:
        host = url_parts.gethost()
    except ValueError:
        self._logger.info(
            "Invalid host '%s'. "
            "If the host is valid report a bug.", url
        )
        return False

    if not host:
        return False

    host_parts = host.split('.')
    # a valid hostname needs at least second-level-domain + TLD
    if len(host_parts) <= 1:
        return False

    # the host's last label must be exactly the TLD we matched
    host_tld = '.'+host_parts[-1]
    if host_tld != tld:
        return False

    # validate only the second-level label; subdomains are ignored
    top = host_parts[-2]

    if self._hostname_re.match(top) is None:
        return False

    return True
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
    """
    Removes enclosure characters from URL given in text_url.
    For example: (example.com) -> example.com

    :param str text_url: text with URL that we want to extract from
        enclosure of two characters
    :param int tld_pos: position of TLD in text_url
    :param str tld: matched TLD which should be in text
    :return: URL that has removed enclosure
    :rtype: str
    """

    enclosure_map = {
        left_char: right_char
        for left_char, right_char in self._enclosure
    }
    # get position of most right left_char of enclosure pairs
    # (rfind returns -1 when a char is absent, so max() >= -1)
    left_pos = max([
        text_url.rfind(left_char, 0, tld_pos)
        for left_char in enclosure_map.keys()
    ])
    left_char = text_url[left_pos] if left_pos >= 0 else ''
    right_char = enclosure_map.get(left_char, '')
    right_pos = text_url.rfind(right_char) if right_char else len(text_url)
    # keep the full tail when the closing char is missing or precedes
    # the TLD (i.e. is not actually closing this URL)
    if right_pos < 0 or right_pos < tld_pos:
        right_pos = len(text_url)

    new_url = text_url[left_pos + 1:right_pos]
    # re-base the TLD position into the trimmed string
    tld_pos -= left_pos + 1

    # Get valid domain when we have input as: example.com)/path
    # we assume that if there is enclosure character after TLD it is
    # the end URL it self therefore we remove the rest
    after_tld_pos = tld_pos + len(tld)
    if after_tld_pos < len(new_url):
        if new_url[after_tld_pos] in enclosure_map.values():
            new_url_tmp = new_url[:after_tld_pos]
            # recurse: the trimmed string may still carry an enclosure
            return self._remove_enclosure_from_url(
                new_url_tmp, tld_pos, tld)

    return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# Markdown url can looks like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url
def gen_urls(self, text):
    """
    Creates generator over found URLs in given text.

    :param str text: text where we want to find URLs
    :yields: URL found in text or empty string if no found
    :rtype: str
    """
    tld_pos = 0
    matched_tlds = self._tlds_re.findall(text)

    for tld in matched_tlds:
        # search for this TLD only in the not-yet-consumed tail of text
        tmp_text = text[tld_pos:]
        offset = tld_pos
        tld_pos = tmp_text.find(tld)
        validated = self._validate_tld_match(text, tld, offset + tld_pos)
        if tld_pos != -1 and validated:
            tmp_url = self._complete_url(text, offset + tld_pos, tld)
            if tmp_url:
                yield tmp_url

                # do not search for TLD in already extracted URL
                tld_pos_url = tmp_url.find(tld)
                # move cursor right after found TLD
                tld_pos += len(tld) + offset
                # move cursor after end of found URL
                tld_pos += len(tmp_url[tld_pos_url+len(tld):])
                continue

        # move cursor right after found TLD
        tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
    """
    Find all URLs in given text.

    :param str text: text where we want to find URLs
    :param bool only_unique: return only unique URLs
    :return: list of URLs found in text
    :rtype: list
    """
    found = self.gen_urls(text)
    if only_unique:
        # OrderedDict keeps first-seen order while dropping duplicates
        return list(OrderedDict.fromkeys(found))
    return list(found)
def has_urls(self, text):
    """
    Checks if text contains any valid URL.
    Returns True as soon as the first URL is found, without extracting
    the rest of the text.

    :param text: text where we want to find URLs
    :return: True if at least one URL was found, False otherwise
    :rtype: bool
    """
    for url in self.gen_urls(text):
        if url:
            return True
    return False
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract.remove_enclosure | python | def remove_enclosure(self, left_char, right_char):
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
rm_enclosure = (left_char, right_char)
if rm_enclosure in self._enclosure:
self._enclosure.remove(rm_enclosure)
self._after_tld_chars = self._get_after_tld_chars() | Remove enclosure pair from set of enclosures.
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")" | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L288-L303 | [
"def _get_after_tld_chars(self):\n \"\"\"\n Initialize after tld characters\n \"\"\"\n after_tld_chars = set(string.whitespace)\n after_tld_chars |= {'/', '\\\"', '\\'', '<', '>', '?', ':', '.', ','}\n # get left enclosure characters\n _, right_enclosure = zip(*self._enclosure)\n # add right enclosure characters to be valid after TLD\n # for correct parsing of URL e.g. (example.com)\n after_tld_chars |= set(right_enclosure)\n\n return after_tld_chars\n"
] | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
"""
Initialize after tld characters
"""
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get left enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars
def _reload_tlds_from_file(self):
"""
Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user
"""
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped))
def update(self):
"""
Update TLD list cache file.
:return: True if update was successful False otherwise
:rtype: bool
"""
if not self._download_tlds_list():
return False
self._reload_tlds_from_file()
return True
def update_when_older(self, days):
"""
Update TLD list cache file if the list is older than
number of days given in parameter `days` or if does not exist.
:param int days: number of days from last change
:return: True if update was successful, False otherwise
:rtype: bool
"""
last_cache = self._get_last_cachefile_modification()
if last_cache is None:
return self.update()
time_to_update = last_cache + timedelta(days=days)
if datetime.now() >= time_to_update:
return self.update()
return True
@staticmethod
def get_version():
"""
Returns version number.
:return: version number
:rtype: str
"""
return __version__
def get_after_tld_chars(self):
"""
Returns list of chars that are allowed after TLD
:return: list of chars that are allowed after TLD
:rtype: list
"""
return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
"""
Set chars that are allowed after TLD.
:param list after_tld_chars: list of characters
"""
self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
"""
Returns list of stop chars.
.. deprecated:: 0.7
Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
instead.
:return: list of stop chars
:rtype: list
"""
warnings.warn("Method get_stop_chars is deprecated, "
"use `get_stop_chars_left` or "
"`get_stop_chars_right` instead", DeprecationWarning)
return list(self._stop_chars)
def set_stop_chars(self, stop_chars):
"""
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
"""
warnings.warn("Method set_stop_chars is deprecated, "
"use `set_stop_chars_left` or "
"`set_stop_chars_right` instead", DeprecationWarning)
self._stop_chars = set(stop_chars)
self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars
def get_stop_chars_left(self):
"""
Returns set of stop chars for text on left from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
"""
Set stop characters for text on left from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_left = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
"""
Returns set of stop chars for text on right from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
"""
Set stop characters for text on right from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_right = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
"""
Returns set of enclosure pairs that might be used to enclosure URL.
For example brackets (example.com), [example.com], {example.com}
:return: set of tuple of enclosure characters
:rtype: set(tuple(str,str))
"""
return self._enclosure
def add_enclosure(self, left_char, right_char):
"""
Add new enclosure pair of characters. That and should be removed
when their presence is detected at beginning and end of found URL
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
self._enclosure.add((left_char, right_char))
self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
"""
Expand string in both sides to match whole URL.
:param str text: text where we want to find URL
:param int tld_pos: position of TLD
:param str tld: matched TLD which should be in text
:return: returns URL
:rtype: str
"""
left_ok = True
right_ok = True
max_len = len(text) - 1
end_pos = tld_pos
start_pos = tld_pos
while left_ok or right_ok:
if left_ok:
if start_pos <= 0:
left_ok = False
else:
if text[start_pos - 1] not in self._stop_chars_left:
start_pos -= 1
else:
left_ok = False
if right_ok:
if end_pos >= max_len:
right_ok = False
else:
if text[end_pos + 1] not in self._stop_chars_right:
end_pos += 1
else:
right_ok = False
complete_url = text[start_pos:end_pos + 1].lstrip('/')
# remove last character from url
# when it is allowed character right after TLD (e.g. dot, comma)
temp_tlds = {tld + c for c in self._after_tld_chars}
# get only dot+tld+one_char and compare
if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
complete_url = complete_url[:-1]
complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
complete_url = self._remove_enclosure_from_url(
complete_url, tld_pos-start_pos, tld)
if not self._is_domain_valid(complete_url, tld):
return ""
return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
"""
Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool
"""
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False
def _is_domain_valid(self, url, tld):
"""
Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False
"""
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
"""
Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str
"""
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
# get position of most right left_char of enclosure pairs
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
tld_pos -= left_pos + 1
# Get valid domain when we have input as: example.com)/path
# we assume that if there is enclosure character after TLD it is
# the end URL it self therefore we remove the rest
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# Markdown url can looks like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url
def gen_urls(self, text):
"""
Creates generator over found URLs in given text.
:param str text: text where we want to find URLs
:yields: URL found in text or empty string if no found
:rtype: str
"""
tld_pos = 0
matched_tlds = self._tlds_re.findall(text)
for tld in matched_tlds:
tmp_text = text[tld_pos:]
offset = tld_pos
tld_pos = tmp_text.find(tld)
validated = self._validate_tld_match(text, tld, offset + tld_pos)
if tld_pos != -1 and validated:
tmp_url = self._complete_url(text, offset + tld_pos, tld)
if tmp_url:
yield tmp_url
# do not search for TLD in already extracted URL
tld_pos_url = tmp_url.find(tld)
# move cursor right after found TLD
tld_pos += len(tld) + offset
# move cursor after end of found URL
tld_pos += len(tmp_url[tld_pos_url+len(tld):])
continue
# move cursor right after found TLD
tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
"""
Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list
"""
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls)
def has_urls(self, text):
"""
Checks if text contains any valid URL.
Returns True if text contains at least one URL.
>>> extractor = URLExtract()
>>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
True
>>> extractor.has_urls("Clean text")
False
:param text: text where we want to find URLs
:return: True if et least one URL was found, False otherwise
:rtype: bool
"""
return any(self.gen_urls(text))
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract._complete_url | python | def _complete_url(self, text, tld_pos, tld):
left_ok = True
right_ok = True
max_len = len(text) - 1
end_pos = tld_pos
start_pos = tld_pos
while left_ok or right_ok:
if left_ok:
if start_pos <= 0:
left_ok = False
else:
if text[start_pos - 1] not in self._stop_chars_left:
start_pos -= 1
else:
left_ok = False
if right_ok:
if end_pos >= max_len:
right_ok = False
else:
if text[end_pos + 1] not in self._stop_chars_right:
end_pos += 1
else:
right_ok = False
complete_url = text[start_pos:end_pos + 1].lstrip('/')
# remove last character from url
# when it is allowed character right after TLD (e.g. dot, comma)
temp_tlds = {tld + c for c in self._after_tld_chars}
# get only dot+tld+one_char and compare
if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
complete_url = complete_url[:-1]
complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
complete_url = self._remove_enclosure_from_url(
complete_url, tld_pos-start_pos, tld)
if not self._is_domain_valid(complete_url, tld):
return ""
return complete_url | Expand string in both sides to match whole URL.
:param str text: text where we want to find URL
:param int tld_pos: position of TLD
:param str tld: matched TLD which should be in text
:return: returns URL
:rtype: str | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L305-L354 | [
"def _is_domain_valid(self, url, tld):\n \"\"\"\n Checks if given URL has valid domain name (ignores subdomains)\n\n :param str url: complete URL that we want to check\n :param str tld: TLD that should be found at the end of URL (hostname)\n :return: True if URL is valid, False otherwise\n :rtype: bool\n\n >>> extractor = URLExtract()\n >>> extractor._is_domain_valid(\"janlipovsky.cz\", \".cz\")\n True\n\n >>> extractor._is_domain_valid(\"https://janlipovsky.cz\", \".cz\")\n True\n\n >>> extractor._is_domain_valid(\"invalid.cz.\", \".cz\")\n False\n\n >>> extractor._is_domain_valid(\"invalid.cz,\", \".cz\")\n False\n\n >>> extractor._is_domain_valid(\"in.v_alid.cz\", \".cz\")\n False\n\n >>> extractor._is_domain_valid(\"-is.valid.cz\", \".cz\")\n True\n\n >>> extractor._is_domain_valid(\"not.valid-.cz\", \".cz\")\n False\n\n >>> extractor._is_domain_valid(\"http://blog/media/path.io.jpg\", \".cz\")\n False\n \"\"\"\n\n if not url:\n return False\n\n scheme_pos = url.find('://')\n if scheme_pos == -1:\n url = 'http://' + url\n\n url_parts = uritools.urisplit(url)\n # <scheme>://<authority>/<path>?<query>#<fragment>\n\n try:\n host = url_parts.gethost()\n except ValueError:\n self._logger.info(\n \"Invalid host '%s'. \"\n \"If the host is valid report a bug.\", url\n )\n return False\n\n if not host:\n return False\n\n host_parts = host.split('.')\n if len(host_parts) <= 1:\n return False\n\n host_tld = '.'+host_parts[-1]\n if host_tld != tld:\n return False\n\n top = host_parts[-2]\n\n if self._hostname_re.match(top) is None:\n return False\n\n return True\n",
"def _remove_enclosure_from_url(self, text_url, tld_pos, tld):\n \"\"\"\n Removes enclosure characters from URL given in text_url.\n For example: (example.com) -> example.com\n\n :param str text_url: text with URL that we want to extract from\n enclosure of two characters\n :param int tld_pos: position of TLD in text_url\n :param str tld: matched TLD which should be in text\n :return: URL that has removed enclosure\n :rtype: str\n \"\"\"\n\n enclosure_map = {\n left_char: right_char\n for left_char, right_char in self._enclosure\n }\n # get position of most right left_char of enclosure pairs\n left_pos = max([\n text_url.rfind(left_char, 0, tld_pos)\n for left_char in enclosure_map.keys()\n ])\n left_char = text_url[left_pos] if left_pos >= 0 else ''\n right_char = enclosure_map.get(left_char, '')\n right_pos = text_url.rfind(right_char) if right_char else len(text_url)\n if right_pos < 0 or right_pos < tld_pos:\n right_pos = len(text_url)\n\n new_url = text_url[left_pos + 1:right_pos]\n tld_pos -= left_pos + 1\n\n # Get valid domain when we have input as: example.com)/path\n # we assume that if there is enclosure character after TLD it is\n # the end URL it self therefore we remove the rest\n after_tld_pos = tld_pos + len(tld)\n if after_tld_pos < len(new_url):\n if new_url[after_tld_pos] in enclosure_map.values():\n new_url_tmp = new_url[:after_tld_pos]\n return self._remove_enclosure_from_url(\n new_url_tmp, tld_pos, tld)\n\n return new_url\n",
"def _split_markdown(text_url, tld_pos):\n \"\"\"\n Split markdown URL. There is an issue wen Markdown URL is found.\n Parsing of the URL does not stop on right place so wrongly found URL\n has to be split.\n\n :param str text_url: URL that we want to extract from enclosure\n :param int tld_pos: position of TLD\n :return: URL that has removed enclosure\n :rtype: str\n \"\"\"\n # Markdown url can looks like:\n # [http://example.com/](http://example.com/status/210)\n\n left_bracket_pos = text_url.find('[')\n # subtract 3 because URL is never shorter than 3 characters\n if left_bracket_pos > tld_pos-3:\n return text_url\n\n right_bracket_pos = text_url.find(')')\n if right_bracket_pos < tld_pos:\n return text_url\n\n middle_pos = text_url.rfind(\"](\")\n if middle_pos > tld_pos:\n return text_url[left_bracket_pos+1:middle_pos]\n return text_url\n"
] | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name:
# a single label of alphanumerics, with hyphens allowed only inside
_hostname_re = re.compile(
    r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# enclosure pairs that should be stripped from a found URL
# NOTE(review): this is a set, not a list as older comments claimed
_enclosure = {
    ("(", ")"),
    ("{", "}"),
    ("[", "]"),
    ("\"", "\""),
    ("\\", "\\"),
    ("'", "'"),
    ("`", "`"),
}
def __init__(self, **kwargs):
    """
    Initialize function for URLExtract class.

    Tries to get cached TLDs; if the cached file does not exist it will
    try to download a new list from IANA and save it to the cache file.
    """
    super(URLExtract, self).__init__(**kwargs)

    self._tlds_re = None
    self._reload_tlds_from_file()

    # characters that terminate a URL on either side
    shared_stop_chars = {'\"', '\'', '<', '>', ';'}

    # stop characters for text on the left of a TLD
    self._stop_chars_left = set(string.whitespace) | shared_stop_chars
    self._stop_chars_left |= {'|', '=', ']', ')', '}'}

    # stop characters for text on the right of a TLD
    self._stop_chars_right = set(string.whitespace) | shared_stop_chars

    # precomputed union of both sets, used in _validate_tld_match
    self._stop_chars = self._stop_chars_left | self._stop_chars_right

    # characters that are allowed to appear right after a TLD
    self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
    """
    Build the set of characters that may validly follow a TLD.
    """
    # whitespace plus common URL delimiters and punctuation
    chars = set(string.whitespace)
    chars.update({'/', '\"', '\'', '<', '>', '?', ':', '.', ','})
    # closing enclosure characters are also valid right after a TLD,
    # e.g. for correct parsing of (example.com)
    chars.update(right for _, right in self._enclosure)
    return chars
def _reload_tlds_from_file(self):
    """
    Reload TLDs from the cache file and compile the matching regexp.

    :raises: CacheFileError when cached file is not readable for user
    """
    # longest TLDs first: regex alternation tries branches left to
    # right, so sorting guarantees the longest possible match wins
    tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
    pattern = '|'.join(re.escape(str(tld)) for tld in tlds)
    self._tlds_re = re.compile(pattern)
def update(self):
    """
    Update TLD list cache file.

    :return: True if update was successful False otherwise
    :rtype: bool
    """
    if self._download_tlds_list():
        # download succeeded - recompile the TLD regexp from the cache
        self._reload_tlds_from_file()
        return True
    return False
def update_when_older(self, days):
    """
    Update the TLD list cache file if it is older than the given
    number of days, or if it does not exist at all.

    :param int days: number of days from last change
    :return: True if update was successful, False otherwise
    :rtype: bool
    """
    last_cache = self._get_last_cachefile_modification()
    if last_cache is None:
        # no cache file yet - fetch a fresh list
        return self.update()

    next_update = last_cache + timedelta(days=days)
    if datetime.now() < next_update:
        # cache is still fresh enough, nothing to do
        return True
    return self.update()
@staticmethod
def get_version():
    """
    Returns version number.

    :return: version number
    :rtype: str
    """
    # __version__ is a module-level constant of the urlextract package
    return __version__
def get_after_tld_chars(self):
    """
    Returns list of chars that are allowed after TLD

    :return: list of chars that are allowed after TLD
    :rtype: list
    """
    # return a list copy so callers cannot mutate the internal set
    return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
    """
    Set chars that are allowed after TLD.

    :param list after_tld_chars: list of characters
    """
    # stored as a set for O(1) membership tests in _validate_tld_match
    self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
    """
    Return the combined list of stop characters.

    .. deprecated:: 0.7
        Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
        instead.

    :return: list of stop chars
    :rtype: list
    """
    warnings.warn("Method get_stop_chars is deprecated, "
                  "use `get_stop_chars_left` or "
                  "`get_stop_chars_right` instead", DeprecationWarning)
    return [*self._stop_chars]
def set_stop_chars(self, stop_chars):
    """
    Set stop characters used when determining end of URL.

    .. deprecated:: 0.7
        Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
        instead.

    :param list stop_chars: list of characters
    """
    warnings.warn("Method set_stop_chars is deprecated, "
                  "use `set_stop_chars_left` or "
                  "`set_stop_chars_right` instead", DeprecationWarning)
    # keep all three attributes pointing at the same set object,
    # mirroring the historic behaviour of this deprecated setter
    combined = set(stop_chars)
    self._stop_chars = combined
    self._stop_chars_left = combined
    self._stop_chars_right = combined
def get_stop_chars_left(self):
    """
    Returns set of stop chars for text on left from TLD.

    :return: set of stop chars
    :rtype: set
    """
    # NOTE(review): returns the internal set itself, not a copy
    return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
    """
    Set stop characters for text on the left from a TLD.
    Stop characters are used when determining the end of a URL.

    :param set stop_chars: set of characters
    :raises: TypeError
    """
    if not isinstance(stop_chars, set):
        raise TypeError("stop_chars should be type set "
                        "but {} was given".format(type(stop_chars)))

    self._stop_chars_left = stop_chars
    # keep the precomputed union used by _validate_tld_match in sync
    self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
    """
    Returns set of stop chars for text on right from TLD.

    :return: set of stop chars
    :rtype: set
    """
    # NOTE(review): returns the internal set itself, not a copy
    return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
    """
    Set stop characters for text on the right from a TLD.
    Stop characters are used when determining the end of a URL.

    :param set stop_chars: set of characters
    :raises: TypeError
    """
    if not isinstance(stop_chars, set):
        raise TypeError("stop_chars should be type set "
                        "but {} was given".format(type(stop_chars)))

    self._stop_chars_right = stop_chars
    # keep the precomputed union used by _validate_tld_match in sync
    self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
    """
    Returns set of enclosure pairs that might be used to enclosure URL.
    For example brackets (example.com), [example.com], {example.com}

    :return: set of tuple of enclosure characters
    :rtype: set(tuple(str,str))
    """
    # NOTE(review): returns the internal set itself, not a copy
    return self._enclosure
def add_enclosure(self, left_char, right_char):
    """
    Add a new pair of enclosure characters that should be stripped
    when detected at the beginning and end of a found URL.

    :param str left_char: left character of enclosure pair - e.g. "("
    :param str right_char: right character of enclosure pair - e.g. ")"
    """
    assert len(left_char) == 1, \
        "Parameter left_char must be character not string"
    assert len(right_char) == 1, \
        "Parameter right_char must be character not string"

    self._enclosure.add((left_char, right_char))
    # the valid after-TLD character set depends on enclosures
    self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
    """
    Remove an enclosure pair from the set of enclosures.

    :param str left_char: left character of enclosure pair - e.g. "("
    :param str right_char: right character of enclosure pair - e.g. ")"
    """
    assert len(left_char) == 1, \
        "Parameter left_char must be character not string"
    assert len(right_char) == 1, \
        "Parameter right_char must be character not string"

    # discard() is a no-op when the pair is absent; recomputing the
    # after-TLD set is idempotent, so this matches the original flow
    self._enclosure.discard((left_char, right_char))
    self._after_tld_chars = self._get_after_tld_chars()
def _validate_tld_match(self, text, matched_tld, tld_pos):
    """
    Validate a TLD match - tell whether the found position really
    holds a TLD belonging to a URL.

    :param str text: text where we want to find URLs
    :param str matched_tld: matched TLD
    :param int tld_pos: position of matched TLD
    :return: True if match is valid, False otherwise
    :rtype: bool
    """
    if tld_pos > len(text):
        return False

    # a TLD at the very start of the text, or preceded by a left stop
    # character, cannot be part of a URL
    if tld_pos <= 0 or text[tld_pos - 1] in self._stop_chars_left:
        return False

    # whatever follows the TLD (if anything) must be an allowed char
    right_tld_pos = tld_pos + len(matched_tld)
    if right_tld_pos < len(text) and \
            text[right_tld_pos] not in self._after_tld_chars:
        return False

    return True
def _is_domain_valid(self, url, tld):
    """
    Check that the given URL has a valid domain name (subdomains are
    ignored) and that its hostname ends with the expected TLD.

    :param str url: complete URL that we want to check
    :param str tld: TLD that should be found at the end of URL (hostname)
    :return: True if URL is valid, False otherwise
    :rtype: bool

    >>> extractor = URLExtract()
    >>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
    True

    >>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
    True

    >>> extractor._is_domain_valid("invalid.cz.", ".cz")
    False

    >>> extractor._is_domain_valid("invalid.cz,", ".cz")
    False

    >>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
    False

    >>> extractor._is_domain_valid("-is.valid.cz", ".cz")
    True

    >>> extractor._is_domain_valid("not.valid-.cz", ".cz")
    False

    >>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
    False
    """
    if not url:
        return False

    # uritools needs a scheme to recognize the authority part
    if url.find('://') == -1:
        url = 'http://' + url

    # <scheme>://<authority>/<path>?<query>#<fragment>
    url_parts = uritools.urisplit(url)
    try:
        host = url_parts.gethost()
    except ValueError:
        self._logger.info(
            "Invalid host '%s'. "
            "If the host is valid report a bug.", url
        )
        return False

    if not host:
        return False

    labels = host.split('.')
    # a bare hostname with no dot is not a domain
    if len(labels) <= 1:
        return False

    # hostname must end with the TLD we matched in the text
    if '.' + labels[-1] != tld:
        return False

    # the label directly left of the TLD must be a valid hostname label
    return self._hostname_re.match(labels[-2]) is not None
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
    """
    Strip enclosure characters from the URL contained in *text_url*.
    For example: (example.com) -> example.com

    :param str text_url: text with URL that we want to extract from
        enclosure of two characters
    :param int tld_pos: position of TLD in text_url
    :param str tld: matched TLD which should be in text
    :return: URL that has removed enclosure
    :rtype: str
    """
    # map each opening enclosure character to its closing counterpart
    enclosure_map = dict(self._enclosure)

    # rightmost opening character located before the TLD (-1 if none)
    left_pos = max(
        text_url.rfind(opening, 0, tld_pos)
        for opening in enclosure_map
    )
    left_char = text_url[left_pos] if left_pos >= 0 else ''
    right_char = enclosure_map.get(left_char, '')

    if right_char:
        right_pos = text_url.rfind(right_char)
    else:
        right_pos = len(text_url)
    # no matching closing char, or it sits before the TLD -> keep tail
    if right_pos < 0 or right_pos < tld_pos:
        right_pos = len(text_url)

    new_url = text_url[left_pos + 1:right_pos]
    tld_pos -= left_pos + 1

    # Input like "example.com)/path": an enclosure character right
    # after the TLD means the URL itself ended there, so cut the rest
    # and re-check the shortened URL recursively
    after_tld_pos = tld_pos + len(tld)
    if after_tld_pos < len(new_url) and \
            new_url[after_tld_pos] in enclosure_map.values():
        return self._remove_enclosure_from_url(
            new_url[:after_tld_pos], tld_pos, tld)

    return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
    """
    Split a Markdown-style link that was greedily matched as one URL,
    keeping only the link-text part.

    :param str text_url: URL that we want to extract from enclosure
    :param int tld_pos: position of TLD
    :return: URL that has removed enclosure
    :rtype: str
    """
    # A Markdown link looks like:
    # [http://example.com/](http://example.com/status/210)
    open_bracket = text_url.find('[')
    # subtract 3 because a URL is never shorter than 3 characters
    if open_bracket > tld_pos - 3:
        return text_url

    close_paren = text_url.find(')')
    if close_paren < tld_pos:
        return text_url

    separator = text_url.rfind("](")
    if separator > tld_pos:
        return text_url[open_bracket + 1:separator]
    return text_url
def gen_urls(self, text):
    """
    Creates generator over found URLs in given text.

    :param str text: text where we want to find URLs
    :yields: URL found in text or empty string if no found
    :rtype: str
    """
    # cursor into `text`; always points just past the last handled TLD/URL
    tld_pos = 0
    # all TLD occurrences in order of appearance
    matched_tlds = self._tlds_re.findall(text)
    for tld in matched_tlds:
        # search only in the not-yet-consumed tail of the text
        tmp_text = text[tld_pos:]
        offset = tld_pos
        tld_pos = tmp_text.find(tld)
        # absolute position = offset + tld_pos (tld_pos is tail-relative)
        validated = self._validate_tld_match(text, tld, offset + tld_pos)
        if tld_pos != -1 and validated:
            tmp_url = self._complete_url(text, offset + tld_pos, tld)
            if tmp_url:
                yield tmp_url
                # do not search for TLD in already extracted URL
                tld_pos_url = tmp_url.find(tld)
                # move cursor right after found TLD
                tld_pos += len(tld) + offset
                # move cursor after end of found URL
                tld_pos += len(tmp_url[tld_pos_url+len(tld):])
                continue
        # move cursor right after found TLD
        # NOTE(review): original indentation was stripped in this copy;
        # structure reconstructed so this line runs for every non-yielding
        # iteration — confirm against upstream urlextract
        tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
    """
    Find all URLs in given text.

    :param str text: text where we want to find URLs
    :param bool only_unique: return only unique URLs
    :return: list of URLs found in text
    :rtype: list
    """
    found = self.gen_urls(text)
    if only_unique:
        # OrderedDict keeps first-seen order while dropping duplicates
        return list(OrderedDict.fromkeys(found))
    return list(found)
def has_urls(self, text):
    """
    Check whether the text contains at least one valid URL.

    >>> extractor = URLExtract()
    >>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
    True

    >>> extractor.has_urls("Clean text")
    False

    :param text: text where we want to find URLs
    :return: True if at least one URL was found, False otherwise
    :rtype: bool
    """
    # stop at the first URL the generator produces
    for _ in self.gen_urls(text):
        return True
    return False
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract._validate_tld_match | python | def _validate_tld_match(self, text, matched_tld, tld_pos):
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False | Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L356-L379 | null | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
"""
Initialize after tld characters
"""
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get left enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars
def _reload_tlds_from_file(self):
"""
Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user
"""
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped))
def update(self):
"""
Update TLD list cache file.
:return: True if update was successful False otherwise
:rtype: bool
"""
if not self._download_tlds_list():
return False
self._reload_tlds_from_file()
return True
def update_when_older(self, days):
"""
Update TLD list cache file if the list is older than
number of days given in parameter `days` or if does not exist.
:param int days: number of days from last change
:return: True if update was successful, False otherwise
:rtype: bool
"""
last_cache = self._get_last_cachefile_modification()
if last_cache is None:
return self.update()
time_to_update = last_cache + timedelta(days=days)
if datetime.now() >= time_to_update:
return self.update()
return True
@staticmethod
def get_version():
"""
Returns version number.
:return: version number
:rtype: str
"""
return __version__
def get_after_tld_chars(self):
"""
Returns list of chars that are allowed after TLD
:return: list of chars that are allowed after TLD
:rtype: list
"""
return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
"""
Set chars that are allowed after TLD.
:param list after_tld_chars: list of characters
"""
self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
"""
Returns list of stop chars.
.. deprecated:: 0.7
Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
instead.
:return: list of stop chars
:rtype: list
"""
warnings.warn("Method get_stop_chars is deprecated, "
"use `get_stop_chars_left` or "
"`get_stop_chars_right` instead", DeprecationWarning)
return list(self._stop_chars)
def set_stop_chars(self, stop_chars):
"""
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
"""
warnings.warn("Method set_stop_chars is deprecated, "
"use `set_stop_chars_left` or "
"`set_stop_chars_right` instead", DeprecationWarning)
self._stop_chars = set(stop_chars)
self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars
def get_stop_chars_left(self):
"""
Returns set of stop chars for text on left from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
"""
Set stop characters for text on left from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_left = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
"""
Returns set of stop chars for text on right from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
"""
Set stop characters for text on right from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_right = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
"""
Returns set of enclosure pairs that might be used to enclosure URL.
For example brackets (example.com), [example.com], {example.com}
:return: set of tuple of enclosure characters
:rtype: set(tuple(str,str))
"""
return self._enclosure
def add_enclosure(self, left_char, right_char):
"""
Add a new enclosure pair of characters that should be removed
when detected at the beginning and end of a found URL.
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
self._enclosure.add((left_char, right_char))
self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
"""
Remove enclosure pair from set of enclosures.
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
rm_enclosure = (left_char, right_char)
if rm_enclosure in self._enclosure:
self._enclosure.remove(rm_enclosure)
self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
    """
    Expand string in both sides to match whole URL.

    :param str text: text where we want to find URL
    :param int tld_pos: position of TLD
    :param str tld: matched TLD which should be in text
    :return: returns URL
    :rtype: str
    """
    left_ok = True
    right_ok = True
    max_len = len(text) - 1
    end_pos = tld_pos
    start_pos = tld_pos
    # widen [start_pos, end_pos] one character per side per iteration
    # until a stop character (or the text edge) is hit on each side
    while left_ok or right_ok:
        if left_ok:
            if start_pos <= 0:
                left_ok = False
            else:
                if text[start_pos - 1] not in self._stop_chars_left:
                    start_pos -= 1
                else:
                    left_ok = False
        if right_ok:
            if end_pos >= max_len:
                right_ok = False
            else:
                if text[end_pos + 1] not in self._stop_chars_right:
                    end_pos += 1
                else:
                    right_ok = False

    complete_url = text[start_pos:end_pos + 1].lstrip('/')
    # remove last character from url
    # when it is allowed character right after TLD (e.g. dot, comma)
    temp_tlds = {tld + c for c in self._after_tld_chars}
    # get only dot+tld+one_char and compare
    if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
        complete_url = complete_url[:-1]

    # strip Markdown link syntax and enclosure characters, then make
    # sure what remains still has a valid domain for the matched TLD
    complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
    complete_url = self._remove_enclosure_from_url(
        complete_url, tld_pos-start_pos, tld)
    if not self._is_domain_valid(complete_url, tld):
        return ""

    return complete_url
def _is_domain_valid(self, url, tld):
"""
Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False
"""
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
"""
Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str
"""
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
# get position of most right left_char of enclosure pairs
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
tld_pos -= left_pos + 1
# Get valid domain when we have input as: example.com)/path
# we assume that if there is an enclosure character after the TLD it is
# the end of the URL itself, therefore we remove the rest
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue when a Markdown URL is found:
parsing of the URL does not stop at the right place, so the wrongly found
URL has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# A Markdown URL can look like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url
def gen_urls(self, text):
"""
Creates generator over found URLs in given text.
:param str text: text where we want to find URLs
:yields: URL found in text or empty string if no found
:rtype: str
"""
tld_pos = 0
matched_tlds = self._tlds_re.findall(text)
for tld in matched_tlds:
tmp_text = text[tld_pos:]
offset = tld_pos
tld_pos = tmp_text.find(tld)
validated = self._validate_tld_match(text, tld, offset + tld_pos)
if tld_pos != -1 and validated:
tmp_url = self._complete_url(text, offset + tld_pos, tld)
if tmp_url:
yield tmp_url
# do not search for TLD in already extracted URL
tld_pos_url = tmp_url.find(tld)
# move cursor right after found TLD
tld_pos += len(tld) + offset
# move cursor after end of found URL
tld_pos += len(tmp_url[tld_pos_url+len(tld):])
continue
# move cursor right after found TLD
tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
"""
Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list
"""
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls)
def has_urls(self, text):
"""
Checks if text contains any valid URL.
Returns True if text contains at least one URL.
>>> extractor = URLExtract()
>>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
True
>>> extractor.has_urls("Clean text")
False
:param text: text where we want to find URLs
:return: True if at least one URL was found, False otherwise
:rtype: bool
"""
return any(self.gen_urls(text))
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract._is_domain_valid | python | def _is_domain_valid(self, url, tld):
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True | Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L381-L451 | null | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
"""
Initialize after tld characters
"""
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get left enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars
def _reload_tlds_from_file(self):
"""
Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user
"""
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped))
def update(self):
"""
Update TLD list cache file.
:return: True if update was successful False otherwise
:rtype: bool
"""
if not self._download_tlds_list():
return False
self._reload_tlds_from_file()
return True
def update_when_older(self, days):
    """
    Refresh the TLD cache when it is older than *days* days or missing.

    :param int days: number of days from last change
    :return: True if update was successful, False otherwise
    :rtype: bool
    """
    last_cache = self._get_last_cachefile_modification()
    # no cache file yet -> always refresh
    if last_cache is None:
        return self.update()
    expires_at = last_cache + timedelta(days=days)
    if datetime.now() < expires_at:
        # cache is still fresh; nothing to do
        return True
    return self.update()
@staticmethod
def get_version():
"""
Returns version number.
:return: version number
:rtype: str
"""
# __version__ is expected to be defined at module level in this package
return __version__
def get_after_tld_chars(self):
    """
    Return the characters allowed after a TLD, as a list.

    :return: list of chars that are allowed after TLD
    :rtype: list
    """
    # copy into a list so callers cannot mutate the internal set
    return [char for char in self._after_tld_chars]
def set_after_tld_chars(self, after_tld_chars):
"""
Set chars that are allowed after TLD.
:param list after_tld_chars: list of characters
"""
# stored as a set for O(1) membership tests during URL parsing
self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
    """
    Return all stop characters as a list.

    .. deprecated:: 0.7
        Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
        instead.

    :return: list of stop chars
    :rtype: list
    """
    warnings.warn("Method get_stop_chars is deprecated, "
                  "use `get_stop_chars_left` or "
                  "`get_stop_chars_right` instead", DeprecationWarning)
    return [char for char in self._stop_chars]
def set_stop_chars(self, stop_chars):
    """
    Set stop characters used when determining end of URL.

    .. deprecated:: 0.7
        Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
        instead.

    :param list stop_chars: list of characters
    """
    warnings.warn("Method set_stop_chars is deprecated, "
                  "use `set_stop_chars_left` or "
                  "`set_stop_chars_right` instead", DeprecationWarning)
    # legacy API: one shared set object drives both directions
    unified = set(stop_chars)
    self._stop_chars = unified
    self._stop_chars_left = unified
    self._stop_chars_right = unified
def get_stop_chars_left(self):
"""
Returns set of stop chars for text on left from TLD.
:return: set of stop chars
:rtype: set
"""
# returns the live internal set, not a copy
return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
    """
    Set stop characters for text on left from TLD.

    Stop characters are used when determining end of URL.

    :param set stop_chars: set of characters
    :raises: TypeError
    """
    if isinstance(stop_chars, set):
        self._stop_chars_left = stop_chars
        # keep the combined set (used by _validate_tld_match) in sync
        self._stop_chars = stop_chars | self._stop_chars_right
        return
    raise TypeError("stop_chars should be type set "
                    "but {} was given".format(type(stop_chars)))
def get_stop_chars_right(self):
"""
Returns set of stop chars for text on right from TLD.
:return: set of stop chars
:rtype: set
"""
# returns the live internal set, not a copy
return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
    """
    Set stop characters for text on right from TLD.

    Stop characters are used when determining end of URL.

    :param set stop_chars: set of characters
    :raises: TypeError
    """
    if isinstance(stop_chars, set):
        self._stop_chars_right = stop_chars
        # keep the combined set (used by _validate_tld_match) in sync
        self._stop_chars = self._stop_chars_left | stop_chars
        return
    raise TypeError("stop_chars should be type set "
                    "but {} was given".format(type(stop_chars)))
def get_enclosures(self):
"""
Returns set of enclosure pairs that might be used to enclosure URL.
For example brackets (example.com), [example.com], {example.com}
:return: set of tuple of enclosure characters
:rtype: set(tuple(str,str))
"""
# returns the internal set itself (no copy); it is consumed by
# _get_after_tld_chars and _remove_enclosure_from_url
return self._enclosure
def add_enclosure(self, left_char, right_char):
    """
    Add a new enclosure pair of characters.

    The pair is stripped when detected at the beginning and end of a
    found URL, e.g. adding ("(", ")") makes "(example.com)" yield
    "example.com".

    :param str left_char: left character of enclosure pair - e.g. "("
    :param str right_char: right character of enclosure pair - e.g. ")"
    """
    assert len(left_char) == 1, \
        "Parameter left_char must be character not string"
    assert len(right_char) == 1, \
        "Parameter right_char must be character not string"
    # copy-on-write: _enclosure is a class-level set shared by all
    # instances; rebinding a fresh set on self keeps the change local
    # to this extractor instead of leaking into every other instance
    self._enclosure = set(self._enclosure) | {(left_char, right_char)}
    self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
    """
    Remove an enclosure pair from the set of enclosures.

    Removing a pair that is not present is a no-op (the after-TLD
    character set is still refreshed).

    :param str left_char: left character of enclosure pair - e.g. "("
    :param str right_char: right character of enclosure pair - e.g. ")"
    """
    assert len(left_char) == 1, \
        "Parameter left_char must be character not string"
    assert len(right_char) == 1, \
        "Parameter right_char must be character not string"
    rm_enclosure = (left_char, right_char)
    if rm_enclosure in self._enclosure:
        # copy-on-write: never mutate the class-level set shared by
        # all instances; rebind a reduced copy on this instance only
        self._enclosure = set(self._enclosure) - {rm_enclosure}
    self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
"""
Expand string in both sides to match whole URL.
:param str text: text where we want to find URL
:param int tld_pos: position of TLD
:param str tld: matched TLD which should be in text
:return: returns URL (empty string when the candidate is rejected)
:rtype: str
"""
# Walk outward from the TLD position until a stop character (or the
# text boundary) is hit on each side; that span is the URL candidate.
left_ok = True
right_ok = True
max_len = len(text) - 1
end_pos = tld_pos
start_pos = tld_pos
while left_ok or right_ok:
if left_ok:
if start_pos <= 0:
left_ok = False
else:
if text[start_pos - 1] not in self._stop_chars_left:
start_pos -= 1
else:
left_ok = False
if right_ok:
if end_pos >= max_len:
right_ok = False
else:
if text[end_pos + 1] not in self._stop_chars_right:
end_pos += 1
else:
right_ok = False
complete_url = text[start_pos:end_pos + 1].lstrip('/')
# remove last character from url
# when it is allowed character right after TLD (e.g. dot, comma)
temp_tlds = {tld + c for c in self._after_tld_chars}
# get only dot+tld+one_char and compare
if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
complete_url = complete_url[:-1]
# tld_pos-start_pos re-bases the TLD position onto the candidate string
complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
complete_url = self._remove_enclosure_from_url(
complete_url, tld_pos-start_pos, tld)
# reject candidates whose hostname fails validation
if not self._is_domain_valid(complete_url, tld):
return ""
return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
"""
Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool
"""
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
"""
Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str
"""
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
# get position of most right left_char of enclosure pairs
# (rfind returns -1 for pairs whose opener is absent before the TLD)
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
# a closer found before the TLD cannot close this enclosure; keep tail
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
# re-base the TLD position onto the trimmed string
tld_pos -= left_pos + 1
# Get valid domain when we have input as: example.com)/path
# we assume that if there is enclosure character after TLD it is
# the end URL it self therefore we remove the rest
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
# recurse: the truncated string may itself still be enclosed
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# Markdown url can looks like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url
def gen_urls(self, text):
"""
Creates generator over found URLs in given text.
:param str text: text where we want to find URLs
:yields: URL found in text or empty string if no found
:rtype: str
"""
# tld_pos doubles as a cursor into `text`: each iteration searches
# only the remainder text[tld_pos:] and advances past what was found
tld_pos = 0
matched_tlds = self._tlds_re.findall(text)
for tld in matched_tlds:
tmp_text = text[tld_pos:]
offset = tld_pos
tld_pos = tmp_text.find(tld)
# NOTE(review): when find() returns -1 this passes offset-1 to
# _validate_tld_match; the `tld_pos != -1` guard below still
# rejects the match, so the stale validation result is unused
validated = self._validate_tld_match(text, tld, offset + tld_pos)
if tld_pos != -1 and validated:
tmp_url = self._complete_url(text, offset + tld_pos, tld)
if tmp_url:
yield tmp_url
# do not search for TLD in already extracted URL
tld_pos_url = tmp_url.find(tld)
# move cursor right after found TLD
tld_pos += len(tld) + offset
# move cursor after end of found URL
tld_pos += len(tmp_url[tld_pos_url+len(tld):])
continue
# move cursor right after found TLD
tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
    """
    Find all URLs in given text.

    :param str text: text where we want to find URLs
    :param bool only_unique: return only unique URLs
    :return: list of URLs found in text
    :rtype: list
    """
    found = self.gen_urls(text)
    if only_unique:
        # OrderedDict keeps first-seen order while dropping duplicates
        return list(OrderedDict.fromkeys(found))
    return list(found)
def has_urls(self, text):
    """
    Tell whether *text* contains at least one valid URL.

    >>> extractor = URLExtract()
    >>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
    True
    >>> extractor.has_urls("Clean text")
    False

    :param text: text where we want to find URLs
    :return: True if et least one URL was found, False otherwise
    :rtype: bool
    """
    # stop at the first URL the generator produces
    for _url in self.gen_urls(text):
        return True
    return False
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract._remove_enclosure_from_url | python | def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
# get position of most right left_char of enclosure pairs
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
tld_pos -= left_pos + 1
# Get valid domain when we have input as: example.com)/path
# we assume that if there is enclosure character after TLD it is
# the end URL it self therefore we remove the rest
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url | Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L453-L494 | null | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
"""
Initialize after tld characters
"""
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get left enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars
def _reload_tlds_from_file(self):
"""
Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user
"""
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped))
def update(self):
"""
Update TLD list cache file.
:return: True if update was successful False otherwise
:rtype: bool
"""
if not self._download_tlds_list():
return False
self._reload_tlds_from_file()
return True
def update_when_older(self, days):
"""
Update TLD list cache file if the list is older than
number of days given in parameter `days` or if does not exist.
:param int days: number of days from last change
:return: True if update was successful, False otherwise
:rtype: bool
"""
last_cache = self._get_last_cachefile_modification()
if last_cache is None:
return self.update()
time_to_update = last_cache + timedelta(days=days)
if datetime.now() >= time_to_update:
return self.update()
return True
@staticmethod
def get_version():
"""
Returns version number.
:return: version number
:rtype: str
"""
return __version__
def get_after_tld_chars(self):
"""
Returns list of chars that are allowed after TLD
:return: list of chars that are allowed after TLD
:rtype: list
"""
return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
"""
Set chars that are allowed after TLD.
:param list after_tld_chars: list of characters
"""
self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
"""
Returns list of stop chars.
.. deprecated:: 0.7
Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
instead.
:return: list of stop chars
:rtype: list
"""
warnings.warn("Method get_stop_chars is deprecated, "
"use `get_stop_chars_left` or "
"`get_stop_chars_right` instead", DeprecationWarning)
return list(self._stop_chars)
def set_stop_chars(self, stop_chars):
"""
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
"""
warnings.warn("Method set_stop_chars is deprecated, "
"use `set_stop_chars_left` or "
"`set_stop_chars_right` instead", DeprecationWarning)
self._stop_chars = set(stop_chars)
self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars
def get_stop_chars_left(self):
"""
Returns set of stop chars for text on left from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
"""
Set stop characters for text on left from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_left = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
"""
Returns set of stop chars for text on right from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
"""
Set stop characters for text on right from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_right = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
"""
Returns set of enclosure pairs that might be used to enclosure URL.
For example brackets (example.com), [example.com], {example.com}
:return: set of tuple of enclosure characters
:rtype: set(tuple(str,str))
"""
return self._enclosure
def add_enclosure(self, left_char, right_char):
"""
Add new enclosure pair of characters. That and should be removed
when their presence is detected at beginning and end of found URL
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
self._enclosure.add((left_char, right_char))
self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
"""
Remove enclosure pair from set of enclosures.
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
rm_enclosure = (left_char, right_char)
if rm_enclosure in self._enclosure:
self._enclosure.remove(rm_enclosure)
self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
"""
Expand string in both sides to match whole URL.
:param str text: text where we want to find URL
:param int tld_pos: position of TLD
:param str tld: matched TLD which should be in text
:return: returns URL
:rtype: str
"""
left_ok = True
right_ok = True
max_len = len(text) - 1
end_pos = tld_pos
start_pos = tld_pos
while left_ok or right_ok:
if left_ok:
if start_pos <= 0:
left_ok = False
else:
if text[start_pos - 1] not in self._stop_chars_left:
start_pos -= 1
else:
left_ok = False
if right_ok:
if end_pos >= max_len:
right_ok = False
else:
if text[end_pos + 1] not in self._stop_chars_right:
end_pos += 1
else:
right_ok = False
complete_url = text[start_pos:end_pos + 1].lstrip('/')
# remove last character from url
# when it is allowed character right after TLD (e.g. dot, comma)
temp_tlds = {tld + c for c in self._after_tld_chars}
# get only dot+tld+one_char and compare
if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
complete_url = complete_url[:-1]
complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
complete_url = self._remove_enclosure_from_url(
complete_url, tld_pos-start_pos, tld)
if not self._is_domain_valid(complete_url, tld):
return ""
return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
"""
Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool
"""
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False
def _is_domain_valid(self, url, tld):
"""
Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False
"""
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True
@staticmethod
def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# Markdown url can looks like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url
def gen_urls(self, text):
"""
Creates generator over found URLs in given text.
:param str text: text where we want to find URLs
:yields: URL found in text or empty string if no found
:rtype: str
"""
tld_pos = 0
matched_tlds = self._tlds_re.findall(text)
for tld in matched_tlds:
tmp_text = text[tld_pos:]
offset = tld_pos
tld_pos = tmp_text.find(tld)
validated = self._validate_tld_match(text, tld, offset + tld_pos)
if tld_pos != -1 and validated:
tmp_url = self._complete_url(text, offset + tld_pos, tld)
if tmp_url:
yield tmp_url
# do not search for TLD in already extracted URL
tld_pos_url = tmp_url.find(tld)
# move cursor right after found TLD
tld_pos += len(tld) + offset
# move cursor after end of found URL
tld_pos += len(tmp_url[tld_pos_url+len(tld):])
continue
# move cursor right after found TLD
tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
"""
Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list
"""
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls)
def has_urls(self, text):
"""
Checks if text contains any valid URL.
Returns True if text contains at least one URL.
>>> extractor = URLExtract()
>>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
True
>>> extractor.has_urls("Clean text")
False
:param text: text where we want to find URLs
:return: True if et least one URL was found, False otherwise
:rtype: bool
"""
return any(self.gen_urls(text))
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract._split_markdown | python | def _split_markdown(text_url, tld_pos):
# Markdown url can looks like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url | Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L497-L523 | null | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
"""
Initialize after tld characters
"""
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get left enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars
def _reload_tlds_from_file(self):
"""
Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user
"""
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped))
def update(self):
"""
Update TLD list cache file.
:return: True if update was successful False otherwise
:rtype: bool
"""
if not self._download_tlds_list():
return False
self._reload_tlds_from_file()
return True
def update_when_older(self, days):
"""
Update TLD list cache file if the list is older than
number of days given in parameter `days` or if does not exist.
:param int days: number of days from last change
:return: True if update was successful, False otherwise
:rtype: bool
"""
last_cache = self._get_last_cachefile_modification()
if last_cache is None:
return self.update()
time_to_update = last_cache + timedelta(days=days)
if datetime.now() >= time_to_update:
return self.update()
return True
@staticmethod
def get_version():
"""
Returns version number.
:return: version number
:rtype: str
"""
return __version__
def get_after_tld_chars(self):
"""
Returns list of chars that are allowed after TLD
:return: list of chars that are allowed after TLD
:rtype: list
"""
return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
"""
Set chars that are allowed after TLD.
:param list after_tld_chars: list of characters
"""
self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
"""
Returns list of stop chars.
.. deprecated:: 0.7
Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
instead.
:return: list of stop chars
:rtype: list
"""
warnings.warn("Method get_stop_chars is deprecated, "
"use `get_stop_chars_left` or "
"`get_stop_chars_right` instead", DeprecationWarning)
return list(self._stop_chars)
def set_stop_chars(self, stop_chars):
"""
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
"""
warnings.warn("Method set_stop_chars is deprecated, "
"use `set_stop_chars_left` or "
"`set_stop_chars_right` instead", DeprecationWarning)
self._stop_chars = set(stop_chars)
self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars
def get_stop_chars_left(self):
"""
Returns set of stop chars for text on left from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
"""
Set stop characters for text on left from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_left = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
"""
Returns set of stop chars for text on right from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
"""
Set stop characters for text on right from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_right = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
"""
Returns set of enclosure pairs that might be used to enclosure URL.
For example brackets (example.com), [example.com], {example.com}
:return: set of tuple of enclosure characters
:rtype: set(tuple(str,str))
"""
return self._enclosure
def add_enclosure(self, left_char, right_char):
"""
Add new enclosure pair of characters. That and should be removed
when their presence is detected at beginning and end of found URL
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
self._enclosure.add((left_char, right_char))
self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
"""
Remove enclosure pair from set of enclosures.
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
rm_enclosure = (left_char, right_char)
if rm_enclosure in self._enclosure:
self._enclosure.remove(rm_enclosure)
self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
"""
Expand string in both sides to match whole URL.
:param str text: text where we want to find URL
:param int tld_pos: position of TLD
:param str tld: matched TLD which should be in text
:return: returns URL
:rtype: str
"""
left_ok = True
right_ok = True
max_len = len(text) - 1
end_pos = tld_pos
start_pos = tld_pos
while left_ok or right_ok:
if left_ok:
if start_pos <= 0:
left_ok = False
else:
if text[start_pos - 1] not in self._stop_chars_left:
start_pos -= 1
else:
left_ok = False
if right_ok:
if end_pos >= max_len:
right_ok = False
else:
if text[end_pos + 1] not in self._stop_chars_right:
end_pos += 1
else:
right_ok = False
complete_url = text[start_pos:end_pos + 1].lstrip('/')
# remove last character from url
# when it is allowed character right after TLD (e.g. dot, comma)
temp_tlds = {tld + c for c in self._after_tld_chars}
# get only dot+tld+one_char and compare
if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
complete_url = complete_url[:-1]
complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
complete_url = self._remove_enclosure_from_url(
complete_url, tld_pos-start_pos, tld)
if not self._is_domain_valid(complete_url, tld):
return ""
return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
"""
Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool
"""
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False
def _is_domain_valid(self, url, tld):
"""
Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False
"""
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
"""
Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str
"""
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
# get position of most right left_char of enclosure pairs
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
tld_pos -= left_pos + 1
# Get valid domain when we have input as: example.com)/path
# we assume that if there is enclosure character after TLD it is
# the end URL it self therefore we remove the rest
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url
@staticmethod
def gen_urls(self, text):
"""
Creates generator over found URLs in given text.
:param str text: text where we want to find URLs
:yields: URL found in text or empty string if no found
:rtype: str
"""
tld_pos = 0
matched_tlds = self._tlds_re.findall(text)
for tld in matched_tlds:
tmp_text = text[tld_pos:]
offset = tld_pos
tld_pos = tmp_text.find(tld)
validated = self._validate_tld_match(text, tld, offset + tld_pos)
if tld_pos != -1 and validated:
tmp_url = self._complete_url(text, offset + tld_pos, tld)
if tmp_url:
yield tmp_url
# do not search for TLD in already extracted URL
tld_pos_url = tmp_url.find(tld)
# move cursor right after found TLD
tld_pos += len(tld) + offset
# move cursor after end of found URL
tld_pos += len(tmp_url[tld_pos_url+len(tld):])
continue
# move cursor right after found TLD
tld_pos += len(tld) + offset
def find_urls(self, text, only_unique=False):
"""
Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list
"""
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls)
def has_urls(self, text):
"""
Checks if text contains any valid URL.
Returns True if text contains at least one URL.
>>> extractor = URLExtract()
>>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
True
>>> extractor.has_urls("Clean text")
False
:param text: text where we want to find URLs
:return: True if et least one URL was found, False otherwise
:rtype: bool
"""
return any(self.gen_urls(text))
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract.gen_urls | python | def gen_urls(self, text):
tld_pos = 0
matched_tlds = self._tlds_re.findall(text)
for tld in matched_tlds:
tmp_text = text[tld_pos:]
offset = tld_pos
tld_pos = tmp_text.find(tld)
validated = self._validate_tld_match(text, tld, offset + tld_pos)
if tld_pos != -1 and validated:
tmp_url = self._complete_url(text, offset + tld_pos, tld)
if tmp_url:
yield tmp_url
# do not search for TLD in already extracted URL
tld_pos_url = tmp_url.find(tld)
# move cursor right after found TLD
tld_pos += len(tld) + offset
# move cursor after end of found URL
tld_pos += len(tmp_url[tld_pos_url+len(tld):])
continue
# move cursor right after found TLD
tld_pos += len(tld) + offset | Creates generator over found URLs in given text.
:param str text: text where we want to find URLs
:yields: URL found in text or empty string if no found
:rtype: str | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L525-L555 | [
"def _complete_url(self, text, tld_pos, tld):\n \"\"\"\n Expand string in both sides to match whole URL.\n\n :param str text: text where we want to find URL\n :param int tld_pos: position of TLD\n :param str tld: matched TLD which should be in text\n :return: returns URL\n :rtype: str\n \"\"\"\n\n left_ok = True\n right_ok = True\n\n max_len = len(text) - 1\n end_pos = tld_pos\n start_pos = tld_pos\n while left_ok or right_ok:\n if left_ok:\n if start_pos <= 0:\n left_ok = False\n else:\n if text[start_pos - 1] not in self._stop_chars_left:\n start_pos -= 1\n else:\n left_ok = False\n if right_ok:\n if end_pos >= max_len:\n right_ok = False\n else:\n if text[end_pos + 1] not in self._stop_chars_right:\n end_pos += 1\n else:\n right_ok = False\n\n complete_url = text[start_pos:end_pos + 1].lstrip('/')\n # remove last character from url\n # when it is allowed character right after TLD (e.g. dot, comma)\n temp_tlds = {tld + c for c in self._after_tld_chars}\n # get only dot+tld+one_char and compare\n if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:\n complete_url = complete_url[:-1]\n\n complete_url = self._split_markdown(complete_url, tld_pos-start_pos)\n complete_url = self._remove_enclosure_from_url(\n complete_url, tld_pos-start_pos, tld)\n if not self._is_domain_valid(complete_url, tld):\n return \"\"\n\n return complete_url\n",
"def _validate_tld_match(self, text, matched_tld, tld_pos):\n \"\"\"\n Validate TLD match - tells if at found position is really TLD.\n\n :param str text: text where we want to find URLs\n :param str matched_tld: matched TLD\n :param int tld_pos: position of matched TLD\n :return: True if match is valid, False otherwise\n :rtype: bool\n \"\"\"\n if tld_pos > len(text):\n return False\n\n right_tld_pos = tld_pos + len(matched_tld)\n if len(text) > right_tld_pos:\n if text[right_tld_pos] in self._after_tld_chars:\n if tld_pos > 0 and text[tld_pos - 1] \\\n not in self._stop_chars_left:\n return True\n else:\n if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:\n return True\n\n return False\n"
] | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
"""
Initialize after tld characters
"""
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get left enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars
def _reload_tlds_from_file(self):
"""
Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user
"""
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped))
def update(self):
"""
Update TLD list cache file.
:return: True if update was successful False otherwise
:rtype: bool
"""
if not self._download_tlds_list():
return False
self._reload_tlds_from_file()
return True
def update_when_older(self, days):
"""
Update TLD list cache file if the list is older than
number of days given in parameter `days` or if does not exist.
:param int days: number of days from last change
:return: True if update was successful, False otherwise
:rtype: bool
"""
last_cache = self._get_last_cachefile_modification()
if last_cache is None:
return self.update()
time_to_update = last_cache + timedelta(days=days)
if datetime.now() >= time_to_update:
return self.update()
return True
@staticmethod
def get_version():
"""
Returns version number.
:return: version number
:rtype: str
"""
return __version__
def get_after_tld_chars(self):
"""
Returns list of chars that are allowed after TLD
:return: list of chars that are allowed after TLD
:rtype: list
"""
return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
"""
Set chars that are allowed after TLD.
:param list after_tld_chars: list of characters
"""
self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
"""
Returns list of stop chars.
.. deprecated:: 0.7
Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
instead.
:return: list of stop chars
:rtype: list
"""
warnings.warn("Method get_stop_chars is deprecated, "
"use `get_stop_chars_left` or "
"`get_stop_chars_right` instead", DeprecationWarning)
return list(self._stop_chars)
def set_stop_chars(self, stop_chars):
"""
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
"""
warnings.warn("Method set_stop_chars is deprecated, "
"use `set_stop_chars_left` or "
"`set_stop_chars_right` instead", DeprecationWarning)
self._stop_chars = set(stop_chars)
self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars
def get_stop_chars_left(self):
"""
Returns set of stop chars for text on left from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
"""
Set stop characters for text on left from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_left = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
"""
Returns set of stop chars for text on right from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
"""
Set stop characters for text on right from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_right = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
"""
Returns set of enclosure pairs that might be used to enclosure URL.
For example brackets (example.com), [example.com], {example.com}
:return: set of tuple of enclosure characters
:rtype: set(tuple(str,str))
"""
return self._enclosure
def add_enclosure(self, left_char, right_char):
"""
Add new enclosure pair of characters. That and should be removed
when their presence is detected at beginning and end of found URL
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
self._enclosure.add((left_char, right_char))
self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
"""
Remove enclosure pair from set of enclosures.
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
rm_enclosure = (left_char, right_char)
if rm_enclosure in self._enclosure:
self._enclosure.remove(rm_enclosure)
self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
"""
Expand string in both sides to match whole URL.
:param str text: text where we want to find URL
:param int tld_pos: position of TLD
:param str tld: matched TLD which should be in text
:return: returns URL
:rtype: str
"""
left_ok = True
right_ok = True
max_len = len(text) - 1
end_pos = tld_pos
start_pos = tld_pos
while left_ok or right_ok:
if left_ok:
if start_pos <= 0:
left_ok = False
else:
if text[start_pos - 1] not in self._stop_chars_left:
start_pos -= 1
else:
left_ok = False
if right_ok:
if end_pos >= max_len:
right_ok = False
else:
if text[end_pos + 1] not in self._stop_chars_right:
end_pos += 1
else:
right_ok = False
complete_url = text[start_pos:end_pos + 1].lstrip('/')
# remove last character from url
# when it is allowed character right after TLD (e.g. dot, comma)
temp_tlds = {tld + c for c in self._after_tld_chars}
# get only dot+tld+one_char and compare
if complete_url[len(complete_url)-len(tld)-1:] in temp_tlds:
complete_url = complete_url[:-1]
complete_url = self._split_markdown(complete_url, tld_pos-start_pos)
complete_url = self._remove_enclosure_from_url(
complete_url, tld_pos-start_pos, tld)
if not self._is_domain_valid(complete_url, tld):
return ""
return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
"""
Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool
"""
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False
def _is_domain_valid(self, url, tld):
"""
Checks if given URL has valid domain name (ignores subdomains)
:param str url: complete URL that we want to check
:param str tld: TLD that should be found at the end of URL (hostname)
:return: True if URL is valid, False otherwise
:rtype: bool
>>> extractor = URLExtract()
>>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
True
>>> extractor._is_domain_valid("invalid.cz.", ".cz")
False
>>> extractor._is_domain_valid("invalid.cz,", ".cz")
False
>>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
False
>>> extractor._is_domain_valid("-is.valid.cz", ".cz")
True
>>> extractor._is_domain_valid("not.valid-.cz", ".cz")
False
>>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
False
"""
if not url:
return False
scheme_pos = url.find('://')
if scheme_pos == -1:
url = 'http://' + url
url_parts = uritools.urisplit(url)
# <scheme>://<authority>/<path>?<query>#<fragment>
try:
host = url_parts.gethost()
except ValueError:
self._logger.info(
"Invalid host '%s'. "
"If the host is valid report a bug.", url
)
return False
if not host:
return False
host_parts = host.split('.')
if len(host_parts) <= 1:
return False
host_tld = '.'+host_parts[-1]
if host_tld != tld:
return False
top = host_parts[-2]
if self._hostname_re.match(top) is None:
return False
return True
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
"""
Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str
"""
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
# get position of most right left_char of enclosure pairs
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
tld_pos -= left_pos + 1
# Get valid domain when we have input as: example.com)/path
# we assume that if there is enclosure character after TLD it is
# the end URL it self therefore we remove the rest
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# Markdown url can looks like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url
def find_urls(self, text, only_unique=False):
"""
Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list
"""
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls)
def has_urls(self, text):
"""
Checks if text contains any valid URL.
Returns True if text contains at least one URL.
>>> extractor = URLExtract()
>>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
True
>>> extractor.has_urls("Clean text")
False
:param text: text where we want to find URLs
:return: True if et least one URL was found, False otherwise
:rtype: bool
"""
return any(self.gen_urls(text))
|
lipoja/URLExtract | urlextract/urlextract_core.py | URLExtract.find_urls | python | def find_urls(self, text, only_unique=False):
urls = self.gen_urls(text)
urls = OrderedDict.fromkeys(urls) if only_unique else urls
return list(urls) | Find all URLs in given text.
:param str text: text where we want to find URLs
:param bool only_unique: return only unique URLs
:return: list of URLs found in text
:rtype: list | train | https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/urlextract_core.py#L557-L568 | [
"def gen_urls(self, text):\n \"\"\"\n Creates generator over found URLs in given text.\n\n :param str text: text where we want to find URLs\n :yields: URL found in text or empty string if no found\n :rtype: str\n \"\"\"\n tld_pos = 0\n matched_tlds = self._tlds_re.findall(text)\n\n for tld in matched_tlds:\n tmp_text = text[tld_pos:]\n offset = tld_pos\n tld_pos = tmp_text.find(tld)\n validated = self._validate_tld_match(text, tld, offset + tld_pos)\n if tld_pos != -1 and validated:\n tmp_url = self._complete_url(text, offset + tld_pos, tld)\n if tmp_url:\n yield tmp_url\n\n # do not search for TLD in already extracted URL\n tld_pos_url = tmp_url.find(tld)\n # move cursor right after found TLD\n tld_pos += len(tld) + offset\n # move cursor after end of found URL\n tld_pos += len(tmp_url[tld_pos_url+len(tld):])\n continue\n\n # move cursor right after found TLD\n tld_pos += len(tld) + offset\n"
] | class URLExtract(CacheFile):
"""
Class for finding and extracting URLs from given string.
**Examples:**
.. code-block:: python
from urlextract import URLExtract
extractor = URLExtract()
urls = extractor.find_urls("Let's have URL example.com example.")
print(urls) # prints: ['example.com']
# Another way is to get a generator over found URLs in text:
for url in extractor.gen_urls(example_text):
print(url) # prints: ['example.com']
# Or if you want to just check if there is at least one URL in text:
if extractor.has_urls(example_text):
print("Given text contains some URL")
"""
# compiled regexp for naive validation of host name
_hostname_re = re.compile(
r"^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])$")
# list of enclosure of URL that should be removed
_enclosure = {
("(", ")"),
("{", "}"),
("[", "]"),
("\"", "\""),
("\\", "\\"),
("'", "'"),
("`", "`"),
}
def __init__(self, **kwargs):
"""
Initialize function for URLExtract class.
Tries to get cached TLDs, if cached file does not exist it will try
to download new list from IANA and save it to cache file.
"""
super(URLExtract, self).__init__(**kwargs)
self._tlds_re = None
self._reload_tlds_from_file()
# general stop characters
general_stop_chars = {'\"', '\'', '<', '>', ';'}
# defining default stop chars left
self._stop_chars_left = set(string.whitespace)
self._stop_chars_left |= general_stop_chars | {'|', '=', ']', ')', '}'}
# defining default stop chars left
self._stop_chars_right = set(string.whitespace)
self._stop_chars_right |= general_stop_chars
# preprocessed union _stop_chars is used in _validate_tld_match
self._stop_chars = self._stop_chars_left | self._stop_chars_right
# characters that are allowed to be right after TLD
self._after_tld_chars = self._get_after_tld_chars()
def _get_after_tld_chars(self):
"""
Initialize after tld characters
"""
after_tld_chars = set(string.whitespace)
after_tld_chars |= {'/', '\"', '\'', '<', '>', '?', ':', '.', ','}
# get left enclosure characters
_, right_enclosure = zip(*self._enclosure)
# add right enclosure characters to be valid after TLD
# for correct parsing of URL e.g. (example.com)
after_tld_chars |= set(right_enclosure)
return after_tld_chars
def _reload_tlds_from_file(self):
"""
Reloads TLDs from file and compile regexp.
:raises: CacheFileError when cached file is not readable for user
"""
tlds = sorted(self._load_cached_tlds(), key=len, reverse=True)
re_escaped = [re.escape(str(tld)) for tld in tlds]
self._tlds_re = re.compile('|'.join(re_escaped))
def update(self):
"""
Update TLD list cache file.
:return: True if update was successful False otherwise
:rtype: bool
"""
if not self._download_tlds_list():
return False
self._reload_tlds_from_file()
return True
def update_when_older(self, days):
"""
Update TLD list cache file if the list is older than
number of days given in parameter `days` or if does not exist.
:param int days: number of days from last change
:return: True if update was successful, False otherwise
:rtype: bool
"""
last_cache = self._get_last_cachefile_modification()
if last_cache is None:
return self.update()
time_to_update = last_cache + timedelta(days=days)
if datetime.now() >= time_to_update:
return self.update()
return True
@staticmethod
def get_version():
"""
Returns version number.
:return: version number
:rtype: str
"""
return __version__
def get_after_tld_chars(self):
"""
Returns list of chars that are allowed after TLD
:return: list of chars that are allowed after TLD
:rtype: list
"""
return list(self._after_tld_chars)
def set_after_tld_chars(self, after_tld_chars):
"""
Set chars that are allowed after TLD.
:param list after_tld_chars: list of characters
"""
self._after_tld_chars = set(after_tld_chars)
def get_stop_chars(self):
"""
Returns list of stop chars.
.. deprecated:: 0.7
Use :func:`get_stop_chars_left` or :func:`get_stop_chars_right`
instead.
:return: list of stop chars
:rtype: list
"""
warnings.warn("Method get_stop_chars is deprecated, "
"use `get_stop_chars_left` or "
"`get_stop_chars_right` instead", DeprecationWarning)
return list(self._stop_chars)
def set_stop_chars(self, stop_chars):
"""
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
"""
warnings.warn("Method set_stop_chars is deprecated, "
"use `set_stop_chars_left` or "
"`set_stop_chars_right` instead", DeprecationWarning)
self._stop_chars = set(stop_chars)
self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars
def get_stop_chars_left(self):
"""
Returns set of stop chars for text on left from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_left
def set_stop_chars_left(self, stop_chars):
"""
Set stop characters for text on left from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_left = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_stop_chars_right(self):
"""
Returns set of stop chars for text on right from TLD.
:return: set of stop chars
:rtype: set
"""
return self._stop_chars_right
def set_stop_chars_right(self, stop_chars):
"""
Set stop characters for text on right from TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_right = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def get_enclosures(self):
"""
Returns set of enclosure pairs that might be used to enclosure URL.
For example brackets (example.com), [example.com], {example.com}
:return: set of tuple of enclosure characters
:rtype: set(tuple(str,str))
"""
return self._enclosure
def add_enclosure(self, left_char, right_char):
"""
Add new enclosure pair of characters. That and should be removed
when their presence is detected at beginning and end of found URL
:param str left_char: left character of enclosure pair - e.g. "("
:param str right_char: right character of enclosure pair - e.g. ")"
"""
assert len(left_char) == 1, \
"Parameter left_char must be character not string"
assert len(right_char) == 1, \
"Parameter right_char must be character not string"
self._enclosure.add((left_char, right_char))
self._after_tld_chars = self._get_after_tld_chars()
def remove_enclosure(self, left_char, right_char):
    """
    Forget a previously registered enclosure pair; a pair that was
    never registered is silently ignored.

    :param str left_char: left character of enclosure pair - e.g. "("
    :param str right_char: right character of enclosure pair - e.g. ")"
    """
    for name, char in (("left_char", left_char),
                       ("right_char", right_char)):
        assert len(char) == 1, \
            "Parameter {} must be character not string".format(name)

    # discard() is a no-op when the pair is absent, matching the
    # original membership-guarded remove().
    self._enclosure.discard((left_char, right_char))
    self._after_tld_chars = self._get_after_tld_chars()
def _complete_url(self, text, tld_pos, tld):
    """
    Expand outward from the TLD position to cover the whole URL.

    :param str text: text where we want to find URL
    :param int tld_pos: position of TLD
    :param str tld: matched TLD which should be in text
    :return: the completed URL, or "" when the domain is invalid
    :rtype: str
    """
    # Walk left until a left-stop character or start of text.
    start_pos = tld_pos
    while start_pos > 0 and text[start_pos - 1] not in self._stop_chars_left:
        start_pos -= 1
    # Walk right until a right-stop character or end of text.
    end_pos = tld_pos
    last_index = len(text) - 1
    while end_pos < last_index and text[end_pos + 1] not in self._stop_chars_right:
        end_pos += 1

    complete_url = text[start_pos:end_pos + 1].lstrip('/')
    # Drop one trailing character that is merely *allowed* after a TLD
    # (e.g. dot, comma) rather than part of the URL itself.
    temp_tlds = {tld + c for c in self._after_tld_chars}
    if complete_url[len(complete_url) - len(tld) - 1:] in temp_tlds:
        complete_url = complete_url[:-1]

    complete_url = self._split_markdown(complete_url, tld_pos - start_pos)
    complete_url = self._remove_enclosure_from_url(
        complete_url, tld_pos - start_pos, tld)
    if not self._is_domain_valid(complete_url, tld):
        return ""
    return complete_url
def _validate_tld_match(self, text, matched_tld, tld_pos):
"""
Validate TLD match - tells if at found position is really TLD.
:param str text: text where we want to find URLs
:param str matched_tld: matched TLD
:param int tld_pos: position of matched TLD
:return: True if match is valid, False otherwise
:rtype: bool
"""
if tld_pos > len(text):
return False
right_tld_pos = tld_pos + len(matched_tld)
if len(text) > right_tld_pos:
if text[right_tld_pos] in self._after_tld_chars:
if tld_pos > 0 and text[tld_pos - 1] \
not in self._stop_chars_left:
return True
else:
if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left:
return True
return False
def _is_domain_valid(self, url, tld):
    """
    Checks if given URL has valid domain name (ignores subdomains)

    :param str url: complete URL that we want to check
    :param str tld: TLD that should be found at the end of URL (hostname)
    :return: True if URL is valid, False otherwise
    :rtype: bool

    >>> extractor = URLExtract()
    >>> extractor._is_domain_valid("janlipovsky.cz", ".cz")
    True

    >>> extractor._is_domain_valid("https://janlipovsky.cz", ".cz")
    True

    >>> extractor._is_domain_valid("invalid.cz.", ".cz")
    False

    >>> extractor._is_domain_valid("invalid.cz,", ".cz")
    False

    >>> extractor._is_domain_valid("in.v_alid.cz", ".cz")
    False

    >>> extractor._is_domain_valid("-is.valid.cz", ".cz")
    True

    >>> extractor._is_domain_valid("not.valid-.cz", ".cz")
    False

    >>> extractor._is_domain_valid("http://blog/media/path.io.jpg", ".cz")
    False
    """
    if not url:
        return False

    # uritools needs a scheme to locate the authority part.
    if '://' not in url:
        url = 'http://' + url

    # <scheme>://<authority>/<path>?<query>#<fragment>
    try:
        host = uritools.urisplit(url).gethost()
    except ValueError:
        self._logger.info(
            "Invalid host '%s'. "
            "If the host is valid report a bug.", url
        )
        return False

    if not host:
        return False

    labels = host.split('.')
    if len(labels) < 2:
        return False

    # Hostname must end with the matched TLD ...
    if '.' + labels[-1] != tld:
        return False
    # ... and the label directly before it must be a valid hostname.
    return self._hostname_re.match(labels[-2]) is not None
def _remove_enclosure_from_url(self, text_url, tld_pos, tld):
"""
Removes enclosure characters from URL given in text_url.
For example: (example.com) -> example.com
:param str text_url: text with URL that we want to extract from
enclosure of two characters
:param int tld_pos: position of TLD in text_url
:param str tld: matched TLD which should be in text
:return: URL that has removed enclosure
:rtype: str
"""
enclosure_map = {
left_char: right_char
for left_char, right_char in self._enclosure
}
# get position of most right left_char of enclosure pairs
left_pos = max([
text_url.rfind(left_char, 0, tld_pos)
for left_char in enclosure_map.keys()
])
left_char = text_url[left_pos] if left_pos >= 0 else ''
right_char = enclosure_map.get(left_char, '')
right_pos = text_url.rfind(right_char) if right_char else len(text_url)
if right_pos < 0 or right_pos < tld_pos:
right_pos = len(text_url)
new_url = text_url[left_pos + 1:right_pos]
tld_pos -= left_pos + 1
# Get valid domain when we have input as: example.com)/path
# we assume that if there is enclosure character after TLD it is
# the end URL it self therefore we remove the rest
after_tld_pos = tld_pos + len(tld)
if after_tld_pos < len(new_url):
if new_url[after_tld_pos] in enclosure_map.values():
new_url_tmp = new_url[:after_tld_pos]
return self._remove_enclosure_from_url(
new_url_tmp, tld_pos, tld)
return new_url
@staticmethod
def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# Markdown url can looks like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url
def gen_urls(self, text):
    """
    Creates generator over found URLs in given text.

    :param str text: text where we want to find URLs
    :yields: URL found in text or empty string if no found
    :rtype: str
    """
    # ``tld_pos`` is a cursor into ``text``; each iteration searches only
    # the suffix that starts at the cursor so already-consumed URLs are
    # not re-matched.
    tld_pos = 0
    matched_tlds = self._tlds_re.findall(text)

    for tld in matched_tlds:
        tmp_text = text[tld_pos:]
        offset = tld_pos
        # Position of this TLD *relative to the suffix* (offset holds the
        # absolute start of the suffix).
        tld_pos = tmp_text.find(tld)
        validated = self._validate_tld_match(text, tld, offset + tld_pos)
        # NOTE(review): when find() returns -1 the cursor advance below
        # still adds len(tld) + offset, leaving the cursor one short of
        # the previous position plus len(tld) -- looks benign but confirm.
        if tld_pos != -1 and validated:
            tmp_url = self._complete_url(text, offset + tld_pos, tld)
            if tmp_url:
                yield tmp_url

                # do not search for TLD in already extracted URL
                tld_pos_url = tmp_url.find(tld)
                # move cursor right after found TLD
                tld_pos += len(tld) + offset
                # move cursor after end of found URL (the tail of the URL
                # that follows the TLD inside tmp_url)
                tld_pos += len(tmp_url[tld_pos_url+len(tld):])
                continue

        # move cursor right after found TLD
        tld_pos += len(tld) + offset
def has_urls(self, text):
    """
    Tell whether *text* contains at least one valid URL.

    >>> extractor = URLExtract()
    >>> extractor.has_urls("Get unique URL from: http://janlipovsky.cz")
    True

    >>> extractor.has_urls("Clean text")
    False

    :param text: text where we want to find URLs
    :return: True if et least one URL was found, False otherwise
    :rtype: bool
    """
    # Stop at the first URL the generator produces.
    for _ in self.gen_urls(text):
        return True
    return False
|
mitodl/pylti | pylti/common.py | _post_patched_request | python | def _post_patched_request(consumers, lti_key, body,
url, method, content_type):
# pylint: disable=too-many-locals, too-many-arguments
oauth_server = LTIOAuthServer(consumers)
oauth_server.add_signature_method(SignatureMethod_HMAC_SHA1_Unicode())
lti_consumer = oauth_server.lookup_consumer(lti_key)
lti_cert = oauth_server.lookup_cert(lti_key)
secret = lti_consumer.secret
consumer = oauth2.Consumer(key=lti_key, secret=secret)
client = oauth2.Client(consumer)
if lti_cert:
client.add_certificate(key=lti_cert, cert=lti_cert, domain='')
log.debug("cert %s", lti_cert)
import httplib2
http = httplib2.Http
# pylint: disable=protected-access
normalize = http._normalize_headers
def my_normalize(self, headers):
""" This function patches Authorization header """
ret = normalize(self, headers)
if 'authorization' in ret:
ret['Authorization'] = ret.pop('authorization')
log.debug("headers")
log.debug(headers)
return ret
http._normalize_headers = my_normalize
monkey_patch_function = normalize
response, content = client.request(
url,
method,
body=body.encode('utf-8'),
headers={'Content-Type': content_type})
http = httplib2.Http
# pylint: disable=protected-access
http._normalize_headers = monkey_patch_function
log.debug("key %s", lti_key)
log.debug("secret %s", secret)
log.debug("url %s", url)
log.debug("response %s", response)
log.debug("content %s", format(content))
return response, content | Authorization header needs to be capitalized for some LTI clients
this function ensures that header is capitalized
:param body: body of the call
:param client: OAuth Client
:param url: outcome url
:return: response | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L145-L203 | null | # -*- coding: utf-8 -*-
"""
Common classes and methods for PyLTI module
"""
from __future__ import absolute_import
import logging
import json
import oauth2
from xml.etree import ElementTree as etree
from oauth2 import STRING_TYPES
from six.moves.urllib.parse import urlparse, urlencode
# Module-level logger for the pylti package.
log = logging.getLogger('pylti.common')  # pylint: disable=invalid-name

# LTI launch-request parameters recognised by this package.
# NOTE(review): presumably the framework adapters copy these from the
# launch request into the session -- they are not read in this file;
# confirm against the flask/chalice modules.
LTI_PROPERTY_LIST = [
    'oauth_consumer_key',
    'launch_presentation_return_url',
    'user_id',
    'oauth_nonce',
    'context_label',
    'context_id',
    'resource_link_title',
    'resource_link_id',
    'lis_person_contact_email_primary',
    'lis_person_contact_emailprimary',
    'lis_person_name_full',
    'lis_person_name_family',
    'lis_person_name_given',
    'lis_result_sourcedid',
    'lis_person_sourcedid',
    'launch_type',
    'lti_message',
    'lti_version',
    'roles',
    'lis_outcome_service_url'
]

# Maps each role name accepted by the @lti decorator to the LTI role
# values that satisfy it (used by LTIBase.is_role).
LTI_ROLES = {
    u'staff': [u'Administrator', u'Instructor', ],
    u'instructor': [u'Instructor', ],
    u'administrator': [u'Administrator', ],
    u'student': [u'Student', u'Learner', ]
    # There is also a special role u'any' that ignores role check
}

# Session key marking that the current session passed LTI verification.
LTI_SESSION_KEY = u'lti_authenticated'

# Valid values for the decorator's request= argument
# (see LTIBase.verify).
LTI_REQUEST_TYPE = [u'any', u'initial', u'session']
def default_error(exception=None):
    """
    Fallback handler that logs the failure and renders a minimal error
    response; applications should override it.

    :param exception: ignored, accepted for handler-signature compatibility
    :return: (message, HTTP status) tuple
    """
    # pylint: disable=unused-argument
    message = "There was an LTI communication error"
    log.exception(message)
    return message, 500
class LTIOAuthServer(oauth2.Server):
    """
    OAuth server that resolves consumers from a configuration mapping.

    Largely taken from reference implementation
    for app engine at https://code.google.com/p/ims-dev/
    """

    def __init__(self, consumers, signature_methods=None):
        """
        Create OAuth server
        """
        super(LTIOAuthServer, self).__init__(signature_methods)
        self.consumers = consumers

    def _lookup_config(self, key):
        """Return the raw config dict for *key*, or None (logging why)."""
        if not self.consumers:
            log.critical(("No consumers defined in settings."
                          "Have you created a configuration file?"))
            return None

        config = self.consumers.get(key)
        if not config:
            log.info("Did not find consumer, using key: %s ", key)
            return None
        return config

    def lookup_consumer(self, key):
        """
        Resolve *key* to an oauth2.Consumer, or None when the key is
        unknown or its secret is missing.
        """
        config = self._lookup_config(key)
        if config is None:
            return None

        secret = config.get('secret', None)
        if not secret:
            log.critical(('Consumer %s, is missing secret'
                          'in settings file, and needs correction.'), key)
            return None
        return oauth2.Consumer(key, secret)

    def lookup_cert(self, key):
        """
        Resolve *key* to its client certificate, or None when the key is
        unknown or has no cert configured.
        """
        config = self._lookup_config(key)
        if config is None:
            return None
        return config.get('cert', None)
class LTIException(Exception):
    """
    Base class for all LTI-specific errors raised by this module;
    catch it to handle any LTI failure, subclasses narrow the cause.
    """
class LTINotInSessionException(LTIException):
    """
    LTI error signalling that no authenticated LTI session is present.

    NOTE(review): not raised in this file -- presumably raised by the
    framework adapters during session verification; confirm there.
    """
    pass
class LTIRoleException(LTIException):
    """
    Raised when the LTI user doesn't have the required role
    (see LTIBase._check_role, which raises it as 'Not authorized.').
    """
    pass
class LTIPostMessageException(LTIException):
    """
    Raised when posting a result back to the LTI consumer fails
    (see LTIBase.post_grade / post_grade2).
    """
    pass
def post_message(consumers, lti_key, url, body):
    """
    Posts a signed message to LTI consumer

    :param consumers: consumers from config
    :param lti_key: key to find appropriate consumer
    :param url: post url
    :param body: xml body
    :return: success
    """
    _, content = _post_patched_request(
        consumers,
        lti_key,
        body,
        url,
        'POST',
        'application/xml',
    )

    # The consumer reports the outcome inside the POX response body.
    success = b"<imsx_codeMajor>success</imsx_codeMajor>" in content
    log.debug("is success %s", success)
    return success
def post_message2(consumers, lti_key, url, body,
                  method='POST', content_type='application/xml'):
    """
    Posts a signed message to LTI consumer using LTI 2.0 format

    :param: consumers: consumers from config
    :param: lti_key: key to find appropriate consumer
    :param: url: post url
    :param: body: xml body
    :return: success
    """
    # pylint: disable=too-many-arguments
    response, _ = _post_patched_request(
        consumers,
        lti_key,
        body,
        url,
        method,
        content_type,
    )

    # LTI 2.0 signals success via the HTTP status, not the body.
    success = response.status == 200
    log.debug("is success %s", success)
    return success
def verify_request_common(consumers, url, method, headers, params):
    """
    Verifies that request is valid

    :param consumers: consumers from config file
    :param url: request url
    :param method: request method
    :param headers: request headers
    :param params: request params
    :return: is request valid
    :raises: LTIException when the request is not a valid OAuth request
    """
    log.debug("consumers %s", consumers)
    log.debug("url %s", url)
    log.debug("method %s", method)
    log.debug("headers %s", headers)
    log.debug("params %s", params)

    server = LTIOAuthServer(consumers)
    server.add_signature_method(SignatureMethod_PLAINTEXT_Unicode())
    server.add_signature_method(SignatureMethod_HMAC_SHA1_Unicode())

    # Honour the proxy's SSL header before selecting the url.
    if headers.get('X-Forwarded-Proto', 'http') == 'https':
        url = url.replace('http:', 'https:', 1)

    oauth_request = Request_Fix_Duplicate.from_request(
        method,
        url,
        headers=dict(headers),
        parameters=params
    )
    if not oauth_request:
        log.info('Received non oauth request on oauth protected page')
        raise LTIException('This page requires a valid oauth session '
                           'or request')

    try:
        # pylint: disable=protected-access
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = server.lookup_consumer(consumer_key)
        if not consumer:
            raise oauth2.Error('Invalid consumer.')
        server.verify_request(oauth_request, consumer, None)
    except oauth2.Error:
        # Rethrow our own for nice error handling (don't print
        # error message as it will contain the key
        raise LTIException("OAuth error: Please check your key and secret")

    return True
def generate_request_xml(message_identifier_id, operation,
                         lis_result_sourcedid, score):
    # pylint: disable=too-many-locals
    """
    Generates LTI 1.1 XML for posting result to LTI consumer.

    :param message_identifier_id: unique identifier of the POX message
    :param operation: POX operation name, e.g. 'replaceResult'
    :param lis_result_sourcedid: result identifier from the LTI launch
    :param score: numeric score, or None to omit the result element
    :return: XML string
    """
    root = etree.Element(u'imsx_POXEnvelopeRequest',
                         xmlns=u'http://www.imsglobal.org/services/'
                               u'ltiv1p1/xsd/imsoms_v1p0')

    header = etree.SubElement(root, 'imsx_POXHeader')
    header_info = etree.SubElement(header, 'imsx_POXRequestHeaderInfo')
    version = etree.SubElement(header_info, 'imsx_version')
    version.text = 'V1.0'
    message_identifier = etree.SubElement(header_info,
                                          'imsx_messageIdentifier')
    message_identifier.text = message_identifier_id
    body = etree.SubElement(root, 'imsx_POXBody')
    # Plain concatenation instead of '%s%s' % (...) formatting.
    xml_request = etree.SubElement(body, operation + 'Request')
    record = etree.SubElement(xml_request, 'resultRecord')

    guid = etree.SubElement(record, 'sourcedGUID')

    sourcedid = etree.SubElement(guid, 'sourcedId')
    sourcedid.text = lis_result_sourcedid

    # A result block is only included when a score was supplied.
    if score is not None:
        result = etree.SubElement(record, 'result')
        result_score = etree.SubElement(result, 'resultScore')
        language = etree.SubElement(result_score, 'language')
        language.text = 'en'
        text_string = etree.SubElement(result_score, 'textString')
        # str() instead of calling the __str__ dunder explicitly.
        text_string.text = str(score)
    ret = "<?xml version='1.0' encoding='utf-8'?>\n{}".format(
        etree.tostring(root, encoding='utf-8').decode('utf-8'))
    log.debug("XML Response: \n%s", ret)
    return ret
class SignatureMethod_HMAC_SHA1_Unicode(oauth2.SignatureMethod_HMAC_SHA1):
    """
    Temporary workaround for
    https://github.com/joestump/python-oauth2/issues/207

    Original code is Copyright (c) 2007 Leah Culver, MIT license.
    """

    def check(self, request, consumer, token, signature):
        """
        Return True when *signature* equals the signature we compute for
        *request* on behalf of *consumer* and *token*; a unicode
        signature is UTF-8 encoded first so bytes compare to bytes.
        """
        expected = self.sign(request, consumer, token)
        if isinstance(signature, STRING_TYPES):
            return expected == signature.encode("utf8")
        return expected == signature
class SignatureMethod_PLAINTEXT_Unicode(oauth2.SignatureMethod_PLAINTEXT):
    """
    Temporary workaround for
    https://github.com/joestump/python-oauth2/issues/207

    Original code is Copyright (c) 2007 Leah Culver, MIT license.
    """

    def check(self, request, consumer, token, signature):
        """
        Return True when *signature* equals the signature we compute for
        *request* on behalf of *consumer* and *token*; a unicode
        signature is UTF-8 encoded first so bytes compare to bytes.
        """
        expected = self.sign(request, consumer, token)
        if isinstance(signature, STRING_TYPES):
            return expected == signature.encode("utf8")
        return expected == signature
class Request_Fix_Duplicate(oauth2.Request):
    """
    Temporary workaround for
    https://github.com/joestump/python-oauth2/pull/197

    Original code is Copyright (c) 2007 Leah Culver, MIT license.
    """

    def get_normalized_parameters(self):
        """
        Return a string that contains the parameters that must be signed.
        """
        items = []
        for key, value in self.items():
            # The signature itself is never part of the signed base string.
            if key == 'oauth_signature':
                continue
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if isinstance(value, STRING_TYPES):
                items.append(
                    (oauth2.to_utf8_if_string(key), oauth2.to_utf8(value))
                )
            else:
                try:
                    value = list(value)
                except TypeError as e:
                    # Non-iterable scalar (e.g. int): keep it as one item.
                    assert 'is not iterable' in str(e)
                    items.append(
                        (oauth2.to_utf8_if_string(key),
                         oauth2.to_utf8_if_string(value))
                    )
                else:
                    items.extend(
                        (oauth2.to_utf8_if_string(key),
                         oauth2.to_utf8_if_string(item))
                        for item in value
                    )

        # Include any query string parameters from the provided URL
        query = urlparse(self.url)[4]
        url_items = self._split_url_string(query).items()
        url_items = [
            (oauth2.to_utf8(k), oauth2.to_utf8_optional_iterator(v))
            for k, v in url_items if k != 'oauth_signature'
        ]

        # Merge together URL and POST parameters.
        # Eliminates parameters duplicated between URL and POST
        # (the fix this class exists for).
        items_dict = {}
        for k, v in items:
            items_dict.setdefault(k, []).append(v)
        for k, v in url_items:
            if not (k in items_dict and v in items_dict[k]):
                items.append((k, v))

        items.sort()

        encoded_str = urlencode(items, True)
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20').replace('%7E', '~')
class LTIBase(object):
    """
    LTI Object represents abstraction of current LTI session. It provides
    callback methods and methods that allow developer to inspect
    LTI basic-launch-request.

    This object is instantiated by @lti wrapper.

    NOTE(review): subclasses are expected to provide ``self.session``,
    ``self.response_url``, ``_consumers()``, ``verify_request()``,
    ``_verify_session()`` and ``_verify_any()`` -- none are defined
    here; confirm against the framework-specific subclasses.
    """

    def __init__(self, lti_args, lti_kwargs):
        # Arguments captured by the @lti decorator.
        self.lti_args = lti_args
        self.lti_kwargs = lti_kwargs
        # Cache the best-guess display name (see the ``name`` property).
        self.nickname = self.name

    @property
    def name(self):  # pylint: disable=no-self-use
        """
        Name returns user's name or user's email or user_id

        :return: best guess of name to use to greet user
        """
        # Prefer the LIS person id, then the primary email, then the
        # opaque user_id; fall back to an empty string.
        if 'lis_person_sourcedid' in self.session:
            return self.session['lis_person_sourcedid']
        elif 'lis_person_contact_email_primary' in self.session:
            return self.session['lis_person_contact_email_primary']
        elif 'user_id' in self.session:
            return self.session['user_id']
        else:
            return ''

    def verify(self):
        """
        Verify if LTI request is valid, validation
        depends on @lti wrapper arguments

        :return: True when verification succeeded
        :raises: LTIException
        """
        log.debug('verify request=%s', self.lti_kwargs.get('request'))
        # Dispatch on the decorator's request= argument
        # (see LTI_REQUEST_TYPE).
        if self.lti_kwargs.get('request') == 'session':
            self._verify_session()
        elif self.lti_kwargs.get('request') == 'initial':
            self.verify_request()
        elif self.lti_kwargs.get('request') == 'any':
            self._verify_any()
        else:
            raise LTIException("Unknown request type")
        return True

    @property
    def user_id(self):  # pylint: disable=no-self-use
        """
        Returns user_id as provided by LTI

        :return: user_id
        """
        return self.session['user_id']

    @property
    def key(self):  # pylint: disable=no-self-use
        """
        OAuth Consumer Key

        :return: key
        """
        return self.session['oauth_consumer_key']

    @staticmethod
    def message_identifier_id():
        """
        Message identifier to use for XML callback

        :return: non-empty string
        """
        # Hard-coded identifier kept for edX compatibility.
        return "edX_fix"

    @property
    def lis_result_sourcedid(self):  # pylint: disable=no-self-use
        """
        lis_result_sourcedid to use for XML callback

        :return: LTI lis_result_sourcedid
        """
        return self.session['lis_result_sourcedid']

    @property
    def role(self):  # pylint: disable=no-self-use
        """
        LTI roles

        :return: roles
        """
        return self.session.get('roles')

    @staticmethod
    def is_role(self, role):
        """
        Verify if user is in role

        :param: role: role to verify against
        :return: if user is in role
        :exception: LTIException if role is unknown

        NOTE(review): declared @staticmethod yet takes ``self``
        explicitly; callers invoke it as ``self.is_role(self, role)``
        (see _check_role). Works, but is unconventional.
        """
        log.debug("is_role %s", role)
        roles = self.session['roles'].split(',')
        if role in LTI_ROLES:
            role_list = LTI_ROLES[role]
            # find the intersection of the roles
            roles = set(role_list) & set(roles)
            is_user_role_there = len(roles) >= 1
            log.debug(
                "is_role roles_list=%s role=%s in list=%s", role_list,
                roles, is_user_role_there
            )
            return is_user_role_there
        else:
            raise LTIException("Unknown role {}.".format(role))

    def _check_role(self):
        """
        Check that user is in role specified as wrapper attribute

        :exception: LTIRoleException if user is not in roles
        """
        # Default 'any' skips the role check entirely.
        role = u'any'
        if 'role' in self.lti_kwargs:
            role = self.lti_kwargs['role']
        log.debug(
            "check_role lti_role=%s decorator_role=%s", self.role, role
        )
        if not (role == u'any' or self.is_role(self, role)):
            raise LTIRoleException('Not authorized.')

    def post_grade(self, grade):
        """
        Post grade to LTI consumer using XML

        :param: grade: 0 <= grade <= 1
        :return: True if post successful and grade valid
        :exception: LTIPostMessageException if call failed
        """
        message_identifier_id = self.message_identifier_id()
        operation = 'replaceResult'
        lis_result_sourcedid = self.lis_result_sourcedid
        # # edX devbox fix
        score = float(grade)
        if 0 <= score <= 1.0:
            xml = generate_request_xml(
                message_identifier_id, operation, lis_result_sourcedid,
                score)
            ret = post_message(self._consumers(), self.key,
                               self.response_url, xml)
            if not ret:
                raise LTIPostMessageException("Post Message Failed")
            return True

        # Out-of-range grade: nothing posted.
        return False

    def post_grade2(self, grade, user=None, comment=''):
        """
        Post grade to LTI consumer using REST/JSON
        URL munging will is related to:
        https://openedx.atlassian.net/browse/PLAT-281

        :param: grade: 0 <= grade <= 1
        :return: True if post successful and grade valid
        :exception: LTIPostMessageException if call failed
        """
        content_type = 'application/vnd.ims.lis.v2.result+json'
        if user is None:
            user = self.user_id
        # Rewrite the LTI 1.x outcome URL into the LTI 2.0 REST endpoint.
        lti2_url = self.response_url.replace(
            "/grade_handler",
            "/lti_2_0_result_rest_handler/user/{}".format(user))
        score = float(grade)
        if 0 <= score <= 1.0:
            body = json.dumps({
                "@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
                "@type": "Result",
                "resultScore": score,
                "comment": comment
            })
            ret = post_message2(self._consumers(), self.key, lti2_url, body,
                                method='PUT',
                                content_type=content_type)
            if not ret:
                raise LTIPostMessageException("Post Message Failed")
            return True

        # Out-of-range grade: nothing posted.
        return False
|
mitodl/pylti | pylti/common.py | post_message | python | def post_message(consumers, lti_key, url, body):
content_type = 'application/xml'
method = 'POST'
(_, content) = _post_patched_request(
consumers,
lti_key,
body,
url,
method,
content_type,
)
is_success = b"<imsx_codeMajor>success</imsx_codeMajor>" in content
log.debug("is success %s", is_success)
return is_success | Posts a signed message to LTI consumer
:param consumers: consumers from config
:param lti_key: key to find appropriate consumer
:param url: post url
:param body: xml body
:return: success | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L206-L229 | [
"def _post_patched_request(consumers, lti_key, body,\n url, method, content_type):\n \"\"\"\n Authorization header needs to be capitalized for some LTI clients\n this function ensures that header is capitalized\n\n :param body: body of the call\n :param client: OAuth Client\n :param url: outcome url\n :return: response\n \"\"\"\n # pylint: disable=too-many-locals, too-many-arguments\n oauth_server = LTIOAuthServer(consumers)\n oauth_server.add_signature_method(SignatureMethod_HMAC_SHA1_Unicode())\n lti_consumer = oauth_server.lookup_consumer(lti_key)\n lti_cert = oauth_server.lookup_cert(lti_key)\n secret = lti_consumer.secret\n\n consumer = oauth2.Consumer(key=lti_key, secret=secret)\n client = oauth2.Client(consumer)\n\n if lti_cert:\n client.add_certificate(key=lti_cert, cert=lti_cert, domain='')\n log.debug(\"cert %s\", lti_cert)\n\n import httplib2\n\n http = httplib2.Http\n # pylint: disable=protected-access\n normalize = http._normalize_headers\n\n def my_normalize(self, headers):\n \"\"\" This function patches Authorization header \"\"\"\n ret = normalize(self, headers)\n if 'authorization' in ret:\n ret['Authorization'] = ret.pop('authorization')\n log.debug(\"headers\")\n log.debug(headers)\n return ret\n\n http._normalize_headers = my_normalize\n monkey_patch_function = normalize\n response, content = client.request(\n url,\n method,\n body=body.encode('utf-8'),\n headers={'Content-Type': content_type})\n\n http = httplib2.Http\n # pylint: disable=protected-access\n http._normalize_headers = monkey_patch_function\n\n log.debug(\"key %s\", lti_key)\n log.debug(\"secret %s\", secret)\n log.debug(\"url %s\", url)\n log.debug(\"response %s\", response)\n log.debug(\"content %s\", format(content))\n\n return response, content\n"
] | # -*- coding: utf-8 -*-
"""
Common classes and methods for PyLTI module
"""
from __future__ import absolute_import
import logging
import json
import oauth2
from xml.etree import ElementTree as etree
from oauth2 import STRING_TYPES
from six.moves.urllib.parse import urlparse, urlencode
log = logging.getLogger('pylti.common') # pylint: disable=invalid-name
LTI_PROPERTY_LIST = [
'oauth_consumer_key',
'launch_presentation_return_url',
'user_id',
'oauth_nonce',
'context_label',
'context_id',
'resource_link_title',
'resource_link_id',
'lis_person_contact_email_primary',
'lis_person_contact_emailprimary',
'lis_person_name_full',
'lis_person_name_family',
'lis_person_name_given',
'lis_result_sourcedid',
'lis_person_sourcedid',
'launch_type',
'lti_message',
'lti_version',
'roles',
'lis_outcome_service_url'
]
LTI_ROLES = {
u'staff': [u'Administrator', u'Instructor', ],
u'instructor': [u'Instructor', ],
u'administrator': [u'Administrator', ],
u'student': [u'Student', u'Learner', ]
# There is also a special role u'any' that ignores role check
}
LTI_SESSION_KEY = u'lti_authenticated'
LTI_REQUEST_TYPE = [u'any', u'initial', u'session']
def default_error(exception=None):
"""Render simple error page. This should be overidden in applications."""
# pylint: disable=unused-argument
log.exception("There was an LTI communication error")
return "There was an LTI communication error", 500
class LTIOAuthServer(oauth2.Server):
"""
Largely taken from reference implementation
for app engine at https://code.google.com/p/ims-dev/
"""
def __init__(self, consumers, signature_methods=None):
"""
Create OAuth server
"""
super(LTIOAuthServer, self).__init__(signature_methods)
self.consumers = consumers
def lookup_consumer(self, key):
"""
Search through keys
"""
if not self.consumers:
log.critical(("No consumers defined in settings."
"Have you created a configuration file?"))
return None
consumer = self.consumers.get(key)
if not consumer:
log.info("Did not find consumer, using key: %s ", key)
return None
secret = consumer.get('secret', None)
if not secret:
log.critical(('Consumer %s, is missing secret'
'in settings file, and needs correction.'), key)
return None
return oauth2.Consumer(key, secret)
def lookup_cert(self, key):
"""
Search through keys
"""
if not self.consumers:
log.critical(("No consumers defined in settings."
"Have you created a configuration file?"))
return None
consumer = self.consumers.get(key)
if not consumer:
log.info("Did not find consumer, using key: %s ", key)
return None
cert = consumer.get('cert', None)
return cert
class LTIException(Exception):
"""
Custom LTI exception for proper handling
of LTI specific errors
"""
pass
class LTINotInSessionException(LTIException):
"""
Custom LTI exception for proper handling
of LTI specific errors
"""
pass
class LTIRoleException(LTIException):
"""
Exception class for when LTI user doesn't have the
right role.
"""
pass
class LTIPostMessageException(LTIException):
"""
Exception class for when LTI user doesn't have the
right role.
"""
pass
def _post_patched_request(consumers, lti_key, body,
                          url, method, content_type):
    """
    Authorization header needs to be capitalized for some LTI clients
    this function ensures that header is capitalized

    :param consumers: consumers from config
    :param lti_key: key to find appropriate consumer
    :param body: body of the call
    :param url: outcome url
    :param method: HTTP method, e.g. 'POST'
    :param content_type: value for the Content-Type header
    :return: (response, content) pair from httplib2
    """
    # pylint: disable=too-many-locals, too-many-arguments
    oauth_server = LTIOAuthServer(consumers)
    oauth_server.add_signature_method(SignatureMethod_HMAC_SHA1_Unicode())
    lti_consumer = oauth_server.lookup_consumer(lti_key)
    lti_cert = oauth_server.lookup_cert(lti_key)
    # NOTE(review): lookup_consumer can return None for an unknown key,
    # which would raise AttributeError here rather than a clean error.
    secret = lti_consumer.secret

    consumer = oauth2.Consumer(key=lti_key, secret=secret)
    client = oauth2.Client(consumer)

    if lti_cert:
        # Client certificate for consumers requiring TLS client auth.
        client.add_certificate(key=lti_cert, cert=lti_cert, domain='')
        log.debug("cert %s", lti_cert)

    import httplib2

    http = httplib2.Http
    # pylint: disable=protected-access
    normalize = http._normalize_headers

    def my_normalize(self, headers):
        """ This function patches Authorization header """
        ret = normalize(self, headers)
        if 'authorization' in ret:
            # Re-insert under the capitalized key some clients expect.
            ret['Authorization'] = ret.pop('authorization')
        log.debug("headers")
        log.debug(headers)
        return ret

    # Temporarily monkey-patch httplib2's header normalization for this
    # one request; the original function is restored right after.
    http._normalize_headers = my_normalize
    monkey_patch_function = normalize
    response, content = client.request(
        url,
        method,
        body=body.encode('utf-8'),
        headers={'Content-Type': content_type})

    http = httplib2.Http
    # pylint: disable=protected-access
    http._normalize_headers = monkey_patch_function

    log.debug("key %s", lti_key)
    log.debug("secret %s", secret)
    log.debug("url %s", url)
    log.debug("response %s", response)
    log.debug("content %s", format(content))

    return response, content
def post_message2(consumers, lti_key, url, body,
method='POST', content_type='application/xml'):
"""
Posts a signed message to LTI consumer using LTI 2.0 format
:param: consumers: consumers from config
:param: lti_key: key to find appropriate consumer
:param: url: post url
:param: body: xml body
:return: success
"""
# pylint: disable=too-many-arguments
(response, _) = _post_patched_request(
consumers,
lti_key,
body,
url,
method,
content_type,
)
is_success = response.status == 200
log.debug("is success %s", is_success)
return is_success
def verify_request_common(consumers, url, method, headers, params):
"""
Verifies that request is valid
:param consumers: consumers from config file
:param url: request url
:param method: request method
:param headers: request headers
:param params: request params
:return: is request valid
"""
log.debug("consumers %s", consumers)
log.debug("url %s", url)
log.debug("method %s", method)
log.debug("headers %s", headers)
log.debug("params %s", params)
oauth_server = LTIOAuthServer(consumers)
oauth_server.add_signature_method(
SignatureMethod_PLAINTEXT_Unicode())
oauth_server.add_signature_method(
SignatureMethod_HMAC_SHA1_Unicode())
# Check header for SSL before selecting the url
if headers.get('X-Forwarded-Proto', 'http') == 'https':
url = url.replace('http:', 'https:', 1)
oauth_request = Request_Fix_Duplicate.from_request(
method,
url,
headers=dict(headers),
parameters=params
)
if not oauth_request:
log.info('Received non oauth request on oauth protected page')
raise LTIException('This page requires a valid oauth session '
'or request')
try:
# pylint: disable=protected-access
oauth_consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = oauth_server.lookup_consumer(oauth_consumer_key)
if not consumer:
raise oauth2.Error('Invalid consumer.')
oauth_server.verify_request(oauth_request, consumer, None)
except oauth2.Error:
# Rethrow our own for nice error handling (don't print
# error message as it will contain the key
raise LTIException("OAuth error: Please check your key and secret")
return True
def generate_request_xml(message_identifier_id, operation,
                         lis_result_sourcedid, score):
    # pylint: disable=too-many-locals
    """
    Generates LTI 1.1 XML for posting result to LTI consumer.

    :param message_identifier_id: unique id echoed back by the consumer
    :param operation: POX operation name, e.g. ``replaceResult`` (the
        literal string ``Request`` is appended to form the element name)
    :param lis_result_sourcedid: sourcedid identifying the result row
    :param score: numeric score, or None to omit the ``<result>`` element
    :return: XML string
    """
    root = etree.Element(u'imsx_POXEnvelopeRequest',
                         xmlns=u'http://www.imsglobal.org/services/'
                         u'ltiv1p1/xsd/imsoms_v1p0')
    header = etree.SubElement(root, 'imsx_POXHeader')
    header_info = etree.SubElement(header, 'imsx_POXRequestHeaderInfo')
    version = etree.SubElement(header_info, 'imsx_version')
    version.text = 'V1.0'
    message_identifier = etree.SubElement(header_info,
                                          'imsx_messageIdentifier')
    message_identifier.text = message_identifier_id
    body = etree.SubElement(root, 'imsx_POXBody')
    xml_request = etree.SubElement(body, '%s%s' % (operation, 'Request'))
    record = etree.SubElement(xml_request, 'resultRecord')
    guid = etree.SubElement(record, 'sourcedGUID')
    sourcedid = etree.SubElement(guid, 'sourcedId')
    sourcedid.text = lis_result_sourcedid
    # Only emit a result/resultScore subtree when a score was supplied.
    if score is not None:
        result = etree.SubElement(record, 'result')
        result_score = etree.SubElement(result, 'resultScore')
        language = etree.SubElement(result_score, 'language')
        language.text = 'en'
        text_string = etree.SubElement(result_score, 'textString')
        # str() instead of score.__str__() -- identical result, idiomatic.
        text_string.text = str(score)
    # NOTE(review): tostring(..., encoding='utf-8') already emits an XML
    # declaration, so the prepended declaration appears twice; preserved
    # here for backward compatibility with existing consumers.
    ret = "<?xml version='1.0' encoding='utf-8'?>\n{}".format(
        etree.tostring(root, encoding='utf-8').decode('utf-8'))
    log.debug("XML Response: \n%s", ret)
    return ret
class SignatureMethod_HMAC_SHA1_Unicode(oauth2.SignatureMethod_HMAC_SHA1):
    """
    Temporary workaround for
    https://github.com/joestump/python-oauth2/issues/207

    Original code is Copyright (c) 2007 Leah Culver, MIT license.
    """
    def check(self, request, consumer, token, signature):
        """
        Return True when ``signature`` matches the signature this method
        computes for ``request`` with the given consumer and token.
        """
        expected = self.sign(request, consumer, token)
        supplied = signature.encode("utf8") \
            if isinstance(signature, STRING_TYPES) else signature
        return expected == supplied
class SignatureMethod_PLAINTEXT_Unicode(oauth2.SignatureMethod_PLAINTEXT):
    """
    Temporary workaround for
    https://github.com/joestump/python-oauth2/issues/207

    Original code is Copyright (c) 2007 Leah Culver, MIT license.
    """
    def check(self, request, consumer, token, signature):
        """
        Return True when ``signature`` is the correct signature for the
        given consumer/token over ``request``.
        """
        if isinstance(signature, STRING_TYPES):
            signature = signature.encode("utf8")
        return self.sign(request, consumer, token) == signature
class Request_Fix_Duplicate(oauth2.Request):
    """
    Temporary workaround for
    https://github.com/joestump/python-oauth2/pull/197

    Original code is Copyright (c) 2007 Leah Culver, MIT license.
    """
    def get_normalized_parameters(self):
        """
        Return a string that contains the parameters that must be signed.

        Unlike upstream oauth2.Request, parameters that appear both in the
        URL query string and in the request body are included only once,
        so the signature base string matches what the consumer signed.
        """
        items = []
        for key, value in self.items():
            # The signature itself is never part of the signed base string.
            if key == 'oauth_signature':
                continue
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if isinstance(value, STRING_TYPES):
                items.append(
                    (oauth2.to_utf8_if_string(key), oauth2.to_utf8(value))
                )
            else:
                try:
                    value = list(value)
                except TypeError as e:
                    # Non-iterable scalar (e.g. int): keep as a single pair.
                    assert 'is not iterable' in str(e)
                    items.append(
                        (oauth2.to_utf8_if_string(key),
                         oauth2.to_utf8_if_string(value))
                    )
                else:
                    # Iterable value: one (key, item) pair per element.
                    items.extend(
                        (oauth2.to_utf8_if_string(key),
                         oauth2.to_utf8_if_string(item))
                        for item in value
                    )
        # Include any query string parameters from the provided URL
        query = urlparse(self.url)[4]
        url_items = self._split_url_string(query).items()
        url_items = [
            (oauth2.to_utf8(k), oauth2.to_utf8_optional_iterator(v))
            for k, v in url_items if k != 'oauth_signature'
        ]
        # Merge together URL and POST parameters.
        # Eliminates parameters duplicated between URL and POST.
        items_dict = {}
        for k, v in items:
            items_dict.setdefault(k, []).append(v)
        for k, v in url_items:
            if not (k in items_dict and v in items_dict[k]):
                items.append((k, v))
        items.sort()
        encoded_str = urlencode(items, True)
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20').replace('%7E', '~')
class LTIBase(object):
    """
    LTI Object represents abstraction of current LTI session. It provides
    callback methods and methods that allow developer to inspect
    LTI basic-launch-request.

    This object is instantiated by @lti wrapper.

    NOTE(review): this base class reads ``self.session`` and relies on
    ``self._consumers()``, ``self.response_url``, ``self._verify_session()``,
    ``self.verify_request()`` and ``self._verify_any()``, none of which are
    defined here -- they must be supplied by a framework-specific subclass;
    confirm against the concrete subclass in use.
    """
    def __init__(self, lti_args, lti_kwargs):
        # Arguments given to the @lti decorator; ``lti_kwargs`` may carry
        # ``request`` (verification mode, see verify()) and ``role``
        # (required role, see _check_role()).
        self.lti_args = lti_args
        self.lti_kwargs = lti_kwargs
        self.nickname = self.name
    @property
    def name(self): # pylint: disable=no-self-use
        """
        Name returns user's name or user's email or user_id

        :return: best guess of name to use to greet user
        """
        if 'lis_person_sourcedid' in self.session:
            return self.session['lis_person_sourcedid']
        elif 'lis_person_contact_email_primary' in self.session:
            return self.session['lis_person_contact_email_primary']
        elif 'user_id' in self.session:
            return self.session['user_id']
        else:
            return ''
    def verify(self):
        """
        Verify if LTI request is valid, validation
        depends on @lti wrapper arguments

        :return: True when verification succeeded
        :raises: LTIException
        """
        log.debug('verify request=%s', self.lti_kwargs.get('request'))
        if self.lti_kwargs.get('request') == 'session':
            self._verify_session()
        elif self.lti_kwargs.get('request') == 'initial':
            self.verify_request()
        elif self.lti_kwargs.get('request') == 'any':
            self._verify_any()
        else:
            raise LTIException("Unknown request type")
        return True
    @property
    def user_id(self): # pylint: disable=no-self-use
        """
        Returns user_id as provided by LTI

        :return: user_id
        """
        return self.session['user_id']
    @property
    def key(self): # pylint: disable=no-self-use
        """
        OAuth Consumer Key

        :return: key
        """
        return self.session['oauth_consumer_key']
    @staticmethod
    def message_identifier_id():
        """
        Message identifier to use for XML callback

        :return: non-empty string
        """
        return "edX_fix"
    @property
    def lis_result_sourcedid(self): # pylint: disable=no-self-use
        """
        lis_result_sourcedid to use for XML callback

        :return: LTI lis_result_sourcedid
        """
        return self.session['lis_result_sourcedid']
    @property
    def role(self): # pylint: disable=no-self-use
        """
        LTI roles

        :return: roles (comma-separated string from the launch, or None)
        """
        return self.session.get('roles')
    @staticmethod
    def is_role(self, role):
        """
        Verify if user is in role

        NOTE(review): declared ``@staticmethod`` yet takes ``self``
        explicitly, so callers pass the instance themselves (see
        ``_check_role``). Changing it would break existing callers, so
        the quirk is documented rather than fixed.

        :param: role: role to verify against
        :return: if user is in role
        :exception: LTIException if role is unknown
        """
        log.debug("is_role %s", role)
        roles = self.session['roles'].split(',')
        if role in LTI_ROLES:
            role_list = LTI_ROLES[role]
            # find the intersection of the roles
            roles = set(role_list) & set(roles)
            is_user_role_there = len(roles) >= 1
            log.debug(
                "is_role roles_list=%s role=%s in list=%s", role_list,
                roles, is_user_role_there
            )
            return is_user_role_there
        else:
            raise LTIException("Unknown role {}.".format(role))
    def _check_role(self):
        """
        Check that user is in role specified as wrapper attribute

        :exception: LTIRoleException if user is not in roles
        """
        # Default role u'any' skips the check entirely.
        role = u'any'
        if 'role' in self.lti_kwargs:
            role = self.lti_kwargs['role']
        log.debug(
            "check_role lti_role=%s decorator_role=%s", self.role, role
        )
        if not (role == u'any' or self.is_role(self, role)):
            raise LTIRoleException('Not authorized.')
    def post_grade(self, grade):
        """
        Post grade to LTI consumer using XML

        :param: grade: 0 <= grade <= 1
        :return: True if post successful and grade valid
        :exception: LTIPostMessageException if call failed
        """
        message_identifier_id = self.message_identifier_id()
        operation = 'replaceResult'
        lis_result_sourcedid = self.lis_result_sourcedid
        # # edX devbox fix
        score = float(grade)
        if 0 <= score <= 1.0:
            xml = generate_request_xml(
                message_identifier_id, operation, lis_result_sourcedid,
                score)
            ret = post_message(self._consumers(), self.key,
                               self.response_url, xml)
            if not ret:
                raise LTIPostMessageException("Post Message Failed")
            return True
        # Out-of-range grade: nothing was posted.
        return False
    def post_grade2(self, grade, user=None, comment=''):
        """
        Post grade to LTI consumer using REST/JSON
        URL munging will is related to:
        https://openedx.atlassian.net/browse/PLAT-281

        :param: grade: 0 <= grade <= 1
        :param: user: user id to post for; defaults to the session user_id
        :param: comment: optional comment sent along with the result
        :return: True if post successful and grade valid
        :exception: LTIPostMessageException if call failed
        """
        content_type = 'application/vnd.ims.lis.v2.result+json'
        if user is None:
            user = self.user_id
        lti2_url = self.response_url.replace(
            "/grade_handler",
            "/lti_2_0_result_rest_handler/user/{}".format(user))
        score = float(grade)
        if 0 <= score <= 1.0:
            body = json.dumps({
                "@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
                "@type": "Result",
                "resultScore": score,
                "comment": comment
            })
            ret = post_message2(self._consumers(), self.key, lti2_url, body,
                                method='PUT',
                                content_type=content_type)
            if not ret:
                raise LTIPostMessageException("Post Message Failed")
            return True
        return False
|
mitodl/pylti | pylti/common.py | post_message2 | python | def post_message2(consumers, lti_key, url, body,
method='POST', content_type='application/xml'):
# pylint: disable=too-many-arguments
(response, _) = _post_patched_request(
consumers,
lti_key,
body,
url,
method,
content_type,
)
is_success = response.status == 200
log.debug("is success %s", is_success)
return is_success | Posts a signed message to LTI consumer using LTI 2.0 format
:param: consumers: consumers from config
:param: lti_key: key to find appropriate consumer
:param: url: post url
:param: body: xml body
:return: success | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L232-L256 | [
"def _post_patched_request(consumers, lti_key, body,\n url, method, content_type):\n \"\"\"\n Authorization header needs to be capitalized for some LTI clients\n this function ensures that header is capitalized\n\n :param body: body of the call\n :param client: OAuth Client\n :param url: outcome url\n :return: response\n \"\"\"\n # pylint: disable=too-many-locals, too-many-arguments\n oauth_server = LTIOAuthServer(consumers)\n oauth_server.add_signature_method(SignatureMethod_HMAC_SHA1_Unicode())\n lti_consumer = oauth_server.lookup_consumer(lti_key)\n lti_cert = oauth_server.lookup_cert(lti_key)\n secret = lti_consumer.secret\n\n consumer = oauth2.Consumer(key=lti_key, secret=secret)\n client = oauth2.Client(consumer)\n\n if lti_cert:\n client.add_certificate(key=lti_cert, cert=lti_cert, domain='')\n log.debug(\"cert %s\", lti_cert)\n\n import httplib2\n\n http = httplib2.Http\n # pylint: disable=protected-access\n normalize = http._normalize_headers\n\n def my_normalize(self, headers):\n \"\"\" This function patches Authorization header \"\"\"\n ret = normalize(self, headers)\n if 'authorization' in ret:\n ret['Authorization'] = ret.pop('authorization')\n log.debug(\"headers\")\n log.debug(headers)\n return ret\n\n http._normalize_headers = my_normalize\n monkey_patch_function = normalize\n response, content = client.request(\n url,\n method,\n body=body.encode('utf-8'),\n headers={'Content-Type': content_type})\n\n http = httplib2.Http\n # pylint: disable=protected-access\n http._normalize_headers = monkey_patch_function\n\n log.debug(\"key %s\", lti_key)\n log.debug(\"secret %s\", secret)\n log.debug(\"url %s\", url)\n log.debug(\"response %s\", response)\n log.debug(\"content %s\", format(content))\n\n return response, content\n"
] | # -*- coding: utf-8 -*-
"""
Common classes and methods for PyLTI module
"""
from __future__ import absolute_import
import logging
import json
import oauth2
from xml.etree import ElementTree as etree
from oauth2 import STRING_TYPES
from six.moves.urllib.parse import urlparse, urlencode
# Module-level logger shared by every helper in this module.
log = logging.getLogger('pylti.common')  # pylint: disable=invalid-name

# LTI launch-request parameters of interest to the framework adapters.
# NOTE(review): both 'lis_person_contact_email_primary' and the
# underscore-less 'lis_person_contact_emailprimary' are listed --
# presumably to tolerate misbehaving consumers; confirm before removing.
LTI_PROPERTY_LIST = [
    'oauth_consumer_key',
    'launch_presentation_return_url',
    'user_id',
    'oauth_nonce',
    'context_label',
    'context_id',
    'resource_link_title',
    'resource_link_id',
    'lis_person_contact_email_primary',
    'lis_person_contact_emailprimary',
    'lis_person_name_full',
    'lis_person_name_family',
    'lis_person_name_given',
    'lis_result_sourcedid',
    'lis_person_sourcedid',
    'launch_type',
    'lti_message',
    'lti_version',
    'roles',
    'lis_outcome_service_url'
]

# Mapping of pylti's coarse role names to the LTI role values that
# satisfy them (used by LTIBase.is_role).
LTI_ROLES = {
    u'staff': [u'Administrator', u'Instructor', ],
    u'instructor': [u'Instructor', ],
    u'administrator': [u'Administrator', ],
    u'student': [u'Student', u'Learner', ]
    # There is also a special role u'any' that ignores role check
}

# Session flag set once an LTI launch has been verified.
LTI_SESSION_KEY = u'lti_authenticated'

# Accepted values for the @lti decorator's ``request`` argument.
LTI_REQUEST_TYPE = [u'any', u'initial', u'session']
def default_error(exception=None):
    """Render a simple error page; applications should override this."""
    # pylint: disable=unused-argument
    log.exception("There was an LTI communication error")
    message = "There was an LTI communication error"
    return message, 500
class LTIOAuthServer(oauth2.Server):
    """
    Largely taken from reference implementation
    for app engine at https://code.google.com/p/ims-dev/
    """
    def __init__(self, consumers, signature_methods=None):
        """
        Create OAuth server

        :param consumers: mapping of consumer key -> settings dict with
            at least a ``secret`` entry (optionally ``cert``)
        :param signature_methods: optional signature methods forwarded to
            ``oauth2.Server``
        """
        super(LTIOAuthServer, self).__init__(signature_methods)
        self.consumers = consumers
    def lookup_consumer(self, key):
        """
        Search through keys

        :return: ``oauth2.Consumer`` for ``key``, or None when the key
            is unknown or has no secret configured (both cases logged)
        """
        if not self.consumers:
            log.critical(("No consumers defined in settings."
                          "Have you created a configuration file?"))
            return None
        consumer = self.consumers.get(key)
        if not consumer:
            log.info("Did not find consumer, using key: %s ", key)
            return None
        secret = consumer.get('secret', None)
        if not secret:
            log.critical(('Consumer %s, is missing secret'
                          'in settings file, and needs correction.'), key)
            return None
        return oauth2.Consumer(key, secret)
    def lookup_cert(self, key):
        """
        Search through keys

        :return: the consumer's ``cert`` setting, or None when the key
            is unknown or no cert is configured
        """
        if not self.consumers:
            log.critical(("No consumers defined in settings."
                          "Have you created a configuration file?"))
            return None
        consumer = self.consumers.get(key)
        if not consumer:
            log.info("Did not find consumer, using key: %s ", key)
            return None
        cert = consumer.get('cert', None)
        return cert
class LTIException(Exception):
    """
    Custom LTI exception for proper handling
    of LTI specific errors. Base class for all errors
    raised by this module.
    """
    pass
class LTINotInSessionException(LTIException):
    """
    Custom LTI exception raised when LTI session credentials are
    required but absent (raising sites live in the framework
    adapters, not in this module -- confirm there).
    """
    pass
class LTIRoleException(LTIException):
    """
    Exception class for when LTI user doesn't have the
    right role. Raised by ``LTIBase._check_role`` when the session's
    roles do not satisfy the role required by the @lti decorator.
    """
    pass
class LTIPostMessageException(LTIException):
    """
    Raised when posting a grade/result back to the LTI consumer fails
    (see ``LTIBase.post_grade`` / ``post_grade2``). The original
    docstring here was a copy-paste of LTIRoleException's.
    """
    pass
def _post_patched_request(consumers, lti_key, body,
                          url, method, content_type):
    """
    Authorization header needs to be capitalized for some LTI clients
    this function ensures that header is capitalized

    :param consumers: consumers from config
    :param lti_key: key to find appropriate consumer
    :param body: body of the call
    :param url: outcome url
    :param method: HTTP method, e.g. 'POST' or 'PUT'
    :param content_type: Content-Type header value for the request
    :return: (response, content) tuple as returned by httplib2
    """
    # pylint: disable=too-many-locals, too-many-arguments
    oauth_server = LTIOAuthServer(consumers)
    oauth_server.add_signature_method(SignatureMethod_HMAC_SHA1_Unicode())
    lti_consumer = oauth_server.lookup_consumer(lti_key)
    lti_cert = oauth_server.lookup_cert(lti_key)
    secret = lti_consumer.secret
    consumer = oauth2.Consumer(key=lti_key, secret=secret)
    client = oauth2.Client(consumer)
    # If a client certificate is configured for this consumer, attach it
    # to the outgoing connection.
    if lti_cert:
        client.add_certificate(key=lti_cert, cert=lti_cert, domain='')
        log.debug("cert %s", lti_cert)
    import httplib2
    http = httplib2.Http
    # pylint: disable=protected-access
    # Monkey-patch httplib2's header normalization so the lower-cased
    # 'authorization' header goes out as 'Authorization'.
    normalize = http._normalize_headers
    def my_normalize(self, headers):
        """ This function patches Authorization header """
        ret = normalize(self, headers)
        if 'authorization' in ret:
            ret['Authorization'] = ret.pop('authorization')
        log.debug("headers")
        log.debug(headers)
        return ret
    http._normalize_headers = my_normalize
    monkey_patch_function = normalize
    response, content = client.request(
        url,
        method,
        body=body.encode('utf-8'),
        headers={'Content-Type': content_type})
    http = httplib2.Http
    # pylint: disable=protected-access
    # Restore the original normalization function after the request.
    http._normalize_headers = monkey_patch_function
    log.debug("key %s", lti_key)
    log.debug("secret %s", secret)
    log.debug("url %s", url)
    log.debug("response %s", response)
    log.debug("content %s", format(content))
    return response, content
def post_message(consumers, lti_key, url, body):
    """
    Posts a signed message to LTI consumer

    :param consumers: consumers from config
    :param lti_key: key to find appropriate consumer
    :param url: post url
    :param body: xml body
    :return: success
    """
    (_, content) = _post_patched_request(
        consumers,
        lti_key,
        body,
        url,
        'POST',
        'application/xml',
    )
    # The POX response reports success inside imsx_codeMajor.
    is_success = b"<imsx_codeMajor>success</imsx_codeMajor>" in content
    log.debug("is success %s", is_success)
    return is_success
def verify_request_common(consumers, url, method, headers, params):
    """
    Verifies that request is valid

    :param consumers: consumers from config file
    :param url: request url
    :param method: request method
    :param headers: request headers
    :param params: request params
    :return: is request valid
    :raises LTIException: if the request is not a signed oauth request
        or the signature does not verify
    """
    log.debug("consumers %s", consumers)
    log.debug("url %s", url)
    log.debug("method %s", method)
    log.debug("headers %s", headers)
    log.debug("params %s", params)
    oauth_server = LTIOAuthServer(consumers)
    oauth_server.add_signature_method(
        SignatureMethod_PLAINTEXT_Unicode())
    oauth_server.add_signature_method(
        SignatureMethod_HMAC_SHA1_Unicode())
    # Check header for SSL before selecting the url
    # (behind a TLS terminator the consumer signed the https URL)
    if headers.get('X-Forwarded-Proto', 'http') == 'https':
        url = url.replace('http:', 'https:', 1)
    oauth_request = Request_Fix_Duplicate.from_request(
        method,
        url,
        headers=dict(headers),
        parameters=params
    )
    if not oauth_request:
        log.info('Received non oauth request on oauth protected page')
        raise LTIException('This page requires a valid oauth session '
                           'or request')
    try:
        # pylint: disable=protected-access
        oauth_consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = oauth_server.lookup_consumer(oauth_consumer_key)
        if not consumer:
            raise oauth2.Error('Invalid consumer.')
        oauth_server.verify_request(oauth_request, consumer, None)
    except oauth2.Error:
        # Rethrow our own for nice error handling (don't print
        # error message as it will contain the key
        raise LTIException("OAuth error: Please check your key and secret")
    return True
def generate_request_xml(message_identifier_id, operation,
                         lis_result_sourcedid, score):
    # pylint: disable=too-many-locals
    """
    Generates LTI 1.1 XML for posting result to LTI consumer.

    :param message_identifier_id: unique id echoed back by the consumer
    :param operation: POX operation name, e.g. ``replaceResult`` (the
        literal string ``Request`` is appended to form the element name)
    :param lis_result_sourcedid: sourcedid identifying the result row
    :param score: numeric score, or None to omit the ``<result>`` element
    :return: XML string
    """
    root = etree.Element(u'imsx_POXEnvelopeRequest',
                         xmlns=u'http://www.imsglobal.org/services/'
                         u'ltiv1p1/xsd/imsoms_v1p0')
    header = etree.SubElement(root, 'imsx_POXHeader')
    header_info = etree.SubElement(header, 'imsx_POXRequestHeaderInfo')
    version = etree.SubElement(header_info, 'imsx_version')
    version.text = 'V1.0'
    message_identifier = etree.SubElement(header_info,
                                          'imsx_messageIdentifier')
    message_identifier.text = message_identifier_id
    body = etree.SubElement(root, 'imsx_POXBody')
    xml_request = etree.SubElement(body, '%s%s' % (operation, 'Request'))
    record = etree.SubElement(xml_request, 'resultRecord')
    guid = etree.SubElement(record, 'sourcedGUID')
    sourcedid = etree.SubElement(guid, 'sourcedId')
    sourcedid.text = lis_result_sourcedid
    # Only emit a result/resultScore subtree when a score was supplied.
    if score is not None:
        result = etree.SubElement(record, 'result')
        result_score = etree.SubElement(result, 'resultScore')
        language = etree.SubElement(result_score, 'language')
        language.text = 'en'
        text_string = etree.SubElement(result_score, 'textString')
        # str() instead of score.__str__() -- identical result, idiomatic.
        text_string.text = str(score)
    # NOTE(review): tostring(..., encoding='utf-8') already emits an XML
    # declaration, so the prepended declaration appears twice; preserved
    # here for backward compatibility with existing consumers.
    ret = "<?xml version='1.0' encoding='utf-8'?>\n{}".format(
        etree.tostring(root, encoding='utf-8').decode('utf-8'))
    log.debug("XML Response: \n%s", ret)
    return ret
class SignatureMethod_HMAC_SHA1_Unicode(oauth2.SignatureMethod_HMAC_SHA1):
    """
    Temporary workaround for
    https://github.com/joestump/python-oauth2/issues/207

    Original code is Copyright (c) 2007 Leah Culver, MIT license.
    """
    def check(self, request, consumer, token, signature):
        """
        Returns whether the given signature is the correct signature for
        the given consumer and token signing the given request.
        """
        built = self.sign(request, consumer, token)
        # The workaround: upstream compares bytes to str; encode str
        # signatures to bytes first so the comparison is well defined.
        if isinstance(signature, STRING_TYPES):
            signature = signature.encode("utf8")
        return built == signature
class SignatureMethod_PLAINTEXT_Unicode(oauth2.SignatureMethod_PLAINTEXT):
    """
    Temporary workaround for
    https://github.com/joestump/python-oauth2/issues/207

    Original code is Copyright (c) 2007 Leah Culver, MIT license.
    """
    def check(self, request, consumer, token, signature):
        """
        Returns whether the given signature is the correct signature for
        the given consumer and token signing the given request.
        """
        built = self.sign(request, consumer, token)
        # The workaround: encode str signatures to bytes before comparing.
        if isinstance(signature, STRING_TYPES):
            signature = signature.encode("utf8")
        return built == signature
class Request_Fix_Duplicate(oauth2.Request):
    """
    Temporary workaround for
    https://github.com/joestump/python-oauth2/pull/197

    Original code is Copyright (c) 2007 Leah Culver, MIT license.
    """
    def get_normalized_parameters(self):
        """
        Return a string that contains the parameters that must be signed.

        Unlike upstream oauth2.Request, parameters that appear both in the
        URL query string and in the request body are included only once,
        so the signature base string matches what the consumer signed.
        """
        items = []
        for key, value in self.items():
            # The signature itself is never part of the signed base string.
            if key == 'oauth_signature':
                continue
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if isinstance(value, STRING_TYPES):
                items.append(
                    (oauth2.to_utf8_if_string(key), oauth2.to_utf8(value))
                )
            else:
                try:
                    value = list(value)
                except TypeError as e:
                    # Non-iterable scalar (e.g. int): keep as a single pair.
                    assert 'is not iterable' in str(e)
                    items.append(
                        (oauth2.to_utf8_if_string(key),
                         oauth2.to_utf8_if_string(value))
                    )
                else:
                    # Iterable value: one (key, item) pair per element.
                    items.extend(
                        (oauth2.to_utf8_if_string(key),
                         oauth2.to_utf8_if_string(item))
                        for item in value
                    )
        # Include any query string parameters from the provided URL
        query = urlparse(self.url)[4]
        url_items = self._split_url_string(query).items()
        url_items = [
            (oauth2.to_utf8(k), oauth2.to_utf8_optional_iterator(v))
            for k, v in url_items if k != 'oauth_signature'
        ]
        # Merge together URL and POST parameters.
        # Eliminates parameters duplicated between URL and POST.
        items_dict = {}
        for k, v in items:
            items_dict.setdefault(k, []).append(v)
        for k, v in url_items:
            if not (k in items_dict and v in items_dict[k]):
                items.append((k, v))
        items.sort()
        encoded_str = urlencode(items, True)
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20').replace('%7E', '~')
class LTIBase(object):
    """
    LTI Object represents abstraction of current LTI session. It provides
    callback methods and methods that allow developer to inspect
    LTI basic-launch-request.

    This object is instantiated by @lti wrapper.

    NOTE(review): this base class reads ``self.session`` and relies on
    ``self._consumers()``, ``self.response_url``, ``self._verify_session()``,
    ``self.verify_request()`` and ``self._verify_any()``, none of which are
    defined here -- they must be supplied by a framework-specific subclass;
    confirm against the concrete subclass in use.
    """
    def __init__(self, lti_args, lti_kwargs):
        # Arguments given to the @lti decorator; ``lti_kwargs`` may carry
        # ``request`` (verification mode, see verify()) and ``role``
        # (required role, see _check_role()).
        self.lti_args = lti_args
        self.lti_kwargs = lti_kwargs
        self.nickname = self.name
    @property
    def name(self): # pylint: disable=no-self-use
        """
        Name returns user's name or user's email or user_id

        :return: best guess of name to use to greet user
        """
        if 'lis_person_sourcedid' in self.session:
            return self.session['lis_person_sourcedid']
        elif 'lis_person_contact_email_primary' in self.session:
            return self.session['lis_person_contact_email_primary']
        elif 'user_id' in self.session:
            return self.session['user_id']
        else:
            return ''
    def verify(self):
        """
        Verify if LTI request is valid, validation
        depends on @lti wrapper arguments

        :return: True when verification succeeded
        :raises: LTIException
        """
        log.debug('verify request=%s', self.lti_kwargs.get('request'))
        if self.lti_kwargs.get('request') == 'session':
            self._verify_session()
        elif self.lti_kwargs.get('request') == 'initial':
            self.verify_request()
        elif self.lti_kwargs.get('request') == 'any':
            self._verify_any()
        else:
            raise LTIException("Unknown request type")
        return True
    @property
    def user_id(self): # pylint: disable=no-self-use
        """
        Returns user_id as provided by LTI

        :return: user_id
        """
        return self.session['user_id']
    @property
    def key(self): # pylint: disable=no-self-use
        """
        OAuth Consumer Key

        :return: key
        """
        return self.session['oauth_consumer_key']
    @staticmethod
    def message_identifier_id():
        """
        Message identifier to use for XML callback

        :return: non-empty string
        """
        return "edX_fix"
    @property
    def lis_result_sourcedid(self): # pylint: disable=no-self-use
        """
        lis_result_sourcedid to use for XML callback

        :return: LTI lis_result_sourcedid
        """
        return self.session['lis_result_sourcedid']
    @property
    def role(self): # pylint: disable=no-self-use
        """
        LTI roles

        :return: roles (comma-separated string from the launch, or None)
        """
        return self.session.get('roles')
    @staticmethod
    def is_role(self, role):
        """
        Verify if user is in role

        NOTE(review): declared ``@staticmethod`` yet takes ``self``
        explicitly, so callers pass the instance themselves (see
        ``_check_role``). Changing it would break existing callers, so
        the quirk is documented rather than fixed.

        :param: role: role to verify against
        :return: if user is in role
        :exception: LTIException if role is unknown
        """
        log.debug("is_role %s", role)
        roles = self.session['roles'].split(',')
        if role in LTI_ROLES:
            role_list = LTI_ROLES[role]
            # find the intersection of the roles
            roles = set(role_list) & set(roles)
            is_user_role_there = len(roles) >= 1
            log.debug(
                "is_role roles_list=%s role=%s in list=%s", role_list,
                roles, is_user_role_there
            )
            return is_user_role_there
        else:
            raise LTIException("Unknown role {}.".format(role))
    def _check_role(self):
        """
        Check that user is in role specified as wrapper attribute

        :exception: LTIRoleException if user is not in roles
        """
        # Default role u'any' skips the check entirely.
        role = u'any'
        if 'role' in self.lti_kwargs:
            role = self.lti_kwargs['role']
        log.debug(
            "check_role lti_role=%s decorator_role=%s", self.role, role
        )
        if not (role == u'any' or self.is_role(self, role)):
            raise LTIRoleException('Not authorized.')
    def post_grade(self, grade):
        """
        Post grade to LTI consumer using XML

        :param: grade: 0 <= grade <= 1
        :return: True if post successful and grade valid
        :exception: LTIPostMessageException if call failed
        """
        message_identifier_id = self.message_identifier_id()
        operation = 'replaceResult'
        lis_result_sourcedid = self.lis_result_sourcedid
        # # edX devbox fix
        score = float(grade)
        if 0 <= score <= 1.0:
            xml = generate_request_xml(
                message_identifier_id, operation, lis_result_sourcedid,
                score)
            ret = post_message(self._consumers(), self.key,
                               self.response_url, xml)
            if not ret:
                raise LTIPostMessageException("Post Message Failed")
            return True
        # Out-of-range grade: nothing was posted.
        return False
    def post_grade2(self, grade, user=None, comment=''):
        """
        Post grade to LTI consumer using REST/JSON
        URL munging will is related to:
        https://openedx.atlassian.net/browse/PLAT-281

        :param: grade: 0 <= grade <= 1
        :param: user: user id to post for; defaults to the session user_id
        :param: comment: optional comment sent along with the result
        :return: True if post successful and grade valid
        :exception: LTIPostMessageException if call failed
        """
        content_type = 'application/vnd.ims.lis.v2.result+json'
        if user is None:
            user = self.user_id
        lti2_url = self.response_url.replace(
            "/grade_handler",
            "/lti_2_0_result_rest_handler/user/{}".format(user))
        score = float(grade)
        if 0 <= score <= 1.0:
            body = json.dumps({
                "@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
                "@type": "Result",
                "resultScore": score,
                "comment": comment
            })
            ret = post_message2(self._consumers(), self.key, lti2_url, body,
                                method='PUT',
                                content_type=content_type)
            if not ret:
                raise LTIPostMessageException("Post Message Failed")
            return True
        return False
|
mitodl/pylti | pylti/common.py | verify_request_common | python | def verify_request_common(consumers, url, method, headers, params):
log.debug("consumers %s", consumers)
log.debug("url %s", url)
log.debug("method %s", method)
log.debug("headers %s", headers)
log.debug("params %s", params)
oauth_server = LTIOAuthServer(consumers)
oauth_server.add_signature_method(
SignatureMethod_PLAINTEXT_Unicode())
oauth_server.add_signature_method(
SignatureMethod_HMAC_SHA1_Unicode())
# Check header for SSL before selecting the url
if headers.get('X-Forwarded-Proto', 'http') == 'https':
url = url.replace('http:', 'https:', 1)
oauth_request = Request_Fix_Duplicate.from_request(
method,
url,
headers=dict(headers),
parameters=params
)
if not oauth_request:
log.info('Received non oauth request on oauth protected page')
raise LTIException('This page requires a valid oauth session '
'or request')
try:
# pylint: disable=protected-access
oauth_consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = oauth_server.lookup_consumer(oauth_consumer_key)
if not consumer:
raise oauth2.Error('Invalid consumer.')
oauth_server.verify_request(oauth_request, consumer, None)
except oauth2.Error:
# Rethrow our own for nice error handling (don't print
# error message as it will contain the key
raise LTIException("OAuth error: Please check your key and secret")
return True | Verifies that request is valid
:param consumers: consumers from config file
:param url: request url
:param method: request method
:param headers: request headers
:param params: request params
:return: is request valid | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L259-L308 | null | # -*- coding: utf-8 -*-
"""
Common classes and methods for PyLTI module
"""
from __future__ import absolute_import
import logging
import json
import oauth2
from xml.etree import ElementTree as etree
from oauth2 import STRING_TYPES
from six.moves.urllib.parse import urlparse, urlencode
# Module-level logger shared by every helper in this module.
log = logging.getLogger('pylti.common')  # pylint: disable=invalid-name

# LTI launch-request parameters of interest to the framework adapters.
# NOTE(review): both 'lis_person_contact_email_primary' and the
# underscore-less 'lis_person_contact_emailprimary' are listed --
# presumably to tolerate misbehaving consumers; confirm before removing.
LTI_PROPERTY_LIST = [
    'oauth_consumer_key',
    'launch_presentation_return_url',
    'user_id',
    'oauth_nonce',
    'context_label',
    'context_id',
    'resource_link_title',
    'resource_link_id',
    'lis_person_contact_email_primary',
    'lis_person_contact_emailprimary',
    'lis_person_name_full',
    'lis_person_name_family',
    'lis_person_name_given',
    'lis_result_sourcedid',
    'lis_person_sourcedid',
    'launch_type',
    'lti_message',
    'lti_version',
    'roles',
    'lis_outcome_service_url'
]

# Mapping of pylti's coarse role names to the LTI role values that
# satisfy them (used by LTIBase.is_role).
LTI_ROLES = {
    u'staff': [u'Administrator', u'Instructor', ],
    u'instructor': [u'Instructor', ],
    u'administrator': [u'Administrator', ],
    u'student': [u'Student', u'Learner', ]
    # There is also a special role u'any' that ignores role check
}

# Session flag set once an LTI launch has been verified.
LTI_SESSION_KEY = u'lti_authenticated'

# Accepted values for the @lti decorator's ``request`` argument.
LTI_REQUEST_TYPE = [u'any', u'initial', u'session']
def default_error(exception=None):
    """Render a simple error page; applications should override this."""
    # pylint: disable=unused-argument
    log.exception("There was an LTI communication error")
    message = "There was an LTI communication error"
    return message, 500
class LTIOAuthServer(oauth2.Server):
    """
    Largely taken from reference implementation
    for app engine at https://code.google.com/p/ims-dev/
    """
    def __init__(self, consumers, signature_methods=None):
        """
        Create OAuth server

        :param consumers: mapping of consumer key -> settings dict with
            at least a ``secret`` entry (optionally ``cert``)
        :param signature_methods: optional signature methods forwarded to
            ``oauth2.Server``
        """
        super(LTIOAuthServer, self).__init__(signature_methods)
        self.consumers = consumers
    def lookup_consumer(self, key):
        """
        Search through keys

        :return: ``oauth2.Consumer`` for ``key``, or None when the key
            is unknown or has no secret configured (both cases logged)
        """
        if not self.consumers:
            log.critical(("No consumers defined in settings."
                          "Have you created a configuration file?"))
            return None
        consumer = self.consumers.get(key)
        if not consumer:
            log.info("Did not find consumer, using key: %s ", key)
            return None
        secret = consumer.get('secret', None)
        if not secret:
            log.critical(('Consumer %s, is missing secret'
                          'in settings file, and needs correction.'), key)
            return None
        return oauth2.Consumer(key, secret)
    def lookup_cert(self, key):
        """
        Search through keys

        :return: the consumer's ``cert`` setting, or None when the key
            is unknown or no cert is configured
        """
        if not self.consumers:
            log.critical(("No consumers defined in settings."
                          "Have you created a configuration file?"))
            return None
        consumer = self.consumers.get(key)
        if not consumer:
            log.info("Did not find consumer, using key: %s ", key)
            return None
        cert = consumer.get('cert', None)
        return cert
class LTIException(Exception):
    """
    Custom LTI exception for proper handling
    of LTI specific errors. Base class for all errors
    raised by this module.
    """
    pass
class LTINotInSessionException(LTIException):
    """
    Custom LTI exception raised when LTI session credentials are
    required but absent (raising sites live in the framework
    adapters, not in this module -- confirm there).
    """
    pass
class LTIRoleException(LTIException):
    """
    Exception class for when LTI user doesn't have the
    right role. Raised by ``LTIBase._check_role`` when the session's
    roles do not satisfy the role required by the @lti decorator.
    """
    pass
class LTIPostMessageException(LTIException):
    """
    Raised when posting a grade/result back to the LTI consumer fails
    (see ``LTIBase.post_grade`` / ``post_grade2``). The original
    docstring here was a copy-paste of LTIRoleException's.
    """
    pass
def _post_patched_request(consumers, lti_key, body,
                          url, method, content_type):
    """
    Authorization header needs to be capitalized for some LTI clients
    this function ensures that header is capitalized

    :param consumers: consumers from config
    :param lti_key: key to find appropriate consumer
    :param body: body of the call
    :param url: outcome url
    :param method: HTTP method, e.g. 'POST' or 'PUT'
    :param content_type: Content-Type header value for the request
    :return: (response, content) tuple as returned by httplib2
    """
    # pylint: disable=too-many-locals, too-many-arguments
    oauth_server = LTIOAuthServer(consumers)
    oauth_server.add_signature_method(SignatureMethod_HMAC_SHA1_Unicode())
    lti_consumer = oauth_server.lookup_consumer(lti_key)
    lti_cert = oauth_server.lookup_cert(lti_key)
    secret = lti_consumer.secret
    consumer = oauth2.Consumer(key=lti_key, secret=secret)
    client = oauth2.Client(consumer)
    # If a client certificate is configured for this consumer, attach it
    # to the outgoing connection.
    if lti_cert:
        client.add_certificate(key=lti_cert, cert=lti_cert, domain='')
        log.debug("cert %s", lti_cert)
    import httplib2
    http = httplib2.Http
    # pylint: disable=protected-access
    # Monkey-patch httplib2's header normalization so the lower-cased
    # 'authorization' header goes out as 'Authorization'.
    normalize = http._normalize_headers
    def my_normalize(self, headers):
        """ This function patches Authorization header """
        ret = normalize(self, headers)
        if 'authorization' in ret:
            ret['Authorization'] = ret.pop('authorization')
        log.debug("headers")
        log.debug(headers)
        return ret
    http._normalize_headers = my_normalize
    monkey_patch_function = normalize
    response, content = client.request(
        url,
        method,
        body=body.encode('utf-8'),
        headers={'Content-Type': content_type})
    http = httplib2.Http
    # pylint: disable=protected-access
    # Restore the original normalization function after the request.
    http._normalize_headers = monkey_patch_function
    log.debug("key %s", lti_key)
    log.debug("secret %s", secret)
    log.debug("url %s", url)
    log.debug("response %s", response)
    log.debug("content %s", format(content))
    return response, content
def post_message(consumers, lti_key, url, body):
    """
    Posts a signed message to LTI consumer

    :param consumers: consumers from config
    :param lti_key: key to find appropriate consumer
    :param url: post url
    :param body: xml body
    :return: success
    """
    _, content = _post_patched_request(
        consumers,
        lti_key,
        body,
        url,
        'POST',
        'application/xml',
    )
    # The consumer reports the outcome inside the returned POX payload.
    succeeded = b"<imsx_codeMajor>success</imsx_codeMajor>" in content
    log.debug("is success %s", succeeded)
    return succeeded
def post_message2(consumers, lti_key, url, body,
                  method='POST', content_type='application/xml'):
    """
    Posts a signed message to LTI consumer using LTI 2.0 format

    :param: consumers: consumers from config
    :param: lti_key: key to find appropriate consumer
    :param: url: post url
    :param: body: xml body
    :return: success
    """
    # pylint: disable=too-many-arguments
    response, _ = _post_patched_request(
        consumers,
        lti_key,
        body,
        url,
        method,
        content_type,
    )
    # LTI 2.0 signals the outcome through the HTTP status code alone.
    succeeded = response.status == 200
    log.debug("is success %s", succeeded)
    return succeeded
def generate_request_xml(message_identifier_id, operation,
                         lis_result_sourcedid, score):
    # pylint: disable=too-many-locals
    """
    Generates LTI 1.1 XML for posting result to LTI consumer.

    :param message_identifier_id: identifier echoed back by the consumer
    :param operation: POX operation name (e.g. ``replaceResult``)
    :param lis_result_sourcedid: grade-book row identifier
    :param score: numeric score, or None to omit the result element
    :return: XML string
    """
    envelope = etree.Element(u'imsx_POXEnvelopeRequest',
                             xmlns=u'http://www.imsglobal.org/services/'
                                   u'ltiv1p1/xsd/imsoms_v1p0')

    # Header: protocol version plus the caller-supplied message id.
    header_info = etree.SubElement(
        etree.SubElement(envelope, 'imsx_POXHeader'),
        'imsx_POXRequestHeaderInfo')
    etree.SubElement(header_info, 'imsx_version').text = 'V1.0'
    etree.SubElement(header_info,
                     'imsx_messageIdentifier').text = message_identifier_id

    # Body: <operation>Request wrapping the result record.
    request_el = etree.SubElement(
        etree.SubElement(envelope, 'imsx_POXBody'),
        '%s%s' % (operation, 'Request'))
    record = etree.SubElement(request_el, 'resultRecord')
    guid = etree.SubElement(record, 'sourcedGUID')
    etree.SubElement(guid, 'sourcedId').text = lis_result_sourcedid

    if score is not None:
        result_score = etree.SubElement(
            etree.SubElement(record, 'result'), 'resultScore')
        etree.SubElement(result_score, 'language').text = 'en'
        etree.SubElement(result_score, 'textString').text = score.__str__()

    ret = "<?xml version='1.0' encoding='utf-8'?>\n{}".format(
        etree.tostring(envelope, encoding='utf-8').decode('utf-8'))
    log.debug("XML Response: \n%s", ret)
    return ret
class SignatureMethod_HMAC_SHA1_Unicode(oauth2.SignatureMethod_HMAC_SHA1):
    """
    Temporary workaround for
    https://github.com/joestump/python-oauth2/issues/207

    Original code is Copyright (c) 2007 Leah Culver, MIT license.
    """

    def check(self, request, consumer, token, signature):
        """
        Returns whether the given signature is the correct signature for
        the given consumer and token signing the given request.
        """
        expected = self.sign(request, consumer, token)
        # Normalize text signatures to bytes so the comparison is always
        # bytes-to-bytes regardless of what the caller passed in.
        received = signature
        if isinstance(received, STRING_TYPES):
            received = received.encode("utf8")
        return expected == received
class SignatureMethod_PLAINTEXT_Unicode(oauth2.SignatureMethod_PLAINTEXT):
    """
    Temporary workaround for
    https://github.com/joestump/python-oauth2/issues/207

    Original code is Copyright (c) 2007 Leah Culver, MIT license.
    """

    def check(self, request, consumer, token, signature):
        """
        Returns whether the given signature is the correct signature for
        the given consumer and token signing the given request.
        """
        expected = self.sign(request, consumer, token)
        # Normalize text signatures to bytes so the comparison is always
        # bytes-to-bytes regardless of what the caller passed in.
        received = signature
        if isinstance(received, STRING_TYPES):
            received = received.encode("utf8")
        return expected == received
class Request_Fix_Duplicate(oauth2.Request):
    """
    Temporary workaround for
    https://github.com/joestump/python-oauth2/pull/197

    Original code is Copyright (c) 2007 Leah Culver, MIT license.

    This subclass overrides only ``get_normalized_parameters``; the
    difference from the upstream implementation is the de-duplication
    of parameters that appear both in the URL query string and in the
    POST body, so each key/value pair is signed exactly once.
    """

    def get_normalized_parameters(self):
        """
        Return a string that contains the parameters that must be signed.
        """
        items = []
        for key, value in self.items():
            # The signature itself must never be part of the signed data.
            if key == 'oauth_signature':
                continue
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if isinstance(value, STRING_TYPES):
                items.append(
                    (oauth2.to_utf8_if_string(key), oauth2.to_utf8(value))
                )
            else:
                try:
                    value = list(value)
                except TypeError as e:
                    # Non-iterable scalar (e.g. int): keep as a single pair.
                    assert 'is not iterable' in str(e)
                    items.append(
                        (oauth2.to_utf8_if_string(key),
                         oauth2.to_utf8_if_string(value))
                    )
                else:
                    # Iterable value: one (key, item) pair per element.
                    items.extend(
                        (oauth2.to_utf8_if_string(key),
                         oauth2.to_utf8_if_string(item))
                        for item in value
                    )
        # Include any query string parameters from the provided URL
        query = urlparse(self.url)[4]
        url_items = self._split_url_string(query).items()
        url_items = [
            (oauth2.to_utf8(k), oauth2.to_utf8_optional_iterator(v))
            for k, v in url_items if k != 'oauth_signature'
        ]
        # Merge together URL and POST parameters.
        # Eliminates parameters duplicated between URL and POST.
        items_dict = {}
        for k, v in items:
            items_dict.setdefault(k, []).append(v)
        for k, v in url_items:
            if not (k in items_dict and v in items_dict[k]):
                items.append((k, v))
        items.sort()
        encoded_str = urlencode(items, True)
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20').replace('%7E', '~')
class LTIBase(object):
    """
    LTI Object represents abstraction of current LTI session. It provides
    callback methods and methods that allow developer to inspect
    LTI basic-launch-request.

    This object is instantiated by @lti wrapper.

    NOTE(review): subclasses are expected to provide ``self.session``,
    ``self.response_url``, ``self._consumers()``, ``self._verify_session()``,
    ``self.verify_request()`` and ``self._verify_any()`` -- none of these
    are defined here; confirm against the framework-specific subclass.
    """

    def __init__(self, lti_args, lti_kwargs):
        # Arguments given to the @lti decorator at wrap time.
        self.lti_args = lti_args
        self.lti_kwargs = lti_kwargs
        # Cache a best-guess display name (see the ``name`` property).
        self.nickname = self.name

    @property
    def name(self):  # pylint: disable=no-self-use
        """
        Name returns user's name or user's email or user_id

        :return: best guess of name to use to greet user
        """
        # Preference order: LTI sourced id, then primary email, then user_id.
        if 'lis_person_sourcedid' in self.session:
            return self.session['lis_person_sourcedid']
        elif 'lis_person_contact_email_primary' in self.session:
            return self.session['lis_person_contact_email_primary']
        elif 'user_id' in self.session:
            return self.session['user_id']
        else:
            return ''

    def verify(self):
        """
        Verify if LTI request is valid, validation
        depends on @lti wrapper arguments

        :raises: LTIException
        """
        log.debug('verify request=%s', self.lti_kwargs.get('request'))
        if self.lti_kwargs.get('request') == 'session':
            self._verify_session()
        elif self.lti_kwargs.get('request') == 'initial':
            self.verify_request()
        elif self.lti_kwargs.get('request') == 'any':
            self._verify_any()
        else:
            raise LTIException("Unknown request type")
        return True

    @property
    def user_id(self):  # pylint: disable=no-self-use
        """
        Returns user_id as provided by LTI

        :return: user_id
        """
        return self.session['user_id']

    @property
    def key(self):  # pylint: disable=no-self-use
        """
        OAuth Consumer Key

        :return: key
        """
        return self.session['oauth_consumer_key']

    @staticmethod
    def message_identifier_id():
        """
        Message identifier to use for XML callback

        :return: non-empty string
        """
        # Fixed identifier; the value suggests this works around an edX
        # devstack quirk (see the "edX devbox fix" note in post_grade) --
        # TODO confirm.
        return "edX_fix"

    @property
    def lis_result_sourcedid(self):  # pylint: disable=no-self-use
        """
        lis_result_sourcedid to use for XML callback

        :return: LTI lis_result_sourcedid
        """
        return self.session['lis_result_sourcedid']

    @property
    def role(self):  # pylint: disable=no-self-use
        """
        LTI roles

        :return: roles
        """
        return self.session.get('roles')

    @staticmethod
    def is_role(self, role):
        """
        Verify if user is in role

        :param: role: role to verify against
        :return: if user is in role
        :exception: LTIException if role is unknown
        """
        # NOTE(review): declared @staticmethod yet takes ``self``
        # explicitly; callers must invoke it as ``self.is_role(self, role)``
        # (see _check_role below). Changing this would break callers.
        log.debug("is_role %s", role)
        roles = self.session['roles'].split(',')
        if role in LTI_ROLES:
            role_list = LTI_ROLES[role]
            # find the intersection of the roles
            roles = set(role_list) & set(roles)
            is_user_role_there = len(roles) >= 1
            log.debug(
                "is_role roles_list=%s role=%s in list=%s", role_list,
                roles, is_user_role_there
            )
            return is_user_role_there
        else:
            raise LTIException("Unknown role {}.".format(role))

    def _check_role(self):
        """
        Check that user is in role specified as wrapper attribute

        :exception: LTIRoleException if user is not in roles
        """
        # Default role 'any' skips the role check entirely.
        role = u'any'
        if 'role' in self.lti_kwargs:
            role = self.lti_kwargs['role']
        log.debug(
            "check_role lti_role=%s decorator_role=%s", self.role, role
        )
        if not (role == u'any' or self.is_role(self, role)):
            raise LTIRoleException('Not authorized.')

    def post_grade(self, grade):
        """
        Post grade to LTI consumer using XML (LTI 1.1 outcomes)

        :param: grade: 0 <= grade <= 1
        :return: True if post successful and grade valid
        :exception: LTIPostMessageException if call failed
        """
        message_identifier_id = self.message_identifier_id()
        operation = 'replaceResult'
        lis_result_sourcedid = self.lis_result_sourcedid
        # # edX devbox fix
        score = float(grade)
        if 0 <= score <= 1.0:
            xml = generate_request_xml(
                message_identifier_id, operation, lis_result_sourcedid,
                score)
            ret = post_message(self._consumers(), self.key,
                               self.response_url, xml)
            if not ret:
                raise LTIPostMessageException("Post Message Failed")
            return True
        # Out-of-range grades are reported as failure rather than raised.
        return False

    def post_grade2(self, grade, user=None, comment=''):
        """
        Post grade to LTI consumer using REST/JSON (LTI 2.0)

        URL munging is related to:
        https://openedx.atlassian.net/browse/PLAT-281

        :param: grade: 0 <= grade <= 1
        :return: True if post successful and grade valid
        :exception: LTIPostMessageException if call failed
        """
        content_type = 'application/vnd.ims.lis.v2.result+json'
        if user is None:
            user = self.user_id
        # Rewrite the LTI 1.x grade handler URL into the LTI 2.0 result
        # REST handler URL for this user (see ticket above).
        lti2_url = self.response_url.replace(
            "/grade_handler",
            "/lti_2_0_result_rest_handler/user/{}".format(user))
        score = float(grade)
        if 0 <= score <= 1.0:
            body = json.dumps({
                "@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
                "@type": "Result",
                "resultScore": score,
                "comment": comment
            })
            ret = post_message2(self._consumers(), self.key, lti2_url, body,
                                method='PUT',
                                content_type=content_type)
            if not ret:
                raise LTIPostMessageException("Post Message Failed")
            return True
        # Out-of-range grades are reported as failure rather than raised.
        return False
|
mitodl/pylti | pylti/common.py | generate_request_xml | python | def generate_request_xml(message_identifier_id, operation,
lis_result_sourcedid, score):
# pylint: disable=too-many-locals
root = etree.Element(u'imsx_POXEnvelopeRequest',
xmlns=u'http://www.imsglobal.org/services/'
u'ltiv1p1/xsd/imsoms_v1p0')
header = etree.SubElement(root, 'imsx_POXHeader')
header_info = etree.SubElement(header, 'imsx_POXRequestHeaderInfo')
version = etree.SubElement(header_info, 'imsx_version')
version.text = 'V1.0'
message_identifier = etree.SubElement(header_info,
'imsx_messageIdentifier')
message_identifier.text = message_identifier_id
body = etree.SubElement(root, 'imsx_POXBody')
xml_request = etree.SubElement(body, '%s%s' % (operation, 'Request'))
record = etree.SubElement(xml_request, 'resultRecord')
guid = etree.SubElement(record, 'sourcedGUID')
sourcedid = etree.SubElement(guid, 'sourcedId')
sourcedid.text = lis_result_sourcedid
if score is not None:
result = etree.SubElement(record, 'result')
result_score = etree.SubElement(result, 'resultScore')
language = etree.SubElement(result_score, 'language')
language.text = 'en'
text_string = etree.SubElement(result_score, 'textString')
text_string.text = score.__str__()
ret = "<?xml version='1.0' encoding='utf-8'?>\n{}".format(
etree.tostring(root, encoding='utf-8').decode('utf-8'))
log.debug("XML Response: \n%s", ret)
return ret | Generates LTI 1.1 XML for posting result to LTI consumer.
:param message_identifier_id:
:param operation:
:param lis_result_sourcedid:
:param score:
:return: XML string | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L311-L353 | null | # -*- coding: utf-8 -*-
"""
Common classes and methods for PyLTI module
"""
from __future__ import absolute_import
import logging
import json
import oauth2
from xml.etree import ElementTree as etree
from oauth2 import STRING_TYPES
from six.moves.urllib.parse import urlparse, urlencode
log = logging.getLogger('pylti.common') # pylint: disable=invalid-name
LTI_PROPERTY_LIST = [
'oauth_consumer_key',
'launch_presentation_return_url',
'user_id',
'oauth_nonce',
'context_label',
'context_id',
'resource_link_title',
'resource_link_id',
'lis_person_contact_email_primary',
'lis_person_contact_emailprimary',
'lis_person_name_full',
'lis_person_name_family',
'lis_person_name_given',
'lis_result_sourcedid',
'lis_person_sourcedid',
'launch_type',
'lti_message',
'lti_version',
'roles',
'lis_outcome_service_url'
]
LTI_ROLES = {
u'staff': [u'Administrator', u'Instructor', ],
u'instructor': [u'Instructor', ],
u'administrator': [u'Administrator', ],
u'student': [u'Student', u'Learner', ]
# There is also a special role u'any' that ignores role check
}
LTI_SESSION_KEY = u'lti_authenticated'
LTI_REQUEST_TYPE = [u'any', u'initial', u'session']
def default_error(exception=None):
    """Render simple error page. This should be overridden in applications."""
    # pylint: disable=unused-argument
    message = "There was an LTI communication error"
    log.exception(message)
    return message, 500
class LTIOAuthServer(oauth2.Server):
    """
    Largely taken from reference implementation
    for app engine at https://code.google.com/p/ims-dev/
    """

    def __init__(self, consumers, signature_methods=None):
        """
        Create OAuth server

        :param consumers: mapping of consumer key -> settings dict
            (each entry is expected to carry 'secret' and optionally 'cert')
        :param signature_methods: optional signature methods, passed
            through to oauth2.Server
        """
        super(LTIOAuthServer, self).__init__(signature_methods)
        self.consumers = consumers

    def lookup_consumer(self, key):
        """
        Search through keys

        :param key: consumer key to resolve
        :return: oauth2.Consumer for the key, or None when the key is
            unknown or its secret is missing
        """
        if not self.consumers:
            # BUG FIX: the adjacent string literals previously joined
            # without a space ("settings.Have you ...").
            log.critical(("No consumers defined in settings. "
                          "Have you created a configuration file?"))
            return None
        consumer = self.consumers.get(key)
        if not consumer:
            log.info("Did not find consumer, using key: %s ", key)
            return None
        secret = consumer.get('secret', None)
        if not secret:
            # BUG FIX: add missing space ("secretin" -> "secret in").
            log.critical(('Consumer %s, is missing secret '
                          'in settings file, and needs correction.'), key)
            return None
        return oauth2.Consumer(key, secret)

    def lookup_cert(self, key):
        """
        Search through keys

        :param key: consumer key to resolve
        :return: the consumer's 'cert' value, or None when unknown
        """
        if not self.consumers:
            # BUG FIX: same missing-space message fix as lookup_consumer.
            log.critical(("No consumers defined in settings. "
                          "Have you created a configuration file?"))
            return None
        consumer = self.consumers.get(key)
        if not consumer:
            log.info("Did not find consumer, using key: %s ", key)
            return None
        cert = consumer.get('cert', None)
        return cert
class LTIException(Exception):
"""
Custom LTI exception for proper handling
of LTI specific errors
"""
pass
class LTINotInSessionException(LTIException):
"""
Custom LTI exception for proper handling
of LTI specific errors
"""
pass
class LTIRoleException(LTIException):
"""
Exception class for when LTI user doesn't have the
right role.
"""
pass
class LTIPostMessageException(LTIException):
"""
Exception class for when LTI user doesn't have the
right role.
"""
pass
def _post_patched_request(consumers, lti_key, body,
url, method, content_type):
"""
Authorization header needs to be capitalized for some LTI clients
this function ensures that header is capitalized
:param body: body of the call
:param client: OAuth Client
:param url: outcome url
:return: response
"""
# pylint: disable=too-many-locals, too-many-arguments
oauth_server = LTIOAuthServer(consumers)
oauth_server.add_signature_method(SignatureMethod_HMAC_SHA1_Unicode())
lti_consumer = oauth_server.lookup_consumer(lti_key)
lti_cert = oauth_server.lookup_cert(lti_key)
secret = lti_consumer.secret
consumer = oauth2.Consumer(key=lti_key, secret=secret)
client = oauth2.Client(consumer)
if lti_cert:
client.add_certificate(key=lti_cert, cert=lti_cert, domain='')
log.debug("cert %s", lti_cert)
import httplib2
http = httplib2.Http
# pylint: disable=protected-access
normalize = http._normalize_headers
def my_normalize(self, headers):
""" This function patches Authorization header """
ret = normalize(self, headers)
if 'authorization' in ret:
ret['Authorization'] = ret.pop('authorization')
log.debug("headers")
log.debug(headers)
return ret
http._normalize_headers = my_normalize
monkey_patch_function = normalize
response, content = client.request(
url,
method,
body=body.encode('utf-8'),
headers={'Content-Type': content_type})
http = httplib2.Http
# pylint: disable=protected-access
http._normalize_headers = monkey_patch_function
log.debug("key %s", lti_key)
log.debug("secret %s", secret)
log.debug("url %s", url)
log.debug("response %s", response)
log.debug("content %s", format(content))
return response, content
def post_message(consumers, lti_key, url, body):
"""
Posts a signed message to LTI consumer
:param consumers: consumers from config
:param lti_key: key to find appropriate consumer
:param url: post url
:param body: xml body
:return: success
"""
content_type = 'application/xml'
method = 'POST'
(_, content) = _post_patched_request(
consumers,
lti_key,
body,
url,
method,
content_type,
)
is_success = b"<imsx_codeMajor>success</imsx_codeMajor>" in content
log.debug("is success %s", is_success)
return is_success
def post_message2(consumers, lti_key, url, body,
method='POST', content_type='application/xml'):
"""
Posts a signed message to LTI consumer using LTI 2.0 format
:param: consumers: consumers from config
:param: lti_key: key to find appropriate consumer
:param: url: post url
:param: body: xml body
:return: success
"""
# pylint: disable=too-many-arguments
(response, _) = _post_patched_request(
consumers,
lti_key,
body,
url,
method,
content_type,
)
is_success = response.status == 200
log.debug("is success %s", is_success)
return is_success
def verify_request_common(consumers, url, method, headers, params):
    """
    Verifies that request is valid

    :param consumers: consumers from config file
    :param url: request url
    :param method: request method
    :param headers: request headers
    :param params: request params
    :return: True when the request is a valid signed OAuth request
    :raises LTIException: when the request is not an OAuth request or
        the key/signature check fails
    """
    log.debug("consumers %s", consumers)
    log.debug("url %s", url)
    log.debug("method %s", method)
    log.debug("headers %s", headers)
    log.debug("params %s", params)

    oauth_server = LTIOAuthServer(consumers)
    # Accept both signature methods.
    oauth_server.add_signature_method(
        SignatureMethod_PLAINTEXT_Unicode())
    oauth_server.add_signature_method(
        SignatureMethod_HMAC_SHA1_Unicode())

    # Check header for SSL before selecting the url
    # (behind a TLS-terminating proxy the local URL is reported as http,
    # while the client signed the https URL).
    if headers.get('X-Forwarded-Proto', 'http') == 'https':
        url = url.replace('http:', 'https:', 1)

    oauth_request = Request_Fix_Duplicate.from_request(
        method,
        url,
        headers=dict(headers),
        parameters=params
    )

    if not oauth_request:
        log.info('Received non oauth request on oauth protected page')
        raise LTIException('This page requires a valid oauth session '
                           'or request')
    try:
        # pylint: disable=protected-access
        oauth_consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = oauth_server.lookup_consumer(oauth_consumer_key)
        if not consumer:
            raise oauth2.Error('Invalid consumer.')
        oauth_server.verify_request(oauth_request, consumer, None)
    except oauth2.Error:
        # Rethrow our own for nice error handling (don't print
        # error message as it will contain the key
        raise LTIException("OAuth error: Please check your key and secret")
    return True
class SignatureMethod_HMAC_SHA1_Unicode(oauth2.SignatureMethod_HMAC_SHA1):
"""
Temporary workaround for
https://github.com/joestump/python-oauth2/issues/207
Original code is Copyright (c) 2007 Leah Culver, MIT license.
"""
def check(self, request, consumer, token, signature):
"""
Returns whether the given signature is the correct signature for
the given consumer and token signing the given request.
"""
built = self.sign(request, consumer, token)
if isinstance(signature, STRING_TYPES):
signature = signature.encode("utf8")
return built == signature
class SignatureMethod_PLAINTEXT_Unicode(oauth2.SignatureMethod_PLAINTEXT):
"""
Temporary workaround for
https://github.com/joestump/python-oauth2/issues/207
Original code is Copyright (c) 2007 Leah Culver, MIT license.
"""
def check(self, request, consumer, token, signature):
"""
Returns whether the given signature is the correct signature for
the given consumer and token signing the given request.
"""
built = self.sign(request, consumer, token)
if isinstance(signature, STRING_TYPES):
signature = signature.encode("utf8")
return built == signature
class Request_Fix_Duplicate(oauth2.Request):
"""
Temporary workaround for
https://github.com/joestump/python-oauth2/pull/197
Original code is Copyright (c) 2007 Leah Culver, MIT license.
"""
def get_normalized_parameters(self):
"""
Return a string that contains the parameters that must be signed.
"""
items = []
for key, value in self.items():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, STRING_TYPES):
items.append(
(oauth2.to_utf8_if_string(key), oauth2.to_utf8(value))
)
else:
try:
value = list(value)
except TypeError as e:
assert 'is not iterable' in str(e)
items.append(
(oauth2.to_utf8_if_string(key),
oauth2.to_utf8_if_string(value))
)
else:
items.extend(
(oauth2.to_utf8_if_string(key),
oauth2.to_utf8_if_string(item))
for item in value
)
# Include any query string parameters from the provided URL
query = urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [
(oauth2.to_utf8(k), oauth2.to_utf8_optional_iterator(v))
for k, v in url_items if k != 'oauth_signature'
]
# Merge together URL and POST parameters.
# Eliminates parameters duplicated between URL and POST.
items_dict = {}
for k, v in items:
items_dict.setdefault(k, []).append(v)
for k, v in url_items:
if not (k in items_dict and v in items_dict[k]):
items.append((k, v))
items.sort()
encoded_str = urlencode(items, True)
# Encode signature parameters per Oauth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
class LTIBase(object):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.lti_args = lti_args
self.lti_kwargs = lti_kwargs
self.nickname = self.name
@property
def name(self): # pylint: disable=no-self-use
"""
Name returns user's name or user's email or user_id
:return: best guess of name to use to greet user
"""
if 'lis_person_sourcedid' in self.session:
return self.session['lis_person_sourcedid']
elif 'lis_person_contact_email_primary' in self.session:
return self.session['lis_person_contact_email_primary']
elif 'user_id' in self.session:
return self.session['user_id']
else:
return ''
def verify(self):
"""
Verify if LTI request is valid, validation
depends on @lti wrapper arguments
:raises: LTIException
"""
log.debug('verify request=%s', self.lti_kwargs.get('request'))
if self.lti_kwargs.get('request') == 'session':
self._verify_session()
elif self.lti_kwargs.get('request') == 'initial':
self.verify_request()
elif self.lti_kwargs.get('request') == 'any':
self._verify_any()
else:
raise LTIException("Unknown request type")
return True
@property
def user_id(self): # pylint: disable=no-self-use
"""
Returns user_id as provided by LTI
:return: user_id
"""
return self.session['user_id']
@property
def key(self): # pylint: disable=no-self-use
"""
OAuth Consumer Key
:return: key
"""
return self.session['oauth_consumer_key']
@staticmethod
def message_identifier_id():
"""
Message identifier to use for XML callback
:return: non-empty string
"""
return "edX_fix"
@property
def lis_result_sourcedid(self): # pylint: disable=no-self-use
"""
lis_result_sourcedid to use for XML callback
:return: LTI lis_result_sourcedid
"""
return self.session['lis_result_sourcedid']
@property
def role(self): # pylint: disable=no-self-use
"""
LTI roles
:return: roles
"""
return self.session.get('roles')
@staticmethod
def is_role(self, role):
"""
Verify if user is in role
:param: role: role to verify against
:return: if user is in role
:exception: LTIException if role is unknown
"""
log.debug("is_role %s", role)
roles = self.session['roles'].split(',')
if role in LTI_ROLES:
role_list = LTI_ROLES[role]
# find the intersection of the roles
roles = set(role_list) & set(roles)
is_user_role_there = len(roles) >= 1
log.debug(
"is_role roles_list=%s role=%s in list=%s", role_list,
roles, is_user_role_there
)
return is_user_role_there
else:
raise LTIException("Unknown role {}.".format(role))
def _check_role(self):
"""
Check that user is in role specified as wrapper attribute
:exception: LTIRoleException if user is not in roles
"""
role = u'any'
if 'role' in self.lti_kwargs:
role = self.lti_kwargs['role']
log.debug(
"check_role lti_role=%s decorator_role=%s", self.role, role
)
if not (role == u'any' or self.is_role(self, role)):
raise LTIRoleException('Not authorized.')
def post_grade(self, grade):
"""
Post grade to LTI consumer using XML
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed
"""
message_identifier_id = self.message_identifier_id()
operation = 'replaceResult'
lis_result_sourcedid = self.lis_result_sourcedid
# # edX devbox fix
score = float(grade)
if 0 <= score <= 1.0:
xml = generate_request_xml(
message_identifier_id, operation, lis_result_sourcedid,
score)
ret = post_message(self._consumers(), self.key,
self.response_url, xml)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False
def post_grade2(self, grade, user=None, comment=''):
"""
Post grade to LTI consumer using REST/JSON
URL munging will is related to:
https://openedx.atlassian.net/browse/PLAT-281
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed
"""
content_type = 'application/vnd.ims.lis.v2.result+json'
if user is None:
user = self.user_id
lti2_url = self.response_url.replace(
"/grade_handler",
"/lti_2_0_result_rest_handler/user/{}".format(user))
score = float(grade)
if 0 <= score <= 1.0:
body = json.dumps({
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type": "Result",
"resultScore": score,
"comment": comment
})
ret = post_message2(self._consumers(), self.key, lti2_url, body,
method='PUT',
content_type=content_type)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False
|
mitodl/pylti | pylti/common.py | LTIOAuthServer.lookup_consumer | python | def lookup_consumer(self, key):
if not self.consumers:
log.critical(("No consumers defined in settings."
"Have you created a configuration file?"))
return None
consumer = self.consumers.get(key)
if not consumer:
log.info("Did not find consumer, using key: %s ", key)
return None
secret = consumer.get('secret', None)
if not secret:
log.critical(('Consumer %s, is missing secret'
'in settings file, and needs correction.'), key)
return None
return oauth2.Consumer(key, secret) | Search through keys | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L75-L94 | null | class LTIOAuthServer(oauth2.Server):
"""
Largely taken from reference implementation
for app engine at https://code.google.com/p/ims-dev/
"""
def __init__(self, consumers, signature_methods=None):
"""
Create OAuth server
"""
super(LTIOAuthServer, self).__init__(signature_methods)
self.consumers = consumers
def lookup_cert(self, key):
"""
Search through keys
"""
if not self.consumers:
log.critical(("No consumers defined in settings."
"Have you created a configuration file?"))
return None
consumer = self.consumers.get(key)
if not consumer:
log.info("Did not find consumer, using key: %s ", key)
return None
cert = consumer.get('cert', None)
return cert
|
mitodl/pylti | pylti/common.py | LTIOAuthServer.lookup_cert | python | def lookup_cert(self, key):
if not self.consumers:
log.critical(("No consumers defined in settings."
"Have you created a configuration file?"))
return None
consumer = self.consumers.get(key)
if not consumer:
log.info("Did not find consumer, using key: %s ", key)
return None
cert = consumer.get('cert', None)
return cert | Search through keys | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L96-L110 | null | class LTIOAuthServer(oauth2.Server):
"""
Largely taken from reference implementation
for app engine at https://code.google.com/p/ims-dev/
"""
def __init__(self, consumers, signature_methods=None):
"""
Create OAuth server
"""
super(LTIOAuthServer, self).__init__(signature_methods)
self.consumers = consumers
def lookup_consumer(self, key):
"""
Search through keys
"""
if not self.consumers:
log.critical(("No consumers defined in settings."
"Have you created a configuration file?"))
return None
consumer = self.consumers.get(key)
if not consumer:
log.info("Did not find consumer, using key: %s ", key)
return None
secret = consumer.get('secret', None)
if not secret:
log.critical(('Consumer %s, is missing secret'
'in settings file, and needs correction.'), key)
return None
return oauth2.Consumer(key, secret)
|
mitodl/pylti | pylti/common.py | SignatureMethod_HMAC_SHA1_Unicode.check | python | def check(self, request, consumer, token, signature):
built = self.sign(request, consumer, token)
if isinstance(signature, STRING_TYPES):
signature = signature.encode("utf8")
return built == signature | Returns whether the given signature is the correct signature for
the given consumer and token signing the given request. | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L364-L372 | null | class SignatureMethod_HMAC_SHA1_Unicode(oauth2.SignatureMethod_HMAC_SHA1):
"""
Temporary workaround for
https://github.com/joestump/python-oauth2/issues/207
Original code is Copyright (c) 2007 Leah Culver, MIT license.
"""
|
mitodl/pylti | pylti/common.py | LTIBase.name | python | def name(self): # pylint: disable=no-self-use
if 'lis_person_sourcedid' in self.session:
return self.session['lis_person_sourcedid']
elif 'lis_person_contact_email_primary' in self.session:
return self.session['lis_person_contact_email_primary']
elif 'user_id' in self.session:
return self.session['user_id']
else:
return '' | Name returns user's name or user's email or user_id
:return: best guess of name to use to greet user | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L473-L485 | null | class LTIBase(object):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.lti_args = lti_args
self.lti_kwargs = lti_kwargs
self.nickname = self.name
@property
def verify(self):
"""
Verify if LTI request is valid, validation
depends on @lti wrapper arguments
:raises: LTIException
"""
log.debug('verify request=%s', self.lti_kwargs.get('request'))
if self.lti_kwargs.get('request') == 'session':
self._verify_session()
elif self.lti_kwargs.get('request') == 'initial':
self.verify_request()
elif self.lti_kwargs.get('request') == 'any':
self._verify_any()
else:
raise LTIException("Unknown request type")
return True
@property
def user_id(self): # pylint: disable=no-self-use
"""
Returns user_id as provided by LTI
:return: user_id
"""
return self.session['user_id']
@property
def key(self): # pylint: disable=no-self-use
"""
OAuth Consumer Key
:return: key
"""
return self.session['oauth_consumer_key']
@staticmethod
def message_identifier_id():
"""
Message identifier to use for XML callback
:return: non-empty string
"""
return "edX_fix"
@property
def lis_result_sourcedid(self): # pylint: disable=no-self-use
"""
lis_result_sourcedid to use for XML callback
:return: LTI lis_result_sourcedid
"""
return self.session['lis_result_sourcedid']
@property
def role(self): # pylint: disable=no-self-use
"""
LTI roles
:return: roles
"""
return self.session.get('roles')
@staticmethod
def is_role(self, role):
"""
Verify if user is in role
:param: role: role to verify against
:return: if user is in role
:exception: LTIException if role is unknown
"""
log.debug("is_role %s", role)
roles = self.session['roles'].split(',')
if role in LTI_ROLES:
role_list = LTI_ROLES[role]
# find the intersection of the roles
roles = set(role_list) & set(roles)
is_user_role_there = len(roles) >= 1
log.debug(
"is_role roles_list=%s role=%s in list=%s", role_list,
roles, is_user_role_there
)
return is_user_role_there
else:
raise LTIException("Unknown role {}.".format(role))
def _check_role(self):
"""
Check that user is in role specified as wrapper attribute
:exception: LTIRoleException if user is not in roles
"""
role = u'any'
if 'role' in self.lti_kwargs:
role = self.lti_kwargs['role']
log.debug(
"check_role lti_role=%s decorator_role=%s", self.role, role
)
if not (role == u'any' or self.is_role(self, role)):
raise LTIRoleException('Not authorized.')
def post_grade(self, grade):
"""
Post grade to LTI consumer using XML
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed
"""
message_identifier_id = self.message_identifier_id()
operation = 'replaceResult'
lis_result_sourcedid = self.lis_result_sourcedid
# # edX devbox fix
score = float(grade)
if 0 <= score <= 1.0:
xml = generate_request_xml(
message_identifier_id, operation, lis_result_sourcedid,
score)
ret = post_message(self._consumers(), self.key,
self.response_url, xml)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False
def post_grade2(self, grade, user=None, comment=''):
"""
Post grade to LTI consumer using REST/JSON
URL munging will is related to:
https://openedx.atlassian.net/browse/PLAT-281
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed
"""
content_type = 'application/vnd.ims.lis.v2.result+json'
if user is None:
user = self.user_id
lti2_url = self.response_url.replace(
"/grade_handler",
"/lti_2_0_result_rest_handler/user/{}".format(user))
score = float(grade)
if 0 <= score <= 1.0:
body = json.dumps({
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type": "Result",
"resultScore": score,
"comment": comment
})
ret = post_message2(self._consumers(), self.key, lti2_url, body,
method='PUT',
content_type=content_type)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False
|
mitodl/pylti | pylti/common.py | LTIBase.verify | python | def verify(self):
log.debug('verify request=%s', self.lti_kwargs.get('request'))
if self.lti_kwargs.get('request') == 'session':
self._verify_session()
elif self.lti_kwargs.get('request') == 'initial':
self.verify_request()
elif self.lti_kwargs.get('request') == 'any':
self._verify_any()
else:
raise LTIException("Unknown request type")
return True | Verify if LTI request is valid, validation
depends on @lti wrapper arguments
:raises: LTIException | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L487-L503 | null | class LTIBase(object):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.lti_args = lti_args
self.lti_kwargs = lti_kwargs
self.nickname = self.name
@property
def name(self): # pylint: disable=no-self-use
"""
Name returns user's name or user's email or user_id
:return: best guess of name to use to greet user
"""
if 'lis_person_sourcedid' in self.session:
return self.session['lis_person_sourcedid']
elif 'lis_person_contact_email_primary' in self.session:
return self.session['lis_person_contact_email_primary']
elif 'user_id' in self.session:
return self.session['user_id']
else:
return ''
@property
def user_id(self): # pylint: disable=no-self-use
"""
Returns user_id as provided by LTI
:return: user_id
"""
return self.session['user_id']
@property
def key(self): # pylint: disable=no-self-use
"""
OAuth Consumer Key
:return: key
"""
return self.session['oauth_consumer_key']
@staticmethod
def message_identifier_id():
"""
Message identifier to use for XML callback
:return: non-empty string
"""
return "edX_fix"
@property
def lis_result_sourcedid(self): # pylint: disable=no-self-use
"""
lis_result_sourcedid to use for XML callback
:return: LTI lis_result_sourcedid
"""
return self.session['lis_result_sourcedid']
@property
def role(self): # pylint: disable=no-self-use
"""
LTI roles
:return: roles
"""
return self.session.get('roles')
@staticmethod
def is_role(self, role):
"""
Verify if user is in role
:param: role: role to verify against
:return: if user is in role
:exception: LTIException if role is unknown
"""
log.debug("is_role %s", role)
roles = self.session['roles'].split(',')
if role in LTI_ROLES:
role_list = LTI_ROLES[role]
# find the intersection of the roles
roles = set(role_list) & set(roles)
is_user_role_there = len(roles) >= 1
log.debug(
"is_role roles_list=%s role=%s in list=%s", role_list,
roles, is_user_role_there
)
return is_user_role_there
else:
raise LTIException("Unknown role {}.".format(role))
def _check_role(self):
"""
Check that user is in role specified as wrapper attribute
:exception: LTIRoleException if user is not in roles
"""
role = u'any'
if 'role' in self.lti_kwargs:
role = self.lti_kwargs['role']
log.debug(
"check_role lti_role=%s decorator_role=%s", self.role, role
)
if not (role == u'any' or self.is_role(self, role)):
raise LTIRoleException('Not authorized.')
def post_grade(self, grade):
"""
Post grade to LTI consumer using XML
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed
"""
message_identifier_id = self.message_identifier_id()
operation = 'replaceResult'
lis_result_sourcedid = self.lis_result_sourcedid
# # edX devbox fix
score = float(grade)
if 0 <= score <= 1.0:
xml = generate_request_xml(
message_identifier_id, operation, lis_result_sourcedid,
score)
ret = post_message(self._consumers(), self.key,
self.response_url, xml)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False
def post_grade2(self, grade, user=None, comment=''):
"""
Post grade to LTI consumer using REST/JSON
URL munging will is related to:
https://openedx.atlassian.net/browse/PLAT-281
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed
"""
content_type = 'application/vnd.ims.lis.v2.result+json'
if user is None:
user = self.user_id
lti2_url = self.response_url.replace(
"/grade_handler",
"/lti_2_0_result_rest_handler/user/{}".format(user))
score = float(grade)
if 0 <= score <= 1.0:
body = json.dumps({
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type": "Result",
"resultScore": score,
"comment": comment
})
ret = post_message2(self._consumers(), self.key, lti2_url, body,
method='PUT',
content_type=content_type)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False
|
mitodl/pylti | pylti/common.py | LTIBase.is_role | python | def is_role(self, role):
log.debug("is_role %s", role)
roles = self.session['roles'].split(',')
if role in LTI_ROLES:
role_list = LTI_ROLES[role]
# find the intersection of the roles
roles = set(role_list) & set(roles)
is_user_role_there = len(roles) >= 1
log.debug(
"is_role roles_list=%s role=%s in list=%s", role_list,
roles, is_user_role_there
)
return is_user_role_there
else:
raise LTIException("Unknown role {}.".format(role)) | Verify if user is in role
:param: role: role to verify against
:return: if user is in role
:exception: LTIException if role is unknown | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L550-L571 | null | class LTIBase(object):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.lti_args = lti_args
self.lti_kwargs = lti_kwargs
self.nickname = self.name
@property
def name(self): # pylint: disable=no-self-use
"""
Name returns user's name or user's email or user_id
:return: best guess of name to use to greet user
"""
if 'lis_person_sourcedid' in self.session:
return self.session['lis_person_sourcedid']
elif 'lis_person_contact_email_primary' in self.session:
return self.session['lis_person_contact_email_primary']
elif 'user_id' in self.session:
return self.session['user_id']
else:
return ''
def verify(self):
"""
Verify if LTI request is valid, validation
depends on @lti wrapper arguments
:raises: LTIException
"""
log.debug('verify request=%s', self.lti_kwargs.get('request'))
if self.lti_kwargs.get('request') == 'session':
self._verify_session()
elif self.lti_kwargs.get('request') == 'initial':
self.verify_request()
elif self.lti_kwargs.get('request') == 'any':
self._verify_any()
else:
raise LTIException("Unknown request type")
return True
@property
def user_id(self): # pylint: disable=no-self-use
"""
Returns user_id as provided by LTI
:return: user_id
"""
return self.session['user_id']
@property
def key(self): # pylint: disable=no-self-use
"""
OAuth Consumer Key
:return: key
"""
return self.session['oauth_consumer_key']
@staticmethod
def message_identifier_id():
"""
Message identifier to use for XML callback
:return: non-empty string
"""
return "edX_fix"
@property
def lis_result_sourcedid(self): # pylint: disable=no-self-use
"""
lis_result_sourcedid to use for XML callback
:return: LTI lis_result_sourcedid
"""
return self.session['lis_result_sourcedid']
@property
def role(self): # pylint: disable=no-self-use
"""
LTI roles
:return: roles
"""
return self.session.get('roles')
@staticmethod
def _check_role(self):
"""
Check that user is in role specified as wrapper attribute
:exception: LTIRoleException if user is not in roles
"""
role = u'any'
if 'role' in self.lti_kwargs:
role = self.lti_kwargs['role']
log.debug(
"check_role lti_role=%s decorator_role=%s", self.role, role
)
if not (role == u'any' or self.is_role(self, role)):
raise LTIRoleException('Not authorized.')
def post_grade(self, grade):
"""
Post grade to LTI consumer using XML
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed
"""
message_identifier_id = self.message_identifier_id()
operation = 'replaceResult'
lis_result_sourcedid = self.lis_result_sourcedid
# # edX devbox fix
score = float(grade)
if 0 <= score <= 1.0:
xml = generate_request_xml(
message_identifier_id, operation, lis_result_sourcedid,
score)
ret = post_message(self._consumers(), self.key,
self.response_url, xml)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False
def post_grade2(self, grade, user=None, comment=''):
"""
Post grade to LTI consumer using REST/JSON
URL munging will is related to:
https://openedx.atlassian.net/browse/PLAT-281
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed
"""
content_type = 'application/vnd.ims.lis.v2.result+json'
if user is None:
user = self.user_id
lti2_url = self.response_url.replace(
"/grade_handler",
"/lti_2_0_result_rest_handler/user/{}".format(user))
score = float(grade)
if 0 <= score <= 1.0:
body = json.dumps({
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type": "Result",
"resultScore": score,
"comment": comment
})
ret = post_message2(self._consumers(), self.key, lti2_url, body,
method='PUT',
content_type=content_type)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False
|
mitodl/pylti | pylti/common.py | LTIBase._check_role | python | def _check_role(self):
role = u'any'
if 'role' in self.lti_kwargs:
role = self.lti_kwargs['role']
log.debug(
"check_role lti_role=%s decorator_role=%s", self.role, role
)
if not (role == u'any' or self.is_role(self, role)):
raise LTIRoleException('Not authorized.') | Check that user is in role specified as wrapper attribute
:exception: LTIRoleException if user is not in roles | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L573-L586 | [
"def is_role(self, role):\n \"\"\"\n Verify if user is in role\n\n :param: role: role to verify against\n :return: if user is in role\n :exception: LTIException if role is unknown\n \"\"\"\n log.debug(\"is_role %s\", role)\n roles = self.session['roles'].split(',')\n if role in LTI_ROLES:\n role_list = LTI_ROLES[role]\n # find the intersection of the roles\n roles = set(role_list) & set(roles)\n is_user_role_there = len(roles) >= 1\n log.debug(\n \"is_role roles_list=%s role=%s in list=%s\", role_list,\n roles, is_user_role_there\n )\n return is_user_role_there\n else:\n raise LTIException(\"Unknown role {}.\".format(role))\n"
] | class LTIBase(object):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.lti_args = lti_args
self.lti_kwargs = lti_kwargs
self.nickname = self.name
@property
def name(self): # pylint: disable=no-self-use
"""
Name returns user's name or user's email or user_id
:return: best guess of name to use to greet user
"""
if 'lis_person_sourcedid' in self.session:
return self.session['lis_person_sourcedid']
elif 'lis_person_contact_email_primary' in self.session:
return self.session['lis_person_contact_email_primary']
elif 'user_id' in self.session:
return self.session['user_id']
else:
return ''
def verify(self):
"""
Verify if LTI request is valid, validation
depends on @lti wrapper arguments
:raises: LTIException
"""
log.debug('verify request=%s', self.lti_kwargs.get('request'))
if self.lti_kwargs.get('request') == 'session':
self._verify_session()
elif self.lti_kwargs.get('request') == 'initial':
self.verify_request()
elif self.lti_kwargs.get('request') == 'any':
self._verify_any()
else:
raise LTIException("Unknown request type")
return True
@property
def user_id(self): # pylint: disable=no-self-use
"""
Returns user_id as provided by LTI
:return: user_id
"""
return self.session['user_id']
@property
def key(self): # pylint: disable=no-self-use
"""
OAuth Consumer Key
:return: key
"""
return self.session['oauth_consumer_key']
@staticmethod
def message_identifier_id():
"""
Message identifier to use for XML callback
:return: non-empty string
"""
return "edX_fix"
@property
def lis_result_sourcedid(self): # pylint: disable=no-self-use
"""
lis_result_sourcedid to use for XML callback
:return: LTI lis_result_sourcedid
"""
return self.session['lis_result_sourcedid']
@property
def role(self): # pylint: disable=no-self-use
"""
LTI roles
:return: roles
"""
return self.session.get('roles')
@staticmethod
def is_role(self, role):
"""
Verify if user is in role
:param: role: role to verify against
:return: if user is in role
:exception: LTIException if role is unknown
"""
log.debug("is_role %s", role)
roles = self.session['roles'].split(',')
if role in LTI_ROLES:
role_list = LTI_ROLES[role]
# find the intersection of the roles
roles = set(role_list) & set(roles)
is_user_role_there = len(roles) >= 1
log.debug(
"is_role roles_list=%s role=%s in list=%s", role_list,
roles, is_user_role_there
)
return is_user_role_there
else:
raise LTIException("Unknown role {}.".format(role))
def post_grade(self, grade):
"""
Post grade to LTI consumer using XML
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed
"""
message_identifier_id = self.message_identifier_id()
operation = 'replaceResult'
lis_result_sourcedid = self.lis_result_sourcedid
# # edX devbox fix
score = float(grade)
if 0 <= score <= 1.0:
xml = generate_request_xml(
message_identifier_id, operation, lis_result_sourcedid,
score)
ret = post_message(self._consumers(), self.key,
self.response_url, xml)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False
def post_grade2(self, grade, user=None, comment=''):
"""
Post grade to LTI consumer using REST/JSON
URL munging will is related to:
https://openedx.atlassian.net/browse/PLAT-281
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed
"""
content_type = 'application/vnd.ims.lis.v2.result+json'
if user is None:
user = self.user_id
lti2_url = self.response_url.replace(
"/grade_handler",
"/lti_2_0_result_rest_handler/user/{}".format(user))
score = float(grade)
if 0 <= score <= 1.0:
body = json.dumps({
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type": "Result",
"resultScore": score,
"comment": comment
})
ret = post_message2(self._consumers(), self.key, lti2_url, body,
method='PUT',
content_type=content_type)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False
|
mitodl/pylti | pylti/common.py | LTIBase.post_grade | python | def post_grade(self, grade):
message_identifier_id = self.message_identifier_id()
operation = 'replaceResult'
lis_result_sourcedid = self.lis_result_sourcedid
# # edX devbox fix
score = float(grade)
if 0 <= score <= 1.0:
xml = generate_request_xml(
message_identifier_id, operation, lis_result_sourcedid,
score)
ret = post_message(self._consumers(), self.key,
self.response_url, xml)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False | Post grade to LTI consumer using XML
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L588-L611 | [
"def message_identifier_id():\n \"\"\"\n Message identifier to use for XML callback\n\n :return: non-empty string\n \"\"\"\n return \"edX_fix\"\n"
] | class LTIBase(object):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.lti_args = lti_args
self.lti_kwargs = lti_kwargs
self.nickname = self.name
@property
def name(self): # pylint: disable=no-self-use
"""
Name returns user's name or user's email or user_id
:return: best guess of name to use to greet user
"""
if 'lis_person_sourcedid' in self.session:
return self.session['lis_person_sourcedid']
elif 'lis_person_contact_email_primary' in self.session:
return self.session['lis_person_contact_email_primary']
elif 'user_id' in self.session:
return self.session['user_id']
else:
return ''
def verify(self):
"""
Verify if LTI request is valid, validation
depends on @lti wrapper arguments
:raises: LTIException
"""
log.debug('verify request=%s', self.lti_kwargs.get('request'))
if self.lti_kwargs.get('request') == 'session':
self._verify_session()
elif self.lti_kwargs.get('request') == 'initial':
self.verify_request()
elif self.lti_kwargs.get('request') == 'any':
self._verify_any()
else:
raise LTIException("Unknown request type")
return True
@property
def user_id(self): # pylint: disable=no-self-use
"""
Returns user_id as provided by LTI
:return: user_id
"""
return self.session['user_id']
@property
def key(self): # pylint: disable=no-self-use
"""
OAuth Consumer Key
:return: key
"""
return self.session['oauth_consumer_key']
@staticmethod
def message_identifier_id():
"""
Message identifier to use for XML callback
:return: non-empty string
"""
return "edX_fix"
@property
def lis_result_sourcedid(self): # pylint: disable=no-self-use
"""
lis_result_sourcedid to use for XML callback
:return: LTI lis_result_sourcedid
"""
return self.session['lis_result_sourcedid']
@property
def role(self): # pylint: disable=no-self-use
"""
LTI roles
:return: roles
"""
return self.session.get('roles')
@staticmethod
def is_role(self, role):
"""
Verify if user is in role
:param: role: role to verify against
:return: if user is in role
:exception: LTIException if role is unknown
"""
log.debug("is_role %s", role)
roles = self.session['roles'].split(',')
if role in LTI_ROLES:
role_list = LTI_ROLES[role]
# find the intersection of the roles
roles = set(role_list) & set(roles)
is_user_role_there = len(roles) >= 1
log.debug(
"is_role roles_list=%s role=%s in list=%s", role_list,
roles, is_user_role_there
)
return is_user_role_there
else:
raise LTIException("Unknown role {}.".format(role))
def _check_role(self):
"""
Check that user is in role specified as wrapper attribute
:exception: LTIRoleException if user is not in roles
"""
role = u'any'
if 'role' in self.lti_kwargs:
role = self.lti_kwargs['role']
log.debug(
"check_role lti_role=%s decorator_role=%s", self.role, role
)
if not (role == u'any' or self.is_role(self, role)):
raise LTIRoleException('Not authorized.')
def post_grade2(self, grade, user=None, comment=''):
"""
Post grade to LTI consumer using REST/JSON
URL munging will is related to:
https://openedx.atlassian.net/browse/PLAT-281
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed
"""
content_type = 'application/vnd.ims.lis.v2.result+json'
if user is None:
user = self.user_id
lti2_url = self.response_url.replace(
"/grade_handler",
"/lti_2_0_result_rest_handler/user/{}".format(user))
score = float(grade)
if 0 <= score <= 1.0:
body = json.dumps({
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type": "Result",
"resultScore": score,
"comment": comment
})
ret = post_message2(self._consumers(), self.key, lti2_url, body,
method='PUT',
content_type=content_type)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False
|
mitodl/pylti | pylti/common.py | LTIBase.post_grade2 | python | def post_grade2(self, grade, user=None, comment=''):
content_type = 'application/vnd.ims.lis.v2.result+json'
if user is None:
user = self.user_id
lti2_url = self.response_url.replace(
"/grade_handler",
"/lti_2_0_result_rest_handler/user/{}".format(user))
score = float(grade)
if 0 <= score <= 1.0:
body = json.dumps({
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type": "Result",
"resultScore": score,
"comment": comment
})
ret = post_message2(self._consumers(), self.key, lti2_url, body,
method='PUT',
content_type=content_type)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False | Post grade to LTI consumer using REST/JSON
URL munging will is related to:
https://openedx.atlassian.net/browse/PLAT-281
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/common.py#L613-L644 | null | class LTIBase(object):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.lti_args = lti_args
self.lti_kwargs = lti_kwargs
self.nickname = self.name
@property
def name(self): # pylint: disable=no-self-use
"""
Name returns user's name or user's email or user_id
:return: best guess of name to use to greet user
"""
if 'lis_person_sourcedid' in self.session:
return self.session['lis_person_sourcedid']
elif 'lis_person_contact_email_primary' in self.session:
return self.session['lis_person_contact_email_primary']
elif 'user_id' in self.session:
return self.session['user_id']
else:
return ''
def verify(self):
"""
Verify if LTI request is valid, validation
depends on @lti wrapper arguments
:raises: LTIException
"""
log.debug('verify request=%s', self.lti_kwargs.get('request'))
if self.lti_kwargs.get('request') == 'session':
self._verify_session()
elif self.lti_kwargs.get('request') == 'initial':
self.verify_request()
elif self.lti_kwargs.get('request') == 'any':
self._verify_any()
else:
raise LTIException("Unknown request type")
return True
@property
def user_id(self): # pylint: disable=no-self-use
"""
Returns user_id as provided by LTI
:return: user_id
"""
return self.session['user_id']
@property
def key(self): # pylint: disable=no-self-use
"""
OAuth Consumer Key
:return: key
"""
return self.session['oauth_consumer_key']
@staticmethod
def message_identifier_id():
"""
Message identifier to use for XML callback
:return: non-empty string
"""
return "edX_fix"
@property
def lis_result_sourcedid(self): # pylint: disable=no-self-use
"""
lis_result_sourcedid to use for XML callback
:return: LTI lis_result_sourcedid
"""
return self.session['lis_result_sourcedid']
@property
def role(self): # pylint: disable=no-self-use
"""
LTI roles
:return: roles
"""
return self.session.get('roles')
@staticmethod
def is_role(self, role):
"""
Verify if user is in role
:param: role: role to verify against
:return: if user is in role
:exception: LTIException if role is unknown
"""
log.debug("is_role %s", role)
roles = self.session['roles'].split(',')
if role in LTI_ROLES:
role_list = LTI_ROLES[role]
# find the intersection of the roles
roles = set(role_list) & set(roles)
is_user_role_there = len(roles) >= 1
log.debug(
"is_role roles_list=%s role=%s in list=%s", role_list,
roles, is_user_role_there
)
return is_user_role_there
else:
raise LTIException("Unknown role {}.".format(role))
def _check_role(self):
"""
Check that user is in role specified as wrapper attribute
:exception: LTIRoleException if user is not in roles
"""
role = u'any'
if 'role' in self.lti_kwargs:
role = self.lti_kwargs['role']
log.debug(
"check_role lti_role=%s decorator_role=%s", self.role, role
)
if not (role == u'any' or self.is_role(self, role)):
raise LTIRoleException('Not authorized.')
def post_grade(self, grade):
"""
Post grade to LTI consumer using XML
:param: grade: 0 <= grade <= 1
:return: True if post successful and grade valid
:exception: LTIPostMessageException if call failed
"""
message_identifier_id = self.message_identifier_id()
operation = 'replaceResult'
lis_result_sourcedid = self.lis_result_sourcedid
# # edX devbox fix
score = float(grade)
if 0 <= score <= 1.0:
xml = generate_request_xml(
message_identifier_id, operation, lis_result_sourcedid,
score)
ret = post_message(self._consumers(), self.key,
self.response_url, xml)
if not ret:
raise LTIPostMessageException("Post Message Failed")
return True
return False
|
mitodl/pylti | pylti/flask.py | lti | python | def lti(app=None, request='any', error=default_error, role='any',
*lti_args, **lti_kwargs):
def _lti(function):
"""
Inner LTI decorator
:param: function:
:return:
"""
@wraps(function)
def wrapper(*args, **kwargs):
"""
Pass LTI reference to function or return error.
"""
try:
the_lti = LTI(lti_args, lti_kwargs)
the_lti.verify()
the_lti._check_role() # pylint: disable=protected-access
kwargs['lti'] = the_lti
return function(*args, **kwargs)
except LTIException as lti_exception:
error = lti_kwargs.get('error')
exception = dict()
exception['exception'] = lti_exception
exception['kwargs'] = kwargs
exception['args'] = args
return error(exception=exception)
return wrapper
lti_kwargs['request'] = request
lti_kwargs['error'] = error
lti_kwargs['role'] = role
if (not app) or isinstance(app, Flask):
lti_kwargs['app'] = app
return _lti
else:
# We are wrapping without arguments
lti_kwargs['app'] = None
return _lti(app) | LTI decorator
:param: app - Flask App object (optional).
:py:attr:`flask.current_app` is used if no object is passed in.
:param: error - Callback if LTI throws exception (optional).
:py:attr:`pylti.flask.default_error` is the default.
:param: request - Request type from
:py:attr:`pylti.common.LTI_REQUEST_TYPE`. (default: any)
:param: roles - LTI Role (default: any)
:return: wrapper | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/flask.py#L159-L213 | [
"def _lti(function):\n \"\"\"\n Inner LTI decorator\n\n :param: function:\n :return:\n \"\"\"\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n \"\"\"\n Pass LTI reference to function or return error.\n \"\"\"\n try:\n the_lti = LTI(lti_args, lti_kwargs)\n the_lti.verify()\n the_lti._check_role() # pylint: disable=protected-access\n kwargs['lti'] = the_lti\n return function(*args, **kwargs)\n except LTIException as lti_exception:\n error = lti_kwargs.get('error')\n exception = dict()\n exception['exception'] = lti_exception\n exception['kwargs'] = kwargs\n exception['args'] = args\n return error(exception=exception)\n\n return wrapper\n"
] | # -*- coding: utf-8 -*-
"""
PyLTI decorator implementation for flask framework
"""
from __future__ import absolute_import
from functools import wraps
import logging
from flask import session, current_app, Flask
from flask import request as flask_request
from .common import (
LTI_SESSION_KEY,
LTI_PROPERTY_LIST,
verify_request_common,
default_error,
LTIException,
LTINotInSessionException,
LTIBase
)
log = logging.getLogger('pylti.flask') # pylint: disable=invalid-name
class LTI(LTIBase):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.session = session
LTIBase.__init__(self, lti_args, lti_kwargs)
# Set app to current_app if not specified
if not self.lti_kwargs['app']:
self.lti_kwargs['app'] = current_app
def _consumers(self):
"""
Gets consumer's map from app config
:return: consumers map
"""
app_config = self.lti_kwargs['app'].config
config = app_config.get('PYLTI_CONFIG', dict())
consumers = config.get('consumers', dict())
return consumers
def verify_request(self):
"""
Verify LTI request
:raises: LTIException is request validation failed
"""
if flask_request.method == 'POST':
params = flask_request.form.to_dict()
else:
params = flask_request.args.to_dict()
log.debug(params)
log.debug('verify_request?')
try:
verify_request_common(self._consumers(), flask_request.url,
flask_request.method, flask_request.headers,
params)
log.debug('verify_request success')
# All good to go, store all of the LTI params into a
# session dict for use in views
for prop in LTI_PROPERTY_LIST:
if params.get(prop, None):
log.debug("params %s=%s", prop, params.get(prop, None))
session[prop] = params[prop]
# Set logged in session key
session[LTI_SESSION_KEY] = True
return True
except LTIException:
log.debug('verify_request failed')
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
raise
@property
def response_url(self):
"""
Returns remapped lis_outcome_service_url
uses PYLTI_URL_FIX map to support edX dev-stack
:return: remapped lis_outcome_service_url
"""
url = ""
url = self.session['lis_outcome_service_url']
app_config = self.lti_kwargs['app'].config
urls = app_config.get('PYLTI_URL_FIX', dict())
# url remapping is useful for using devstack
# devstack reports httpS://localhost:8000/ and listens on HTTP
for prefix, mapping in urls.items():
if url.startswith(prefix):
for _from, _to in mapping.items():
url = url.replace(_from, _to)
return url
def _verify_any(self):
    """
    Verify that an initial request has been made, or failing that, that
    the request is in the session.

    :raises: LTIException
    """
    log.debug('verify_any enter')
    # A POST carrying lti_message_type=basic-lti-launch-request is a
    # brand new launch; anything else must already be in the session.
    is_new_launch = False
    if flask_request.method == 'POST':
        form = flask_request.form.to_dict()
        if form.get("lti_message_type", None) == "basic-lti-launch-request":
            is_new_launch = True
            # Scrub authentication left over from a previous launch.
            for prop in LTI_PROPERTY_LIST:
                if form.get(prop, None) and session.get(prop, None):
                    del session[prop]
            session[LTI_SESSION_KEY] = False
    # Both verification paths raise LTIException as necessary.
    if is_new_launch:
        self.verify_request()
    else:
        self._verify_session()
@staticmethod
def _verify_session():
    """
    Verify that an LTI session was already established.

    :raises: LTINotInSessionException when no session is active
    """
    if session.get(LTI_SESSION_KEY, False):
        return
    log.debug('verify_session failed')
    raise LTINotInSessionException('Session expired or unavailable')
@staticmethod
def close_session():
    """Invalidate the current LTI session."""
    # Drop every stored launch parameter, then clear the login flag.
    for prop in LTI_PROPERTY_LIST:
        if session.get(prop):
            del session[prop]
    session[LTI_SESSION_KEY] = False
|
mitodl/pylti | pylti/flask.py | LTI._consumers | python | def _consumers(self):
app_config = self.lti_kwargs['app'].config
config = app_config.get('PYLTI_CONFIG', dict())
consumers = config.get('consumers', dict())
return consumers | Gets consumer's map from app config
:return: consumers map | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/flask.py#L42-L51 | null | class LTI(LTIBase):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.session = session
LTIBase.__init__(self, lti_args, lti_kwargs)
# Set app to current_app if not specified
if not self.lti_kwargs['app']:
self.lti_kwargs['app'] = current_app
def verify_request(self):
"""
Verify LTI request
:raises: LTIException is request validation failed
"""
if flask_request.method == 'POST':
params = flask_request.form.to_dict()
else:
params = flask_request.args.to_dict()
log.debug(params)
log.debug('verify_request?')
try:
verify_request_common(self._consumers(), flask_request.url,
flask_request.method, flask_request.headers,
params)
log.debug('verify_request success')
# All good to go, store all of the LTI params into a
# session dict for use in views
for prop in LTI_PROPERTY_LIST:
if params.get(prop, None):
log.debug("params %s=%s", prop, params.get(prop, None))
session[prop] = params[prop]
# Set logged in session key
session[LTI_SESSION_KEY] = True
return True
except LTIException:
log.debug('verify_request failed')
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
raise
@property
def response_url(self):
"""
Returns remapped lis_outcome_service_url
uses PYLTI_URL_FIX map to support edX dev-stack
:return: remapped lis_outcome_service_url
"""
url = ""
url = self.session['lis_outcome_service_url']
app_config = self.lti_kwargs['app'].config
urls = app_config.get('PYLTI_URL_FIX', dict())
# url remapping is useful for using devstack
# devstack reports httpS://localhost:8000/ and listens on HTTP
for prefix, mapping in urls.items():
if url.startswith(prefix):
for _from, _to in mapping.items():
url = url.replace(_from, _to)
return url
def _verify_any(self):
"""
Verify that an initial request has been made, or failing that, that
the request is in the session
:raises: LTIException
"""
log.debug('verify_any enter')
# Check to see if there is a new LTI launch request incoming
newrequest = False
if flask_request.method == 'POST':
params = flask_request.form.to_dict()
initiation = "basic-lti-launch-request"
if params.get("lti_message_type", None) == initiation:
newrequest = True
# Scrub the session of the old authentication
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
# Attempt the appropriate validation
# Both of these methods raise LTIException as necessary
if newrequest:
self.verify_request()
else:
self._verify_session()
@staticmethod
def _verify_session():
"""
Verify that session was already created
:raises: LTIException
"""
if not session.get(LTI_SESSION_KEY, False):
log.debug('verify_session failed')
raise LTINotInSessionException('Session expired or unavailable')
@staticmethod
def close_session():
"""
Invalidates session
"""
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
|
mitodl/pylti | pylti/flask.py | LTI.verify_request | python | def verify_request(self):
if flask_request.method == 'POST':
params = flask_request.form.to_dict()
else:
params = flask_request.args.to_dict()
log.debug(params)
log.debug('verify_request?')
try:
verify_request_common(self._consumers(), flask_request.url,
flask_request.method, flask_request.headers,
params)
log.debug('verify_request success')
# All good to go, store all of the LTI params into a
# session dict for use in views
for prop in LTI_PROPERTY_LIST:
if params.get(prop, None):
log.debug("params %s=%s", prop, params.get(prop, None))
session[prop] = params[prop]
# Set logged in session key
session[LTI_SESSION_KEY] = True
return True
except LTIException:
log.debug('verify_request failed')
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
raise | Verify LTI request
:raises: LTIException is request validation failed | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/flask.py#L53-L87 | [
"def verify_request_common(consumers, url, method, headers, params):\n \"\"\"\n Verifies that request is valid\n\n :param consumers: consumers from config file\n :param url: request url\n :param method: request method\n :param headers: request headers\n :param params: request params\n :return: is request valid\n \"\"\"\n log.debug(\"consumers %s\", consumers)\n log.debug(\"url %s\", url)\n log.debug(\"method %s\", method)\n log.debug(\"headers %s\", headers)\n log.debug(\"params %s\", params)\n\n oauth_server = LTIOAuthServer(consumers)\n oauth_server.add_signature_method(\n SignatureMethod_PLAINTEXT_Unicode())\n oauth_server.add_signature_method(\n SignatureMethod_HMAC_SHA1_Unicode())\n\n # Check header for SSL before selecting the url\n if headers.get('X-Forwarded-Proto', 'http') == 'https':\n url = url.replace('http:', 'https:', 1)\n\n oauth_request = Request_Fix_Duplicate.from_request(\n method,\n url,\n headers=dict(headers),\n parameters=params\n )\n if not oauth_request:\n log.info('Received non oauth request on oauth protected page')\n raise LTIException('This page requires a valid oauth session '\n 'or request')\n try:\n # pylint: disable=protected-access\n oauth_consumer_key = oauth_request.get_parameter('oauth_consumer_key')\n consumer = oauth_server.lookup_consumer(oauth_consumer_key)\n if not consumer:\n raise oauth2.Error('Invalid consumer.')\n oauth_server.verify_request(oauth_request, consumer, None)\n except oauth2.Error:\n # Rethrow our own for nice error handling (don't print\n # error message as it will contain the key\n raise LTIException(\"OAuth error: Please check your key and secret\")\n\n return True\n",
"def _consumers(self):\n \"\"\"\n Gets consumer's map from app config\n\n :return: consumers map\n \"\"\"\n app_config = self.lti_kwargs['app'].config\n config = app_config.get('PYLTI_CONFIG', dict())\n consumers = config.get('consumers', dict())\n return consumers\n"
] | class LTI(LTIBase):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.session = session
LTIBase.__init__(self, lti_args, lti_kwargs)
# Set app to current_app if not specified
if not self.lti_kwargs['app']:
self.lti_kwargs['app'] = current_app
def _consumers(self):
"""
Gets consumer's map from app config
:return: consumers map
"""
app_config = self.lti_kwargs['app'].config
config = app_config.get('PYLTI_CONFIG', dict())
consumers = config.get('consumers', dict())
return consumers
@property
def response_url(self):
"""
Returns remapped lis_outcome_service_url
uses PYLTI_URL_FIX map to support edX dev-stack
:return: remapped lis_outcome_service_url
"""
url = ""
url = self.session['lis_outcome_service_url']
app_config = self.lti_kwargs['app'].config
urls = app_config.get('PYLTI_URL_FIX', dict())
# url remapping is useful for using devstack
# devstack reports httpS://localhost:8000/ and listens on HTTP
for prefix, mapping in urls.items():
if url.startswith(prefix):
for _from, _to in mapping.items():
url = url.replace(_from, _to)
return url
def _verify_any(self):
"""
Verify that an initial request has been made, or failing that, that
the request is in the session
:raises: LTIException
"""
log.debug('verify_any enter')
# Check to see if there is a new LTI launch request incoming
newrequest = False
if flask_request.method == 'POST':
params = flask_request.form.to_dict()
initiation = "basic-lti-launch-request"
if params.get("lti_message_type", None) == initiation:
newrequest = True
# Scrub the session of the old authentication
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
# Attempt the appropriate validation
# Both of these methods raise LTIException as necessary
if newrequest:
self.verify_request()
else:
self._verify_session()
@staticmethod
def _verify_session():
"""
Verify that session was already created
:raises: LTIException
"""
if not session.get(LTI_SESSION_KEY, False):
log.debug('verify_session failed')
raise LTINotInSessionException('Session expired or unavailable')
@staticmethod
def close_session():
"""
Invalidates session
"""
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
|
mitodl/pylti | pylti/flask.py | LTI.response_url | python | def response_url(self):
url = ""
url = self.session['lis_outcome_service_url']
app_config = self.lti_kwargs['app'].config
urls = app_config.get('PYLTI_URL_FIX', dict())
# url remapping is useful for using devstack
# devstack reports httpS://localhost:8000/ and listens on HTTP
for prefix, mapping in urls.items():
if url.startswith(prefix):
for _from, _to in mapping.items():
url = url.replace(_from, _to)
return url | Returns remapped lis_outcome_service_url
uses PYLTI_URL_FIX map to support edX dev-stack
:return: remapped lis_outcome_service_url | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/flask.py#L90-L107 | null | class LTI(LTIBase):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.session = session
LTIBase.__init__(self, lti_args, lti_kwargs)
# Set app to current_app if not specified
if not self.lti_kwargs['app']:
self.lti_kwargs['app'] = current_app
def _consumers(self):
"""
Gets consumer's map from app config
:return: consumers map
"""
app_config = self.lti_kwargs['app'].config
config = app_config.get('PYLTI_CONFIG', dict())
consumers = config.get('consumers', dict())
return consumers
def verify_request(self):
"""
Verify LTI request
:raises: LTIException is request validation failed
"""
if flask_request.method == 'POST':
params = flask_request.form.to_dict()
else:
params = flask_request.args.to_dict()
log.debug(params)
log.debug('verify_request?')
try:
verify_request_common(self._consumers(), flask_request.url,
flask_request.method, flask_request.headers,
params)
log.debug('verify_request success')
# All good to go, store all of the LTI params into a
# session dict for use in views
for prop in LTI_PROPERTY_LIST:
if params.get(prop, None):
log.debug("params %s=%s", prop, params.get(prop, None))
session[prop] = params[prop]
# Set logged in session key
session[LTI_SESSION_KEY] = True
return True
except LTIException:
log.debug('verify_request failed')
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
raise
@property
def _verify_any(self):
"""
Verify that an initial request has been made, or failing that, that
the request is in the session
:raises: LTIException
"""
log.debug('verify_any enter')
# Check to see if there is a new LTI launch request incoming
newrequest = False
if flask_request.method == 'POST':
params = flask_request.form.to_dict()
initiation = "basic-lti-launch-request"
if params.get("lti_message_type", None) == initiation:
newrequest = True
# Scrub the session of the old authentication
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
# Attempt the appropriate validation
# Both of these methods raise LTIException as necessary
if newrequest:
self.verify_request()
else:
self._verify_session()
@staticmethod
def _verify_session():
"""
Verify that session was already created
:raises: LTIException
"""
if not session.get(LTI_SESSION_KEY, False):
log.debug('verify_session failed')
raise LTINotInSessionException('Session expired or unavailable')
@staticmethod
def close_session():
"""
Invalidates session
"""
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
|
mitodl/pylti | pylti/flask.py | LTI._verify_any | python | def _verify_any(self):
log.debug('verify_any enter')
# Check to see if there is a new LTI launch request incoming
newrequest = False
if flask_request.method == 'POST':
params = flask_request.form.to_dict()
initiation = "basic-lti-launch-request"
if params.get("lti_message_type", None) == initiation:
newrequest = True
# Scrub the session of the old authentication
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
# Attempt the appropriate validation
# Both of these methods raise LTIException as necessary
if newrequest:
self.verify_request()
else:
self._verify_session() | Verify that an initial request has been made, or failing that, that
the request is in the session
:raises: LTIException | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/flask.py#L109-L135 | null | class LTI(LTIBase):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.session = session
LTIBase.__init__(self, lti_args, lti_kwargs)
# Set app to current_app if not specified
if not self.lti_kwargs['app']:
self.lti_kwargs['app'] = current_app
def _consumers(self):
"""
Gets consumer's map from app config
:return: consumers map
"""
app_config = self.lti_kwargs['app'].config
config = app_config.get('PYLTI_CONFIG', dict())
consumers = config.get('consumers', dict())
return consumers
def verify_request(self):
"""
Verify LTI request
:raises: LTIException is request validation failed
"""
if flask_request.method == 'POST':
params = flask_request.form.to_dict()
else:
params = flask_request.args.to_dict()
log.debug(params)
log.debug('verify_request?')
try:
verify_request_common(self._consumers(), flask_request.url,
flask_request.method, flask_request.headers,
params)
log.debug('verify_request success')
# All good to go, store all of the LTI params into a
# session dict for use in views
for prop in LTI_PROPERTY_LIST:
if params.get(prop, None):
log.debug("params %s=%s", prop, params.get(prop, None))
session[prop] = params[prop]
# Set logged in session key
session[LTI_SESSION_KEY] = True
return True
except LTIException:
log.debug('verify_request failed')
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
raise
@property
def response_url(self):
"""
Returns remapped lis_outcome_service_url
uses PYLTI_URL_FIX map to support edX dev-stack
:return: remapped lis_outcome_service_url
"""
url = ""
url = self.session['lis_outcome_service_url']
app_config = self.lti_kwargs['app'].config
urls = app_config.get('PYLTI_URL_FIX', dict())
# url remapping is useful for using devstack
# devstack reports httpS://localhost:8000/ and listens on HTTP
for prefix, mapping in urls.items():
if url.startswith(prefix):
for _from, _to in mapping.items():
url = url.replace(_from, _to)
return url
@staticmethod
def _verify_session():
"""
Verify that session was already created
:raises: LTIException
"""
if not session.get(LTI_SESSION_KEY, False):
log.debug('verify_session failed')
raise LTINotInSessionException('Session expired or unavailable')
@staticmethod
def close_session():
"""
Invalidates session
"""
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
|
mitodl/pylti | pylti/flask.py | LTI.close_session | python | def close_session():
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False | Invalidates session | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/flask.py#L149-L156 | null | class LTI(LTIBase):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
self.session = session
LTIBase.__init__(self, lti_args, lti_kwargs)
# Set app to current_app if not specified
if not self.lti_kwargs['app']:
self.lti_kwargs['app'] = current_app
def _consumers(self):
"""
Gets consumer's map from app config
:return: consumers map
"""
app_config = self.lti_kwargs['app'].config
config = app_config.get('PYLTI_CONFIG', dict())
consumers = config.get('consumers', dict())
return consumers
def verify_request(self):
"""
Verify LTI request
:raises: LTIException is request validation failed
"""
if flask_request.method == 'POST':
params = flask_request.form.to_dict()
else:
params = flask_request.args.to_dict()
log.debug(params)
log.debug('verify_request?')
try:
verify_request_common(self._consumers(), flask_request.url,
flask_request.method, flask_request.headers,
params)
log.debug('verify_request success')
# All good to go, store all of the LTI params into a
# session dict for use in views
for prop in LTI_PROPERTY_LIST:
if params.get(prop, None):
log.debug("params %s=%s", prop, params.get(prop, None))
session[prop] = params[prop]
# Set logged in session key
session[LTI_SESSION_KEY] = True
return True
except LTIException:
log.debug('verify_request failed')
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
raise
@property
def response_url(self):
"""
Returns remapped lis_outcome_service_url
uses PYLTI_URL_FIX map to support edX dev-stack
:return: remapped lis_outcome_service_url
"""
url = ""
url = self.session['lis_outcome_service_url']
app_config = self.lti_kwargs['app'].config
urls = app_config.get('PYLTI_URL_FIX', dict())
# url remapping is useful for using devstack
# devstack reports httpS://localhost:8000/ and listens on HTTP
for prefix, mapping in urls.items():
if url.startswith(prefix):
for _from, _to in mapping.items():
url = url.replace(_from, _to)
return url
def _verify_any(self):
"""
Verify that an initial request has been made, or failing that, that
the request is in the session
:raises: LTIException
"""
log.debug('verify_any enter')
# Check to see if there is a new LTI launch request incoming
newrequest = False
if flask_request.method == 'POST':
params = flask_request.form.to_dict()
initiation = "basic-lti-launch-request"
if params.get("lti_message_type", None) == initiation:
newrequest = True
# Scrub the session of the old authentication
for prop in LTI_PROPERTY_LIST:
if session.get(prop, None):
del session[prop]
session[LTI_SESSION_KEY] = False
# Attempt the appropriate validation
# Both of these methods raise LTIException as necessary
if newrequest:
self.verify_request()
else:
self._verify_session()
@staticmethod
def _verify_session():
"""
Verify that session was already created
:raises: LTIException
"""
if not session.get(LTI_SESSION_KEY, False):
log.debug('verify_session failed')
raise LTINotInSessionException('Session expired or unavailable')
@staticmethod
|
mitodl/pylti | pylti/chalice.py | LTI._consumers | python | def _consumers(self):
consumers = {}
for env in os.environ:
if env.startswith('CONSUMER_KEY_SECRET_'):
key = env[20:] # Strip off the CONSUMER_KEY_SECRET_ prefix
# TODO: remove below after live test
# consumers[key] = {"secret": os.environ[env], "cert": 'NA'}
consumers[key] = {"secret": os.environ[env], "cert": None}
if not consumers:
raise LTIException("No consumers found. Chalice stores "
"consumers in Lambda environment variables. "
"Have you created the environment variables?")
return consumers | Gets consumers from Lambda environment variables prefixed with
CONSUMER_KEY_SECRET_. For example, given a consumer key of foo
and a shared secret of bar, you should have an environment
variable CONSUMER_KEY_SECRET_foo=bar.
:return: consumers map
:raises: LTIException if environment variables are not found | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/chalice.py#L50-L71 | null | class LTI(LTIBase):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
# Chalice does not support sessions. Yet, we want the experiance
# to be the same as Flask. Therefore, use a simple dictionary
# to keep session variables for the length of this request.
self.session = {}
LTIBase.__init__(self, lti_args, lti_kwargs)
def verify_request(self):
"""
Verify LTI request
:raises: LTIException if request validation failed
"""
request = self.lti_kwargs['app'].current_request
if request.method == 'POST':
# Chalice expects JSON and does not nativly support forms data in
# a post body. The below is copied from the parsing of query
# strings as implimented in match_route of Chalice local.py
parsed_url = request.raw_body.decode()
parsed_qs = parse_qs(parsed_url, keep_blank_values=True)
params = {k: v[0] for k, v in parsed_qs .items()}
else:
params = request.query_params
log.debug(params)
log.debug('verify_request?')
try:
# Chalice does not have a url property therefore building it.
protocol = request.headers.get('x-forwarded-proto', 'http')
hostname = request.headers['host']
path = request.context['path']
url = urlunparse((protocol, hostname, path, "", "", ""))
verify_request_common(self._consumers(), url,
request.method, request.headers,
params)
log.debug('verify_request success')
# All good to go, store all of the LTI params into a
# session dict for use in views
for prop in LTI_PROPERTY_LIST:
if params.get(prop, None):
log.debug("params %s=%s", prop, params.get(prop, None))
self.session[prop] = params[prop]
# Set logged in session key
self.session[LTI_SESSION_KEY] = True
return True
except LTIException:
log.debug('verify_request failed')
for prop in LTI_PROPERTY_LIST:
if self.session.get(prop, None):
del self.session[prop]
self.session[LTI_SESSION_KEY] = False
raise
@property
def response_url(self):
"""
Returns remapped lis_outcome_service_url
uses PYLTI_URL_FIX map to support edX dev-stack
:return: remapped lis_outcome_service_url
"""
url = ""
url = self.session['lis_outcome_service_url']
# TODO: Remove this section if not needed
# app_config = self.config
# urls = app_config.get('PYLTI_URL_FIX', dict())
# # url remapping is useful for using devstack
# # devstack reports httpS://localhost:8000/ and listens on HTTP
# for prefix, mapping in urls.items():
# if url.startswith(prefix):
# for _from, _to in mapping.items():
# url = url.replace(_from, _to)
return url
def _verify_any(self):
"""
Verify that request is in session or initial request
:raises: LTIException
"""
raise LTIException("The Request Type any is not "
"supported because Chalice does not support "
"session state. Change the Request Type to "
"initial or omit it from the declaration.")
@staticmethod
def _verify_session():
"""
Verify that session was already created
:raises: LTIException
"""
raise LTIException("The Request Type session is not "
"supported because Chalice does not support "
"session state. Change the Request Type to "
"initial or omit it from the declaration.")
@staticmethod
def close_session():
"""
Invalidates session
:raises: LTIException
"""
raise LTIException("Can not close session. Chalice does "
"not support session state.")
|
mitodl/pylti | pylti/chalice.py | LTI.verify_request | python | def verify_request(self):
request = self.lti_kwargs['app'].current_request
if request.method == 'POST':
# Chalice expects JSON and does not nativly support forms data in
# a post body. The below is copied from the parsing of query
# strings as implimented in match_route of Chalice local.py
parsed_url = request.raw_body.decode()
parsed_qs = parse_qs(parsed_url, keep_blank_values=True)
params = {k: v[0] for k, v in parsed_qs .items()}
else:
params = request.query_params
log.debug(params)
log.debug('verify_request?')
try:
# Chalice does not have a url property therefore building it.
protocol = request.headers.get('x-forwarded-proto', 'http')
hostname = request.headers['host']
path = request.context['path']
url = urlunparse((protocol, hostname, path, "", "", ""))
verify_request_common(self._consumers(), url,
request.method, request.headers,
params)
log.debug('verify_request success')
# All good to go, store all of the LTI params into a
# session dict for use in views
for prop in LTI_PROPERTY_LIST:
if params.get(prop, None):
log.debug("params %s=%s", prop, params.get(prop, None))
self.session[prop] = params[prop]
# Set logged in session key
self.session[LTI_SESSION_KEY] = True
return True
except LTIException:
log.debug('verify_request failed')
for prop in LTI_PROPERTY_LIST:
if self.session.get(prop, None):
del self.session[prop]
self.session[LTI_SESSION_KEY] = False
raise | Verify LTI request
:raises: LTIException if request validation failed | train | https://github.com/mitodl/pylti/blob/18a608282e0d5bc941beb2eaaeea3b7ad484b399/pylti/chalice.py#L73-L119 | [
"def verify_request_common(consumers, url, method, headers, params):\n \"\"\"\n Verifies that request is valid\n\n :param consumers: consumers from config file\n :param url: request url\n :param method: request method\n :param headers: request headers\n :param params: request params\n :return: is request valid\n \"\"\"\n log.debug(\"consumers %s\", consumers)\n log.debug(\"url %s\", url)\n log.debug(\"method %s\", method)\n log.debug(\"headers %s\", headers)\n log.debug(\"params %s\", params)\n\n oauth_server = LTIOAuthServer(consumers)\n oauth_server.add_signature_method(\n SignatureMethod_PLAINTEXT_Unicode())\n oauth_server.add_signature_method(\n SignatureMethod_HMAC_SHA1_Unicode())\n\n # Check header for SSL before selecting the url\n if headers.get('X-Forwarded-Proto', 'http') == 'https':\n url = url.replace('http:', 'https:', 1)\n\n oauth_request = Request_Fix_Duplicate.from_request(\n method,\n url,\n headers=dict(headers),\n parameters=params\n )\n if not oauth_request:\n log.info('Received non oauth request on oauth protected page')\n raise LTIException('This page requires a valid oauth session '\n 'or request')\n try:\n # pylint: disable=protected-access\n oauth_consumer_key = oauth_request.get_parameter('oauth_consumer_key')\n consumer = oauth_server.lookup_consumer(oauth_consumer_key)\n if not consumer:\n raise oauth2.Error('Invalid consumer.')\n oauth_server.verify_request(oauth_request, consumer, None)\n except oauth2.Error:\n # Rethrow our own for nice error handling (don't print\n # error message as it will contain the key\n raise LTIException(\"OAuth error: Please check your key and secret\")\n\n return True\n",
"def _consumers(self):\n \"\"\"\n Gets consumers from Lambda environment variables prefixed with\n CONSUMER_KEY_SECRET_. For example, given a consumer key of foo\n and a shared secret of bar, you should have an environment\n variable CONSUMER_KEY_SECRET_foo=bar.\n\n :return: consumers map\n :raises: LTIException if environment variables are not found\n \"\"\"\n consumers = {}\n for env in os.environ:\n if env.startswith('CONSUMER_KEY_SECRET_'):\n key = env[20:] # Strip off the CONSUMER_KEY_SECRET_ prefix\n # TODO: remove below after live test\n # consumers[key] = {\"secret\": os.environ[env], \"cert\": 'NA'}\n consumers[key] = {\"secret\": os.environ[env], \"cert\": None}\n if not consumers:\n raise LTIException(\"No consumers found. Chalice stores \"\n \"consumers in Lambda environment variables. \"\n \"Have you created the environment variables?\")\n return consumers\n"
] | class LTI(LTIBase):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
# Chalice does not support sessions. Yet, we want the experiance
# to be the same as Flask. Therefore, use a simple dictionary
# to keep session variables for the length of this request.
self.session = {}
LTIBase.__init__(self, lti_args, lti_kwargs)
def _consumers(self):
"""
Gets consumers from Lambda environment variables prefixed with
CONSUMER_KEY_SECRET_. For example, given a consumer key of foo
and a shared secret of bar, you should have an environment
variable CONSUMER_KEY_SECRET_foo=bar.
:return: consumers map
:raises: LTIException if environment variables are not found
"""
consumers = {}
for env in os.environ:
if env.startswith('CONSUMER_KEY_SECRET_'):
key = env[20:] # Strip off the CONSUMER_KEY_SECRET_ prefix
# TODO: remove below after live test
# consumers[key] = {"secret": os.environ[env], "cert": 'NA'}
consumers[key] = {"secret": os.environ[env], "cert": None}
if not consumers:
raise LTIException("No consumers found. Chalice stores "
"consumers in Lambda environment variables. "
"Have you created the environment variables?")
return consumers
@property
def response_url(self):
"""
Returns remapped lis_outcome_service_url
uses PYLTI_URL_FIX map to support edX dev-stack
:return: remapped lis_outcome_service_url
"""
url = ""
url = self.session['lis_outcome_service_url']
# TODO: Remove this section if not needed
# app_config = self.config
# urls = app_config.get('PYLTI_URL_FIX', dict())
# # url remapping is useful for using devstack
# # devstack reports httpS://localhost:8000/ and listens on HTTP
# for prefix, mapping in urls.items():
# if url.startswith(prefix):
# for _from, _to in mapping.items():
# url = url.replace(_from, _to)
return url
def _verify_any(self):
"""
Verify that request is in session or initial request
:raises: LTIException
"""
raise LTIException("The Request Type any is not "
"supported because Chalice does not support "
"session state. Change the Request Type to "
"initial or omit it from the declaration.")
@staticmethod
def _verify_session():
"""
Verify that session was already created
:raises: LTIException
"""
raise LTIException("The Request Type session is not "
"supported because Chalice does not support "
"session state. Change the Request Type to "
"initial or omit it from the declaration.")
@staticmethod
def close_session():
    """Reject session invalidation.

    There is no server-side session to close under Chalice.

    :raises: LTIException always
    """
    message = ("Can not close session. Chalice does "
               "not support session state.")
    raise LTIException(message)
|
def exclude_args(parser, args, excluded_args, target):
    """Delete options that are not appropriate for a following code path;
    exit with an error if excluded options were passed in by the user.

    argparse generates a namespace with all options it knows, but not every
    attribute should be passed to all code paths (i.e. options about
    interpolation should not reach `run_from_ufos()`). This function can be
    run before entering a particular code path to clean up the kwargs
    passed to it. Exit with an error message if the user actually passed
    the options in.

    Args:
        parser: the ArgumentParser, used only for error reporting.
        args: dict of parsed options; mutated in place.
        excluded_args: iterable of attribute-style option names (with
            underscores) to strip from ``args``.
        target: human-readable name of the code path, for the error message.
    """
    msg = '"%s" option invalid for %s'
    for argname in excluded_args:
        if argname not in args:
            continue
        if args[argname]:
            # The user explicitly set an option this code path cannot
            # honor: report it and exit via parser.error().
            optname = "--%s" % argname.replace("_", "-")
            parser.error(msg % (optname, target))
        del args[argname]
with an error if excluded options were passed in by the user.
argparse generates a namespace with all options it knows, but not every
attribute should be passed to all code paths (i.e. options about
interpolation should not reach `run_from_ufos()`). This function can be run
before entering a particular code path to clean up the kwargs passed to it.
Exit with an error message if the user actually passed the options in. | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/__main__.py#L42-L60 | null | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from argparse import ArgumentParser
from contextlib import contextmanager
from fontmake import __version__
from fontmake.errors import FontmakeError
from fontmake.font_project import INTERPOLATABLE_OUTPUTS, FontProject
from ufo2ft import CFFOptimization
from ufo2ft.featureWriters import loadFeatureWriterFromString
def _loadFeatureWriters(parser, specs):
feature_writers = []
for s in specs:
if s == "None":
# magic value that means "don't generate any features!"
return []
try:
feature_writers.append(loadFeatureWriterFromString(s))
except Exception as e:
parser.error(
"Failed to load --feature-writer:\n {}: {}".format(type(e).__name__, e)
)
return feature_writers
@contextmanager
def _make_tempdirs(parser, args):
output = args["output"]
tempdirs = []
for dirname in ("master_dir", "instance_dir"):
if args.get(dirname) == "{tmp}":
if "ufo" in output:
parser.error(
"Can't use temporary %s directory with 'ufo' output"
% dirname.replace("_dir", "")
)
import tempfile
td = args[dirname] = tempfile.mkdtemp(prefix=dirname + "_")
tempdirs.append(td)
yield tempdirs
if tempdirs:
import shutil
for td in tempdirs:
shutil.rmtree(td)
def main(args=None):
    """Command-line entry point: parse arguments and run the font build.

    Args:
        args: optional list of argument strings; defaults to sys.argv[1:]
            (via argparse).

    The parsed options are routed to one of three code paths depending on
    the input kind (Glyphs file, designspace, or UFOs); options that do
    not apply to the chosen path are stripped (or rejected) first.
    """
    parser = ArgumentParser()
    parser.add_argument("--version", action="version", version=__version__)

    # --- input source (exactly one of Glyphs / UFOs / designspace) ---
    inputGroup = parser.add_argument_group(
        title="Input arguments",
        description="The following arguments are mutually exclusive (pick only one):",
    )
    xInputGroup = inputGroup.add_mutually_exclusive_group(required=True)
    xInputGroup.add_argument(
        "-g", "--glyphs-path", metavar="GLYPHS", help="Path to .glyphs source file"
    )
    xInputGroup.add_argument(
        "-u",
        "--ufo-paths",
        nargs="+",
        metavar="UFO",
        help="One or more paths to UFO files",
    )
    xInputGroup.add_argument(
        "-m",
        "--mm-designspace",
        metavar="DESIGNSPACE",
        help="Path to .designspace file",
    )

    # --- output formats and destinations ---
    outputGroup = parser.add_argument_group(title="Output arguments")
    outputGroup.add_argument(
        "-o",
        "--output",
        nargs="+",
        default=("otf", "ttf"),
        metavar="FORMAT",
        help="Output font formats. Choose 1 or more from: %(choices)s. Default: otf, ttf. "
        "(No file paths).",
        choices=(
            "ufo",
            "otf",
            "ttf",
            "ttf-interpolatable",
            "otf-interpolatable",
            "variable",
            "variable-cff2",
        ),
    )
    outputSubGroup = outputGroup.add_mutually_exclusive_group()
    outputSubGroup.add_argument(
        "--output-path",
        default=None,
        help="Output font file path. Only valid when the output is a single "
        "file (e.g. input is a single UFO or output is variable font)",
    )
    outputSubGroup.add_argument(
        "--output-dir",
        default=None,
        help="Output folder. By default, output folders are created in the "
        "current working directory, grouping output fonts by format.",
    )
    outputGroup.add_argument(
        "-i",
        "--interpolate",
        nargs="?",
        default=False,
        const=True,
        metavar="INSTANCE_NAME",
        help="Interpolate masters and generate all the instances defined. "
        "To only interpolate a specific instance (or instances) that "
        'match a given "name" attribute, you can pass as argument '
        "the full instance name or a regular expression. "
        'E.g.: -i "Noto Sans Bold"; or -i ".* UI Condensed". '
        "(for Glyphs or MutatorMath sources only). ",
    )
    outputGroup.add_argument(
        "-M",
        "--masters-as-instances",
        action="store_true",
        help="Output masters as instances",
    )
    outputGroup.add_argument(
        "--family-name",
        help="Family name to use for masters, and to filter output instances",
    )
    outputGroup.add_argument(
        "--round-instances",
        dest="round_instances",
        action="store_true",
        help="Apply integer rounding to all geometry when interpolating",
    )
    outputGroup.add_argument(
        "--designspace-path",
        default=None,
        help="Path to output designspace file (for Glyphs sources only).",
    )
    outputGroup.add_argument(
        "--master-dir",
        default=None,
        help='Directory where to write master UFO. Default: "./master_ufo". '
        'If value is "{tmp}", a temporary directory is created and '
        "removed at the end (for Glyphs sources only).",
    )
    outputGroup.add_argument(
        "--instance-dir",
        default=None,
        help="Directory where to write instance UFOs. Default: "
        '"./instance_ufo". If value is "{tmp}", a temporary directory '
        "is created and removed at the end (for Glyphs sources only).",
    )
    outputGroup.add_argument(
        "--validate-ufo",
        action="store_true",
        help="Enable ufoLib validation on reading/writing UFO files. It is "
        "disabled by default",
    )
    outputGroup.add_argument(
        "--expand-features-to-instances",
        action="store_true",
        help="Resolves all include()s in the master feature file and writes "
        "the full feature file to all instance UFOs. Only valid when "
        "interpolating. Use if you share feature files of masters in "
        "external files, as instances can end up elsewhere.",
    )

    # --- contour processing (overlaps, direction, cu2qu, hinting, CFF) ---
    contourGroup = parser.add_argument_group(title="Handling of contours")
    contourGroup.add_argument(
        "--keep-overlaps",
        dest="remove_overlaps",
        action="store_false",
        help="Do not remove any overlap.",
    )
    contourGroup.add_argument(
        "--overlaps-backend",
        dest="overlaps_backend",
        metavar="BACKEND",
        choices=("booleanOperations", "pathops"),
        default="booleanOperations",
        help="Select library to remove overlaps. Choose between: %(choices)s "
        "(default: %(default)s)",
    )
    contourGroup.add_argument(
        "--keep-direction",
        dest="reverse_direction",
        action="store_false",
        help="Do not reverse contour direction when output is ttf or "
        "ttf-interpolatable",
    )
    contourGroup.add_argument(
        "-e",
        "--conversion-error",
        type=float,
        default=None,
        metavar="ERROR",
        help="Maximum approximation error for cubic to quadratic conversion "
        "measured in EM",
    )
    contourGroup.add_argument(
        "-a",
        "--autohint",
        nargs="?",
        const="",
        help="Run ttfautohint. Can provide arguments, quoted",
    )
    contourGroup.add_argument(
        "--cff-round-tolerance",
        type=float,
        default=None,
        metavar="FLOAT",
        help="Restrict rounding of point coordinates in CFF table to only "
        "those floats whose absolute difference from their integral part "
        "is less than or equal to the tolerance. By default, all floats "
        "are rounded to integer (tolerance 0.5); 0 disables rounding.",
    )
    contourGroup.add_argument(
        "--optimize-cff",
        type=lambda s: CFFOptimization(int(s)),
        default=CFFOptimization.SUBROUTINIZE,
        help="0 disables all optimizations; 1 specializes the CFF charstring "
        "operators; 2 (default) also enables subroutinization",
    )

    # --- OpenType Layout (feature writers, FEA compiler, MTI) ---
    layoutGroup = parser.add_argument_group(title="Handling of OpenType Layout")
    layoutGroup.add_argument(
        "--interpolate-binary-layout",
        nargs="?",
        default=False,
        const=True,
        metavar="MASTER_DIR",
        help="Interpolate layout tables from compiled master binaries. "
        "Requires Glyphs or MutatorMath source.",
    )
    layoutGroup.add_argument(
        "--feature-writer",
        metavar="CLASS",
        action="append",
        dest="feature_writer_specs",
        help="string specifying a feature writer class to load, either "
        "built-in or from an external module, optionally initialized with "
        "the given keyword arguments. The class and module names are "
        "separated by '::'. The option can be repeated multiple times "
        "for each writer class. A special value of 'None' will disable "
        "all automatic feature generation. The option overrides both the "
        "default ufo2ft writers and those specified in the UFO lib.",
    )

    feaCompilerGroup = layoutGroup.add_mutually_exclusive_group(required=False)
    feaCompilerGroup.add_argument(
        "--use-afdko",
        action="store_true",
        help="Use makeOTF instead of feaLib to compile FEA.",
    )
    feaCompilerGroup.add_argument(
        "--mti-source",
        help="Path to mtiLib .txt feature definitions (use instead of FEA)",
    )

    # --- tri-state flags (None means "decide from the source files") ---
    glyphnamesGroup = parser.add_mutually_exclusive_group(required=False)
    glyphnamesGroup.add_argument(
        "--production-names",
        dest="use_production_names",
        action="store_true",
        help="Rename glyphs with production names if available otherwise use "
        "uninames.",
    )
    glyphnamesGroup.add_argument(
        "--no-production-names", dest="use_production_names", action="store_false"
    )

    subsetGroup = parser.add_mutually_exclusive_group(required=False)
    subsetGroup.add_argument(
        "--subset",
        dest="subset",
        action="store_true",
        help="Subset font using export flags set by glyphsLib",
    )
    subsetGroup.add_argument("--no-subset", dest="subset", action="store_false")

    subroutinizeGroup = parser.add_mutually_exclusive_group(required=False)
    subroutinizeGroup.add_argument(
        "-s",
        "--subroutinize",
        action="store_true",
        help="Optimize CFF table using compreffor (default) [DEPRECATED: use "
        "--optimize-cff option instead]",
    )
    subroutinizeGroup.add_argument(
        "-S", "--no-subroutinize", dest="subroutinize", action="store_false"
    )
    parser.set_defaults(use_production_names=None, subset=None, subroutinize=None)

    logGroup = parser.add_argument_group(title="Logging arguments")
    logGroup.add_argument(
        "--timing", action="store_true", help="Print the elapsed time for each steps"
    )
    logGroup.add_argument(
        "--verbose",
        default="INFO",
        metavar="LEVEL",
        choices=("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"),
        help="Configure the logger verbosity level. Choose between: "
        "%(choices)s. Default: INFO",
    )

    # --- post-process the parsed namespace into FontProject kwargs ---
    args = vars(parser.parse_args(args))

    specs = args.pop("feature_writer_specs")
    if specs is not None:
        args["feature_writers"] = _loadFeatureWriters(parser, specs)

    glyphs_path = args.pop("glyphs_path")
    ufo_paths = args.pop("ufo_paths")
    designspace_path = args.pop("mm_designspace")
    input_format = (
        "Glyphs" if glyphs_path else "designspace" if designspace_path else "UFO"
    ) + " source"

    # variable/interpolatable outputs need a multiple-master source and
    # are incompatible with instance-generation options
    if INTERPOLATABLE_OUTPUTS.intersection(args["output"]):
        if not (glyphs_path or designspace_path):
            parser.error("Glyphs or designspace source required for variable font")
        exclude_args(
            parser,
            args,
            ["interpolate", "masters_as_instances", "interpolate_binary_layout"],
            "variable output",
        )

    try:
        project = FontProject(
            timing=args.pop("timing"),
            verbose=args.pop("verbose"),
            validate_ufo=args.pop("validate_ufo"),
        )

        # dispatch to the right entry point; each exclude_args call strips
        # options that do not apply to the remaining code paths
        if glyphs_path:
            with _make_tempdirs(parser, args):
                project.run_from_glyphs(glyphs_path, **args)
            return

        exclude_args(
            parser,
            args,
            [
                "family_name",
                "mti_source",
                "designspace_path",
                "master_dir",
                "instance_dir",
            ],
            input_format,
        )
        if designspace_path:
            project.run_from_designspace(designspace_path, **args)
            return

        exclude_args(
            parser,
            args,
            [
                "interpolate",
                "interpolate_binary_layout",
                "round_instances",
                "expand_features_to_instances",
            ],
            input_format,
        )
        project.run_from_ufos(
            ufo_paths, is_instance=args.pop("masters_as_instances"), **args
        )
    except FontmakeError as e:
        import sys

        sys.exit("fontmake: error: %s" % e)
# Allow running as a script; the process exit status comes from main().
if __name__ == "__main__":
    import sys

    sys.exit(main())
|
googlefonts/fontmake | Lib/fontmake/font_project.py | _varLib_finder | python | def _varLib_finder(source, directory="", ext="ttf"):
fname = os.path.splitext(os.path.basename(source))[0] + "." + ext
return os.path.join(directory, fname) | Finder function to be used with varLib.build to find master TTFs given
the filename of the source UFO master as specified in the designspace.
It replaces the UFO directory with the one specified in 'directory'
argument, and replaces the file extension with 'ext'. | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L1158-L1165 | null | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import logging
import math
import os
import shutil
import tempfile
from collections import OrderedDict
from functools import partial, wraps
import ufo2ft
from defcon import Font
from defcon.objects.base import setUfoLibReadValidate, setUfoLibWriteValidate
from fontmake.errors import FontmakeError, TTFAError
from fontmake.ttfautohint import ttfautohint
from fontTools import designspaceLib, varLib
from fontTools.misc.loggingTools import Timer, configLogger
from fontTools.misc.py23 import basestring, tobytes, zip
from fontTools.misc.transform import Transform
from fontTools.pens.reverseContourPen import ReverseContourPen
from fontTools.pens.transformPen import TransformPen
from fontTools.ttLib import TTFont
from fontTools.varLib.interpolate_layout import interpolate_layout
from ufo2ft import CFFOptimization
from ufo2ft.featureCompiler import FeatureCompiler, parseLayoutFeatures
from ufo2ft.featureWriters import FEATURE_WRITERS_KEY, loadFeatureWriters
from ufo2ft.util import makeOfficialGlyphOrder
try:
from plistlib import load as readPlist # PY3
except ImportError:
from plistlib import readPlist # PY2
# re.fullmatch was added in Python 3.4; on older interpreters emulate it by
# anchoring the pattern (wrapped in a non-capturing group) with \Z.
try:
    from re import fullmatch
except ImportError:
    import re

    def fullmatch(regex, string, flags=0):
        """Backport of python3.4 re.fullmatch()."""
        return re.match("(?:" + regex + r")\Z", string, flags=flags)
# Module logger, plus the timer used by the @timer() step decorators below.
logger = logging.getLogger(__name__)
timer = Timer(logging.getLogger("fontmake.timer"), level=logging.DEBUG)

# Key prefixes used in UFO lib/glyph lib dictionaries.
PUBLIC_PREFIX = "public."
GLYPHS_PREFIX = "com.schriftgestaltung."
# for glyphsLib < 2.3.0
KEEP_GLYPHS_OLD_KEY = GLYPHS_PREFIX + "Keep Glyphs"
# for glyphsLib >= 2.3.0
KEEP_GLYPHS_NEW_KEY = (
    GLYPHS_PREFIX + "customParameter.InstanceDescriptorAsGSInstance.Keep Glyphs"
)
# Per-glyph flag: False means the glyph must be excluded from binaries.
GLYPH_EXPORT_KEY = GLYPHS_PREFIX + "Glyphs.Export"

# Output format names, split by whether they produce static or
# interpolatable/variable fonts.
STATIC_OUTPUTS = frozenset(["ttf", "otf"])
INTERPOLATABLE_OUTPUTS = frozenset(
    ["ttf-interpolatable", "otf-interpolatable", "variable", "variable-cff2"]
)
def _deprecated(func):
import warnings
@wraps(func)
def wrapper(*args, **kwargs):
warnings.warn(
"'%s' is deprecated and will be dropped in future versions" % func.__name__,
category=UserWarning,
stacklevel=2,
)
return func(*args, **kwargs)
return wrapper
class FontProject(object):
"""Provides methods for building fonts."""
def __init__(self, timing=False, verbose="INFO", validate_ufo=False):
    """Configure logging and global UFO read/write validation.

    Args:
        timing: if True, log the elapsed time of each build step at DEBUG.
        verbose: logging level name used for the basic configuration.
        validate_ufo: toggle ufoLib validation when reading/writing UFOs.
    """
    logging.basicConfig(level=getattr(logging, verbose.upper()))
    # keep fontTools.subset quieter than the configured root level
    logging.getLogger("fontTools.subset").setLevel(logging.WARNING)
    if timing:
        configLogger(logger=timer.logger, level=logging.DEBUG)

    logger.debug(
        "ufoLib UFO validation is %s", "enabled" if validate_ufo else "disabled"
    )
    # NOTE: these switch validation globally for all defcon Fonts,
    # not only the ones created by this project.
    setUfoLibReadValidate(validate_ufo)
    setUfoLibWriteValidate(validate_ufo)
@timer()
def build_master_ufos(
    self,
    glyphs_path,
    designspace_path=None,
    master_dir=None,
    instance_dir=None,
    family_name=None,
    mti_source=None,
):
    """Build UFOs and MutatorMath designspace from Glyphs source.

    Args:
        glyphs_path: path to the .glyphs source file.
        designspace_path: output path for the generated designspace;
            defaults to a file inside ``master_dir``.
        master_dir: directory for master UFOs (created if missing).
        instance_dir: directory for instance UFOs (created if missing).
        family_name: optional family name override passed to glyphsLib.
        mti_source: optional plist mapping masters to MTI feature files.

    Returns:
        The path of the written designspace document.
    """
    import glyphsLib

    if master_dir is None:
        master_dir = self._output_dir("ufo")
    if not os.path.isdir(master_dir):
        os.mkdir(master_dir)
    if instance_dir is None:
        instance_dir = self._output_dir("ufo", is_instance=True)
    if not os.path.isdir(instance_dir):
        os.mkdir(instance_dir)

    font = glyphsLib.GSFont(glyphs_path)
    if designspace_path is not None:
        designspace_dir = os.path.dirname(designspace_path)
    else:
        designspace_dir = master_dir
    # glyphsLib.to_designspace expects instance_dir to be relative
    instance_dir = os.path.relpath(instance_dir, designspace_dir)
    designspace = glyphsLib.to_designspace(
        font, family_name=family_name, instance_dir=instance_dir
    )

    masters = {}
    # multiple sources can have the same font/filename (but different layer),
    # we want to save a font only once
    for source in designspace.sources:
        if source.filename in masters:
            assert source.font is masters[source.filename]
            continue
        ufo_path = os.path.join(master_dir, source.filename)
        # no need to also set the relative 'filename' attribute as that
        # will be auto-updated on writing the designspace document
        source.path = ufo_path
        source.font.save(ufo_path)
        masters[source.filename] = source.font

    if designspace_path is None:
        designspace_path = os.path.join(master_dir, designspace.filename)
    designspace.write(designspace_path)
    if mti_source:
        self.add_mti_features_to_master_ufos(mti_source, masters.values())
    return designspace_path
@timer()
def add_mti_features_to_master_ufos(self, mti_source, masters):
    """Attach Monotype (MTI) layout sources to each master UFO.

    Args:
        mti_source: path to a plist mapping master names (UFO base name
            without the ".ufo" extension) to {table tag: MTI file path}.
        masters: iterable of master font objects; each is mutated and
            saved in place.

    The raw MTI data is stored in the UFO's data directory so ufo2ft can
    compile it later; any FEA text derived from the Glyphs source is
    cleared because MTI replaces it.
    """
    mti_dir = os.path.dirname(mti_source)
    with open(mti_source, "rb") as plist_file:
        mti_paths = readPlist(plist_file)
    for master in masters:
        # Bug fix: the original used basename(...).rstrip(".ufo"), which
        # strips any trailing '.', 'u', 'f' or 'o' CHARACTERS (so e.g.
        # "Fou.ufo" became "F"); use splitext to drop only the extension.
        key = os.path.splitext(os.path.basename(master.path))[0]
        for table, path in mti_paths[key].items():
            # (renamed from 'mti_source', which shadowed the parameter)
            with open(os.path.join(mti_dir, path), "rb") as feature_file:
                ufo_path = (
                    "com.github.googlei18n.ufo2ft.mtiFeatures/%s.mti"
                    % table.strip()
                )
                master.data[ufo_path] = feature_file.read()
        # If we have MTI sources, any Adobe feature files derived from
        # the Glyphs file should be ignored. We clear it here because
        # it only contains junk information anyway.
        master.features.text = ""
        master.save()
@_deprecated
@timer()
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
    """Remove overlaps in UFOs' glyphs' contours.

    Args:
        ufos: iterable of font objects; glyphs are modified in place.
        glyph_filter: predicate selecting which glyphs to process
            (default: skip glyphs with no contours).

    Raises:
        BooleanOperationsError: re-raised after logging which glyph failed.
    """
    from booleanOperations import union, BooleanOperationsError

    for ufo in ufos:
        font_name = self._font_name(ufo)
        logger.info("Removing overlaps for " + font_name)
        for glyph in ufo:
            if not glyph_filter(glyph):
                continue
            # replace the glyph's contours with their boolean union
            contours = list(glyph)
            glyph.clearContours()
            try:
                union(contours, glyph.getPointPen())
            except BooleanOperationsError:
                logger.error(
                    "Failed to remove overlaps for %s: %r", font_name, glyph.name
                )
                raise
@_deprecated
@timer()
def decompose_glyphs(self, ufos, glyph_filter=lambda g: True):
    """Move components of UFOs' glyphs to their outlines.

    Args:
        ufos: iterable of font objects; composite glyphs are flattened
            in place (components copied as contours, then removed).
        glyph_filter: predicate selecting which glyphs to decompose.
    """
    for ufo in ufos:
        logger.info("Decomposing glyphs for " + self._font_name(ufo))
        for glyph in ufo:
            if not glyph.components or not glyph_filter(glyph):
                continue
            self._deep_copy_contours(ufo, glyph, glyph, Transform())
            glyph.clearComponents()
def _deep_copy_contours(self, ufo, parent, component, transformation):
    """Copy contours from component to parent, including nested components.

    Args:
        ufo: the font, used to resolve component base glyphs by name.
        parent: the glyph receiving the (transformed) contours.
        component: the glyph currently being copied (recursion walks
            down its own components first).
        transformation: accumulated affine Transform applied to the
            component's outlines.
    """
    for nested in component.components:
        self._deep_copy_contours(
            ufo,
            parent,
            ufo[nested.baseGlyph],
            transformation.transform(nested.transformation),
        )
    if component != parent:
        pen = TransformPen(parent.getPen(), transformation)
        # if the transformation has a negative determinant, it will reverse
        # the contour direction of the component
        xx, xy, yx, yy = transformation[:4]
        if xx * yy - xy * yx < 0:
            pen = ReverseContourPen(pen)
        component.draw(pen)
@_deprecated
@timer()
def convert_curves(
    self, ufos, compatible=False, reverse_direction=True, conversion_error=None
):
    """Convert cubic outlines to quadratic (TrueType) in place via cu2qu.

    Args:
        ufos: iterable of font objects to convert.
        compatible: if True, convert all fonts together so the resulting
            quadratic outlines stay point-compatible for interpolation.
        reverse_direction: reverse contour direction while converting.
        conversion_error: max approximation error in EM units.
    """
    from cu2qu.ufo import font_to_quadratic, fonts_to_quadratic

    if compatible:
        logger.info("Converting curves compatibly")
        fonts_to_quadratic(
            ufos,
            max_err_em=conversion_error,
            reverse_direction=reverse_direction,
            dump_stats=True,
        )
    else:
        for ufo in ufos:
            logger.info("Converting curves for " + self._font_name(ufo))
            font_to_quadratic(
                ufo,
                max_err_em=conversion_error,
                reverse_direction=reverse_direction,
                dump_stats=True,
            )
def build_otfs(self, ufos, **kwargs):
    """Compile UFOs into OpenType binaries with CFF outlines.

    All keyword arguments are forwarded to ``save_otfs``.
    """
    self.save_otfs(ufos, **kwargs)
def build_ttfs(self, ufos, **kwargs):
    """Compile UFOs into OpenType binaries with TrueType (glyf) outlines.

    All keyword arguments are forwarded to ``save_otfs`` with ``ttf=True``.
    """
    self.save_otfs(ufos, ttf=True, **kwargs)
@staticmethod
def _load_designspace_sources(designspace):
    """Populate ``source.font`` for every source in *designspace*.

    Each distinct UFO path is loaded at most once; sources that share a
    path (differing only by layer) share the same Font object.
    """
    # set source.font attributes, but only load fonts once
    masters = {}
    for source in designspace.sources:
        if source.path in masters:
            source.font = masters[source.path]
        else:
            assert source.path is not None
            source.font = Font(source.path)
            masters[source.path] = source.font
def _build_interpolatable_masters(
    self,
    designspace,
    ttf,
    use_production_names=None,
    reverse_direction=True,
    conversion_error=None,
    feature_writers=None,
    cff_round_tolerance=None,
    **kwargs
):
    """Compile the designspace's masters to interpolatable TTFs or OTFs.

    Args:
        designspace: a DesignSpaceDocument object, a path string, or an
            os.PathLike pointing to a .designspace file.
        ttf: True for TrueType (glyf) output, False for CFF.
        use_production_names, reverse_direction, conversion_error,
        feature_writers, cff_round_tolerance: forwarded to ufo2ft.

    Returns:
        Whatever ufo2ft's compileInterpolatable*FromDS returns.
    """
    # Bug fix: normalize an os.PathLike to a plain string by replacing
    # 'designspace' itself. The original assigned the fspath result to
    # ds_path and then fell through to the else-branch below, which
    # crashed accessing .path on path-like objects.
    if hasattr(designspace, "__fspath__"):
        designspace = designspace.__fspath__()
    if isinstance(designspace, basestring):
        ds_path = designspace
    else:
        # reload designspace from its path so we have a new copy
        # that can be modified in-place.
        ds_path = designspace.path
    if ds_path is not None:
        designspace = designspaceLib.DesignSpaceDocument.fromfile(ds_path)
    self._load_designspace_sources(designspace)

    if ttf:
        return ufo2ft.compileInterpolatableTTFsFromDS(
            designspace,
            useProductionNames=use_production_names,
            reverseDirection=reverse_direction,
            cubicConversionError=conversion_error,
            featureWriters=feature_writers,
            inplace=True,
        )
    else:
        return ufo2ft.compileInterpolatableOTFsFromDS(
            designspace,
            useProductionNames=use_production_names,
            roundTolerance=cff_round_tolerance,
            featureWriters=feature_writers,
            inplace=True,
        )
def build_interpolatable_ttfs(self, designspace, **kwargs):
    """Build interpolation-compatible TTFs from a DesignSpaceDocument.

    Keyword arguments are forwarded to ``_build_interpolatable_masters``.
    """
    return self._build_interpolatable_masters(designspace, ttf=True, **kwargs)
def build_interpolatable_otfs(self, designspace, **kwargs):
    """Build interpolation-compatible OTFs (CFF outlines) from a
    DesignSpaceDocument.

    Keyword arguments are forwarded to ``_build_interpolatable_masters``.
    """
    return self._build_interpolatable_masters(designspace, ttf=False, **kwargs)
def build_variable_font(
    self,
    designspace,
    output_path=None,
    output_dir=None,
    master_bin_dir=None,
    ttf=True,
):
    """Build OpenType variable font from masters in a designspace.

    Args:
        designspace: DesignSpaceDocument object, path string or PathLike.
            A path means the master binaries are located on disk; a
            document object must already carry compiled TTFont sources.
        output_path: exact output file path (mutually exclusive with
            output_dir).
        output_dir: directory in which to place the output file.
        master_bin_dir: directory containing pre-compiled master
            binaries; defaults to the interpolatable output directory.
        ttf: True to merge TrueType masters (.ttf), False for CFF (.otf).
    """
    assert not (output_path and output_dir), "mutually exclusive args"

    ext = "ttf" if ttf else "otf"

    if hasattr(designspace, "__fspath__"):
        designspace = designspace.__fspath__()
    if isinstance(designspace, basestring):
        designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace)
        if master_bin_dir is None:
            master_bin_dir = self._output_dir(ext, interpolatable=True)
        # locate master binaries on disk by the sources' filenames
        finder = partial(_varLib_finder, directory=master_bin_dir)
    else:
        # in-memory document: sources must already hold compiled fonts
        assert all(isinstance(s.font, TTFont) for s in designspace.sources)
        finder = lambda s: s  # noqa: E731

    if output_path is None:
        output_path = (
            os.path.splitext(os.path.basename(designspace.path))[0] + "-VF"
        )
        output_path = self._output_path(
            output_path, ext, is_variable=True, output_dir=output_dir
        )

    logger.info("Building variable font " + output_path)
    font, _, _ = varLib.build(designspace, finder)
    font.save(output_path)
def _iter_compile(self, ufos, ttf=False, **kwargs):
    # generator function that calls ufo2ft compiler for each ufo and
    # yields ttFont instances
    options = dict(kwargs)
    # drop the options the chosen compiler does not accept
    if ttf:
        for key in ("optimizeCFF", "roundTolerance"):
            options.pop(key, None)
        compile_func, fmt = ufo2ft.compileTTF, "TTF"
    else:
        for key in ("cubicConversionError", "reverseDirection"):
            options.pop(key, None)
        compile_func, fmt = ufo2ft.compileOTF, "OTF"

    for ufo in ufos:
        name = self._font_name(ufo)
        logger.info("Building {} for {}".format(fmt, name))
        yield compile_func(ufo, **options)
@timer()
def save_otfs(
    self,
    ufos,
    ttf=False,
    is_instance=False,
    interpolatable=False,
    use_afdko=False,
    autohint=None,
    subset=None,
    use_production_names=None,
    subroutinize=None,  # deprecated
    optimize_cff=CFFOptimization.NONE,
    cff_round_tolerance=None,
    remove_overlaps=True,
    overlaps_backend=None,
    reverse_direction=True,
    conversion_error=None,
    feature_writers=None,
    interpolate_layout_from=None,
    interpolate_layout_dir=None,
    output_path=None,
    output_dir=None,
    inplace=True,
):
    """Build OpenType binaries from UFOs.

    Args:
        ufos: Font objects to compile.
        ttf: If True, build fonts with TrueType outlines and .ttf extension.
        is_instance: If output fonts are instances, for generating paths.
        interpolatable: If output is interpolatable, for generating paths.
        use_afdko: If True, use AFDKO to compile feature source.
        autohint: Parameters to provide to ttfautohint. If not provided, the
            autohinting step is skipped.
        subset: Whether to subset the output according to data in the UFOs.
            If not provided, also determined by flags in the UFOs.
        use_production_names: Whether to use production glyph names in the
            output. If not provided, determined by flags in the UFOs.
        subroutinize: If True, subroutinize CFF outlines in output.
        cff_round_tolerance (float): controls the rounding of point
            coordinates in CFF table. It is defined as the maximum absolute
            difference between the original float and the rounded integer
            value. By default, all floats are rounded to integer (tolerance
            0.5); a value of 0 completely disables rounding; values in
            between only round floats which are close to their integral
            part within the tolerated range. Ignored if ttf=True.
        remove_overlaps: If True, remove overlaps in glyph shapes.
        overlaps_backend: name of the library to remove overlaps. Can be
            either "booleanOperations" (default) or "pathops".
        reverse_direction: If True, reverse contour directions when
            compiling TrueType outlines.
        conversion_error: Error to allow when converting cubic CFF contours
            to quadratic TrueType contours.
        feature_writers: list of ufo2ft-compatible feature writer classes
            or pre-initialized objects that are passed on to ufo2ft
            feature compiler to generate automatic feature code. The
            default value (None) means that ufo2ft will use its built-in
            default feature writers (for kern, mark, mkmk, etc.). An empty
            list ([]) will skip any automatic feature generation.
        interpolate_layout_from: A DesignSpaceDocument object to give varLib
            for interpolating layout tables to use in output.
        interpolate_layout_dir: Directory containing the compiled master
            fonts to use for interpolating binary layout tables.
        output_path: output font file path. Only works when the input
            'ufos' list contains a single font.
        output_dir: directory where to save output files. Mutually
            exclusive with 'output_path' argument.
    """
    assert not (output_path and output_dir), "mutually exclusive args"
    if output_path is not None and len(ufos) > 1:
        raise ValueError("output_path requires a single input")

    # map the deprecated 'subroutinize' bool onto 'optimize_cff'
    if subroutinize is not None:
        import warnings

        warnings.warn(
            "the 'subroutinize' argument is deprecated, use 'optimize_cff'",
            UserWarning,
        )
        if subroutinize:
            optimize_cff = CFFOptimization.SUBROUTINIZE
        else:
            # for b/w compatibility, we still run the charstring specializer
            # even when --no-subroutinize is used. Use the new --optimize-cff
            # option to disable both specilization and subroutinization
            optimize_cff = CFFOptimization.SPECIALIZE

    ext = "ttf" if ttf else "otf"

    if interpolate_layout_from is not None:
        if interpolate_layout_dir is None:
            interpolate_layout_dir = self._output_dir(
                ext, is_instance=False, interpolatable=interpolatable
            )
        finder = partial(_varLib_finder, directory=interpolate_layout_dir, ext=ext)
        # no need to generate automatic features in ufo2ft, since here we
        # are interpolating precompiled GPOS table with fontTools.varLib.
        # An empty 'featureWriters' list tells ufo2ft to not generate any
        # automatic features.
        # TODO: Add an argument to ufo2ft.compileOTF/compileTTF to
        # completely skip compiling features into OTL tables
        feature_writers = []

    compiler_options = dict(
        useProductionNames=use_production_names,
        reverseDirection=reverse_direction,
        cubicConversionError=conversion_error,
        featureWriters=feature_writers,
        inplace=True,  # avoid extra copy
    )
    if use_afdko:
        compiler_options["featureCompilerClass"] = FDKFeatureCompiler

    if interpolatable:
        if not ttf:
            raise NotImplementedError("interpolatable CFF not supported yet")
        logger.info("Building interpolation-compatible TTFs")
        fonts = ufo2ft.compileInterpolatableTTFs(ufos, **compiler_options)
    else:
        fonts = self._iter_compile(
            ufos,
            ttf,
            removeOverlaps=remove_overlaps,
            overlapsBackend=overlaps_backend,
            optimizeCFF=optimize_cff,
            roundTolerance=cff_round_tolerance,
            **compiler_options
        )

    do_autohint = ttf and autohint is not None

    for font, ufo in zip(fonts, ufos):
        # optionally replace the compiled layout tables with ones
        # interpolated from precompiled master binaries
        if interpolate_layout_from is not None:
            master_locations, instance_locations = self._designspace_locations(
                interpolate_layout_from
            )
            loc = instance_locations[_normpath(ufo.path)]
            gpos_src = interpolate_layout(
                interpolate_layout_from, loc, finder, mapped=True
            )
            font["GPOS"] = gpos_src["GPOS"]
            gsub_src = TTFont(finder(self._closest_location(master_locations, loc)))
            if "GDEF" in gsub_src:
                font["GDEF"] = gsub_src["GDEF"]
            if "GSUB" in gsub_src:
                font["GSUB"] = gsub_src["GSUB"]

        if do_autohint:
            # if we are autohinting, we save the unhinted font to a
            # temporary path, and the hinted one to the final destination
            fd, otf_path = tempfile.mkstemp("." + ext)
            os.close(fd)
        elif output_path is None:
            otf_path = self._output_path(
                ufo, ext, is_instance, interpolatable, output_dir=output_dir
            )
        else:
            otf_path = output_path

        logger.info("Saving %s", otf_path)
        font.save(otf_path)

        # 'subset' is an Optional[bool], can be None, True or False.
        # When False, we never subset; when True, we always do; when
        # None (default), we check the presence of custom parameters
        if subset is False:
            pass
        elif subset is True or (
            (KEEP_GLYPHS_OLD_KEY in ufo.lib or KEEP_GLYPHS_NEW_KEY in ufo.lib)
            or any(glyph.lib.get(GLYPH_EXPORT_KEY, True) is False for glyph in ufo)
        ):
            self.subset_otf_from_ufo(otf_path, ufo)

        if not do_autohint:
            continue

        if output_path is not None:
            hinted_otf_path = output_path
        else:
            hinted_otf_path = self._output_path(
                ufo,
                ext,
                is_instance,
                interpolatable,
                autohinted=True,
                output_dir=output_dir,
            )
        try:
            ttfautohint(otf_path, hinted_otf_path, args=autohint)
        except TTFAError:
            # copy unhinted font to destination before re-raising error
            shutil.copyfile(otf_path, hinted_otf_path)
            raise
        finally:
            # must clean up temp file
            os.remove(otf_path)
def _save_interpolatable_fonts(self, designspace, output_dir, ttf):
    """Save each source's compiled font plus an updated designspace.

    Args:
        designspace: DesignSpaceDocument whose sources carry compiled
            TTFont objects; mutated in place (paths retargeted to the
            written binaries, layer names cleared).
        output_dir: destination directory, or None for the default
            interpolatable output directory.
        ttf: True for .ttf file names, False for .otf.
    """
    ext = "ttf" if ttf else "otf"
    for source in designspace.sources:
        assert isinstance(source.font, TTFont)
        otf_path = self._output_path(
            source,
            ext,
            is_instance=False,
            interpolatable=True,
            output_dir=output_dir,
            suffix=source.layerName,
        )
        logger.info("Saving %s", otf_path)
        source.font.save(otf_path)
        # repoint the designspace at the compiled binary
        source.path = otf_path
        source.layerName = None
    # instances are not compiled here; drop any stale paths
    for instance in designspace.instances:
        instance.path = instance.filename = None

    if output_dir is None:
        output_dir = self._output_dir(ext, interpolatable=True)
    designspace_path = os.path.join(output_dir, os.path.basename(designspace.path))
    logger.info("Saving %s", designspace_path)
    designspace.write(designspace_path)
def subset_otf_from_ufo(self, otf_path, ufo):
    """Subset a font using export flags set by glyphsLib.

    The binary at *otf_path* is rewritten in place, keeping only glyphs
    allowed by the UFO's "Keep Glyphs" custom parameter (if any) and not
    marked non-exported via the per-glyph export flag.

    There are two more settings that can change export behavior:
    "Export Glyphs" and "Remove Glyphs", which are currently not supported
    for complexity reasons. See
    https://github.com/googlei18n/glyphsLib/issues/295.
    """
    from fontTools import subset

    # ufo2ft always inserts a ".notdef" glyph as the first glyph
    ufo_order = makeOfficialGlyphOrder(ufo)
    if ".notdef" not in ufo_order:
        ufo_order.insert(0, ".notdef")
    ot_order = TTFont(otf_path).getGlyphOrder()
    assert ot_order[0] == ".notdef"
    assert len(ufo_order) == len(ot_order)

    # the new-style lib key takes precedence over the pre-2.3.0 one
    for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY):
        keep_glyphs_list = ufo.lib.get(key)
        if keep_glyphs_list is not None:
            keep_glyphs = set(keep_glyphs_list)
            break
    else:
        keep_glyphs = None

    # map source glyph names to the (possibly renamed) binary names
    include = []
    for source_name, binary_name in zip(ufo_order, ot_order):
        if keep_glyphs and source_name not in keep_glyphs:
            continue

        if source_name in ufo:
            exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True)
            if not exported:
                continue

        include.append(binary_name)

    # copied from nototools.subset
    opt = subset.Options()
    opt.name_IDs = ["*"]
    opt.name_legacy = True
    opt.name_languages = ["*"]
    opt.layout_features = ["*"]
    opt.notdef_outline = True
    opt.recalc_bounds = True
    opt.recalc_timestamp = True
    opt.canonical_order = True
    opt.glyph_names = True

    font = subset.load_font(otf_path, opt, lazy=False)
    subsetter = subset.Subsetter(options=opt)
    subsetter.populate(glyphs=include)
    subsetter.subset(font)
    subset.save_font(font, otf_path, opt)
def run_from_glyphs(
    self,
    glyphs_path,
    designspace_path=None,
    master_dir=None,
    instance_dir=None,
    family_name=None,
    mti_source=None,
    **kwargs
):
    """Run toolchain from Glyphs source.

    Args:
        glyphs_path: Path to source file.
        designspace_path: Output path of generated designspace document.
            By default it's "<family_name>[-<base_style>].designspace".
        master_dir: Directory where to save UFO masters (default:
            "master_ufo").
        instance_dir: Directory where to save UFO instances (default:
            "instance_ufo").
        family_name: If provided, uses this family name in the output.
        mti_source: Path to property list file containing a dictionary
            mapping UFO masters to dictionaries mapping layout table
            tags to MTI source paths which should be compiled into
            those tables.
        kwargs: Arguments passed along to run_from_designspace.
    """
    logger.info("Building master UFOs and designspace from Glyphs source")
    # convert the .glyphs file into UFO masters + a designspace document,
    # then hand the designspace over to the generic pipeline
    designspace_path = self.build_master_ufos(
        glyphs_path,
        designspace_path=designspace_path,
        master_dir=master_dir,
        instance_dir=instance_dir,
        family_name=family_name,
        mti_source=mti_source,
    )
    self.run_from_designspace(designspace_path, **kwargs)
def interpolate_instance_ufos(
    self,
    designspace,
    include=None,
    round_instances=False,
    expand_features_to_instances=False,
):
    """Interpolate master UFOs with MutatorMath and return instance UFOs.

    Args:
        designspace: a DesignSpaceDocument object containing sources and
            instances.
        include (str): optional regular expression pattern to match the
            DS instance 'name' attribute and only interpolate the matching
            instances.
        round_instances (bool): round instances' coordinates to integer.
        expand_features_to_instances: parses the master feature file, expands all
            include()s and writes the resulting full feature file to all instance
            UFOs. Use this if you share feature files among masters in external
            files. Otherwise, the relative include paths can break as instances
            may end up elsewhere. Only done on interpolation.

    Returns:
        list of defcon.Font objects corresponding to the UFO instances.

    Raises:
        FontmakeError: if any of the sources defines a custom 'layer', for
            this is not supported by MutatorMath.
        ValueError: "expand_features_to_instances" is True but no source in the
            designspace document is designated with '<features copy="1"/>'.
    """
    from glyphsLib.interpolation import apply_instance_data
    from mutatorMath.ufo.document import DesignSpaceDocumentReader

    # fail early: MutatorMath cannot read sources that point at a layer
    if any(source.layerName is not None for source in designspace.sources):
        raise FontmakeError(
            "MutatorMath doesn't support DesignSpace sources with 'layer' "
            "attribute"
        )

    # TODO: replace mutatorMath with ufoProcessor?
    builder = DesignSpaceDocumentReader(
        designspace.path, ufoVersion=3, roundGeometry=round_instances, verbose=True
    )
    logger.info("Interpolating master UFOs from designspace")
    if include is not None:
        instances = self._search_instances(designspace, pattern=include)
        for instance_name in instances:
            builder.readInstance(("name", instance_name))
        filenames = set(instances.values())
    else:
        builder.readInstances()
        filenames = None  # will include all instances

    logger.info("Applying instance data from designspace")
    instance_ufos = apply_instance_data(designspace, include_filenames=filenames)

    if expand_features_to_instances:
        logger.debug("Expanding features to instance UFOs")
        # the source flagged with <features copy="1"/> owns the features
        master_source = next(
            (s for s in designspace.sources if s.copyFeatures), None
        )
        if not master_source:
            raise ValueError("No source is designated as the master for features.")
        else:
            master_source_font = builder.sources[master_source.name][0]
            master_source_features = parseLayoutFeatures(master_source_font).asFea()
            for instance_ufo in instance_ufos:
                instance_ufo.features.text = master_source_features
                instance_ufo.save()

    return instance_ufos
def run_from_designspace(
    self,
    designspace_path,
    output=(),
    interpolate=False,
    masters_as_instances=False,
    interpolate_binary_layout=False,
    round_instances=False,
    feature_writers=None,
    expand_features_to_instances=False,
    **kwargs
):
    """Run toolchain from a DesignSpace document to produce either static
    instance fonts (ttf or otf), interpolatable or variable fonts.

    Args:
        designspace_path: Path to designspace document.
        output: iterable of output format names (e.g. "ttf", "otf",
            "variable", "ttf-interpolatable", ...).
        interpolate: If True output all instance fonts, otherwise just
            masters. If the value is a string, only build instance(s) that
            match given name. The string is compiled into a regular
            expression and matched against the "name" attribute of
            designspace instances using `re.fullmatch`.
        masters_as_instances: If True, output master fonts as instances.
        interpolate_binary_layout: Interpolate layout tables from compiled
            master binaries.
        round_instances: apply integer rounding when interpolating with
            MutatorMath.
        feature_writers: optional ufo2ft feature-writer configuration; when
            None, a configuration stored in the designspace <lib> element
            is used if present.
        expand_features_to_instances: write the expanded master feature
            file into every interpolated instance UFO.
        kwargs: Arguments passed along to run_from_ufos.

    Raises:
        TypeError: "variable" or "interpolatable" outputs are incompatible
            with arguments "interpolate", "masters_as_instances", and
            "interpolate_binary_layout".
    """
    interp_outputs = INTERPOLATABLE_OUTPUTS.intersection(output)
    static_outputs = STATIC_OUTPUTS.intersection(output)
    if interp_outputs:
        # Validate via explicit (name, value) pairs instead of peeking
        # into locals() by name, which silently breaks if a parameter is
        # ever renamed and defeats static analysis.
        for argname, argvalue in (
            ("interpolate", interpolate),
            ("masters_as_instances", masters_as_instances),
            ("interpolate_binary_layout", interpolate_binary_layout),
        ):
            if argvalue:
                raise TypeError(
                    '"%s" argument incompatible with output %r'
                    % (argname, ", ".join(sorted(interp_outputs)))
                )

    designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace_path)

    # if no --feature-writers option was passed, check in the designspace's
    # <lib> element if user supplied a custom featureWriters configuration;
    # if so, use that for all the UFOs built from this designspace
    if feature_writers is None and FEATURE_WRITERS_KEY in designspace.lib:
        feature_writers = loadFeatureWriters(designspace)

    if static_outputs:
        self._run_from_designspace_static(
            designspace,
            outputs=static_outputs,
            interpolate=interpolate,
            masters_as_instances=masters_as_instances,
            interpolate_binary_layout=interpolate_binary_layout,
            round_instances=round_instances,
            feature_writers=feature_writers,
            expand_features_to_instances=expand_features_to_instances,
            **kwargs
        )
    if interp_outputs:
        self._run_from_designspace_interpolatable(
            designspace,
            outputs=interp_outputs,
            feature_writers=feature_writers,
            **kwargs
        )
def _run_from_designspace_static(
    self,
    designspace,
    outputs,
    interpolate=False,
    masters_as_instances=False,
    interpolate_binary_layout=False,
    round_instances=False,
    feature_writers=None,
    expand_features_to_instances=False,
    **kwargs
):
    """Build static (non-variable) fonts from *designspace*.

    Collects the UFOs to compile — master sources, interpolated
    instances, or both — then forwards them to run_from_ufos.
    """
    ufos = []
    if not interpolate or masters_as_instances:
        # masters are compiled when not interpolating, or when they
        # should additionally be emitted as instances
        ufos.extend((s.path for s in designspace.sources if s.path))
    if interpolate:
        # a string value acts as a regex filter on instance names
        pattern = interpolate if isinstance(interpolate, basestring) else None
        ufos.extend(
            self.interpolate_instance_ufos(
                designspace,
                include=pattern,
                round_instances=round_instances,
                expand_features_to_instances=expand_features_to_instances,
            )
        )

    if interpolate_binary_layout is False:
        interpolate_layout_from = interpolate_layout_dir = None
    else:
        interpolate_layout_from = designspace
        # a string value names the directory holding compiled masters
        if isinstance(interpolate_binary_layout, basestring):
            interpolate_layout_dir = interpolate_binary_layout
        else:
            interpolate_layout_dir = None

    self.run_from_ufos(
        ufos,
        output=outputs,
        is_instance=(interpolate or masters_as_instances),
        interpolate_layout_from=interpolate_layout_from,
        interpolate_layout_dir=interpolate_layout_dir,
        feature_writers=feature_writers,
        **kwargs
    )
def _run_from_designspace_interpolatable(
    self, designspace, outputs, output_path=None, output_dir=None, **kwargs
):
    """Build variable and/or interpolatable-master fonts from *designspace*.

    The compiled master designspaces are cached so requesting both
    "variable" and "ttf-interpolatable" (or both CFF outputs) compiles
    the masters only once.
    """
    ttf_designspace = otf_designspace = None

    if "variable" in outputs:
        ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
        self.build_variable_font(
            ttf_designspace, output_path=output_path, output_dir=output_dir
        )

    if "ttf-interpolatable" in outputs:
        if ttf_designspace is None:
            ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
        self._save_interpolatable_fonts(ttf_designspace, output_dir, ttf=True)

    if "variable-cff2" in outputs:
        otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
        self.build_variable_font(
            otf_designspace,
            output_path=output_path,
            output_dir=output_dir,
            ttf=False,
        )

    if "otf-interpolatable" in outputs:
        if otf_designspace is None:
            otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
        self._save_interpolatable_fonts(otf_designspace, output_dir, ttf=False)
def run_from_ufos(self, ufos, output=(), **kwargs):
    """Run toolchain from UFO sources.

    Args:
        ufos: List of UFO sources, as either paths or opened objects.
        output: List of output formats to generate.
        kwargs: Arguments passed along to save_otfs.

    Raises:
        FontmakeError: if *ufos* is neither a path/glob string nor a list.
    """
    if set(output) == {"ufo"}:
        # nothing to compile: the UFOs themselves are the output
        return

    # the `ufos` parameter can be a list of UFO objects
    # or it can be a path (string) with a glob syntax
    ufo_paths = []
    if isinstance(ufos, basestring):
        ufo_paths = glob.glob(ufos)
        ufos = [Font(x) for x in ufo_paths]
    elif isinstance(ufos, list):
        # ufos can be either paths or open Font objects, so normalize them
        ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
        ufo_paths = [x.path for x in ufos]
    else:
        raise FontmakeError(
            "UFOs parameter is neither a defcon.Font object, a path or a glob, "
            "nor a list of any of these.",
            ufos,
        )

    need_reload = False
    if "otf" in output:
        self.build_otfs(ufos, **kwargs)
        need_reload = True

    if "ttf" in output:
        # compilation mutates the Font objects in place, so reload
        # fresh copies from disk before building a second format
        if need_reload:
            ufos = [Font(path) for path in ufo_paths]
        self.build_ttfs(ufos, **kwargs)
        need_reload = True
@staticmethod
def _search_instances(designspace, pattern):
    """Return an OrderedDict mapping matching instance names to filenames.

    *pattern* is matched with ``re.fullmatch`` against each designspace
    instance's 'name' attribute; document order is preserved. Raises
    FontmakeError when nothing matches.
    """
    matching = OrderedDict(
        (instance.name, instance.filename)
        for instance in designspace.instances
        if fullmatch(pattern, instance.name)
    )
    if not matching:
        raise FontmakeError("No instance found with %r" % pattern)
    return matching
def _font_name(self, ufo):
"""Generate a postscript-style font name."""
family_name = (
ufo.info.familyName.replace(" ", "")
if ufo.info.familyName is not None
else "None"
)
style_name = (
ufo.info.styleName.replace(" ", "")
if ufo.info.styleName is not None
else "None"
)
return "{}-{}".format(family_name, style_name)
def _output_dir(
self,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
):
"""Generate an output directory.
Args:
ext: extension string.
is_instance: The output is instance font or not.
interpolatable: The output is interpolatable or not.
autohinted: The output is autohinted or not.
is_variable: The output is variable font or not.
Return:
output directory string.
"""
assert not (is_variable and any([is_instance, interpolatable]))
# FIXME? Use user configurable destination folders.
if is_variable:
dir_prefix = "variable_"
elif is_instance:
dir_prefix = "instance_"
else:
dir_prefix = "master_"
dir_suffix = "_interpolatable" if interpolatable else ""
output_dir = dir_prefix + ext + dir_suffix
if autohinted:
output_dir = os.path.join("autohinted", output_dir)
return output_dir
def _output_path(
    self,
    ufo_or_font_name,
    ext,
    is_instance=False,
    interpolatable=False,
    autohinted=False,
    is_variable=False,
    output_dir=None,
    suffix=None,
):
    """Generate output path for a font file with given extension.

    *ufo_or_font_name* may be a plain font-name string or a UFO object;
    for UFOs the name is derived from the file path when available,
    otherwise from the family/style names. The output directory is
    created if it does not exist yet.
    """
    if isinstance(ufo_or_font_name, basestring):
        font_name = ufo_or_font_name
    elif ufo_or_font_name.path:
        # use the on-disk basename without its extension
        font_name = os.path.splitext(
            os.path.basename(os.path.normpath(ufo_or_font_name.path))
        )[0]
    else:
        font_name = self._font_name(ufo_or_font_name)

    if output_dir is None:
        output_dir = self._output_dir(
            ext, is_instance, interpolatable, autohinted, is_variable
        )
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if suffix:
        return os.path.join(output_dir, "{}-{}.{}".format(font_name, suffix, ext))
    else:
        return os.path.join(output_dir, "{}.{}".format(font_name, ext))
def _designspace_locations(self, designspace):
    """Map font filenames to their locations in a designspace.

    Returns a two-element list: one dict for sources and one for
    instances, each mapping the normalized font path to its location.
    """
    return [
        {_normpath(element.path): element.location for element in elements}
        for elements in (designspace.sources, designspace.instances)
    ]
def _closest_location(self, location_map, target):
"""Return path of font whose location is closest to target."""
def dist(a, b):
return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys()))
paths = iter(location_map.keys())
closest = next(paths)
closest_dist = dist(target, location_map[closest])
for path in paths:
cur_dist = dist(target, location_map[path])
if cur_dist < closest_dist:
closest = path
closest_dist = cur_dist
return closest
class FDKFeatureCompiler(FeatureCompiler):
    """An OTF compiler which uses the AFDKO to compile feature syntax."""

    def buildTables(self):
        # nothing to do when the feature source is empty
        if not self.features.strip():
            return

        import subprocess
        from fontTools.misc.py23 import tostr

        outline_path = feasrc_path = fea_path = None
        try:
            # dump the outline font and feature source to temp files that
            # makeotf can read, and reserve a temp path for its output
            fd, outline_path = tempfile.mkstemp()
            os.close(fd)
            self.ttFont.save(outline_path)

            fd, feasrc_path = tempfile.mkstemp()
            os.close(fd)

            fd, fea_path = tempfile.mkstemp()
            os.write(fd, tobytes(self.features, encoding="utf-8"))
            os.close(fd)

            process = subprocess.Popen(
                ["makeotf", "-o", feasrc_path, "-f", outline_path, "-ff", fea_path],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            stdout, stderr = process.communicate()
            retcode = process.poll()

            report = tostr(stdout + (b"\n" + stderr if stderr else b""))
            logger.info(report)

            # before afdko >= 2.7.1rc1, makeotf did not exit with non-zero
            # on failure, so we have to parse the error message
            if retcode != 0:
                success = False
            else:
                success = "makeotf [Error] Failed to build output font" not in report

            if success:
                # copy only the layout tables from makeotf's output into
                # the font being compiled
                with TTFont(feasrc_path) as feasrc:
                    for table in ["GDEF", "GPOS", "GSUB"]:
                        if table in feasrc:
                            self.ttFont[table] = feasrc[table]

            if not success:
                raise FontmakeError("Feature syntax compilation failed.")

        finally:
            # always clean up the temp files that were created
            for path in (outline_path, fea_path, feasrc_path):
                if path is not None:
                    os.remove(path)
def _normpath(fname):
return os.path.normcase(os.path.normpath(fname))
|
googlefonts/fontmake | Lib/fontmake/font_project.py | FontProject.build_master_ufos | python | def build_master_ufos(
self,
glyphs_path,
designspace_path=None,
master_dir=None,
instance_dir=None,
family_name=None,
mti_source=None,
):
import glyphsLib
if master_dir is None:
master_dir = self._output_dir("ufo")
if not os.path.isdir(master_dir):
os.mkdir(master_dir)
if instance_dir is None:
instance_dir = self._output_dir("ufo", is_instance=True)
if not os.path.isdir(instance_dir):
os.mkdir(instance_dir)
font = glyphsLib.GSFont(glyphs_path)
if designspace_path is not None:
designspace_dir = os.path.dirname(designspace_path)
else:
designspace_dir = master_dir
# glyphsLib.to_designspace expects instance_dir to be relative
instance_dir = os.path.relpath(instance_dir, designspace_dir)
designspace = glyphsLib.to_designspace(
font, family_name=family_name, instance_dir=instance_dir
)
masters = {}
# multiple sources can have the same font/filename (but different layer),
# we want to save a font only once
for source in designspace.sources:
if source.filename in masters:
assert source.font is masters[source.filename]
continue
ufo_path = os.path.join(master_dir, source.filename)
# no need to also set the relative 'filename' attribute as that
# will be auto-updated on writing the designspace document
source.path = ufo_path
source.font.save(ufo_path)
masters[source.filename] = source.font
if designspace_path is None:
designspace_path = os.path.join(master_dir, designspace.filename)
designspace.write(designspace_path)
if mti_source:
self.add_mti_features_to_master_ufos(mti_source, masters.values())
return designspace_path | Build UFOs and MutatorMath designspace from Glyphs source. | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L110-L163 | [
"def _output_dir(\n self,\n ext,\n is_instance=False,\n interpolatable=False,\n autohinted=False,\n is_variable=False,\n):\n \"\"\"Generate an output directory.\n\n Args:\n ext: extension string.\n is_instance: The output is instance font or not.\n interpolatable: The output is interpolatable or not.\n autohinted: The output is autohinted or not.\n is_variable: The output is variable font or not.\n Return:\n output directory string.\n \"\"\"\n\n assert not (is_variable and any([is_instance, interpolatable]))\n # FIXME? Use user configurable destination folders.\n if is_variable:\n dir_prefix = \"variable_\"\n elif is_instance:\n dir_prefix = \"instance_\"\n else:\n dir_prefix = \"master_\"\n dir_suffix = \"_interpolatable\" if interpolatable else \"\"\n output_dir = dir_prefix + ext + dir_suffix\n if autohinted:\n output_dir = os.path.join(\"autohinted\", output_dir)\n return output_dir\n"
] | class FontProject(object):
"""Provides methods for building fonts."""
def __init__(self, timing=False, verbose="INFO", validate_ufo=False):
    """Configure logging and global ufoLib validation for the project.

    Args:
        timing: if True, log per-step timing at DEBUG level.
        verbose: logging level name for the root logger (e.g. "INFO").
        validate_ufo: whether ufoLib validates UFOs on read and write.
    """
    logging.basicConfig(level=getattr(logging, verbose.upper()))
    # the subsetter logs at INFO for every font; keep it quiet
    logging.getLogger("fontTools.subset").setLevel(logging.WARNING)
    if timing:
        configLogger(logger=timer.logger, level=logging.DEBUG)

    logger.debug(
        "ufoLib UFO validation is %s", "enabled" if validate_ufo else "disabled"
    )
    setUfoLibReadValidate(validate_ufo)
    setUfoLibWriteValidate(validate_ufo)
@timer()
@timer()
def add_mti_features_to_master_ufos(self, mti_source, masters):
    """Attach Monotype (MTI) binary layout sources to master UFOs.

    *mti_source* is a plist mapping master file names (without the
    ".ufo" extension) to {table tag: path} dicts; each referenced file
    is stored under the UFO data directory where ufo2ft's MTI feature
    compiler expects it. Every master is saved in place.
    """
    mti_dir = os.path.dirname(mti_source)
    with open(mti_source, "rb") as mti_file:
        mti_paths = readPlist(mti_file)
    for master in masters:
        # Drop the extension with splitext: the previous
        # rstrip(".ufo") stripped a *character set*, so a master
        # named e.g. "Nagato.ufo" became "Nagat" and missed its
        # entry in mti_paths.
        key = os.path.splitext(os.path.basename(master.path))[0]
        for table, path in mti_paths[key].items():
            with open(os.path.join(mti_dir, path), "rb") as mti_source:
                ufo_path = (
                    "com.github.googlei18n.ufo2ft.mtiFeatures/%s.mti"
                    % table.strip()
                )
                master.data[ufo_path] = mti_source.read()
        # If we have MTI sources, any Adobe feature files derived from
        # the Glyphs file should be ignored. We clear it here because
        # it only contains junk information anyway.
        master.features.text = ""
        master.save()
@_deprecated
@timer()
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
    """Remove overlaps in UFOs' glyphs' contours.

    *glyph_filter* selects which glyphs are processed; by default only
    glyphs that have at least one contour.
    """
    from booleanOperations import union, BooleanOperationsError

    for ufo in ufos:
        font_name = self._font_name(ufo)
        logger.info("Removing overlaps for " + font_name)
        for glyph in ufo:
            if not glyph_filter(glyph):
                continue
            # redraw the union of the original contours into the glyph
            contours = list(glyph)
            glyph.clearContours()
            try:
                union(contours, glyph.getPointPen())
            except BooleanOperationsError:
                logger.error(
                    "Failed to remove overlaps for %s: %r", font_name, glyph.name
                )
                raise
@_deprecated
@timer()
def decompose_glyphs(self, ufos, glyph_filter=lambda g: True):
    """Move components of UFOs' glyphs to their outlines.

    Components are flattened recursively via _deep_copy_contours;
    *glyph_filter* selects which composite glyphs to decompose.
    """
    for ufo in ufos:
        logger.info("Decomposing glyphs for " + self._font_name(ufo))
        for glyph in ufo:
            if not glyph.components or not glyph_filter(glyph):
                continue
            self._deep_copy_contours(ufo, glyph, glyph, Transform())
            glyph.clearComponents()
def _deep_copy_contours(self, ufo, parent, component, transformation):
    """Copy contours from component to parent, including nested components.

    Recurses depth-first through nested components, accumulating each
    one's affine transformation, then draws the transformed contours
    into *parent*.
    """
    for nested in component.components:
        self._deep_copy_contours(
            ufo,
            parent,
            ufo[nested.baseGlyph],
            transformation.transform(nested.transformation),
        )
    if component != parent:
        pen = TransformPen(parent.getPen(), transformation)
        # if the transformation has a negative determinant, it will reverse
        # the contour direction of the component
        xx, xy, yx, yy = transformation[:4]
        if xx * yy - xy * yx < 0:
            pen = ReverseContourPen(pen)
        component.draw(pen)
@_deprecated
@timer()
def convert_curves(
    self, ufos, compatible=False, reverse_direction=True, conversion_error=None
):
    """Convert the UFOs' cubic curves to quadratic (TrueType) curves.

    When *compatible* is True, all fonts are converted together so the
    resulting quadratics stay point-compatible for interpolation.
    """
    from cu2qu.ufo import font_to_quadratic, fonts_to_quadratic

    if compatible:
        logger.info("Converting curves compatibly")
        fonts_to_quadratic(
            ufos,
            max_err_em=conversion_error,
            reverse_direction=reverse_direction,
            dump_stats=True,
        )
    else:
        for ufo in ufos:
            logger.info("Converting curves for " + self._font_name(ufo))
            font_to_quadratic(
                ufo,
                max_err_em=conversion_error,
                reverse_direction=reverse_direction,
                dump_stats=True,
            )
def build_otfs(self, ufos, **kwargs):
    """Build OpenType binaries with CFF outlines.

    Thin wrapper over save_otfs; *kwargs* are passed through unchanged.
    """
    self.save_otfs(ufos, **kwargs)
def build_ttfs(self, ufos, **kwargs):
    """Build OpenType binaries with TrueType outlines.

    Thin wrapper over save_otfs with ttf=True; *kwargs* pass through.
    """
    self.save_otfs(ufos, ttf=True, **kwargs)
@staticmethod
def _load_designspace_sources(designspace):
    """Open each source UFO exactly once and set ``source.font``.

    Sources that share the same path share a single Font object.
    """
    loaded = {}
    for source in designspace.sources:
        if source.path not in loaded:
            # a source without a path cannot be opened
            assert source.path is not None
            loaded[source.path] = Font(source.path)
        source.font = loaded[source.path]
def _build_interpolatable_masters(
    self,
    designspace,
    ttf,
    use_production_names=None,
    reverse_direction=True,
    conversion_error=None,
    feature_writers=None,
    cff_round_tolerance=None,
    **kwargs
):
    """Compile interpolatable master binaries from a designspace.

    *designspace* may be a DesignSpaceDocument, a path string, or a
    path-like object. When a path can be determined, the document is
    reloaded from disk so the in-place compilation does not modify the
    caller's object. Returns the designspace with compiled fonts set on
    its sources.
    """
    if hasattr(designspace, "__fspath__"):
        # Rebind designspace (not ds_path) so path-like objects fall
        # into the string branch below; assigning to ds_path here left
        # the else branch to fail on a missing .path attribute. This
        # mirrors build_variable_font.
        designspace = designspace.__fspath__()
    if isinstance(designspace, basestring):
        ds_path = designspace
    else:
        # reload designspace from its path so we have a new copy
        # that can be modified in-place.
        ds_path = designspace.path
    if ds_path is not None:
        designspace = designspaceLib.DesignSpaceDocument.fromfile(ds_path)

    self._load_designspace_sources(designspace)
    if ttf:
        return ufo2ft.compileInterpolatableTTFsFromDS(
            designspace,
            useProductionNames=use_production_names,
            reverseDirection=reverse_direction,
            cubicConversionError=conversion_error,
            featureWriters=feature_writers,
            inplace=True,
        )
    else:
        return ufo2ft.compileInterpolatableOTFsFromDS(
            designspace,
            useProductionNames=use_production_names,
            roundTolerance=cff_round_tolerance,
            featureWriters=feature_writers,
            inplace=True,
        )
def build_interpolatable_ttfs(self, designspace, **kwargs):
    """Build OpenType binaries with interpolatable TrueType outlines
    from DesignSpaceDocument object.

    Returns the designspace with compiled fonts attached to its sources.
    """
    return self._build_interpolatable_masters(designspace, ttf=True, **kwargs)
def build_interpolatable_otfs(self, designspace, **kwargs):
    """Build OpenType binaries with interpolatable CFF outlines
    from DesignSpaceDocument object.

    Returns the designspace with compiled fonts attached to its sources.
    """
    return self._build_interpolatable_masters(designspace, ttf=False, **kwargs)
def build_variable_font(
    self,
    designspace,
    output_path=None,
    output_dir=None,
    master_bin_dir=None,
    ttf=True,
):
    """Build OpenType variable font from masters in a designspace.

    *designspace* may be a document object, a path string or a path-like
    object. For a path, master binaries are looked up on disk in
    *master_bin_dir*; for a document object its sources must already
    hold compiled TTFonts. Default output name is
    "<designspace basename>-VF.<ext>".
    """
    assert not (output_path and output_dir), "mutually exclusive args"
    ext = "ttf" if ttf else "otf"

    if hasattr(designspace, "__fspath__"):
        designspace = designspace.__fspath__()
    if isinstance(designspace, basestring):
        designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace)
        if master_bin_dir is None:
            master_bin_dir = self._output_dir(ext, interpolatable=True)
        # resolve master binaries on disk from designspace filenames
        finder = partial(_varLib_finder, directory=master_bin_dir)
    else:
        assert all(isinstance(s.font, TTFont) for s in designspace.sources)
        finder = lambda s: s  # noqa: E731

    if output_path is None:
        output_path = (
            os.path.splitext(os.path.basename(designspace.path))[0] + "-VF"
        )
        output_path = self._output_path(
            output_path, ext, is_variable=True, output_dir=output_dir
        )

    logger.info("Building variable font " + output_path)
    font, _, _ = varLib.build(designspace, finder)
    font.save(output_path)
def _iter_compile(self, ufos, ttf=False, **kwargs):
    """Yield a compiled TTFont for each UFO.

    Options that do not apply to the chosen outline format are dropped
    from *kwargs* before calling the ufo2ft compiler.
    """
    options = dict(kwargs)
    if ttf:
        irrelevant = ("optimizeCFF", "roundTolerance")
        compile_func, fmt = ufo2ft.compileTTF, "TTF"
    else:
        irrelevant = ("cubicConversionError", "reverseDirection")
        compile_func, fmt = ufo2ft.compileOTF, "OTF"
    for key in irrelevant:
        options.pop(key, None)

    for ufo in ufos:
        name = self._font_name(ufo)
        logger.info("Building {} for {}".format(fmt, name))
        yield compile_func(ufo, **options)
@timer()
def save_otfs(
self,
ufos,
ttf=False,
is_instance=False,
interpolatable=False,
use_afdko=False,
autohint=None,
subset=None,
use_production_names=None,
subroutinize=None, # deprecated
optimize_cff=CFFOptimization.NONE,
cff_round_tolerance=None,
remove_overlaps=True,
overlaps_backend=None,
reverse_direction=True,
conversion_error=None,
feature_writers=None,
interpolate_layout_from=None,
interpolate_layout_dir=None,
output_path=None,
output_dir=None,
inplace=True,
):
"""Build OpenType binaries from UFOs.
Args:
ufos: Font objects to compile.
ttf: If True, build fonts with TrueType outlines and .ttf extension.
is_instance: If output fonts are instances, for generating paths.
interpolatable: If output is interpolatable, for generating paths.
use_afdko: If True, use AFDKO to compile feature source.
autohint: Parameters to provide to ttfautohint. If not provided, the
autohinting step is skipped.
subset: Whether to subset the output according to data in the UFOs.
If not provided, also determined by flags in the UFOs.
use_production_names: Whether to use production glyph names in the
output. If not provided, determined by flags in the UFOs.
subroutinize: If True, subroutinize CFF outlines in output.
cff_round_tolerance (float): controls the rounding of point
coordinates in CFF table. It is defined as the maximum absolute
difference between the original float and the rounded integer
value. By default, all floats are rounded to integer (tolerance
0.5); a value of 0 completely disables rounding; values in
between only round floats which are close to their integral
part within the tolerated range. Ignored if ttf=True.
remove_overlaps: If True, remove overlaps in glyph shapes.
overlaps_backend: name of the library to remove overlaps. Can be
either "booleanOperations" (default) or "pathops".
reverse_direction: If True, reverse contour directions when
compiling TrueType outlines.
conversion_error: Error to allow when converting cubic CFF contours
to quadratic TrueType contours.
feature_writers: list of ufo2ft-compatible feature writer classes
or pre-initialized objects that are passed on to ufo2ft
feature compiler to generate automatic feature code. The
default value (None) means that ufo2ft will use its built-in
default feature writers (for kern, mark, mkmk, etc.). An empty
list ([]) will skip any automatic feature generation.
interpolate_layout_from: A DesignSpaceDocument object to give varLib
for interpolating layout tables to use in output.
interpolate_layout_dir: Directory containing the compiled master
fonts to use for interpolating binary layout tables.
output_path: output font file path. Only works when the input
'ufos' list contains a single font.
output_dir: directory where to save output files. Mutually
exclusive with 'output_path' argument.
"""
assert not (output_path and output_dir), "mutually exclusive args"
if output_path is not None and len(ufos) > 1:
raise ValueError("output_path requires a single input")
if subroutinize is not None:
import warnings
warnings.warn(
"the 'subroutinize' argument is deprecated, use 'optimize_cff'",
UserWarning,
)
if subroutinize:
optimize_cff = CFFOptimization.SUBROUTINIZE
else:
# for b/w compatibility, we still run the charstring specializer
# even when --no-subroutinize is used. Use the new --optimize-cff
# option to disable both specilization and subroutinization
optimize_cff = CFFOptimization.SPECIALIZE
ext = "ttf" if ttf else "otf"
if interpolate_layout_from is not None:
if interpolate_layout_dir is None:
interpolate_layout_dir = self._output_dir(
ext, is_instance=False, interpolatable=interpolatable
)
finder = partial(_varLib_finder, directory=interpolate_layout_dir, ext=ext)
# no need to generate automatic features in ufo2ft, since here we
# are interpolating precompiled GPOS table with fontTools.varLib.
# An empty 'featureWriters' list tells ufo2ft to not generate any
# automatic features.
# TODO: Add an argument to ufo2ft.compileOTF/compileTTF to
# completely skip compiling features into OTL tables
feature_writers = []
compiler_options = dict(
useProductionNames=use_production_names,
reverseDirection=reverse_direction,
cubicConversionError=conversion_error,
featureWriters=feature_writers,
inplace=True, # avoid extra copy
)
if use_afdko:
compiler_options["featureCompilerClass"] = FDKFeatureCompiler
if interpolatable:
if not ttf:
raise NotImplementedError("interpolatable CFF not supported yet")
logger.info("Building interpolation-compatible TTFs")
fonts = ufo2ft.compileInterpolatableTTFs(ufos, **compiler_options)
else:
fonts = self._iter_compile(
ufos,
ttf,
removeOverlaps=remove_overlaps,
overlapsBackend=overlaps_backend,
optimizeCFF=optimize_cff,
roundTolerance=cff_round_tolerance,
**compiler_options
)
do_autohint = ttf and autohint is not None
for font, ufo in zip(fonts, ufos):
if interpolate_layout_from is not None:
master_locations, instance_locations = self._designspace_locations(
interpolate_layout_from
)
loc = instance_locations[_normpath(ufo.path)]
gpos_src = interpolate_layout(
interpolate_layout_from, loc, finder, mapped=True
)
font["GPOS"] = gpos_src["GPOS"]
gsub_src = TTFont(finder(self._closest_location(master_locations, loc)))
if "GDEF" in gsub_src:
font["GDEF"] = gsub_src["GDEF"]
if "GSUB" in gsub_src:
font["GSUB"] = gsub_src["GSUB"]
if do_autohint:
# if we are autohinting, we save the unhinted font to a
# temporary path, and the hinted one to the final destination
fd, otf_path = tempfile.mkstemp("." + ext)
os.close(fd)
elif output_path is None:
otf_path = self._output_path(
ufo, ext, is_instance, interpolatable, output_dir=output_dir
)
else:
otf_path = output_path
logger.info("Saving %s", otf_path)
font.save(otf_path)
# 'subset' is an Optional[bool], can be None, True or False.
# When False, we never subset; when True, we always do; when
# None (default), we check the presence of custom parameters
if subset is False:
pass
elif subset is True or (
(KEEP_GLYPHS_OLD_KEY in ufo.lib or KEEP_GLYPHS_NEW_KEY in ufo.lib)
or any(glyph.lib.get(GLYPH_EXPORT_KEY, True) is False for glyph in ufo)
):
self.subset_otf_from_ufo(otf_path, ufo)
if not do_autohint:
continue
if output_path is not None:
hinted_otf_path = output_path
else:
hinted_otf_path = self._output_path(
ufo,
ext,
is_instance,
interpolatable,
autohinted=True,
output_dir=output_dir,
)
try:
ttfautohint(otf_path, hinted_otf_path, args=autohint)
except TTFAError:
# copy unhinted font to destination before re-raising error
shutil.copyfile(otf_path, hinted_otf_path)
raise
finally:
# must clean up temp file
os.remove(otf_path)
def _save_interpolatable_fonts(self, designspace, output_dir, ttf):
    """Save compiled interpolatable master fonts and the updated designspace.

    Mutates *designspace* in place: each source's ``path`` is re-pointed
    at the saved binary and its ``layerName`` cleared; instance paths and
    filenames are reset to None. The designspace document is then written
    next to the binaries.
    """
    ext = "ttf" if ttf else "otf"
    for source in designspace.sources:
        # sources must already hold compiled binaries, not UFOs
        assert isinstance(source.font, TTFont)
        otf_path = self._output_path(
            source,
            ext,
            is_instance=False,
            interpolatable=True,
            output_dir=output_dir,
            # the layer name disambiguates multiple sources sharing a font
            suffix=source.layerName,
        )
        logger.info("Saving %s", otf_path)
        source.font.save(otf_path)
        source.path = otf_path
        source.layerName = None
    for instance in designspace.instances:
        # instances are not built here; drop any stale path info
        instance.path = instance.filename = None
    if output_dir is None:
        output_dir = self._output_dir(ext, interpolatable=True)
    designspace_path = os.path.join(output_dir, os.path.basename(designspace.path))
    logger.info("Saving %s", designspace_path)
    designspace.write(designspace_path)
    def subset_otf_from_ufo(self, otf_path, ufo):
        """Subset a font using export flags set by glyphsLib.

        The binary at ``otf_path`` is subset in place, keeping only glyphs
        that are exported (and, when a "Keep Glyphs" list is present in the
        UFO lib, also listed there).

        There are two more settings that can change export behavior:
        "Export Glyphs" and "Remove Glyphs", which are currently not supported
        for complexity reasons. See
        https://github.com/googlei18n/glyphsLib/issues/295.
        """
        from fontTools import subset
        # ufo2ft always inserts a ".notdef" glyph as the first glyph
        ufo_order = makeOfficialGlyphOrder(ufo)
        if ".notdef" not in ufo_order:
            ufo_order.insert(0, ".notdef")
        ot_order = TTFont(otf_path).getGlyphOrder()
        assert ot_order[0] == ".notdef"
        assert len(ufo_order) == len(ot_order)
        # the keep-list may live under either the new or the old glyphsLib
        # lib key; the first key found wins
        for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY):
            keep_glyphs_list = ufo.lib.get(key)
            if keep_glyphs_list is not None:
                keep_glyphs = set(keep_glyphs_list)
                break
        else:
            keep_glyphs = None
        include = []
        # ufo_order and ot_order run in parallel, so zip maps source glyph
        # names to the (possibly renamed) glyph names in the binary
        for source_name, binary_name in zip(ufo_order, ot_order):
            if keep_glyphs and source_name not in keep_glyphs:
                continue
            if source_name in ufo:
                exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True)
                if not exported:
                    continue
            include.append(binary_name)
        # copied from nototools.subset
        opt = subset.Options()
        opt.name_IDs = ["*"]
        opt.name_legacy = True
        opt.name_languages = ["*"]
        opt.layout_features = ["*"]
        opt.notdef_outline = True
        opt.recalc_bounds = True
        opt.recalc_timestamp = True
        opt.canonical_order = True
        opt.glyph_names = True
        font = subset.load_font(otf_path, opt, lazy=False)
        subsetter = subset.Subsetter(options=opt)
        subsetter.populate(glyphs=include)
        subsetter.subset(font)
        subset.save_font(font, otf_path, opt)
    def run_from_glyphs(
        self,
        glyphs_path,
        designspace_path=None,
        master_dir=None,
        instance_dir=None,
        family_name=None,
        mti_source=None,
        **kwargs
    ):
        """Run toolchain from Glyphs source.

        Args:
            glyphs_path: Path to source file.
            designspace_path: Output path of generated designspace document.
                By default it's "<family_name>[-<base_style>].designspace".
            master_dir: Directory where to save UFO masters (default:
                "master_ufo").
            instance_dir: Directory where to save UFO instances (default:
                "instance_ufo").
            family_name: If provided, uses this family name in the output.
            mti_source: Path to property list file containing a dictionary
                mapping UFO masters to dictionaries mapping layout table
                tags to MTI source paths which should be compiled into
                those tables.
            kwargs: Arguments passed along to run_from_designspace.
        """
        logger.info("Building master UFOs and designspace from Glyphs source")
        # build_master_ufos returns the path actually written, which may
        # differ from the requested designspace_path when it was None
        designspace_path = self.build_master_ufos(
            glyphs_path,
            designspace_path=designspace_path,
            master_dir=master_dir,
            instance_dir=instance_dir,
            family_name=family_name,
            mti_source=mti_source,
        )
        self.run_from_designspace(designspace_path, **kwargs)
    def interpolate_instance_ufos(
        self,
        designspace,
        include=None,
        round_instances=False,
        expand_features_to_instances=False,
    ):
        """Interpolate master UFOs with MutatorMath and return instance UFOs.

        Args:
            designspace: a DesignSpaceDocument object containing sources and
                instances.
            include (str): optional regular expression pattern to match the
                DS instance 'name' attribute and only interpolate the matching
                instances.
            round_instances (bool): round instances' coordinates to integer.
            expand_features_to_instances: parses the master feature file, expands all
                include()s and writes the resulting full feature file to all instance
                UFOs. Use this if you share feature files among masters in external
                files. Otherwise, the relative include paths can break as instances
                may end up elsewhere. Only done on interpolation.

        Returns:
            list of defcon.Font objects corresponding to the UFO instances.

        Raises:
            FontmakeError: if any of the sources defines a custom 'layer', for
                this is not supported by MutatorMath.
            ValueError: "expand_features_to_instances" is True but no source in the
                designspace document is designated with '<features copy="1"/>'.
        """
        from glyphsLib.interpolation import apply_instance_data
        from mutatorMath.ufo.document import DesignSpaceDocumentReader
        if any(source.layerName is not None for source in designspace.sources):
            raise FontmakeError(
                "MutatorMath doesn't support DesignSpace sources with 'layer' "
                "attribute"
            )
        # TODO: replace mutatorMath with ufoProcessor?
        builder = DesignSpaceDocumentReader(
            designspace.path, ufoVersion=3, roundGeometry=round_instances, verbose=True
        )
        logger.info("Interpolating master UFOs from designspace")
        if include is not None:
            # restrict interpolation to instances whose 'name' matches
            instances = self._search_instances(designspace, pattern=include)
            for instance_name in instances:
                builder.readInstance(("name", instance_name))
            filenames = set(instances.values())
        else:
            builder.readInstances()
            filenames = None  # will include all instances
        logger.info("Applying instance data from designspace")
        instance_ufos = apply_instance_data(designspace, include_filenames=filenames)
        if expand_features_to_instances:
            logger.debug("Expanding features to instance UFOs")
            # the source flagged with <features copy="1"/> provides the
            # feature file to be expanded into every instance
            master_source = next(
                (s for s in designspace.sources if s.copyFeatures), None
            )
            if not master_source:
                raise ValueError("No source is designated as the master for features.")
            else:
                master_source_font = builder.sources[master_source.name][0]
                master_source_features = parseLayoutFeatures(master_source_font).asFea()
                for instance_ufo in instance_ufos:
                    instance_ufo.features.text = master_source_features
                    instance_ufo.save()
        return instance_ufos
def run_from_designspace(
self,
designspace_path,
output=(),
interpolate=False,
masters_as_instances=False,
interpolate_binary_layout=False,
round_instances=False,
feature_writers=None,
expand_features_to_instances=False,
**kwargs
):
"""Run toolchain from a DesignSpace document to produce either static
instance fonts (ttf or otf), interpolatable or variable fonts.
Args:
designspace_path: Path to designspace document.
interpolate: If True output all instance fonts, otherwise just
masters. If the value is a string, only build instance(s) that
match given name. The string is compiled into a regular
expression and matched against the "name" attribute of
designspace instances using `re.fullmatch`.
masters_as_instances: If True, output master fonts as instances.
interpolate_binary_layout: Interpolate layout tables from compiled
master binaries.
round_instances: apply integer rounding when interpolating with
MutatorMath.
kwargs: Arguments passed along to run_from_ufos.
Raises:
TypeError: "variable" or "interpolatable" outputs are incompatible
with arguments "interpolate", "masters_as_instances", and
"interpolate_binary_layout".
"""
interp_outputs = INTERPOLATABLE_OUTPUTS.intersection(output)
static_outputs = STATIC_OUTPUTS.intersection(output)
if interp_outputs:
for argname in (
"interpolate",
"masters_as_instances",
"interpolate_binary_layout",
):
if locals()[argname]:
raise TypeError(
'"%s" argument incompatible with output %r'
% (argname, ", ".join(sorted(interp_outputs)))
)
designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace_path)
# if no --feature-writers option was passed, check in the designspace's
# <lib> element if user supplied a custom featureWriters configuration;
# if so, use that for all the UFOs built from this designspace
if feature_writers is None and FEATURE_WRITERS_KEY in designspace.lib:
feature_writers = loadFeatureWriters(designspace)
if static_outputs:
self._run_from_designspace_static(
designspace,
outputs=static_outputs,
interpolate=interpolate,
masters_as_instances=masters_as_instances,
interpolate_binary_layout=interpolate_binary_layout,
round_instances=round_instances,
feature_writers=feature_writers,
expand_features_to_instances=expand_features_to_instances,
**kwargs
)
if interp_outputs:
self._run_from_designspace_interpolatable(
designspace,
outputs=interp_outputs,
feature_writers=feature_writers,
**kwargs
)
    def _run_from_designspace_static(
        self,
        designspace,
        outputs,
        interpolate=False,
        masters_as_instances=False,
        interpolate_binary_layout=False,
        round_instances=False,
        feature_writers=None,
        expand_features_to_instances=False,
        **kwargs
    ):
        """Build static (non-variable) binaries from a designspace.

        Collects the UFOs to compile — master paths and/or freshly
        interpolated instances — and hands them to run_from_ufos.
        """
        ufos = []
        if not interpolate or masters_as_instances:
            ufos.extend((s.path for s in designspace.sources if s.path))
        if interpolate:
            # a string value means "only instances whose name matches it"
            pattern = interpolate if isinstance(interpolate, basestring) else None
            ufos.extend(
                self.interpolate_instance_ufos(
                    designspace,
                    include=pattern,
                    round_instances=round_instances,
                    expand_features_to_instances=expand_features_to_instances,
                )
            )
        if interpolate_binary_layout is False:
            interpolate_layout_from = interpolate_layout_dir = None
        else:
            interpolate_layout_from = designspace
            # a string value is taken as the directory of compiled masters
            if isinstance(interpolate_binary_layout, basestring):
                interpolate_layout_dir = interpolate_binary_layout
            else:
                interpolate_layout_dir = None
        self.run_from_ufos(
            ufos,
            output=outputs,
            is_instance=(interpolate or masters_as_instances),
            interpolate_layout_from=interpolate_layout_from,
            interpolate_layout_dir=interpolate_layout_dir,
            feature_writers=feature_writers,
            **kwargs
        )
def _run_from_designspace_interpolatable(
self, designspace, outputs, output_path=None, output_dir=None, **kwargs
):
ttf_designspace = otf_designspace = None
if "variable" in outputs:
ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
self.build_variable_font(
ttf_designspace, output_path=output_path, output_dir=output_dir
)
if "ttf-interpolatable" in outputs:
if ttf_designspace is None:
ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
self._save_interpolatable_fonts(ttf_designspace, output_dir, ttf=True)
if "variable-cff2" in outputs:
otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
self.build_variable_font(
otf_designspace,
output_path=output_path,
output_dir=output_dir,
ttf=False,
)
if "otf-interpolatable" in outputs:
if otf_designspace is None:
otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
self._save_interpolatable_fonts(otf_designspace, output_dir, ttf=False)
def run_from_ufos(self, ufos, output=(), **kwargs):
"""Run toolchain from UFO sources.
Args:
ufos: List of UFO sources, as either paths or opened objects.
output: List of output formats to generate.
kwargs: Arguments passed along to save_otfs.
"""
if set(output) == {"ufo"}:
return
# the `ufos` parameter can be a list of UFO objects
# or it can be a path (string) with a glob syntax
ufo_paths = []
if isinstance(ufos, basestring):
ufo_paths = glob.glob(ufos)
ufos = [Font(x) for x in ufo_paths]
elif isinstance(ufos, list):
# ufos can be either paths or open Font objects, so normalize them
ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
ufo_paths = [x.path for x in ufos]
else:
raise FontmakeError(
"UFOs parameter is neither a defcon.Font object, a path or a glob, "
"nor a list of any of these.",
ufos,
)
need_reload = False
if "otf" in output:
self.build_otfs(ufos, **kwargs)
need_reload = True
if "ttf" in output:
if need_reload:
ufos = [Font(path) for path in ufo_paths]
self.build_ttfs(ufos, **kwargs)
need_reload = True
@staticmethod
def _search_instances(designspace, pattern):
instances = OrderedDict()
for instance in designspace.instances:
# is 'name' optional? 'filename' certainly must not be
if fullmatch(pattern, instance.name):
instances[instance.name] = instance.filename
if not instances:
raise FontmakeError("No instance found with %r" % pattern)
return instances
def _font_name(self, ufo):
"""Generate a postscript-style font name."""
family_name = (
ufo.info.familyName.replace(" ", "")
if ufo.info.familyName is not None
else "None"
)
style_name = (
ufo.info.styleName.replace(" ", "")
if ufo.info.styleName is not None
else "None"
)
return "{}-{}".format(family_name, style_name)
def _output_dir(
self,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
):
"""Generate an output directory.
Args:
ext: extension string.
is_instance: The output is instance font or not.
interpolatable: The output is interpolatable or not.
autohinted: The output is autohinted or not.
is_variable: The output is variable font or not.
Return:
output directory string.
"""
assert not (is_variable and any([is_instance, interpolatable]))
# FIXME? Use user configurable destination folders.
if is_variable:
dir_prefix = "variable_"
elif is_instance:
dir_prefix = "instance_"
else:
dir_prefix = "master_"
dir_suffix = "_interpolatable" if interpolatable else ""
output_dir = dir_prefix + ext + dir_suffix
if autohinted:
output_dir = os.path.join("autohinted", output_dir)
return output_dir
def _output_path(
self,
ufo_or_font_name,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
output_dir=None,
suffix=None,
):
"""Generate output path for a font file with given extension."""
if isinstance(ufo_or_font_name, basestring):
font_name = ufo_or_font_name
elif ufo_or_font_name.path:
font_name = os.path.splitext(
os.path.basename(os.path.normpath(ufo_or_font_name.path))
)[0]
else:
font_name = self._font_name(ufo_or_font_name)
if output_dir is None:
output_dir = self._output_dir(
ext, is_instance, interpolatable, autohinted, is_variable
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if suffix:
return os.path.join(output_dir, "{}-{}.{}".format(font_name, suffix, ext))
else:
return os.path.join(output_dir, "{}.{}".format(font_name, ext))
def _designspace_locations(self, designspace):
"""Map font filenames to their locations in a designspace."""
maps = []
for elements in (designspace.sources, designspace.instances):
location_map = {}
for element in elements:
path = _normpath(element.path)
location_map[path] = element.location
maps.append(location_map)
return maps
def _closest_location(self, location_map, target):
"""Return path of font whose location is closest to target."""
def dist(a, b):
return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys()))
paths = iter(location_map.keys())
closest = next(paths)
closest_dist = dist(target, location_map[closest])
for path in paths:
cur_dist = dist(target, location_map[path])
if cur_dist < closest_dist:
closest = path
closest_dist = cur_dist
return closest
|
googlefonts/fontmake | Lib/fontmake/font_project.py | FontProject.remove_overlaps | python | def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
from booleanOperations import union, BooleanOperationsError
for ufo in ufos:
font_name = self._font_name(ufo)
logger.info("Removing overlaps for " + font_name)
for glyph in ufo:
if not glyph_filter(glyph):
continue
contours = list(glyph)
glyph.clearContours()
try:
union(contours, glyph.getPointPen())
except BooleanOperationsError:
logger.error(
"Failed to remove overlaps for %s: %r", font_name, glyph.name
)
raise | Remove overlaps in UFOs' glyphs' contours. | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L187-L205 | [
"def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):\n",
"def _font_name(self, ufo):\n \"\"\"Generate a postscript-style font name.\"\"\"\n family_name = (\n ufo.info.familyName.replace(\" \", \"\")\n if ufo.info.familyName is not None\n else \"None\"\n )\n style_name = (\n ufo.info.styleName.replace(\" \", \"\")\n if ufo.info.styleName is not None\n else \"None\"\n )\n return \"{}-{}\".format(family_name, style_name)\n"
] | class FontProject(object):
"""Provides methods for building fonts."""
def __init__(self, timing=False, verbose="INFO", validate_ufo=False):
logging.basicConfig(level=getattr(logging, verbose.upper()))
logging.getLogger("fontTools.subset").setLevel(logging.WARNING)
if timing:
configLogger(logger=timer.logger, level=logging.DEBUG)
logger.debug(
"ufoLib UFO validation is %s", "enabled" if validate_ufo else "disabled"
)
setUfoLibReadValidate(validate_ufo)
setUfoLibWriteValidate(validate_ufo)
@timer()
def build_master_ufos(
self,
glyphs_path,
designspace_path=None,
master_dir=None,
instance_dir=None,
family_name=None,
mti_source=None,
):
"""Build UFOs and MutatorMath designspace from Glyphs source."""
import glyphsLib
if master_dir is None:
master_dir = self._output_dir("ufo")
if not os.path.isdir(master_dir):
os.mkdir(master_dir)
if instance_dir is None:
instance_dir = self._output_dir("ufo", is_instance=True)
if not os.path.isdir(instance_dir):
os.mkdir(instance_dir)
font = glyphsLib.GSFont(glyphs_path)
if designspace_path is not None:
designspace_dir = os.path.dirname(designspace_path)
else:
designspace_dir = master_dir
# glyphsLib.to_designspace expects instance_dir to be relative
instance_dir = os.path.relpath(instance_dir, designspace_dir)
designspace = glyphsLib.to_designspace(
font, family_name=family_name, instance_dir=instance_dir
)
masters = {}
# multiple sources can have the same font/filename (but different layer),
# we want to save a font only once
for source in designspace.sources:
if source.filename in masters:
assert source.font is masters[source.filename]
continue
ufo_path = os.path.join(master_dir, source.filename)
# no need to also set the relative 'filename' attribute as that
# will be auto-updated on writing the designspace document
source.path = ufo_path
source.font.save(ufo_path)
masters[source.filename] = source.font
if designspace_path is None:
designspace_path = os.path.join(master_dir, designspace.filename)
designspace.write(designspace_path)
if mti_source:
self.add_mti_features_to_master_ufos(mti_source, masters.values())
return designspace_path
@timer()
def add_mti_features_to_master_ufos(self, mti_source, masters):
mti_dir = os.path.dirname(mti_source)
with open(mti_source, "rb") as mti_file:
mti_paths = readPlist(mti_file)
for master in masters:
key = os.path.basename(master.path).rstrip(".ufo")
for table, path in mti_paths[key].items():
with open(os.path.join(mti_dir, path), "rb") as mti_source:
ufo_path = (
"com.github.googlei18n.ufo2ft.mtiFeatures/%s.mti"
% table.strip()
)
master.data[ufo_path] = mti_source.read()
# If we have MTI sources, any Adobe feature files derived from
# the Glyphs file should be ignored. We clear it here because
# it only contains junk information anyway.
master.features.text = ""
master.save()
@_deprecated
@timer()
@_deprecated
@timer()
    def decompose_glyphs(self, ufos, glyph_filter=lambda g: True):
        """Move components of UFOs' glyphs to their outlines.

        Args:
            ufos: Iterable of UFO Font objects, modified in place.
            glyph_filter: Predicate selecting which glyphs to decompose
                (default: every glyph that has components).
        """
        for ufo in ufos:
            logger.info("Decomposing glyphs for " + self._font_name(ufo))
            for glyph in ufo:
                if not glyph.components or not glyph_filter(glyph):
                    continue
                # copy all (nested) component outlines onto the glyph itself,
                # then drop the component references
                self._deep_copy_contours(ufo, glyph, glyph, Transform())
                glyph.clearComponents()
    def _deep_copy_contours(self, ufo, parent, component, transformation):
        """Copy contours from component to parent, including nested components.

        Args:
            ufo: Font providing the glyphs referenced by nested components.
            parent: Glyph that receives the copied contours.
            component: Glyph currently being traversed (equals `parent` at
                the root of the recursion).
            transformation: Accumulated affine Transform to apply.
        """
        for nested in component.components:
            self._deep_copy_contours(
                ufo,
                parent,
                ufo[nested.baseGlyph],
                transformation.transform(nested.transformation),
            )
        if component != parent:
            pen = TransformPen(parent.getPen(), transformation)
            # if the transformation has a negative determinant, it will reverse
            # the contour direction of the component
            xx, xy, yx, yy = transformation[:4]
            if xx * yy - xy * yx < 0:
                pen = ReverseContourPen(pen)
            component.draw(pen)
@_deprecated
@timer()
    def convert_curves(
        self, ufos, compatible=False, reverse_direction=True, conversion_error=None
    ):
        """Convert the UFOs' cubic curves to quadratic with cu2qu, in place.

        Args:
            ufos: UFO Font objects to convert.
            compatible: If True convert all fonts together, producing
                point-compatible quadratic curves for interpolation.
            reverse_direction: Passed on to cu2qu; reverses contour
                direction during conversion.
            conversion_error: Maximum approximation error, in em units.
        """
        from cu2qu.ufo import font_to_quadratic, fonts_to_quadratic
        if compatible:
            logger.info("Converting curves compatibly")
            fonts_to_quadratic(
                ufos,
                max_err_em=conversion_error,
                reverse_direction=reverse_direction,
                dump_stats=True,
            )
        else:
            for ufo in ufos:
                logger.info("Converting curves for " + self._font_name(ufo))
                font_to_quadratic(
                    ufo,
                    max_err_em=conversion_error,
                    reverse_direction=reverse_direction,
                    dump_stats=True,
                )
    def build_otfs(self, ufos, **kwargs):
        """Build OpenType binaries with CFF outlines.

        Thin wrapper: all arguments are forwarded to save_otfs.
        """
        self.save_otfs(ufos, **kwargs)
    def build_ttfs(self, ufos, **kwargs):
        """Build OpenType binaries with TrueType outlines.

        Thin wrapper: forwards to save_otfs with ttf=True.
        """
        self.save_otfs(ufos, ttf=True, **kwargs)
@staticmethod
def _load_designspace_sources(designspace):
# set source.font attributes, but only load fonts once
masters = {}
for source in designspace.sources:
if source.path in masters:
source.font = masters[source.path]
else:
assert source.path is not None
source.font = Font(source.path)
masters[source.path] = source.font
def _build_interpolatable_masters(
self,
designspace,
ttf,
use_production_names=None,
reverse_direction=True,
conversion_error=None,
feature_writers=None,
cff_round_tolerance=None,
**kwargs
):
if hasattr(designspace, "__fspath__"):
ds_path = designspace.__fspath__()
if isinstance(designspace, basestring):
ds_path = designspace
else:
# reload designspace from its path so we have a new copy
# that can be modified in-place.
ds_path = designspace.path
if ds_path is not None:
designspace = designspaceLib.DesignSpaceDocument.fromfile(ds_path)
self._load_designspace_sources(designspace)
if ttf:
return ufo2ft.compileInterpolatableTTFsFromDS(
designspace,
useProductionNames=use_production_names,
reverseDirection=reverse_direction,
cubicConversionError=conversion_error,
featureWriters=feature_writers,
inplace=True,
)
else:
return ufo2ft.compileInterpolatableOTFsFromDS(
designspace,
useProductionNames=use_production_names,
roundTolerance=cff_round_tolerance,
featureWriters=feature_writers,
inplace=True,
)
    def build_interpolatable_ttfs(self, designspace, **kwargs):
        """Build OpenType binaries with interpolatable TrueType outlines
        from DesignSpaceDocument object.

        Thin wrapper around ``_build_interpolatable_masters`` with ttf=True.
        """
        return self._build_interpolatable_masters(designspace, ttf=True, **kwargs)
    def build_interpolatable_otfs(self, designspace, **kwargs):
        """Build OpenType binaries with interpolatable CFF outlines
        from DesignSpaceDocument object.

        Thin wrapper around ``_build_interpolatable_masters`` with ttf=False.
        (The previous docstring said "TrueType", copy-pasted from the TTF
        variant.)
        """
        return self._build_interpolatable_masters(designspace, ttf=False, **kwargs)
    def build_variable_font(
        self,
        designspace,
        output_path=None,
        output_dir=None,
        master_bin_dir=None,
        ttf=True,
    ):
        """Build OpenType variable font from masters in a designspace.

        Args:
            designspace: DesignSpaceDocument object, a path string, or a
                path-like object pointing at a designspace file.
            output_path: Explicit output file path; mutually exclusive with
                output_dir.
            output_dir: Directory for the output file; mutually exclusive
                with output_path.
            master_bin_dir: Directory containing compiled master binaries;
                only used when the designspace is given as a path.
            ttf: If True build a ".ttf" variable font, else ".otf".
        """
        assert not (output_path and output_dir), "mutually exclusive args"
        ext = "ttf" if ttf else "otf"
        if hasattr(designspace, "__fspath__"):
            designspace = designspace.__fspath__()
        if isinstance(designspace, basestring):
            # a path was passed: load the document and locate the compiled
            # master binaries on disk via the finder
            designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace)
            if master_bin_dir is None:
                master_bin_dir = self._output_dir(ext, interpolatable=True)
            finder = partial(_varLib_finder, directory=master_bin_dir)
        else:
            # a document with compiled TTFont sources was passed directly
            assert all(isinstance(s.font, TTFont) for s in designspace.sources)
            finder = lambda s: s  # noqa: E731
        if output_path is None:
            output_path = (
                os.path.splitext(os.path.basename(designspace.path))[0] + "-VF"
            )
        output_path = self._output_path(
            output_path, ext, is_variable=True, output_dir=output_dir
        )
        logger.info("Building variable font " + output_path)
        font, _, _ = varLib.build(designspace, finder)
        font.save(output_path)
    def _iter_compile(self, ufos, ttf=False, **kwargs):
        # generator function that calls ufo2ft compiler for each ufo and
        # yields ttFont instances
        options = dict(kwargs)
        if ttf:
            # drop CFF-only options before calling the TrueType compiler
            for key in ("optimizeCFF", "roundTolerance"):
                options.pop(key, None)
            compile_func, fmt = ufo2ft.compileTTF, "TTF"
        else:
            # drop TrueType-only options before calling the CFF compiler
            for key in ("cubicConversionError", "reverseDirection"):
                options.pop(key, None)
            compile_func, fmt = ufo2ft.compileOTF, "OTF"
        for ufo in ufos:
            name = self._font_name(ufo)
            logger.info("Building {} for {}".format(fmt, name))
            yield compile_func(ufo, **options)
    @timer()
    def save_otfs(
        self,
        ufos,
        ttf=False,
        is_instance=False,
        interpolatable=False,
        use_afdko=False,
        autohint=None,
        subset=None,
        use_production_names=None,
        subroutinize=None,  # deprecated
        optimize_cff=CFFOptimization.NONE,
        cff_round_tolerance=None,
        remove_overlaps=True,
        overlaps_backend=None,
        reverse_direction=True,
        conversion_error=None,
        feature_writers=None,
        interpolate_layout_from=None,
        interpolate_layout_dir=None,
        output_path=None,
        output_dir=None,
        inplace=True,
    ):
        """Build OpenType binaries from UFOs.

        Args:
            ufos: Font objects to compile.
            ttf: If True, build fonts with TrueType outlines and .ttf extension.
            is_instance: If output fonts are instances, for generating paths.
            interpolatable: If output is interpolatable, for generating paths.
            use_afdko: If True, use AFDKO to compile feature source.
            autohint: Parameters to provide to ttfautohint. If not provided, the
                autohinting step is skipped.
            subset: Whether to subset the output according to data in the UFOs.
                If not provided, also determined by flags in the UFOs.
            use_production_names: Whether to use production glyph names in the
                output. If not provided, determined by flags in the UFOs.
            subroutinize: If True, subroutinize CFF outlines in output.
            cff_round_tolerance (float): controls the rounding of point
                coordinates in CFF table. It is defined as the maximum absolute
                difference between the original float and the rounded integer
                value. By default, all floats are rounded to integer (tolerance
                0.5); a value of 0 completely disables rounding; values in
                between only round floats which are close to their integral
                part within the tolerated range. Ignored if ttf=True.
            remove_overlaps: If True, remove overlaps in glyph shapes.
            overlaps_backend: name of the library to remove overlaps. Can be
                either "booleanOperations" (default) or "pathops".
            reverse_direction: If True, reverse contour directions when
                compiling TrueType outlines.
            conversion_error: Error to allow when converting cubic CFF contours
                to quadratic TrueType contours.
            feature_writers: list of ufo2ft-compatible feature writer classes
                or pre-initialized objects that are passed on to ufo2ft
                feature compiler to generate automatic feature code. The
                default value (None) means that ufo2ft will use its built-in
                default feature writers (for kern, mark, mkmk, etc.). An empty
                list ([]) will skip any automatic feature generation.
            interpolate_layout_from: A DesignSpaceDocument object to give varLib
                for interpolating layout tables to use in output.
            interpolate_layout_dir: Directory containing the compiled master
                fonts to use for interpolating binary layout tables.
            output_path: output font file path. Only works when the input
                'ufos' list contains a single font.
            output_dir: directory where to save output files. Mutually
                exclusive with 'output_path' argument.
        """
        assert not (output_path and output_dir), "mutually exclusive args"
        if output_path is not None and len(ufos) > 1:
            raise ValueError("output_path requires a single input")
        if subroutinize is not None:
            import warnings
            warnings.warn(
                "the 'subroutinize' argument is deprecated, use 'optimize_cff'",
                UserWarning,
            )
            if subroutinize:
                optimize_cff = CFFOptimization.SUBROUTINIZE
            else:
                # for b/w compatibility, we still run the charstring specializer
                # even when --no-subroutinize is used. Use the new --optimize-cff
                # option to disable both specilization and subroutinization
                optimize_cff = CFFOptimization.SPECIALIZE
        ext = "ttf" if ttf else "otf"
        if interpolate_layout_from is not None:
            if interpolate_layout_dir is None:
                interpolate_layout_dir = self._output_dir(
                    ext, is_instance=False, interpolatable=interpolatable
                )
            finder = partial(_varLib_finder, directory=interpolate_layout_dir, ext=ext)
            # no need to generate automatic features in ufo2ft, since here we
            # are interpolating precompiled GPOS table with fontTools.varLib.
            # An empty 'featureWriters' list tells ufo2ft to not generate any
            # automatic features.
            # TODO: Add an argument to ufo2ft.compileOTF/compileTTF to
            # completely skip compiling features into OTL tables
            feature_writers = []
        compiler_options = dict(
            useProductionNames=use_production_names,
            reverseDirection=reverse_direction,
            cubicConversionError=conversion_error,
            featureWriters=feature_writers,
            inplace=True,  # avoid extra copy
        )
        if use_afdko:
            compiler_options["featureCompilerClass"] = FDKFeatureCompiler
        if interpolatable:
            if not ttf:
                raise NotImplementedError("interpolatable CFF not supported yet")
            logger.info("Building interpolation-compatible TTFs")
            fonts = ufo2ft.compileInterpolatableTTFs(ufos, **compiler_options)
        else:
            # lazy generator: each font is compiled as the loop below consumes it
            fonts = self._iter_compile(
                ufos,
                ttf,
                removeOverlaps=remove_overlaps,
                overlapsBackend=overlaps_backend,
                optimizeCFF=optimize_cff,
                roundTolerance=cff_round_tolerance,
                **compiler_options
            )
        # autohinting is only applicable to TrueType outlines
        do_autohint = ttf and autohint is not None
        for font, ufo in zip(fonts, ufos):
            if interpolate_layout_from is not None:
                # replace the compiled GPOS with one interpolated at this
                # instance's designspace location from the master binaries
                master_locations, instance_locations = self._designspace_locations(
                    interpolate_layout_from
                )
                loc = instance_locations[_normpath(ufo.path)]
                gpos_src = interpolate_layout(
                    interpolate_layout_from, loc, finder, mapped=True
                )
                font["GPOS"] = gpos_src["GPOS"]
                # GDEF/GSUB are copied verbatim from the closest master
                gsub_src = TTFont(finder(self._closest_location(master_locations, loc)))
                if "GDEF" in gsub_src:
                    font["GDEF"] = gsub_src["GDEF"]
                if "GSUB" in gsub_src:
                    font["GSUB"] = gsub_src["GSUB"]
            if do_autohint:
                # if we are autohinting, we save the unhinted font to a
                # temporary path, and the hinted one to the final destination
                fd, otf_path = tempfile.mkstemp("." + ext)
                os.close(fd)
            elif output_path is None:
                otf_path = self._output_path(
                    ufo, ext, is_instance, interpolatable, output_dir=output_dir
                )
            else:
                otf_path = output_path
            logger.info("Saving %s", otf_path)
            font.save(otf_path)
            # 'subset' is an Optional[bool], can be None, True or False.
            # When False, we never subset; when True, we always do; when
            # None (default), we check the presence of custom parameters
            if subset is False:
                pass
            elif subset is True or (
                (KEEP_GLYPHS_OLD_KEY in ufo.lib or KEEP_GLYPHS_NEW_KEY in ufo.lib)
                or any(glyph.lib.get(GLYPH_EXPORT_KEY, True) is False for glyph in ufo)
            ):
                self.subset_otf_from_ufo(otf_path, ufo)
            if not do_autohint:
                continue
            if output_path is not None:
                hinted_otf_path = output_path
            else:
                hinted_otf_path = self._output_path(
                    ufo,
                    ext,
                    is_instance,
                    interpolatable,
                    autohinted=True,
                    output_dir=output_dir,
                )
            try:
                ttfautohint(otf_path, hinted_otf_path, args=autohint)
            except TTFAError:
                # copy unhinted font to destination before re-raising error
                shutil.copyfile(otf_path, hinted_otf_path)
                raise
            finally:
                # must clean up temp file
                os.remove(otf_path)
    def _save_interpolatable_fonts(self, designspace, output_dir, ttf):
        """Save compiled interpolatable masters and the rewritten designspace.

        Each source is expected to carry a compiled ``TTFont`` in its
        ``font`` attribute; the binaries are written to disk and the
        designspace document is rewritten to point at them.

        Args:
            designspace: DesignSpaceDocument with compiled sources.
            output_dir: Target directory; when None a conventional name is
                derived from the extension.
            ttf: If True save ".ttf" binaries, else ".otf".
        """
        ext = "ttf" if ttf else "otf"
        for source in designspace.sources:
            assert isinstance(source.font, TTFont)
            otf_path = self._output_path(
                source,
                ext,
                is_instance=False,
                interpolatable=True,
                output_dir=output_dir,
                suffix=source.layerName,
            )
            logger.info("Saving %s", otf_path)
            source.font.save(otf_path)
            # point the source at the binary just written; the layer name is
            # baked into the file name suffix, so the attribute is cleared
            source.path = otf_path
            source.layerName = None
        for instance in designspace.instances:
            # instances are not compiled here; drop stale path references
            instance.path = instance.filename = None
        if output_dir is None:
            output_dir = self._output_dir(ext, interpolatable=True)
        designspace_path = os.path.join(output_dir, os.path.basename(designspace.path))
        logger.info("Saving %s", designspace_path)
        designspace.write(designspace_path)
    def subset_otf_from_ufo(self, otf_path, ufo):
        """Subset a font using export flags set by glyphsLib.

        The binary at ``otf_path`` is subset in place, keeping only glyphs
        that are exported (and, when a "Keep Glyphs" list is present in the
        UFO lib, also listed there).

        There are two more settings that can change export behavior:
        "Export Glyphs" and "Remove Glyphs", which are currently not supported
        for complexity reasons. See
        https://github.com/googlei18n/glyphsLib/issues/295.
        """
        from fontTools import subset
        # ufo2ft always inserts a ".notdef" glyph as the first glyph
        ufo_order = makeOfficialGlyphOrder(ufo)
        if ".notdef" not in ufo_order:
            ufo_order.insert(0, ".notdef")
        ot_order = TTFont(otf_path).getGlyphOrder()
        assert ot_order[0] == ".notdef"
        assert len(ufo_order) == len(ot_order)
        # the keep-list may live under either the new or the old glyphsLib
        # lib key; the first key found wins
        for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY):
            keep_glyphs_list = ufo.lib.get(key)
            if keep_glyphs_list is not None:
                keep_glyphs = set(keep_glyphs_list)
                break
        else:
            keep_glyphs = None
        include = []
        # ufo_order and ot_order run in parallel, so zip maps source glyph
        # names to the (possibly renamed) glyph names in the binary
        for source_name, binary_name in zip(ufo_order, ot_order):
            if keep_glyphs and source_name not in keep_glyphs:
                continue
            if source_name in ufo:
                exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True)
                if not exported:
                    continue
            include.append(binary_name)
        # copied from nototools.subset
        opt = subset.Options()
        opt.name_IDs = ["*"]
        opt.name_legacy = True
        opt.name_languages = ["*"]
        opt.layout_features = ["*"]
        opt.notdef_outline = True
        opt.recalc_bounds = True
        opt.recalc_timestamp = True
        opt.canonical_order = True
        opt.glyph_names = True
        font = subset.load_font(otf_path, opt, lazy=False)
        subsetter = subset.Subsetter(options=opt)
        subsetter.populate(glyphs=include)
        subsetter.subset(font)
        subset.save_font(font, otf_path, opt)
    def run_from_glyphs(
        self,
        glyphs_path,
        designspace_path=None,
        master_dir=None,
        instance_dir=None,
        family_name=None,
        mti_source=None,
        **kwargs
    ):
        """Run toolchain from Glyphs source.

        Converts the Glyphs file to master UFOs plus a designspace document,
        then delegates to ``run_from_designspace``.

        Args:
            glyphs_path: Path to source file.
            designspace_path: Output path of generated designspace document.
                By default it's "<family_name>[-<base_style>].designspace".
            master_dir: Directory where to save UFO masters (default:
                "master_ufo").
            instance_dir: Directory where to save UFO instances (default:
                "instance_ufo").
            family_name: If provided, uses this family name in the output.
            mti_source: Path to property list file containing a dictionary
                mapping UFO masters to dictionaries mapping layout table
                tags to MTI source paths which should be compiled into
                those tables.
            kwargs: Arguments passed along to run_from_designspace.
        """
        logger.info("Building master UFOs and designspace from Glyphs source")
        designspace_path = self.build_master_ufos(
            glyphs_path,
            designspace_path=designspace_path,
            master_dir=master_dir,
            instance_dir=instance_dir,
            family_name=family_name,
            mti_source=mti_source,
        )
        self.run_from_designspace(designspace_path, **kwargs)
    def interpolate_instance_ufos(
        self,
        designspace,
        include=None,
        round_instances=False,
        expand_features_to_instances=False,
    ):
        """Interpolate master UFOs with MutatorMath and return instance UFOs.

        Args:
            designspace: a DesignSpaceDocument object containing sources and
                instances.
            include (str): optional regular expression pattern to match the
                DS instance 'name' attribute and only interpolate the matching
                instances.
            round_instances (bool): round instances' coordinates to integer.
            expand_features_to_instances: parses the master feature file, expands all
                include()s and writes the resulting full feature file to all instance
                UFOs. Use this if you share feature files among masters in external
                files. Otherwise, the relative include paths can break as instances
                may end up elsewhere. Only done on interpolation.
        Returns:
            list of defcon.Font objects corresponding to the UFO instances.
        Raises:
            FontmakeError: if any of the sources defines a custom 'layer', for
                this is not supported by MutatorMath.
            ValueError: "expand_features_to_instances" is True but no source in the
                designspace document is designated with '<features copy="1"/>'.
        """
        from glyphsLib.interpolation import apply_instance_data
        from mutatorMath.ufo.document import DesignSpaceDocumentReader

        if any(source.layerName is not None for source in designspace.sources):
            raise FontmakeError(
                "MutatorMath doesn't support DesignSpace sources with 'layer' "
                "attribute"
            )

        # TODO: replace mutatorMath with ufoProcessor?
        builder = DesignSpaceDocumentReader(
            designspace.path, ufoVersion=3, roundGeometry=round_instances, verbose=True
        )
        logger.info("Interpolating master UFOs from designspace")
        if include is not None:
            # Only interpolate instances whose 'name' matches the pattern.
            instances = self._search_instances(designspace, pattern=include)
            for instance_name in instances:
                builder.readInstance(("name", instance_name))
            filenames = set(instances.values())
        else:
            builder.readInstances()
            filenames = None  # will include all instances

        logger.info("Applying instance data from designspace")
        instance_ufos = apply_instance_data(designspace, include_filenames=filenames)

        if expand_features_to_instances:
            logger.debug("Expanding features to instance UFOs")
            master_source = next(
                (s for s in designspace.sources if s.copyFeatures), None
            )
            if not master_source:
                raise ValueError("No source is designated as the master for features.")
            else:
                # Flatten the feature file of the designated master (all
                # include()s resolved) and copy it into every instance UFO.
                master_source_font = builder.sources[master_source.name][0]
                master_source_features = parseLayoutFeatures(master_source_font).asFea()
                for instance_ufo in instance_ufos:
                    instance_ufo.features.text = master_source_features
                    instance_ufo.save()

        return instance_ufos
def run_from_designspace(
self,
designspace_path,
output=(),
interpolate=False,
masters_as_instances=False,
interpolate_binary_layout=False,
round_instances=False,
feature_writers=None,
expand_features_to_instances=False,
**kwargs
):
"""Run toolchain from a DesignSpace document to produce either static
instance fonts (ttf or otf), interpolatable or variable fonts.
Args:
designspace_path: Path to designspace document.
interpolate: If True output all instance fonts, otherwise just
masters. If the value is a string, only build instance(s) that
match given name. The string is compiled into a regular
expression and matched against the "name" attribute of
designspace instances using `re.fullmatch`.
masters_as_instances: If True, output master fonts as instances.
interpolate_binary_layout: Interpolate layout tables from compiled
master binaries.
round_instances: apply integer rounding when interpolating with
MutatorMath.
kwargs: Arguments passed along to run_from_ufos.
Raises:
TypeError: "variable" or "interpolatable" outputs are incompatible
with arguments "interpolate", "masters_as_instances", and
"interpolate_binary_layout".
"""
interp_outputs = INTERPOLATABLE_OUTPUTS.intersection(output)
static_outputs = STATIC_OUTPUTS.intersection(output)
if interp_outputs:
for argname in (
"interpolate",
"masters_as_instances",
"interpolate_binary_layout",
):
if locals()[argname]:
raise TypeError(
'"%s" argument incompatible with output %r'
% (argname, ", ".join(sorted(interp_outputs)))
)
designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace_path)
# if no --feature-writers option was passed, check in the designspace's
# <lib> element if user supplied a custom featureWriters configuration;
# if so, use that for all the UFOs built from this designspace
if feature_writers is None and FEATURE_WRITERS_KEY in designspace.lib:
feature_writers = loadFeatureWriters(designspace)
if static_outputs:
self._run_from_designspace_static(
designspace,
outputs=static_outputs,
interpolate=interpolate,
masters_as_instances=masters_as_instances,
interpolate_binary_layout=interpolate_binary_layout,
round_instances=round_instances,
feature_writers=feature_writers,
expand_features_to_instances=expand_features_to_instances,
**kwargs
)
if interp_outputs:
self._run_from_designspace_interpolatable(
designspace,
outputs=interp_outputs,
feature_writers=feature_writers,
**kwargs
)
    def _run_from_designspace_static(
        self,
        designspace,
        outputs,
        interpolate=False,
        masters_as_instances=False,
        interpolate_binary_layout=False,
        round_instances=False,
        feature_writers=None,
        expand_features_to_instances=False,
        **kwargs
    ):
        """Build static (non-variable) outputs from a designspace.

        Collects the UFOs to compile (master paths and/or interpolated
        instance fonts) and forwards everything to run_from_ufos.
        """
        ufos = []
        if not interpolate or masters_as_instances:
            ufos.extend((s.path for s in designspace.sources if s.path))
        if interpolate:
            # A string value for 'interpolate' is a regex restricting which
            # instances get built.
            pattern = interpolate if isinstance(interpolate, basestring) else None
            ufos.extend(
                self.interpolate_instance_ufos(
                    designspace,
                    include=pattern,
                    round_instances=round_instances,
                    expand_features_to_instances=expand_features_to_instances,
                )
            )

        if interpolate_binary_layout is False:
            interpolate_layout_from = interpolate_layout_dir = None
        else:
            interpolate_layout_from = designspace
            # A string value doubles as the directory of compiled masters.
            if isinstance(interpolate_binary_layout, basestring):
                interpolate_layout_dir = interpolate_binary_layout
            else:
                interpolate_layout_dir = None

        self.run_from_ufos(
            ufos,
            output=outputs,
            is_instance=(interpolate or masters_as_instances),
            interpolate_layout_from=interpolate_layout_from,
            interpolate_layout_dir=interpolate_layout_dir,
            feature_writers=feature_writers,
            **kwargs
        )
    def _run_from_designspace_interpolatable(
        self, designspace, outputs, output_path=None, output_dir=None, **kwargs
    ):
        """Build variable and/or interpolatable-master outputs.

        Interpolatable master sets are compiled at most once per outline
        format and shared between the 'variable*' and '*-interpolatable'
        outputs.
        """
        ttf_designspace = otf_designspace = None

        if "variable" in outputs:
            ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
            self.build_variable_font(
                ttf_designspace, output_path=output_path, output_dir=output_dir
            )

        if "ttf-interpolatable" in outputs:
            if ttf_designspace is None:
                ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
            self._save_interpolatable_fonts(ttf_designspace, output_dir, ttf=True)

        if "variable-cff2" in outputs:
            otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
            self.build_variable_font(
                otf_designspace,
                output_path=output_path,
                output_dir=output_dir,
                ttf=False,
            )

        if "otf-interpolatable" in outputs:
            if otf_designspace is None:
                otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
            self._save_interpolatable_fonts(otf_designspace, output_dir, ttf=False)
    def run_from_ufos(self, ufos, output=(), **kwargs):
        """Run toolchain from UFO sources.

        Args:
            ufos: List of UFO sources, as either paths or opened objects,
                or a single glob-pattern string.
            output: List of output formats to generate.
            kwargs: Arguments passed along to save_otfs.
        """
        if set(output) == {"ufo"}:
            return

        # the `ufos` parameter can be a list of UFO objects
        # or it can be a path (string) with a glob syntax
        ufo_paths = []
        if isinstance(ufos, basestring):
            ufo_paths = glob.glob(ufos)
            ufos = [Font(x) for x in ufo_paths]
        elif isinstance(ufos, list):
            # ufos can be either paths or open Font objects, so normalize them
            ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
            ufo_paths = [x.path for x in ufos]
        else:
            raise FontmakeError(
                "UFOs parameter is neither a defcon.Font object, a path or a glob, "
                "nor a list of any of these.",
                ufos,
            )

        # Compiling mutates the UFOs in place, so reload them from disk
        # before building a second output format.
        need_reload = False
        if "otf" in output:
            self.build_otfs(ufos, **kwargs)
            need_reload = True

        if "ttf" in output:
            if need_reload:
                ufos = [Font(path) for path in ufo_paths]
            self.build_ttfs(ufos, **kwargs)
            need_reload = True
@staticmethod
def _search_instances(designspace, pattern):
instances = OrderedDict()
for instance in designspace.instances:
# is 'name' optional? 'filename' certainly must not be
if fullmatch(pattern, instance.name):
instances[instance.name] = instance.filename
if not instances:
raise FontmakeError("No instance found with %r" % pattern)
return instances
def _font_name(self, ufo):
"""Generate a postscript-style font name."""
family_name = (
ufo.info.familyName.replace(" ", "")
if ufo.info.familyName is not None
else "None"
)
style_name = (
ufo.info.styleName.replace(" ", "")
if ufo.info.styleName is not None
else "None"
)
return "{}-{}".format(family_name, style_name)
def _output_dir(
self,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
):
"""Generate an output directory.
Args:
ext: extension string.
is_instance: The output is instance font or not.
interpolatable: The output is interpolatable or not.
autohinted: The output is autohinted or not.
is_variable: The output is variable font or not.
Return:
output directory string.
"""
assert not (is_variable and any([is_instance, interpolatable]))
# FIXME? Use user configurable destination folders.
if is_variable:
dir_prefix = "variable_"
elif is_instance:
dir_prefix = "instance_"
else:
dir_prefix = "master_"
dir_suffix = "_interpolatable" if interpolatable else ""
output_dir = dir_prefix + ext + dir_suffix
if autohinted:
output_dir = os.path.join("autohinted", output_dir)
return output_dir
    def _output_path(
        self,
        ufo_or_font_name,
        ext,
        is_instance=False,
        interpolatable=False,
        autohinted=False,
        is_variable=False,
        output_dir=None,
        suffix=None,
    ):
        """Generate output path for a font file with given extension.

        Args:
            ufo_or_font_name: either a font-name string, or an object with a
                'path' attribute (UFO or designspace source descriptor) from
                which a name is derived.
            ext: file extension, without the leading dot.
            is_instance, interpolatable, autohinted, is_variable: forwarded
                to _output_dir when no explicit output_dir is given.
            output_dir: optional directory; created if it does not exist.
            suffix: optional string appended to the font name with a dash.
        """
        if isinstance(ufo_or_font_name, basestring):
            font_name = ufo_or_font_name
        elif ufo_or_font_name.path:
            # Reuse the source's own file name, minus its extension.
            font_name = os.path.splitext(
                os.path.basename(os.path.normpath(ufo_or_font_name.path))
            )[0]
        else:
            # Fall back to a "Family-Style" name derived from font info.
            font_name = self._font_name(ufo_or_font_name)

        if output_dir is None:
            output_dir = self._output_dir(
                ext, is_instance, interpolatable, autohinted, is_variable
            )
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        if suffix:
            return os.path.join(output_dir, "{}-{}.{}".format(font_name, suffix, ext))
        else:
            return os.path.join(output_dir, "{}.{}".format(font_name, ext))
def _designspace_locations(self, designspace):
"""Map font filenames to their locations in a designspace."""
maps = []
for elements in (designspace.sources, designspace.instances):
location_map = {}
for element in elements:
path = _normpath(element.path)
location_map[path] = element.location
maps.append(location_map)
return maps
def _closest_location(self, location_map, target):
"""Return path of font whose location is closest to target."""
def dist(a, b):
return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys()))
paths = iter(location_map.keys())
closest = next(paths)
closest_dist = dist(target, location_map[closest])
for path in paths:
cur_dist = dist(target, location_map[path])
if cur_dist < closest_dist:
closest = path
closest_dist = cur_dist
return closest
|
googlefonts/fontmake | Lib/fontmake/font_project.py | FontProject.decompose_glyphs | python | def decompose_glyphs(self, ufos, glyph_filter=lambda g: True):
for ufo in ufos:
logger.info("Decomposing glyphs for " + self._font_name(ufo))
for glyph in ufo:
if not glyph.components or not glyph_filter(glyph):
continue
self._deep_copy_contours(ufo, glyph, glyph, Transform())
glyph.clearComponents() | Move components of UFOs' glyphs to their outlines. | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L209-L218 | [
"def decompose_glyphs(self, ufos, glyph_filter=lambda g: True):\n",
"def _deep_copy_contours(self, ufo, parent, component, transformation):\n \"\"\"Copy contours from component to parent, including nested components.\"\"\"\n\n for nested in component.components:\n self._deep_copy_contours(\n ufo,\n parent,\n ufo[nested.baseGlyph],\n transformation.transform(nested.transformation),\n )\n\n if component != parent:\n pen = TransformPen(parent.getPen(), transformation)\n\n # if the transformation has a negative determinant, it will reverse\n # the contour direction of the component\n xx, xy, yx, yy = transformation[:4]\n if xx * yy - xy * yx < 0:\n pen = ReverseContourPen(pen)\n\n component.draw(pen)\n",
"def _font_name(self, ufo):\n \"\"\"Generate a postscript-style font name.\"\"\"\n family_name = (\n ufo.info.familyName.replace(\" \", \"\")\n if ufo.info.familyName is not None\n else \"None\"\n )\n style_name = (\n ufo.info.styleName.replace(\" \", \"\")\n if ufo.info.styleName is not None\n else \"None\"\n )\n return \"{}-{}\".format(family_name, style_name)\n"
] | class FontProject(object):
"""Provides methods for building fonts."""
    def __init__(self, timing=False, verbose="INFO", validate_ufo=False):
        """Initialize the project.

        Args:
            timing: if True, log build-step timings at DEBUG level.
            verbose: logging level name for the root logger (e.g. "INFO").
            validate_ufo: whether ufoLib validates UFOs on read and write.
        """
        logging.basicConfig(level=getattr(logging, verbose.upper()))
        # The subsetter logs verbosely at INFO; keep it quiet by default.
        logging.getLogger("fontTools.subset").setLevel(logging.WARNING)
        if timing:
            configLogger(logger=timer.logger, level=logging.DEBUG)

        logger.debug(
            "ufoLib UFO validation is %s", "enabled" if validate_ufo else "disabled"
        )
        setUfoLibReadValidate(validate_ufo)
        setUfoLibWriteValidate(validate_ufo)
    @timer()
    def build_master_ufos(
        self,
        glyphs_path,
        designspace_path=None,
        master_dir=None,
        instance_dir=None,
        family_name=None,
        mti_source=None,
    ):
        """Build UFOs and MutatorMath designspace from Glyphs source.

        Returns:
            Path of the designspace document that was written (either
            designspace_path, or a default location next to the masters).
        """
        import glyphsLib

        if master_dir is None:
            master_dir = self._output_dir("ufo")
        if not os.path.isdir(master_dir):
            os.mkdir(master_dir)
        if instance_dir is None:
            instance_dir = self._output_dir("ufo", is_instance=True)
        if not os.path.isdir(instance_dir):
            os.mkdir(instance_dir)

        font = glyphsLib.GSFont(glyphs_path)
        if designspace_path is not None:
            designspace_dir = os.path.dirname(designspace_path)
        else:
            designspace_dir = master_dir
        # glyphsLib.to_designspace expects instance_dir to be relative
        instance_dir = os.path.relpath(instance_dir, designspace_dir)
        designspace = glyphsLib.to_designspace(
            font, family_name=family_name, instance_dir=instance_dir
        )

        masters = {}
        # multiple sources can have the same font/filename (but different layer),
        # we want to save a font only once
        for source in designspace.sources:
            if source.filename in masters:
                assert source.font is masters[source.filename]
                continue
            ufo_path = os.path.join(master_dir, source.filename)
            # no need to also set the relative 'filename' attribute as that
            # will be auto-updated on writing the designspace document
            source.path = ufo_path
            source.font.save(ufo_path)
            masters[source.filename] = source.font

        if designspace_path is None:
            designspace_path = os.path.join(master_dir, designspace.filename)
        designspace.write(designspace_path)
        if mti_source:
            self.add_mti_features_to_master_ufos(mti_source, masters.values())
        return designspace_path
@timer()
def add_mti_features_to_master_ufos(self, mti_source, masters):
mti_dir = os.path.dirname(mti_source)
with open(mti_source, "rb") as mti_file:
mti_paths = readPlist(mti_file)
for master in masters:
key = os.path.basename(master.path).rstrip(".ufo")
for table, path in mti_paths[key].items():
with open(os.path.join(mti_dir, path), "rb") as mti_source:
ufo_path = (
"com.github.googlei18n.ufo2ft.mtiFeatures/%s.mti"
% table.strip()
)
master.data[ufo_path] = mti_source.read()
# If we have MTI sources, any Adobe feature files derived from
# the Glyphs file should be ignored. We clear it here because
# it only contains junk information anyway.
master.features.text = ""
master.save()
    @_deprecated
    @timer()
    def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
        """Remove overlaps in UFOs' glyphs' contours.

        Args:
            ufos: iterable of UFO objects, modified in place.
            glyph_filter: predicate selecting which glyphs to process;
                the default skips empty glyphs.
        """
        from booleanOperations import union, BooleanOperationsError

        for ufo in ufos:
            font_name = self._font_name(ufo)
            logger.info("Removing overlaps for " + font_name)
            for glyph in ufo:
                if not glyph_filter(glyph):
                    continue
                # Replace the glyph's contours with their boolean union.
                contours = list(glyph)
                glyph.clearContours()
                try:
                    union(contours, glyph.getPointPen())
                except BooleanOperationsError:
                    logger.error(
                        "Failed to remove overlaps for %s: %r", font_name, glyph.name
                    )
                    raise
    @_deprecated
    @timer()
    # NOTE(review): the decorators above may belong to a method removed from
    # this excerpt — confirm against the upstream file.
    def _deep_copy_contours(self, ufo, parent, component, transformation):
        """Copy contours from component to parent, including nested components.

        Args:
            ufo: UFO used to resolve component base glyphs.
            parent: glyph that receives the flattened contours.
            component: glyph currently being flattened into parent.
            transformation: accumulated affine Transform applied to contours.
        """
        # Depth-first recursion, composing transformations along the way.
        for nested in component.components:
            self._deep_copy_contours(
                ufo,
                parent,
                ufo[nested.baseGlyph],
                transformation.transform(nested.transformation),
            )

        if component != parent:
            pen = TransformPen(parent.getPen(), transformation)

            # if the transformation has a negative determinant, it will reverse
            # the contour direction of the component
            xx, xy, yx, yy = transformation[:4]
            if xx * yy - xy * yx < 0:
                pen = ReverseContourPen(pen)

            component.draw(pen)
    @_deprecated
    @timer()
    def convert_curves(
        self, ufos, compatible=False, reverse_direction=True, conversion_error=None
    ):
        """Convert UFOs' cubic curves to quadratic (TrueType) curves in place.

        Args:
            ufos: iterable of UFO objects.
            compatible: if True, convert all fonts together so the outlines
                stay point-compatible for interpolation.
            reverse_direction: reverse contour direction during conversion.
            conversion_error: maximum allowed conversion error, in em units.
        """
        from cu2qu.ufo import font_to_quadratic, fonts_to_quadratic

        if compatible:
            logger.info("Converting curves compatibly")
            fonts_to_quadratic(
                ufos,
                max_err_em=conversion_error,
                reverse_direction=reverse_direction,
                dump_stats=True,
            )
        else:
            for ufo in ufos:
                logger.info("Converting curves for " + self._font_name(ufo))
                font_to_quadratic(
                    ufo,
                    max_err_em=conversion_error,
                    reverse_direction=reverse_direction,
                    dump_stats=True,
                )
    def build_otfs(self, ufos, **kwargs):
        """Build OpenType binaries with CFF outlines."""
        # CFF output is save_otfs' default mode (ttf=False).
        self.save_otfs(ufos, **kwargs)
    def build_ttfs(self, ufos, **kwargs):
        """Build OpenType binaries with TrueType outlines."""
        self.save_otfs(ufos, ttf=True, **kwargs)
@staticmethod
def _load_designspace_sources(designspace):
# set source.font attributes, but only load fonts once
masters = {}
for source in designspace.sources:
if source.path in masters:
source.font = masters[source.path]
else:
assert source.path is not None
source.font = Font(source.path)
masters[source.path] = source.font
def _build_interpolatable_masters(
self,
designspace,
ttf,
use_production_names=None,
reverse_direction=True,
conversion_error=None,
feature_writers=None,
cff_round_tolerance=None,
**kwargs
):
if hasattr(designspace, "__fspath__"):
ds_path = designspace.__fspath__()
if isinstance(designspace, basestring):
ds_path = designspace
else:
# reload designspace from its path so we have a new copy
# that can be modified in-place.
ds_path = designspace.path
if ds_path is not None:
designspace = designspaceLib.DesignSpaceDocument.fromfile(ds_path)
self._load_designspace_sources(designspace)
if ttf:
return ufo2ft.compileInterpolatableTTFsFromDS(
designspace,
useProductionNames=use_production_names,
reverseDirection=reverse_direction,
cubicConversionError=conversion_error,
featureWriters=feature_writers,
inplace=True,
)
else:
return ufo2ft.compileInterpolatableOTFsFromDS(
designspace,
useProductionNames=use_production_names,
roundTolerance=cff_round_tolerance,
featureWriters=feature_writers,
inplace=True,
)
    def build_interpolatable_ttfs(self, designspace, **kwargs):
        """Build OpenType binaries with interpolatable TrueType outlines
        from a DesignSpaceDocument object (or path).
        """
        return self._build_interpolatable_masters(designspace, ttf=True, **kwargs)
    def build_interpolatable_otfs(self, designspace, **kwargs):
        """Build OpenType binaries with interpolatable CFF outlines
        from a DesignSpaceDocument object (or path).
        """
        # Despite the historical docstring wording, this passes ttf=False
        # and therefore produces CFF, not TrueType, outlines.
        return self._build_interpolatable_masters(designspace, ttf=False, **kwargs)
    def build_variable_font(
        self,
        designspace,
        output_path=None,
        output_dir=None,
        master_bin_dir=None,
        ttf=True,
    ):
        """Build OpenType variable font from masters in a designspace.

        Args:
            designspace: DesignSpaceDocument, path string, or path-like.
            output_path: explicit output file path (mutually exclusive with
                output_dir).
            output_dir: directory for the generated font.
            master_bin_dir: directory with compiled master binaries; when
                None and sources already carry TTFont objects, those are
                used directly.
            ttf: build TrueType outlines (True) or CFF (False).
        """
        assert not (output_path and output_dir), "mutually exclusive args"
        ext = "ttf" if ttf else "otf"

        # Accept path-like objects by converting them to plain strings.
        if hasattr(designspace, "__fspath__"):
            designspace = designspace.__fspath__()
        if isinstance(designspace, basestring):
            designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace)

        if master_bin_dir is None:
            master_bin_dir = self._output_dir(ext, interpolatable=True)
            finder = partial(_varLib_finder, directory=master_bin_dir)
        else:
            # Pre-compiled masters are attached directly to the sources.
            assert all(isinstance(s.font, TTFont) for s in designspace.sources)
            finder = lambda s: s  # noqa: E731

        if output_path is None:
            output_path = (
                os.path.splitext(os.path.basename(designspace.path))[0] + "-VF"
            )
        output_path = self._output_path(
            output_path, ext, is_variable=True, output_dir=output_dir
        )
        logger.info("Building variable font " + output_path)

        font, _, _ = varLib.build(designspace, finder)
        font.save(output_path)
    def _iter_compile(self, ufos, ttf=False, **kwargs):
        # generator function that calls ufo2ft compiler for each ufo and
        # yields ttFont instances
        options = dict(kwargs)
        if ttf:
            # Drop CFF-only options the TTF compiler does not accept.
            for key in ("optimizeCFF", "roundTolerance"):
                options.pop(key, None)
            compile_func, fmt = ufo2ft.compileTTF, "TTF"
        else:
            # And vice versa: drop TrueType-only conversion options.
            for key in ("cubicConversionError", "reverseDirection"):
                options.pop(key, None)
            compile_func, fmt = ufo2ft.compileOTF, "OTF"

        for ufo in ufos:
            name = self._font_name(ufo)
            logger.info("Building {} for {}".format(fmt, name))
            yield compile_func(ufo, **options)
    @timer()
    def save_otfs(
        self,
        ufos,
        ttf=False,
        is_instance=False,
        interpolatable=False,
        use_afdko=False,
        autohint=None,
        subset=None,
        use_production_names=None,
        subroutinize=None,  # deprecated
        optimize_cff=CFFOptimization.NONE,
        cff_round_tolerance=None,
        remove_overlaps=True,
        overlaps_backend=None,
        reverse_direction=True,
        conversion_error=None,
        feature_writers=None,
        interpolate_layout_from=None,
        interpolate_layout_dir=None,
        output_path=None,
        output_dir=None,
        inplace=True,
    ):
        """Build OpenType binaries from UFOs.

        Args:
            ufos: Font objects to compile.
            ttf: If True, build fonts with TrueType outlines and .ttf extension.
            is_instance: If output fonts are instances, for generating paths.
            interpolatable: If output is interpolatable, for generating paths.
            use_afdko: If True, use AFDKO to compile feature source.
            autohint: Parameters to provide to ttfautohint. If not provided, the
                autohinting step is skipped.
            subset: Whether to subset the output according to data in the UFOs.
                If not provided, also determined by flags in the UFOs.
            use_production_names: Whether to use production glyph names in the
                output. If not provided, determined by flags in the UFOs.
            subroutinize: If True, subroutinize CFF outlines in output.
            cff_round_tolerance (float): controls the rounding of point
                coordinates in CFF table. It is defined as the maximum absolute
                difference between the original float and the rounded integer
                value. By default, all floats are rounded to integer (tolerance
                0.5); a value of 0 completely disables rounding; values in
                between only round floats which are close to their integral
                part within the tolerated range. Ignored if ttf=True.
            remove_overlaps: If True, remove overlaps in glyph shapes.
            overlaps_backend: name of the library to remove overlaps. Can be
                either "booleanOperations" (default) or "pathops".
            reverse_direction: If True, reverse contour directions when
                compiling TrueType outlines.
            conversion_error: Error to allow when converting cubic CFF contours
                to quadratic TrueType contours.
            feature_writers: list of ufo2ft-compatible feature writer classes
                or pre-initialized objects that are passed on to ufo2ft
                feature compiler to generate automatic feature code. The
                default value (None) means that ufo2ft will use its built-in
                default feature writers (for kern, mark, mkmk, etc.). An empty
                list ([]) will skip any automatic feature generation.
            interpolate_layout_from: A DesignSpaceDocument object to give varLib
                for interpolating layout tables to use in output.
            interpolate_layout_dir: Directory containing the compiled master
                fonts to use for interpolating binary layout tables.
            output_path: output font file path. Only works when the input
                'ufos' list contains a single font.
            output_dir: directory where to save output files. Mutually
                exclusive with 'output_path' argument.
        """
        assert not (output_path and output_dir), "mutually exclusive args"
        if output_path is not None and len(ufos) > 1:
            raise ValueError("output_path requires a single input")
        if subroutinize is not None:
            import warnings

            warnings.warn(
                "the 'subroutinize' argument is deprecated, use 'optimize_cff'",
                UserWarning,
            )
            if subroutinize:
                optimize_cff = CFFOptimization.SUBROUTINIZE
            else:
                # for b/w compatibility, we still run the charstring specializer
                # even when --no-subroutinize is used. Use the new --optimize-cff
                # option to disable both specilization and subroutinization
                optimize_cff = CFFOptimization.SPECIALIZE
        ext = "ttf" if ttf else "otf"

        if interpolate_layout_from is not None:
            if interpolate_layout_dir is None:
                interpolate_layout_dir = self._output_dir(
                    ext, is_instance=False, interpolatable=interpolatable
                )
            finder = partial(_varLib_finder, directory=interpolate_layout_dir, ext=ext)
            # no need to generate automatic features in ufo2ft, since here we
            # are interpolating precompiled GPOS table with fontTools.varLib.
            # An empty 'featureWriters' list tells ufo2ft to not generate any
            # automatic features.
            # TODO: Add an argument to ufo2ft.compileOTF/compileTTF to
            # completely skip compiling features into OTL tables
            feature_writers = []

        compiler_options = dict(
            useProductionNames=use_production_names,
            reverseDirection=reverse_direction,
            cubicConversionError=conversion_error,
            featureWriters=feature_writers,
            inplace=True,  # avoid extra copy
        )
        if use_afdko:
            compiler_options["featureCompilerClass"] = FDKFeatureCompiler

        if interpolatable:
            if not ttf:
                raise NotImplementedError("interpolatable CFF not supported yet")

            logger.info("Building interpolation-compatible TTFs")
            fonts = ufo2ft.compileInterpolatableTTFs(ufos, **compiler_options)
        else:
            # lazily compiled, one binary per UFO
            fonts = self._iter_compile(
                ufos,
                ttf,
                removeOverlaps=remove_overlaps,
                overlapsBackend=overlaps_backend,
                optimizeCFF=optimize_cff,
                roundTolerance=cff_round_tolerance,
                **compiler_options
            )

        # ttfautohint only works on TrueType outlines
        do_autohint = ttf and autohint is not None

        for font, ufo in zip(fonts, ufos):
            if interpolate_layout_from is not None:
                # Interpolate GPOS at this instance's location; GSUB/GDEF are
                # copied verbatim from the nearest compiled master.
                master_locations, instance_locations = self._designspace_locations(
                    interpolate_layout_from
                )
                loc = instance_locations[_normpath(ufo.path)]
                gpos_src = interpolate_layout(
                    interpolate_layout_from, loc, finder, mapped=True
                )
                font["GPOS"] = gpos_src["GPOS"]
                gsub_src = TTFont(finder(self._closest_location(master_locations, loc)))
                if "GDEF" in gsub_src:
                    font["GDEF"] = gsub_src["GDEF"]
                if "GSUB" in gsub_src:
                    font["GSUB"] = gsub_src["GSUB"]

            if do_autohint:
                # if we are autohinting, we save the unhinted font to a
                # temporary path, and the hinted one to the final destination
                fd, otf_path = tempfile.mkstemp("." + ext)
                os.close(fd)
            elif output_path is None:
                otf_path = self._output_path(
                    ufo, ext, is_instance, interpolatable, output_dir=output_dir
                )
            else:
                otf_path = output_path

            logger.info("Saving %s", otf_path)
            font.save(otf_path)

            # 'subset' is an Optional[bool], can be None, True or False.
            # When False, we never subset; when True, we always do; when
            # None (default), we check the presence of custom parameters
            if subset is False:
                pass
            elif subset is True or (
                (KEEP_GLYPHS_OLD_KEY in ufo.lib or KEEP_GLYPHS_NEW_KEY in ufo.lib)
                or any(glyph.lib.get(GLYPH_EXPORT_KEY, True) is False for glyph in ufo)
            ):
                self.subset_otf_from_ufo(otf_path, ufo)

            if not do_autohint:
                continue

            if output_path is not None:
                hinted_otf_path = output_path
            else:
                hinted_otf_path = self._output_path(
                    ufo,
                    ext,
                    is_instance,
                    interpolatable,
                    autohinted=True,
                    output_dir=output_dir,
                )
            try:
                ttfautohint(otf_path, hinted_otf_path, args=autohint)
            except TTFAError:
                # copy unhinted font to destination before re-raising error
                shutil.copyfile(otf_path, hinted_otf_path)
                raise
            finally:
                # must clean up temp file
                os.remove(otf_path)
    def _save_interpolatable_fonts(self, designspace, output_dir, ttf):
        """Save compiled master fonts plus the designspace that binds them.

        Each source's compiled TTFont is written to disk (the layer name, if
        any, becomes a filename suffix), the designspace source paths are
        rewritten to point at the binaries, and the document is saved.
        """
        ext = "ttf" if ttf else "otf"
        for source in designspace.sources:
            assert isinstance(source.font, TTFont)
            otf_path = self._output_path(
                source,
                ext,
                is_instance=False,
                interpolatable=True,
                output_dir=output_dir,
                suffix=source.layerName,
            )
            logger.info("Saving %s", otf_path)
            source.font.save(otf_path)
            # The layer is baked into the binary now, so point the source at
            # the compiled file and drop the layer reference.
            source.path = otf_path
            source.layerName = None
        for instance in designspace.instances:
            # Instances are not compiled here; clear stale UFO references.
            instance.path = instance.filename = None

        if output_dir is None:
            output_dir = self._output_dir(ext, interpolatable=True)
        designspace_path = os.path.join(output_dir, os.path.basename(designspace.path))
        logger.info("Saving %s", designspace_path)
        designspace.write(designspace_path)
    def subset_otf_from_ufo(self, otf_path, ufo):
        """Subset a binary font in place using export flags set by glyphsLib.

        Args:
            otf_path: path to the compiled font; it is overwritten with the
                subset result.
            ufo: the UFO the binary was compiled from; its lib keys and
                per-glyph export flags decide which glyphs survive.

        There are two more settings that can change export behavior:
        "Export Glyphs" and "Remove Glyphs", which are currently not supported
        for complexity reasons. See
        https://github.com/googlei18n/glyphsLib/issues/295.
        """
        from fontTools import subset

        # ufo2ft always inserts a ".notdef" glyph as the first glyph
        ufo_order = makeOfficialGlyphOrder(ufo)
        if ".notdef" not in ufo_order:
            ufo_order.insert(0, ".notdef")
        ot_order = TTFont(otf_path).getGlyphOrder()
        assert ot_order[0] == ".notdef"
        assert len(ufo_order) == len(ot_order)

        # Prefer the new lib key, fall back to the old one; None means no
        # explicit keep-list was configured.
        for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY):
            keep_glyphs_list = ufo.lib.get(key)
            if keep_glyphs_list is not None:
                keep_glyphs = set(keep_glyphs_list)
                break
        else:
            keep_glyphs = None

        include = []
        # ufo_order and ot_order run in parallel, so source glyph names map
        # positionally onto binary (possibly production) glyph names.
        for source_name, binary_name in zip(ufo_order, ot_order):
            if keep_glyphs and source_name not in keep_glyphs:
                continue

            if source_name in ufo:
                exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True)
                if not exported:
                    continue

            include.append(binary_name)

        # copied from nototools.subset
        opt = subset.Options()
        opt.name_IDs = ["*"]
        opt.name_legacy = True
        opt.name_languages = ["*"]
        opt.layout_features = ["*"]
        opt.notdef_outline = True
        opt.recalc_bounds = True
        opt.recalc_timestamp = True
        opt.canonical_order = True

        opt.glyph_names = True

        font = subset.load_font(otf_path, opt, lazy=False)
        subsetter = subset.Subsetter(options=opt)
        subsetter.populate(glyphs=include)
        subsetter.subset(font)
        subset.save_font(font, otf_path, opt)
    def run_from_glyphs(
        self,
        glyphs_path,
        designspace_path=None,
        master_dir=None,
        instance_dir=None,
        family_name=None,
        mti_source=None,
        **kwargs
    ):
        """Run toolchain from Glyphs source.

        Converts the Glyphs file to master UFOs plus a designspace document,
        then delegates to ``run_from_designspace``.

        Args:
            glyphs_path: Path to source file.
            designspace_path: Output path of generated designspace document.
                By default it's "<family_name>[-<base_style>].designspace".
            master_dir: Directory where to save UFO masters (default:
                "master_ufo").
            instance_dir: Directory where to save UFO instances (default:
                "instance_ufo").
            family_name: If provided, uses this family name in the output.
            mti_source: Path to property list file containing a dictionary
                mapping UFO masters to dictionaries mapping layout table
                tags to MTI source paths which should be compiled into
                those tables.
            kwargs: Arguments passed along to run_from_designspace.
        """
        logger.info("Building master UFOs and designspace from Glyphs source")
        designspace_path = self.build_master_ufos(
            glyphs_path,
            designspace_path=designspace_path,
            master_dir=master_dir,
            instance_dir=instance_dir,
            family_name=family_name,
            mti_source=mti_source,
        )
        self.run_from_designspace(designspace_path, **kwargs)
def interpolate_instance_ufos(
self,
designspace,
include=None,
round_instances=False,
expand_features_to_instances=False,
):
"""Interpolate master UFOs with MutatorMath and return instance UFOs.
Args:
designspace: a DesignSpaceDocument object containing sources and
instances.
include (str): optional regular expression pattern to match the
DS instance 'name' attribute and only interpolate the matching
instances.
round_instances (bool): round instances' coordinates to integer.
expand_features_to_instances: parses the master feature file, expands all
include()s and writes the resulting full feature file to all instance
UFOs. Use this if you share feature files among masters in external
files. Otherwise, the relative include paths can break as instances
may end up elsewhere. Only done on interpolation.
Returns:
list of defcon.Font objects corresponding to the UFO instances.
Raises:
FontmakeError: if any of the sources defines a custom 'layer', for
this is not supported by MutatorMath.
ValueError: "expand_features_to_instances" is True but no source in the
designspace document is designated with '<features copy="1"/>'.
"""
from glyphsLib.interpolation import apply_instance_data
from mutatorMath.ufo.document import DesignSpaceDocumentReader
if any(source.layerName is not None for source in designspace.sources):
raise FontmakeError(
"MutatorMath doesn't support DesignSpace sources with 'layer' "
"attribute"
)
# TODO: replace mutatorMath with ufoProcessor?
builder = DesignSpaceDocumentReader(
designspace.path, ufoVersion=3, roundGeometry=round_instances, verbose=True
)
logger.info("Interpolating master UFOs from designspace")
if include is not None:
instances = self._search_instances(designspace, pattern=include)
for instance_name in instances:
builder.readInstance(("name", instance_name))
filenames = set(instances.values())
else:
builder.readInstances()
filenames = None # will include all instances
logger.info("Applying instance data from designspace")
instance_ufos = apply_instance_data(designspace, include_filenames=filenames)
if expand_features_to_instances:
logger.debug("Expanding features to instance UFOs")
master_source = next(
(s for s in designspace.sources if s.copyFeatures), None
)
if not master_source:
raise ValueError("No source is designated as the master for features.")
else:
master_source_font = builder.sources[master_source.name][0]
master_source_features = parseLayoutFeatures(master_source_font).asFea()
for instance_ufo in instance_ufos:
instance_ufo.features.text = master_source_features
instance_ufo.save()
return instance_ufos
def run_from_designspace(
self,
designspace_path,
output=(),
interpolate=False,
masters_as_instances=False,
interpolate_binary_layout=False,
round_instances=False,
feature_writers=None,
expand_features_to_instances=False,
**kwargs
):
"""Run toolchain from a DesignSpace document to produce either static
instance fonts (ttf or otf), interpolatable or variable fonts.
Args:
designspace_path: Path to designspace document.
interpolate: If True output all instance fonts, otherwise just
masters. If the value is a string, only build instance(s) that
match given name. The string is compiled into a regular
expression and matched against the "name" attribute of
designspace instances using `re.fullmatch`.
masters_as_instances: If True, output master fonts as instances.
interpolate_binary_layout: Interpolate layout tables from compiled
master binaries.
round_instances: apply integer rounding when interpolating with
MutatorMath.
kwargs: Arguments passed along to run_from_ufos.
Raises:
TypeError: "variable" or "interpolatable" outputs are incompatible
with arguments "interpolate", "masters_as_instances", and
"interpolate_binary_layout".
"""
interp_outputs = INTERPOLATABLE_OUTPUTS.intersection(output)
static_outputs = STATIC_OUTPUTS.intersection(output)
if interp_outputs:
for argname in (
"interpolate",
"masters_as_instances",
"interpolate_binary_layout",
):
if locals()[argname]:
raise TypeError(
'"%s" argument incompatible with output %r'
% (argname, ", ".join(sorted(interp_outputs)))
)
designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace_path)
# if no --feature-writers option was passed, check in the designspace's
# <lib> element if user supplied a custom featureWriters configuration;
# if so, use that for all the UFOs built from this designspace
if feature_writers is None and FEATURE_WRITERS_KEY in designspace.lib:
feature_writers = loadFeatureWriters(designspace)
if static_outputs:
self._run_from_designspace_static(
designspace,
outputs=static_outputs,
interpolate=interpolate,
masters_as_instances=masters_as_instances,
interpolate_binary_layout=interpolate_binary_layout,
round_instances=round_instances,
feature_writers=feature_writers,
expand_features_to_instances=expand_features_to_instances,
**kwargs
)
if interp_outputs:
self._run_from_designspace_interpolatable(
designspace,
outputs=interp_outputs,
feature_writers=feature_writers,
**kwargs
)
def _run_from_designspace_static(
self,
designspace,
outputs,
interpolate=False,
masters_as_instances=False,
interpolate_binary_layout=False,
round_instances=False,
feature_writers=None,
expand_features_to_instances=False,
**kwargs
):
ufos = []
if not interpolate or masters_as_instances:
ufos.extend((s.path for s in designspace.sources if s.path))
if interpolate:
pattern = interpolate if isinstance(interpolate, basestring) else None
ufos.extend(
self.interpolate_instance_ufos(
designspace,
include=pattern,
round_instances=round_instances,
expand_features_to_instances=expand_features_to_instances,
)
)
if interpolate_binary_layout is False:
interpolate_layout_from = interpolate_layout_dir = None
else:
interpolate_layout_from = designspace
if isinstance(interpolate_binary_layout, basestring):
interpolate_layout_dir = interpolate_binary_layout
else:
interpolate_layout_dir = None
self.run_from_ufos(
ufos,
output=outputs,
is_instance=(interpolate or masters_as_instances),
interpolate_layout_from=interpolate_layout_from,
interpolate_layout_dir=interpolate_layout_dir,
feature_writers=feature_writers,
**kwargs
)
def _run_from_designspace_interpolatable(
self, designspace, outputs, output_path=None, output_dir=None, **kwargs
):
ttf_designspace = otf_designspace = None
if "variable" in outputs:
ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
self.build_variable_font(
ttf_designspace, output_path=output_path, output_dir=output_dir
)
if "ttf-interpolatable" in outputs:
if ttf_designspace is None:
ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
self._save_interpolatable_fonts(ttf_designspace, output_dir, ttf=True)
if "variable-cff2" in outputs:
otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
self.build_variable_font(
otf_designspace,
output_path=output_path,
output_dir=output_dir,
ttf=False,
)
if "otf-interpolatable" in outputs:
if otf_designspace is None:
otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
self._save_interpolatable_fonts(otf_designspace, output_dir, ttf=False)
def run_from_ufos(self, ufos, output=(), **kwargs):
"""Run toolchain from UFO sources.
Args:
ufos: List of UFO sources, as either paths or opened objects.
output: List of output formats to generate.
kwargs: Arguments passed along to save_otfs.
"""
if set(output) == {"ufo"}:
return
# the `ufos` parameter can be a list of UFO objects
# or it can be a path (string) with a glob syntax
ufo_paths = []
if isinstance(ufos, basestring):
ufo_paths = glob.glob(ufos)
ufos = [Font(x) for x in ufo_paths]
elif isinstance(ufos, list):
# ufos can be either paths or open Font objects, so normalize them
ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
ufo_paths = [x.path for x in ufos]
else:
raise FontmakeError(
"UFOs parameter is neither a defcon.Font object, a path or a glob, "
"nor a list of any of these.",
ufos,
)
need_reload = False
if "otf" in output:
self.build_otfs(ufos, **kwargs)
need_reload = True
if "ttf" in output:
if need_reload:
ufos = [Font(path) for path in ufo_paths]
self.build_ttfs(ufos, **kwargs)
need_reload = True
@staticmethod
def _search_instances(designspace, pattern):
instances = OrderedDict()
for instance in designspace.instances:
# is 'name' optional? 'filename' certainly must not be
if fullmatch(pattern, instance.name):
instances[instance.name] = instance.filename
if not instances:
raise FontmakeError("No instance found with %r" % pattern)
return instances
def _font_name(self, ufo):
"""Generate a postscript-style font name."""
family_name = (
ufo.info.familyName.replace(" ", "")
if ufo.info.familyName is not None
else "None"
)
style_name = (
ufo.info.styleName.replace(" ", "")
if ufo.info.styleName is not None
else "None"
)
return "{}-{}".format(family_name, style_name)
def _output_dir(
self,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
):
"""Generate an output directory.
Args:
ext: extension string.
is_instance: The output is instance font or not.
interpolatable: The output is interpolatable or not.
autohinted: The output is autohinted or not.
is_variable: The output is variable font or not.
Return:
output directory string.
"""
assert not (is_variable and any([is_instance, interpolatable]))
# FIXME? Use user configurable destination folders.
if is_variable:
dir_prefix = "variable_"
elif is_instance:
dir_prefix = "instance_"
else:
dir_prefix = "master_"
dir_suffix = "_interpolatable" if interpolatable else ""
output_dir = dir_prefix + ext + dir_suffix
if autohinted:
output_dir = os.path.join("autohinted", output_dir)
return output_dir
def _output_path(
self,
ufo_or_font_name,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
output_dir=None,
suffix=None,
):
"""Generate output path for a font file with given extension."""
if isinstance(ufo_or_font_name, basestring):
font_name = ufo_or_font_name
elif ufo_or_font_name.path:
font_name = os.path.splitext(
os.path.basename(os.path.normpath(ufo_or_font_name.path))
)[0]
else:
font_name = self._font_name(ufo_or_font_name)
if output_dir is None:
output_dir = self._output_dir(
ext, is_instance, interpolatable, autohinted, is_variable
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if suffix:
return os.path.join(output_dir, "{}-{}.{}".format(font_name, suffix, ext))
else:
return os.path.join(output_dir, "{}.{}".format(font_name, ext))
def _designspace_locations(self, designspace):
"""Map font filenames to their locations in a designspace."""
maps = []
for elements in (designspace.sources, designspace.instances):
location_map = {}
for element in elements:
path = _normpath(element.path)
location_map[path] = element.location
maps.append(location_map)
return maps
def _closest_location(self, location_map, target):
"""Return path of font whose location is closest to target."""
def dist(a, b):
return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys()))
paths = iter(location_map.keys())
closest = next(paths)
closest_dist = dist(target, location_map[closest])
for path in paths:
cur_dist = dist(target, location_map[path])
if cur_dist < closest_dist:
closest = path
closest_dist = cur_dist
return closest
|
googlefonts/fontmake | Lib/fontmake/font_project.py | FontProject._deep_copy_contours | python | def _deep_copy_contours(self, ufo, parent, component, transformation):
for nested in component.components:
self._deep_copy_contours(
ufo,
parent,
ufo[nested.baseGlyph],
transformation.transform(nested.transformation),
)
if component != parent:
pen = TransformPen(parent.getPen(), transformation)
# if the transformation has a negative determinant, it will reverse
# the contour direction of the component
xx, xy, yx, yy = transformation[:4]
if xx * yy - xy * yx < 0:
pen = ReverseContourPen(pen)
component.draw(pen) | Copy contours from component to parent, including nested components. | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L220-L240 | [
"def _deep_copy_contours(self, ufo, parent, component, transformation):\n \"\"\"Copy contours from component to parent, including nested components.\"\"\"\n\n for nested in component.components:\n self._deep_copy_contours(\n ufo,\n parent,\n ufo[nested.baseGlyph],\n transformation.transform(nested.transformation),\n )\n\n if component != parent:\n pen = TransformPen(parent.getPen(), transformation)\n\n # if the transformation has a negative determinant, it will reverse\n # the contour direction of the component\n xx, xy, yx, yy = transformation[:4]\n if xx * yy - xy * yx < 0:\n pen = ReverseContourPen(pen)\n\n component.draw(pen)\n"
] | class FontProject(object):
"""Provides methods for building fonts."""
def __init__(self, timing=False, verbose="INFO", validate_ufo=False):
logging.basicConfig(level=getattr(logging, verbose.upper()))
logging.getLogger("fontTools.subset").setLevel(logging.WARNING)
if timing:
configLogger(logger=timer.logger, level=logging.DEBUG)
logger.debug(
"ufoLib UFO validation is %s", "enabled" if validate_ufo else "disabled"
)
setUfoLibReadValidate(validate_ufo)
setUfoLibWriteValidate(validate_ufo)
@timer()
def build_master_ufos(
self,
glyphs_path,
designspace_path=None,
master_dir=None,
instance_dir=None,
family_name=None,
mti_source=None,
):
"""Build UFOs and MutatorMath designspace from Glyphs source."""
import glyphsLib
if master_dir is None:
master_dir = self._output_dir("ufo")
if not os.path.isdir(master_dir):
os.mkdir(master_dir)
if instance_dir is None:
instance_dir = self._output_dir("ufo", is_instance=True)
if not os.path.isdir(instance_dir):
os.mkdir(instance_dir)
font = glyphsLib.GSFont(glyphs_path)
if designspace_path is not None:
designspace_dir = os.path.dirname(designspace_path)
else:
designspace_dir = master_dir
# glyphsLib.to_designspace expects instance_dir to be relative
instance_dir = os.path.relpath(instance_dir, designspace_dir)
designspace = glyphsLib.to_designspace(
font, family_name=family_name, instance_dir=instance_dir
)
masters = {}
# multiple sources can have the same font/filename (but different layer),
# we want to save a font only once
for source in designspace.sources:
if source.filename in masters:
assert source.font is masters[source.filename]
continue
ufo_path = os.path.join(master_dir, source.filename)
# no need to also set the relative 'filename' attribute as that
# will be auto-updated on writing the designspace document
source.path = ufo_path
source.font.save(ufo_path)
masters[source.filename] = source.font
if designspace_path is None:
designspace_path = os.path.join(master_dir, designspace.filename)
designspace.write(designspace_path)
if mti_source:
self.add_mti_features_to_master_ufos(mti_source, masters.values())
return designspace_path
@timer()
def add_mti_features_to_master_ufos(self, mti_source, masters):
mti_dir = os.path.dirname(mti_source)
with open(mti_source, "rb") as mti_file:
mti_paths = readPlist(mti_file)
for master in masters:
key = os.path.basename(master.path).rstrip(".ufo")
for table, path in mti_paths[key].items():
with open(os.path.join(mti_dir, path), "rb") as mti_source:
ufo_path = (
"com.github.googlei18n.ufo2ft.mtiFeatures/%s.mti"
% table.strip()
)
master.data[ufo_path] = mti_source.read()
# If we have MTI sources, any Adobe feature files derived from
# the Glyphs file should be ignored. We clear it here because
# it only contains junk information anyway.
master.features.text = ""
master.save()
@_deprecated
@timer()
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
"""Remove overlaps in UFOs' glyphs' contours."""
from booleanOperations import union, BooleanOperationsError
for ufo in ufos:
font_name = self._font_name(ufo)
logger.info("Removing overlaps for " + font_name)
for glyph in ufo:
if not glyph_filter(glyph):
continue
contours = list(glyph)
glyph.clearContours()
try:
union(contours, glyph.getPointPen())
except BooleanOperationsError:
logger.error(
"Failed to remove overlaps for %s: %r", font_name, glyph.name
)
raise
@_deprecated
@timer()
def decompose_glyphs(self, ufos, glyph_filter=lambda g: True):
"""Move components of UFOs' glyphs to their outlines."""
for ufo in ufos:
logger.info("Decomposing glyphs for " + self._font_name(ufo))
for glyph in ufo:
if not glyph.components or not glyph_filter(glyph):
continue
self._deep_copy_contours(ufo, glyph, glyph, Transform())
glyph.clearComponents()
@_deprecated
@timer()
def convert_curves(
self, ufos, compatible=False, reverse_direction=True, conversion_error=None
):
from cu2qu.ufo import font_to_quadratic, fonts_to_quadratic
if compatible:
logger.info("Converting curves compatibly")
fonts_to_quadratic(
ufos,
max_err_em=conversion_error,
reverse_direction=reverse_direction,
dump_stats=True,
)
else:
for ufo in ufos:
logger.info("Converting curves for " + self._font_name(ufo))
font_to_quadratic(
ufo,
max_err_em=conversion_error,
reverse_direction=reverse_direction,
dump_stats=True,
)
def build_otfs(self, ufos, **kwargs):
"""Build OpenType binaries with CFF outlines."""
self.save_otfs(ufos, **kwargs)
def build_ttfs(self, ufos, **kwargs):
"""Build OpenType binaries with TrueType outlines."""
self.save_otfs(ufos, ttf=True, **kwargs)
@staticmethod
def _load_designspace_sources(designspace):
# set source.font attributes, but only load fonts once
masters = {}
for source in designspace.sources:
if source.path in masters:
source.font = masters[source.path]
else:
assert source.path is not None
source.font = Font(source.path)
masters[source.path] = source.font
def _build_interpolatable_masters(
self,
designspace,
ttf,
use_production_names=None,
reverse_direction=True,
conversion_error=None,
feature_writers=None,
cff_round_tolerance=None,
**kwargs
):
if hasattr(designspace, "__fspath__"):
ds_path = designspace.__fspath__()
if isinstance(designspace, basestring):
ds_path = designspace
else:
# reload designspace from its path so we have a new copy
# that can be modified in-place.
ds_path = designspace.path
if ds_path is not None:
designspace = designspaceLib.DesignSpaceDocument.fromfile(ds_path)
self._load_designspace_sources(designspace)
if ttf:
return ufo2ft.compileInterpolatableTTFsFromDS(
designspace,
useProductionNames=use_production_names,
reverseDirection=reverse_direction,
cubicConversionError=conversion_error,
featureWriters=feature_writers,
inplace=True,
)
else:
return ufo2ft.compileInterpolatableOTFsFromDS(
designspace,
useProductionNames=use_production_names,
roundTolerance=cff_round_tolerance,
featureWriters=feature_writers,
inplace=True,
)
def build_interpolatable_ttfs(self, designspace, **kwargs):
"""Build OpenType binaries with interpolatable TrueType outlines
from DesignSpaceDocument object.
"""
return self._build_interpolatable_masters(designspace, ttf=True, **kwargs)
def build_interpolatable_otfs(self, designspace, **kwargs):
"""Build OpenType binaries with interpolatable TrueType outlines
from DesignSpaceDocument object.
"""
return self._build_interpolatable_masters(designspace, ttf=False, **kwargs)
def build_variable_font(
self,
designspace,
output_path=None,
output_dir=None,
master_bin_dir=None,
ttf=True,
):
"""Build OpenType variable font from masters in a designspace."""
assert not (output_path and output_dir), "mutually exclusive args"
ext = "ttf" if ttf else "otf"
if hasattr(designspace, "__fspath__"):
designspace = designspace.__fspath__()
if isinstance(designspace, basestring):
designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace)
if master_bin_dir is None:
master_bin_dir = self._output_dir(ext, interpolatable=True)
finder = partial(_varLib_finder, directory=master_bin_dir)
else:
assert all(isinstance(s.font, TTFont) for s in designspace.sources)
finder = lambda s: s # noqa: E731
if output_path is None:
output_path = (
os.path.splitext(os.path.basename(designspace.path))[0] + "-VF"
)
output_path = self._output_path(
output_path, ext, is_variable=True, output_dir=output_dir
)
logger.info("Building variable font " + output_path)
font, _, _ = varLib.build(designspace, finder)
font.save(output_path)
def _iter_compile(self, ufos, ttf=False, **kwargs):
# generator function that calls ufo2ft compiler for each ufo and
# yields ttFont instances
options = dict(kwargs)
if ttf:
for key in ("optimizeCFF", "roundTolerance"):
options.pop(key, None)
compile_func, fmt = ufo2ft.compileTTF, "TTF"
else:
for key in ("cubicConversionError", "reverseDirection"):
options.pop(key, None)
compile_func, fmt = ufo2ft.compileOTF, "OTF"
for ufo in ufos:
name = self._font_name(ufo)
logger.info("Building {} for {}".format(fmt, name))
yield compile_func(ufo, **options)
@timer()
def save_otfs(
self,
ufos,
ttf=False,
is_instance=False,
interpolatable=False,
use_afdko=False,
autohint=None,
subset=None,
use_production_names=None,
subroutinize=None, # deprecated
optimize_cff=CFFOptimization.NONE,
cff_round_tolerance=None,
remove_overlaps=True,
overlaps_backend=None,
reverse_direction=True,
conversion_error=None,
feature_writers=None,
interpolate_layout_from=None,
interpolate_layout_dir=None,
output_path=None,
output_dir=None,
inplace=True,
):
"""Build OpenType binaries from UFOs.
Args:
ufos: Font objects to compile.
ttf: If True, build fonts with TrueType outlines and .ttf extension.
is_instance: If output fonts are instances, for generating paths.
interpolatable: If output is interpolatable, for generating paths.
use_afdko: If True, use AFDKO to compile feature source.
autohint: Parameters to provide to ttfautohint. If not provided, the
autohinting step is skipped.
subset: Whether to subset the output according to data in the UFOs.
If not provided, also determined by flags in the UFOs.
use_production_names: Whether to use production glyph names in the
output. If not provided, determined by flags in the UFOs.
subroutinize: If True, subroutinize CFF outlines in output.
cff_round_tolerance (float): controls the rounding of point
coordinates in CFF table. It is defined as the maximum absolute
difference between the original float and the rounded integer
value. By default, all floats are rounded to integer (tolerance
0.5); a value of 0 completely disables rounding; values in
between only round floats which are close to their integral
part within the tolerated range. Ignored if ttf=True.
remove_overlaps: If True, remove overlaps in glyph shapes.
overlaps_backend: name of the library to remove overlaps. Can be
either "booleanOperations" (default) or "pathops".
reverse_direction: If True, reverse contour directions when
compiling TrueType outlines.
conversion_error: Error to allow when converting cubic CFF contours
to quadratic TrueType contours.
feature_writers: list of ufo2ft-compatible feature writer classes
or pre-initialized objects that are passed on to ufo2ft
feature compiler to generate automatic feature code. The
default value (None) means that ufo2ft will use its built-in
default feature writers (for kern, mark, mkmk, etc.). An empty
list ([]) will skip any automatic feature generation.
interpolate_layout_from: A DesignSpaceDocument object to give varLib
for interpolating layout tables to use in output.
interpolate_layout_dir: Directory containing the compiled master
fonts to use for interpolating binary layout tables.
output_path: output font file path. Only works when the input
'ufos' list contains a single font.
output_dir: directory where to save output files. Mutually
exclusive with 'output_path' argument.
"""
assert not (output_path and output_dir), "mutually exclusive args"
if output_path is not None and len(ufos) > 1:
raise ValueError("output_path requires a single input")
if subroutinize is not None:
import warnings
warnings.warn(
"the 'subroutinize' argument is deprecated, use 'optimize_cff'",
UserWarning,
)
if subroutinize:
optimize_cff = CFFOptimization.SUBROUTINIZE
else:
# for b/w compatibility, we still run the charstring specializer
# even when --no-subroutinize is used. Use the new --optimize-cff
# option to disable both specilization and subroutinization
optimize_cff = CFFOptimization.SPECIALIZE
ext = "ttf" if ttf else "otf"
if interpolate_layout_from is not None:
if interpolate_layout_dir is None:
interpolate_layout_dir = self._output_dir(
ext, is_instance=False, interpolatable=interpolatable
)
finder = partial(_varLib_finder, directory=interpolate_layout_dir, ext=ext)
# no need to generate automatic features in ufo2ft, since here we
# are interpolating precompiled GPOS table with fontTools.varLib.
# An empty 'featureWriters' list tells ufo2ft to not generate any
# automatic features.
# TODO: Add an argument to ufo2ft.compileOTF/compileTTF to
# completely skip compiling features into OTL tables
feature_writers = []
compiler_options = dict(
useProductionNames=use_production_names,
reverseDirection=reverse_direction,
cubicConversionError=conversion_error,
featureWriters=feature_writers,
inplace=True, # avoid extra copy
)
if use_afdko:
compiler_options["featureCompilerClass"] = FDKFeatureCompiler
if interpolatable:
if not ttf:
raise NotImplementedError("interpolatable CFF not supported yet")
logger.info("Building interpolation-compatible TTFs")
fonts = ufo2ft.compileInterpolatableTTFs(ufos, **compiler_options)
else:
fonts = self._iter_compile(
ufos,
ttf,
removeOverlaps=remove_overlaps,
overlapsBackend=overlaps_backend,
optimizeCFF=optimize_cff,
roundTolerance=cff_round_tolerance,
**compiler_options
)
do_autohint = ttf and autohint is not None
for font, ufo in zip(fonts, ufos):
if interpolate_layout_from is not None:
master_locations, instance_locations = self._designspace_locations(
interpolate_layout_from
)
loc = instance_locations[_normpath(ufo.path)]
gpos_src = interpolate_layout(
interpolate_layout_from, loc, finder, mapped=True
)
font["GPOS"] = gpos_src["GPOS"]
gsub_src = TTFont(finder(self._closest_location(master_locations, loc)))
if "GDEF" in gsub_src:
font["GDEF"] = gsub_src["GDEF"]
if "GSUB" in gsub_src:
font["GSUB"] = gsub_src["GSUB"]
if do_autohint:
# if we are autohinting, we save the unhinted font to a
# temporary path, and the hinted one to the final destination
fd, otf_path = tempfile.mkstemp("." + ext)
os.close(fd)
elif output_path is None:
otf_path = self._output_path(
ufo, ext, is_instance, interpolatable, output_dir=output_dir
)
else:
otf_path = output_path
logger.info("Saving %s", otf_path)
font.save(otf_path)
# 'subset' is an Optional[bool], can be None, True or False.
# When False, we never subset; when True, we always do; when
# None (default), we check the presence of custom parameters
if subset is False:
pass
elif subset is True or (
(KEEP_GLYPHS_OLD_KEY in ufo.lib or KEEP_GLYPHS_NEW_KEY in ufo.lib)
or any(glyph.lib.get(GLYPH_EXPORT_KEY, True) is False for glyph in ufo)
):
self.subset_otf_from_ufo(otf_path, ufo)
if not do_autohint:
continue
if output_path is not None:
hinted_otf_path = output_path
else:
hinted_otf_path = self._output_path(
ufo,
ext,
is_instance,
interpolatable,
autohinted=True,
output_dir=output_dir,
)
try:
ttfautohint(otf_path, hinted_otf_path, args=autohint)
except TTFAError:
# copy unhinted font to destination before re-raising error
shutil.copyfile(otf_path, hinted_otf_path)
raise
finally:
# must clean up temp file
os.remove(otf_path)
def _save_interpolatable_fonts(self, designspace, output_dir, ttf):
ext = "ttf" if ttf else "otf"
for source in designspace.sources:
assert isinstance(source.font, TTFont)
otf_path = self._output_path(
source,
ext,
is_instance=False,
interpolatable=True,
output_dir=output_dir,
suffix=source.layerName,
)
logger.info("Saving %s", otf_path)
source.font.save(otf_path)
source.path = otf_path
source.layerName = None
for instance in designspace.instances:
instance.path = instance.filename = None
if output_dir is None:
output_dir = self._output_dir(ext, interpolatable=True)
designspace_path = os.path.join(output_dir, os.path.basename(designspace.path))
logger.info("Saving %s", designspace_path)
designspace.write(designspace_path)
def subset_otf_from_ufo(self, otf_path, ufo):
"""Subset a font using export flags set by glyphsLib.
There are two more settings that can change export behavior:
"Export Glyphs" and "Remove Glyphs", which are currently not supported
for complexity reasons. See
https://github.com/googlei18n/glyphsLib/issues/295.
"""
from fontTools import subset
# ufo2ft always inserts a ".notdef" glyph as the first glyph
ufo_order = makeOfficialGlyphOrder(ufo)
if ".notdef" not in ufo_order:
ufo_order.insert(0, ".notdef")
ot_order = TTFont(otf_path).getGlyphOrder()
assert ot_order[0] == ".notdef"
assert len(ufo_order) == len(ot_order)
for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY):
keep_glyphs_list = ufo.lib.get(key)
if keep_glyphs_list is not None:
keep_glyphs = set(keep_glyphs_list)
break
else:
keep_glyphs = None
include = []
for source_name, binary_name in zip(ufo_order, ot_order):
if keep_glyphs and source_name not in keep_glyphs:
continue
if source_name in ufo:
exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True)
if not exported:
continue
include.append(binary_name)
# copied from nototools.subset
opt = subset.Options()
opt.name_IDs = ["*"]
opt.name_legacy = True
opt.name_languages = ["*"]
opt.layout_features = ["*"]
opt.notdef_outline = True
opt.recalc_bounds = True
opt.recalc_timestamp = True
opt.canonical_order = True
opt.glyph_names = True
font = subset.load_font(otf_path, opt, lazy=False)
subsetter = subset.Subsetter(options=opt)
subsetter.populate(glyphs=include)
subsetter.subset(font)
subset.save_font(font, otf_path, opt)
def run_from_glyphs(
self,
glyphs_path,
designspace_path=None,
master_dir=None,
instance_dir=None,
family_name=None,
mti_source=None,
**kwargs
):
"""Run toolchain from Glyphs source.
Args:
glyphs_path: Path to source file.
designspace_path: Output path of generated designspace document.
By default it's "<family_name>[-<base_style>].designspace".
master_dir: Directory where to save UFO masters (default:
"master_ufo").
instance_dir: Directory where to save UFO instances (default:
"instance_ufo").
family_name: If provided, uses this family name in the output.
mti_source: Path to property list file containing a dictionary
mapping UFO masters to dictionaries mapping layout table
tags to MTI source paths which should be compiled into
those tables.
kwargs: Arguments passed along to run_from_designspace.
"""
logger.info("Building master UFOs and designspace from Glyphs source")
designspace_path = self.build_master_ufos(
glyphs_path,
designspace_path=designspace_path,
master_dir=master_dir,
instance_dir=instance_dir,
family_name=family_name,
mti_source=mti_source,
)
self.run_from_designspace(designspace_path, **kwargs)
def interpolate_instance_ufos(
    self,
    designspace,
    include=None,
    round_instances=False,
    expand_features_to_instances=False,
):
    """Interpolate master UFOs with MutatorMath and return instance UFOs.

    Args:
        designspace: a DesignSpaceDocument object containing sources and
            instances.
        include (str): optional regular expression pattern to match the
            DS instance 'name' attribute and only interpolate the matching
            instances.
        round_instances (bool): round instances' coordinates to integer.
        expand_features_to_instances: parses the master feature file, expands all
            include()s and writes the resulting full feature file to all instance
            UFOs. Use this if you share feature files among masters in external
            files. Otherwise, the relative include paths can break as instances
            may end up elsewhere. Only done on interpolation.

    Returns:
        list of defcon.Font objects corresponding to the UFO instances.

    Raises:
        FontmakeError: if any of the sources defines a custom 'layer', for
            this is not supported by MutatorMath.
        ValueError: "expand_features_to_instances" is True but no source in the
            designspace document is designated with '<features copy="1"/>'.
    """
    # Deferred imports: glyphsLib/mutatorMath are only required when
    # instances are actually interpolated.
    from glyphsLib.interpolation import apply_instance_data
    from mutatorMath.ufo.document import DesignSpaceDocumentReader

    if any(source.layerName is not None for source in designspace.sources):
        raise FontmakeError(
            "MutatorMath doesn't support DesignSpace sources with 'layer' "
            "attribute"
        )

    # TODO: replace mutatorMath with ufoProcessor?
    builder = DesignSpaceDocumentReader(
        designspace.path, ufoVersion=3, roundGeometry=round_instances, verbose=True
    )
    logger.info("Interpolating master UFOs from designspace")
    if include is not None:
        # only interpolate the instances whose 'name' matches the pattern
        instances = self._search_instances(designspace, pattern=include)
        for instance_name in instances:
            builder.readInstance(("name", instance_name))
        filenames = set(instances.values())
    else:
        builder.readInstances()
        filenames = None  # will include all instances

    logger.info("Applying instance data from designspace")
    instance_ufos = apply_instance_data(designspace, include_filenames=filenames)

    if expand_features_to_instances:
        logger.debug("Expanding features to instance UFOs")
        master_source = next(
            (s for s in designspace.sources if s.copyFeatures), None
        )
        if not master_source:
            raise ValueError("No source is designated as the master for features.")
        else:
            # write the fully expanded feature text of the designated
            # master into every instance, so relative include() paths
            # cannot break when instances live elsewhere
            master_source_font = builder.sources[master_source.name][0]
            master_source_features = parseLayoutFeatures(master_source_font).asFea()
            for instance_ufo in instance_ufos:
                instance_ufo.features.text = master_source_features
                instance_ufo.save()

    return instance_ufos
def run_from_designspace(
    self,
    designspace_path,
    output=(),
    interpolate=False,
    masters_as_instances=False,
    interpolate_binary_layout=False,
    round_instances=False,
    feature_writers=None,
    expand_features_to_instances=False,
    **kwargs
):
    """Run toolchain from a DesignSpace document to produce either static
    instance fonts (ttf or otf), interpolatable or variable fonts.

    Args:
        designspace_path: Path to designspace document.
        output: iterable of output format strings; split below into
            static vs. interpolatable/variable outputs.
        interpolate: If True output all instance fonts, otherwise just
            masters. If the value is a string, only build instance(s) that
            match given name. The string is compiled into a regular
            expression and matched against the "name" attribute of
            designspace instances using `re.fullmatch`.
        masters_as_instances: If True, output master fonts as instances.
        interpolate_binary_layout: Interpolate layout tables from compiled
            master binaries.
        round_instances: apply integer rounding when interpolating with
            MutatorMath.
        feature_writers: optional ufo2ft feature-writers configuration;
            when None, a configuration stored in the designspace <lib>
            is used instead (if present).
        expand_features_to_instances: see interpolate_instance_ufos.
        kwargs: Arguments passed along to run_from_ufos.

    Raises:
        TypeError: "variable" or "interpolatable" outputs are incompatible
            with arguments "interpolate", "masters_as_instances", and
            "interpolate_binary_layout".
    """
    interp_outputs = INTERPOLATABLE_OUTPUTS.intersection(output)
    static_outputs = STATIC_OUTPUTS.intersection(output)
    if interp_outputs:
        for argname in (
            "interpolate",
            "masters_as_instances",
            "interpolate_binary_layout",
        ):
            # NOTE: reads this function's own arguments by name through
            # locals(); keep these strings in sync with the signature.
            if locals()[argname]:
                raise TypeError(
                    '"%s" argument incompatible with output %r'
                    % (argname, ", ".join(sorted(interp_outputs)))
                )

    designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace_path)

    # if no --feature-writers option was passed, check in the designspace's
    # <lib> element if user supplied a custom featureWriters configuration;
    # if so, use that for all the UFOs built from this designspace
    if feature_writers is None and FEATURE_WRITERS_KEY in designspace.lib:
        feature_writers = loadFeatureWriters(designspace)

    if static_outputs:
        self._run_from_designspace_static(
            designspace,
            outputs=static_outputs,
            interpolate=interpolate,
            masters_as_instances=masters_as_instances,
            interpolate_binary_layout=interpolate_binary_layout,
            round_instances=round_instances,
            feature_writers=feature_writers,
            expand_features_to_instances=expand_features_to_instances,
            **kwargs
        )
    if interp_outputs:
        self._run_from_designspace_interpolatable(
            designspace,
            outputs=interp_outputs,
            feature_writers=feature_writers,
            **kwargs
        )
def _run_from_designspace_static(
    self,
    designspace,
    outputs,
    interpolate=False,
    masters_as_instances=False,
    interpolate_binary_layout=False,
    round_instances=False,
    feature_writers=None,
    expand_features_to_instances=False,
    **kwargs
):
    """Build static (non-variable) binaries from a designspace.

    Collects the UFOs to compile — master paths and/or interpolated
    instance fonts — then forwards them to run_from_ufos.
    """
    ufos = []
    # masters are included when not interpolating, or additionally when
    # masters_as_instances is requested
    if not interpolate or masters_as_instances:
        ufos.extend((s.path for s in designspace.sources if s.path))
    if interpolate:
        # a string value of 'interpolate' acts as a regex filter on the
        # instance names (see run_from_designspace docstring)
        pattern = interpolate if isinstance(interpolate, basestring) else None
        ufos.extend(
            self.interpolate_instance_ufos(
                designspace,
                include=pattern,
                round_instances=round_instances,
                expand_features_to_instances=expand_features_to_instances,
            )
        )

    if interpolate_binary_layout is False:
        interpolate_layout_from = interpolate_layout_dir = None
    else:
        interpolate_layout_from = designspace
        # a string value doubles as the directory of compiled masters
        if isinstance(interpolate_binary_layout, basestring):
            interpolate_layout_dir = interpolate_binary_layout
        else:
            interpolate_layout_dir = None

    self.run_from_ufos(
        ufos,
        output=outputs,
        is_instance=(interpolate or masters_as_instances),
        interpolate_layout_from=interpolate_layout_from,
        interpolate_layout_dir=interpolate_layout_dir,
        feature_writers=feature_writers,
        **kwargs
    )
def _run_from_designspace_interpolatable(
    self, designspace, outputs, output_path=None, output_dir=None, **kwargs
):
    """Build variable and/or interpolatable-master outputs.

    Master TTFs/OTFs are compiled lazily and shared between the
    "variable*" and "*-interpolatable" outputs, so each outline flavor
    is only compiled once.
    """
    ttf_masters = None
    otf_masters = None

    if "variable" in outputs:
        ttf_masters = self.build_interpolatable_ttfs(designspace, **kwargs)
        self.build_variable_font(
            ttf_masters, output_path=output_path, output_dir=output_dir
        )
    if "ttf-interpolatable" in outputs:
        if ttf_masters is None:
            ttf_masters = self.build_interpolatable_ttfs(designspace, **kwargs)
        self._save_interpolatable_fonts(ttf_masters, output_dir, ttf=True)

    if "variable-cff2" in outputs:
        otf_masters = self.build_interpolatable_otfs(designspace, **kwargs)
        self.build_variable_font(
            otf_masters,
            output_path=output_path,
            output_dir=output_dir,
            ttf=False,
        )
    if "otf-interpolatable" in outputs:
        if otf_masters is None:
            otf_masters = self.build_interpolatable_otfs(designspace, **kwargs)
        self._save_interpolatable_fonts(otf_masters, output_dir, ttf=False)
def run_from_ufos(self, ufos, output=(), **kwargs):
    """Run toolchain from UFO sources.

    Args:
        ufos: List of UFO sources, as either paths or opened objects.
        output: List of output formats to generate.
        kwargs: Arguments passed along to save_otfs.

    Raises:
        FontmakeError: if `ufos` is neither a glob path string nor a
            list of paths/Font objects.
    """
    if set(output) == {"ufo"}:
        return

    # the `ufos` parameter can be a list of UFO objects
    # or it can be a path (string) with a glob syntax
    ufo_paths = []
    if isinstance(ufos, basestring):
        ufo_paths = glob.glob(ufos)
        ufos = [Font(x) for x in ufo_paths]
    elif isinstance(ufos, list):
        # ufos can be either paths or open Font objects, so normalize them
        ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
        ufo_paths = [x.path for x in ufos]
    else:
        raise FontmakeError(
            "UFOs parameter is neither a defcon.Font object, a path or a glob, "
            "nor a list of any of these.",
            ufos,
        )

    need_reload = False
    if "otf" in output:
        self.build_otfs(ufos, **kwargs)
        need_reload = True

    if "ttf" in output:
        # the compilers may modify the UFO objects in place (save_otfs
        # passes inplace=True), so reload fresh copies from disk between
        # output formats
        if need_reload:
            ufos = [Font(path) for path in ufo_paths]
        self.build_ttfs(ufos, **kwargs)
        need_reload = True
@staticmethod
def _search_instances(designspace, pattern):
    """Return an ordered {name: filename} map of the designspace
    instances whose 'name' fully matches the given regex pattern.

    Raises FontmakeError when nothing matches.
    """
    # is 'name' optional? 'filename' certainly must not be
    matching = OrderedDict(
        (instance.name, instance.filename)
        for instance in designspace.instances
        if fullmatch(pattern, instance.name)
    )
    if not matching:
        raise FontmakeError("No instance found with %r" % pattern)
    return matching
def _font_name(self, ufo):
"""Generate a postscript-style font name."""
family_name = (
ufo.info.familyName.replace(" ", "")
if ufo.info.familyName is not None
else "None"
)
style_name = (
ufo.info.styleName.replace(" ", "")
if ufo.info.styleName is not None
else "None"
)
return "{}-{}".format(family_name, style_name)
def _output_dir(
self,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
):
"""Generate an output directory.
Args:
ext: extension string.
is_instance: The output is instance font or not.
interpolatable: The output is interpolatable or not.
autohinted: The output is autohinted or not.
is_variable: The output is variable font or not.
Return:
output directory string.
"""
assert not (is_variable and any([is_instance, interpolatable]))
# FIXME? Use user configurable destination folders.
if is_variable:
dir_prefix = "variable_"
elif is_instance:
dir_prefix = "instance_"
else:
dir_prefix = "master_"
dir_suffix = "_interpolatable" if interpolatable else ""
output_dir = dir_prefix + ext + dir_suffix
if autohinted:
output_dir = os.path.join("autohinted", output_dir)
return output_dir
def _output_path(
    self,
    ufo_or_font_name,
    ext,
    is_instance=False,
    interpolatable=False,
    autohinted=False,
    is_variable=False,
    output_dir=None,
    suffix=None,
):
    """Generate output path for a font file with given extension.

    Args:
        ufo_or_font_name: either a ready-made font name string, or a
            font object whose path basename (or, failing that, its
            family/style names via _font_name) provides the name.
        ext: file extension, without the leading dot.
        output_dir: destination directory; derived from the boolean
            flags via _output_dir when None. Created if missing.
        suffix: optional extra component appended as "<name>-<suffix>".
    """
    if isinstance(ufo_or_font_name, basestring):
        font_name = ufo_or_font_name
    elif ufo_or_font_name.path:
        # derive the name from the font's own file name, extension dropped
        font_name = os.path.splitext(
            os.path.basename(os.path.normpath(ufo_or_font_name.path))
        )[0]
    else:
        font_name = self._font_name(ufo_or_font_name)

    if output_dir is None:
        output_dir = self._output_dir(
            ext, is_instance, interpolatable, autohinted, is_variable
        )
    # NOTE(review): exists()+makedirs() is racy under concurrent runs —
    # presumably single-process usage; confirm before parallelizing.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if suffix:
        return os.path.join(output_dir, "{}-{}.{}".format(font_name, suffix, ext))
    else:
        return os.path.join(output_dir, "{}.{}".format(font_name, ext))
def _designspace_locations(self, designspace):
    """Map font filenames to their locations in a designspace.

    Returns a two-element list: one {normalized path: location} dict
    for the sources, and one for the instances.
    """
    return [
        {_normpath(element.path): element.location for element in elements}
        for elements in (designspace.sources, designspace.instances)
    ]
def _closest_location(self, location_map, target):
"""Return path of font whose location is closest to target."""
def dist(a, b):
return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys()))
paths = iter(location_map.keys())
closest = next(paths)
closest_dist = dist(target, location_map[closest])
for path in paths:
cur_dist = dist(target, location_map[path])
if cur_dist < closest_dist:
closest = path
closest_dist = cur_dist
return closest
|
googlefonts/fontmake | Lib/fontmake/font_project.py | FontProject.build_ttfs | python | def build_ttfs(self, ufos, **kwargs):
self.save_otfs(ufos, ttf=True, **kwargs) | Build OpenType binaries with TrueType outlines. | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L271-L273 | null | class FontProject(object):
"""Provides methods for building fonts."""
def __init__(self, timing=False, verbose="INFO", validate_ufo=False):
    """Configure logging and UFO validation for this project.

    Args:
        timing: if True, emit DEBUG-level logs from the timer logger.
        verbose: root logging level name, e.g. "INFO" or "DEBUG".
        validate_ufo: enable ufoLib validation when reading/writing UFOs.
    """
    logging.basicConfig(level=getattr(logging, verbose.upper()))
    # keep the fontTools subsetter quieter than the root logger
    logging.getLogger("fontTools.subset").setLevel(logging.WARNING)

    if timing:
        configLogger(logger=timer.logger, level=logging.DEBUG)

    logger.debug(
        "ufoLib UFO validation is %s", "enabled" if validate_ufo else "disabled"
    )
    # applies globally to all subsequent UFO reads/writes
    setUfoLibReadValidate(validate_ufo)
    setUfoLibWriteValidate(validate_ufo)
@timer()
def build_master_ufos(
    self,
    glyphs_path,
    designspace_path=None,
    master_dir=None,
    instance_dir=None,
    family_name=None,
    mti_source=None,
):
    """Build UFOs and MutatorMath designspace from Glyphs source.

    Args:
        glyphs_path: Path to the .glyphs source file.
        designspace_path: Output path of the designspace document; when
            None it is written next to the masters using the name chosen
            by glyphsLib.
        master_dir: Directory for master UFOs (default "master_ufo").
        instance_dir: Directory for instance UFOs (default "instance_ufo").
        family_name: If provided, uses this family name in the output.
        mti_source: Optional plist mapping masters to MTI layout sources.

    Returns:
        Path of the written designspace document.
    """
    import glyphsLib

    if master_dir is None:
        master_dir = self._output_dir("ufo")
    if not os.path.isdir(master_dir):
        os.mkdir(master_dir)
    if instance_dir is None:
        instance_dir = self._output_dir("ufo", is_instance=True)
    if not os.path.isdir(instance_dir):
        os.mkdir(instance_dir)

    font = glyphsLib.GSFont(glyphs_path)
    if designspace_path is not None:
        designspace_dir = os.path.dirname(designspace_path)
    else:
        designspace_dir = master_dir
    # glyphsLib.to_designspace expects instance_dir to be relative
    instance_dir = os.path.relpath(instance_dir, designspace_dir)
    designspace = glyphsLib.to_designspace(
        font, family_name=family_name, instance_dir=instance_dir
    )

    masters = {}
    # multiple sources can have the same font/filename (but different layer),
    # we want to save a font only once
    for source in designspace.sources:
        if source.filename in masters:
            assert source.font is masters[source.filename]
            continue
        ufo_path = os.path.join(master_dir, source.filename)
        # no need to also set the relative 'filename' attribute as that
        # will be auto-updated on writing the designspace document
        source.path = ufo_path
        source.font.save(ufo_path)
        masters[source.filename] = source.font

    if designspace_path is None:
        designspace_path = os.path.join(master_dir, designspace.filename)
    designspace.write(designspace_path)
    if mti_source:
        self.add_mti_features_to_master_ufos(mti_source, masters.values())
    return designspace_path
@timer()
def add_mti_features_to_master_ufos(self, mti_source, masters):
    """Attach precompiled MTI layout sources to each master UFO.

    Args:
        mti_source: Path to a property list file mapping master names
            (UFO basename without extension) to {table tag: MTI source
            path} dictionaries.
        masters: Iterable of master font objects; each is modified and
            saved in place.
    """
    mti_dir = os.path.dirname(mti_source)
    with open(mti_source, "rb") as mti_file:
        mti_paths = readPlist(mti_file)
    for master in masters:
        # Use splitext to drop the ".ufo" extension. The previous
        # rstrip(".ufo") stripped a *character set*, mangling any master
        # name ending in "u", "f" or "o" (e.g. "Foo.ufo" -> "F").
        key = os.path.splitext(os.path.basename(master.path))[0]
        for table, path in mti_paths[key].items():
            # renamed from "mti_source" to avoid shadowing the parameter
            with open(os.path.join(mti_dir, path), "rb") as table_file:
                ufo_path = (
                    "com.github.googlei18n.ufo2ft.mtiFeatures/%s.mti"
                    % table.strip()
                )
                master.data[ufo_path] = table_file.read()
        # If we have MTI sources, any Adobe feature files derived from
        # the Glyphs file should be ignored. We clear it here because
        # it only contains junk information anyway.
        master.features.text = ""
        master.save()
@_deprecated
@timer()
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
    """Remove overlaps in UFOs' glyphs' contours.

    Args:
        ufos: fonts to process in place.
        glyph_filter: predicate selecting glyphs to process; the default
            skips glyphs without contours.

    Raises:
        BooleanOperationsError: re-raised after logging the glyph that
            failed to merge.
    """
    from booleanOperations import union, BooleanOperationsError

    for ufo in ufos:
        font_name = self._font_name(ufo)
        logger.info("Removing overlaps for " + font_name)
        for glyph in ufo:
            if not glyph_filter(glyph):
                continue
            # empty the glyph first, then let union() re-draw the merged
            # contours into it
            contours = list(glyph)
            glyph.clearContours()
            try:
                union(contours, glyph.getPointPen())
            except BooleanOperationsError:
                logger.error(
                    "Failed to remove overlaps for %s: %r", font_name, glyph.name
                )
                raise
@_deprecated
@timer()
def decompose_glyphs(self, ufos, glyph_filter=lambda g: True):
    """Move components of UFOs' glyphs to their outlines.

    Args:
        ufos: fonts to process in place.
        glyph_filter: predicate selecting which composite glyphs to
            decompose (glyphs without components are always skipped).
    """
    for ufo in ufos:
        logger.info("Decomposing glyphs for " + self._font_name(ufo))
        for glyph in ufo:
            if not glyph.components or not glyph_filter(glyph):
                continue
            # copy all (possibly nested) component outlines into the
            # glyph itself, then drop the component references
            self._deep_copy_contours(ufo, glyph, glyph, Transform())
            glyph.clearComponents()
def _deep_copy_contours(self, ufo, parent, component, transformation):
    """Copy contours from component to parent, including nested components.

    Recurses depth-first through nested components, accumulating their
    transformations, then draws the component's own outlines into
    ``parent`` through the accumulated transform.
    """
    for nested in component.components:
        self._deep_copy_contours(
            ufo,
            parent,
            ufo[nested.baseGlyph],
            transformation.transform(nested.transformation),
        )

    # the top-level call passes parent == component; only actual
    # components are drawn into the parent
    if component != parent:
        pen = TransformPen(parent.getPen(), transformation)

        # if the transformation has a negative determinant, it will reverse
        # the contour direction of the component
        xx, xy, yx, yy = transformation[:4]
        if xx * yy - xy * yx < 0:
            pen = ReverseContourPen(pen)

        component.draw(pen)
@_deprecated
@timer()
def convert_curves(
    self, ufos, compatible=False, reverse_direction=True, conversion_error=None
):
    """Convert the UFOs' cubic curves to quadratic ones, in place.

    Args:
        ufos: fonts to convert.
        compatible: if True, convert all fonts together so the results
            remain point-compatible for interpolation.
        reverse_direction: reverse contour direction during conversion.
        conversion_error: maximum approximation error, in em units.
    """
    from cu2qu.ufo import font_to_quadratic, fonts_to_quadratic

    if compatible:
        logger.info("Converting curves compatibly")
        fonts_to_quadratic(
            ufos,
            max_err_em=conversion_error,
            reverse_direction=reverse_direction,
            dump_stats=True,
        )
    else:
        for ufo in ufos:
            logger.info("Converting curves for " + self._font_name(ufo))
            font_to_quadratic(
                ufo,
                max_err_em=conversion_error,
                reverse_direction=reverse_direction,
                dump_stats=True,
            )
def build_otfs(self, ufos, **kwargs):
    """Build OpenType binaries with CFF outlines.

    Thin delegation to save_otfs, whose default ttf=False selects CFF.
    """
    self.save_otfs(ufos, **kwargs)
@staticmethod
def _load_designspace_sources(designspace):
    """Populate each source's ``font`` attribute, loading every UFO
    path at most once even when several sources share it."""
    loaded = {}
    for source in designspace.sources:
        font = loaded.get(source.path)
        if font is None:
            assert source.path is not None
            font = Font(source.path)
            loaded[source.path] = font
        source.font = font
def _build_interpolatable_masters(
    self,
    designspace,
    ttf,
    use_production_names=None,
    reverse_direction=True,
    conversion_error=None,
    feature_writers=None,
    cff_round_tolerance=None,
    **kwargs
):
    """Compile designspace masters to interpolatable binaries via ufo2ft.

    Args:
        designspace: a DesignSpaceDocument object, or a str/path-like
            path to a designspace file.
        ttf: if True compile TrueType outlines, otherwise CFF.

    Returns:
        The designspace with compiled fonts attached to its sources.
    """
    # Normalize path-like objects (e.g. pathlib.Path) to a plain string
    # first, exactly as build_variable_font does. Previously the result
    # was assigned to ds_path and then unconditionally overwritten by the
    # if/else below, which crashed on the missing ``.path`` attribute of
    # non-str path-like objects.
    if hasattr(designspace, "__fspath__"):
        designspace = designspace.__fspath__()
    if isinstance(designspace, basestring):
        ds_path = designspace
    else:
        # reload designspace from its path so we have a new copy
        # that can be modified in-place.
        ds_path = designspace.path
    if ds_path is not None:
        designspace = designspaceLib.DesignSpaceDocument.fromfile(ds_path)

    self._load_designspace_sources(designspace)

    if ttf:
        return ufo2ft.compileInterpolatableTTFsFromDS(
            designspace,
            useProductionNames=use_production_names,
            reverseDirection=reverse_direction,
            cubicConversionError=conversion_error,
            featureWriters=feature_writers,
            inplace=True,
        )
    else:
        return ufo2ft.compileInterpolatableOTFsFromDS(
            designspace,
            useProductionNames=use_production_names,
            roundTolerance=cff_round_tolerance,
            featureWriters=feature_writers,
            inplace=True,
        )
def build_interpolatable_ttfs(self, designspace, **kwargs):
    """Build OpenType binaries with interpolatable TrueType outlines
    from DesignSpaceDocument object.

    Delegates to _build_interpolatable_masters with ttf=True.
    """
    return self._build_interpolatable_masters(designspace, ttf=True, **kwargs)
def build_interpolatable_otfs(self, designspace, **kwargs):
    """Build OpenType binaries with interpolatable CFF outlines
    from DesignSpaceDocument object.

    Delegates to _build_interpolatable_masters with ttf=False (the
    previous docstring wrongly said "TrueType outlines").
    """
    return self._build_interpolatable_masters(designspace, ttf=False, **kwargs)
def build_variable_font(
    self,
    designspace,
    output_path=None,
    output_dir=None,
    master_bin_dir=None,
    ttf=True,
):
    """Build OpenType variable font from masters in a designspace.

    Args:
        designspace: DesignSpaceDocument object, or a str/path-like
            path to a designspace file.
        output_path: exact output file path; mutually exclusive with
            output_dir. Defaults to "<designspace basename>-VF.<ext>".
        output_dir: destination directory for the output file.
        master_bin_dir: directory of compiled master binaries; when None,
            the default interpolatable output directory is used.
        ttf: build from TrueType masters (.ttf) if True, else CFF (.otf).
    """
    assert not (output_path and output_dir), "mutually exclusive args"
    ext = "ttf" if ttf else "otf"

    # normalize path-like objects to str, then load from file if needed
    if hasattr(designspace, "__fspath__"):
        designspace = designspace.__fspath__()
    if isinstance(designspace, basestring):
        designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace)

    if master_bin_dir is None:
        master_bin_dir = self._output_dir(ext, interpolatable=True)
        finder = partial(_varLib_finder, directory=master_bin_dir)
    else:
        # NOTE(review): in this branch the supplied master_bin_dir value
        # is never used — the sources must already hold compiled TTFont
        # objects, which are passed through as-is. Confirm intent.
        assert all(isinstance(s.font, TTFont) for s in designspace.sources)
        finder = lambda s: s  # noqa: E731

    if output_path is None:
        output_path = (
            os.path.splitext(os.path.basename(designspace.path))[0] + "-VF"
        )
    output_path = self._output_path(
        output_path, ext, is_variable=True, output_dir=output_dir
    )

    logger.info("Building variable font " + output_path)

    font, _, _ = varLib.build(designspace, finder)
    font.save(output_path)
def _iter_compile(self, ufos, ttf=False, **kwargs):
    """Generator that runs the ufo2ft compiler over each UFO, yielding
    TTFont instances. Options that only apply to the other outline
    format are silently dropped from kwargs."""
    opts = dict(kwargs)
    if ttf:
        compile_func, fmt = ufo2ft.compileTTF, "TTF"
        irrelevant = ("optimizeCFF", "roundTolerance")
    else:
        compile_func, fmt = ufo2ft.compileOTF, "OTF"
        irrelevant = ("cubicConversionError", "reverseDirection")
    for key in irrelevant:
        opts.pop(key, None)

    for source in ufos:
        logger.info("Building {} for {}".format(fmt, self._font_name(source)))
        yield compile_func(source, **opts)
@timer()
def save_otfs(
    self,
    ufos,
    ttf=False,
    is_instance=False,
    interpolatable=False,
    use_afdko=False,
    autohint=None,
    subset=None,
    use_production_names=None,
    subroutinize=None,  # deprecated
    optimize_cff=CFFOptimization.NONE,
    cff_round_tolerance=None,
    remove_overlaps=True,
    overlaps_backend=None,
    reverse_direction=True,
    conversion_error=None,
    feature_writers=None,
    interpolate_layout_from=None,
    interpolate_layout_dir=None,
    output_path=None,
    output_dir=None,
    inplace=True,
):
    """Build OpenType binaries from UFOs.

    Args:
        ufos: Font objects to compile.
        ttf: If True, build fonts with TrueType outlines and .ttf extension.
        is_instance: If output fonts are instances, for generating paths.
        interpolatable: If output is interpolatable, for generating paths.
        use_afdko: If True, use AFDKO to compile feature source.
        autohint: Parameters to provide to ttfautohint. If not provided, the
            autohinting step is skipped.
        subset: Whether to subset the output according to data in the UFOs.
            If not provided, also determined by flags in the UFOs.
        use_production_names: Whether to use production glyph names in the
            output. If not provided, determined by flags in the UFOs.
        subroutinize: If True, subroutinize CFF outlines in output.
        cff_round_tolerance (float): controls the rounding of point
            coordinates in CFF table. It is defined as the maximum absolute
            difference between the original float and the rounded integer
            value. By default, all floats are rounded to integer (tolerance
            0.5); a value of 0 completely disables rounding; values in
            between only round floats which are close to their integral
            part within the tolerated range. Ignored if ttf=True.
        remove_overlaps: If True, remove overlaps in glyph shapes.
        overlaps_backend: name of the library to remove overlaps. Can be
            either "booleanOperations" (default) or "pathops".
        reverse_direction: If True, reverse contour directions when
            compiling TrueType outlines.
        conversion_error: Error to allow when converting cubic CFF contours
            to quadratic TrueType contours.
        feature_writers: list of ufo2ft-compatible feature writer classes
            or pre-initialized objects that are passed on to ufo2ft
            feature compiler to generate automatic feature code. The
            default value (None) means that ufo2ft will use its built-in
            default feature writers (for kern, mark, mkmk, etc.). An empty
            list ([]) will skip any automatic feature generation.
        interpolate_layout_from: A DesignSpaceDocument object to give varLib
            for interpolating layout tables to use in output.
        interpolate_layout_dir: Directory containing the compiled master
            fonts to use for interpolating binary layout tables.
        output_path: output font file path. Only works when the input
            'ufos' list contains a single font.
        output_dir: directory where to save output files. Mutually
            exclusive with 'output_path' argument.
    """
    # NOTE(review): the 'inplace' parameter is not referenced in this
    # body (the compiler always gets inplace=True below) — confirm.
    assert not (output_path and output_dir), "mutually exclusive args"
    if output_path is not None and len(ufos) > 1:
        raise ValueError("output_path requires a single input")
    if subroutinize is not None:
        import warnings

        warnings.warn(
            "the 'subroutinize' argument is deprecated, use 'optimize_cff'",
            UserWarning,
        )
        if subroutinize:
            optimize_cff = CFFOptimization.SUBROUTINIZE
        else:
            # for b/w compatibility, we still run the charstring specializer
            # even when --no-subroutinize is used. Use the new --optimize-cff
            # option to disable both specilization and subroutinization
            optimize_cff = CFFOptimization.SPECIALIZE
    ext = "ttf" if ttf else "otf"

    if interpolate_layout_from is not None:
        if interpolate_layout_dir is None:
            interpolate_layout_dir = self._output_dir(
                ext, is_instance=False, interpolatable=interpolatable
            )
        finder = partial(_varLib_finder, directory=interpolate_layout_dir, ext=ext)
        # no need to generate automatic features in ufo2ft, since here we
        # are interpolating precompiled GPOS table with fontTools.varLib.
        # An empty 'featureWriters' list tells ufo2ft to not generate any
        # automatic features.
        # TODO: Add an argument to ufo2ft.compileOTF/compileTTF to
        # completely skip compiling features into OTL tables
        feature_writers = []

    compiler_options = dict(
        useProductionNames=use_production_names,
        reverseDirection=reverse_direction,
        cubicConversionError=conversion_error,
        featureWriters=feature_writers,
        inplace=True,  # avoid extra copy
    )
    if use_afdko:
        compiler_options["featureCompilerClass"] = FDKFeatureCompiler

    if interpolatable:
        if not ttf:
            raise NotImplementedError("interpolatable CFF not supported yet")

        logger.info("Building interpolation-compatible TTFs")

        fonts = ufo2ft.compileInterpolatableTTFs(ufos, **compiler_options)
    else:
        # lazy generator; each font is compiled as the loop below pulls it
        fonts = self._iter_compile(
            ufos,
            ttf,
            removeOverlaps=remove_overlaps,
            overlapsBackend=overlaps_backend,
            optimizeCFF=optimize_cff,
            roundTolerance=cff_round_tolerance,
            **compiler_options
        )

    do_autohint = ttf and autohint is not None

    for font, ufo in zip(fonts, ufos):
        if interpolate_layout_from is not None:
            # splice an interpolated GPOS, plus GDEF/GSUB copied from the
            # master binary closest to this instance's location
            master_locations, instance_locations = self._designspace_locations(
                interpolate_layout_from
            )
            loc = instance_locations[_normpath(ufo.path)]
            gpos_src = interpolate_layout(
                interpolate_layout_from, loc, finder, mapped=True
            )
            font["GPOS"] = gpos_src["GPOS"]
            gsub_src = TTFont(finder(self._closest_location(master_locations, loc)))
            if "GDEF" in gsub_src:
                font["GDEF"] = gsub_src["GDEF"]
            if "GSUB" in gsub_src:
                font["GSUB"] = gsub_src["GSUB"]

        if do_autohint:
            # if we are autohinting, we save the unhinted font to a
            # temporary path, and the hinted one to the final destination
            fd, otf_path = tempfile.mkstemp("." + ext)
            os.close(fd)
        elif output_path is None:
            otf_path = self._output_path(
                ufo, ext, is_instance, interpolatable, output_dir=output_dir
            )
        else:
            otf_path = output_path

        logger.info("Saving %s", otf_path)
        font.save(otf_path)

        # 'subset' is an Optional[bool], can be None, True or False.
        # When False, we never subset; when True, we always do; when
        # None (default), we check the presence of custom parameters
        if subset is False:
            pass
        elif subset is True or (
            (KEEP_GLYPHS_OLD_KEY in ufo.lib or KEEP_GLYPHS_NEW_KEY in ufo.lib)
            or any(glyph.lib.get(GLYPH_EXPORT_KEY, True) is False for glyph in ufo)
        ):
            self.subset_otf_from_ufo(otf_path, ufo)

        if not do_autohint:
            continue

        if output_path is not None:
            hinted_otf_path = output_path
        else:
            hinted_otf_path = self._output_path(
                ufo,
                ext,
                is_instance,
                interpolatable,
                autohinted=True,
                output_dir=output_dir,
            )
        try:
            ttfautohint(otf_path, hinted_otf_path, args=autohint)
        except TTFAError:
            # copy unhinted font to destination before re-raising error
            shutil.copyfile(otf_path, hinted_otf_path)
            raise
        finally:
            # must clean up temp file
            os.remove(otf_path)
def _save_interpolatable_fonts(self, designspace, output_dir, ttf):
    """Save compiled interpolatable master fonts plus the updated
    designspace document into the output directory."""
    ext = "ttf" if ttf else "otf"
    for source in designspace.sources:
        assert isinstance(source.font, TTFont)
        otf_path = self._output_path(
            source,
            ext,
            is_instance=False,
            interpolatable=True,
            output_dir=output_dir,
            suffix=source.layerName,
        )
        logger.info("Saving %s", otf_path)
        source.font.save(otf_path)
        source.path = otf_path
        # the layer name was encoded into the file-name suffix above, so
        # clear it on the source entry before writing the designspace
        source.layerName = None
    # instances are not built here; drop their stale paths/filenames
    for instance in designspace.instances:
        instance.path = instance.filename = None

    if output_dir is None:
        output_dir = self._output_dir(ext, interpolatable=True)
    designspace_path = os.path.join(output_dir, os.path.basename(designspace.path))
    logger.info("Saving %s", designspace_path)
    designspace.write(designspace_path)
def subset_otf_from_ufo(self, otf_path, ufo):
    """Subset a font using export flags set by glyphsLib.

    There are two more settings that can change export behavior:
    "Export Glyphs" and "Remove Glyphs", which are currently not supported
    for complexity reasons. See
    https://github.com/googlei18n/glyphsLib/issues/295.
    """
    from fontTools import subset

    # ufo2ft always inserts a ".notdef" glyph as the first glyph
    ufo_order = makeOfficialGlyphOrder(ufo)
    if ".notdef" not in ufo_order:
        ufo_order.insert(0, ".notdef")
    ot_order = TTFont(otf_path).getGlyphOrder()
    assert ot_order[0] == ".notdef"
    assert len(ufo_order) == len(ot_order)

    # prefer the new keep-glyphs lib key; fall back to the old one
    for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY):
        keep_glyphs_list = ufo.lib.get(key)
        if keep_glyphs_list is not None:
            keep_glyphs = set(keep_glyphs_list)
            break
    else:
        keep_glyphs = None

    include = []
    # walk both orders in lockstep to translate UFO glyph names into the
    # binary's glyph names while applying the export flags
    for source_name, binary_name in zip(ufo_order, ot_order):
        if keep_glyphs and source_name not in keep_glyphs:
            continue

        if source_name in ufo:
            exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True)
            if not exported:
                continue

        include.append(binary_name)

    # copied from nototools.subset
    opt = subset.Options()
    opt.name_IDs = ["*"]
    opt.name_legacy = True
    opt.name_languages = ["*"]
    opt.layout_features = ["*"]
    opt.notdef_outline = True
    opt.recalc_bounds = True
    opt.recalc_timestamp = True
    opt.canonical_order = True
    opt.glyph_names = True

    font = subset.load_font(otf_path, opt, lazy=False)
    subsetter = subset.Subsetter(options=opt)
    subsetter.populate(glyphs=include)
    subsetter.subset(font)
    subset.save_font(font, otf_path, opt)
def run_from_glyphs(
    self,
    glyphs_path,
    designspace_path=None,
    master_dir=None,
    instance_dir=None,
    family_name=None,
    mti_source=None,
    **kwargs
):
    """Run toolchain from Glyphs source.

    Args:
        glyphs_path: Path to source file.
        designspace_path: Output path of generated designspace document.
            By default it's "<family_name>[-<base_style>].designspace".
        master_dir: Directory where to save UFO masters (default:
            "master_ufo").
        instance_dir: Directory where to save UFO instances (default:
            "instance_ufo").
        family_name: If provided, uses this family name in the output.
        mti_source: Path to property list file containing a dictionary
            mapping UFO masters to dictionaries mapping layout table
            tags to MTI source paths which should be compiled into
            those tables.
        kwargs: Arguments passed along to run_from_designspace.
    """
    logger.info("Building master UFOs and designspace from Glyphs source")
    designspace_path = self.build_master_ufos(
        glyphs_path,
        designspace_path=designspace_path,
        master_dir=master_dir,
        instance_dir=instance_dir,
        family_name=family_name,
        mti_source=mti_source,
    )
    # hand off to the designspace pipeline for compilation
    self.run_from_designspace(designspace_path, **kwargs)
def interpolate_instance_ufos(
    self,
    designspace,
    include=None,
    round_instances=False,
    expand_features_to_instances=False,
):
    """Interpolate master UFOs with MutatorMath and return instance UFOs.

    Args:
        designspace: a DesignSpaceDocument object containing sources and
            instances.
        include (str): optional regular expression pattern to match the
            DS instance 'name' attribute and only interpolate the matching
            instances.
        round_instances (bool): round instances' coordinates to integer.
        expand_features_to_instances: parses the master feature file, expands all
            include()s and writes the resulting full feature file to all instance
            UFOs. Use this if you share feature files among masters in external
            files. Otherwise, the relative include paths can break as instances
            may end up elsewhere. Only done on interpolation.

    Returns:
        list of defcon.Font objects corresponding to the UFO instances.

    Raises:
        FontmakeError: if any of the sources defines a custom 'layer', for
            this is not supported by MutatorMath.
        ValueError: "expand_features_to_instances" is True but no source in the
            designspace document is designated with '<features copy="1"/>'.
    """
    # Deferred imports: glyphsLib/mutatorMath are only required when
    # instances are actually interpolated.
    from glyphsLib.interpolation import apply_instance_data
    from mutatorMath.ufo.document import DesignSpaceDocumentReader

    if any(source.layerName is not None for source in designspace.sources):
        raise FontmakeError(
            "MutatorMath doesn't support DesignSpace sources with 'layer' "
            "attribute"
        )

    # TODO: replace mutatorMath with ufoProcessor?
    builder = DesignSpaceDocumentReader(
        designspace.path, ufoVersion=3, roundGeometry=round_instances, verbose=True
    )
    logger.info("Interpolating master UFOs from designspace")
    if include is not None:
        # only interpolate the instances whose 'name' matches the pattern
        instances = self._search_instances(designspace, pattern=include)
        for instance_name in instances:
            builder.readInstance(("name", instance_name))
        filenames = set(instances.values())
    else:
        builder.readInstances()
        filenames = None  # will include all instances

    logger.info("Applying instance data from designspace")
    instance_ufos = apply_instance_data(designspace, include_filenames=filenames)

    if expand_features_to_instances:
        logger.debug("Expanding features to instance UFOs")
        master_source = next(
            (s for s in designspace.sources if s.copyFeatures), None
        )
        if not master_source:
            raise ValueError("No source is designated as the master for features.")
        else:
            # write the fully expanded feature text of the designated
            # master into every instance, so relative include() paths
            # cannot break when instances live elsewhere
            master_source_font = builder.sources[master_source.name][0]
            master_source_features = parseLayoutFeatures(master_source_font).asFea()
            for instance_ufo in instance_ufos:
                instance_ufo.features.text = master_source_features
                instance_ufo.save()

    return instance_ufos
def run_from_designspace(
    self,
    designspace_path,
    output=(),
    interpolate=False,
    masters_as_instances=False,
    interpolate_binary_layout=False,
    round_instances=False,
    feature_writers=None,
    expand_features_to_instances=False,
    **kwargs
):
    """Run toolchain from a DesignSpace document to produce either static
    instance fonts (ttf or otf), interpolatable or variable fonts.

    Args:
        designspace_path: Path to designspace document.
        output: Iterable of output format names; split into "static" and
            "interpolatable" groups and dispatched accordingly.
        interpolate: If True output all instance fonts, otherwise just
            masters. If the value is a string, only build instance(s) that
            match given name. The string is compiled into a regular
            expression and matched against the "name" attribute of
            designspace instances using `re.fullmatch`.
        masters_as_instances: If True, output master fonts as instances.
        interpolate_binary_layout: Interpolate layout tables from compiled
            master binaries.
        round_instances: apply integer rounding when interpolating with
            MutatorMath.
        feature_writers: optional list of feature writers; when None, a
            custom configuration stored in the designspace <lib> element
            is used if present.
        expand_features_to_instances: write the expanded master feature
            file into every interpolated instance UFO.
        kwargs: Arguments passed along to run_from_ufos.

    Raises:
        TypeError: "variable" or "interpolatable" outputs are incompatible
            with arguments "interpolate", "masters_as_instances", and
            "interpolate_binary_layout".
    """
    interp_outputs = INTERPOLATABLE_OUTPUTS.intersection(output)
    static_outputs = STATIC_OUTPUTS.intersection(output)
    if interp_outputs:
        # Use an explicit name->value mapping instead of the previous
        # locals()[argname] lookup: locals() is not guaranteed to be a
        # reliable view of function arguments across implementations and
        # tooling (see PEP 558), whereas this mapping is unambiguous.
        incompatible_args = {
            "interpolate": interpolate,
            "masters_as_instances": masters_as_instances,
            "interpolate_binary_layout": interpolate_binary_layout,
        }
        for argname, value in incompatible_args.items():
            if value:
                raise TypeError(
                    '"%s" argument incompatible with output %r'
                    % (argname, ", ".join(sorted(interp_outputs)))
                )
    designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace_path)
    # if no --feature-writers option was passed, check in the designspace's
    # <lib> element if user supplied a custom featureWriters configuration;
    # if so, use that for all the UFOs built from this designspace
    if feature_writers is None and FEATURE_WRITERS_KEY in designspace.lib:
        feature_writers = loadFeatureWriters(designspace)
    if static_outputs:
        self._run_from_designspace_static(
            designspace,
            outputs=static_outputs,
            interpolate=interpolate,
            masters_as_instances=masters_as_instances,
            interpolate_binary_layout=interpolate_binary_layout,
            round_instances=round_instances,
            feature_writers=feature_writers,
            expand_features_to_instances=expand_features_to_instances,
            **kwargs
        )
    if interp_outputs:
        self._run_from_designspace_interpolatable(
            designspace,
            outputs=interp_outputs,
            feature_writers=feature_writers,
            **kwargs
        )
def _run_from_designspace_static(
    self,
    designspace,
    outputs,
    interpolate=False,
    masters_as_instances=False,
    interpolate_binary_layout=False,
    round_instances=False,
    feature_writers=None,
    expand_features_to_instances=False,
    **kwargs
):
    """Collect master and/or interpolated instance UFOs from the
    designspace and hand them to run_from_ufos for static output builds.
    """
    source_fonts = []
    # Masters take part either when no interpolation was requested, or
    # when they were explicitly asked for as instances.
    if masters_as_instances or not interpolate:
        source_fonts.extend(s.path for s in designspace.sources if s.path)
    if interpolate:
        # A string value acts as a name filter for the instances to build.
        name_pattern = None
        if isinstance(interpolate, basestring):
            name_pattern = interpolate
        source_fonts.extend(
            self.interpolate_instance_ufos(
                designspace,
                include=name_pattern,
                round_instances=round_instances,
                expand_features_to_instances=expand_features_to_instances,
            )
        )
    # interpolate_binary_layout may be False (disabled), True, or a string
    # naming the directory with the compiled master binaries.
    layout_source = layout_dir = None
    if interpolate_binary_layout is not False:
        layout_source = designspace
        if isinstance(interpolate_binary_layout, basestring):
            layout_dir = interpolate_binary_layout
    self.run_from_ufos(
        source_fonts,
        output=outputs,
        is_instance=(interpolate or masters_as_instances),
        interpolate_layout_from=layout_source,
        interpolate_layout_dir=layout_dir,
        feature_writers=feature_writers,
        **kwargs
    )
def _run_from_designspace_interpolatable(
    self, designspace, outputs, output_path=None, output_dir=None, **kwargs
):
    """Build variable and/or interpolatable outputs from a designspace,
    compiling each outline flavor (TTF/OTF masters) at most once.
    """
    built = {}

    def masters(ttf):
        # Lazily build and memoize the interpolatable masters per flavor.
        if ttf not in built:
            builder = (
                self.build_interpolatable_ttfs
                if ttf
                else self.build_interpolatable_otfs
            )
            built[ttf] = builder(designspace, **kwargs)
        return built[ttf]

    if "variable" in outputs:
        self.build_variable_font(
            masters(True), output_path=output_path, output_dir=output_dir
        )
    if "ttf-interpolatable" in outputs:
        self._save_interpolatable_fonts(masters(True), output_dir, ttf=True)
    if "variable-cff2" in outputs:
        self.build_variable_font(
            masters(False),
            output_path=output_path,
            output_dir=output_dir,
            ttf=False,
        )
    if "otf-interpolatable" in outputs:
        self._save_interpolatable_fonts(masters(False), output_dir, ttf=False)
def run_from_ufos(self, ufos, output=(), **kwargs):
    """Run toolchain from UFO sources.

    Args:
        ufos: List of UFO sources, as either paths or opened objects.
        output: List of output formats to generate.
        kwargs: Arguments passed along to save_otfs.
    """
    if set(output) == {"ufo"}:
        return
    # 'ufos' may be a glob pattern string, or a list mixing paths and
    # already-open Font objects; normalize to parallel fonts/paths lists.
    if isinstance(ufos, basestring):
        font_paths = glob.glob(ufos)
        fonts = [Font(path) for path in font_paths]
    elif isinstance(ufos, list):
        fonts = [
            Font(item) if isinstance(item, basestring) else item
            for item in ufos
        ]
        font_paths = [font.path for font in fonts]
    else:
        raise FontmakeError(
            "UFOs parameter is neither a defcon.Font object, a path or a glob, "
            "nor a list of any of these.",
            ufos,
        )
    need_reload = False
    if "otf" in output:
        self.build_otfs(fonts, **kwargs)
        need_reload = True
    if "ttf" in output:
        if need_reload:
            # The OTF build mutates the fonts in place; reload pristine
            # copies from disk before compiling TrueType outlines.
            fonts = [Font(path) for path in font_paths]
        self.build_ttfs(fonts, **kwargs)
        need_reload = True
@staticmethod
def _search_instances(designspace, pattern):
    """Return an ordered {name: filename} map of the designspace
    instances whose 'name' fully matches the given regex pattern.
    """
    # is 'name' optional? 'filename' certainly must not be
    matches = OrderedDict(
        (instance.name, instance.filename)
        for instance in designspace.instances
        if fullmatch(pattern, instance.name)
    )
    if not matches:
        raise FontmakeError("No instance found with %r" % pattern)
    return matches
def _font_name(self, ufo):
"""Generate a postscript-style font name."""
family_name = (
ufo.info.familyName.replace(" ", "")
if ufo.info.familyName is not None
else "None"
)
style_name = (
ufo.info.styleName.replace(" ", "")
if ufo.info.styleName is not None
else "None"
)
return "{}-{}".format(family_name, style_name)
def _output_dir(
self,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
):
"""Generate an output directory.
Args:
ext: extension string.
is_instance: The output is instance font or not.
interpolatable: The output is interpolatable or not.
autohinted: The output is autohinted or not.
is_variable: The output is variable font or not.
Return:
output directory string.
"""
assert not (is_variable and any([is_instance, interpolatable]))
# FIXME? Use user configurable destination folders.
if is_variable:
dir_prefix = "variable_"
elif is_instance:
dir_prefix = "instance_"
else:
dir_prefix = "master_"
dir_suffix = "_interpolatable" if interpolatable else ""
output_dir = dir_prefix + ext + dir_suffix
if autohinted:
output_dir = os.path.join("autohinted", output_dir)
return output_dir
def _output_path(
    self,
    ufo_or_font_name,
    ext,
    is_instance=False,
    interpolatable=False,
    autohinted=False,
    is_variable=False,
    output_dir=None,
    suffix=None,
):
    """Generate output path for a font file with given extension."""
    # Accept either a ready-made font name or a UFO object; when the UFO
    # has a path on disk, derive the name from its basename.
    if isinstance(ufo_or_font_name, basestring):
        font_name = ufo_or_font_name
    elif ufo_or_font_name.path:
        base = os.path.basename(os.path.normpath(ufo_or_font_name.path))
        font_name = os.path.splitext(base)[0]
    else:
        font_name = self._font_name(ufo_or_font_name)
    if output_dir is None:
        output_dir = self._output_dir(
            ext, is_instance, interpolatable, autohinted, is_variable
        )
    # Make sure the destination directory exists before returning the path.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    basename = "{}-{}".format(font_name, suffix) if suffix else font_name
    return os.path.join(output_dir, "{}.{}".format(basename, ext))
def _designspace_locations(self, designspace):
    """Map font filenames to their locations in a designspace."""
    # Returns [sources_map, instances_map], each {normalized path: location}.
    return [
        {_normpath(element.path): element.location for element in group}
        for group in (designspace.sources, designspace.instances)
    ]
def _closest_location(self, location_map, target):
"""Return path of font whose location is closest to target."""
def dist(a, b):
return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys()))
paths = iter(location_map.keys())
closest = next(paths)
closest_dist = dist(target, location_map[closest])
for path in paths:
cur_dist = dist(target, location_map[path])
if cur_dist < closest_dist:
closest = path
closest_dist = cur_dist
return closest
|
googlefonts/fontmake | Lib/fontmake/font_project.py | FontProject.build_interpolatable_ttfs | python | def build_interpolatable_ttfs(self, designspace, **kwargs):
return self._build_interpolatable_masters(designspace, ttf=True, **kwargs) | Build OpenType binaries with interpolatable TrueType outlines
from DesignSpaceDocument object. | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L329-L333 | [
"def _build_interpolatable_masters(\n self,\n designspace,\n ttf,\n use_production_names=None,\n reverse_direction=True,\n conversion_error=None,\n feature_writers=None,\n cff_round_tolerance=None,\n **kwargs\n):\n if hasattr(designspace, \"__fspath__\"):\n ds_path = designspace.__fspath__()\n if isinstance(designspace, basestring):\n ds_path = designspace\n else:\n # reload designspace from its path so we have a new copy\n # that can be modified in-place.\n ds_path = designspace.path\n if ds_path is not None:\n designspace = designspaceLib.DesignSpaceDocument.fromfile(ds_path)\n\n self._load_designspace_sources(designspace)\n\n if ttf:\n return ufo2ft.compileInterpolatableTTFsFromDS(\n designspace,\n useProductionNames=use_production_names,\n reverseDirection=reverse_direction,\n cubicConversionError=conversion_error,\n featureWriters=feature_writers,\n inplace=True,\n )\n else:\n return ufo2ft.compileInterpolatableOTFsFromDS(\n designspace,\n useProductionNames=use_production_names,\n roundTolerance=cff_round_tolerance,\n featureWriters=feature_writers,\n inplace=True,\n )\n"
] | class FontProject(object):
"""Provides methods for building fonts."""
def __init__(self, timing=False, verbose="INFO", validate_ufo=False):
logging.basicConfig(level=getattr(logging, verbose.upper()))
logging.getLogger("fontTools.subset").setLevel(logging.WARNING)
if timing:
configLogger(logger=timer.logger, level=logging.DEBUG)
logger.debug(
"ufoLib UFO validation is %s", "enabled" if validate_ufo else "disabled"
)
setUfoLibReadValidate(validate_ufo)
setUfoLibWriteValidate(validate_ufo)
@timer()
def build_master_ufos(
self,
glyphs_path,
designspace_path=None,
master_dir=None,
instance_dir=None,
family_name=None,
mti_source=None,
):
"""Build UFOs and MutatorMath designspace from Glyphs source."""
import glyphsLib
if master_dir is None:
master_dir = self._output_dir("ufo")
if not os.path.isdir(master_dir):
os.mkdir(master_dir)
if instance_dir is None:
instance_dir = self._output_dir("ufo", is_instance=True)
if not os.path.isdir(instance_dir):
os.mkdir(instance_dir)
font = glyphsLib.GSFont(glyphs_path)
if designspace_path is not None:
designspace_dir = os.path.dirname(designspace_path)
else:
designspace_dir = master_dir
# glyphsLib.to_designspace expects instance_dir to be relative
instance_dir = os.path.relpath(instance_dir, designspace_dir)
designspace = glyphsLib.to_designspace(
font, family_name=family_name, instance_dir=instance_dir
)
masters = {}
# multiple sources can have the same font/filename (but different layer),
# we want to save a font only once
for source in designspace.sources:
if source.filename in masters:
assert source.font is masters[source.filename]
continue
ufo_path = os.path.join(master_dir, source.filename)
# no need to also set the relative 'filename' attribute as that
# will be auto-updated on writing the designspace document
source.path = ufo_path
source.font.save(ufo_path)
masters[source.filename] = source.font
if designspace_path is None:
designspace_path = os.path.join(master_dir, designspace.filename)
designspace.write(designspace_path)
if mti_source:
self.add_mti_features_to_master_ufos(mti_source, masters.values())
return designspace_path
@timer()
def add_mti_features_to_master_ufos(self, mti_source, masters):
mti_dir = os.path.dirname(mti_source)
with open(mti_source, "rb") as mti_file:
mti_paths = readPlist(mti_file)
for master in masters:
key = os.path.basename(master.path).rstrip(".ufo")
for table, path in mti_paths[key].items():
with open(os.path.join(mti_dir, path), "rb") as mti_source:
ufo_path = (
"com.github.googlei18n.ufo2ft.mtiFeatures/%s.mti"
% table.strip()
)
master.data[ufo_path] = mti_source.read()
# If we have MTI sources, any Adobe feature files derived from
# the Glyphs file should be ignored. We clear it here because
# it only contains junk information anyway.
master.features.text = ""
master.save()
@_deprecated
@timer()
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
"""Remove overlaps in UFOs' glyphs' contours."""
from booleanOperations import union, BooleanOperationsError
for ufo in ufos:
font_name = self._font_name(ufo)
logger.info("Removing overlaps for " + font_name)
for glyph in ufo:
if not glyph_filter(glyph):
continue
contours = list(glyph)
glyph.clearContours()
try:
union(contours, glyph.getPointPen())
except BooleanOperationsError:
logger.error(
"Failed to remove overlaps for %s: %r", font_name, glyph.name
)
raise
@_deprecated
@timer()
def decompose_glyphs(self, ufos, glyph_filter=lambda g: True):
"""Move components of UFOs' glyphs to their outlines."""
for ufo in ufos:
logger.info("Decomposing glyphs for " + self._font_name(ufo))
for glyph in ufo:
if not glyph.components or not glyph_filter(glyph):
continue
self._deep_copy_contours(ufo, glyph, glyph, Transform())
glyph.clearComponents()
def _deep_copy_contours(self, ufo, parent, component, transformation):
"""Copy contours from component to parent, including nested components."""
for nested in component.components:
self._deep_copy_contours(
ufo,
parent,
ufo[nested.baseGlyph],
transformation.transform(nested.transformation),
)
if component != parent:
pen = TransformPen(parent.getPen(), transformation)
# if the transformation has a negative determinant, it will reverse
# the contour direction of the component
xx, xy, yx, yy = transformation[:4]
if xx * yy - xy * yx < 0:
pen = ReverseContourPen(pen)
component.draw(pen)
@_deprecated
@timer()
def convert_curves(
self, ufos, compatible=False, reverse_direction=True, conversion_error=None
):
from cu2qu.ufo import font_to_quadratic, fonts_to_quadratic
if compatible:
logger.info("Converting curves compatibly")
fonts_to_quadratic(
ufos,
max_err_em=conversion_error,
reverse_direction=reverse_direction,
dump_stats=True,
)
else:
for ufo in ufos:
logger.info("Converting curves for " + self._font_name(ufo))
font_to_quadratic(
ufo,
max_err_em=conversion_error,
reverse_direction=reverse_direction,
dump_stats=True,
)
def build_otfs(self, ufos, **kwargs):
"""Build OpenType binaries with CFF outlines."""
self.save_otfs(ufos, **kwargs)
def build_ttfs(self, ufos, **kwargs):
"""Build OpenType binaries with TrueType outlines."""
self.save_otfs(ufos, ttf=True, **kwargs)
@staticmethod
def _load_designspace_sources(designspace):
# set source.font attributes, but only load fonts once
masters = {}
for source in designspace.sources:
if source.path in masters:
source.font = masters[source.path]
else:
assert source.path is not None
source.font = Font(source.path)
masters[source.path] = source.font
def _build_interpolatable_masters(
self,
designspace,
ttf,
use_production_names=None,
reverse_direction=True,
conversion_error=None,
feature_writers=None,
cff_round_tolerance=None,
**kwargs
):
if hasattr(designspace, "__fspath__"):
ds_path = designspace.__fspath__()
if isinstance(designspace, basestring):
ds_path = designspace
else:
# reload designspace from its path so we have a new copy
# that can be modified in-place.
ds_path = designspace.path
if ds_path is not None:
designspace = designspaceLib.DesignSpaceDocument.fromfile(ds_path)
self._load_designspace_sources(designspace)
if ttf:
return ufo2ft.compileInterpolatableTTFsFromDS(
designspace,
useProductionNames=use_production_names,
reverseDirection=reverse_direction,
cubicConversionError=conversion_error,
featureWriters=feature_writers,
inplace=True,
)
else:
return ufo2ft.compileInterpolatableOTFsFromDS(
designspace,
useProductionNames=use_production_names,
roundTolerance=cff_round_tolerance,
featureWriters=feature_writers,
inplace=True,
)
def build_interpolatable_otfs(self, designspace, **kwargs):
"""Build OpenType binaries with interpolatable TrueType outlines
from DesignSpaceDocument object.
"""
return self._build_interpolatable_masters(designspace, ttf=False, **kwargs)
def build_variable_font(
self,
designspace,
output_path=None,
output_dir=None,
master_bin_dir=None,
ttf=True,
):
"""Build OpenType variable font from masters in a designspace."""
assert not (output_path and output_dir), "mutually exclusive args"
ext = "ttf" if ttf else "otf"
if hasattr(designspace, "__fspath__"):
designspace = designspace.__fspath__()
if isinstance(designspace, basestring):
designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace)
if master_bin_dir is None:
master_bin_dir = self._output_dir(ext, interpolatable=True)
finder = partial(_varLib_finder, directory=master_bin_dir)
else:
assert all(isinstance(s.font, TTFont) for s in designspace.sources)
finder = lambda s: s # noqa: E731
if output_path is None:
output_path = (
os.path.splitext(os.path.basename(designspace.path))[0] + "-VF"
)
output_path = self._output_path(
output_path, ext, is_variable=True, output_dir=output_dir
)
logger.info("Building variable font " + output_path)
font, _, _ = varLib.build(designspace, finder)
font.save(output_path)
def _iter_compile(self, ufos, ttf=False, **kwargs):
# generator function that calls ufo2ft compiler for each ufo and
# yields ttFont instances
options = dict(kwargs)
if ttf:
for key in ("optimizeCFF", "roundTolerance"):
options.pop(key, None)
compile_func, fmt = ufo2ft.compileTTF, "TTF"
else:
for key in ("cubicConversionError", "reverseDirection"):
options.pop(key, None)
compile_func, fmt = ufo2ft.compileOTF, "OTF"
for ufo in ufos:
name = self._font_name(ufo)
logger.info("Building {} for {}".format(fmt, name))
yield compile_func(ufo, **options)
@timer()
def save_otfs(
self,
ufos,
ttf=False,
is_instance=False,
interpolatable=False,
use_afdko=False,
autohint=None,
subset=None,
use_production_names=None,
subroutinize=None, # deprecated
optimize_cff=CFFOptimization.NONE,
cff_round_tolerance=None,
remove_overlaps=True,
overlaps_backend=None,
reverse_direction=True,
conversion_error=None,
feature_writers=None,
interpolate_layout_from=None,
interpolate_layout_dir=None,
output_path=None,
output_dir=None,
inplace=True,
):
"""Build OpenType binaries from UFOs.
Args:
ufos: Font objects to compile.
ttf: If True, build fonts with TrueType outlines and .ttf extension.
is_instance: If output fonts are instances, for generating paths.
interpolatable: If output is interpolatable, for generating paths.
use_afdko: If True, use AFDKO to compile feature source.
autohint: Parameters to provide to ttfautohint. If not provided, the
autohinting step is skipped.
subset: Whether to subset the output according to data in the UFOs.
If not provided, also determined by flags in the UFOs.
use_production_names: Whether to use production glyph names in the
output. If not provided, determined by flags in the UFOs.
subroutinize: If True, subroutinize CFF outlines in output.
cff_round_tolerance (float): controls the rounding of point
coordinates in CFF table. It is defined as the maximum absolute
difference between the original float and the rounded integer
value. By default, all floats are rounded to integer (tolerance
0.5); a value of 0 completely disables rounding; values in
between only round floats which are close to their integral
part within the tolerated range. Ignored if ttf=True.
remove_overlaps: If True, remove overlaps in glyph shapes.
overlaps_backend: name of the library to remove overlaps. Can be
either "booleanOperations" (default) or "pathops".
reverse_direction: If True, reverse contour directions when
compiling TrueType outlines.
conversion_error: Error to allow when converting cubic CFF contours
to quadratic TrueType contours.
feature_writers: list of ufo2ft-compatible feature writer classes
or pre-initialized objects that are passed on to ufo2ft
feature compiler to generate automatic feature code. The
default value (None) means that ufo2ft will use its built-in
default feature writers (for kern, mark, mkmk, etc.). An empty
list ([]) will skip any automatic feature generation.
interpolate_layout_from: A DesignSpaceDocument object to give varLib
for interpolating layout tables to use in output.
interpolate_layout_dir: Directory containing the compiled master
fonts to use for interpolating binary layout tables.
output_path: output font file path. Only works when the input
'ufos' list contains a single font.
output_dir: directory where to save output files. Mutually
exclusive with 'output_path' argument.
"""
assert not (output_path and output_dir), "mutually exclusive args"
if output_path is not None and len(ufos) > 1:
raise ValueError("output_path requires a single input")
if subroutinize is not None:
import warnings
warnings.warn(
"the 'subroutinize' argument is deprecated, use 'optimize_cff'",
UserWarning,
)
if subroutinize:
optimize_cff = CFFOptimization.SUBROUTINIZE
else:
# for b/w compatibility, we still run the charstring specializer
# even when --no-subroutinize is used. Use the new --optimize-cff
# option to disable both specilization and subroutinization
optimize_cff = CFFOptimization.SPECIALIZE
ext = "ttf" if ttf else "otf"
if interpolate_layout_from is not None:
if interpolate_layout_dir is None:
interpolate_layout_dir = self._output_dir(
ext, is_instance=False, interpolatable=interpolatable
)
finder = partial(_varLib_finder, directory=interpolate_layout_dir, ext=ext)
# no need to generate automatic features in ufo2ft, since here we
# are interpolating precompiled GPOS table with fontTools.varLib.
# An empty 'featureWriters' list tells ufo2ft to not generate any
# automatic features.
# TODO: Add an argument to ufo2ft.compileOTF/compileTTF to
# completely skip compiling features into OTL tables
feature_writers = []
compiler_options = dict(
useProductionNames=use_production_names,
reverseDirection=reverse_direction,
cubicConversionError=conversion_error,
featureWriters=feature_writers,
inplace=True, # avoid extra copy
)
if use_afdko:
compiler_options["featureCompilerClass"] = FDKFeatureCompiler
if interpolatable:
if not ttf:
raise NotImplementedError("interpolatable CFF not supported yet")
logger.info("Building interpolation-compatible TTFs")
fonts = ufo2ft.compileInterpolatableTTFs(ufos, **compiler_options)
else:
fonts = self._iter_compile(
ufos,
ttf,
removeOverlaps=remove_overlaps,
overlapsBackend=overlaps_backend,
optimizeCFF=optimize_cff,
roundTolerance=cff_round_tolerance,
**compiler_options
)
do_autohint = ttf and autohint is not None
for font, ufo in zip(fonts, ufos):
if interpolate_layout_from is not None:
master_locations, instance_locations = self._designspace_locations(
interpolate_layout_from
)
loc = instance_locations[_normpath(ufo.path)]
gpos_src = interpolate_layout(
interpolate_layout_from, loc, finder, mapped=True
)
font["GPOS"] = gpos_src["GPOS"]
gsub_src = TTFont(finder(self._closest_location(master_locations, loc)))
if "GDEF" in gsub_src:
font["GDEF"] = gsub_src["GDEF"]
if "GSUB" in gsub_src:
font["GSUB"] = gsub_src["GSUB"]
if do_autohint:
# if we are autohinting, we save the unhinted font to a
# temporary path, and the hinted one to the final destination
fd, otf_path = tempfile.mkstemp("." + ext)
os.close(fd)
elif output_path is None:
otf_path = self._output_path(
ufo, ext, is_instance, interpolatable, output_dir=output_dir
)
else:
otf_path = output_path
logger.info("Saving %s", otf_path)
font.save(otf_path)
# 'subset' is an Optional[bool], can be None, True or False.
# When False, we never subset; when True, we always do; when
# None (default), we check the presence of custom parameters
if subset is False:
pass
elif subset is True or (
(KEEP_GLYPHS_OLD_KEY in ufo.lib or KEEP_GLYPHS_NEW_KEY in ufo.lib)
or any(glyph.lib.get(GLYPH_EXPORT_KEY, True) is False for glyph in ufo)
):
self.subset_otf_from_ufo(otf_path, ufo)
if not do_autohint:
continue
if output_path is not None:
hinted_otf_path = output_path
else:
hinted_otf_path = self._output_path(
ufo,
ext,
is_instance,
interpolatable,
autohinted=True,
output_dir=output_dir,
)
try:
ttfautohint(otf_path, hinted_otf_path, args=autohint)
except TTFAError:
# copy unhinted font to destination before re-raising error
shutil.copyfile(otf_path, hinted_otf_path)
raise
finally:
# must clean up temp file
os.remove(otf_path)
def _save_interpolatable_fonts(self, designspace, output_dir, ttf):
ext = "ttf" if ttf else "otf"
for source in designspace.sources:
assert isinstance(source.font, TTFont)
otf_path = self._output_path(
source,
ext,
is_instance=False,
interpolatable=True,
output_dir=output_dir,
suffix=source.layerName,
)
logger.info("Saving %s", otf_path)
source.font.save(otf_path)
source.path = otf_path
source.layerName = None
for instance in designspace.instances:
instance.path = instance.filename = None
if output_dir is None:
output_dir = self._output_dir(ext, interpolatable=True)
designspace_path = os.path.join(output_dir, os.path.basename(designspace.path))
logger.info("Saving %s", designspace_path)
designspace.write(designspace_path)
def subset_otf_from_ufo(self, otf_path, ufo):
"""Subset a font using export flags set by glyphsLib.
There are two more settings that can change export behavior:
"Export Glyphs" and "Remove Glyphs", which are currently not supported
for complexity reasons. See
https://github.com/googlei18n/glyphsLib/issues/295.
"""
from fontTools import subset
# ufo2ft always inserts a ".notdef" glyph as the first glyph
ufo_order = makeOfficialGlyphOrder(ufo)
if ".notdef" not in ufo_order:
ufo_order.insert(0, ".notdef")
ot_order = TTFont(otf_path).getGlyphOrder()
assert ot_order[0] == ".notdef"
assert len(ufo_order) == len(ot_order)
for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY):
keep_glyphs_list = ufo.lib.get(key)
if keep_glyphs_list is not None:
keep_glyphs = set(keep_glyphs_list)
break
else:
keep_glyphs = None
include = []
for source_name, binary_name in zip(ufo_order, ot_order):
if keep_glyphs and source_name not in keep_glyphs:
continue
if source_name in ufo:
exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True)
if not exported:
continue
include.append(binary_name)
# copied from nototools.subset
opt = subset.Options()
opt.name_IDs = ["*"]
opt.name_legacy = True
opt.name_languages = ["*"]
opt.layout_features = ["*"]
opt.notdef_outline = True
opt.recalc_bounds = True
opt.recalc_timestamp = True
opt.canonical_order = True
opt.glyph_names = True
font = subset.load_font(otf_path, opt, lazy=False)
subsetter = subset.Subsetter(options=opt)
subsetter.populate(glyphs=include)
subsetter.subset(font)
subset.save_font(font, otf_path, opt)
def run_from_glyphs(
self,
glyphs_path,
designspace_path=None,
master_dir=None,
instance_dir=None,
family_name=None,
mti_source=None,
**kwargs
):
"""Run toolchain from Glyphs source.
Args:
glyphs_path: Path to source file.
designspace_path: Output path of generated designspace document.
By default it's "<family_name>[-<base_style>].designspace".
master_dir: Directory where to save UFO masters (default:
"master_ufo").
instance_dir: Directory where to save UFO instances (default:
"instance_ufo").
family_name: If provided, uses this family name in the output.
mti_source: Path to property list file containing a dictionary
mapping UFO masters to dictionaries mapping layout table
tags to MTI source paths which should be compiled into
those tables.
kwargs: Arguments passed along to run_from_designspace.
"""
logger.info("Building master UFOs and designspace from Glyphs source")
designspace_path = self.build_master_ufos(
glyphs_path,
designspace_path=designspace_path,
master_dir=master_dir,
instance_dir=instance_dir,
family_name=family_name,
mti_source=mti_source,
)
self.run_from_designspace(designspace_path, **kwargs)
def interpolate_instance_ufos(
self,
designspace,
include=None,
round_instances=False,
expand_features_to_instances=False,
):
"""Interpolate master UFOs with MutatorMath and return instance UFOs.
Args:
designspace: a DesignSpaceDocument object containing sources and
instances.
include (str): optional regular expression pattern to match the
DS instance 'name' attribute and only interpolate the matching
instances.
round_instances (bool): round instances' coordinates to integer.
expand_features_to_instances: parses the master feature file, expands all
include()s and writes the resulting full feature file to all instance
UFOs. Use this if you share feature files among masters in external
files. Otherwise, the relative include paths can break as instances
may end up elsewhere. Only done on interpolation.
Returns:
list of defcon.Font objects corresponding to the UFO instances.
Raises:
FontmakeError: if any of the sources defines a custom 'layer', for
this is not supported by MutatorMath.
ValueError: "expand_features_to_instances" is True but no source in the
designspace document is designated with '<features copy="1"/>'.
"""
from glyphsLib.interpolation import apply_instance_data
from mutatorMath.ufo.document import DesignSpaceDocumentReader
if any(source.layerName is not None for source in designspace.sources):
raise FontmakeError(
"MutatorMath doesn't support DesignSpace sources with 'layer' "
"attribute"
)
# TODO: replace mutatorMath with ufoProcessor?
builder = DesignSpaceDocumentReader(
designspace.path, ufoVersion=3, roundGeometry=round_instances, verbose=True
)
logger.info("Interpolating master UFOs from designspace")
if include is not None:
instances = self._search_instances(designspace, pattern=include)
for instance_name in instances:
builder.readInstance(("name", instance_name))
filenames = set(instances.values())
else:
builder.readInstances()
filenames = None # will include all instances
logger.info("Applying instance data from designspace")
instance_ufos = apply_instance_data(designspace, include_filenames=filenames)
if expand_features_to_instances:
logger.debug("Expanding features to instance UFOs")
master_source = next(
(s for s in designspace.sources if s.copyFeatures), None
)
if not master_source:
raise ValueError("No source is designated as the master for features.")
else:
master_source_font = builder.sources[master_source.name][0]
master_source_features = parseLayoutFeatures(master_source_font).asFea()
for instance_ufo in instance_ufos:
instance_ufo.features.text = master_source_features
instance_ufo.save()
return instance_ufos
def run_from_designspace(
self,
designspace_path,
output=(),
interpolate=False,
masters_as_instances=False,
interpolate_binary_layout=False,
round_instances=False,
feature_writers=None,
expand_features_to_instances=False,
**kwargs
):
"""Run toolchain from a DesignSpace document to produce either static
instance fonts (ttf or otf), interpolatable or variable fonts.
Args:
designspace_path: Path to designspace document.
interpolate: If True output all instance fonts, otherwise just
masters. If the value is a string, only build instance(s) that
match given name. The string is compiled into a regular
expression and matched against the "name" attribute of
designspace instances using `re.fullmatch`.
masters_as_instances: If True, output master fonts as instances.
interpolate_binary_layout: Interpolate layout tables from compiled
master binaries.
round_instances: apply integer rounding when interpolating with
MutatorMath.
kwargs: Arguments passed along to run_from_ufos.
Raises:
TypeError: "variable" or "interpolatable" outputs are incompatible
with arguments "interpolate", "masters_as_instances", and
"interpolate_binary_layout".
"""
interp_outputs = INTERPOLATABLE_OUTPUTS.intersection(output)
static_outputs = STATIC_OUTPUTS.intersection(output)
if interp_outputs:
for argname in (
"interpolate",
"masters_as_instances",
"interpolate_binary_layout",
):
if locals()[argname]:
raise TypeError(
'"%s" argument incompatible with output %r'
% (argname, ", ".join(sorted(interp_outputs)))
)
designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace_path)
# if no --feature-writers option was passed, check in the designspace's
# <lib> element if user supplied a custom featureWriters configuration;
# if so, use that for all the UFOs built from this designspace
if feature_writers is None and FEATURE_WRITERS_KEY in designspace.lib:
feature_writers = loadFeatureWriters(designspace)
if static_outputs:
self._run_from_designspace_static(
designspace,
outputs=static_outputs,
interpolate=interpolate,
masters_as_instances=masters_as_instances,
interpolate_binary_layout=interpolate_binary_layout,
round_instances=round_instances,
feature_writers=feature_writers,
expand_features_to_instances=expand_features_to_instances,
**kwargs
)
if interp_outputs:
self._run_from_designspace_interpolatable(
designspace,
outputs=interp_outputs,
feature_writers=feature_writers,
**kwargs
)
    def _run_from_designspace_static(
        self,
        designspace,
        outputs,
        interpolate=False,
        masters_as_instances=False,
        interpolate_binary_layout=False,
        round_instances=False,
        feature_writers=None,
        expand_features_to_instances=False,
        **kwargs
    ):
        """Build static (non-variable) binaries from a designspace document.

        Collects the UFOs to compile -- the designspace's master sources
        and/or the instances interpolated with MutatorMath -- then delegates
        the actual compilation to run_from_ufos.

        Args:
            designspace: a DesignSpaceDocument object.
            outputs: set of static output format strings (e.g. {"ttf"}).
            interpolate: False, True, or an instance-name pattern string.
            masters_as_instances: also output the masters as instances.
            interpolate_binary_layout: False, True, or a directory string
                naming the compiled master binaries used to interpolate
                binary layout tables.
            round_instances: round coordinates when interpolating.
            feature_writers: passed through to run_from_ufos.
            expand_features_to_instances: passed to interpolate_instance_ufos.
            kwargs: extra arguments passed along to run_from_ufos.
        """
        ufos = []
        # Masters are compiled when not interpolating, and additionally
        # (besides the instances) when explicitly requested as instances.
        if not interpolate or masters_as_instances:
            ufos.extend((s.path for s in designspace.sources if s.path))
        if interpolate:
            # A string value acts as a regex pattern restricting which
            # designspace instances get interpolated.
            pattern = interpolate if isinstance(interpolate, basestring) else None
            ufos.extend(
                self.interpolate_instance_ufos(
                    designspace,
                    include=pattern,
                    round_instances=round_instances,
                    expand_features_to_instances=expand_features_to_instances,
                )
            )
        # interpolate_binary_layout is tri-state: False disables layout
        # interpolation; True enables it with the default master directory;
        # a string names the directory containing the compiled masters.
        if interpolate_binary_layout is False:
            interpolate_layout_from = interpolate_layout_dir = None
        else:
            interpolate_layout_from = designspace
            if isinstance(interpolate_binary_layout, basestring):
                interpolate_layout_dir = interpolate_binary_layout
            else:
                interpolate_layout_dir = None
        self.run_from_ufos(
            ufos,
            output=outputs,
            is_instance=(interpolate or masters_as_instances),
            interpolate_layout_from=interpolate_layout_from,
            interpolate_layout_dir=interpolate_layout_dir,
            feature_writers=feature_writers,
            **kwargs
        )
def _run_from_designspace_interpolatable(
self, designspace, outputs, output_path=None, output_dir=None, **kwargs
):
ttf_designspace = otf_designspace = None
if "variable" in outputs:
ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
self.build_variable_font(
ttf_designspace, output_path=output_path, output_dir=output_dir
)
if "ttf-interpolatable" in outputs:
if ttf_designspace is None:
ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
self._save_interpolatable_fonts(ttf_designspace, output_dir, ttf=True)
if "variable-cff2" in outputs:
otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
self.build_variable_font(
otf_designspace,
output_path=output_path,
output_dir=output_dir,
ttf=False,
)
if "otf-interpolatable" in outputs:
if otf_designspace is None:
otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
self._save_interpolatable_fonts(otf_designspace, output_dir, ttf=False)
def run_from_ufos(self, ufos, output=(), **kwargs):
"""Run toolchain from UFO sources.
Args:
ufos: List of UFO sources, as either paths or opened objects.
output: List of output formats to generate.
kwargs: Arguments passed along to save_otfs.
"""
if set(output) == {"ufo"}:
return
# the `ufos` parameter can be a list of UFO objects
# or it can be a path (string) with a glob syntax
ufo_paths = []
if isinstance(ufos, basestring):
ufo_paths = glob.glob(ufos)
ufos = [Font(x) for x in ufo_paths]
elif isinstance(ufos, list):
# ufos can be either paths or open Font objects, so normalize them
ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
ufo_paths = [x.path for x in ufos]
else:
raise FontmakeError(
"UFOs parameter is neither a defcon.Font object, a path or a glob, "
"nor a list of any of these.",
ufos,
)
need_reload = False
if "otf" in output:
self.build_otfs(ufos, **kwargs)
need_reload = True
if "ttf" in output:
if need_reload:
ufos = [Font(path) for path in ufo_paths]
self.build_ttfs(ufos, **kwargs)
need_reload = True
@staticmethod
def _search_instances(designspace, pattern):
instances = OrderedDict()
for instance in designspace.instances:
# is 'name' optional? 'filename' certainly must not be
if fullmatch(pattern, instance.name):
instances[instance.name] = instance.filename
if not instances:
raise FontmakeError("No instance found with %r" % pattern)
return instances
def _font_name(self, ufo):
"""Generate a postscript-style font name."""
family_name = (
ufo.info.familyName.replace(" ", "")
if ufo.info.familyName is not None
else "None"
)
style_name = (
ufo.info.styleName.replace(" ", "")
if ufo.info.styleName is not None
else "None"
)
return "{}-{}".format(family_name, style_name)
def _output_dir(
self,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
):
"""Generate an output directory.
Args:
ext: extension string.
is_instance: The output is instance font or not.
interpolatable: The output is interpolatable or not.
autohinted: The output is autohinted or not.
is_variable: The output is variable font or not.
Return:
output directory string.
"""
assert not (is_variable and any([is_instance, interpolatable]))
# FIXME? Use user configurable destination folders.
if is_variable:
dir_prefix = "variable_"
elif is_instance:
dir_prefix = "instance_"
else:
dir_prefix = "master_"
dir_suffix = "_interpolatable" if interpolatable else ""
output_dir = dir_prefix + ext + dir_suffix
if autohinted:
output_dir = os.path.join("autohinted", output_dir)
return output_dir
def _output_path(
self,
ufo_or_font_name,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
output_dir=None,
suffix=None,
):
"""Generate output path for a font file with given extension."""
if isinstance(ufo_or_font_name, basestring):
font_name = ufo_or_font_name
elif ufo_or_font_name.path:
font_name = os.path.splitext(
os.path.basename(os.path.normpath(ufo_or_font_name.path))
)[0]
else:
font_name = self._font_name(ufo_or_font_name)
if output_dir is None:
output_dir = self._output_dir(
ext, is_instance, interpolatable, autohinted, is_variable
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if suffix:
return os.path.join(output_dir, "{}-{}.{}".format(font_name, suffix, ext))
else:
return os.path.join(output_dir, "{}.{}".format(font_name, ext))
def _designspace_locations(self, designspace):
"""Map font filenames to their locations in a designspace."""
maps = []
for elements in (designspace.sources, designspace.instances):
location_map = {}
for element in elements:
path = _normpath(element.path)
location_map[path] = element.location
maps.append(location_map)
return maps
def _closest_location(self, location_map, target):
"""Return path of font whose location is closest to target."""
def dist(a, b):
return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys()))
paths = iter(location_map.keys())
closest = next(paths)
closest_dist = dist(target, location_map[closest])
for path in paths:
cur_dist = dist(target, location_map[path])
if cur_dist < closest_dist:
closest = path
closest_dist = cur_dist
return closest
|
googlefonts/fontmake | Lib/fontmake/font_project.py | FontProject.build_interpolatable_otfs | python | def build_interpolatable_otfs(self, designspace, **kwargs):
return self._build_interpolatable_masters(designspace, ttf=False, **kwargs) | Build OpenType binaries with interpolatable TrueType outlines
from DesignSpaceDocument object. | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L335-L339 | [
"def _build_interpolatable_masters(\n self,\n designspace,\n ttf,\n use_production_names=None,\n reverse_direction=True,\n conversion_error=None,\n feature_writers=None,\n cff_round_tolerance=None,\n **kwargs\n):\n if hasattr(designspace, \"__fspath__\"):\n ds_path = designspace.__fspath__()\n if isinstance(designspace, basestring):\n ds_path = designspace\n else:\n # reload designspace from its path so we have a new copy\n # that can be modified in-place.\n ds_path = designspace.path\n if ds_path is not None:\n designspace = designspaceLib.DesignSpaceDocument.fromfile(ds_path)\n\n self._load_designspace_sources(designspace)\n\n if ttf:\n return ufo2ft.compileInterpolatableTTFsFromDS(\n designspace,\n useProductionNames=use_production_names,\n reverseDirection=reverse_direction,\n cubicConversionError=conversion_error,\n featureWriters=feature_writers,\n inplace=True,\n )\n else:\n return ufo2ft.compileInterpolatableOTFsFromDS(\n designspace,\n useProductionNames=use_production_names,\n roundTolerance=cff_round_tolerance,\n featureWriters=feature_writers,\n inplace=True,\n )\n"
] | class FontProject(object):
"""Provides methods for building fonts."""
def __init__(self, timing=False, verbose="INFO", validate_ufo=False):
logging.basicConfig(level=getattr(logging, verbose.upper()))
logging.getLogger("fontTools.subset").setLevel(logging.WARNING)
if timing:
configLogger(logger=timer.logger, level=logging.DEBUG)
logger.debug(
"ufoLib UFO validation is %s", "enabled" if validate_ufo else "disabled"
)
setUfoLibReadValidate(validate_ufo)
setUfoLibWriteValidate(validate_ufo)
    @timer()
    def build_master_ufos(
        self,
        glyphs_path,
        designspace_path=None,
        master_dir=None,
        instance_dir=None,
        family_name=None,
        mti_source=None,
    ):
        """Build UFOs and MutatorMath designspace from Glyphs source.

        Args:
            glyphs_path: path to the .glyphs source file.
            designspace_path: where to write the designspace document;
                defaults to a file next to the masters, named by glyphsLib.
            master_dir: directory for the master UFOs (default "master_ufo").
            instance_dir: directory for instance UFOs (default "instance_ufo").
            family_name: optional family name override passed to glyphsLib.
            mti_source: optional plist mapping masters to MTI feature sources;
                when given, the sources are compiled into the master UFOs.

        Returns:
            The path of the written designspace document.
        """
        import glyphsLib

        if master_dir is None:
            master_dir = self._output_dir("ufo")
        if not os.path.isdir(master_dir):
            os.mkdir(master_dir)
        if instance_dir is None:
            instance_dir = self._output_dir("ufo", is_instance=True)
        if not os.path.isdir(instance_dir):
            os.mkdir(instance_dir)
        font = glyphsLib.GSFont(glyphs_path)
        if designspace_path is not None:
            designspace_dir = os.path.dirname(designspace_path)
        else:
            designspace_dir = master_dir
        # glyphsLib.to_designspace expects instance_dir to be relative
        instance_dir = os.path.relpath(instance_dir, designspace_dir)
        designspace = glyphsLib.to_designspace(
            font, family_name=family_name, instance_dir=instance_dir
        )
        masters = {}
        # multiple sources can have the same font/filename (but different layer),
        # we want to save a font only once
        for source in designspace.sources:
            if source.filename in masters:
                assert source.font is masters[source.filename]
                continue
            ufo_path = os.path.join(master_dir, source.filename)
            # no need to also set the relative 'filename' attribute as that
            # will be auto-updated on writing the designspace document
            source.path = ufo_path
            source.font.save(ufo_path)
            masters[source.filename] = source.font
        if designspace_path is None:
            designspace_path = os.path.join(master_dir, designspace.filename)
        designspace.write(designspace_path)
        if mti_source:
            self.add_mti_features_to_master_ufos(mti_source, masters.values())
        return designspace_path
@timer()
def add_mti_features_to_master_ufos(self, mti_source, masters):
mti_dir = os.path.dirname(mti_source)
with open(mti_source, "rb") as mti_file:
mti_paths = readPlist(mti_file)
for master in masters:
key = os.path.basename(master.path).rstrip(".ufo")
for table, path in mti_paths[key].items():
with open(os.path.join(mti_dir, path), "rb") as mti_source:
ufo_path = (
"com.github.googlei18n.ufo2ft.mtiFeatures/%s.mti"
% table.strip()
)
master.data[ufo_path] = mti_source.read()
# If we have MTI sources, any Adobe feature files derived from
# the Glyphs file should be ignored. We clear it here because
# it only contains junk information anyway.
master.features.text = ""
master.save()
@_deprecated
@timer()
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
"""Remove overlaps in UFOs' glyphs' contours."""
from booleanOperations import union, BooleanOperationsError
for ufo in ufos:
font_name = self._font_name(ufo)
logger.info("Removing overlaps for " + font_name)
for glyph in ufo:
if not glyph_filter(glyph):
continue
contours = list(glyph)
glyph.clearContours()
try:
union(contours, glyph.getPointPen())
except BooleanOperationsError:
logger.error(
"Failed to remove overlaps for %s: %r", font_name, glyph.name
)
raise
@_deprecated
@timer()
def decompose_glyphs(self, ufos, glyph_filter=lambda g: True):
"""Move components of UFOs' glyphs to their outlines."""
for ufo in ufos:
logger.info("Decomposing glyphs for " + self._font_name(ufo))
for glyph in ufo:
if not glyph.components or not glyph_filter(glyph):
continue
self._deep_copy_contours(ufo, glyph, glyph, Transform())
glyph.clearComponents()
    def _deep_copy_contours(self, ufo, parent, component, transformation):
        """Copy contours from component to parent, including nested components.

        Recursively draws the outlines of `component` and all of its nested
        components into `parent`'s pen. `transformation` is the affine
        transform accumulated from `parent`'s coordinate space down to
        `component`. The initial call passes the glyph itself as both
        `parent` and `component` with an identity transform.
        """
        for nested in component.components:
            self._deep_copy_contours(
                ufo,
                parent,
                ufo[nested.baseGlyph],
                transformation.transform(nested.transformation),
            )
        # the top-level call (component is parent) only recurses; actual
        # drawing happens for the nested component glyphs
        if component != parent:
            pen = TransformPen(parent.getPen(), transformation)
            # if the transformation has a negative determinant, it will reverse
            # the contour direction of the component
            xx, xy, yx, yy = transformation[:4]
            if xx * yy - xy * yx < 0:
                pen = ReverseContourPen(pen)
            component.draw(pen)
@_deprecated
@timer()
def convert_curves(
self, ufos, compatible=False, reverse_direction=True, conversion_error=None
):
from cu2qu.ufo import font_to_quadratic, fonts_to_quadratic
if compatible:
logger.info("Converting curves compatibly")
fonts_to_quadratic(
ufos,
max_err_em=conversion_error,
reverse_direction=reverse_direction,
dump_stats=True,
)
else:
for ufo in ufos:
logger.info("Converting curves for " + self._font_name(ufo))
font_to_quadratic(
ufo,
max_err_em=conversion_error,
reverse_direction=reverse_direction,
dump_stats=True,
)
def build_otfs(self, ufos, **kwargs):
"""Build OpenType binaries with CFF outlines."""
self.save_otfs(ufos, **kwargs)
def build_ttfs(self, ufos, **kwargs):
"""Build OpenType binaries with TrueType outlines."""
self.save_otfs(ufos, ttf=True, **kwargs)
@staticmethod
def _load_designspace_sources(designspace):
# set source.font attributes, but only load fonts once
masters = {}
for source in designspace.sources:
if source.path in masters:
source.font = masters[source.path]
else:
assert source.path is not None
source.font = Font(source.path)
masters[source.path] = source.font
def _build_interpolatable_masters(
self,
designspace,
ttf,
use_production_names=None,
reverse_direction=True,
conversion_error=None,
feature_writers=None,
cff_round_tolerance=None,
**kwargs
):
if hasattr(designspace, "__fspath__"):
ds_path = designspace.__fspath__()
if isinstance(designspace, basestring):
ds_path = designspace
else:
# reload designspace from its path so we have a new copy
# that can be modified in-place.
ds_path = designspace.path
if ds_path is not None:
designspace = designspaceLib.DesignSpaceDocument.fromfile(ds_path)
self._load_designspace_sources(designspace)
if ttf:
return ufo2ft.compileInterpolatableTTFsFromDS(
designspace,
useProductionNames=use_production_names,
reverseDirection=reverse_direction,
cubicConversionError=conversion_error,
featureWriters=feature_writers,
inplace=True,
)
else:
return ufo2ft.compileInterpolatableOTFsFromDS(
designspace,
useProductionNames=use_production_names,
roundTolerance=cff_round_tolerance,
featureWriters=feature_writers,
inplace=True,
)
def build_interpolatable_ttfs(self, designspace, **kwargs):
"""Build OpenType binaries with interpolatable TrueType outlines
from DesignSpaceDocument object.
"""
return self._build_interpolatable_masters(designspace, ttf=True, **kwargs)
    def build_variable_font(
        self,
        designspace,
        output_path=None,
        output_dir=None,
        master_bin_dir=None,
        ttf=True,
    ):
        """Build OpenType variable font from masters in a designspace.

        Args:
            designspace: path string, path-like object, or DesignSpaceDocument.
            output_path: explicit output file path; mutually exclusive with
                output_dir. Defaults to "<designspace basename>-VF.<ext>".
            master_bin_dir: directory holding the compiled master binaries;
                when None, the conventional interpolatable output dir is used
                and varLib locates masters there. When given, every source is
                expected to already carry a compiled TTFont in source.font.
            ttf: masters/output are TrueType-flavored if True, CFF otherwise.
        """
        assert not (output_path and output_dir), "mutually exclusive args"
        ext = "ttf" if ttf else "otf"
        # normalize path-like and string inputs to a loaded document
        if hasattr(designspace, "__fspath__"):
            designspace = designspace.__fspath__()
        if isinstance(designspace, basestring):
            designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace)
        if master_bin_dir is None:
            master_bin_dir = self._output_dir(ext, interpolatable=True)
            # finder maps a designspace source to its master binary on disk
            finder = partial(_varLib_finder, directory=master_bin_dir)
        else:
            assert all(isinstance(s.font, TTFont) for s in designspace.sources)
            # masters are already in memory; the identity finder hands the
            # source itself to varLib
            finder = lambda s: s  # noqa: E731
        if output_path is None:
            output_path = (
                os.path.splitext(os.path.basename(designspace.path))[0] + "-VF"
            )
        output_path = self._output_path(
            output_path, ext, is_variable=True, output_dir=output_dir
        )
        logger.info("Building variable font " + output_path)
        font, _, _ = varLib.build(designspace, finder)
        font.save(output_path)
def _iter_compile(self, ufos, ttf=False, **kwargs):
# generator function that calls ufo2ft compiler for each ufo and
# yields ttFont instances
options = dict(kwargs)
if ttf:
for key in ("optimizeCFF", "roundTolerance"):
options.pop(key, None)
compile_func, fmt = ufo2ft.compileTTF, "TTF"
else:
for key in ("cubicConversionError", "reverseDirection"):
options.pop(key, None)
compile_func, fmt = ufo2ft.compileOTF, "OTF"
for ufo in ufos:
name = self._font_name(ufo)
logger.info("Building {} for {}".format(fmt, name))
yield compile_func(ufo, **options)
    @timer()
    def save_otfs(
        self,
        ufos,
        ttf=False,
        is_instance=False,
        interpolatable=False,
        use_afdko=False,
        autohint=None,
        subset=None,
        use_production_names=None,
        subroutinize=None,  # deprecated
        optimize_cff=CFFOptimization.NONE,
        cff_round_tolerance=None,
        remove_overlaps=True,
        overlaps_backend=None,
        reverse_direction=True,
        conversion_error=None,
        feature_writers=None,
        interpolate_layout_from=None,
        interpolate_layout_dir=None,
        output_path=None,
        output_dir=None,
        inplace=True,
    ):
        """Build OpenType binaries from UFOs.

        Args:
            ufos: Font objects to compile.
            ttf: If True, build fonts with TrueType outlines and .ttf extension.
            is_instance: If output fonts are instances, for generating paths.
            interpolatable: If output is interpolatable, for generating paths.
            use_afdko: If True, use AFDKO to compile feature source.
            autohint: Parameters to provide to ttfautohint. If not provided, the
                autohinting step is skipped.
            subset: Whether to subset the output according to data in the UFOs.
                If not provided, also determined by flags in the UFOs.
            use_production_names: Whether to use production glyph names in the
                output. If not provided, determined by flags in the UFOs.
            subroutinize: If True, subroutinize CFF outlines in output.
                Deprecated; use optimize_cff instead.
            cff_round_tolerance (float): controls the rounding of point
                coordinates in CFF table. It is defined as the maximum absolute
                difference between the original float and the rounded integer
                value. By default, all floats are rounded to integer (tolerance
                0.5); a value of 0 completely disables rounding; values in
                between only round floats which are close to their integral
                part within the tolerated range. Ignored if ttf=True.
            remove_overlaps: If True, remove overlaps in glyph shapes.
            overlaps_backend: name of the library to remove overlaps. Can be
                either "booleanOperations" (default) or "pathops".
            reverse_direction: If True, reverse contour directions when
                compiling TrueType outlines.
            conversion_error: Error to allow when converting cubic CFF contours
                to quadratic TrueType contours.
            feature_writers: list of ufo2ft-compatible feature writer classes
                or pre-initialized objects that are passed on to ufo2ft
                feature compiler to generate automatic feature code. The
                default value (None) means that ufo2ft will use its built-in
                default feature writers (for kern, mark, mkmk, etc.). An empty
                list ([]) will skip any automatic feature generation.
            interpolate_layout_from: A DesignSpaceDocument object to give varLib
                for interpolating layout tables to use in output.
            interpolate_layout_dir: Directory containing the compiled master
                fonts to use for interpolating binary layout tables.
            output_path: output font file path. Only works when the input
                'ufos' list contains a single font.
            output_dir: directory where to save output files. Mutually
                exclusive with 'output_path' argument.
            inplace: NOTE(review): this parameter is never read in the body
                (compilation always passes inplace=True); presumably kept for
                call-site compatibility -- confirm before removing.
        """
        assert not (output_path and output_dir), "mutually exclusive args"
        if output_path is not None and len(ufos) > 1:
            raise ValueError("output_path requires a single input")
        # map the deprecated tri-state 'subroutinize' onto 'optimize_cff'
        if subroutinize is not None:
            import warnings

            warnings.warn(
                "the 'subroutinize' argument is deprecated, use 'optimize_cff'",
                UserWarning,
            )
            if subroutinize:
                optimize_cff = CFFOptimization.SUBROUTINIZE
            else:
                # for b/w compatibility, we still run the charstring specializer
                # even when --no-subroutinize is used. Use the new --optimize-cff
                # option to disable both specilization and subroutinization
                optimize_cff = CFFOptimization.SPECIALIZE
        ext = "ttf" if ttf else "otf"
        if interpolate_layout_from is not None:
            if interpolate_layout_dir is None:
                interpolate_layout_dir = self._output_dir(
                    ext, is_instance=False, interpolatable=interpolatable
                )
            finder = partial(_varLib_finder, directory=interpolate_layout_dir, ext=ext)
            # no need to generate automatic features in ufo2ft, since here we
            # are interpolating precompiled GPOS table with fontTools.varLib.
            # An empty 'featureWriters' list tells ufo2ft to not generate any
            # automatic features.
            # TODO: Add an argument to ufo2ft.compileOTF/compileTTF to
            # completely skip compiling features into OTL tables
            feature_writers = []
        compiler_options = dict(
            useProductionNames=use_production_names,
            reverseDirection=reverse_direction,
            cubicConversionError=conversion_error,
            featureWriters=feature_writers,
            inplace=True,  # avoid extra copy
        )
        if use_afdko:
            compiler_options["featureCompilerClass"] = FDKFeatureCompiler
        if interpolatable:
            if not ttf:
                raise NotImplementedError("interpolatable CFF not supported yet")
            logger.info("Building interpolation-compatible TTFs")
            fonts = ufo2ft.compileInterpolatableTTFs(ufos, **compiler_options)
        else:
            # lazy generator; each font is compiled when the loop below
            # reaches it
            fonts = self._iter_compile(
                ufos,
                ttf,
                removeOverlaps=remove_overlaps,
                overlapsBackend=overlaps_backend,
                optimizeCFF=optimize_cff,
                roundTolerance=cff_round_tolerance,
                **compiler_options
            )
        # ttfautohint only applies to TrueType output
        do_autohint = ttf and autohint is not None
        for font, ufo in zip(fonts, ufos):
            if interpolate_layout_from is not None:
                master_locations, instance_locations = self._designspace_locations(
                    interpolate_layout_from
                )
                loc = instance_locations[_normpath(ufo.path)]
                gpos_src = interpolate_layout(
                    interpolate_layout_from, loc, finder, mapped=True
                )
                font["GPOS"] = gpos_src["GPOS"]
                # GSUB/GDEF are not interpolated; take them from the
                # compiled master closest to this instance's location
                gsub_src = TTFont(finder(self._closest_location(master_locations, loc)))
                if "GDEF" in gsub_src:
                    font["GDEF"] = gsub_src["GDEF"]
                if "GSUB" in gsub_src:
                    font["GSUB"] = gsub_src["GSUB"]
            if do_autohint:
                # if we are autohinting, we save the unhinted font to a
                # temporary path, and the hinted one to the final destination
                fd, otf_path = tempfile.mkstemp("." + ext)
                os.close(fd)
            elif output_path is None:
                otf_path = self._output_path(
                    ufo, ext, is_instance, interpolatable, output_dir=output_dir
                )
            else:
                otf_path = output_path
            logger.info("Saving %s", otf_path)
            font.save(otf_path)
            # 'subset' is an Optional[bool], can be None, True or False.
            # When False, we never subset; when True, we always do; when
            # None (default), we check the presence of custom parameters
            if subset is False:
                pass
            elif subset is True or (
                (KEEP_GLYPHS_OLD_KEY in ufo.lib or KEEP_GLYPHS_NEW_KEY in ufo.lib)
                or any(glyph.lib.get(GLYPH_EXPORT_KEY, True) is False for glyph in ufo)
            ):
                self.subset_otf_from_ufo(otf_path, ufo)
            if not do_autohint:
                continue
            if output_path is not None:
                hinted_otf_path = output_path
            else:
                hinted_otf_path = self._output_path(
                    ufo,
                    ext,
                    is_instance,
                    interpolatable,
                    autohinted=True,
                    output_dir=output_dir,
                )
            try:
                ttfautohint(otf_path, hinted_otf_path, args=autohint)
            except TTFAError:
                # copy unhinted font to destination before re-raising error
                shutil.copyfile(otf_path, hinted_otf_path)
                raise
            finally:
                # must clean up temp file
                os.remove(otf_path)
    def _save_interpolatable_fonts(self, designspace, output_dir, ttf):
        """Save compiled interpolatable master fonts and a matching designspace.

        Writes each source's compiled TTFont to disk (suffixed with its layer
        name, if any) and rewrites the designspace so it points at the
        binaries: source.path is updated, layerName cleared, and instance
        path/filename dropped. The designspace itself is saved in output_dir
        (or the conventional interpolatable directory when output_dir is
        None).
        """
        ext = "ttf" if ttf else "otf"
        for source in designspace.sources:
            assert isinstance(source.font, TTFont)
            otf_path = self._output_path(
                source,
                ext,
                is_instance=False,
                interpolatable=True,
                output_dir=output_dir,
                suffix=source.layerName,
            )
            logger.info("Saving %s", otf_path)
            source.font.save(otf_path)
            # point the designspace at the binary instead of the UFO source
            source.path = otf_path
            source.layerName = None
        for instance in designspace.instances:
            instance.path = instance.filename = None
        if output_dir is None:
            output_dir = self._output_dir(ext, interpolatable=True)
        designspace_path = os.path.join(output_dir, os.path.basename(designspace.path))
        logger.info("Saving %s", designspace_path)
        designspace.write(designspace_path)
    def subset_otf_from_ufo(self, otf_path, ufo):
        """Subset a font using export flags set by glyphsLib.

        There are two more settings that can change export behavior:
        "Export Glyphs" and "Remove Glyphs", which are currently not supported
        for complexity reasons. See
        https://github.com/googlei18n/glyphsLib/issues/295.

        The binary at otf_path is rewritten in place, keeping only glyphs
        that are flagged for export and (when a keep-glyphs list exists in
        the UFO lib) listed there.
        """
        from fontTools import subset

        # ufo2ft always inserts a ".notdef" glyph as the first glyph
        ufo_order = makeOfficialGlyphOrder(ufo)
        if ".notdef" not in ufo_order:
            ufo_order.insert(0, ".notdef")
        ot_order = TTFont(otf_path).getGlyphOrder()
        assert ot_order[0] == ".notdef"
        assert len(ufo_order) == len(ot_order)
        # the new-style lib key takes precedence over the old one;
        # for/else: keep_glyphs stays None when neither key is present
        for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY):
            keep_glyphs_list = ufo.lib.get(key)
            if keep_glyphs_list is not None:
                keep_glyphs = set(keep_glyphs_list)
                break
        else:
            keep_glyphs = None
        # map UFO glyph names to their (possibly renamed) binary counterparts
        include = []
        for source_name, binary_name in zip(ufo_order, ot_order):
            if keep_glyphs and source_name not in keep_glyphs:
                continue
            if source_name in ufo:
                exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True)
                if not exported:
                    continue
            include.append(binary_name)
        # copied from nototools.subset
        opt = subset.Options()
        opt.name_IDs = ["*"]
        opt.name_legacy = True
        opt.name_languages = ["*"]
        opt.layout_features = ["*"]
        opt.notdef_outline = True
        opt.recalc_bounds = True
        opt.recalc_timestamp = True
        opt.canonical_order = True
        opt.glyph_names = True
        font = subset.load_font(otf_path, opt, lazy=False)
        subsetter = subset.Subsetter(options=opt)
        subsetter.populate(glyphs=include)
        subsetter.subset(font)
        subset.save_font(font, otf_path, opt)
def run_from_glyphs(
self,
glyphs_path,
designspace_path=None,
master_dir=None,
instance_dir=None,
family_name=None,
mti_source=None,
**kwargs
):
"""Run toolchain from Glyphs source.
Args:
glyphs_path: Path to source file.
designspace_path: Output path of generated designspace document.
By default it's "<family_name>[-<base_style>].designspace".
master_dir: Directory where to save UFO masters (default:
"master_ufo").
instance_dir: Directory where to save UFO instances (default:
"instance_ufo").
family_name: If provided, uses this family name in the output.
mti_source: Path to property list file containing a dictionary
mapping UFO masters to dictionaries mapping layout table
tags to MTI source paths which should be compiled into
those tables.
kwargs: Arguments passed along to run_from_designspace.
"""
logger.info("Building master UFOs and designspace from Glyphs source")
designspace_path = self.build_master_ufos(
glyphs_path,
designspace_path=designspace_path,
master_dir=master_dir,
instance_dir=instance_dir,
family_name=family_name,
mti_source=mti_source,
)
self.run_from_designspace(designspace_path, **kwargs)
    def interpolate_instance_ufos(
        self,
        designspace,
        include=None,
        round_instances=False,
        expand_features_to_instances=False,
    ):
        """Interpolate master UFOs with MutatorMath and return instance UFOs.

        Args:
            designspace: a DesignSpaceDocument object containing sources and
                instances.
            include (str): optional regular expression pattern to match the
                DS instance 'name' attribute and only interpolate the matching
                instances.
            round_instances (bool): round instances' coordinates to integer.
            expand_features_to_instances: parses the master feature file, expands all
                include()s and writes the resulting full feature file to all instance
                UFOs. Use this if you share feature files among masters in external
                files. Otherwise, the relative include paths can break as instances
                may end up elsewhere. Only done on interpolation.
        Returns:
            list of defcon.Font objects corresponding to the UFO instances.
        Raises:
            FontmakeError: if any of the sources defines a custom 'layer', for
                this is not supported by MutatorMath.
            ValueError: "expand_features_to_instances" is True but no source in the
                designspace document is designated with '<features copy="1"/>'.
        """
        from glyphsLib.interpolation import apply_instance_data
        from mutatorMath.ufo.document import DesignSpaceDocumentReader

        if any(source.layerName is not None for source in designspace.sources):
            raise FontmakeError(
                "MutatorMath doesn't support DesignSpace sources with 'layer' "
                "attribute"
            )
        # TODO: replace mutatorMath with ufoProcessor?
        builder = DesignSpaceDocumentReader(
            designspace.path, ufoVersion=3, roundGeometry=round_instances, verbose=True
        )
        logger.info("Interpolating master UFOs from designspace")
        if include is not None:
            instances = self._search_instances(designspace, pattern=include)
            for instance_name in instances:
                # a ("name", value) tuple selects the instance by its
                # designspace 'name' attribute
                builder.readInstance(("name", instance_name))
            filenames = set(instances.values())
        else:
            builder.readInstances()
            filenames = None  # will include all instances
        logger.info("Applying instance data from designspace")
        instance_ufos = apply_instance_data(designspace, include_filenames=filenames)
        if expand_features_to_instances:
            logger.debug("Expanding features to instance UFOs")
            # the source flagged with <features copy="1"/> provides the
            # feature file that is expanded into every instance
            master_source = next(
                (s for s in designspace.sources if s.copyFeatures), None
            )
            if not master_source:
                raise ValueError("No source is designated as the master for features.")
            else:
                master_source_font = builder.sources[master_source.name][0]
                master_source_features = parseLayoutFeatures(master_source_font).asFea()
                for instance_ufo in instance_ufos:
                    instance_ufo.features.text = master_source_features
                    instance_ufo.save()
        return instance_ufos
def run_from_designspace(
self,
designspace_path,
output=(),
interpolate=False,
masters_as_instances=False,
interpolate_binary_layout=False,
round_instances=False,
feature_writers=None,
expand_features_to_instances=False,
**kwargs
):
"""Run toolchain from a DesignSpace document to produce either static
instance fonts (ttf or otf), interpolatable or variable fonts.
Args:
designspace_path: Path to designspace document.
interpolate: If True output all instance fonts, otherwise just
masters. If the value is a string, only build instance(s) that
match given name. The string is compiled into a regular
expression and matched against the "name" attribute of
designspace instances using `re.fullmatch`.
masters_as_instances: If True, output master fonts as instances.
interpolate_binary_layout: Interpolate layout tables from compiled
master binaries.
round_instances: apply integer rounding when interpolating with
MutatorMath.
kwargs: Arguments passed along to run_from_ufos.
Raises:
TypeError: "variable" or "interpolatable" outputs are incompatible
with arguments "interpolate", "masters_as_instances", and
"interpolate_binary_layout".
"""
interp_outputs = INTERPOLATABLE_OUTPUTS.intersection(output)
static_outputs = STATIC_OUTPUTS.intersection(output)
if interp_outputs:
for argname in (
"interpolate",
"masters_as_instances",
"interpolate_binary_layout",
):
if locals()[argname]:
raise TypeError(
'"%s" argument incompatible with output %r'
% (argname, ", ".join(sorted(interp_outputs)))
)
designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace_path)
# if no --feature-writers option was passed, check in the designspace's
# <lib> element if user supplied a custom featureWriters configuration;
# if so, use that for all the UFOs built from this designspace
if feature_writers is None and FEATURE_WRITERS_KEY in designspace.lib:
feature_writers = loadFeatureWriters(designspace)
if static_outputs:
self._run_from_designspace_static(
designspace,
outputs=static_outputs,
interpolate=interpolate,
masters_as_instances=masters_as_instances,
interpolate_binary_layout=interpolate_binary_layout,
round_instances=round_instances,
feature_writers=feature_writers,
expand_features_to_instances=expand_features_to_instances,
**kwargs
)
if interp_outputs:
self._run_from_designspace_interpolatable(
designspace,
outputs=interp_outputs,
feature_writers=feature_writers,
**kwargs
)
def _run_from_designspace_static(
self,
designspace,
outputs,
interpolate=False,
masters_as_instances=False,
interpolate_binary_layout=False,
round_instances=False,
feature_writers=None,
expand_features_to_instances=False,
**kwargs
):
ufos = []
if not interpolate or masters_as_instances:
ufos.extend((s.path for s in designspace.sources if s.path))
if interpolate:
pattern = interpolate if isinstance(interpolate, basestring) else None
ufos.extend(
self.interpolate_instance_ufos(
designspace,
include=pattern,
round_instances=round_instances,
expand_features_to_instances=expand_features_to_instances,
)
)
if interpolate_binary_layout is False:
interpolate_layout_from = interpolate_layout_dir = None
else:
interpolate_layout_from = designspace
if isinstance(interpolate_binary_layout, basestring):
interpolate_layout_dir = interpolate_binary_layout
else:
interpolate_layout_dir = None
self.run_from_ufos(
ufos,
output=outputs,
is_instance=(interpolate or masters_as_instances),
interpolate_layout_from=interpolate_layout_from,
interpolate_layout_dir=interpolate_layout_dir,
feature_writers=feature_writers,
**kwargs
)
def _run_from_designspace_interpolatable(
self, designspace, outputs, output_path=None, output_dir=None, **kwargs
):
ttf_designspace = otf_designspace = None
if "variable" in outputs:
ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
self.build_variable_font(
ttf_designspace, output_path=output_path, output_dir=output_dir
)
if "ttf-interpolatable" in outputs:
if ttf_designspace is None:
ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
self._save_interpolatable_fonts(ttf_designspace, output_dir, ttf=True)
if "variable-cff2" in outputs:
otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
self.build_variable_font(
otf_designspace,
output_path=output_path,
output_dir=output_dir,
ttf=False,
)
if "otf-interpolatable" in outputs:
if otf_designspace is None:
otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
self._save_interpolatable_fonts(otf_designspace, output_dir, ttf=False)
def run_from_ufos(self, ufos, output=(), **kwargs):
"""Run toolchain from UFO sources.
Args:
ufos: List of UFO sources, as either paths or opened objects.
output: List of output formats to generate.
kwargs: Arguments passed along to save_otfs.
"""
if set(output) == {"ufo"}:
return
# the `ufos` parameter can be a list of UFO objects
# or it can be a path (string) with a glob syntax
ufo_paths = []
if isinstance(ufos, basestring):
ufo_paths = glob.glob(ufos)
ufos = [Font(x) for x in ufo_paths]
elif isinstance(ufos, list):
# ufos can be either paths or open Font objects, so normalize them
ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
ufo_paths = [x.path for x in ufos]
else:
raise FontmakeError(
"UFOs parameter is neither a defcon.Font object, a path or a glob, "
"nor a list of any of these.",
ufos,
)
need_reload = False
if "otf" in output:
self.build_otfs(ufos, **kwargs)
need_reload = True
if "ttf" in output:
if need_reload:
ufos = [Font(path) for path in ufo_paths]
self.build_ttfs(ufos, **kwargs)
need_reload = True
@staticmethod
def _search_instances(designspace, pattern):
instances = OrderedDict()
for instance in designspace.instances:
# is 'name' optional? 'filename' certainly must not be
if fullmatch(pattern, instance.name):
instances[instance.name] = instance.filename
if not instances:
raise FontmakeError("No instance found with %r" % pattern)
return instances
def _font_name(self, ufo):
"""Generate a postscript-style font name."""
family_name = (
ufo.info.familyName.replace(" ", "")
if ufo.info.familyName is not None
else "None"
)
style_name = (
ufo.info.styleName.replace(" ", "")
if ufo.info.styleName is not None
else "None"
)
return "{}-{}".format(family_name, style_name)
def _output_dir(
self,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
):
"""Generate an output directory.
Args:
ext: extension string.
is_instance: The output is instance font or not.
interpolatable: The output is interpolatable or not.
autohinted: The output is autohinted or not.
is_variable: The output is variable font or not.
Return:
output directory string.
"""
assert not (is_variable and any([is_instance, interpolatable]))
# FIXME? Use user configurable destination folders.
if is_variable:
dir_prefix = "variable_"
elif is_instance:
dir_prefix = "instance_"
else:
dir_prefix = "master_"
dir_suffix = "_interpolatable" if interpolatable else ""
output_dir = dir_prefix + ext + dir_suffix
if autohinted:
output_dir = os.path.join("autohinted", output_dir)
return output_dir
def _output_path(
self,
ufo_or_font_name,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
output_dir=None,
suffix=None,
):
"""Generate output path for a font file with given extension."""
if isinstance(ufo_or_font_name, basestring):
font_name = ufo_or_font_name
elif ufo_or_font_name.path:
font_name = os.path.splitext(
os.path.basename(os.path.normpath(ufo_or_font_name.path))
)[0]
else:
font_name = self._font_name(ufo_or_font_name)
if output_dir is None:
output_dir = self._output_dir(
ext, is_instance, interpolatable, autohinted, is_variable
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if suffix:
return os.path.join(output_dir, "{}-{}.{}".format(font_name, suffix, ext))
else:
return os.path.join(output_dir, "{}.{}".format(font_name, ext))
def _designspace_locations(self, designspace):
"""Map font filenames to their locations in a designspace."""
maps = []
for elements in (designspace.sources, designspace.instances):
location_map = {}
for element in elements:
path = _normpath(element.path)
location_map[path] = element.location
maps.append(location_map)
return maps
def _closest_location(self, location_map, target):
"""Return path of font whose location is closest to target."""
def dist(a, b):
return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys()))
paths = iter(location_map.keys())
closest = next(paths)
closest_dist = dist(target, location_map[closest])
for path in paths:
cur_dist = dist(target, location_map[path])
if cur_dist < closest_dist:
closest = path
closest_dist = cur_dist
return closest
|
googlefonts/fontmake | Lib/fontmake/font_project.py | FontProject.build_variable_font | python | def build_variable_font(
self,
designspace,
output_path=None,
output_dir=None,
master_bin_dir=None,
ttf=True,
):
assert not (output_path and output_dir), "mutually exclusive args"
ext = "ttf" if ttf else "otf"
if hasattr(designspace, "__fspath__"):
designspace = designspace.__fspath__()
if isinstance(designspace, basestring):
designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace)
if master_bin_dir is None:
master_bin_dir = self._output_dir(ext, interpolatable=True)
finder = partial(_varLib_finder, directory=master_bin_dir)
else:
assert all(isinstance(s.font, TTFont) for s in designspace.sources)
finder = lambda s: s # noqa: E731
if output_path is None:
output_path = (
os.path.splitext(os.path.basename(designspace.path))[0] + "-VF"
)
output_path = self._output_path(
output_path, ext, is_variable=True, output_dir=output_dir
)
logger.info("Building variable font " + output_path)
font, _, _ = varLib.build(designspace, finder)
font.save(output_path) | Build OpenType variable font from masters in a designspace. | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L341-L377 | [
"def _output_dir(\n self,\n ext,\n is_instance=False,\n interpolatable=False,\n autohinted=False,\n is_variable=False,\n):\n \"\"\"Generate an output directory.\n\n Args:\n ext: extension string.\n is_instance: The output is instance font or not.\n interpolatable: The output is interpolatable or not.\n autohinted: The output is autohinted or not.\n is_variable: The output is variable font or not.\n Return:\n output directory string.\n \"\"\"\n\n assert not (is_variable and any([is_instance, interpolatable]))\n # FIXME? Use user configurable destination folders.\n if is_variable:\n dir_prefix = \"variable_\"\n elif is_instance:\n dir_prefix = \"instance_\"\n else:\n dir_prefix = \"master_\"\n dir_suffix = \"_interpolatable\" if interpolatable else \"\"\n output_dir = dir_prefix + ext + dir_suffix\n if autohinted:\n output_dir = os.path.join(\"autohinted\", output_dir)\n return output_dir\n",
"def _output_path(\n self,\n ufo_or_font_name,\n ext,\n is_instance=False,\n interpolatable=False,\n autohinted=False,\n is_variable=False,\n output_dir=None,\n suffix=None,\n):\n \"\"\"Generate output path for a font file with given extension.\"\"\"\n\n if isinstance(ufo_or_font_name, basestring):\n font_name = ufo_or_font_name\n elif ufo_or_font_name.path:\n font_name = os.path.splitext(\n os.path.basename(os.path.normpath(ufo_or_font_name.path))\n )[0]\n else:\n font_name = self._font_name(ufo_or_font_name)\n\n if output_dir is None:\n output_dir = self._output_dir(\n ext, is_instance, interpolatable, autohinted, is_variable\n )\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if suffix:\n return os.path.join(output_dir, \"{}-{}.{}\".format(font_name, suffix, ext))\n else:\n return os.path.join(output_dir, \"{}.{}\".format(font_name, ext))\n"
] | class FontProject(object):
"""Provides methods for building fonts."""
def __init__(self, timing=False, verbose="INFO", validate_ufo=False):
logging.basicConfig(level=getattr(logging, verbose.upper()))
logging.getLogger("fontTools.subset").setLevel(logging.WARNING)
if timing:
configLogger(logger=timer.logger, level=logging.DEBUG)
logger.debug(
"ufoLib UFO validation is %s", "enabled" if validate_ufo else "disabled"
)
setUfoLibReadValidate(validate_ufo)
setUfoLibWriteValidate(validate_ufo)
@timer()
def build_master_ufos(
self,
glyphs_path,
designspace_path=None,
master_dir=None,
instance_dir=None,
family_name=None,
mti_source=None,
):
"""Build UFOs and MutatorMath designspace from Glyphs source."""
import glyphsLib
if master_dir is None:
master_dir = self._output_dir("ufo")
if not os.path.isdir(master_dir):
os.mkdir(master_dir)
if instance_dir is None:
instance_dir = self._output_dir("ufo", is_instance=True)
if not os.path.isdir(instance_dir):
os.mkdir(instance_dir)
font = glyphsLib.GSFont(glyphs_path)
if designspace_path is not None:
designspace_dir = os.path.dirname(designspace_path)
else:
designspace_dir = master_dir
# glyphsLib.to_designspace expects instance_dir to be relative
instance_dir = os.path.relpath(instance_dir, designspace_dir)
designspace = glyphsLib.to_designspace(
font, family_name=family_name, instance_dir=instance_dir
)
masters = {}
# multiple sources can have the same font/filename (but different layer),
# we want to save a font only once
for source in designspace.sources:
if source.filename in masters:
assert source.font is masters[source.filename]
continue
ufo_path = os.path.join(master_dir, source.filename)
# no need to also set the relative 'filename' attribute as that
# will be auto-updated on writing the designspace document
source.path = ufo_path
source.font.save(ufo_path)
masters[source.filename] = source.font
if designspace_path is None:
designspace_path = os.path.join(master_dir, designspace.filename)
designspace.write(designspace_path)
if mti_source:
self.add_mti_features_to_master_ufos(mti_source, masters.values())
return designspace_path
@timer()
def add_mti_features_to_master_ufos(self, mti_source, masters):
mti_dir = os.path.dirname(mti_source)
with open(mti_source, "rb") as mti_file:
mti_paths = readPlist(mti_file)
for master in masters:
key = os.path.basename(master.path).rstrip(".ufo")
for table, path in mti_paths[key].items():
with open(os.path.join(mti_dir, path), "rb") as mti_source:
ufo_path = (
"com.github.googlei18n.ufo2ft.mtiFeatures/%s.mti"
% table.strip()
)
master.data[ufo_path] = mti_source.read()
# If we have MTI sources, any Adobe feature files derived from
# the Glyphs file should be ignored. We clear it here because
# it only contains junk information anyway.
master.features.text = ""
master.save()
@_deprecated
@timer()
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
"""Remove overlaps in UFOs' glyphs' contours."""
from booleanOperations import union, BooleanOperationsError
for ufo in ufos:
font_name = self._font_name(ufo)
logger.info("Removing overlaps for " + font_name)
for glyph in ufo:
if not glyph_filter(glyph):
continue
contours = list(glyph)
glyph.clearContours()
try:
union(contours, glyph.getPointPen())
except BooleanOperationsError:
logger.error(
"Failed to remove overlaps for %s: %r", font_name, glyph.name
)
raise
@_deprecated
@timer()
def decompose_glyphs(self, ufos, glyph_filter=lambda g: True):
"""Move components of UFOs' glyphs to their outlines."""
for ufo in ufos:
logger.info("Decomposing glyphs for " + self._font_name(ufo))
for glyph in ufo:
if not glyph.components or not glyph_filter(glyph):
continue
self._deep_copy_contours(ufo, glyph, glyph, Transform())
glyph.clearComponents()
def _deep_copy_contours(self, ufo, parent, component, transformation):
"""Copy contours from component to parent, including nested components."""
for nested in component.components:
self._deep_copy_contours(
ufo,
parent,
ufo[nested.baseGlyph],
transformation.transform(nested.transformation),
)
if component != parent:
pen = TransformPen(parent.getPen(), transformation)
# if the transformation has a negative determinant, it will reverse
# the contour direction of the component
xx, xy, yx, yy = transformation[:4]
if xx * yy - xy * yx < 0:
pen = ReverseContourPen(pen)
component.draw(pen)
@_deprecated
@timer()
def convert_curves(
self, ufos, compatible=False, reverse_direction=True, conversion_error=None
):
from cu2qu.ufo import font_to_quadratic, fonts_to_quadratic
if compatible:
logger.info("Converting curves compatibly")
fonts_to_quadratic(
ufos,
max_err_em=conversion_error,
reverse_direction=reverse_direction,
dump_stats=True,
)
else:
for ufo in ufos:
logger.info("Converting curves for " + self._font_name(ufo))
font_to_quadratic(
ufo,
max_err_em=conversion_error,
reverse_direction=reverse_direction,
dump_stats=True,
)
def build_otfs(self, ufos, **kwargs):
"""Build OpenType binaries with CFF outlines."""
self.save_otfs(ufos, **kwargs)
def build_ttfs(self, ufos, **kwargs):
"""Build OpenType binaries with TrueType outlines."""
self.save_otfs(ufos, ttf=True, **kwargs)
@staticmethod
def _load_designspace_sources(designspace):
# set source.font attributes, but only load fonts once
masters = {}
for source in designspace.sources:
if source.path in masters:
source.font = masters[source.path]
else:
assert source.path is not None
source.font = Font(source.path)
masters[source.path] = source.font
def _build_interpolatable_masters(
self,
designspace,
ttf,
use_production_names=None,
reverse_direction=True,
conversion_error=None,
feature_writers=None,
cff_round_tolerance=None,
**kwargs
):
if hasattr(designspace, "__fspath__"):
ds_path = designspace.__fspath__()
if isinstance(designspace, basestring):
ds_path = designspace
else:
# reload designspace from its path so we have a new copy
# that can be modified in-place.
ds_path = designspace.path
if ds_path is not None:
designspace = designspaceLib.DesignSpaceDocument.fromfile(ds_path)
self._load_designspace_sources(designspace)
if ttf:
return ufo2ft.compileInterpolatableTTFsFromDS(
designspace,
useProductionNames=use_production_names,
reverseDirection=reverse_direction,
cubicConversionError=conversion_error,
featureWriters=feature_writers,
inplace=True,
)
else:
return ufo2ft.compileInterpolatableOTFsFromDS(
designspace,
useProductionNames=use_production_names,
roundTolerance=cff_round_tolerance,
featureWriters=feature_writers,
inplace=True,
)
def build_interpolatable_ttfs(self, designspace, **kwargs):
"""Build OpenType binaries with interpolatable TrueType outlines
from DesignSpaceDocument object.
"""
return self._build_interpolatable_masters(designspace, ttf=True, **kwargs)
def build_interpolatable_otfs(self, designspace, **kwargs):
"""Build OpenType binaries with interpolatable TrueType outlines
from DesignSpaceDocument object.
"""
return self._build_interpolatable_masters(designspace, ttf=False, **kwargs)
def _iter_compile(self, ufos, ttf=False, **kwargs):
# generator function that calls ufo2ft compiler for each ufo and
# yields ttFont instances
options = dict(kwargs)
if ttf:
for key in ("optimizeCFF", "roundTolerance"):
options.pop(key, None)
compile_func, fmt = ufo2ft.compileTTF, "TTF"
else:
for key in ("cubicConversionError", "reverseDirection"):
options.pop(key, None)
compile_func, fmt = ufo2ft.compileOTF, "OTF"
for ufo in ufos:
name = self._font_name(ufo)
logger.info("Building {} for {}".format(fmt, name))
yield compile_func(ufo, **options)
@timer()
def save_otfs(
self,
ufos,
ttf=False,
is_instance=False,
interpolatable=False,
use_afdko=False,
autohint=None,
subset=None,
use_production_names=None,
subroutinize=None, # deprecated
optimize_cff=CFFOptimization.NONE,
cff_round_tolerance=None,
remove_overlaps=True,
overlaps_backend=None,
reverse_direction=True,
conversion_error=None,
feature_writers=None,
interpolate_layout_from=None,
interpolate_layout_dir=None,
output_path=None,
output_dir=None,
inplace=True,
):
"""Build OpenType binaries from UFOs.
Args:
ufos: Font objects to compile.
ttf: If True, build fonts with TrueType outlines and .ttf extension.
is_instance: If output fonts are instances, for generating paths.
interpolatable: If output is interpolatable, for generating paths.
use_afdko: If True, use AFDKO to compile feature source.
autohint: Parameters to provide to ttfautohint. If not provided, the
autohinting step is skipped.
subset: Whether to subset the output according to data in the UFOs.
If not provided, also determined by flags in the UFOs.
use_production_names: Whether to use production glyph names in the
output. If not provided, determined by flags in the UFOs.
subroutinize: If True, subroutinize CFF outlines in output.
cff_round_tolerance (float): controls the rounding of point
coordinates in CFF table. It is defined as the maximum absolute
difference between the original float and the rounded integer
value. By default, all floats are rounded to integer (tolerance
0.5); a value of 0 completely disables rounding; values in
between only round floats which are close to their integral
part within the tolerated range. Ignored if ttf=True.
remove_overlaps: If True, remove overlaps in glyph shapes.
overlaps_backend: name of the library to remove overlaps. Can be
either "booleanOperations" (default) or "pathops".
reverse_direction: If True, reverse contour directions when
compiling TrueType outlines.
conversion_error: Error to allow when converting cubic CFF contours
to quadratic TrueType contours.
feature_writers: list of ufo2ft-compatible feature writer classes
or pre-initialized objects that are passed on to ufo2ft
feature compiler to generate automatic feature code. The
default value (None) means that ufo2ft will use its built-in
default feature writers (for kern, mark, mkmk, etc.). An empty
list ([]) will skip any automatic feature generation.
interpolate_layout_from: A DesignSpaceDocument object to give varLib
for interpolating layout tables to use in output.
interpolate_layout_dir: Directory containing the compiled master
fonts to use for interpolating binary layout tables.
output_path: output font file path. Only works when the input
'ufos' list contains a single font.
output_dir: directory where to save output files. Mutually
exclusive with 'output_path' argument.
"""
assert not (output_path and output_dir), "mutually exclusive args"
if output_path is not None and len(ufos) > 1:
raise ValueError("output_path requires a single input")
if subroutinize is not None:
import warnings
warnings.warn(
"the 'subroutinize' argument is deprecated, use 'optimize_cff'",
UserWarning,
)
if subroutinize:
optimize_cff = CFFOptimization.SUBROUTINIZE
else:
# for b/w compatibility, we still run the charstring specializer
# even when --no-subroutinize is used. Use the new --optimize-cff
# option to disable both specilization and subroutinization
optimize_cff = CFFOptimization.SPECIALIZE
ext = "ttf" if ttf else "otf"
if interpolate_layout_from is not None:
if interpolate_layout_dir is None:
interpolate_layout_dir = self._output_dir(
ext, is_instance=False, interpolatable=interpolatable
)
finder = partial(_varLib_finder, directory=interpolate_layout_dir, ext=ext)
# no need to generate automatic features in ufo2ft, since here we
# are interpolating precompiled GPOS table with fontTools.varLib.
# An empty 'featureWriters' list tells ufo2ft to not generate any
# automatic features.
# TODO: Add an argument to ufo2ft.compileOTF/compileTTF to
# completely skip compiling features into OTL tables
feature_writers = []
compiler_options = dict(
useProductionNames=use_production_names,
reverseDirection=reverse_direction,
cubicConversionError=conversion_error,
featureWriters=feature_writers,
inplace=True, # avoid extra copy
)
if use_afdko:
compiler_options["featureCompilerClass"] = FDKFeatureCompiler
if interpolatable:
if not ttf:
raise NotImplementedError("interpolatable CFF not supported yet")
logger.info("Building interpolation-compatible TTFs")
fonts = ufo2ft.compileInterpolatableTTFs(ufos, **compiler_options)
else:
fonts = self._iter_compile(
ufos,
ttf,
removeOverlaps=remove_overlaps,
overlapsBackend=overlaps_backend,
optimizeCFF=optimize_cff,
roundTolerance=cff_round_tolerance,
**compiler_options
)
do_autohint = ttf and autohint is not None
for font, ufo in zip(fonts, ufos):
if interpolate_layout_from is not None:
master_locations, instance_locations = self._designspace_locations(
interpolate_layout_from
)
loc = instance_locations[_normpath(ufo.path)]
gpos_src = interpolate_layout(
interpolate_layout_from, loc, finder, mapped=True
)
font["GPOS"] = gpos_src["GPOS"]
gsub_src = TTFont(finder(self._closest_location(master_locations, loc)))
if "GDEF" in gsub_src:
font["GDEF"] = gsub_src["GDEF"]
if "GSUB" in gsub_src:
font["GSUB"] = gsub_src["GSUB"]
if do_autohint:
# if we are autohinting, we save the unhinted font to a
# temporary path, and the hinted one to the final destination
fd, otf_path = tempfile.mkstemp("." + ext)
os.close(fd)
elif output_path is None:
otf_path = self._output_path(
ufo, ext, is_instance, interpolatable, output_dir=output_dir
)
else:
otf_path = output_path
logger.info("Saving %s", otf_path)
font.save(otf_path)
# 'subset' is an Optional[bool], can be None, True or False.
# When False, we never subset; when True, we always do; when
# None (default), we check the presence of custom parameters
if subset is False:
pass
elif subset is True or (
(KEEP_GLYPHS_OLD_KEY in ufo.lib or KEEP_GLYPHS_NEW_KEY in ufo.lib)
or any(glyph.lib.get(GLYPH_EXPORT_KEY, True) is False for glyph in ufo)
):
self.subset_otf_from_ufo(otf_path, ufo)
if not do_autohint:
continue
if output_path is not None:
hinted_otf_path = output_path
else:
hinted_otf_path = self._output_path(
ufo,
ext,
is_instance,
interpolatable,
autohinted=True,
output_dir=output_dir,
)
try:
ttfautohint(otf_path, hinted_otf_path, args=autohint)
except TTFAError:
# copy unhinted font to destination before re-raising error
shutil.copyfile(otf_path, hinted_otf_path)
raise
finally:
# must clean up temp file
os.remove(otf_path)
def _save_interpolatable_fonts(self, designspace, output_dir, ttf):
ext = "ttf" if ttf else "otf"
for source in designspace.sources:
assert isinstance(source.font, TTFont)
otf_path = self._output_path(
source,
ext,
is_instance=False,
interpolatable=True,
output_dir=output_dir,
suffix=source.layerName,
)
logger.info("Saving %s", otf_path)
source.font.save(otf_path)
source.path = otf_path
source.layerName = None
for instance in designspace.instances:
instance.path = instance.filename = None
if output_dir is None:
output_dir = self._output_dir(ext, interpolatable=True)
designspace_path = os.path.join(output_dir, os.path.basename(designspace.path))
logger.info("Saving %s", designspace_path)
designspace.write(designspace_path)
def subset_otf_from_ufo(self, otf_path, ufo):
"""Subset a font using export flags set by glyphsLib.
There are two more settings that can change export behavior:
"Export Glyphs" and "Remove Glyphs", which are currently not supported
for complexity reasons. See
https://github.com/googlei18n/glyphsLib/issues/295.
"""
from fontTools import subset
# ufo2ft always inserts a ".notdef" glyph as the first glyph
ufo_order = makeOfficialGlyphOrder(ufo)
if ".notdef" not in ufo_order:
ufo_order.insert(0, ".notdef")
ot_order = TTFont(otf_path).getGlyphOrder()
assert ot_order[0] == ".notdef"
assert len(ufo_order) == len(ot_order)
for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY):
keep_glyphs_list = ufo.lib.get(key)
if keep_glyphs_list is not None:
keep_glyphs = set(keep_glyphs_list)
break
else:
keep_glyphs = None
include = []
for source_name, binary_name in zip(ufo_order, ot_order):
if keep_glyphs and source_name not in keep_glyphs:
continue
if source_name in ufo:
exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True)
if not exported:
continue
include.append(binary_name)
# copied from nototools.subset
opt = subset.Options()
opt.name_IDs = ["*"]
opt.name_legacy = True
opt.name_languages = ["*"]
opt.layout_features = ["*"]
opt.notdef_outline = True
opt.recalc_bounds = True
opt.recalc_timestamp = True
opt.canonical_order = True
opt.glyph_names = True
font = subset.load_font(otf_path, opt, lazy=False)
subsetter = subset.Subsetter(options=opt)
subsetter.populate(glyphs=include)
subsetter.subset(font)
subset.save_font(font, otf_path, opt)
def run_from_glyphs(
self,
glyphs_path,
designspace_path=None,
master_dir=None,
instance_dir=None,
family_name=None,
mti_source=None,
**kwargs
):
"""Run toolchain from Glyphs source.
Args:
glyphs_path: Path to source file.
designspace_path: Output path of generated designspace document.
By default it's "<family_name>[-<base_style>].designspace".
master_dir: Directory where to save UFO masters (default:
"master_ufo").
instance_dir: Directory where to save UFO instances (default:
"instance_ufo").
family_name: If provided, uses this family name in the output.
mti_source: Path to property list file containing a dictionary
mapping UFO masters to dictionaries mapping layout table
tags to MTI source paths which should be compiled into
those tables.
kwargs: Arguments passed along to run_from_designspace.
"""
logger.info("Building master UFOs and designspace from Glyphs source")
designspace_path = self.build_master_ufos(
glyphs_path,
designspace_path=designspace_path,
master_dir=master_dir,
instance_dir=instance_dir,
family_name=family_name,
mti_source=mti_source,
)
self.run_from_designspace(designspace_path, **kwargs)
def interpolate_instance_ufos(
self,
designspace,
include=None,
round_instances=False,
expand_features_to_instances=False,
):
"""Interpolate master UFOs with MutatorMath and return instance UFOs.
Args:
designspace: a DesignSpaceDocument object containing sources and
instances.
include (str): optional regular expression pattern to match the
DS instance 'name' attribute and only interpolate the matching
instances.
round_instances (bool): round instances' coordinates to integer.
expand_features_to_instances: parses the master feature file, expands all
include()s and writes the resulting full feature file to all instance
UFOs. Use this if you share feature files among masters in external
files. Otherwise, the relative include paths can break as instances
may end up elsewhere. Only done on interpolation.
Returns:
list of defcon.Font objects corresponding to the UFO instances.
Raises:
FontmakeError: if any of the sources defines a custom 'layer', for
this is not supported by MutatorMath.
ValueError: "expand_features_to_instances" is True but no source in the
designspace document is designated with '<features copy="1"/>'.
"""
from glyphsLib.interpolation import apply_instance_data
from mutatorMath.ufo.document import DesignSpaceDocumentReader
if any(source.layerName is not None for source in designspace.sources):
raise FontmakeError(
"MutatorMath doesn't support DesignSpace sources with 'layer' "
"attribute"
)
# TODO: replace mutatorMath with ufoProcessor?
builder = DesignSpaceDocumentReader(
designspace.path, ufoVersion=3, roundGeometry=round_instances, verbose=True
)
logger.info("Interpolating master UFOs from designspace")
if include is not None:
instances = self._search_instances(designspace, pattern=include)
for instance_name in instances:
builder.readInstance(("name", instance_name))
filenames = set(instances.values())
else:
builder.readInstances()
filenames = None # will include all instances
logger.info("Applying instance data from designspace")
instance_ufos = apply_instance_data(designspace, include_filenames=filenames)
if expand_features_to_instances:
logger.debug("Expanding features to instance UFOs")
master_source = next(
(s for s in designspace.sources if s.copyFeatures), None
)
if not master_source:
raise ValueError("No source is designated as the master for features.")
else:
master_source_font = builder.sources[master_source.name][0]
master_source_features = parseLayoutFeatures(master_source_font).asFea()
for instance_ufo in instance_ufos:
instance_ufo.features.text = master_source_features
instance_ufo.save()
return instance_ufos
    def run_from_designspace(
        self,
        designspace_path,
        output=(),
        interpolate=False,
        masters_as_instances=False,
        interpolate_binary_layout=False,
        round_instances=False,
        feature_writers=None,
        expand_features_to_instances=False,
        **kwargs
    ):
        """Run toolchain from a DesignSpace document to produce either static
        instance fonts (ttf or otf), interpolatable or variable fonts.

        Args:
            designspace_path: Path to designspace document.
            output: Iterable of output format names; split below into the
                "static" and the "interpolatable/variable" groups, each
                handled by its own helper method.
            interpolate: If True output all instance fonts, otherwise just
                masters. If the value is a string, only build instance(s) that
                match given name. The string is compiled into a regular
                expression and matched against the "name" attribute of
                designspace instances using `re.fullmatch`.
            masters_as_instances: If True, output master fonts as instances.
            interpolate_binary_layout: Interpolate layout tables from compiled
                master binaries.
            round_instances: apply integer rounding when interpolating with
                MutatorMath.
            feature_writers: optional ufo2ft feature-writers configuration;
                when None, a configuration stored in the designspace <lib>
                element (FEATURE_WRITERS_KEY) is used if present.
            expand_features_to_instances: resolve feature include()s into each
                instance UFO (see interpolate_instance_ufos).
            kwargs: Arguments passed along to run_from_ufos.
        Raises:
            TypeError: "variable" or "interpolatable" outputs are incompatible
                with arguments "interpolate", "masters_as_instances", and
                "interpolate_binary_layout".
        """
        interp_outputs = INTERPOLATABLE_OUTPUTS.intersection(output)
        static_outputs = STATIC_OUTPUTS.intersection(output)
        if interp_outputs:
            # These flags only make sense for static outputs; reject them
            # early with a message naming the offending argument.
            for argname in (
                "interpolate",
                "masters_as_instances",
                "interpolate_binary_layout",
            ):
                # locals() lookup lets one loop report each flag by name
                # without repeating the same check three times.
                if locals()[argname]:
                    raise TypeError(
                        '"%s" argument incompatible with output %r'
                        % (argname, ", ".join(sorted(interp_outputs)))
                    )

        designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace_path)

        # if no --feature-writers option was passed, check in the designspace's
        # <lib> element if user supplied a custom featureWriters configuration;
        # if so, use that for all the UFOs built from this designspace
        if feature_writers is None and FEATURE_WRITERS_KEY in designspace.lib:
            feature_writers = loadFeatureWriters(designspace)

        if static_outputs:
            self._run_from_designspace_static(
                designspace,
                outputs=static_outputs,
                interpolate=interpolate,
                masters_as_instances=masters_as_instances,
                interpolate_binary_layout=interpolate_binary_layout,
                round_instances=round_instances,
                feature_writers=feature_writers,
                expand_features_to_instances=expand_features_to_instances,
                **kwargs
            )
        if interp_outputs:
            self._run_from_designspace_interpolatable(
                designspace,
                outputs=interp_outputs,
                feature_writers=feature_writers,
                **kwargs
            )
def _run_from_designspace_static(
self,
designspace,
outputs,
interpolate=False,
masters_as_instances=False,
interpolate_binary_layout=False,
round_instances=False,
feature_writers=None,
expand_features_to_instances=False,
**kwargs
):
ufos = []
if not interpolate or masters_as_instances:
ufos.extend((s.path for s in designspace.sources if s.path))
if interpolate:
pattern = interpolate if isinstance(interpolate, basestring) else None
ufos.extend(
self.interpolate_instance_ufos(
designspace,
include=pattern,
round_instances=round_instances,
expand_features_to_instances=expand_features_to_instances,
)
)
if interpolate_binary_layout is False:
interpolate_layout_from = interpolate_layout_dir = None
else:
interpolate_layout_from = designspace
if isinstance(interpolate_binary_layout, basestring):
interpolate_layout_dir = interpolate_binary_layout
else:
interpolate_layout_dir = None
self.run_from_ufos(
ufos,
output=outputs,
is_instance=(interpolate or masters_as_instances),
interpolate_layout_from=interpolate_layout_from,
interpolate_layout_dir=interpolate_layout_dir,
feature_writers=feature_writers,
**kwargs
)
def _run_from_designspace_interpolatable(
self, designspace, outputs, output_path=None, output_dir=None, **kwargs
):
ttf_designspace = otf_designspace = None
if "variable" in outputs:
ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
self.build_variable_font(
ttf_designspace, output_path=output_path, output_dir=output_dir
)
if "ttf-interpolatable" in outputs:
if ttf_designspace is None:
ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
self._save_interpolatable_fonts(ttf_designspace, output_dir, ttf=True)
if "variable-cff2" in outputs:
otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
self.build_variable_font(
otf_designspace,
output_path=output_path,
output_dir=output_dir,
ttf=False,
)
if "otf-interpolatable" in outputs:
if otf_designspace is None:
otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
self._save_interpolatable_fonts(otf_designspace, output_dir, ttf=False)
def run_from_ufos(self, ufos, output=(), **kwargs):
"""Run toolchain from UFO sources.
Args:
ufos: List of UFO sources, as either paths or opened objects.
output: List of output formats to generate.
kwargs: Arguments passed along to save_otfs.
"""
if set(output) == {"ufo"}:
return
# the `ufos` parameter can be a list of UFO objects
# or it can be a path (string) with a glob syntax
ufo_paths = []
if isinstance(ufos, basestring):
ufo_paths = glob.glob(ufos)
ufos = [Font(x) for x in ufo_paths]
elif isinstance(ufos, list):
# ufos can be either paths or open Font objects, so normalize them
ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
ufo_paths = [x.path for x in ufos]
else:
raise FontmakeError(
"UFOs parameter is neither a defcon.Font object, a path or a glob, "
"nor a list of any of these.",
ufos,
)
need_reload = False
if "otf" in output:
self.build_otfs(ufos, **kwargs)
need_reload = True
if "ttf" in output:
if need_reload:
ufos = [Font(path) for path in ufo_paths]
self.build_ttfs(ufos, **kwargs)
need_reload = True
@staticmethod
def _search_instances(designspace, pattern):
instances = OrderedDict()
for instance in designspace.instances:
# is 'name' optional? 'filename' certainly must not be
if fullmatch(pattern, instance.name):
instances[instance.name] = instance.filename
if not instances:
raise FontmakeError("No instance found with %r" % pattern)
return instances
def _font_name(self, ufo):
"""Generate a postscript-style font name."""
family_name = (
ufo.info.familyName.replace(" ", "")
if ufo.info.familyName is not None
else "None"
)
style_name = (
ufo.info.styleName.replace(" ", "")
if ufo.info.styleName is not None
else "None"
)
return "{}-{}".format(family_name, style_name)
def _output_dir(
self,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
):
"""Generate an output directory.
Args:
ext: extension string.
is_instance: The output is instance font or not.
interpolatable: The output is interpolatable or not.
autohinted: The output is autohinted or not.
is_variable: The output is variable font or not.
Return:
output directory string.
"""
assert not (is_variable and any([is_instance, interpolatable]))
# FIXME? Use user configurable destination folders.
if is_variable:
dir_prefix = "variable_"
elif is_instance:
dir_prefix = "instance_"
else:
dir_prefix = "master_"
dir_suffix = "_interpolatable" if interpolatable else ""
output_dir = dir_prefix + ext + dir_suffix
if autohinted:
output_dir = os.path.join("autohinted", output_dir)
return output_dir
def _output_path(
self,
ufo_or_font_name,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
output_dir=None,
suffix=None,
):
"""Generate output path for a font file with given extension."""
if isinstance(ufo_or_font_name, basestring):
font_name = ufo_or_font_name
elif ufo_or_font_name.path:
font_name = os.path.splitext(
os.path.basename(os.path.normpath(ufo_or_font_name.path))
)[0]
else:
font_name = self._font_name(ufo_or_font_name)
if output_dir is None:
output_dir = self._output_dir(
ext, is_instance, interpolatable, autohinted, is_variable
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if suffix:
return os.path.join(output_dir, "{}-{}.{}".format(font_name, suffix, ext))
else:
return os.path.join(output_dir, "{}.{}".format(font_name, ext))
def _designspace_locations(self, designspace):
"""Map font filenames to their locations in a designspace."""
maps = []
for elements in (designspace.sources, designspace.instances):
location_map = {}
for element in elements:
path = _normpath(element.path)
location_map[path] = element.location
maps.append(location_map)
return maps
def _closest_location(self, location_map, target):
"""Return path of font whose location is closest to target."""
def dist(a, b):
return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys()))
paths = iter(location_map.keys())
closest = next(paths)
closest_dist = dist(target, location_map[closest])
for path in paths:
cur_dist = dist(target, location_map[path])
if cur_dist < closest_dist:
closest = path
closest_dist = cur_dist
return closest
|
googlefonts/fontmake | Lib/fontmake/font_project.py | FontProject.save_otfs | python | def save_otfs(
self,
ufos,
ttf=False,
is_instance=False,
interpolatable=False,
use_afdko=False,
autohint=None,
subset=None,
use_production_names=None,
subroutinize=None, # deprecated
optimize_cff=CFFOptimization.NONE,
cff_round_tolerance=None,
remove_overlaps=True,
overlaps_backend=None,
reverse_direction=True,
conversion_error=None,
feature_writers=None,
interpolate_layout_from=None,
interpolate_layout_dir=None,
output_path=None,
output_dir=None,
inplace=True,
):
assert not (output_path and output_dir), "mutually exclusive args"
if output_path is not None and len(ufos) > 1:
raise ValueError("output_path requires a single input")
if subroutinize is not None:
import warnings
warnings.warn(
"the 'subroutinize' argument is deprecated, use 'optimize_cff'",
UserWarning,
)
if subroutinize:
optimize_cff = CFFOptimization.SUBROUTINIZE
else:
# for b/w compatibility, we still run the charstring specializer
# even when --no-subroutinize is used. Use the new --optimize-cff
                # option to disable both specialization and subroutinization
optimize_cff = CFFOptimization.SPECIALIZE
ext = "ttf" if ttf else "otf"
if interpolate_layout_from is not None:
if interpolate_layout_dir is None:
interpolate_layout_dir = self._output_dir(
ext, is_instance=False, interpolatable=interpolatable
)
finder = partial(_varLib_finder, directory=interpolate_layout_dir, ext=ext)
# no need to generate automatic features in ufo2ft, since here we
# are interpolating precompiled GPOS table with fontTools.varLib.
# An empty 'featureWriters' list tells ufo2ft to not generate any
# automatic features.
# TODO: Add an argument to ufo2ft.compileOTF/compileTTF to
# completely skip compiling features into OTL tables
feature_writers = []
compiler_options = dict(
useProductionNames=use_production_names,
reverseDirection=reverse_direction,
cubicConversionError=conversion_error,
featureWriters=feature_writers,
inplace=True, # avoid extra copy
)
if use_afdko:
compiler_options["featureCompilerClass"] = FDKFeatureCompiler
if interpolatable:
if not ttf:
raise NotImplementedError("interpolatable CFF not supported yet")
logger.info("Building interpolation-compatible TTFs")
fonts = ufo2ft.compileInterpolatableTTFs(ufos, **compiler_options)
else:
fonts = self._iter_compile(
ufos,
ttf,
removeOverlaps=remove_overlaps,
overlapsBackend=overlaps_backend,
optimizeCFF=optimize_cff,
roundTolerance=cff_round_tolerance,
**compiler_options
)
do_autohint = ttf and autohint is not None
for font, ufo in zip(fonts, ufos):
if interpolate_layout_from is not None:
master_locations, instance_locations = self._designspace_locations(
interpolate_layout_from
)
loc = instance_locations[_normpath(ufo.path)]
gpos_src = interpolate_layout(
interpolate_layout_from, loc, finder, mapped=True
)
font["GPOS"] = gpos_src["GPOS"]
gsub_src = TTFont(finder(self._closest_location(master_locations, loc)))
if "GDEF" in gsub_src:
font["GDEF"] = gsub_src["GDEF"]
if "GSUB" in gsub_src:
font["GSUB"] = gsub_src["GSUB"]
if do_autohint:
# if we are autohinting, we save the unhinted font to a
# temporary path, and the hinted one to the final destination
fd, otf_path = tempfile.mkstemp("." + ext)
os.close(fd)
elif output_path is None:
otf_path = self._output_path(
ufo, ext, is_instance, interpolatable, output_dir=output_dir
)
else:
otf_path = output_path
logger.info("Saving %s", otf_path)
font.save(otf_path)
# 'subset' is an Optional[bool], can be None, True or False.
# When False, we never subset; when True, we always do; when
# None (default), we check the presence of custom parameters
if subset is False:
pass
elif subset is True or (
(KEEP_GLYPHS_OLD_KEY in ufo.lib or KEEP_GLYPHS_NEW_KEY in ufo.lib)
or any(glyph.lib.get(GLYPH_EXPORT_KEY, True) is False for glyph in ufo)
):
self.subset_otf_from_ufo(otf_path, ufo)
if not do_autohint:
continue
if output_path is not None:
hinted_otf_path = output_path
else:
hinted_otf_path = self._output_path(
ufo,
ext,
is_instance,
interpolatable,
autohinted=True,
output_dir=output_dir,
)
try:
ttfautohint(otf_path, hinted_otf_path, args=autohint)
except TTFAError:
# copy unhinted font to destination before re-raising error
shutil.copyfile(otf_path, hinted_otf_path)
raise
finally:
# must clean up temp file
os.remove(otf_path) | Build OpenType binaries from UFOs.
Args:
ufos: Font objects to compile.
ttf: If True, build fonts with TrueType outlines and .ttf extension.
is_instance: If output fonts are instances, for generating paths.
interpolatable: If output is interpolatable, for generating paths.
use_afdko: If True, use AFDKO to compile feature source.
autohint: Parameters to provide to ttfautohint. If not provided, the
autohinting step is skipped.
subset: Whether to subset the output according to data in the UFOs.
If not provided, also determined by flags in the UFOs.
use_production_names: Whether to use production glyph names in the
output. If not provided, determined by flags in the UFOs.
subroutinize: If True, subroutinize CFF outlines in output.
cff_round_tolerance (float): controls the rounding of point
coordinates in CFF table. It is defined as the maximum absolute
difference between the original float and the rounded integer
value. By default, all floats are rounded to integer (tolerance
0.5); a value of 0 completely disables rounding; values in
between only round floats which are close to their integral
part within the tolerated range. Ignored if ttf=True.
remove_overlaps: If True, remove overlaps in glyph shapes.
overlaps_backend: name of the library to remove overlaps. Can be
either "booleanOperations" (default) or "pathops".
reverse_direction: If True, reverse contour directions when
compiling TrueType outlines.
conversion_error: Error to allow when converting cubic CFF contours
to quadratic TrueType contours.
feature_writers: list of ufo2ft-compatible feature writer classes
or pre-initialized objects that are passed on to ufo2ft
feature compiler to generate automatic feature code. The
default value (None) means that ufo2ft will use its built-in
default feature writers (for kern, mark, mkmk, etc.). An empty
list ([]) will skip any automatic feature generation.
interpolate_layout_from: A DesignSpaceDocument object to give varLib
for interpolating layout tables to use in output.
interpolate_layout_dir: Directory containing the compiled master
fonts to use for interpolating binary layout tables.
output_path: output font file path. Only works when the input
'ufos' list contains a single font.
output_dir: directory where to save output files. Mutually
exclusive with 'output_path' argument. | train | https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L399-L598 | [
"def _normpath(fname):\n return os.path.normcase(os.path.normpath(fname))\n",
"def _iter_compile(self, ufos, ttf=False, **kwargs):\n # generator function that calls ufo2ft compiler for each ufo and\n # yields ttFont instances\n options = dict(kwargs)\n if ttf:\n for key in (\"optimizeCFF\", \"roundTolerance\"):\n options.pop(key, None)\n compile_func, fmt = ufo2ft.compileTTF, \"TTF\"\n else:\n for key in (\"cubicConversionError\", \"reverseDirection\"):\n options.pop(key, None)\n compile_func, fmt = ufo2ft.compileOTF, \"OTF\"\n\n for ufo in ufos:\n name = self._font_name(ufo)\n logger.info(\"Building {} for {}\".format(fmt, name))\n\n yield compile_func(ufo, **options)\n",
"def subset_otf_from_ufo(self, otf_path, ufo):\n \"\"\"Subset a font using export flags set by glyphsLib.\n\n There are two more settings that can change export behavior:\n \"Export Glyphs\" and \"Remove Glyphs\", which are currently not supported\n for complexity reasons. See\n https://github.com/googlei18n/glyphsLib/issues/295.\n \"\"\"\n from fontTools import subset\n\n # ufo2ft always inserts a \".notdef\" glyph as the first glyph\n ufo_order = makeOfficialGlyphOrder(ufo)\n if \".notdef\" not in ufo_order:\n ufo_order.insert(0, \".notdef\")\n ot_order = TTFont(otf_path).getGlyphOrder()\n assert ot_order[0] == \".notdef\"\n assert len(ufo_order) == len(ot_order)\n\n for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY):\n keep_glyphs_list = ufo.lib.get(key)\n if keep_glyphs_list is not None:\n keep_glyphs = set(keep_glyphs_list)\n break\n else:\n keep_glyphs = None\n\n include = []\n for source_name, binary_name in zip(ufo_order, ot_order):\n if keep_glyphs and source_name not in keep_glyphs:\n continue\n\n if source_name in ufo:\n exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True)\n if not exported:\n continue\n\n include.append(binary_name)\n\n # copied from nototools.subset\n opt = subset.Options()\n opt.name_IDs = [\"*\"]\n opt.name_legacy = True\n opt.name_languages = [\"*\"]\n opt.layout_features = [\"*\"]\n opt.notdef_outline = True\n opt.recalc_bounds = True\n opt.recalc_timestamp = True\n opt.canonical_order = True\n\n opt.glyph_names = True\n\n font = subset.load_font(otf_path, opt, lazy=False)\n subsetter = subset.Subsetter(options=opt)\n subsetter.populate(glyphs=include)\n subsetter.subset(font)\n subset.save_font(font, otf_path, opt)\n",
"def _output_dir(\n self,\n ext,\n is_instance=False,\n interpolatable=False,\n autohinted=False,\n is_variable=False,\n):\n \"\"\"Generate an output directory.\n\n Args:\n ext: extension string.\n is_instance: The output is instance font or not.\n interpolatable: The output is interpolatable or not.\n autohinted: The output is autohinted or not.\n is_variable: The output is variable font or not.\n Return:\n output directory string.\n \"\"\"\n\n assert not (is_variable and any([is_instance, interpolatable]))\n # FIXME? Use user configurable destination folders.\n if is_variable:\n dir_prefix = \"variable_\"\n elif is_instance:\n dir_prefix = \"instance_\"\n else:\n dir_prefix = \"master_\"\n dir_suffix = \"_interpolatable\" if interpolatable else \"\"\n output_dir = dir_prefix + ext + dir_suffix\n if autohinted:\n output_dir = os.path.join(\"autohinted\", output_dir)\n return output_dir\n",
"def _designspace_locations(self, designspace):\n \"\"\"Map font filenames to their locations in a designspace.\"\"\"\n\n maps = []\n for elements in (designspace.sources, designspace.instances):\n location_map = {}\n for element in elements:\n path = _normpath(element.path)\n location_map[path] = element.location\n maps.append(location_map)\n return maps\n",
"def _closest_location(self, location_map, target):\n \"\"\"Return path of font whose location is closest to target.\"\"\"\n\n def dist(a, b):\n return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys()))\n\n paths = iter(location_map.keys())\n closest = next(paths)\n closest_dist = dist(target, location_map[closest])\n for path in paths:\n cur_dist = dist(target, location_map[path])\n if cur_dist < closest_dist:\n closest = path\n closest_dist = cur_dist\n return closest\n"
] | class FontProject(object):
"""Provides methods for building fonts."""
def __init__(self, timing=False, verbose="INFO", validate_ufo=False):
logging.basicConfig(level=getattr(logging, verbose.upper()))
logging.getLogger("fontTools.subset").setLevel(logging.WARNING)
if timing:
configLogger(logger=timer.logger, level=logging.DEBUG)
logger.debug(
"ufoLib UFO validation is %s", "enabled" if validate_ufo else "disabled"
)
setUfoLibReadValidate(validate_ufo)
setUfoLibWriteValidate(validate_ufo)
    @timer()
    def build_master_ufos(
        self,
        glyphs_path,
        designspace_path=None,
        master_dir=None,
        instance_dir=None,
        family_name=None,
        mti_source=None,
    ):
        """Build UFOs and MutatorMath designspace from Glyphs source.

        Args:
            glyphs_path: path to the .glyphs source file.
            designspace_path: output path for the generated designspace;
                defaults to glyphsLib's chosen filename inside master_dir.
            master_dir: directory for master UFOs (default "master_ufo").
            instance_dir: directory for instance UFOs (default "instance_ufo").
            family_name: optional family name passed through to glyphsLib.
            mti_source: optional plist mapping masters to MTI feature files.
        Returns:
            Path to the written designspace document.
        """
        import glyphsLib

        if master_dir is None:
            master_dir = self._output_dir("ufo")
        if not os.path.isdir(master_dir):
            os.mkdir(master_dir)
        if instance_dir is None:
            instance_dir = self._output_dir("ufo", is_instance=True)
        if not os.path.isdir(instance_dir):
            os.mkdir(instance_dir)

        font = glyphsLib.GSFont(glyphs_path)
        if designspace_path is not None:
            designspace_dir = os.path.dirname(designspace_path)
        else:
            designspace_dir = master_dir
        # glyphsLib.to_designspace expects instance_dir to be relative
        instance_dir = os.path.relpath(instance_dir, designspace_dir)
        designspace = glyphsLib.to_designspace(
            font, family_name=family_name, instance_dir=instance_dir
        )

        masters = {}
        # multiple sources can have the same font/filename (but different layer),
        # we want to save a font only once
        for source in designspace.sources:
            if source.filename in masters:
                assert source.font is masters[source.filename]
                continue
            ufo_path = os.path.join(master_dir, source.filename)
            # no need to also set the relative 'filename' attribute as that
            # will be auto-updated on writing the designspace document
            source.path = ufo_path
            source.font.save(ufo_path)
            masters[source.filename] = source.font

        if designspace_path is None:
            designspace_path = os.path.join(master_dir, designspace.filename)
        designspace.write(designspace_path)
        if mti_source:
            self.add_mti_features_to_master_ufos(mti_source, masters.values())
        return designspace_path
@timer()
def add_mti_features_to_master_ufos(self, mti_source, masters):
mti_dir = os.path.dirname(mti_source)
with open(mti_source, "rb") as mti_file:
mti_paths = readPlist(mti_file)
for master in masters:
key = os.path.basename(master.path).rstrip(".ufo")
for table, path in mti_paths[key].items():
with open(os.path.join(mti_dir, path), "rb") as mti_source:
ufo_path = (
"com.github.googlei18n.ufo2ft.mtiFeatures/%s.mti"
% table.strip()
)
master.data[ufo_path] = mti_source.read()
# If we have MTI sources, any Adobe feature files derived from
# the Glyphs file should be ignored. We clear it here because
# it only contains junk information anyway.
master.features.text = ""
master.save()
@_deprecated
@timer()
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)):
"""Remove overlaps in UFOs' glyphs' contours."""
from booleanOperations import union, BooleanOperationsError
for ufo in ufos:
font_name = self._font_name(ufo)
logger.info("Removing overlaps for " + font_name)
for glyph in ufo:
if not glyph_filter(glyph):
continue
contours = list(glyph)
glyph.clearContours()
try:
union(contours, glyph.getPointPen())
except BooleanOperationsError:
logger.error(
"Failed to remove overlaps for %s: %r", font_name, glyph.name
)
raise
@_deprecated
@timer()
def decompose_glyphs(self, ufos, glyph_filter=lambda g: True):
"""Move components of UFOs' glyphs to their outlines."""
for ufo in ufos:
logger.info("Decomposing glyphs for " + self._font_name(ufo))
for glyph in ufo:
if not glyph.components or not glyph_filter(glyph):
continue
self._deep_copy_contours(ufo, glyph, glyph, Transform())
glyph.clearComponents()
    def _deep_copy_contours(self, ufo, parent, component, transformation):
        """Copy contours from component to parent, including nested components.

        Recurses depth-first, accumulating each nested component's transform
        so deeply nested outlines land in the parent's coordinate space.
        """
        for nested in component.components:
            self._deep_copy_contours(
                ufo,
                parent,
                ufo[nested.baseGlyph],
                transformation.transform(nested.transformation),
            )
        # The top-level call passes parent == component (the glyph itself),
        # whose own contours must not be re-drawn into itself.
        if component != parent:
            pen = TransformPen(parent.getPen(), transformation)
            # if the transformation has a negative determinant, it will reverse
            # the contour direction of the component
            xx, xy, yx, yy = transformation[:4]
            if xx * yy - xy * yx < 0:
                pen = ReverseContourPen(pen)
            component.draw(pen)
@_deprecated
@timer()
def convert_curves(
self, ufos, compatible=False, reverse_direction=True, conversion_error=None
):
from cu2qu.ufo import font_to_quadratic, fonts_to_quadratic
if compatible:
logger.info("Converting curves compatibly")
fonts_to_quadratic(
ufos,
max_err_em=conversion_error,
reverse_direction=reverse_direction,
dump_stats=True,
)
else:
for ufo in ufos:
logger.info("Converting curves for " + self._font_name(ufo))
font_to_quadratic(
ufo,
max_err_em=conversion_error,
reverse_direction=reverse_direction,
dump_stats=True,
)
    def build_otfs(self, ufos, **kwargs):
        """Build OpenType binaries with CFF outlines.

        Thin wrapper around save_otfs (which defaults to ttf=False).
        """
        self.save_otfs(ufos, **kwargs)
    def build_ttfs(self, ufos, **kwargs):
        """Build OpenType binaries with TrueType outlines.

        Thin wrapper around save_otfs with ttf=True.
        """
        self.save_otfs(ufos, ttf=True, **kwargs)
@staticmethod
def _load_designspace_sources(designspace):
# set source.font attributes, but only load fonts once
masters = {}
for source in designspace.sources:
if source.path in masters:
source.font = masters[source.path]
else:
assert source.path is not None
source.font = Font(source.path)
masters[source.path] = source.font
def _build_interpolatable_masters(
self,
designspace,
ttf,
use_production_names=None,
reverse_direction=True,
conversion_error=None,
feature_writers=None,
cff_round_tolerance=None,
**kwargs
):
if hasattr(designspace, "__fspath__"):
ds_path = designspace.__fspath__()
if isinstance(designspace, basestring):
ds_path = designspace
else:
# reload designspace from its path so we have a new copy
# that can be modified in-place.
ds_path = designspace.path
if ds_path is not None:
designspace = designspaceLib.DesignSpaceDocument.fromfile(ds_path)
self._load_designspace_sources(designspace)
if ttf:
return ufo2ft.compileInterpolatableTTFsFromDS(
designspace,
useProductionNames=use_production_names,
reverseDirection=reverse_direction,
cubicConversionError=conversion_error,
featureWriters=feature_writers,
inplace=True,
)
else:
return ufo2ft.compileInterpolatableOTFsFromDS(
designspace,
useProductionNames=use_production_names,
roundTolerance=cff_round_tolerance,
featureWriters=feature_writers,
inplace=True,
)
    def build_interpolatable_ttfs(self, designspace, **kwargs):
        """Build OpenType binaries with interpolatable TrueType outlines
        from DesignSpaceDocument object.
        """
        # Delegate to the shared master builder with TrueType outlines.
        return self._build_interpolatable_masters(designspace, ttf=True, **kwargs)
    def build_interpolatable_otfs(self, designspace, **kwargs):
        """Build OpenType binaries with interpolatable CFF outlines
        from DesignSpaceDocument object.
        """
        # NOTE: earlier docstring said "TrueType" (copy-paste slip); this
        # builds the CFF (.otf) flavor via ttf=False.
        return self._build_interpolatable_masters(designspace, ttf=False, **kwargs)
    def build_variable_font(
        self,
        designspace,
        output_path=None,
        output_dir=None,
        master_bin_dir=None,
        ttf=True,
    ):
        """Build OpenType variable font from masters in a designspace.

        Args:
            designspace: a DesignSpaceDocument object whose sources carry
                compiled TTFont objects, or a (str / os.PathLike) path to a
                designspace file whose compiled masters are found on disk.
            output_path: explicit output file path (exclusive of output_dir).
            output_dir: directory for the default-named "<name>-VF" output.
            master_bin_dir: directory holding compiled master binaries.
            ttf: build the TrueType (True) or CFF (False) flavor.
        """
        assert not (output_path and output_dir), "mutually exclusive args"
        ext = "ttf" if ttf else "otf"
        # normalize a path-like argument to a plain string path
        if hasattr(designspace, "__fspath__"):
            designspace = designspace.__fspath__()
        if isinstance(designspace, basestring):
            designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace)
            # path input: locate master binaries on disk via the varLib finder
            if master_bin_dir is None:
                master_bin_dir = self._output_dir(ext, interpolatable=True)
            finder = partial(_varLib_finder, directory=master_bin_dir)
        else:
            # document input: sources must already hold compiled TTFont objects
            assert all(isinstance(s.font, TTFont) for s in designspace.sources)
            finder = lambda s: s  # noqa: E731
        if output_path is None:
            output_path = (
                os.path.splitext(os.path.basename(designspace.path))[0] + "-VF"
            )
            output_path = self._output_path(
                output_path, ext, is_variable=True, output_dir=output_dir
            )
        logger.info("Building variable font " + output_path)
        font, _, _ = varLib.build(designspace, finder)
        font.save(output_path)
def _iter_compile(self, ufos, ttf=False, **kwargs):
# generator function that calls ufo2ft compiler for each ufo and
# yields ttFont instances
options = dict(kwargs)
if ttf:
for key in ("optimizeCFF", "roundTolerance"):
options.pop(key, None)
compile_func, fmt = ufo2ft.compileTTF, "TTF"
else:
for key in ("cubicConversionError", "reverseDirection"):
options.pop(key, None)
compile_func, fmt = ufo2ft.compileOTF, "OTF"
for ufo in ufos:
name = self._font_name(ufo)
logger.info("Building {} for {}".format(fmt, name))
yield compile_func(ufo, **options)
    @timer()
    def _save_interpolatable_fonts(self, designspace, output_dir, ttf):
        """Save compiled interpolatable masters and the updated designspace.

        Each source's in-memory TTFont is written to disk, and the document
        is rewritten so its sources point at the saved binaries.
        """
        ext = "ttf" if ttf else "otf"
        for source in designspace.sources:
            assert isinstance(source.font, TTFont)
            otf_path = self._output_path(
                source,
                ext,
                is_instance=False,
                interpolatable=True,
                output_dir=output_dir,
                # the layer name distinguishes multiple sources from one UFO
                suffix=source.layerName,
            )
            logger.info("Saving %s", otf_path)
            source.font.save(otf_path)
            # the saved binary replaces the UFO+layer reference
            source.path = otf_path
            source.layerName = None
        for instance in designspace.instances:
            # instances are not compiled here, so drop any stale references
            instance.path = instance.filename = None
        if output_dir is None:
            output_dir = self._output_dir(ext, interpolatable=True)
        designspace_path = os.path.join(output_dir, os.path.basename(designspace.path))
        logger.info("Saving %s", designspace_path)
        designspace.write(designspace_path)
    def subset_otf_from_ufo(self, otf_path, ufo):
        """Subset a font using export flags set by glyphsLib.

        The binary at `otf_path` is rewritten in place.

        There are two more settings that can change export behavior:
        "Export Glyphs" and "Remove Glyphs", which are currently not supported
        for complexity reasons. See
        https://github.com/googlei18n/glyphsLib/issues/295.
        """
        from fontTools import subset

        # ufo2ft always inserts a ".notdef" glyph as the first glyph
        ufo_order = makeOfficialGlyphOrder(ufo)
        if ".notdef" not in ufo_order:
            ufo_order.insert(0, ".notdef")
        ot_order = TTFont(otf_path).getGlyphOrder()
        assert ot_order[0] == ".notdef"
        assert len(ufo_order) == len(ot_order)

        # look for a keep-glyphs list under the new key first, then the old one
        for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY):
            keep_glyphs_list = ufo.lib.get(key)
            if keep_glyphs_list is not None:
                keep_glyphs = set(keep_glyphs_list)
                break
        else:
            keep_glyphs = None

        include = []
        # ufo_order and ot_order run in parallel, so each binary (possibly
        # production) name is selected by its source glyph's export flags
        for source_name, binary_name in zip(ufo_order, ot_order):
            if keep_glyphs and source_name not in keep_glyphs:
                continue

            if source_name in ufo:
                exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True)
                if not exported:
                    continue

            include.append(binary_name)

        # copied from nototools.subset
        opt = subset.Options()
        opt.name_IDs = ["*"]
        opt.name_legacy = True
        opt.name_languages = ["*"]
        opt.layout_features = ["*"]
        opt.notdef_outline = True
        opt.recalc_bounds = True
        opt.recalc_timestamp = True
        opt.canonical_order = True

        opt.glyph_names = True

        font = subset.load_font(otf_path, opt, lazy=False)
        subsetter = subset.Subsetter(options=opt)
        subsetter.populate(glyphs=include)
        subsetter.subset(font)
        subset.save_font(font, otf_path, opt)
def run_from_glyphs(
self,
glyphs_path,
designspace_path=None,
master_dir=None,
instance_dir=None,
family_name=None,
mti_source=None,
**kwargs
):
"""Run toolchain from Glyphs source.
Args:
glyphs_path: Path to source file.
designspace_path: Output path of generated designspace document.
By default it's "<family_name>[-<base_style>].designspace".
master_dir: Directory where to save UFO masters (default:
"master_ufo").
instance_dir: Directory where to save UFO instances (default:
"instance_ufo").
family_name: If provided, uses this family name in the output.
mti_source: Path to property list file containing a dictionary
mapping UFO masters to dictionaries mapping layout table
tags to MTI source paths which should be compiled into
those tables.
kwargs: Arguments passed along to run_from_designspace.
"""
logger.info("Building master UFOs and designspace from Glyphs source")
designspace_path = self.build_master_ufos(
glyphs_path,
designspace_path=designspace_path,
master_dir=master_dir,
instance_dir=instance_dir,
family_name=family_name,
mti_source=mti_source,
)
self.run_from_designspace(designspace_path, **kwargs)
    def interpolate_instance_ufos(
        self,
        designspace,
        include=None,
        round_instances=False,
        expand_features_to_instances=False,
    ):
        """Interpolate master UFOs with MutatorMath and return instance UFOs.

        Args:
            designspace: a DesignSpaceDocument object containing sources and
                instances.
            include (str): optional regular expression pattern to match the
                DS instance 'name' attribute and only interpolate the matching
                instances.
            round_instances (bool): round instances' coordinates to integer.
            expand_features_to_instances: parses the master feature file, expands all
                include()s and writes the resulting full feature file to all instance
                UFOs. Use this if you share feature files among masters in external
                files. Otherwise, the relative include paths can break as instances
                may end up elsewhere. Only done on interpolation.
        Returns:
            list of defcon.Font objects corresponding to the UFO instances.
        Raises:
            FontmakeError: if any of the sources defines a custom 'layer', for
                this is not supported by MutatorMath.
            ValueError: "expand_features_to_instances" is True but no source in the
                designspace document is designated with '<features copy="1"/>'.
        """
        from glyphsLib.interpolation import apply_instance_data
        from mutatorMath.ufo.document import DesignSpaceDocumentReader

        # MutatorMath interpolates whole UFOs; layer-restricted sources
        # cannot be represented, so fail loudly up front.
        if any(source.layerName is not None for source in designspace.sources):
            raise FontmakeError(
                "MutatorMath doesn't support DesignSpace sources with 'layer' "
                "attribute"
            )

        # TODO: replace mutatorMath with ufoProcessor?
        builder = DesignSpaceDocumentReader(
            designspace.path, ufoVersion=3, roundGeometry=round_instances, verbose=True
        )
        logger.info("Interpolating master UFOs from designspace")
        if include is not None:
            instances = self._search_instances(designspace, pattern=include)
            for instance_name in instances:
                builder.readInstance(("name", instance_name))
            filenames = set(instances.values())
        else:
            builder.readInstances()
            filenames = None  # will include all instances

        logger.info("Applying instance data from designspace")
        instance_ufos = apply_instance_data(designspace, include_filenames=filenames)

        if expand_features_to_instances:
            logger.debug("Expanding features to instance UFOs")
            # the source marked with <features copy="1"/> owns the features
            master_source = next(
                (s for s in designspace.sources if s.copyFeatures), None
            )
            if not master_source:
                raise ValueError("No source is designated as the master for features.")
            else:
                master_source_font = builder.sources[master_source.name][0]
                # asFea() resolves include()s into self-contained feature text
                master_source_features = parseLayoutFeatures(master_source_font).asFea()
                for instance_ufo in instance_ufos:
                    instance_ufo.features.text = master_source_features
                    instance_ufo.save()

        return instance_ufos
def run_from_designspace(
self,
designspace_path,
output=(),
interpolate=False,
masters_as_instances=False,
interpolate_binary_layout=False,
round_instances=False,
feature_writers=None,
expand_features_to_instances=False,
**kwargs
):
"""Run toolchain from a DesignSpace document to produce either static
instance fonts (ttf or otf), interpolatable or variable fonts.
Args:
designspace_path: Path to designspace document.
interpolate: If True output all instance fonts, otherwise just
masters. If the value is a string, only build instance(s) that
match given name. The string is compiled into a regular
expression and matched against the "name" attribute of
designspace instances using `re.fullmatch`.
masters_as_instances: If True, output master fonts as instances.
interpolate_binary_layout: Interpolate layout tables from compiled
master binaries.
round_instances: apply integer rounding when interpolating with
MutatorMath.
kwargs: Arguments passed along to run_from_ufos.
Raises:
TypeError: "variable" or "interpolatable" outputs are incompatible
with arguments "interpolate", "masters_as_instances", and
"interpolate_binary_layout".
"""
interp_outputs = INTERPOLATABLE_OUTPUTS.intersection(output)
static_outputs = STATIC_OUTPUTS.intersection(output)
if interp_outputs:
for argname in (
"interpolate",
"masters_as_instances",
"interpolate_binary_layout",
):
if locals()[argname]:
raise TypeError(
'"%s" argument incompatible with output %r'
% (argname, ", ".join(sorted(interp_outputs)))
)
designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace_path)
# if no --feature-writers option was passed, check in the designspace's
# <lib> element if user supplied a custom featureWriters configuration;
# if so, use that for all the UFOs built from this designspace
if feature_writers is None and FEATURE_WRITERS_KEY in designspace.lib:
feature_writers = loadFeatureWriters(designspace)
if static_outputs:
self._run_from_designspace_static(
designspace,
outputs=static_outputs,
interpolate=interpolate,
masters_as_instances=masters_as_instances,
interpolate_binary_layout=interpolate_binary_layout,
round_instances=round_instances,
feature_writers=feature_writers,
expand_features_to_instances=expand_features_to_instances,
**kwargs
)
if interp_outputs:
self._run_from_designspace_interpolatable(
designspace,
outputs=interp_outputs,
feature_writers=feature_writers,
**kwargs
)
def _run_from_designspace_static(
self,
designspace,
outputs,
interpolate=False,
masters_as_instances=False,
interpolate_binary_layout=False,
round_instances=False,
feature_writers=None,
expand_features_to_instances=False,
**kwargs
):
ufos = []
if not interpolate or masters_as_instances:
ufos.extend((s.path for s in designspace.sources if s.path))
if interpolate:
pattern = interpolate if isinstance(interpolate, basestring) else None
ufos.extend(
self.interpolate_instance_ufos(
designspace,
include=pattern,
round_instances=round_instances,
expand_features_to_instances=expand_features_to_instances,
)
)
if interpolate_binary_layout is False:
interpolate_layout_from = interpolate_layout_dir = None
else:
interpolate_layout_from = designspace
if isinstance(interpolate_binary_layout, basestring):
interpolate_layout_dir = interpolate_binary_layout
else:
interpolate_layout_dir = None
self.run_from_ufos(
ufos,
output=outputs,
is_instance=(interpolate or masters_as_instances),
interpolate_layout_from=interpolate_layout_from,
interpolate_layout_dir=interpolate_layout_dir,
feature_writers=feature_writers,
**kwargs
)
def _run_from_designspace_interpolatable(
self, designspace, outputs, output_path=None, output_dir=None, **kwargs
):
ttf_designspace = otf_designspace = None
if "variable" in outputs:
ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
self.build_variable_font(
ttf_designspace, output_path=output_path, output_dir=output_dir
)
if "ttf-interpolatable" in outputs:
if ttf_designspace is None:
ttf_designspace = self.build_interpolatable_ttfs(designspace, **kwargs)
self._save_interpolatable_fonts(ttf_designspace, output_dir, ttf=True)
if "variable-cff2" in outputs:
otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
self.build_variable_font(
otf_designspace,
output_path=output_path,
output_dir=output_dir,
ttf=False,
)
if "otf-interpolatable" in outputs:
if otf_designspace is None:
otf_designspace = self.build_interpolatable_otfs(designspace, **kwargs)
self._save_interpolatable_fonts(otf_designspace, output_dir, ttf=False)
def run_from_ufos(self, ufos, output=(), **kwargs):
"""Run toolchain from UFO sources.
Args:
ufos: List of UFO sources, as either paths or opened objects.
output: List of output formats to generate.
kwargs: Arguments passed along to save_otfs.
"""
if set(output) == {"ufo"}:
return
# the `ufos` parameter can be a list of UFO objects
# or it can be a path (string) with a glob syntax
ufo_paths = []
if isinstance(ufos, basestring):
ufo_paths = glob.glob(ufos)
ufos = [Font(x) for x in ufo_paths]
elif isinstance(ufos, list):
# ufos can be either paths or open Font objects, so normalize them
ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
ufo_paths = [x.path for x in ufos]
else:
raise FontmakeError(
"UFOs parameter is neither a defcon.Font object, a path or a glob, "
"nor a list of any of these.",
ufos,
)
need_reload = False
if "otf" in output:
self.build_otfs(ufos, **kwargs)
need_reload = True
if "ttf" in output:
if need_reload:
ufos = [Font(path) for path in ufo_paths]
self.build_ttfs(ufos, **kwargs)
need_reload = True
@staticmethod
def _search_instances(designspace, pattern):
instances = OrderedDict()
for instance in designspace.instances:
# is 'name' optional? 'filename' certainly must not be
if fullmatch(pattern, instance.name):
instances[instance.name] = instance.filename
if not instances:
raise FontmakeError("No instance found with %r" % pattern)
return instances
def _font_name(self, ufo):
"""Generate a postscript-style font name."""
family_name = (
ufo.info.familyName.replace(" ", "")
if ufo.info.familyName is not None
else "None"
)
style_name = (
ufo.info.styleName.replace(" ", "")
if ufo.info.styleName is not None
else "None"
)
return "{}-{}".format(family_name, style_name)
def _output_dir(
self,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
):
"""Generate an output directory.
Args:
ext: extension string.
is_instance: The output is instance font or not.
interpolatable: The output is interpolatable or not.
autohinted: The output is autohinted or not.
is_variable: The output is variable font or not.
Return:
output directory string.
"""
assert not (is_variable and any([is_instance, interpolatable]))
# FIXME? Use user configurable destination folders.
if is_variable:
dir_prefix = "variable_"
elif is_instance:
dir_prefix = "instance_"
else:
dir_prefix = "master_"
dir_suffix = "_interpolatable" if interpolatable else ""
output_dir = dir_prefix + ext + dir_suffix
if autohinted:
output_dir = os.path.join("autohinted", output_dir)
return output_dir
def _output_path(
self,
ufo_or_font_name,
ext,
is_instance=False,
interpolatable=False,
autohinted=False,
is_variable=False,
output_dir=None,
suffix=None,
):
"""Generate output path for a font file with given extension."""
if isinstance(ufo_or_font_name, basestring):
font_name = ufo_or_font_name
elif ufo_or_font_name.path:
font_name = os.path.splitext(
os.path.basename(os.path.normpath(ufo_or_font_name.path))
)[0]
else:
font_name = self._font_name(ufo_or_font_name)
if output_dir is None:
output_dir = self._output_dir(
ext, is_instance, interpolatable, autohinted, is_variable
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if suffix:
return os.path.join(output_dir, "{}-{}.{}".format(font_name, suffix, ext))
else:
return os.path.join(output_dir, "{}.{}".format(font_name, ext))
def _designspace_locations(self, designspace):
"""Map font filenames to their locations in a designspace."""
maps = []
for elements in (designspace.sources, designspace.instances):
location_map = {}
for element in elements:
path = _normpath(element.path)
location_map[path] = element.location
maps.append(location_map)
return maps
def _closest_location(self, location_map, target):
"""Return path of font whose location is closest to target."""
def dist(a, b):
return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys()))
paths = iter(location_map.keys())
closest = next(paths)
closest_dist = dist(target, location_map[closest])
for path in paths:
cur_dist = dist(target, location_map[path])
if cur_dist < closest_dist:
closest = path
closest_dist = cur_dist
return closest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.