repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
Othernet-Project/conz
|
conz/console.py
|
Console.pwa
|
python
|
def pwa(self, val, wa='WARN'):
self.pstd(self.color.yellow('{}: {}'.format(val, wa)))
|
Print val: WARN in yellow on STDOUT
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L87-L89
|
[
"def pstd(self, *args, **kwargs):\n \"\"\" Console to STDOUT \"\"\"\n kwargs['file'] = self.out\n self.print(*args, **kwargs)\n sys.stdout.flush()\n"
] |
class Console:
"""
Wrapper around print with helper methods that cover typical ``print()``
usage in console programs.
"""
ProgressEnd = progress.ProgressEnd
ProgressOK = progress.ProgressOK
ProgressAbrt = progress.ProgressAbrt
color = ansi_colors.color
def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
debug=False):
"""
``verbose`` flag controls suppression of verbose outputs (those printed
using ``pverb()`` method). The verbose output is usually a helpful
message for interactive applications, but may break other scripts in
pipes.
``stdout`` and ``stderrr`` are the default STDOUT file for all
``print()`` calls.
To enable debugging (e.g., printing stack traces), use the ``debug``
argument and set it to ``True``.
"""
self.verbose = verbose
self.out = stdout
self.err = stderr
self.register_signals()
self.debug = debug
def print(self, *args, **kwargs):
""" Thin wrapper around print
All other methods must go through this method for all printing needs.
"""
print(*args, **kwargs)
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush()
def perr(self, *args, **kwargs):
""" Console to STERR """
kwargs['file'] = self.err
self.print(*args, **kwargs)
sys.stderr.flush()
def pok(self, val, ok='OK'):
""" Print val: OK in green on STDOUT """
self.pstd(self.color.green('{}: {}'.format(val, ok)))
def png(self, val, ng='ERR'):
""" Print val: ERR in red on STDOUT """
self.pstd(self.color.red('{}: {}'.format(val, ng)))
def pverr(self, val, msg, *args, **kwargs):
kwargs.setdefault('file', self.err)
self.print('{}: {}'.format(val, msg), *args, **kwargs)
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs)
def quit(self, code=0):
sys.exit(code)
def read(self, prompt='', clean=lambda x: x):
""" Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
"""
ans = read(prompt + ' ')
return clean(ans)
def rvpl(self, prompt, error='Entered value is invalid', intro=None,
validator=lambda x: x != '', clean=lambda x: x.strip(),
strict=True, default=None):
""" Start a read-validate-print loop
The RVPL will read the user input, validate it, and loop until the
entered value passes the validation, then return it.
Error message can be customized using the ``error`` argument. If the
value is a callable, it will be called with the value and it will be
expected to return a printable message. Exceptions raised by the
``error`` function are not trapped.
When ``intro`` is passed, it is printed above the prompt.
The ``validator`` argument is is a function that validates the user
input. Default validator simply validates if user entered any value.
The ``clean`` argument specifies a function for the ``read()`` method
with the same semantics.
"""
if intro:
self.pstd(utils.rewrap_long(intro))
val = self.read(prompt, clean)
while not validator(val):
if not strict:
return default
if hasattr(error, '__call__'):
self.perr(error(val))
else:
self.perr(error)
val = self.read(prompt, clean)
return val
def yesno(self, prompt, error='Please type either y or n', intro=None,
default=None):
""" Ask user for yes or no answer
The prompt will include a typical '(y/n):' at the end. Depending on
whether ``default`` was specified, this may also be '(Y/n):' or
'(y/N):'.
The ``default`` argument can be ``True`` or ``False``, with meaning of
'yes' and 'no' respectively. Default is ``None`` which means no
default. When default value is specified, malformed or empty response
will cause the ``default`` value to be returned.
Optional ``intro`` text can be specified which will be shown above the
prompt.
"""
if default is None:
prompt += ' (y/n):'
else:
if default is True:
prompt += ' (Y/n):'
default = 'y'
if default is False:
prompt += ' (y/N):'
default = 'n'
validator = lambda x: x in ['y', 'yes', 'n', 'no']
val = self.rvpl(prompt, error=error, intro=intro, validator=validator,
clean=lambda x: x.strip().lower(),
strict=default is None, default=default)
return val in ['y', 'yes']
def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)]
def readpipe(self, chunk=None):
""" Return iterator that iterates over STDIN line by line
If ``chunk`` is set to a positive non-zero integer value, then the
reads are performed in chunks of that many lines, and returned as a
list. Otherwise the lines are returned one by one.
"""
read = []
while True:
l = sys.stdin.readline()
if not l:
if read:
yield read
return
return
if not chunk:
yield l
else:
read.append(l)
if len(read) == chunk:
yield read
@property
def interm(self):
return hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
@property
def outterm(self):
return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
def register_signals(self):
signal.signal(signal.SIGINT, self.onint)
signal.signal(signal.SIGPIPE, self.onpipe)
def onint(self, signum, exc):
self.perr('\nQuitting program due to keyboard interrupt')
self.quit(1)
def onpipe(self, signup, exc):
self.quit(1)
def error(self, msg='Program error: {err}', exit=None):
""" Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler.
"""
def handler(exc):
if msg:
self.perr(msg.format(err=exc))
if exit is not None:
self.quit(exit)
return handler
@contextlib.contextmanager
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
prog='.', excs=(Exception,), reraise=True):
""" Context manager for handling interactive prog indication
This context manager streamlines presenting banners and prog
indicators. To start the prog, pass ``msg`` argument as a start
message. For example::
printer = Console(verbose=True)
with printer.progress('Checking files') as prog:
# Do some checks
if errors:
prog.abrt()
prog.end()
The context manager returns a ``Progress`` instance, which provides
methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
prog indicator).
The prog methods like ``abrt()`` and ``end()`` will raise an
exception that interrupts the prog. These exceptions are
``ProgressEnd`` exception subclasses and are ``ProgressAbrt`` and
``ProgressOK`` respectively. They are silenced and not handled in any
way as they only serve the purpose of flow control.
Other exceptions are trapped and ``abrt()`` is called. The exceptions
that should be trapped can be customized using the ``excs`` argument,
which should be a tuple of exception classes.
If a handler function is passed using ``onerror`` argument, then this
function takes the raised exception and handles it. By default, the
``error()`` factory is called with no arguments to generate the default
error handler. If string is passed, then ``error()`` factory is called
with that string.
Finally, when prog is aborted either naturally or when exception is
raised, it is possible to reraise the ``ProgressAbrt`` exception. This
is done using the ``reraise`` flag. Default is to reraise.
"""
if not onerror:
onerror = self.error()
if type(onerror) is str:
onerror = self.error(msg=onerror)
self.pverb(msg, end=sep)
prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
try:
yield prog
prog.end()
except self.ProgressOK:
pass
except self.ProgressAbrt as err:
if reraise:
raise err
except KeyboardInterrupt:
raise
except excs as err:
prog.abrt(noraise=True)
if onerror:
onerror(err)
if self.debug:
traceback.print_exc()
if reraise:
raise self.ProgressAbrt()
|
Othernet-Project/conz
|
conz/console.py
|
Console.pverb
|
python
|
def pverb(self, *args, **kwargs):
if not self.verbose:
return
self.pstd(*args, **kwargs)
|
Console verbose message to STDOUT
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L95-L99
|
[
"def pstd(self, *args, **kwargs):\n \"\"\" Console to STDOUT \"\"\"\n kwargs['file'] = self.out\n self.print(*args, **kwargs)\n sys.stdout.flush()\n"
] |
class Console:
"""
Wrapper around print with helper methods that cover typical ``print()``
usage in console programs.
"""
ProgressEnd = progress.ProgressEnd
ProgressOK = progress.ProgressOK
ProgressAbrt = progress.ProgressAbrt
color = ansi_colors.color
def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
debug=False):
"""
``verbose`` flag controls suppression of verbose outputs (those printed
using ``pverb()`` method). The verbose output is usually a helpful
message for interactive applications, but may break other scripts in
pipes.
``stdout`` and ``stderrr`` are the default STDOUT file for all
``print()`` calls.
To enable debugging (e.g., printing stack traces), use the ``debug``
argument and set it to ``True``.
"""
self.verbose = verbose
self.out = stdout
self.err = stderr
self.register_signals()
self.debug = debug
def print(self, *args, **kwargs):
""" Thin wrapper around print
All other methods must go through this method for all printing needs.
"""
print(*args, **kwargs)
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush()
def perr(self, *args, **kwargs):
""" Console to STERR """
kwargs['file'] = self.err
self.print(*args, **kwargs)
sys.stderr.flush()
def pok(self, val, ok='OK'):
""" Print val: OK in green on STDOUT """
self.pstd(self.color.green('{}: {}'.format(val, ok)))
def png(self, val, ng='ERR'):
""" Print val: ERR in red on STDOUT """
self.pstd(self.color.red('{}: {}'.format(val, ng)))
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa)))
def pverr(self, val, msg, *args, **kwargs):
kwargs.setdefault('file', self.err)
self.print('{}: {}'.format(val, msg), *args, **kwargs)
def quit(self, code=0):
sys.exit(code)
def read(self, prompt='', clean=lambda x: x):
""" Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
"""
ans = read(prompt + ' ')
return clean(ans)
def rvpl(self, prompt, error='Entered value is invalid', intro=None,
validator=lambda x: x != '', clean=lambda x: x.strip(),
strict=True, default=None):
""" Start a read-validate-print loop
The RVPL will read the user input, validate it, and loop until the
entered value passes the validation, then return it.
Error message can be customized using the ``error`` argument. If the
value is a callable, it will be called with the value and it will be
expected to return a printable message. Exceptions raised by the
``error`` function are not trapped.
When ``intro`` is passed, it is printed above the prompt.
The ``validator`` argument is is a function that validates the user
input. Default validator simply validates if user entered any value.
The ``clean`` argument specifies a function for the ``read()`` method
with the same semantics.
"""
if intro:
self.pstd(utils.rewrap_long(intro))
val = self.read(prompt, clean)
while not validator(val):
if not strict:
return default
if hasattr(error, '__call__'):
self.perr(error(val))
else:
self.perr(error)
val = self.read(prompt, clean)
return val
def yesno(self, prompt, error='Please type either y or n', intro=None,
default=None):
""" Ask user for yes or no answer
The prompt will include a typical '(y/n):' at the end. Depending on
whether ``default`` was specified, this may also be '(Y/n):' or
'(y/N):'.
The ``default`` argument can be ``True`` or ``False``, with meaning of
'yes' and 'no' respectively. Default is ``None`` which means no
default. When default value is specified, malformed or empty response
will cause the ``default`` value to be returned.
Optional ``intro`` text can be specified which will be shown above the
prompt.
"""
if default is None:
prompt += ' (y/n):'
else:
if default is True:
prompt += ' (Y/n):'
default = 'y'
if default is False:
prompt += ' (y/N):'
default = 'n'
validator = lambda x: x in ['y', 'yes', 'n', 'no']
val = self.rvpl(prompt, error=error, intro=intro, validator=validator,
clean=lambda x: x.strip().lower(),
strict=default is None, default=default)
return val in ['y', 'yes']
def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)]
def readpipe(self, chunk=None):
""" Return iterator that iterates over STDIN line by line
If ``chunk`` is set to a positive non-zero integer value, then the
reads are performed in chunks of that many lines, and returned as a
list. Otherwise the lines are returned one by one.
"""
read = []
while True:
l = sys.stdin.readline()
if not l:
if read:
yield read
return
return
if not chunk:
yield l
else:
read.append(l)
if len(read) == chunk:
yield read
@property
def interm(self):
return hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
@property
def outterm(self):
return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
def register_signals(self):
signal.signal(signal.SIGINT, self.onint)
signal.signal(signal.SIGPIPE, self.onpipe)
def onint(self, signum, exc):
self.perr('\nQuitting program due to keyboard interrupt')
self.quit(1)
def onpipe(self, signup, exc):
self.quit(1)
def error(self, msg='Program error: {err}', exit=None):
""" Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler.
"""
def handler(exc):
if msg:
self.perr(msg.format(err=exc))
if exit is not None:
self.quit(exit)
return handler
@contextlib.contextmanager
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
prog='.', excs=(Exception,), reraise=True):
""" Context manager for handling interactive prog indication
This context manager streamlines presenting banners and prog
indicators. To start the prog, pass ``msg`` argument as a start
message. For example::
printer = Console(verbose=True)
with printer.progress('Checking files') as prog:
# Do some checks
if errors:
prog.abrt()
prog.end()
The context manager returns a ``Progress`` instance, which provides
methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
prog indicator).
The prog methods like ``abrt()`` and ``end()`` will raise an
exception that interrupts the prog. These exceptions are
``ProgressEnd`` exception subclasses and are ``ProgressAbrt`` and
``ProgressOK`` respectively. They are silenced and not handled in any
way as they only serve the purpose of flow control.
Other exceptions are trapped and ``abrt()`` is called. The exceptions
that should be trapped can be customized using the ``excs`` argument,
which should be a tuple of exception classes.
If a handler function is passed using ``onerror`` argument, then this
function takes the raised exception and handles it. By default, the
``error()`` factory is called with no arguments to generate the default
error handler. If string is passed, then ``error()`` factory is called
with that string.
Finally, when prog is aborted either naturally or when exception is
raised, it is possible to reraise the ``ProgressAbrt`` exception. This
is done using the ``reraise`` flag. Default is to reraise.
"""
if not onerror:
onerror = self.error()
if type(onerror) is str:
onerror = self.error(msg=onerror)
self.pverb(msg, end=sep)
prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
try:
yield prog
prog.end()
except self.ProgressOK:
pass
except self.ProgressAbrt as err:
if reraise:
raise err
except KeyboardInterrupt:
raise
except excs as err:
prog.abrt(noraise=True)
if onerror:
onerror(err)
if self.debug:
traceback.print_exc()
if reraise:
raise self.ProgressAbrt()
|
Othernet-Project/conz
|
conz/console.py
|
Console.read
|
python
|
def read(self, prompt='', clean=lambda x: x):
ans = read(prompt + ' ')
return clean(ans)
|
Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L104-L116
|
[
"def safeint(s):\n \"\"\" Convert the string to int without raising errors \"\"\"\n try:\n return int(s.strip())\n except (TypeError, ValueError):\n return None\n",
"def read(self, prompt='', clean=lambda x: x):\n",
"validator=lambda x: x != '', clean=lambda x: x.strip(),\n"
] |
class Console:
"""
Wrapper around print with helper methods that cover typical ``print()``
usage in console programs.
"""
ProgressEnd = progress.ProgressEnd
ProgressOK = progress.ProgressOK
ProgressAbrt = progress.ProgressAbrt
color = ansi_colors.color
def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
debug=False):
"""
``verbose`` flag controls suppression of verbose outputs (those printed
using ``pverb()`` method). The verbose output is usually a helpful
message for interactive applications, but may break other scripts in
pipes.
``stdout`` and ``stderrr`` are the default STDOUT file for all
``print()`` calls.
To enable debugging (e.g., printing stack traces), use the ``debug``
argument and set it to ``True``.
"""
self.verbose = verbose
self.out = stdout
self.err = stderr
self.register_signals()
self.debug = debug
def print(self, *args, **kwargs):
""" Thin wrapper around print
All other methods must go through this method for all printing needs.
"""
print(*args, **kwargs)
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush()
def perr(self, *args, **kwargs):
""" Console to STERR """
kwargs['file'] = self.err
self.print(*args, **kwargs)
sys.stderr.flush()
def pok(self, val, ok='OK'):
""" Print val: OK in green on STDOUT """
self.pstd(self.color.green('{}: {}'.format(val, ok)))
def png(self, val, ng='ERR'):
""" Print val: ERR in red on STDOUT """
self.pstd(self.color.red('{}: {}'.format(val, ng)))
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa)))
def pverr(self, val, msg, *args, **kwargs):
kwargs.setdefault('file', self.err)
self.print('{}: {}'.format(val, msg), *args, **kwargs)
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs)
def quit(self, code=0):
sys.exit(code)
def rvpl(self, prompt, error='Entered value is invalid', intro=None,
validator=lambda x: x != '', clean=lambda x: x.strip(),
strict=True, default=None):
""" Start a read-validate-print loop
The RVPL will read the user input, validate it, and loop until the
entered value passes the validation, then return it.
Error message can be customized using the ``error`` argument. If the
value is a callable, it will be called with the value and it will be
expected to return a printable message. Exceptions raised by the
``error`` function are not trapped.
When ``intro`` is passed, it is printed above the prompt.
The ``validator`` argument is is a function that validates the user
input. Default validator simply validates if user entered any value.
The ``clean`` argument specifies a function for the ``read()`` method
with the same semantics.
"""
if intro:
self.pstd(utils.rewrap_long(intro))
val = self.read(prompt, clean)
while not validator(val):
if not strict:
return default
if hasattr(error, '__call__'):
self.perr(error(val))
else:
self.perr(error)
val = self.read(prompt, clean)
return val
def yesno(self, prompt, error='Please type either y or n', intro=None,
default=None):
""" Ask user for yes or no answer
The prompt will include a typical '(y/n):' at the end. Depending on
whether ``default`` was specified, this may also be '(Y/n):' or
'(y/N):'.
The ``default`` argument can be ``True`` or ``False``, with meaning of
'yes' and 'no' respectively. Default is ``None`` which means no
default. When default value is specified, malformed or empty response
will cause the ``default`` value to be returned.
Optional ``intro`` text can be specified which will be shown above the
prompt.
"""
if default is None:
prompt += ' (y/n):'
else:
if default is True:
prompt += ' (Y/n):'
default = 'y'
if default is False:
prompt += ' (y/N):'
default = 'n'
validator = lambda x: x in ['y', 'yes', 'n', 'no']
val = self.rvpl(prompt, error=error, intro=intro, validator=validator,
clean=lambda x: x.strip().lower(),
strict=default is None, default=default)
return val in ['y', 'yes']
def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)]
def readpipe(self, chunk=None):
""" Return iterator that iterates over STDIN line by line
If ``chunk`` is set to a positive non-zero integer value, then the
reads are performed in chunks of that many lines, and returned as a
list. Otherwise the lines are returned one by one.
"""
read = []
while True:
l = sys.stdin.readline()
if not l:
if read:
yield read
return
return
if not chunk:
yield l
else:
read.append(l)
if len(read) == chunk:
yield read
@property
def interm(self):
return hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
@property
def outterm(self):
return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
def register_signals(self):
signal.signal(signal.SIGINT, self.onint)
signal.signal(signal.SIGPIPE, self.onpipe)
def onint(self, signum, exc):
self.perr('\nQuitting program due to keyboard interrupt')
self.quit(1)
def onpipe(self, signup, exc):
self.quit(1)
def error(self, msg='Program error: {err}', exit=None):
""" Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler.
"""
def handler(exc):
if msg:
self.perr(msg.format(err=exc))
if exit is not None:
self.quit(exit)
return handler
@contextlib.contextmanager
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
prog='.', excs=(Exception,), reraise=True):
""" Context manager for handling interactive prog indication
This context manager streamlines presenting banners and prog
indicators. To start the prog, pass ``msg`` argument as a start
message. For example::
printer = Console(verbose=True)
with printer.progress('Checking files') as prog:
# Do some checks
if errors:
prog.abrt()
prog.end()
The context manager returns a ``Progress`` instance, which provides
methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
prog indicator).
The prog methods like ``abrt()`` and ``end()`` will raise an
exception that interrupts the prog. These exceptions are
``ProgressEnd`` exception subclasses and are ``ProgressAbrt`` and
``ProgressOK`` respectively. They are silenced and not handled in any
way as they only serve the purpose of flow control.
Other exceptions are trapped and ``abrt()`` is called. The exceptions
that should be trapped can be customized using the ``excs`` argument,
which should be a tuple of exception classes.
If a handler function is passed using ``onerror`` argument, then this
function takes the raised exception and handles it. By default, the
``error()`` factory is called with no arguments to generate the default
error handler. If string is passed, then ``error()`` factory is called
with that string.
Finally, when prog is aborted either naturally or when exception is
raised, it is possible to reraise the ``ProgressAbrt`` exception. This
is done using the ``reraise`` flag. Default is to reraise.
"""
if not onerror:
onerror = self.error()
if type(onerror) is str:
onerror = self.error(msg=onerror)
self.pverb(msg, end=sep)
prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
try:
yield prog
prog.end()
except self.ProgressOK:
pass
except self.ProgressAbrt as err:
if reraise:
raise err
except KeyboardInterrupt:
raise
except excs as err:
prog.abrt(noraise=True)
if onerror:
onerror(err)
if self.debug:
traceback.print_exc()
if reraise:
raise self.ProgressAbrt()
|
Othernet-Project/conz
|
conz/console.py
|
Console.rvpl
|
python
|
def rvpl(self, prompt, error='Entered value is invalid', intro=None,
validator=lambda x: x != '', clean=lambda x: x.strip(),
strict=True, default=None):
if intro:
self.pstd(utils.rewrap_long(intro))
val = self.read(prompt, clean)
while not validator(val):
if not strict:
return default
if hasattr(error, '__call__'):
self.perr(error(val))
else:
self.perr(error)
val = self.read(prompt, clean)
return val
|
Start a read-validate-print loop
The RVPL will read the user input, validate it, and loop until the
entered value passes the validation, then return it.
Error message can be customized using the ``error`` argument. If the
value is a callable, it will be called with the value and it will be
expected to return a printable message. Exceptions raised by the
``error`` function are not trapped.
When ``intro`` is passed, it is printed above the prompt.
The ``validator`` argument is is a function that validates the user
input. Default validator simply validates if user entered any value.
The ``clean`` argument specifies a function for the ``read()`` method
with the same semantics.
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L118-L150
|
[
"validator=lambda x: x > 12)\n",
"validator=lambda x: x > 12, strict=False, default=15)\n",
"def rewrap_long(s, width=COLS):\n \"\"\" Rewrap longer texts with paragraph breaks (two consecutive LF) \"\"\"\n paras = s.split('\\n\\n')\n return '\\n\\n'.join(rewrap(p) for p in paras)\n",
"def pstd(self, *args, **kwargs):\n \"\"\" Console to STDOUT \"\"\"\n kwargs['file'] = self.out\n self.print(*args, **kwargs)\n sys.stdout.flush()\n",
"def perr(self, *args, **kwargs):\n \"\"\" Console to STERR \"\"\"\n kwargs['file'] = self.err\n self.print(*args, **kwargs)\n sys.stderr.flush()\n",
"validator=lambda x: x != '', clean=lambda x: x.strip(),\n",
"def read(self, prompt='', clean=lambda x: x):\n \"\"\" Display a prompt and ask user for input\n\n A function to clean the user input can be passed as ``clean`` argument.\n This function takes a single value, which is the string user entered,\n and returns a cleaned value. Default is a pass-through function, which\n is an equivalent of::\n\n def clean(val):\n return val\n \"\"\"\n ans = read(prompt + ' ')\n return clean(ans)\n",
"validator = lambda x: x in ['y', 'yes', 'n', 'no']\n",
"validator = lambda x: x in numbers\n"
] |
class Console:
"""
Wrapper around print with helper methods that cover typical ``print()``
usage in console programs.
"""
ProgressEnd = progress.ProgressEnd
ProgressOK = progress.ProgressOK
ProgressAbrt = progress.ProgressAbrt
color = ansi_colors.color
def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
debug=False):
"""
``verbose`` flag controls suppression of verbose outputs (those printed
using ``pverb()`` method). The verbose output is usually a helpful
message for interactive applications, but may break other scripts in
pipes.
``stdout`` and ``stderrr`` are the default STDOUT file for all
``print()`` calls.
To enable debugging (e.g., printing stack traces), use the ``debug``
argument and set it to ``True``.
"""
self.verbose = verbose
self.out = stdout
self.err = stderr
self.register_signals()
self.debug = debug
def print(self, *args, **kwargs):
""" Thin wrapper around print
All other methods must go through this method for all printing needs.
"""
print(*args, **kwargs)
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush()
def perr(self, *args, **kwargs):
""" Console to STERR """
kwargs['file'] = self.err
self.print(*args, **kwargs)
sys.stderr.flush()
def pok(self, val, ok='OK'):
""" Print val: OK in green on STDOUT """
self.pstd(self.color.green('{}: {}'.format(val, ok)))
def png(self, val, ng='ERR'):
""" Print val: ERR in red on STDOUT """
self.pstd(self.color.red('{}: {}'.format(val, ng)))
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa)))
def pverr(self, val, msg, *args, **kwargs):
kwargs.setdefault('file', self.err)
self.print('{}: {}'.format(val, msg), *args, **kwargs)
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs)
def quit(self, code=0):
sys.exit(code)
def read(self, prompt='', clean=lambda x: x):
""" Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
"""
ans = read(prompt + ' ')
return clean(ans)
def yesno(self, prompt, error='Please type either y or n', intro=None,
default=None):
""" Ask user for yes or no answer
The prompt will include a typical '(y/n):' at the end. Depending on
whether ``default`` was specified, this may also be '(Y/n):' or
'(y/N):'.
The ``default`` argument can be ``True`` or ``False``, with meaning of
'yes' and 'no' respectively. Default is ``None`` which means no
default. When default value is specified, malformed or empty response
will cause the ``default`` value to be returned.
Optional ``intro`` text can be specified which will be shown above the
prompt.
"""
if default is None:
prompt += ' (y/n):'
else:
if default is True:
prompt += ' (Y/n):'
default = 'y'
if default is False:
prompt += ' (y/N):'
default = 'n'
validator = lambda x: x in ['y', 'yes', 'n', 'no']
val = self.rvpl(prompt, error=error, intro=intro, validator=validator,
clean=lambda x: x.strip().lower(),
strict=default is None, default=default)
return val in ['y', 'yes']
def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)]
def readpipe(self, chunk=None):
""" Return iterator that iterates over STDIN line by line
If ``chunk`` is set to a positive non-zero integer value, then the
reads are performed in chunks of that many lines, and returned as a
list. Otherwise the lines are returned one by one.
"""
read = []
while True:
l = sys.stdin.readline()
if not l:
if read:
yield read
return
return
if not chunk:
yield l
else:
read.append(l)
if len(read) == chunk:
yield read
@property
def interm(self):
return hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
@property
def outterm(self):
return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
def register_signals(self):
signal.signal(signal.SIGINT, self.onint)
signal.signal(signal.SIGPIPE, self.onpipe)
def onint(self, signum, exc):
self.perr('\nQuitting program due to keyboard interrupt')
self.quit(1)
def onpipe(self, signup, exc):
self.quit(1)
def error(self, msg='Program error: {err}', exit=None):
""" Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler.
"""
def handler(exc):
if msg:
self.perr(msg.format(err=exc))
if exit is not None:
self.quit(exit)
return handler
@contextlib.contextmanager
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
prog='.', excs=(Exception,), reraise=True):
""" Context manager for handling interactive prog indication
This context manager streamlines presenting banners and prog
indicators. To start the prog, pass ``msg`` argument as a start
message. For example::
printer = Console(verbose=True)
with printer.progress('Checking files') as prog:
# Do some checks
if errors:
prog.abrt()
prog.end()
The context manager returns a ``Progress`` instance, which provides
methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
prog indicator).
The prog methods like ``abrt()`` and ``end()`` will raise an
exception that interrupts the prog. These exceptions are
``ProgressEnd`` exception subclasses and are ``ProgressAbrt`` and
``ProgressOK`` respectively. They are silenced and not handled in any
way as they only serve the purpose of flow control.
Other exceptions are trapped and ``abrt()`` is called. The exceptions
that should be trapped can be customized using the ``excs`` argument,
which should be a tuple of exception classes.
If a handler function is passed using ``onerror`` argument, then this
function takes the raised exception and handles it. By default, the
``error()`` factory is called with no arguments to generate the default
error handler. If string is passed, then ``error()`` factory is called
with that string.
Finally, when prog is aborted either naturally or when exception is
raised, it is possible to reraise the ``ProgressAbrt`` exception. This
is done using the ``reraise`` flag. Default is to reraise.
"""
if not onerror:
onerror = self.error()
if type(onerror) is str:
onerror = self.error(msg=onerror)
self.pverb(msg, end=sep)
prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
try:
yield prog
prog.end()
except self.ProgressOK:
pass
except self.ProgressAbrt as err:
if reraise:
raise err
except KeyboardInterrupt:
raise
except excs as err:
prog.abrt(noraise=True)
if onerror:
onerror(err)
if self.debug:
traceback.print_exc()
if reraise:
raise self.ProgressAbrt()
|
Othernet-Project/conz
|
conz/console.py
|
Console.yesno
|
python
|
def yesno(self, prompt, error='Please type either y or n', intro=None,
default=None):
if default is None:
prompt += ' (y/n):'
else:
if default is True:
prompt += ' (Y/n):'
default = 'y'
if default is False:
prompt += ' (y/N):'
default = 'n'
validator = lambda x: x in ['y', 'yes', 'n', 'no']
val = self.rvpl(prompt, error=error, intro=intro, validator=validator,
clean=lambda x: x.strip().lower(),
strict=default is None, default=default)
return val in ['y', 'yes']
|
Ask user for yes or no answer
The prompt will include a typical '(y/n):' at the end. Depending on
whether ``default`` was specified, this may also be '(Y/n):' or
'(y/N):'.
The ``default`` argument can be ``True`` or ``False``, with meaning of
'yes' and 'no' respectively. Default is ``None`` which means no
default. When default value is specified, malformed or empty response
will cause the ``default`` value to be returned.
Optional ``intro`` text can be specified which will be shown above the
prompt.
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L152-L181
|
[
"def rvpl(self, prompt, error='Entered value is invalid', intro=None,\n validator=lambda x: x != '', clean=lambda x: x.strip(),\n strict=True, default=None):\n \"\"\" Start a read-validate-print loop\n\n The RVPL will read the user input, validate it, and loop until the\n entered value passes the validation, then return it.\n\n Error message can be customized using the ``error`` argument. If the\n value is a callable, it will be called with the value and it will be\n expected to return a printable message. Exceptions raised by the\n ``error`` function are not trapped.\n\n When ``intro`` is passed, it is printed above the prompt.\n\n The ``validator`` argument is is a function that validates the user\n input. Default validator simply validates if user entered any value.\n\n The ``clean`` argument specifies a function for the ``read()`` method\n with the same semantics.\n \"\"\"\n if intro:\n self.pstd(utils.rewrap_long(intro))\n val = self.read(prompt, clean)\n while not validator(val):\n if not strict:\n return default\n if hasattr(error, '__call__'):\n self.perr(error(val))\n else:\n self.perr(error)\n val = self.read(prompt, clean)\n return val\n"
] |
class Console:
"""
Wrapper around print with helper methods that cover typical ``print()``
usage in console programs.
"""
ProgressEnd = progress.ProgressEnd
ProgressOK = progress.ProgressOK
ProgressAbrt = progress.ProgressAbrt
color = ansi_colors.color
def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
debug=False):
"""
``verbose`` flag controls suppression of verbose outputs (those printed
using ``pverb()`` method). The verbose output is usually a helpful
message for interactive applications, but may break other scripts in
pipes.
``stdout`` and ``stderrr`` are the default STDOUT file for all
``print()`` calls.
To enable debugging (e.g., printing stack traces), use the ``debug``
argument and set it to ``True``.
"""
self.verbose = verbose
self.out = stdout
self.err = stderr
self.register_signals()
self.debug = debug
def print(self, *args, **kwargs):
""" Thin wrapper around print
All other methods must go through this method for all printing needs.
"""
print(*args, **kwargs)
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush()
def perr(self, *args, **kwargs):
""" Console to STERR """
kwargs['file'] = self.err
self.print(*args, **kwargs)
sys.stderr.flush()
def pok(self, val, ok='OK'):
""" Print val: OK in green on STDOUT """
self.pstd(self.color.green('{}: {}'.format(val, ok)))
def png(self, val, ng='ERR'):
""" Print val: ERR in red on STDOUT """
self.pstd(self.color.red('{}: {}'.format(val, ng)))
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa)))
def pverr(self, val, msg, *args, **kwargs):
kwargs.setdefault('file', self.err)
self.print('{}: {}'.format(val, msg), *args, **kwargs)
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs)
def quit(self, code=0):
sys.exit(code)
def read(self, prompt='', clean=lambda x: x):
""" Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
"""
ans = read(prompt + ' ')
return clean(ans)
def rvpl(self, prompt, error='Entered value is invalid', intro=None,
validator=lambda x: x != '', clean=lambda x: x.strip(),
strict=True, default=None):
""" Start a read-validate-print loop
The RVPL will read the user input, validate it, and loop until the
entered value passes the validation, then return it.
Error message can be customized using the ``error`` argument. If the
value is a callable, it will be called with the value and it will be
expected to return a printable message. Exceptions raised by the
``error`` function are not trapped.
When ``intro`` is passed, it is printed above the prompt.
The ``validator`` argument is is a function that validates the user
input. Default validator simply validates if user entered any value.
The ``clean`` argument specifies a function for the ``read()`` method
with the same semantics.
"""
if intro:
self.pstd(utils.rewrap_long(intro))
val = self.read(prompt, clean)
while not validator(val):
if not strict:
return default
if hasattr(error, '__call__'):
self.perr(error(val))
else:
self.perr(error)
val = self.read(prompt, clean)
return val
def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)]
def readpipe(self, chunk=None):
""" Return iterator that iterates over STDIN line by line
If ``chunk`` is set to a positive non-zero integer value, then the
reads are performed in chunks of that many lines, and returned as a
list. Otherwise the lines are returned one by one.
"""
read = []
while True:
l = sys.stdin.readline()
if not l:
if read:
yield read
return
return
if not chunk:
yield l
else:
read.append(l)
if len(read) == chunk:
yield read
@property
def interm(self):
return hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
@property
def outterm(self):
return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
def register_signals(self):
signal.signal(signal.SIGINT, self.onint)
signal.signal(signal.SIGPIPE, self.onpipe)
def onint(self, signum, exc):
self.perr('\nQuitting program due to keyboard interrupt')
self.quit(1)
def onpipe(self, signup, exc):
self.quit(1)
def error(self, msg='Program error: {err}', exit=None):
""" Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler.
"""
def handler(exc):
if msg:
self.perr(msg.format(err=exc))
if exit is not None:
self.quit(exit)
return handler
@contextlib.contextmanager
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
prog='.', excs=(Exception,), reraise=True):
""" Context manager for handling interactive prog indication
This context manager streamlines presenting banners and prog
indicators. To start the prog, pass ``msg`` argument as a start
message. For example::
printer = Console(verbose=True)
with printer.progress('Checking files') as prog:
# Do some checks
if errors:
prog.abrt()
prog.end()
The context manager returns a ``Progress`` instance, which provides
methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
prog indicator).
The prog methods like ``abrt()`` and ``end()`` will raise an
exception that interrupts the prog. These exceptions are
``ProgressEnd`` exception subclasses and are ``ProgressAbrt`` and
``ProgressOK`` respectively. They are silenced and not handled in any
way as they only serve the purpose of flow control.
Other exceptions are trapped and ``abrt()`` is called. The exceptions
that should be trapped can be customized using the ``excs`` argument,
which should be a tuple of exception classes.
If a handler function is passed using ``onerror`` argument, then this
function takes the raised exception and handles it. By default, the
``error()`` factory is called with no arguments to generate the default
error handler. If string is passed, then ``error()`` factory is called
with that string.
Finally, when prog is aborted either naturally or when exception is
raised, it is possible to reraise the ``ProgressAbrt`` exception. This
is done using the ``reraise`` flag. Default is to reraise.
"""
if not onerror:
onerror = self.error()
if type(onerror) is str:
onerror = self.error(msg=onerror)
self.pverb(msg, end=sep)
prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
try:
yield prog
prog.end()
except self.ProgressOK:
pass
except self.ProgressAbrt as err:
if reraise:
raise err
except KeyboardInterrupt:
raise
except excs as err:
prog.abrt(noraise=True)
if onerror:
onerror(err)
if self.debug:
traceback.print_exc()
if reraise:
raise self.ProgressAbrt()
|
Othernet-Project/conz
|
conz/console.py
|
Console.menu
|
python
|
def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)]
|
Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L183-L239
|
[
"numerator = lambda n: ['abcd'[i] for i in range(n)]\n",
"formatter = lambda i, l: '[{}] {}'.format(i, l)\n",
"def rewrap_long(s, width=COLS):\n \"\"\" Rewrap longer texts with paragraph breaks (two consecutive LF) \"\"\"\n paras = s.split('\\n\\n')\n return '\\n\\n'.join(rewrap(p) for p in paras)\n",
"def pstd(self, *args, **kwargs):\n \"\"\" Console to STDOUT \"\"\"\n kwargs['file'] = self.out\n self.print(*args, **kwargs)\n sys.stdout.flush()\n",
"numerator=lambda x: [i + 1 for i in range(x)],\n",
"formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),\n",
"def rvpl(self, prompt, error='Entered value is invalid', intro=None,\n validator=lambda x: x != '', clean=lambda x: x.strip(),\n strict=True, default=None):\n \"\"\" Start a read-validate-print loop\n\n The RVPL will read the user input, validate it, and loop until the\n entered value passes the validation, then return it.\n\n Error message can be customized using the ``error`` argument. If the\n value is a callable, it will be called with the value and it will be\n expected to return a printable message. Exceptions raised by the\n ``error`` function are not trapped.\n\n When ``intro`` is passed, it is printed above the prompt.\n\n The ``validator`` argument is is a function that validates the user\n input. Default validator simply validates if user entered any value.\n\n The ``clean`` argument specifies a function for the ``read()`` method\n with the same semantics.\n \"\"\"\n if intro:\n self.pstd(utils.rewrap_long(intro))\n val = self.read(prompt, clean)\n while not validator(val):\n if not strict:\n return default\n if hasattr(error, '__call__'):\n self.perr(error(val))\n else:\n self.perr(error)\n val = self.read(prompt, clean)\n return val\n"
] |
class Console:
"""
Wrapper around print with helper methods that cover typical ``print()``
usage in console programs.
"""
ProgressEnd = progress.ProgressEnd
ProgressOK = progress.ProgressOK
ProgressAbrt = progress.ProgressAbrt
color = ansi_colors.color
def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
debug=False):
"""
``verbose`` flag controls suppression of verbose outputs (those printed
using ``pverb()`` method). The verbose output is usually a helpful
message for interactive applications, but may break other scripts in
pipes.
``stdout`` and ``stderrr`` are the default STDOUT file for all
``print()`` calls.
To enable debugging (e.g., printing stack traces), use the ``debug``
argument and set it to ``True``.
"""
self.verbose = verbose
self.out = stdout
self.err = stderr
self.register_signals()
self.debug = debug
def print(self, *args, **kwargs):
""" Thin wrapper around print
All other methods must go through this method for all printing needs.
"""
print(*args, **kwargs)
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush()
def perr(self, *args, **kwargs):
""" Console to STERR """
kwargs['file'] = self.err
self.print(*args, **kwargs)
sys.stderr.flush()
def pok(self, val, ok='OK'):
""" Print val: OK in green on STDOUT """
self.pstd(self.color.green('{}: {}'.format(val, ok)))
def png(self, val, ng='ERR'):
""" Print val: ERR in red on STDOUT """
self.pstd(self.color.red('{}: {}'.format(val, ng)))
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa)))
def pverr(self, val, msg, *args, **kwargs):
kwargs.setdefault('file', self.err)
self.print('{}: {}'.format(val, msg), *args, **kwargs)
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs)
def quit(self, code=0):
sys.exit(code)
def read(self, prompt='', clean=lambda x: x):
""" Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
"""
ans = read(prompt + ' ')
return clean(ans)
def rvpl(self, prompt, error='Entered value is invalid', intro=None,
validator=lambda x: x != '', clean=lambda x: x.strip(),
strict=True, default=None):
""" Start a read-validate-print loop
The RVPL will read the user input, validate it, and loop until the
entered value passes the validation, then return it.
Error message can be customized using the ``error`` argument. If the
value is a callable, it will be called with the value and it will be
expected to return a printable message. Exceptions raised by the
``error`` function are not trapped.
When ``intro`` is passed, it is printed above the prompt.
The ``validator`` argument is is a function that validates the user
input. Default validator simply validates if user entered any value.
The ``clean`` argument specifies a function for the ``read()`` method
with the same semantics.
"""
if intro:
self.pstd(utils.rewrap_long(intro))
val = self.read(prompt, clean)
while not validator(val):
if not strict:
return default
if hasattr(error, '__call__'):
self.perr(error(val))
else:
self.perr(error)
val = self.read(prompt, clean)
return val
def yesno(self, prompt, error='Please type either y or n', intro=None,
          default=None):
    """ Ask the user a yes/no question and return the answer as a bool

    The prompt is suffixed with '(y/n):'; when ``default`` is ``True`` or
    ``False`` the suffix becomes '(Y/n):' or '(y/N):' respectively, and a
    malformed or empty response yields that default. With no default the
    user is re-prompted until a valid answer is given. Optional ``intro``
    text is printed above the prompt.
    """
    if default is None:
        prompt += ' (y/n):'
    else:
        # Map boolean defaults onto their textual equivalents; any other
        # value is deliberately passed through unchanged.
        if default is True:
            prompt += ' (Y/n):'
            default = 'y'
        if default is False:
            prompt += ' (y/N):'
            default = 'n'

    def validator(ans):
        return ans in ('y', 'yes', 'n', 'no')

    def cleaner(ans):
        return ans.strip().lower()

    answer = self.rvpl(prompt, error=error, intro=intro,
                       validator=validator, clean=cleaner,
                       strict=default is None, default=default)
    return answer in ('y', 'yes')
def readpipe(self, chunk=None):
    """ Return iterator that iterates over STDIN line by line

    If ``chunk`` is set to a positive non-zero integer value, then the
    reads are performed in chunks of that many lines, and each chunk is
    yielded as a list (a final partial chunk is yielded at EOF).
    Otherwise the lines are yielded one by one.
    """
    buffered = []
    while True:
        line = sys.stdin.readline()
        if not line:
            # EOF: flush any partially filled chunk before stopping.
            if buffered:
                yield buffered
            return
        if not chunk:
            yield line
        else:
            buffered.append(line)
            if len(buffered) == chunk:
                yield buffered
                # BUGFIX: start a fresh list. The original kept appending
                # to the already-yielded list, so ``len(read) == chunk``
                # could never be true again -- only the first chunk was
                # emitted, and EOF re-yielded already-seen lines.
                buffered = []
@property
def interm(self):
    """ True when STDIN is attached to an interactive terminal """
    stdin = sys.stdin
    return hasattr(stdin, 'isatty') and stdin.isatty()
@property
def outterm(self):
    """ True when STDOUT is attached to an interactive terminal """
    stdout = sys.stdout
    return hasattr(stdout, 'isatty') and stdout.isatty()
def register_signals(self):
    """ Install handlers for keyboard interrupts and broken pipes """
    signal.signal(signal.SIGINT, self.onint)
    # NOTE(review): SIGPIPE does not exist on Windows -- presumably this
    # code targets POSIX only; confirm before porting.
    signal.signal(signal.SIGPIPE, self.onpipe)
def onint(self, signum, frame):
    """ SIGINT handler: report the interrupt on STDERR and exit(1) """
    self.perr('\nQuitting program due to keyboard interrupt')
    self.quit(1)
def onpipe(self, signum, frame):
    """ SIGPIPE handler: exit quietly with status 1 """
    self.quit(1)
def error(self, msg='Program error: {err}', exit=None):
    """ Error handler factory

    Returns a function that takes an exception object, prints ``msg``
    (with the optional ``{err}`` placeholder filled in) to STDERR, and
    optionally quits. Passing a falsy ``msg`` suppresses the output
    entirely. When ``exit`` is not ``None`` the program terminates with
    that value as its return code after printing.

    The returned function is suitable as an error handler for the
    ``progress()`` context manager.
    """
    def handler(exc):
        if msg:
            self.perr(msg.format(err=exc))
        if exit is None:
            return
        self.quit(exit)
    return handler
@contextlib.contextmanager
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
             prog='.', excs=(Exception,), reraise=True):
    """ Context manager for handling interactive prog indication

    This context manager streamlines presenting banners and prog
    indicators. To start the prog, pass ``msg`` argument as a start
    message. For example::

        printer = Console(verbose=True)
        with printer.progress('Checking files') as prog:
            # Do some checks
            if errors:
                prog.abrt()
            prog.end()

    The context manager returns a ``Progress`` instance, which provides
    methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
    prog indicator).

    The prog methods like ``abrt()`` and ``end()`` raise
    ``ProgressEnd`` subclasses (``ProgressAbrt`` and ``ProgressOK``)
    that interrupt the prog; they serve flow control only and are
    silenced here.

    Other exceptions are trapped and ``abrt()`` is called. The trapped
    exception classes can be customized via ``excs`` (a tuple).

    ``onerror`` may be a handler function taking the raised exception, or
    a string (turned into a handler via the ``error()`` factory); by
    default ``error()`` is called with no arguments.

    When prog is aborted, the ``ProgressAbrt`` exception is re-raised
    if the ``reraise`` flag is set (the default).
    """
    # Normalize onerror: no handler -> default factory; plain string ->
    # factory using that string as the message template.
    if not onerror:
        onerror = self.error()
    if type(onerror) is str:
        onerror = self.error(msg=onerror)
    self.pverb(msg, end=sep)
    # NOTE: this rebinds ``prog`` -- the progress-character parameter is
    # shadowed from here on, but it was already handed to Progress above.
    prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
    try:
        yield prog
        # Normal fall-through: close the banner (raises ProgressOK,
        # caught just below).
        prog.end()
    except self.ProgressOK:
        pass
    except self.ProgressAbrt as err:
        if reraise:
            raise err
    except KeyboardInterrupt:
        # Never swallow Ctrl-C; let the SIGINT handler deal with it.
        raise
    except excs as err:
        # Unexpected error: print the abort banner without raising, run
        # the error handler, optionally dump the traceback, then reraise
        # as a flow-control abort if requested.
        prog.abrt(noraise=True)
        if onerror:
            onerror(err)
        if self.debug:
            traceback.print_exc()
        if reraise:
            raise self.ProgressAbrt()
|
Othernet-Project/conz
|
conz/console.py
|
Console.readpipe
|
python
|
def readpipe(self, chunk=None):
read = []
while True:
l = sys.stdin.readline()
if not l:
if read:
yield read
return
return
if not chunk:
yield l
else:
read.append(l)
if len(read) == chunk:
yield read
|
Return iterator that iterates over STDIN line by line
If ``chunk`` is set to a positive non-zero integer value, then the
reads are performed in chunks of that many lines, and returned as a
list. Otherwise the lines are returned one by one.
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L241-L261
| null |
class Console:
"""
Wrapper around print with helper methods that cover typical ``print()``
usage in console programs.
"""
ProgressEnd = progress.ProgressEnd
ProgressOK = progress.ProgressOK
ProgressAbrt = progress.ProgressAbrt
color = ansi_colors.color
def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
debug=False):
"""
``verbose`` flag controls suppression of verbose outputs (those printed
using ``pverb()`` method). The verbose output is usually a helpful
message for interactive applications, but may break other scripts in
pipes.
``stdout`` and ``stderrr`` are the default STDOUT file for all
``print()`` calls.
To enable debugging (e.g., printing stack traces), use the ``debug``
argument and set it to ``True``.
"""
self.verbose = verbose
self.out = stdout
self.err = stderr
self.register_signals()
self.debug = debug
def print(self, *args, **kwargs):
""" Thin wrapper around print
All other methods must go through this method for all printing needs.
"""
print(*args, **kwargs)
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush()
def perr(self, *args, **kwargs):
""" Console to STERR """
kwargs['file'] = self.err
self.print(*args, **kwargs)
sys.stderr.flush()
def pok(self, val, ok='OK'):
""" Print val: OK in green on STDOUT """
self.pstd(self.color.green('{}: {}'.format(val, ok)))
def png(self, val, ng='ERR'):
""" Print val: ERR in red on STDOUT """
self.pstd(self.color.red('{}: {}'.format(val, ng)))
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa)))
def pverr(self, val, msg, *args, **kwargs):
kwargs.setdefault('file', self.err)
self.print('{}: {}'.format(val, msg), *args, **kwargs)
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs)
def quit(self, code=0):
sys.exit(code)
def read(self, prompt='', clean=lambda x: x):
""" Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
"""
ans = read(prompt + ' ')
return clean(ans)
def rvpl(self, prompt, error='Entered value is invalid', intro=None,
validator=lambda x: x != '', clean=lambda x: x.strip(),
strict=True, default=None):
""" Start a read-validate-print loop
The RVPL will read the user input, validate it, and loop until the
entered value passes the validation, then return it.
Error message can be customized using the ``error`` argument. If the
value is a callable, it will be called with the value and it will be
expected to return a printable message. Exceptions raised by the
``error`` function are not trapped.
When ``intro`` is passed, it is printed above the prompt.
The ``validator`` argument is is a function that validates the user
input. Default validator simply validates if user entered any value.
The ``clean`` argument specifies a function for the ``read()`` method
with the same semantics.
"""
if intro:
self.pstd(utils.rewrap_long(intro))
val = self.read(prompt, clean)
while not validator(val):
if not strict:
return default
if hasattr(error, '__call__'):
self.perr(error(val))
else:
self.perr(error)
val = self.read(prompt, clean)
return val
def yesno(self, prompt, error='Please type either y or n', intro=None,
default=None):
""" Ask user for yes or no answer
The prompt will include a typical '(y/n):' at the end. Depending on
whether ``default`` was specified, this may also be '(Y/n):' or
'(y/N):'.
The ``default`` argument can be ``True`` or ``False``, with meaning of
'yes' and 'no' respectively. Default is ``None`` which means no
default. When default value is specified, malformed or empty response
will cause the ``default`` value to be returned.
Optional ``intro`` text can be specified which will be shown above the
prompt.
"""
if default is None:
prompt += ' (y/n):'
else:
if default is True:
prompt += ' (Y/n):'
default = 'y'
if default is False:
prompt += ' (y/N):'
default = 'n'
validator = lambda x: x in ['y', 'yes', 'n', 'no']
val = self.rvpl(prompt, error=error, intro=intro, validator=validator,
clean=lambda x: x.strip().lower(),
strict=default is None, default=default)
return val in ['y', 'yes']
def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)]
@property
def interm(self):
return hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
@property
def outterm(self):
return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
def register_signals(self):
signal.signal(signal.SIGINT, self.onint)
signal.signal(signal.SIGPIPE, self.onpipe)
def onint(self, signum, exc):
self.perr('\nQuitting program due to keyboard interrupt')
self.quit(1)
def onpipe(self, signup, exc):
self.quit(1)
def error(self, msg='Program error: {err}', exit=None):
""" Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler.
"""
def handler(exc):
if msg:
self.perr(msg.format(err=exc))
if exit is not None:
self.quit(exit)
return handler
@contextlib.contextmanager
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
prog='.', excs=(Exception,), reraise=True):
""" Context manager for handling interactive prog indication
This context manager streamlines presenting banners and prog
indicators. To start the prog, pass ``msg`` argument as a start
message. For example::
printer = Console(verbose=True)
with printer.progress('Checking files') as prog:
# Do some checks
if errors:
prog.abrt()
prog.end()
The context manager returns a ``Progress`` instance, which provides
methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
prog indicator).
The prog methods like ``abrt()`` and ``end()`` will raise an
exception that interrupts the prog. These exceptions are
``ProgressEnd`` exception subclasses and are ``ProgressAbrt`` and
``ProgressOK`` respectively. They are silenced and not handled in any
way as they only serve the purpose of flow control.
Other exceptions are trapped and ``abrt()`` is called. The exceptions
that should be trapped can be customized using the ``excs`` argument,
which should be a tuple of exception classes.
If a handler function is passed using ``onerror`` argument, then this
function takes the raised exception and handles it. By default, the
``error()`` factory is called with no arguments to generate the default
error handler. If string is passed, then ``error()`` factory is called
with that string.
Finally, when prog is aborted either naturally or when exception is
raised, it is possible to reraise the ``ProgressAbrt`` exception. This
is done using the ``reraise`` flag. Default is to reraise.
"""
if not onerror:
onerror = self.error()
if type(onerror) is str:
onerror = self.error(msg=onerror)
self.pverb(msg, end=sep)
prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
try:
yield prog
prog.end()
except self.ProgressOK:
pass
except self.ProgressAbrt as err:
if reraise:
raise err
except KeyboardInterrupt:
raise
except excs as err:
prog.abrt(noraise=True)
if onerror:
onerror(err)
if self.debug:
traceback.print_exc()
if reraise:
raise self.ProgressAbrt()
|
Othernet-Project/conz
|
conz/console.py
|
Console.error
|
python
|
def error(self, msg='Program error: {err}', exit=None):
def handler(exc):
if msg:
self.perr(msg.format(err=exc))
if exit is not None:
self.quit(exit)
return handler
|
Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler.
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L282-L304
| null |
class Console:
"""
Wrapper around print with helper methods that cover typical ``print()``
usage in console programs.
"""
ProgressEnd = progress.ProgressEnd
ProgressOK = progress.ProgressOK
ProgressAbrt = progress.ProgressAbrt
color = ansi_colors.color
def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
debug=False):
"""
``verbose`` flag controls suppression of verbose outputs (those printed
using ``pverb()`` method). The verbose output is usually a helpful
message for interactive applications, but may break other scripts in
pipes.
``stdout`` and ``stderrr`` are the default STDOUT file for all
``print()`` calls.
To enable debugging (e.g., printing stack traces), use the ``debug``
argument and set it to ``True``.
"""
self.verbose = verbose
self.out = stdout
self.err = stderr
self.register_signals()
self.debug = debug
def print(self, *args, **kwargs):
""" Thin wrapper around print
All other methods must go through this method for all printing needs.
"""
print(*args, **kwargs)
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush()
def perr(self, *args, **kwargs):
""" Console to STERR """
kwargs['file'] = self.err
self.print(*args, **kwargs)
sys.stderr.flush()
def pok(self, val, ok='OK'):
""" Print val: OK in green on STDOUT """
self.pstd(self.color.green('{}: {}'.format(val, ok)))
def png(self, val, ng='ERR'):
""" Print val: ERR in red on STDOUT """
self.pstd(self.color.red('{}: {}'.format(val, ng)))
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa)))
def pverr(self, val, msg, *args, **kwargs):
kwargs.setdefault('file', self.err)
self.print('{}: {}'.format(val, msg), *args, **kwargs)
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs)
def quit(self, code=0):
sys.exit(code)
def read(self, prompt='', clean=lambda x: x):
""" Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
"""
ans = read(prompt + ' ')
return clean(ans)
def rvpl(self, prompt, error='Entered value is invalid', intro=None,
validator=lambda x: x != '', clean=lambda x: x.strip(),
strict=True, default=None):
""" Start a read-validate-print loop
The RVPL will read the user input, validate it, and loop until the
entered value passes the validation, then return it.
Error message can be customized using the ``error`` argument. If the
value is a callable, it will be called with the value and it will be
expected to return a printable message. Exceptions raised by the
``error`` function are not trapped.
When ``intro`` is passed, it is printed above the prompt.
The ``validator`` argument is is a function that validates the user
input. Default validator simply validates if user entered any value.
The ``clean`` argument specifies a function for the ``read()`` method
with the same semantics.
"""
if intro:
self.pstd(utils.rewrap_long(intro))
val = self.read(prompt, clean)
while not validator(val):
if not strict:
return default
if hasattr(error, '__call__'):
self.perr(error(val))
else:
self.perr(error)
val = self.read(prompt, clean)
return val
def yesno(self, prompt, error='Please type either y or n', intro=None,
default=None):
""" Ask user for yes or no answer
The prompt will include a typical '(y/n):' at the end. Depending on
whether ``default`` was specified, this may also be '(Y/n):' or
'(y/N):'.
The ``default`` argument can be ``True`` or ``False``, with meaning of
'yes' and 'no' respectively. Default is ``None`` which means no
default. When default value is specified, malformed or empty response
will cause the ``default`` value to be returned.
Optional ``intro`` text can be specified which will be shown above the
prompt.
"""
if default is None:
prompt += ' (y/n):'
else:
if default is True:
prompt += ' (Y/n):'
default = 'y'
if default is False:
prompt += ' (y/N):'
default = 'n'
validator = lambda x: x in ['y', 'yes', 'n', 'no']
val = self.rvpl(prompt, error=error, intro=intro, validator=validator,
clean=lambda x: x.strip().lower(),
strict=default is None, default=default)
return val in ['y', 'yes']
def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)]
def readpipe(self, chunk=None):
""" Return iterator that iterates over STDIN line by line
If ``chunk`` is set to a positive non-zero integer value, then the
reads are performed in chunks of that many lines, and returned as a
list. Otherwise the lines are returned one by one.
"""
read = []
while True:
l = sys.stdin.readline()
if not l:
if read:
yield read
return
return
if not chunk:
yield l
else:
read.append(l)
if len(read) == chunk:
yield read
@property
def interm(self):
return hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
@property
def outterm(self):
return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
def register_signals(self):
signal.signal(signal.SIGINT, self.onint)
signal.signal(signal.SIGPIPE, self.onpipe)
def onint(self, signum, exc):
self.perr('\nQuitting program due to keyboard interrupt')
self.quit(1)
def onpipe(self, signup, exc):
self.quit(1)
@contextlib.contextmanager
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
prog='.', excs=(Exception,), reraise=True):
""" Context manager for handling interactive prog indication
This context manager streamlines presenting banners and prog
indicators. To start the prog, pass ``msg`` argument as a start
message. For example::
printer = Console(verbose=True)
with printer.progress('Checking files') as prog:
# Do some checks
if errors:
prog.abrt()
prog.end()
The context manager returns a ``Progress`` instance, which provides
methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
prog indicator).
The prog methods like ``abrt()`` and ``end()`` will raise an
exception that interrupts the prog. These exceptions are
``ProgressEnd`` exception subclasses and are ``ProgressAbrt`` and
``ProgressOK`` respectively. They are silenced and not handled in any
way as they only serve the purpose of flow control.
Other exceptions are trapped and ``abrt()`` is called. The exceptions
that should be trapped can be customized using the ``excs`` argument,
which should be a tuple of exception classes.
If a handler function is passed using ``onerror`` argument, then this
function takes the raised exception and handles it. By default, the
``error()`` factory is called with no arguments to generate the default
error handler. If string is passed, then ``error()`` factory is called
with that string.
Finally, when prog is aborted either naturally or when exception is
raised, it is possible to reraise the ``ProgressAbrt`` exception. This
is done using the ``reraise`` flag. Default is to reraise.
"""
if not onerror:
onerror = self.error()
if type(onerror) is str:
onerror = self.error(msg=onerror)
self.pverb(msg, end=sep)
prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
try:
yield prog
prog.end()
except self.ProgressOK:
pass
except self.ProgressAbrt as err:
if reraise:
raise err
except KeyboardInterrupt:
raise
except excs as err:
prog.abrt(noraise=True)
if onerror:
onerror(err)
if self.debug:
traceback.print_exc()
if reraise:
raise self.ProgressAbrt()
|
Othernet-Project/conz
|
conz/console.py
|
Console.progress
|
python
|
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
prog='.', excs=(Exception,), reraise=True):
if not onerror:
onerror = self.error()
if type(onerror) is str:
onerror = self.error(msg=onerror)
self.pverb(msg, end=sep)
prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
try:
yield prog
prog.end()
except self.ProgressOK:
pass
except self.ProgressAbrt as err:
if reraise:
raise err
except KeyboardInterrupt:
raise
except excs as err:
prog.abrt(noraise=True)
if onerror:
onerror(err)
if self.debug:
traceback.print_exc()
if reraise:
raise self.ProgressAbrt()
|
Context manager for handling interactive prog indication
This context manager streamlines presenting banners and prog
indicators. To start the prog, pass ``msg`` argument as a start
message. For example::
printer = Console(verbose=True)
with printer.progress('Checking files') as prog:
# Do some checks
if errors:
prog.abrt()
prog.end()
The context manager returns a ``Progress`` instance, which provides
methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
prog indicator).
The prog methods like ``abrt()`` and ``end()`` will raise an
exception that interrupts the prog. These exceptions are
``ProgressEnd`` exception subclasses and are ``ProgressAbrt`` and
``ProgressOK`` respectively. They are silenced and not handled in any
way as they only serve the purpose of flow control.
Other exceptions are trapped and ``abrt()`` is called. The exceptions
that should be trapped can be customized using the ``excs`` argument,
which should be a tuple of exception classes.
If a handler function is passed using ``onerror`` argument, then this
function takes the raised exception and handles it. By default, the
``error()`` factory is called with no arguments to generate the default
error handler. If string is passed, then ``error()`` factory is called
with that string.
Finally, when prog is aborted either naturally or when exception is
raised, it is possible to reraise the ``ProgressAbrt`` exception. This
is done using the ``reraise`` flag. Default is to reraise.
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L307-L369
|
[
"def pverb(self, *args, **kwargs):\n \"\"\" Console verbose message to STDOUT \"\"\"\n if not self.verbose:\n return\n self.pstd(*args, **kwargs)\n",
"def error(self, msg='Program error: {err}', exit=None):\n \"\"\" Error handler factory\n\n This function takes a message with optional ``{err}`` placeholder and\n returns a function that takes an exception object, prints the error\n message to STDERR and optionally quits.\n\n If no message is supplied (e.g., passing ``None`` or ``False`` or empty\n string), then nothing is output to STDERR.\n\n The ``exit`` argument can be set to a non-zero value, in which case the\n program quits after printing the message using its value as return\n value of the program.\n\n The returned function can be used with the ``progress()`` context\n manager as error handler.\n \"\"\"\n def handler(exc):\n if msg:\n self.perr(msg.format(err=exc))\n if exit is not None:\n self.quit(exit)\n return handler\n",
"def end(self, s=None, post=None, noraise=False):\n \"\"\" Prints the end banner and raises ``ProgressOK`` exception\n\n When ``noraise`` flag is set to ``True``, then the exception is not\n raised, and progress is allowed to continue.\n\n If ``post`` function is supplied it is invoked with no arguments after\n the close banner is printed, but before exceptions are raised. The\n ``post`` function takes no arguments.\n \"\"\"\n s = s or self.end_msg\n self.printer(self.color.green(s))\n if post:\n post()\n if noraise:\n return\n raise ProgressOK()\n",
"def abrt(self, s=None, post=None, noraise=False):\n \"\"\" Prints the abrt banner and raises ``ProgressAbrt`` exception\n\n When ``noraise`` flag is set to ``True``, then the exception is not\n raised, and progress is allowed to continue.\n\n If ``post`` function is supplied it is invoked with no arguments after\n the close banner is printed, but before exceptions are raised. The\n ``post`` function takes no arguments.\n \"\"\"\n s = s or self.abrt_msg\n self.printer(self.color.red(s))\n if post:\n post()\n if noraise:\n return\n raise ProgressAbrt()\n",
"def handler(exc):\n if msg:\n self.perr(msg.format(err=exc))\n if exit is not None:\n self.quit(exit)\n"
] |
class Console:
"""
Wrapper around print with helper methods that cover typical ``print()``
usage in console programs.
"""
ProgressEnd = progress.ProgressEnd
ProgressOK = progress.ProgressOK
ProgressAbrt = progress.ProgressAbrt
color = ansi_colors.color
def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
debug=False):
"""
``verbose`` flag controls suppression of verbose outputs (those printed
using ``pverb()`` method). The verbose output is usually a helpful
message for interactive applications, but may break other scripts in
pipes.
``stdout`` and ``stderrr`` are the default STDOUT file for all
``print()`` calls.
To enable debugging (e.g., printing stack traces), use the ``debug``
argument and set it to ``True``.
"""
self.verbose = verbose
self.out = stdout
self.err = stderr
self.register_signals()
self.debug = debug
def print(self, *args, **kwargs):
""" Thin wrapper around print
All other methods must go through this method for all printing needs.
"""
print(*args, **kwargs)
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush()
def perr(self, *args, **kwargs):
""" Console to STERR """
kwargs['file'] = self.err
self.print(*args, **kwargs)
sys.stderr.flush()
def pok(self, val, ok='OK'):
""" Print val: OK in green on STDOUT """
self.pstd(self.color.green('{}: {}'.format(val, ok)))
def png(self, val, ng='ERR'):
""" Print val: ERR in red on STDOUT """
self.pstd(self.color.red('{}: {}'.format(val, ng)))
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa)))
def pverr(self, val, msg, *args, **kwargs):
kwargs.setdefault('file', self.err)
self.print('{}: {}'.format(val, msg), *args, **kwargs)
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs)
    def quit(self, code=0):
        """ Exit the process with the given status code (default 0) """
        sys.exit(code)
def read(self, prompt='', clean=lambda x: x):
""" Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
"""
ans = read(prompt + ' ')
return clean(ans)
    def rvpl(self, prompt, error='Entered value is invalid', intro=None,
             validator=lambda x: x != '', clean=lambda x: x.strip(),
             strict=True, default=None):
        """ Start a read-validate-print loop

        The RVPL will read the user input, validate it, and loop until the
        entered value passes the validation, then return it.

        Error message can be customized using the ``error`` argument. If the
        value is a callable, it will be called with the value and it will be
        expected to return a printable message. Exceptions raised by the
        ``error`` function are not trapped.

        When ``intro`` is passed, it is printed above the prompt.

        The ``validator`` argument is a function that validates the user
        input. Default validator simply validates if user entered any value.

        The ``clean`` argument specifies a function for the ``read()`` method
        with the same semantics.

        With ``strict=False`` the loop gives up after the first invalid
        answer and returns ``default`` instead of re-prompting.
        """
        if intro:
            self.pstd(utils.rewrap_long(intro))
        val = self.read(prompt, clean)
        while not validator(val):
            # Non-strict mode: bail out with the fallback value instead of
            # asking again.
            if not strict:
                return default
            # ``error`` may be a plain message or a callable producing one
            if hasattr(error, '__call__'):
                self.perr(error(val))
            else:
                self.perr(error)
            val = self.read(prompt, clean)
        return val
    def yesno(self, prompt, error='Please type either y or n', intro=None,
              default=None):
        """ Ask user for yes or no answer

        The prompt will include a typical '(y/n):' at the end. Depending on
        whether ``default`` was specified, this may also be '(Y/n):' or
        '(y/N):'.

        The ``default`` argument can be ``True`` or ``False``, with meaning of
        'yes' and 'no' respectively. Default is ``None`` which means no
        default. When default value is specified, malformed or empty response
        will cause the ``default`` value to be returned.

        Optional ``intro`` text can be specified which will be shown above the
        prompt.
        """
        if default is None:
            prompt += ' (y/n):'
        else:
            # Capitalize the defaulted option and rebind ``default`` to the
            # answer string rvpl() should fall back to on invalid input.
            if default is True:
                prompt += ' (Y/n):'
                default = 'y'
            if default is False:
                prompt += ' (y/N):'
                default = 'n'
        validator = lambda x: x in ['y', 'yes', 'n', 'no']
        # Loop strictly only when there is no default to fall back to
        val = self.rvpl(prompt, error=error, intro=intro, validator=validator,
                        clean=lambda x: x.strip().lower(),
                        strict=default is None, default=default)
        return val in ['y', 'yes']
    def menu(self, choices, prompt='Please choose from the provided options:',
             error='Invalid choice', intro=None, strict=True, default=None,
             numerator=lambda x: [i + 1 for i in range(x)],
             formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
             clean=utils.safeint):
        """ Print a menu

        The choices must be an iterable of two-tuples where the first value is
        the value of the menu item, and the second is the label for that
        matches the value.

        The menu will be printed with numeric choices. For example::

            1) foo
            2) bar

        Formatting of the number is controlled by the formatter function which
        can be overridden by passing the ``formatter`` argument.

        The numbers used for the menu are generated using the numerator
        function which can be specified using the ``numerator`` function. This
        function must take the number of choices and return the same number of
        items that will be used as choice characters as a list.

        The cleaner function is passed to ``rvpl()`` and can be customized
        using the ``clean`` argument. This function should generally be
        customized whenever ``numerator`` is customized, as the default
        cleaner converts input to integers to match the default numerator.

        Optional ``intro`` argument can be passed to print a message above the
        menu.

        The return value of this method is the value user has chosen. The
        prompt will keep asking the user for input until a valid choice is
        selected. Each time an invalid selection is made, error message is
        printed. This message can be customized using ``error`` argument.

        If ``strict`` argument is set, then only values in choices are allowed,
        otherwise any value will be allowed. The ``default`` argument can be
        used to define what value is returned in case user select an invalid
        value when strict checking is off.
        """
        numbers = list(numerator(len(choices)))
        labels = (label for _, label in choices)
        values = [value for value, _ in choices]
        # Print intro and menu itself
        if intro:
            self.pstd('\n' + utils.rewrap_long(intro))
        for n, label in zip(numbers, labels):
            self.pstd(formatter(n, label))
        # Define the validator: only generated menu numbers are accepted
        validator = lambda x: x in numbers
        val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
                        strict=strict, default=default)
        # Non-strict fall-through: the default is returned verbatim, not
        # mapped through the choices list.
        if not strict and val == default:
            return val
        # Map the chosen number back to the corresponding choice value
        return values[numbers.index(val)]
def readpipe(self, chunk=None):
""" Return iterator that iterates over STDIN line by line
If ``chunk`` is set to a positive non-zero integer value, then the
reads are performed in chunks of that many lines, and returned as a
list. Otherwise the lines are returned one by one.
"""
read = []
while True:
l = sys.stdin.readline()
if not l:
if read:
yield read
return
return
if not chunk:
yield l
else:
read.append(l)
if len(read) == chunk:
yield read
@property
def interm(self):
return hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
@property
def outterm(self):
return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
    def register_signals(self):
        """ Install handlers for keyboard interrupts and broken pipes """
        signal.signal(signal.SIGINT, self.onint)
        # NOTE(review): SIGPIPE is POSIX-only; this line raises
        # AttributeError on Windows -- confirm the supported platforms.
        signal.signal(signal.SIGPIPE, self.onpipe)
    def onint(self, signum, exc):
        """ SIGINT handler: report the interrupt on STDERR and exit 1 """
        self.perr('\nQuitting program due to keyboard interrupt')
        self.quit(1)
def onpipe(self, signup, exc):
self.quit(1)
def error(self, msg='Program error: {err}', exit=None):
""" Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler.
"""
def handler(exc):
if msg:
self.perr(msg.format(err=exc))
if exit is not None:
self.quit(exit)
return handler
@contextlib.contextmanager
|
Othernet-Project/conz
|
conz/progress.py
|
Progress.end
|
python
|
def end(self, s=None, post=None, noraise=False):
s = s or self.end_msg
self.printer(self.color.green(s))
if post:
post()
if noraise:
return
raise ProgressOK()
|
Prints the end banner and raises ``ProgressOK`` exception
When ``noraise`` flag is set to ``True``, then the exception is not
raised, and progress is allowed to continue.
If ``post`` function is supplied it is invoked with no arguments after
the close banner is printed, but before exceptions are raised. The
``post`` function takes no arguments.
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/progress.py#L55-L71
|
[
"def pverb(self, *args, **kwargs):\n \"\"\" Console verbose message to STDOUT \"\"\"\n if not self.verbose:\n return\n self.pstd(*args, **kwargs)\n"
] |
class Progress:
    """
    Wrapper that manages step progress
    """
    color = ansi_colors.color

    def __init__(self, printer, end='DONE', abrt='FAIL', prog='.'):
        """
        The ``Console`` method to be used is specified using the
        ``printer`` argument.

        ``end`` is the progress end banner (default: 'DONE'), ``abrt`` the
        abort banner (default: 'FAIL'), and ``prog`` the character printed
        as the progress indicator (default: '.').
        """
        self.printer = printer
        self.end_msg = end
        self.abrt_msg = abrt
        self.prog_msg = prog

    def abrt(self, s=None, post=None, noraise=False):
        """ Print the abort banner and raise ``ProgressAbrt``

        With ``noraise=True`` no exception is raised and progress is
        allowed to continue. When ``post`` is supplied it is invoked with
        no arguments after the banner is printed, but before any exception
        is raised.
        """
        banner = s if s else self.abrt_msg
        self.printer(self.color.red(banner))
        if post:
            post()
        if not noraise:
            raise ProgressAbrt()

    def prog(self, s=None):
        """ Print the progress indicator """
        self.printer(s if s else self.prog_msg, end='')
|
Othernet-Project/conz
|
conz/progress.py
|
Progress.abrt
|
python
|
def abrt(self, s=None, post=None, noraise=False):
s = s or self.abrt_msg
self.printer(self.color.red(s))
if post:
post()
if noraise:
return
raise ProgressAbrt()
|
Prints the abrt banner and raises ``ProgressAbrt`` exception
When ``noraise`` flag is set to ``True``, then the exception is not
raised, and progress is allowed to continue.
If ``post`` function is supplied it is invoked with no arguments after
the close banner is printed, but before exceptions are raised. The
``post`` function takes no arguments.
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/progress.py#L73-L89
|
[
"def pverb(self, *args, **kwargs):\n \"\"\" Console verbose message to STDOUT \"\"\"\n if not self.verbose:\n return\n self.pstd(*args, **kwargs)\n"
] |
class Progress:
    """
    Wrapper that manages step progress
    """
    color = ansi_colors.color

    def __init__(self, printer, end='DONE', abrt='FAIL', prog='.'):
        """
        The ``Console`` method to be used is specified using the
        ``printer`` argument.

        ``end`` is the progress end banner (default: 'DONE'), ``abrt`` the
        abort banner (default: 'FAIL'), and ``prog`` the character printed
        as the progress indicator (default: '.').
        """
        self.printer = printer
        self.end_msg = end
        self.abrt_msg = abrt
        self.prog_msg = prog

    def end(self, s=None, post=None, noraise=False):
        """ Print the end banner and raise ``ProgressOK``

        With ``noraise=True`` no exception is raised and progress is
        allowed to continue. When ``post`` is supplied it is invoked with
        no arguments after the banner is printed, but before any exception
        is raised.
        """
        banner = s if s else self.end_msg
        self.printer(self.color.green(banner))
        if post:
            post()
        if not noraise:
            raise ProgressOK()

    def prog(self, s=None):
        """ Print the progress indicator """
        self.printer(s if s else self.prog_msg, end='')
|
Othernet-Project/conz
|
conz/progress.py
|
Progress.prog
|
python
|
def prog(self, s=None):
s = s or self.prog_msg
self.printer(s, end='')
|
Prints the progress indicator
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/progress.py#L91-L94
| null |
class Progress:
    """
    Wrapper that manages step progress
    """
    color = ansi_colors.color

    def __init__(self, printer, end='DONE', abrt='FAIL', prog='.'):
        """
        The ``Console`` method to be used is specified using the
        ``printer`` argument.

        ``end`` is the progress end banner (default: 'DONE'), ``abrt`` the
        abort banner (default: 'FAIL'), and ``prog`` the character printed
        as the progress indicator (default: '.').
        """
        self.printer = printer
        self.end_msg = end
        self.abrt_msg = abrt
        self.prog_msg = prog

    def end(self, s=None, post=None, noraise=False):
        """ Print the end banner and raise ``ProgressOK``

        With ``noraise=True`` no exception is raised and progress is
        allowed to continue. When ``post`` is supplied it is invoked with
        no arguments after the banner is printed, but before any exception
        is raised.
        """
        banner = s if s else self.end_msg
        self.printer(self.color.green(banner))
        if post:
            post()
        if not noraise:
            raise ProgressOK()

    def abrt(self, s=None, post=None, noraise=False):
        """ Print the abort banner and raise ``ProgressAbrt``

        With ``noraise=True`` no exception is raised and progress is
        allowed to continue. When ``post`` is supplied it is invoked with
        no arguments after the banner is printed, but before any exception
        is raised.
        """
        banner = s if s else self.abrt_msg
        self.printer(self.color.red(banner))
        if post:
            post()
        if not noraise:
            raise ProgressAbrt()
|
Othernet-Project/conz
|
conz/utils.py
|
rewrap
|
python
|
def rewrap(s, width=COLS):
s = ' '.join([l.strip() for l in s.strip().split('\n')])
return '\n'.join(textwrap.wrap(s, width))
|
Join all lines from input string and wrap it at specified width
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/utils.py#L17-L20
| null |
"""
Misc utility functions
Copyright 2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
import os
import textwrap
COLS = os.getenv('COLUMNS', 79)
def rewrap_long(s, width=COLS):
    """ Rewrap longer texts with paragraph breaks (two consecutive LF) """
    paras = s.split('\n\n')
    # Pass ``width`` through to rewrap(); previously the parameter was
    # silently ignored and rewrap() always used its own default.
    return '\n\n'.join(rewrap(p, width) for p in paras)
def striplines(s):
    """ Strip whitespace from each line of input string """
    stripped = [line.strip() for line in s.strip().split('\n')]
    return '\n'.join(stripped)
def safeint(s):
    """ Convert the string to int without raising errors

    Returns ``None`` when ``s`` is not string-like or does not parse as an
    integer.
    """
    try:
        return int(s.strip())
    except (TypeError, ValueError, AttributeError):
        # AttributeError covers non-string inputs such as ``None``, which
        # have no ``.strip()`` -- the contract promises no exceptions.
        return None
|
Othernet-Project/conz
|
conz/utils.py
|
rewrap_long
|
python
|
def rewrap_long(s, width=COLS):
paras = s.split('\n\n')
return '\n\n'.join(rewrap(p) for p in paras)
|
Rewrap longer texts with paragraph breaks (two consecutive LF)
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/utils.py#L23-L26
| null |
"""
Misc utility functions
Copyright 2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
import os
import textwrap
COLS = os.getenv('COLUMNS', 79)
def rewrap(s, width=COLS):
    """ Join all lines from input string and wrap it at specified width """
    joined = ' '.join(line.strip() for line in s.strip().split('\n'))
    return '\n'.join(textwrap.wrap(joined, width))
def striplines(s):
    """ Strip whitespace from each line of input string """
    # The outer strip() removes leading/trailing whitespace (including any
    # blank first/last lines) before per-line stripping.
    return '\n'.join([l.strip() for l in s.strip().split('\n')])
def safeint(s):
    """ Convert the string to int without raising errors

    Returns ``None`` when ``s`` is not string-like or does not parse as an
    integer.
    """
    try:
        return int(s.strip())
    except (TypeError, ValueError, AttributeError):
        # AttributeError covers non-string inputs such as ``None``, which
        # have no ``.strip()`` -- the contract promises no exceptions.
        return None
|
dustinmm80/healthy
|
pylint_runner.py
|
score
|
python
|
def score(package_path):
python_files = find_files(package_path, '*.py')
total_counter = Counter()
for python_file in python_files:
output = run_pylint(python_file)
counter = parse_pylint_output(output)
total_counter += counter
score_value = 0
for count, stat in enumerate(total_counter):
score_value += SCORING_VALUES[stat] * count
return score_value / 5
|
Runs pylint on a package and returns a score
Lower score is better
:param package_path: path of the package to score
:return: number of score
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/pylint_runner.py#L22-L44
|
[
"def find_files(directory, pattern):\n \"\"\"\n Recusively finds files in a directory re\n :param directory: base directory\n :param pattern: wildcard pattern\n :return: generator with filenames matching pattern\n \"\"\"\n for root, __, files in os.walk(directory):\n for basename in files:\n if fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename\n",
"def run_pylint(filename):\n \"\"\"\n Runs pylint on a given file\n :param filename:\n :return: list of pylint errors\n \"\"\"\n ARGS = [\n '-r',\n 'n'\n ]\n pylint_output = WritableObject()\n Run([filename]+ARGS, reporter=TextReporter(pylint_output), exit=False)\n\n lines = []\n for line in pylint_output.read():\n if not line.startswith('*') and line != '\\n':\n lines.append(line)\n\n return lines\n",
"def parse_pylint_output(output):\n \"\"\"\n Parses pylint output, counting number of errors, conventions, etc\n :param output: output list generated by run_pylint()\n :return:\n \"\"\"\n\n stripped_output = [x[0] for x in output]\n\n counter = Counter(stripped_output)\n\n return counter\n"
] |
#! /usr/bin/env python
# coding=utf-8
"""
Utilities for fetching and unpacking packages from pypi
"""
from collections import Counter
import fnmatch
import os
from pylint.lint import Run
from pylint.reporters.text import TextReporter
SCORING_VALUES = {
'F': 5,
'E': 4,
'W': 3,
'R': 2,
'C': 1
}
def parse_pylint_output(output):
    """
    Parses pylint output, counting number of errors, conventions, etc

    :param output: output list generated by run_pylint()
    :return: Counter keyed by the first character of each line (the
        message category letter: F/E/W/R/C)
    """
    categories = (line[0] for line in output)
    return Counter(categories)
class WritableObject(object):
    """
    Dummy file-like output sink used to capture pylint's report text
    """

    def __init__(self):
        # Accumulated chunks, in write order
        self.content = []

    def write(self, st):
        """ Append one chunk of report text """
        self.content.append(st)

    def read(self):
        """ Return the list of all chunks written so far """
        return self.content
def run_pylint(filename):
    """
    Runs pylint on a given file

    :param filename: path of the Python file to lint
    :return: list of pylint message lines (header lines starting with '*'
        and blank lines are filtered out)
    """
    args = ['-r', 'n']
    sink = WritableObject()
    Run([filename] + args, reporter=TextReporter(sink), exit=False)
    return [line for line in sink.read()
            if not line.startswith('*') and line != '\n']
def find_files(directory, pattern):
    """
    Recursively finds files in a directory tree

    :param directory: base directory
    :param pattern: wildcard pattern
    :return: generator with filenames matching pattern
    """
    for root, _dirs, filenames in os.walk(directory):
        for name in fnmatch.filter(filenames, pattern):
            yield os.path.join(root, name)
|
dustinmm80/healthy
|
pylint_runner.py
|
parse_pylint_output
|
python
|
def parse_pylint_output(output):
stripped_output = [x[0] for x in output]
counter = Counter(stripped_output)
return counter
|
Parses pylint output, counting number of errors, conventions, etc
:param output: output list generated by run_pylint()
:return:
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/pylint_runner.py#L46-L57
| null |
#! /usr/bin/env python
# coding=utf-8
"""
Utilities for fetching and unpacking packages from pypi
"""
from collections import Counter
import fnmatch
import os
from pylint.lint import Run
from pylint.reporters.text import TextReporter
SCORING_VALUES = {
'F': 5,
'E': 4,
'W': 3,
'R': 2,
'C': 1
}
def score(package_path):
    """
    Runs pylint on a package and returns a score

    Lower score is better

    :param package_path: path of the package to score
    :return: number of score
    """
    total_counter = Counter()
    for python_file in find_files(package_path, '*.py'):
        total_counter += parse_pylint_output(run_pylint(python_file))
    # Weight each message category by its severity times how often it
    # occurred. The previous code iterated ``enumerate(total_counter)`` and
    # multiplied by the enumeration index, which ignored the real counts
    # (and weighted the first category by zero).
    score_value = 0
    for stat, count in total_counter.items():
        score_value += SCORING_VALUES[stat] * count
    return score_value / 5
class WritableObject(object):
"""
Dummy output stream for pylint"
"""
def __init__(self):
self.content = []
def write(self, st):
"dummy write"
self.content.append(st)
def read(self):
"dummy read"
return self.content
def run_pylint(filename):
"""
Runs pylint on a given file
:param filename:
:return: list of pylint errors
"""
ARGS = [
'-r',
'n'
]
pylint_output = WritableObject()
Run([filename]+ARGS, reporter=TextReporter(pylint_output), exit=False)
lines = []
for line in pylint_output.read():
if not line.startswith('*') and line != '\n':
lines.append(line)
return lines
def find_files(directory, pattern):
"""
Recusively finds files in a directory re
:param directory: base directory
:param pattern: wildcard pattern
:return: generator with filenames matching pattern
"""
for root, __, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
|
dustinmm80/healthy
|
pylint_runner.py
|
run_pylint
|
python
|
def run_pylint(filename):
ARGS = [
'-r',
'n'
]
pylint_output = WritableObject()
Run([filename]+ARGS, reporter=TextReporter(pylint_output), exit=False)
lines = []
for line in pylint_output.read():
if not line.startswith('*') and line != '\n':
lines.append(line)
return lines
|
Runs pylint on a given file
:param filename:
:return: list of pylint errors
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/pylint_runner.py#L72-L90
|
[
"def read(self):\n \"dummy read\"\n return self.content\n"
] |
#! /usr/bin/env python
# coding=utf-8
"""
Utilities for fetching and unpacking packages from pypi
"""
from collections import Counter
import fnmatch
import os
from pylint.lint import Run
from pylint.reporters.text import TextReporter
SCORING_VALUES = {
'F': 5,
'E': 4,
'W': 3,
'R': 2,
'C': 1
}
def score(package_path):
"""
Runs pylint on a package and returns a score
Lower score is better
:param package_path: path of the package to score
:return: number of score
"""
python_files = find_files(package_path, '*.py')
total_counter = Counter()
for python_file in python_files:
output = run_pylint(python_file)
counter = parse_pylint_output(output)
total_counter += counter
score_value = 0
for count, stat in enumerate(total_counter):
score_value += SCORING_VALUES[stat] * count
return score_value / 5
def parse_pylint_output(output):
"""
Parses pylint output, counting number of errors, conventions, etc
:param output: output list generated by run_pylint()
:return:
"""
stripped_output = [x[0] for x in output]
counter = Counter(stripped_output)
return counter
class WritableObject(object):
"""
Dummy output stream for pylint"
"""
def __init__(self):
self.content = []
def write(self, st):
"dummy write"
self.content.append(st)
def read(self):
"dummy read"
return self.content
def find_files(directory, pattern):
"""
Recusively finds files in a directory re
:param directory: base directory
:param pattern: wildcard pattern
:return: generator with filenames matching pattern
"""
for root, __, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
|
dustinmm80/healthy
|
pylint_runner.py
|
find_files
|
python
|
def find_files(directory, pattern):
for root, __, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
|
Recusively finds files in a directory re
:param directory: base directory
:param pattern: wildcard pattern
:return: generator with filenames matching pattern
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/pylint_runner.py#L92-L103
| null |
#! /usr/bin/env python
# coding=utf-8
"""
Utilities for fetching and unpacking packages from pypi
"""
from collections import Counter
import fnmatch
import os
from pylint.lint import Run
from pylint.reporters.text import TextReporter
SCORING_VALUES = {
'F': 5,
'E': 4,
'W': 3,
'R': 2,
'C': 1
}
def score(package_path):
"""
Runs pylint on a package and returns a score
Lower score is better
:param package_path: path of the package to score
:return: number of score
"""
python_files = find_files(package_path, '*.py')
total_counter = Counter()
for python_file in python_files:
output = run_pylint(python_file)
counter = parse_pylint_output(output)
total_counter += counter
score_value = 0
for count, stat in enumerate(total_counter):
score_value += SCORING_VALUES[stat] * count
return score_value / 5
def parse_pylint_output(output):
"""
Parses pylint output, counting number of errors, conventions, etc
:param output: output list generated by run_pylint()
:return:
"""
stripped_output = [x[0] for x in output]
counter = Counter(stripped_output)
return counter
class WritableObject(object):
"""
Dummy output stream for pylint"
"""
def __init__(self):
self.content = []
def write(self, st):
"dummy write"
self.content.append(st)
def read(self):
"dummy read"
return self.content
def run_pylint(filename):
"""
Runs pylint on a given file
:param filename:
:return: list of pylint errors
"""
ARGS = [
'-r',
'n'
]
pylint_output = WritableObject()
Run([filename]+ARGS, reporter=TextReporter(pylint_output), exit=False)
lines = []
for line in pylint_output.read():
if not line.startswith('*') and line != '\n':
lines.append(line)
return lines
|
dustinmm80/healthy
|
checks.py
|
check_license
|
python
|
def check_license(package_info, *args):
classifiers = package_info.get('classifiers')
reason = "No License"
result = False
if len([c for c in classifiers if c.startswith('License ::')]) > 0:
result = True
return result, reason, HAS_LICENSE
|
Does the package have a license classifier?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/checks.py#L33-L46
| null |
#! /usr/bin/env python
# coding=utf-8
"""
checks.py
Check functions that healthy employs to generate a score
"""
from datetime import datetime, timedelta
DAYS_STALE = 180 # number of days without update that a package is considered 'stale'
# Points
HAS_LICENSE = 20
HAS_RELEASE_FILES = 30
NOT_STALE = 15
HAS_SUMMARY = 15
HAS_DESCRIPTION = 30
HAS_PYTHON_CLASSIFIERS = 15
HAS_AUTHOR_INFO = 10
HAS_HOMEPAGE = 10
TOTAL_POSSIBLE = sum(
[
HAS_LICENSE, HAS_RELEASE_FILES, NOT_STALE, HAS_SUMMARY, HAS_DESCRIPTION, HAS_PYTHON_CLASSIFIERS,
HAS_AUTHOR_INFO, HAS_HOMEPAGE
]
)
BAD_VALUES = ['UNKNOWN', '', None]
def check_homepage(package_info, *args):
    """
    Does the package have a homepage listed?

    :param package_info: package_info dictionary
    :return: Tuple (is the condition True or False?, reason if it is False
        else None, score to be applied)
    """
    has_homepage = package_info.get('home_page') not in BAD_VALUES
    return has_homepage, "Home page missing", HAS_HOMEPAGE
def check_summary(package_info, *args):
    """
    Does the package have a summary listed?

    :param package_info: package_info dictionary
    :return: Tuple (is the condition True or False?, reason if it is False
        else None, score to be applied)
    """
    has_summary = package_info.get('summary') not in BAD_VALUES
    return has_summary, "Summary missing", HAS_SUMMARY
def check_description(package_info, *args):
    """
    Does the package have a description listed?

    :param package_info: package_info dictionary
    :return: Tuple (is the condition True or False?, reason if it is False
        else None, score to be applied)
    """
    has_description = package_info.get('description') not in BAD_VALUES
    return has_description, "Description missing", HAS_DESCRIPTION
def check_python_classifiers(package_info, *args):
    """
    Does the package have Python classifiers?

    :param package_info: package_info dictionary
    :return: Tuple (is the condition True or False?, reason if it is False
        else None, score to be applied)
    """
    classifiers = package_info.get('classifiers')
    has_python = any(
        c.startswith('Programming Language :: Python ::') for c in classifiers
    )
    return has_python, "Python classifiers missing", HAS_PYTHON_CLASSIFIERS
def check_author_info(package_info, *args):
    """
    Does the package have author information listed?

    :param package_info: package_info dictionary
    :return: Tuple (is the condition True or False?, reason if it is False
        else None, score to be applied)
    """
    has_author = package_info.get('author') not in BAD_VALUES
    has_email = package_info.get('author_email') not in BAD_VALUES
    return has_author or has_email, "Author name or email missing", HAS_AUTHOR_INFO
def check_release_files(package_info, *args):
    """
    Does the package have release files?

    :param package_info: package_info dictionary (unused; the release list
        arrives as the first positional extra argument)
    :return: Tuple (is the condition True or False?, reason if it is False
        else None, score to be applied)
    """
    release_urls = args[0]
    has_releases = len(release_urls) > 0
    return has_releases, "No release files uploaded", HAS_RELEASE_FILES
def check_stale(package_info, *args):
    """
    Is the package stale?

    :param package_info: package_info dictionary (unused; the release list
        arrives as the first positional extra argument)
    :return: Tuple (is the condition True or False?, reason if it is False
        else None, score to be applied)
    """
    reason = 'Package not updated in {} days'.format(DAYS_STALE)
    release_urls = args[0]
    fresh = False
    if len(release_urls) > 0:
        # NOTE(review): utcnow() is naive; presumably ``upload_time`` is
        # also a naive UTC datetime -- confirm against the data source.
        uploaded = release_urls[0]['upload_time']
        fresh = datetime.utcnow() - timedelta(days=DAYS_STALE) <= uploaded
    return fresh, reason, NOT_STALE
|
dustinmm80/healthy
|
checks.py
|
check_homepage
|
python
|
def check_homepage(package_info, *args):
reason = "Home page missing"
result = False
if package_info.get('home_page') not in BAD_VALUES:
result = True
return result, reason, HAS_HOMEPAGE
|
Does the package have a homepage listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/checks.py#L49-L61
| null |
#! /usr/bin/env python
# coding=utf-8
"""
checks.py
Check functions that healthy employs to generate a score
"""
from datetime import datetime, timedelta
DAYS_STALE = 180 # number of days without update that a package is considered 'stale'
# Points
HAS_LICENSE = 20
HAS_RELEASE_FILES = 30
NOT_STALE = 15
HAS_SUMMARY = 15
HAS_DESCRIPTION = 30
HAS_PYTHON_CLASSIFIERS = 15
HAS_AUTHOR_INFO = 10
HAS_HOMEPAGE = 10
TOTAL_POSSIBLE = sum(
[
HAS_LICENSE, HAS_RELEASE_FILES, NOT_STALE, HAS_SUMMARY, HAS_DESCRIPTION, HAS_PYTHON_CLASSIFIERS,
HAS_AUTHOR_INFO, HAS_HOMEPAGE
]
)
BAD_VALUES = ['UNKNOWN', '', None]
def check_license(package_info, *args):
    """
    Does the package have a license classifier?

    :param package_info: package_info dictionary
    :return: Tuple (is the condition True or False?, reason if it is False
        else None, score to be applied)
    """
    classifiers = package_info.get('classifiers')
    has_license = any(c.startswith('License ::') for c in classifiers)
    return has_license, "No License", HAS_LICENSE
def check_summary(package_info, *args):
"""
Does the package have a summary listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Summary missing"
result = False
if package_info.get('summary') not in BAD_VALUES:
result = True
return result, reason, HAS_SUMMARY
def check_description(package_info, *args):
"""
Does the package have a description listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Description missing"
result = False
if package_info.get('description') not in BAD_VALUES:
result = True
return result, reason, HAS_DESCRIPTION
def check_python_classifiers(package_info, *args):
"""
Does the package have Python classifiers?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers')
reason = "Python classifiers missing"
result = False
if len([c for c in classifiers if c.startswith('Programming Language :: Python ::')]) > 0:
result = True
return result, reason, HAS_PYTHON_CLASSIFIERS
def check_author_info(package_info, *args):
    """Check whether the package lists an author name or an author email.

    :param package_info: package_info dictionary
    :return: Tuple (check passed?, failure reason, score to be applied)
    """
    author = package_info.get('author')
    email = package_info.get('author_email')
    # Either field being present is enough to pass.
    has_author = author not in BAD_VALUES or email not in BAD_VALUES
    return has_author, "Author name or email missing", HAS_AUTHOR_INFO
def check_release_files(package_info, *args):
    """Check whether any release files were uploaded for the package.

    :param package_info: package_info dictionary; ``args[0]`` carries the
        release_urls list for the version under inspection
    :return: Tuple (check passed?, failure reason, score to be applied)
    """
    release_urls = args[0]
    has_files = bool(release_urls)  # equivalent to len(release_urls) > 0
    return has_files, "No release files uploaded", HAS_RELEASE_FILES
def check_stale(package_info, *args):
    """Check whether the package was updated within the last DAYS_STALE days.

    :param package_info: package_info dictionary; ``args[0]`` carries the
        release_urls list for the version under inspection
    :return: Tuple (check passed?, failure reason, score to be applied)
    """
    release_urls = args[0]
    fresh = False
    if release_urls:
        # The first release-url entry carries the version's upload timestamp.
        cutoff = datetime.utcnow() - timedelta(days=DAYS_STALE)
        fresh = cutoff <= release_urls[0]['upload_time']
    return fresh, 'Package not updated in {} days'.format(DAYS_STALE), NOT_STALE
|
dustinmm80/healthy
|
checks.py
|
check_summary
|
python
|
def check_summary(package_info, *args):
reason = "Summary missing"
result = False
if package_info.get('summary') not in BAD_VALUES:
result = True
return result, reason, HAS_SUMMARY
|
Does the package have a summary listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/checks.py#L64-L76
| null |
#! /usr/bin/env python
# coding=utf-8
"""
checks.py
Check functions that healthy employs to generate a score
"""
from datetime import datetime, timedelta
DAYS_STALE = 180 # number of days without update that a package is considered 'stale'
# Points
HAS_LICENSE = 20
HAS_RELEASE_FILES = 30
NOT_STALE = 15
HAS_SUMMARY = 15
HAS_DESCRIPTION = 30
HAS_PYTHON_CLASSIFIERS = 15
HAS_AUTHOR_INFO = 10
HAS_HOMEPAGE = 10
TOTAL_POSSIBLE = sum(
[
HAS_LICENSE, HAS_RELEASE_FILES, NOT_STALE, HAS_SUMMARY, HAS_DESCRIPTION, HAS_PYTHON_CLASSIFIERS,
HAS_AUTHOR_INFO, HAS_HOMEPAGE
]
)
BAD_VALUES = ['UNKNOWN', '', None]
def check_license(package_info, *args):
"""
Does the package have a license classifier?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers')
reason = "No License"
result = False
if len([c for c in classifiers if c.startswith('License ::')]) > 0:
result = True
return result, reason, HAS_LICENSE
def check_homepage(package_info, *args):
"""
Does the package have a homepage listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Home page missing"
result = False
if package_info.get('home_page') not in BAD_VALUES:
result = True
return result, reason, HAS_HOMEPAGE
def check_description(package_info, *args):
"""
Does the package have a description listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Description missing"
result = False
if package_info.get('description') not in BAD_VALUES:
result = True
return result, reason, HAS_DESCRIPTION
def check_python_classifiers(package_info, *args):
"""
Does the package have Python classifiers?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers')
reason = "Python classifiers missing"
result = False
if len([c for c in classifiers if c.startswith('Programming Language :: Python ::')]) > 0:
result = True
return result, reason, HAS_PYTHON_CLASSIFIERS
def check_author_info(package_info, *args):
"""
Does the package have author information listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Author name or email missing"
result = False
if package_info.get('author') not in BAD_VALUES or package_info.get('author_email') not in BAD_VALUES:
result = True
return result, reason, HAS_AUTHOR_INFO
def check_release_files(package_info, *args):
"""
Does the package have release files?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "No release files uploaded"
result = False
release_urls = args[0]
if len(release_urls) > 0:
result = True
return result, reason, HAS_RELEASE_FILES
def check_stale(package_info, *args):
"""
Is the package stale?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = 'Package not updated in {} days'.format(DAYS_STALE)
result = False
now = datetime.utcnow()
release_urls = args[0]
if len(release_urls) > 0:
package_uploaded_time = release_urls[0]['upload_time']
if now - timedelta(days=DAYS_STALE) <= package_uploaded_time:
result = True
return result, reason, NOT_STALE
|
dustinmm80/healthy
|
checks.py
|
check_description
|
python
|
def check_description(package_info, *args):
reason = "Description missing"
result = False
if package_info.get('description') not in BAD_VALUES:
result = True
return result, reason, HAS_DESCRIPTION
|
Does the package have a description listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/checks.py#L79-L91
| null |
#! /usr/bin/env python
# coding=utf-8
"""
checks.py
Check functions that healthy employs to generate a score
"""
from datetime import datetime, timedelta
DAYS_STALE = 180 # number of days without update that a package is considered 'stale'
# Points
HAS_LICENSE = 20
HAS_RELEASE_FILES = 30
NOT_STALE = 15
HAS_SUMMARY = 15
HAS_DESCRIPTION = 30
HAS_PYTHON_CLASSIFIERS = 15
HAS_AUTHOR_INFO = 10
HAS_HOMEPAGE = 10
TOTAL_POSSIBLE = sum(
[
HAS_LICENSE, HAS_RELEASE_FILES, NOT_STALE, HAS_SUMMARY, HAS_DESCRIPTION, HAS_PYTHON_CLASSIFIERS,
HAS_AUTHOR_INFO, HAS_HOMEPAGE
]
)
BAD_VALUES = ['UNKNOWN', '', None]
def check_license(package_info, *args):
"""
Does the package have a license classifier?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers')
reason = "No License"
result = False
if len([c for c in classifiers if c.startswith('License ::')]) > 0:
result = True
return result, reason, HAS_LICENSE
def check_homepage(package_info, *args):
"""
Does the package have a homepage listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Home page missing"
result = False
if package_info.get('home_page') not in BAD_VALUES:
result = True
return result, reason, HAS_HOMEPAGE
def check_summary(package_info, *args):
"""
Does the package have a summary listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Summary missing"
result = False
if package_info.get('summary') not in BAD_VALUES:
result = True
return result, reason, HAS_SUMMARY
def check_python_classifiers(package_info, *args):
"""
Does the package have Python classifiers?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers')
reason = "Python classifiers missing"
result = False
if len([c for c in classifiers if c.startswith('Programming Language :: Python ::')]) > 0:
result = True
return result, reason, HAS_PYTHON_CLASSIFIERS
def check_author_info(package_info, *args):
"""
Does the package have author information listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Author name or email missing"
result = False
if package_info.get('author') not in BAD_VALUES or package_info.get('author_email') not in BAD_VALUES:
result = True
return result, reason, HAS_AUTHOR_INFO
def check_release_files(package_info, *args):
"""
Does the package have release files?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "No release files uploaded"
result = False
release_urls = args[0]
if len(release_urls) > 0:
result = True
return result, reason, HAS_RELEASE_FILES
def check_stale(package_info, *args):
"""
Is the package stale?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = 'Package not updated in {} days'.format(DAYS_STALE)
result = False
now = datetime.utcnow()
release_urls = args[0]
if len(release_urls) > 0:
package_uploaded_time = release_urls[0]['upload_time']
if now - timedelta(days=DAYS_STALE) <= package_uploaded_time:
result = True
return result, reason, NOT_STALE
|
dustinmm80/healthy
|
checks.py
|
check_python_classifiers
|
python
|
def check_python_classifiers(package_info, *args):
classifiers = package_info.get('classifiers')
reason = "Python classifiers missing"
result = False
if len([c for c in classifiers if c.startswith('Programming Language :: Python ::')]) > 0:
result = True
return result, reason, HAS_PYTHON_CLASSIFIERS
|
Does the package have Python classifiers?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/checks.py#L94-L107
| null |
#! /usr/bin/env python
# coding=utf-8
"""
checks.py
Check functions that healthy employs to generate a score
"""
from datetime import datetime, timedelta
DAYS_STALE = 180 # number of days without update that a package is considered 'stale'
# Points
HAS_LICENSE = 20
HAS_RELEASE_FILES = 30
NOT_STALE = 15
HAS_SUMMARY = 15
HAS_DESCRIPTION = 30
HAS_PYTHON_CLASSIFIERS = 15
HAS_AUTHOR_INFO = 10
HAS_HOMEPAGE = 10
TOTAL_POSSIBLE = sum(
[
HAS_LICENSE, HAS_RELEASE_FILES, NOT_STALE, HAS_SUMMARY, HAS_DESCRIPTION, HAS_PYTHON_CLASSIFIERS,
HAS_AUTHOR_INFO, HAS_HOMEPAGE
]
)
BAD_VALUES = ['UNKNOWN', '', None]
def check_license(package_info, *args):
"""
Does the package have a license classifier?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers')
reason = "No License"
result = False
if len([c for c in classifiers if c.startswith('License ::')]) > 0:
result = True
return result, reason, HAS_LICENSE
def check_homepage(package_info, *args):
"""
Does the package have a homepage listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Home page missing"
result = False
if package_info.get('home_page') not in BAD_VALUES:
result = True
return result, reason, HAS_HOMEPAGE
def check_summary(package_info, *args):
"""
Does the package have a summary listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Summary missing"
result = False
if package_info.get('summary') not in BAD_VALUES:
result = True
return result, reason, HAS_SUMMARY
def check_description(package_info, *args):
"""
Does the package have a description listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Description missing"
result = False
if package_info.get('description') not in BAD_VALUES:
result = True
return result, reason, HAS_DESCRIPTION
def check_author_info(package_info, *args):
"""
Does the package have author information listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Author name or email missing"
result = False
if package_info.get('author') not in BAD_VALUES or package_info.get('author_email') not in BAD_VALUES:
result = True
return result, reason, HAS_AUTHOR_INFO
def check_release_files(package_info, *args):
"""
Does the package have release files?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "No release files uploaded"
result = False
release_urls = args[0]
if len(release_urls) > 0:
result = True
return result, reason, HAS_RELEASE_FILES
def check_stale(package_info, *args):
"""
Is the package stale?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = 'Package not updated in {} days'.format(DAYS_STALE)
result = False
now = datetime.utcnow()
release_urls = args[0]
if len(release_urls) > 0:
package_uploaded_time = release_urls[0]['upload_time']
if now - timedelta(days=DAYS_STALE) <= package_uploaded_time:
result = True
return result, reason, NOT_STALE
|
dustinmm80/healthy
|
checks.py
|
check_author_info
|
python
|
def check_author_info(package_info, *args):
reason = "Author name or email missing"
result = False
if package_info.get('author') not in BAD_VALUES or package_info.get('author_email') not in BAD_VALUES:
result = True
return result, reason, HAS_AUTHOR_INFO
|
Does the package have author information listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/checks.py#L110-L122
| null |
#! /usr/bin/env python
# coding=utf-8
"""
checks.py
Check functions that healthy employs to generate a score
"""
from datetime import datetime, timedelta
DAYS_STALE = 180 # number of days without update that a package is considered 'stale'
# Points
HAS_LICENSE = 20
HAS_RELEASE_FILES = 30
NOT_STALE = 15
HAS_SUMMARY = 15
HAS_DESCRIPTION = 30
HAS_PYTHON_CLASSIFIERS = 15
HAS_AUTHOR_INFO = 10
HAS_HOMEPAGE = 10
TOTAL_POSSIBLE = sum(
[
HAS_LICENSE, HAS_RELEASE_FILES, NOT_STALE, HAS_SUMMARY, HAS_DESCRIPTION, HAS_PYTHON_CLASSIFIERS,
HAS_AUTHOR_INFO, HAS_HOMEPAGE
]
)
BAD_VALUES = ['UNKNOWN', '', None]
def check_license(package_info, *args):
"""
Does the package have a license classifier?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers')
reason = "No License"
result = False
if len([c for c in classifiers if c.startswith('License ::')]) > 0:
result = True
return result, reason, HAS_LICENSE
def check_homepage(package_info, *args):
"""
Does the package have a homepage listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Home page missing"
result = False
if package_info.get('home_page') not in BAD_VALUES:
result = True
return result, reason, HAS_HOMEPAGE
def check_summary(package_info, *args):
"""
Does the package have a summary listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Summary missing"
result = False
if package_info.get('summary') not in BAD_VALUES:
result = True
return result, reason, HAS_SUMMARY
def check_description(package_info, *args):
"""
Does the package have a description listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Description missing"
result = False
if package_info.get('description') not in BAD_VALUES:
result = True
return result, reason, HAS_DESCRIPTION
def check_python_classifiers(package_info, *args):
"""
Does the package have Python classifiers?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers')
reason = "Python classifiers missing"
result = False
if len([c for c in classifiers if c.startswith('Programming Language :: Python ::')]) > 0:
result = True
return result, reason, HAS_PYTHON_CLASSIFIERS
def check_release_files(package_info, *args):
"""
Does the package have release files?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "No release files uploaded"
result = False
release_urls = args[0]
if len(release_urls) > 0:
result = True
return result, reason, HAS_RELEASE_FILES
def check_stale(package_info, *args):
"""
Is the package stale?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = 'Package not updated in {} days'.format(DAYS_STALE)
result = False
now = datetime.utcnow()
release_urls = args[0]
if len(release_urls) > 0:
package_uploaded_time = release_urls[0]['upload_time']
if now - timedelta(days=DAYS_STALE) <= package_uploaded_time:
result = True
return result, reason, NOT_STALE
|
dustinmm80/healthy
|
checks.py
|
check_release_files
|
python
|
def check_release_files(package_info, *args):
reason = "No release files uploaded"
result = False
release_urls = args[0]
if len(release_urls) > 0:
result = True
return result, reason, HAS_RELEASE_FILES
|
Does the package have release files?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/checks.py#L125-L138
| null |
#! /usr/bin/env python
# coding=utf-8
"""
checks.py
Check functions that healthy employs to generate a score
"""
from datetime import datetime, timedelta
DAYS_STALE = 180 # number of days without update that a package is considered 'stale'
# Points
HAS_LICENSE = 20
HAS_RELEASE_FILES = 30
NOT_STALE = 15
HAS_SUMMARY = 15
HAS_DESCRIPTION = 30
HAS_PYTHON_CLASSIFIERS = 15
HAS_AUTHOR_INFO = 10
HAS_HOMEPAGE = 10
TOTAL_POSSIBLE = sum(
[
HAS_LICENSE, HAS_RELEASE_FILES, NOT_STALE, HAS_SUMMARY, HAS_DESCRIPTION, HAS_PYTHON_CLASSIFIERS,
HAS_AUTHOR_INFO, HAS_HOMEPAGE
]
)
BAD_VALUES = ['UNKNOWN', '', None]
def check_license(package_info, *args):
"""
Does the package have a license classifier?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers')
reason = "No License"
result = False
if len([c for c in classifiers if c.startswith('License ::')]) > 0:
result = True
return result, reason, HAS_LICENSE
def check_homepage(package_info, *args):
"""
Does the package have a homepage listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Home page missing"
result = False
if package_info.get('home_page') not in BAD_VALUES:
result = True
return result, reason, HAS_HOMEPAGE
def check_summary(package_info, *args):
"""
Does the package have a summary listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Summary missing"
result = False
if package_info.get('summary') not in BAD_VALUES:
result = True
return result, reason, HAS_SUMMARY
def check_description(package_info, *args):
"""
Does the package have a description listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Description missing"
result = False
if package_info.get('description') not in BAD_VALUES:
result = True
return result, reason, HAS_DESCRIPTION
def check_python_classifiers(package_info, *args):
"""
Does the package have Python classifiers?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers')
reason = "Python classifiers missing"
result = False
if len([c for c in classifiers if c.startswith('Programming Language :: Python ::')]) > 0:
result = True
return result, reason, HAS_PYTHON_CLASSIFIERS
def check_author_info(package_info, *args):
"""
Does the package have author information listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Author name or email missing"
result = False
if package_info.get('author') not in BAD_VALUES or package_info.get('author_email') not in BAD_VALUES:
result = True
return result, reason, HAS_AUTHOR_INFO
def check_stale(package_info, *args):
"""
Is the package stale?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = 'Package not updated in {} days'.format(DAYS_STALE)
result = False
now = datetime.utcnow()
release_urls = args[0]
if len(release_urls) > 0:
package_uploaded_time = release_urls[0]['upload_time']
if now - timedelta(days=DAYS_STALE) <= package_uploaded_time:
result = True
return result, reason, NOT_STALE
|
dustinmm80/healthy
|
checks.py
|
check_stale
|
python
|
def check_stale(package_info, *args):
reason = 'Package not updated in {} days'.format(DAYS_STALE)
result = False
now = datetime.utcnow()
release_urls = args[0]
if len(release_urls) > 0:
package_uploaded_time = release_urls[0]['upload_time']
if now - timedelta(days=DAYS_STALE) <= package_uploaded_time:
result = True
return result, reason, NOT_STALE
|
Is the package stale?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/checks.py#L141-L158
| null |
#! /usr/bin/env python
# coding=utf-8
"""
checks.py
Check functions that healthy employs to generate a score
"""
from datetime import datetime, timedelta
DAYS_STALE = 180 # number of days without update that a package is considered 'stale'
# Points
HAS_LICENSE = 20
HAS_RELEASE_FILES = 30
NOT_STALE = 15
HAS_SUMMARY = 15
HAS_DESCRIPTION = 30
HAS_PYTHON_CLASSIFIERS = 15
HAS_AUTHOR_INFO = 10
HAS_HOMEPAGE = 10
TOTAL_POSSIBLE = sum(
[
HAS_LICENSE, HAS_RELEASE_FILES, NOT_STALE, HAS_SUMMARY, HAS_DESCRIPTION, HAS_PYTHON_CLASSIFIERS,
HAS_AUTHOR_INFO, HAS_HOMEPAGE
]
)
BAD_VALUES = ['UNKNOWN', '', None]
def check_license(package_info, *args):
"""
Does the package have a license classifier?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers')
reason = "No License"
result = False
if len([c for c in classifiers if c.startswith('License ::')]) > 0:
result = True
return result, reason, HAS_LICENSE
def check_homepage(package_info, *args):
"""
Does the package have a homepage listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Home page missing"
result = False
if package_info.get('home_page') not in BAD_VALUES:
result = True
return result, reason, HAS_HOMEPAGE
def check_summary(package_info, *args):
"""
Does the package have a summary listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Summary missing"
result = False
if package_info.get('summary') not in BAD_VALUES:
result = True
return result, reason, HAS_SUMMARY
def check_description(package_info, *args):
"""
Does the package have a description listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Description missing"
result = False
if package_info.get('description') not in BAD_VALUES:
result = True
return result, reason, HAS_DESCRIPTION
def check_python_classifiers(package_info, *args):
"""
Does the package have Python classifiers?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers')
reason = "Python classifiers missing"
result = False
if len([c for c in classifiers if c.startswith('Programming Language :: Python ::')]) > 0:
result = True
return result, reason, HAS_PYTHON_CLASSIFIERS
def check_author_info(package_info, *args):
"""
Does the package have author information listed?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "Author name or email missing"
result = False
if package_info.get('author') not in BAD_VALUES or package_info.get('author_email') not in BAD_VALUES:
result = True
return result, reason, HAS_AUTHOR_INFO
def check_release_files(package_info, *args):
"""
Does the package have release files?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None)
"""
reason = "No release files uploaded"
result = False
release_urls = args[0]
if len(release_urls) > 0:
result = True
return result, reason, HAS_RELEASE_FILES
|
dustinmm80/healthy
|
healthy.py
|
calculate_health
|
python
|
def calculate_health(package_name, package_version=None, verbose=False, no_output=False):
total_score = 0
reasons = []
package_releases = CLIENT.package_releases(package_name)
if not package_releases:
if not no_output:
print(TERMINAL.red('{} is not listed on pypi'.format(package_name)))
return 0, []
if package_version is None:
package_version = package_releases[0]
package_info = CLIENT.release_data(package_name, package_version)
release_urls = CLIENT.release_urls(package_name, package_version)
if not package_info or not release_urls:
if not no_output:
print(TERMINAL.red('Version {} is not listed on pypi'.format(
package_version)))
return 0, []
if not no_output:
print(TERMINAL.bold('{} v{}'.format(package_name, package_version)))
print('-----')
checkers = [
checks.check_license,
checks.check_homepage,
checks.check_summary,
checks.check_description,
checks.check_python_classifiers,
checks.check_author_info,
checks.check_release_files,
checks.check_stale
]
for checker in checkers:
result, reason, score = checker(package_info, release_urls)
if result:
total_score += score
else:
reasons.append(reason)
if total_score < 0:
total_score = 0
if not no_output:
percentage = int(float(total_score) / float(checks.TOTAL_POSSIBLE) * 100)
score_string = 'score: {}/{} {}%'.format(total_score, checks.TOTAL_POSSIBLE, percentage)
print(get_health_color(percentage)(score_string))
if verbose and not no_output:
for reason in reasons:
print(reason)
if no_output:
return total_score, reasons
|
Calculates the health of a package, based on several factors
:param package_name: name of package on pypi.python.org
:param package_version: version number of package to check, optional - defaults to latest version
:param verbose: flag to print out reasons
:param no_output: print no output
:param lint: run pylint on the package
:returns: (score: integer, reasons: list of reasons for score)
:rtype: tuple
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/healthy.py#L34-L104
|
[
"def get_health_color(score):\n \"\"\"\n Returns a color based on the health score\n :param score: integer from 0 - 100 representing health\n :return: string of color to apply in terminal\n \"\"\"\n color = TERMINAL.green\n\n if score <= 80:\n color = TERMINAL.yellow\n elif score <= 60:\n color = TERMINAL.orange\n elif score <= 40:\n color = TERMINAL.red\n\n return color\n",
"def passthrough(self, s):\n return s\n"
] |
#! /usr/bin/env python
# coding=utf-8
"""
healthy.py
Checks the health of a Python package, based on it's Pypi information
"""
import argparse
import os
import checks
try:
# Different location in Python 3
from xmlrpc.client import ServerProxy
except ImportError:
from xmlrpclib import ServerProxy
try:
from blessings import Terminal
except:
class Terminal(object):
"""A dummy Terminal class if we can't import Terminal."""
def passthrough(self, s):
return s
bold = red = green = yellow = orange = passthrough
TERMINAL = Terminal()
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
CLIENT = ServerProxy('http://pypi.python.org/pypi')
# def lint_package(download_url):
# """
# Run pylint on the packages files
# :param download_url: download url for the package
# :return: score of the package
# """
# sandbox = create_sandbox()
# package_dir = download_package_to_sandbox(sandbox, download_url)
# pylint_score = score(package_dir)
# destroy_sandbox(sandbox)
#
# return pylint_score
def get_health_color(score):
    """
    Returns a color based on the health score.

    Thresholds are checked from worst to best: the original descending
    chain (``if score <= 80`` first) captured every score below 80 in the
    yellow branch, making orange and red unreachable.

    :param score: integer from 0 - 100 representing health
    :return: terminal color callable to apply to the score string
    """
    if score <= 40:
        return TERMINAL.red
    if score <= 60:
        return TERMINAL.orange
    if score <= 80:
        return TERMINAL.yellow
    return TERMINAL.green
def main():
"""
Parses user input for a package name
:return:
"""
parser = argparse.ArgumentParser('Determines the health of a package')
parser.add_argument(
'package_name',
help='Name of package listed on pypi.python.org',
)
parser.add_argument(
'package_version', nargs='?',
help='Version of package to check',
)
parser.add_argument(
'-v', '--verbose', required=False,
help='Show verbose output - the reasons for the package health score',
action='store_true'
)
parser.add_argument(
'-n', '--no_output', required=False,
help='Show no output - no output will be generated',
action='store_true'
)
args = parser.parse_args()
return calculate_health(args.package_name, args.package_version, args.verbose, args.no_output)
if __name__ == '__main__':
main()
|
dustinmm80/healthy
|
healthy.py
|
get_health_color
|
python
|
def get_health_color(score):
color = TERMINAL.green
if score <= 80:
color = TERMINAL.yellow
elif score <= 60:
color = TERMINAL.orange
elif score <= 40:
color = TERMINAL.red
return color
|
Returns a color based on the health score
:param score: integer from 0 - 100 representing health
:return: string of color to apply in terminal
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/healthy.py#L119-L134
| null |
#! /usr/bin/env python
# coding=utf-8
"""
healthy.py
Checks the health of a Python package, based on it's Pypi information
"""
import argparse
import os
import checks
try:
# Different location in Python 3
from xmlrpc.client import ServerProxy
except ImportError:
from xmlrpclib import ServerProxy
try:
from blessings import Terminal
except:
class Terminal(object):
"""A dummy Terminal class if we can't import Terminal."""
def passthrough(self, s):
return s
bold = red = green = yellow = orange = passthrough
TERMINAL = Terminal()
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
CLIENT = ServerProxy('http://pypi.python.org/pypi')
def calculate_health(package_name, package_version=None, verbose=False, no_output=False):
    """
    Calculates the health of a package, based on several factors

    :param package_name: name of package on pypi.python.org
    :param package_version: version number of package to check, optional - defaults to latest version
    :param verbose: flag to print out reasons
    :param no_output: suppress all printed output
    :returns: (score: integer, reasons: list of reasons for score)
    :rtype: tuple
    """
    total_score = 0
    reasons = []

    package_releases = CLIENT.package_releases(package_name)
    if not package_releases:
        if not no_output:
            print(TERMINAL.red('{} is not listed on pypi'.format(package_name)))
        return 0, []

    # Default to the latest release when no version was requested.
    if package_version is None:
        package_version = package_releases[0]

    package_info = CLIENT.release_data(package_name, package_version)
    release_urls = CLIENT.release_urls(package_name, package_version)
    if not package_info or not release_urls:
        if not no_output:
            print(TERMINAL.red('Version {} is not listed on pypi'.format(
                package_version)))
        return 0, []

    if not no_output:
        print(TERMINAL.bold('{} v{}'.format(package_name, package_version)))
        print('-----')

    # Each checker returns (result, reason, score): passing checks add
    # points, failing ones contribute a human-readable reason.
    checkers = [
        checks.check_license,
        checks.check_homepage,
        checks.check_summary,
        checks.check_description,
        checks.check_python_classifiers,
        checks.check_author_info,
        checks.check_release_files,
        checks.check_stale,
    ]
    for checker in checkers:
        result, reason, score = checker(package_info, release_urls)
        if result:
            total_score += score
        else:
            reasons.append(reason)

    # Never report a negative score.
    if total_score < 0:
        total_score = 0

    if not no_output:
        percentage = int(float(total_score) / float(checks.TOTAL_POSSIBLE) * 100)
        score_string = 'score: {}/{} {}%'.format(
            total_score, checks.TOTAL_POSSIBLE, percentage)
        print(get_health_color(percentage)(score_string))
        if verbose:
            for reason in reasons:
                print(reason)

    # BUG FIX: the original returned the tuple only when no_output was set,
    # so normal runs returned None despite the documented contract.
    return total_score, reasons
# def lint_package(download_url):
# """
# Run pylint on the packages files
# :param download_url: download url for the package
# :return: score of the package
# """
# sandbox = create_sandbox()
# package_dir = download_package_to_sandbox(sandbox, download_url)
# pylint_score = score(package_dir)
# destroy_sandbox(sandbox)
#
# return pylint_score
def main():
    """Parse command-line arguments and score the requested package.

    :return: the result of :func:`calculate_health` for the parsed arguments
    """
    cli = argparse.ArgumentParser('Determines the health of a package')
    cli.add_argument('package_name',
                     help='Name of package listed on pypi.python.org')
    cli.add_argument('package_version', nargs='?',
                     help='Version of package to check')
    cli.add_argument('-v', '--verbose', action='store_true', required=False,
                     help='Show verbose output - the reasons for the package health score')
    cli.add_argument('-n', '--no_output', action='store_true', required=False,
                     help='Show no output - no output will be generated')
    options = cli.parse_args()
    return calculate_health(options.package_name, options.package_version,
                            options.verbose, options.no_output)


if __name__ == '__main__':
    main()
|
dustinmm80/healthy
|
healthy.py
|
main
|
python
|
def main():
    """Parse command-line arguments and score the requested package."""
    cli = argparse.ArgumentParser('Determines the health of a package')
    cli.add_argument('package_name',
                     help='Name of package listed on pypi.python.org')
    cli.add_argument('package_version', nargs='?',
                     help='Version of package to check')
    cli.add_argument('-v', '--verbose', action='store_true', required=False,
                     help='Show verbose output - the reasons for the package health score')
    cli.add_argument('-n', '--no_output', action='store_true', required=False,
                     help='Show no output - no output will be generated')
    options = cli.parse_args()
    return calculate_health(options.package_name, options.package_version,
                            options.verbose, options.no_output)
|
Parses user input for a package name
:return:
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/healthy.py#L136-L167
|
[
"def calculate_health(package_name, package_version=None, verbose=False, no_output=False):\n \"\"\"\n Calculates the health of a package, based on several factors\n\n :param package_name: name of package on pypi.python.org\n :param package_version: version number of package to check, optional - defaults to latest version\n :param verbose: flag to print out reasons\n :param no_output: print no output\n :param lint: run pylint on the package\n\n :returns: (score: integer, reasons: list of reasons for score)\n :rtype: tuple\n \"\"\"\n total_score = 0\n reasons = []\n\n package_releases = CLIENT.package_releases(package_name)\n if not package_releases:\n if not no_output:\n print(TERMINAL.red('{} is not listed on pypi'.format(package_name)))\n return 0, []\n\n if package_version is None:\n package_version = package_releases[0]\n\n package_info = CLIENT.release_data(package_name, package_version)\n release_urls = CLIENT.release_urls(package_name, package_version)\n\n if not package_info or not release_urls:\n if not no_output:\n print(TERMINAL.red('Version {} is not listed on pypi'.format(\n package_version)))\n return 0, []\n\n if not no_output:\n print(TERMINAL.bold('{} v{}'.format(package_name, package_version)))\n print('-----')\n\n checkers = [\n checks.check_license,\n checks.check_homepage,\n checks.check_summary,\n checks.check_description,\n checks.check_python_classifiers,\n checks.check_author_info,\n checks.check_release_files,\n checks.check_stale\n ]\n\n for checker in checkers:\n result, reason, score = checker(package_info, release_urls)\n if result:\n total_score += score\n else:\n reasons.append(reason)\n\n if total_score < 0:\n total_score = 0\n\n if not no_output:\n percentage = int(float(total_score) / float(checks.TOTAL_POSSIBLE) * 100)\n score_string = 'score: {}/{} {}%'.format(total_score, checks.TOTAL_POSSIBLE, percentage)\n\n print(get_health_color(percentage)(score_string))\n\n if verbose and not no_output:\n for reason in reasons:\n 
print(reason)\n\n if no_output:\n return total_score, reasons\n"
] |
#! /usr/bin/env python
# coding=utf-8
"""
healthy.py
Checks the health of a Python package, based on its PyPI information
"""
import argparse
import os
import checks
try:
# Different location in Python 3
from xmlrpc.client import ServerProxy
except ImportError:
from xmlrpclib import ServerProxy
try:
from blessings import Terminal
except:
class Terminal(object):
"""A dummy Terminal class if we can't import Terminal."""
def passthrough(self, s):
return s
bold = red = green = yellow = orange = passthrough
TERMINAL = Terminal()
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
CLIENT = ServerProxy('http://pypi.python.org/pypi')
def calculate_health(package_name, package_version=None, verbose=False, no_output=False):
    """
    Calculates the health of a package, based on several factors

    :param package_name: name of package on pypi.python.org
    :param package_version: version number of package to check, optional - defaults to latest version
    :param verbose: flag to print out reasons
    :param no_output: suppress all printed output
    :returns: (score: integer, reasons: list of reasons for score)
    :rtype: tuple
    """
    total_score = 0
    reasons = []

    package_releases = CLIENT.package_releases(package_name)
    if not package_releases:
        if not no_output:
            print(TERMINAL.red('{} is not listed on pypi'.format(package_name)))
        return 0, []

    # Default to the latest release when no version was requested.
    if package_version is None:
        package_version = package_releases[0]

    package_info = CLIENT.release_data(package_name, package_version)
    release_urls = CLIENT.release_urls(package_name, package_version)
    if not package_info or not release_urls:
        if not no_output:
            print(TERMINAL.red('Version {} is not listed on pypi'.format(
                package_version)))
        return 0, []

    if not no_output:
        print(TERMINAL.bold('{} v{}'.format(package_name, package_version)))
        print('-----')

    # Each checker returns (result, reason, score): passing checks add
    # points, failing ones contribute a human-readable reason.
    checkers = [
        checks.check_license,
        checks.check_homepage,
        checks.check_summary,
        checks.check_description,
        checks.check_python_classifiers,
        checks.check_author_info,
        checks.check_release_files,
        checks.check_stale,
    ]
    for checker in checkers:
        result, reason, score = checker(package_info, release_urls)
        if result:
            total_score += score
        else:
            reasons.append(reason)

    # Never report a negative score.
    if total_score < 0:
        total_score = 0

    if not no_output:
        percentage = int(float(total_score) / float(checks.TOTAL_POSSIBLE) * 100)
        score_string = 'score: {}/{} {}%'.format(
            total_score, checks.TOTAL_POSSIBLE, percentage)
        print(get_health_color(percentage)(score_string))
        if verbose:
            for reason in reasons:
                print(reason)

    # BUG FIX: the original returned the tuple only when no_output was set,
    # so normal runs returned None despite the documented contract.
    return total_score, reasons
# def lint_package(download_url):
# """
# Run pylint on the packages files
# :param download_url: download url for the package
# :return: score of the package
# """
# sandbox = create_sandbox()
# package_dir = download_package_to_sandbox(sandbox, download_url)
# pylint_score = score(package_dir)
# destroy_sandbox(sandbox)
#
# return pylint_score
def get_health_color(score):
    """
    Returns a color based on the health score

    :param score: integer from 0 - 100 representing health
    :return: TERMINAL color formatter to apply in terminal
    """
    # BUG FIX: the original tested `score <= 80` first, which also captured
    # every lower value and made the orange and red branches unreachable.
    # Test the lowest band first so each threshold actually applies.
    if score <= 40:
        return TERMINAL.red
    if score <= 60:
        return TERMINAL.orange
    if score <= 80:
        return TERMINAL.yellow
    return TERMINAL.green
if __name__ == '__main__':
main()
|
dustinmm80/healthy
|
package_utils.py
|
create_sandbox
|
python
|
def create_sandbox(name='healthybox'):
sandbox = tempfile.mkdtemp(prefix=name)
if not os.path.isdir(sandbox):
os.mkdir(sandbox)
return sandbox
|
Create a temporary sandbox directory
:param name: name of the directory to create
:return: The directory created
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/package_utils.py#L14-L24
| null |
#! /usr/bin/env python
# coding=utf-8
"""
Utilities for fetching and unpacking packages from pypi
"""
import os
import shutil
import tarfile
import tempfile
import requests
def download_package_to_sandbox(sandbox, package_url):
    """
    Downloads and unzips a package into the sandbox

    :param sandbox: temporary directory name
    :param package_url: link to package download
    :returns: path of the unzipped package directory
    """
    response = requests.get(package_url)

    package_tar = os.path.join(sandbox, 'package.tar.gz')
    # BUG FIX: response.content is bytes; the archive must be written in
    # binary mode ('wb') — text mode raises TypeError on Python 3.
    with open(package_tar, 'wb') as f:
        f.write(response.content)

    os.chdir(sandbox)
    with tarfile.open('package.tar.gz', 'r:gz') as tf:
        tf.extractall()

    # The first directory found in the sandbox is the extracted package.
    # NOTE(review): assumes the archive contains one top-level dir — confirm.
    directory = [d for d in os.listdir(sandbox) if os.path.isdir(d)][0]
    return os.path.join(sandbox, directory)
def destroy_sandbox(sandbox):
    """
    Destroys a temporary sandbox directory, ignoring paths already gone.

    :param sandbox: name of the directory acting as sandbox
    """
    if not os.path.isdir(sandbox):
        return
    shutil.rmtree(sandbox)
def main():
    """
    Demo entry point: create a sandbox, download a sample package into it,
    print the extracted path, then clean up.
    """
    box = create_sandbox()
    extracted = download_package_to_sandbox(
        box,
        'https://pypi.python.org/packages/source/c/checkmyreqs/checkmyreqs-0.1.6.tar.gz'
    )
    print(extracted)
    destroy_sandbox(box)


if __name__ == '__main__':
    main()
|
dustinmm80/healthy
|
package_utils.py
|
download_package_to_sandbox
|
python
|
def download_package_to_sandbox(sandbox, package_url):
    """
    Downloads and unzips a package into the sandbox

    :param sandbox: temporary directory name
    :param package_url: link to package download
    :returns: path of the unzipped package directory
    """
    response = requests.get(package_url)

    package_tar = os.path.join(sandbox, 'package.tar.gz')
    # BUG FIX: response.content is bytes; the archive must be written in
    # binary mode ('wb') — text mode raises TypeError on Python 3.
    with open(package_tar, 'wb') as f:
        f.write(response.content)

    os.chdir(sandbox)
    with tarfile.open('package.tar.gz', 'r:gz') as tf:
        tf.extractall()

    # The first directory found in the sandbox is the extracted package.
    directory = [d for d in os.listdir(sandbox) if os.path.isdir(d)][0]
    return os.path.join(sandbox, directory)
|
Downloads and unzips a package to the sandbox
:param sandbox: temporary directory name
:param package_url: link to package download
:returns: name of unzipped package directory
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/package_utils.py#L26-L48
| null |
#! /usr/bin/env python
# coding=utf-8
"""
Utilities for fetching and unpacking packages from pypi
"""
import os
import shutil
import tarfile
import tempfile
import requests
def create_sandbox(name='healthybox'):
    """
    Create a temporary sandbox directory.

    :param name: prefix of the directory to create
    :return: path of the directory created
    """
    path = tempfile.mkdtemp(prefix=name)
    # mkdtemp already creates the directory; this is only a safety net.
    if not os.path.isdir(path):
        os.mkdir(path)
    return path
def destroy_sandbox(sandbox):
    """
    Destroys a temporary sandbox directory, ignoring paths already gone.

    :param sandbox: name of the directory acting as sandbox
    """
    if not os.path.isdir(sandbox):
        return
    shutil.rmtree(sandbox)
def main():
    """
    Demo entry point: create a sandbox, download a sample package into it,
    print the extracted path, then clean up.
    """
    box = create_sandbox()
    extracted = download_package_to_sandbox(
        box,
        'https://pypi.python.org/packages/source/c/checkmyreqs/checkmyreqs-0.1.6.tar.gz'
    )
    print(extracted)
    destroy_sandbox(box)


if __name__ == '__main__':
    main()
|
dustinmm80/healthy
|
package_utils.py
|
main
|
python
|
def main():
    """Demonstrate the sandbox workflow: create, download into, tear down."""
    box = create_sandbox()
    extracted = download_package_to_sandbox(
        box,
        'https://pypi.python.org/packages/source/c/checkmyreqs/checkmyreqs-0.1.6.tar.gz'
    )
    print(extracted)
    destroy_sandbox(box)
|
Main function for this module
|
train
|
https://github.com/dustinmm80/healthy/blob/b59016c3f578ca45b6ce857a2d5c4584b8542288/package_utils.py#L60-L70
|
[
"def create_sandbox(name='healthybox'):\n \"\"\"\n Create a temporary sandbox directory\n :param name: name of the directory to create\n :return: The directory created\n \"\"\"\n sandbox = tempfile.mkdtemp(prefix=name)\n if not os.path.isdir(sandbox):\n os.mkdir(sandbox)\n\n return sandbox\n",
"def download_package_to_sandbox(sandbox, package_url):\n \"\"\"\n Downloads an unzips a package to the sandbox\n :param sandbox: temporary directory name\n :param package_url: link to package download\n :returns: name of unzipped package directory\n \"\"\"\n\n response = requests.get(package_url)\n\n package_tar = os.path.join(sandbox, 'package.tar.gz')\n\n with open(package_tar, 'w') as f:\n f.write(response.content)\n\n os.chdir(sandbox)\n\n with tarfile.open('package.tar.gz', 'r:gz') as tf:\n tf.extractall()\n\n directory = [d for d in os.listdir(sandbox) if os.path.isdir(d)][0]\n\n return os.path.join(sandbox, directory)\n",
"def destroy_sandbox(sandbox):\n \"\"\"\n Destroys a temporary sandbox directory\n :param sandbox: name of the directory acting as sandbox\n \"\"\"\n if os.path.isdir(sandbox):\n shutil.rmtree(sandbox)\n"
] |
#! /usr/bin/env python
# coding=utf-8
"""
Utilities for fetching and unpacking packages from pypi
"""
import os
import shutil
import tarfile
import tempfile
import requests
def create_sandbox(name='healthybox'):
    """
    Create a temporary sandbox directory.

    :param name: prefix of the directory to create
    :return: path of the directory created
    """
    path = tempfile.mkdtemp(prefix=name)
    # mkdtemp already creates the directory; this is only a safety net.
    if not os.path.isdir(path):
        os.mkdir(path)
    return path
def download_package_to_sandbox(sandbox, package_url):
    """
    Downloads and unzips a package into the sandbox

    :param sandbox: temporary directory name
    :param package_url: link to package download
    :returns: path of the unzipped package directory
    """
    response = requests.get(package_url)

    package_tar = os.path.join(sandbox, 'package.tar.gz')
    # BUG FIX: response.content is bytes; the archive must be written in
    # binary mode ('wb') — text mode raises TypeError on Python 3.
    with open(package_tar, 'wb') as f:
        f.write(response.content)

    os.chdir(sandbox)
    with tarfile.open('package.tar.gz', 'r:gz') as tf:
        tf.extractall()

    # The first directory found in the sandbox is the extracted package.
    directory = [d for d in os.listdir(sandbox) if os.path.isdir(d)][0]
    return os.path.join(sandbox, directory)
def destroy_sandbox(sandbox):
    """
    Destroys a temporary sandbox directory, ignoring paths already gone.

    :param sandbox: name of the directory acting as sandbox
    """
    if not os.path.isdir(sandbox):
        return
    shutil.rmtree(sandbox)


if __name__ == '__main__':
    main()
|
Sean1708/HipPy
|
hippy/compiler.py
|
Compiler.compile
|
python
|
def compile(self):
    """Return the Hip string, compiling the data lazily on first call."""
    if self.buffer is not None:
        return self.buffer.strip()
    self.buffer = self._compile_value(self.data, 0)
    return self.buffer.strip()
|
Return Hip string if already compiled else compile it.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/compiler.py#L15-L20
|
[
"def _compile_value(self, data, indent_level):\n \"\"\"Dispatch to correct compilation method.\"\"\"\n if isinstance(data, dict):\n return self._compile_key_val(data, indent_level)\n elif isinstance(data, list):\n return self._compile_list(data, indent_level)\n else:\n return self._compile_literal(data)\n"
] |
class Compiler:
    """Compiles a data structure into a Hip serialized string."""

    def __init__(self, data, indent=4):
        """Store the data and configure the indentation unit."""
        self.data = data
        self.buffer = None
        # A non-positive indent width falls back to a single tab.
        self._indent = '\t' if indent <= 0 else ' ' * indent

    def _compile_value(self, data, indent_level):
        """Dispatch to the serializer that matches *data*'s type."""
        if isinstance(data, dict):
            return self._compile_key_val(data, indent_level)
        if isinstance(data, list):
            return self._compile_list(data, indent_level)
        return self._compile_literal(data)

    def _compile_literal(self, data):
        """Render a scalar in Hip literal syntax (yes/no/nil or repr)."""
        # Identity checks keep plain ints 0/1 distinct from False/True.
        if data is True:
            return 'yes'
        if data is False:
            return 'no'
        if data is None:
            return 'nil'
        return repr(data)

    def _compile_list(self, data, indent_level):
        """Serialize a list, recursing into nested dicts and lists."""
        if not data:
            return '--'
        if not any(isinstance(entry, (dict, list)) for entry in data):
            # Flat list of scalars: one comma-separated line.
            return ', '.join(self._compile_literal(entry) for entry in data)
        # Mixed/nested content: walk by index so consecutive dicts can be
        # grouped between '-' markers.
        out = ''
        idx = 0
        while idx < len(data):
            entry = data[idx]
            if isinstance(entry, dict):
                out += '\n' + self._indent * indent_level
                while idx < len(data) and isinstance(data[idx], dict):
                    out += '-\n'
                    out += self._compile_key_val(data[idx], indent_level)
                    out += self._indent * indent_level + '-'
                    idx += 1
                out += '\n'
            elif isinstance(entry, list) and any(
                    isinstance(item, (dict, list)) for item in entry):
                out += self._compile_list(entry, indent_level + 1)
            elif isinstance(entry, list):
                out += '\n' + self._indent * indent_level
                out += self._compile_list(entry, indent_level + 1)
            else:
                out += '\n' + self._indent * indent_level
                out += self._compile_literal(entry)
            idx += 1
        return out

    def _compile_key_val(self, data, indent_level):
        """Serialize a dictionary, one 'key:' entry per line."""
        out = ''
        for key, val in data.items():
            # NOTE(review): assumes every key is a string — TODO confirm.
            out += self._indent * indent_level + key + ':'
            if isinstance(val, dict):
                out += '\n' + self._compile_key_val(val, indent_level + 1)
            elif isinstance(val, list) and any(
                    isinstance(item, (dict, list)) for item in val):
                out += self._compile_list(val, indent_level + 1)
            else:
                out += ' ' + self._compile_value(val, indent_level)
            out += '\n'
        return out
|
Sean1708/HipPy
|
hippy/compiler.py
|
Compiler._compile_value
|
python
|
def _compile_value(self, data, indent_level):
    """Dispatch to the serializer that matches *data*'s type."""
    if isinstance(data, dict):
        return self._compile_key_val(data, indent_level)
    if isinstance(data, list):
        return self._compile_list(data, indent_level)
    return self._compile_literal(data)
|
Dispatch to correct compilation method.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/compiler.py#L22-L29
|
[
"def _compile_literal(self, data):\n \"\"\"Write correct representation of literal.\"\"\"\n if data is None:\n return 'nil'\n elif data is True:\n return 'yes'\n elif data is False:\n return 'no'\n else:\n return repr(data)\n",
"def _compile_list(self, data, indent_level):\n \"\"\"Correctly write possibly nested list.\"\"\"\n if len(data) == 0:\n return '--'\n elif not any(isinstance(i, (dict, list)) for i in data):\n return ', '.join(self._compile_literal(value) for value in data)\n else:\n # 'ere be dragons,\n # granted there are fewer dragons than the parser,\n # but dragons nonetheless\n buffer = ''\n i = 0\n while i < len(data):\n if isinstance(data[i], dict):\n buffer += '\\n'\n buffer += self._indent * indent_level\n while i < len(data) and isinstance(data[i], dict):\n buffer += '-\\n'\n buffer += self._compile_key_val(data[i], indent_level)\n buffer += self._indent * indent_level + '-'\n i += 1\n buffer += '\\n'\n elif (\n isinstance(data[i], list) and\n any(isinstance(item, (dict, list)) for item in data[i])\n ):\n buffer += self._compile_list(data[i], indent_level+1)\n elif isinstance(data[i], list):\n buffer += '\\n'\n buffer += self._indent * indent_level\n buffer += self._compile_list(data[i], indent_level+1)\n else:\n buffer += '\\n'\n buffer += self._indent * indent_level\n buffer += self._compile_literal(data[i])\n\n i += 1\n\n return buffer\n",
"def _compile_key_val(self, data, indent_level):\n \"\"\"Compile a dictionary.\"\"\"\n buffer = ''\n for (key, val) in data.items():\n buffer += self._indent * indent_level\n # TODO: assumes key is a string\n buffer += key + ':'\n\n if isinstance(val, dict):\n buffer += '\\n'\n buffer += self._compile_key_val(val, indent_level+1)\n elif (\n isinstance(val, list) and\n any(isinstance(i, (dict, list)) for i in val)\n ):\n buffer += self._compile_list(val, indent_level+1)\n else:\n buffer += ' '\n buffer += self._compile_value(val, indent_level)\n buffer += '\\n'\n\n return buffer\n"
] |
class Compiler:
    """Compiles a data structure into a Hip serialized string."""

    def __init__(self, data, indent=4):
        """Store the data and configure the indentation unit."""
        self.data = data
        self.buffer = None
        # A non-positive indent width falls back to a single tab.
        self._indent = '\t' if indent <= 0 else ' ' * indent

    def compile(self):
        """Return the Hip string, compiling the data lazily on first call."""
        if self.buffer is not None:
            return self.buffer.strip()
        self.buffer = self._compile_value(self.data, 0)
        return self.buffer.strip()

    def _compile_literal(self, data):
        """Render a scalar in Hip literal syntax (yes/no/nil or repr)."""
        # Identity checks keep plain ints 0/1 distinct from False/True.
        if data is True:
            return 'yes'
        if data is False:
            return 'no'
        if data is None:
            return 'nil'
        return repr(data)

    def _compile_list(self, data, indent_level):
        """Serialize a list, recursing into nested dicts and lists."""
        if not data:
            return '--'
        if not any(isinstance(entry, (dict, list)) for entry in data):
            # Flat list of scalars: one comma-separated line.
            return ', '.join(self._compile_literal(entry) for entry in data)
        # Mixed/nested content: walk by index so consecutive dicts can be
        # grouped between '-' markers.
        out = ''
        idx = 0
        while idx < len(data):
            entry = data[idx]
            if isinstance(entry, dict):
                out += '\n' + self._indent * indent_level
                while idx < len(data) and isinstance(data[idx], dict):
                    out += '-\n'
                    out += self._compile_key_val(data[idx], indent_level)
                    out += self._indent * indent_level + '-'
                    idx += 1
                out += '\n'
            elif isinstance(entry, list) and any(
                    isinstance(item, (dict, list)) for item in entry):
                out += self._compile_list(entry, indent_level + 1)
            elif isinstance(entry, list):
                out += '\n' + self._indent * indent_level
                out += self._compile_list(entry, indent_level + 1)
            else:
                out += '\n' + self._indent * indent_level
                out += self._compile_literal(entry)
            idx += 1
        return out

    def _compile_key_val(self, data, indent_level):
        """Serialize a dictionary, one 'key:' entry per line."""
        out = ''
        for key, val in data.items():
            # NOTE(review): assumes every key is a string — TODO confirm.
            out += self._indent * indent_level + key + ':'
            if isinstance(val, dict):
                out += '\n' + self._compile_key_val(val, indent_level + 1)
            elif isinstance(val, list) and any(
                    isinstance(item, (dict, list)) for item in val):
                out += self._compile_list(val, indent_level + 1)
            else:
                out += ' ' + self._compile_value(val, indent_level)
            out += '\n'
        return out
|
Sean1708/HipPy
|
hippy/compiler.py
|
Compiler._compile_literal
|
python
|
def _compile_literal(self, data):
if data is None:
return 'nil'
elif data is True:
return 'yes'
elif data is False:
return 'no'
else:
return repr(data)
|
Write correct representation of literal.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/compiler.py#L31-L40
| null |
class Compiler:
    """Compiles a data structure into a Hip serialized string."""

    def __init__(self, data, indent=4):
        """Store the data and configure the indentation unit."""
        self.data = data
        self.buffer = None
        # A non-positive indent width falls back to a single tab.
        self._indent = '\t' if indent <= 0 else ' ' * indent

    def compile(self):
        """Return the Hip string, compiling the data lazily on first call."""
        if self.buffer is not None:
            return self.buffer.strip()
        self.buffer = self._compile_value(self.data, 0)
        return self.buffer.strip()

    def _compile_value(self, data, indent_level):
        """Dispatch to the serializer that matches *data*'s type."""
        if isinstance(data, dict):
            return self._compile_key_val(data, indent_level)
        if isinstance(data, list):
            return self._compile_list(data, indent_level)
        return self._compile_literal(data)

    def _compile_list(self, data, indent_level):
        """Serialize a list, recursing into nested dicts and lists."""
        if not data:
            return '--'
        if not any(isinstance(entry, (dict, list)) for entry in data):
            # Flat list of scalars: one comma-separated line.
            return ', '.join(self._compile_literal(entry) for entry in data)
        # Mixed/nested content: walk by index so consecutive dicts can be
        # grouped between '-' markers.
        out = ''
        idx = 0
        while idx < len(data):
            entry = data[idx]
            if isinstance(entry, dict):
                out += '\n' + self._indent * indent_level
                while idx < len(data) and isinstance(data[idx], dict):
                    out += '-\n'
                    out += self._compile_key_val(data[idx], indent_level)
                    out += self._indent * indent_level + '-'
                    idx += 1
                out += '\n'
            elif isinstance(entry, list) and any(
                    isinstance(item, (dict, list)) for item in entry):
                out += self._compile_list(entry, indent_level + 1)
            elif isinstance(entry, list):
                out += '\n' + self._indent * indent_level
                out += self._compile_list(entry, indent_level + 1)
            else:
                out += '\n' + self._indent * indent_level
                out += self._compile_literal(entry)
            idx += 1
        return out

    def _compile_key_val(self, data, indent_level):
        """Serialize a dictionary, one 'key:' entry per line."""
        out = ''
        for key, val in data.items():
            # NOTE(review): assumes every key is a string — TODO confirm.
            out += self._indent * indent_level + key + ':'
            if isinstance(val, dict):
                out += '\n' + self._compile_key_val(val, indent_level + 1)
            elif isinstance(val, list) and any(
                    isinstance(item, (dict, list)) for item in val):
                out += self._compile_list(val, indent_level + 1)
            else:
                out += ' ' + self._compile_value(val, indent_level)
            out += '\n'
        return out
|
Sean1708/HipPy
|
hippy/compiler.py
|
Compiler._compile_list
|
python
|
def _compile_list(self, data, indent_level):
if len(data) == 0:
return '--'
elif not any(isinstance(i, (dict, list)) for i in data):
return ', '.join(self._compile_literal(value) for value in data)
else:
# 'ere be dragons,
# granted there are fewer dragons than the parser,
# but dragons nonetheless
buffer = ''
i = 0
while i < len(data):
if isinstance(data[i], dict):
buffer += '\n'
buffer += self._indent * indent_level
while i < len(data) and isinstance(data[i], dict):
buffer += '-\n'
buffer += self._compile_key_val(data[i], indent_level)
buffer += self._indent * indent_level + '-'
i += 1
buffer += '\n'
elif (
isinstance(data[i], list) and
any(isinstance(item, (dict, list)) for item in data[i])
):
buffer += self._compile_list(data[i], indent_level+1)
elif isinstance(data[i], list):
buffer += '\n'
buffer += self._indent * indent_level
buffer += self._compile_list(data[i], indent_level+1)
else:
buffer += '\n'
buffer += self._indent * indent_level
buffer += self._compile_literal(data[i])
i += 1
return buffer
|
Correctly write possibly nested list.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/compiler.py#L42-L80
| null |
class Compiler:
    """Compiles a data structure into a Hip serialized string."""

    def __init__(self, data, indent=4):
        """Store the data and configure the indentation unit."""
        self.data = data
        self.buffer = None
        # A non-positive indent width falls back to a single tab.
        self._indent = '\t' if indent <= 0 else ' ' * indent

    def compile(self):
        """Return the Hip string, compiling the data lazily on first call."""
        if self.buffer is not None:
            return self.buffer.strip()
        self.buffer = self._compile_value(self.data, 0)
        return self.buffer.strip()

    def _compile_value(self, data, indent_level):
        """Dispatch to the serializer that matches *data*'s type."""
        if isinstance(data, dict):
            return self._compile_key_val(data, indent_level)
        if isinstance(data, list):
            return self._compile_list(data, indent_level)
        return self._compile_literal(data)

    def _compile_literal(self, data):
        """Render a scalar in Hip literal syntax (yes/no/nil or repr)."""
        # Identity checks keep plain ints 0/1 distinct from False/True.
        if data is True:
            return 'yes'
        if data is False:
            return 'no'
        if data is None:
            return 'nil'
        return repr(data)

    def _compile_key_val(self, data, indent_level):
        """Serialize a dictionary, one 'key:' entry per line."""
        out = ''
        for key, val in data.items():
            # NOTE(review): assumes every key is a string — TODO confirm.
            out += self._indent * indent_level + key + ':'
            if isinstance(val, dict):
                out += '\n' + self._compile_key_val(val, indent_level + 1)
            elif isinstance(val, list) and any(
                    isinstance(item, (dict, list)) for item in val):
                out += self._compile_list(val, indent_level + 1)
            else:
                out += ' ' + self._compile_value(val, indent_level)
            out += '\n'
        return out
|
Sean1708/HipPy
|
hippy/compiler.py
|
Compiler._compile_key_val
|
python
|
def _compile_key_val(self, data, indent_level):
buffer = ''
for (key, val) in data.items():
buffer += self._indent * indent_level
# TODO: assumes key is a string
buffer += key + ':'
if isinstance(val, dict):
buffer += '\n'
buffer += self._compile_key_val(val, indent_level+1)
elif (
isinstance(val, list) and
any(isinstance(i, (dict, list)) for i in val)
):
buffer += self._compile_list(val, indent_level+1)
else:
buffer += ' '
buffer += self._compile_value(val, indent_level)
buffer += '\n'
return buffer
|
Compile a dictionary.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/compiler.py#L82-L103
| null |
class Compiler:
    """Compiles a data structure into a Hip serialized string."""

    def __init__(self, data, indent=4):
        """Store the data and configure the indentation unit."""
        self.data = data
        self.buffer = None
        # A non-positive indent width falls back to a single tab.
        self._indent = '\t' if indent <= 0 else ' ' * indent

    def compile(self):
        """Return the Hip string, compiling the data lazily on first call."""
        if self.buffer is not None:
            return self.buffer.strip()
        self.buffer = self._compile_value(self.data, 0)
        return self.buffer.strip()

    def _compile_value(self, data, indent_level):
        """Dispatch to the serializer that matches *data*'s type."""
        if isinstance(data, dict):
            return self._compile_key_val(data, indent_level)
        if isinstance(data, list):
            return self._compile_list(data, indent_level)
        return self._compile_literal(data)

    def _compile_literal(self, data):
        """Render a scalar in Hip literal syntax (yes/no/nil or repr)."""
        # Identity checks keep plain ints 0/1 distinct from False/True.
        if data is True:
            return 'yes'
        if data is False:
            return 'no'
        if data is None:
            return 'nil'
        return repr(data)

    def _compile_list(self, data, indent_level):
        """Serialize a list, recursing into nested dicts and lists."""
        if not data:
            return '--'
        if not any(isinstance(entry, (dict, list)) for entry in data):
            # Flat list of scalars: one comma-separated line.
            return ', '.join(self._compile_literal(entry) for entry in data)
        # Mixed/nested content: walk by index so consecutive dicts can be
        # grouped between '-' markers.
        out = ''
        idx = 0
        while idx < len(data):
            entry = data[idx]
            if isinstance(entry, dict):
                out += '\n' + self._indent * indent_level
                while idx < len(data) and isinstance(data[idx], dict):
                    out += '-\n'
                    out += self._compile_key_val(data[idx], indent_level)
                    out += self._indent * indent_level + '-'
                    idx += 1
                out += '\n'
            elif isinstance(entry, list) and any(
                    isinstance(item, (dict, list)) for item in entry):
                out += self._compile_list(entry, indent_level + 1)
            elif isinstance(entry, list):
                out += '\n' + self._indent * indent_level
                out += self._compile_list(entry, indent_level + 1)
            else:
                out += '\n' + self._indent * indent_level
                out += self._compile_literal(entry)
            idx += 1
        return out
|
Sean1708/HipPy
|
hippy/__init__.py
|
write
|
python
|
def write(file_name, data):
    """Encode *data* and write the resulting Hip string to *file_name*."""
    with open(file_name, 'w') as hip_file:
        hip_file.write(encode(data))
|
Encode and write a Hip file.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/__init__.py#L21-L24
|
[
"def encode(data):\n \"\"\"Encode data structure into a Hip serialized string.\"\"\"\n return compiler.Compiler(data).compile()\n"
] |
"""Python parser for reading Hip data files."""
from . import lexer, parser, compiler
def encode(data):
    """Serialize a data structure into a Hip string."""
    hip_compiler = compiler.Compiler(data)
    return hip_compiler.compile()
def decode(string):
    """Turn a Hip serialized string back into a data structure."""
    tokens = lexer.Lexer(string)
    return parser.Parser(tokens).data
def read(file_name):
    """Read *file_name* and decode its Hip contents."""
    with open(file_name, 'r') as hip_file:
        return decode(hip_file.read())
|
Sean1708/HipPy
|
hippy/lexer.py
|
tokenize_number
|
python
|
def tokenize_number(val, line):
try:
num = int(val)
typ = TokenType.int
except ValueError:
num = float(val)
typ = TokenType.float
return {'type': typ, 'value': num, 'line': line}
|
Parse val correctly into int or float.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/lexer.py#L43-L52
| null |
"""Contains the Lexer and Token classes responsible for tokenizing the file.
Also does stuff.
"""
import re
import ast
import enum
from .error import Error
class LexError(Error):
"""Raised when the lexer encounters an error."""
def __init__(self, line, char):
"""Set the line and character which caused the exception."""
self.line = line
self.char = char
def __str__(self):
"""Give a nice error message specifying the root cause."""
return "Unknown character {} on line {}".format(self.char, self.line+1)
class TokenType(enum.Enum):
"""Stores possible token types."""
str = 1
int = 2
float = 3
bool = 4
null = 5
comment = 6
lbreak = 7
ws = 8
hyphen = 9
colon = 10
comma = 11
id = 12
class Lexer:
"""Contains state of tokenizing the file.
And shit.
"""
_token_map = [
# TODO: these can probably be unified
# TODO: this doesn't handle arbitrarily complex strings
# these would probably need to be handled in the parser
(
re.compile(r'"(?:[^\\"]|\\.)*"'),
lambda val, line: {
'type': TokenType.str,
'value': ast.literal_eval(val),
'line': line
},
),
(
re.compile(r"'(?:[^\\']|\\.)*'"),
lambda val, line: {
'type': TokenType.str,
'value': ast.literal_eval(val),
'line': line
},
),
(
re.compile(
r"""
[-+]?
(?: # matches the significand
(?:[0-9]+\.[0-9]*)|(?:[0-9]*\.[0-9]+)|(?:[0-9]+)
)(?: # matches the exponential
[eE][-+]?[0-9]+
)?
""",
re.VERBOSE,
),
tokenize_number
),
(
re.compile(r'yes'),
lambda val, line: {
'type': TokenType.bool, 'value': True, 'line': line
},
),
(
re.compile(r'no'),
lambda val, line: {
'type': TokenType.bool, 'value': False, 'line': line
},
),
(
re.compile(r'nil'),
lambda val, line: {
'type': TokenType.null, 'value': None, 'line': line
},
),
(
re.compile(r'#.*'),
lambda val, line: {
'type': TokenType.comment,
'value': val[1:].strip(),
'line': line
},
),
(
re.compile(r'(?:\r\n|\r|\n)'),
lambda val, line: {
'type': TokenType.lbreak, 'value': val, 'line': line
},
),
(
re.compile(r'\s'),
lambda val, line: {
'type': TokenType.ws, 'value': val, 'line': line
},
),
(
re.compile(r'-'),
lambda val, line: {
'type': TokenType.hyphen, 'value': val, 'line': line
},
),
(
re.compile(r':'),
lambda val, line: {
'type': TokenType.colon, 'value': val, 'line': line
},
),
(
re.compile(r','),
lambda val, line: {
'type': TokenType.comma, 'value': val, 'line': line
},
),
(
re.compile(r'\w+'),
lambda val, line: {
'type': TokenType.id, 'value': val, 'line': line
},
),
]
def __init__(self, content):
"""Initialize lexer state."""
self._content = content.replace('\t', ' ').strip()
self._length = len(self._content)
self._pos = 0
self._line = 0
def __iter__(self):
"""Return the object, since it is an iterator."""
return self
def __next__(self):
"""Retrieve the token at position pos."""
if self._pos >= self._length:
raise StopIteration
remaining = self._content[self._pos:]
for (rgx, func) in self._token_map:
match = rgx.match(remaining)
if match is not None:
token = func(match.group(0), self._line)
if token['type'] is TokenType.lbreak:
self._line += 1
self._pos += match.end(0)
return token
raise LexError(self._line, self._content[self._pos])
|
Sean1708/HipPy
|
hippy/parser.py
|
Parser.data
|
python
|
def data(self):
if self._data is None:
# reset after possible parsing failure
self.__init__(self.tokens)
return self._parse()
else:
return self._data
|
Return parsed data structure.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L47-L54
|
[
"def __init__(self, tokens):\n \"\"\"Initialize tokens excluding comments.\"\"\"\n self.tokens = [t for t in tokens if t['type'] is not TT.comment]\n self.num_tokens = len(self.tokens)\n self._cur_position = 0\n self._finished = False\n self._data = None\n self._literals = (TT.str, TT.int, TT.float, TT.bool, TT.null)\n",
"def _parse(self):\n \"\"\"Parse the token stream into a nice dictionary data structure.\"\"\"\n while self._cur_token['type'] in (TT.ws, TT.lbreak):\n self._skip_whitespace()\n self._skip_newlines()\n\n self._data = self._parse_value()\n\n return self._data\n"
] |
class Parser:
"""Parses an iterable of tokens into a data structure."""
# .==. .==.
# //`^\\ //^`\\
# // ^ ^\(\__/)/^ ^^\\
# //^ ^^ ^/6 6\ ^^ ^ \\
# //^ ^^ ^/( .. )\^ ^ ^ \\
# // ^^ ^/\| v""v |/\^ ^ ^\\
# // ^^/\/ / `~~` \ \/\^ ^\\
# -----------------------------
# HERE BE DRAGONS
def __init__(self, tokens):
"""Initialize tokens excluding comments."""
self.tokens = [t for t in tokens if t['type'] is not TT.comment]
self.num_tokens = len(self.tokens)
self._cur_position = 0
self._finished = False
self._data = None
self._literals = (TT.str, TT.int, TT.float, TT.bool, TT.null)
@property
@property
def _cur_token(self):
"""Return the current token."""
if self._finished:
return {'value': None, 'type': None, 'line': -1}
else:
return self.tokens[self._cur_position]
def _nth_token(self, n=1):
"""Return token n tokens ahead of the current token."""
try:
return self.tokens[self._cur_position + n]
except IndexError:
return {'value': None, 'type': None, 'line': -1}
def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
self._cur_positon = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n
def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i
def _skip_newlines(self):
"""Increment over newlines."""
while self._cur_token['type'] is TT.lbreak and not self._finished:
self._increment()
def _parse(self):
"""Parse the token stream into a nice dictionary data structure."""
while self._cur_token['type'] in (TT.ws, TT.lbreak):
self._skip_whitespace()
self._skip_newlines()
self._data = self._parse_value()
return self._data
def _parse_value(self):
"""Parse the value of a key-value pair."""
indent = 0
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
if self._cur_token['type'] is TT.id:
return self._parse_key(indent)
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is TT.hyphen:
self._increment()
return []
else:
return self._parse_object_list()
else:
# TODO: single comma gives empty list
return self._parse_literal_list(indent)
def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
)
def _parse_object_list(self):
"""Parse a list of data structures."""
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token)
def _parse_literal_list(self, indent):
"""Parse a list of literals."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_literal_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
# find next token after whitespace without incrementing
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and (
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
next_token = self.tokens[temp_position]
# end of stream
if next_token['type'] is TT.ws:
return self._cur_token['value']
elif next_token['type'] is TT.comma:
return self._parse_comma_list()
elif next_token['type'] is TT.lbreak:
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
temp_position += 1
if self.tokens[temp_position]['type'] in self._literals:
return self._parse_newline_list(indent)
else:
rval = self._cur_token['value']
self._increment()
return rval
else:
rval = self._cur_token['value']
self._increment()
return rval
def _parse_comma_list(self):
"""Parse a comma seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array
def _parse_newline_list(self, indent):
"""Parse a newline seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_newline_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
new_indent = indent
while not self._finished:
if new_indent < indent:
break
elif new_indent == indent:
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
# look ahead to see if it's a comma seperated list
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
(
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
if self.tokens[temp_position]['type'] is TT.comma:
array.append(self._parse_comma_list())
else:
if self._cur_token['type'] is not TT.hyphen:
array.append(self._cur_token['value'])
elif self._nth_token()['type'] is TT.hyphen:
# two consecutive '-'s
array.append([])
self._increment()
self._increment()
else: # new_indent > indent
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
array.append(self._parse_newline_list(new_indent))
self._skip_whitespace()
if (
not self._finished and
self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
):
raise ParseError('newline', self._cur_token)
temp_position = self._cur_position
new_indent = 0
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
if self.tokens[temp_position]['type'] is TT.lbreak:
new_indent = 0
else:
new_indent += 1
temp_position += 1
return array
|
Sean1708/HipPy
|
hippy/parser.py
|
Parser._increment
|
python
|
def _increment(self, n=1):
if self._cur_position >= self.num_tokens-1:
self._cur_positon = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n
|
Move forward n tokens in the stream.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L71-L77
| null |
class Parser:
"""Parses an iterable of tokens into a data structure."""
# .==. .==.
# //`^\\ //^`\\
# // ^ ^\(\__/)/^ ^^\\
# //^ ^^ ^/6 6\ ^^ ^ \\
# //^ ^^ ^/( .. )\^ ^ ^ \\
# // ^^ ^/\| v""v |/\^ ^ ^\\
# // ^^/\/ / `~~` \ \/\^ ^\\
# -----------------------------
# HERE BE DRAGONS
def __init__(self, tokens):
"""Initialize tokens excluding comments."""
self.tokens = [t for t in tokens if t['type'] is not TT.comment]
self.num_tokens = len(self.tokens)
self._cur_position = 0
self._finished = False
self._data = None
self._literals = (TT.str, TT.int, TT.float, TT.bool, TT.null)
@property
def data(self):
"""Return parsed data structure."""
if self._data is None:
# reset after possible parsing failure
self.__init__(self.tokens)
return self._parse()
else:
return self._data
@property
def _cur_token(self):
"""Return the current token."""
if self._finished:
return {'value': None, 'type': None, 'line': -1}
else:
return self.tokens[self._cur_position]
def _nth_token(self, n=1):
"""Return token n tokens ahead of the current token."""
try:
return self.tokens[self._cur_position + n]
except IndexError:
return {'value': None, 'type': None, 'line': -1}
def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i
def _skip_newlines(self):
"""Increment over newlines."""
while self._cur_token['type'] is TT.lbreak and not self._finished:
self._increment()
def _parse(self):
"""Parse the token stream into a nice dictionary data structure."""
while self._cur_token['type'] in (TT.ws, TT.lbreak):
self._skip_whitespace()
self._skip_newlines()
self._data = self._parse_value()
return self._data
def _parse_value(self):
"""Parse the value of a key-value pair."""
indent = 0
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
if self._cur_token['type'] is TT.id:
return self._parse_key(indent)
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is TT.hyphen:
self._increment()
return []
else:
return self._parse_object_list()
else:
# TODO: single comma gives empty list
return self._parse_literal_list(indent)
def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
)
def _parse_object_list(self):
"""Parse a list of data structures."""
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token)
def _parse_literal_list(self, indent):
"""Parse a list of literals."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_literal_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
# find next token after whitespace without incrementing
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and (
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
next_token = self.tokens[temp_position]
# end of stream
if next_token['type'] is TT.ws:
return self._cur_token['value']
elif next_token['type'] is TT.comma:
return self._parse_comma_list()
elif next_token['type'] is TT.lbreak:
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
temp_position += 1
if self.tokens[temp_position]['type'] in self._literals:
return self._parse_newline_list(indent)
else:
rval = self._cur_token['value']
self._increment()
return rval
else:
rval = self._cur_token['value']
self._increment()
return rval
def _parse_comma_list(self):
"""Parse a comma seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array
def _parse_newline_list(self, indent):
"""Parse a newline seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_newline_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
new_indent = indent
while not self._finished:
if new_indent < indent:
break
elif new_indent == indent:
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
# look ahead to see if it's a comma seperated list
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
(
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
if self.tokens[temp_position]['type'] is TT.comma:
array.append(self._parse_comma_list())
else:
if self._cur_token['type'] is not TT.hyphen:
array.append(self._cur_token['value'])
elif self._nth_token()['type'] is TT.hyphen:
# two consecutive '-'s
array.append([])
self._increment()
self._increment()
else: # new_indent > indent
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
array.append(self._parse_newline_list(new_indent))
self._skip_whitespace()
if (
not self._finished and
self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
):
raise ParseError('newline', self._cur_token)
temp_position = self._cur_position
new_indent = 0
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
if self.tokens[temp_position]['type'] is TT.lbreak:
new_indent = 0
else:
new_indent += 1
temp_position += 1
return array
|
Sean1708/HipPy
|
hippy/parser.py
|
Parser._skip_whitespace
|
python
|
def _skip_whitespace(self):
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i
|
Increment over whitespace, counting characters.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L79-L86
| null |
class Parser:
"""Parses an iterable of tokens into a data structure."""
# .==. .==.
# //`^\\ //^`\\
# // ^ ^\(\__/)/^ ^^\\
# //^ ^^ ^/6 6\ ^^ ^ \\
# //^ ^^ ^/( .. )\^ ^ ^ \\
# // ^^ ^/\| v""v |/\^ ^ ^\\
# // ^^/\/ / `~~` \ \/\^ ^\\
# -----------------------------
# HERE BE DRAGONS
def __init__(self, tokens):
"""Initialize tokens excluding comments."""
self.tokens = [t for t in tokens if t['type'] is not TT.comment]
self.num_tokens = len(self.tokens)
self._cur_position = 0
self._finished = False
self._data = None
self._literals = (TT.str, TT.int, TT.float, TT.bool, TT.null)
@property
def data(self):
"""Return parsed data structure."""
if self._data is None:
# reset after possible parsing failure
self.__init__(self.tokens)
return self._parse()
else:
return self._data
@property
def _cur_token(self):
"""Return the current token."""
if self._finished:
return {'value': None, 'type': None, 'line': -1}
else:
return self.tokens[self._cur_position]
def _nth_token(self, n=1):
"""Return token n tokens ahead of the current token."""
try:
return self.tokens[self._cur_position + n]
except IndexError:
return {'value': None, 'type': None, 'line': -1}
def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
self._cur_positon = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n
def _skip_newlines(self):
"""Increment over newlines."""
while self._cur_token['type'] is TT.lbreak and not self._finished:
self._increment()
def _parse(self):
"""Parse the token stream into a nice dictionary data structure."""
while self._cur_token['type'] in (TT.ws, TT.lbreak):
self._skip_whitespace()
self._skip_newlines()
self._data = self._parse_value()
return self._data
def _parse_value(self):
"""Parse the value of a key-value pair."""
indent = 0
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
if self._cur_token['type'] is TT.id:
return self._parse_key(indent)
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is TT.hyphen:
self._increment()
return []
else:
return self._parse_object_list()
else:
# TODO: single comma gives empty list
return self._parse_literal_list(indent)
def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
)
def _parse_object_list(self):
"""Parse a list of data structures."""
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token)
def _parse_literal_list(self, indent):
"""Parse a list of literals."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_literal_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
# find next token after whitespace without incrementing
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and (
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
next_token = self.tokens[temp_position]
# end of stream
if next_token['type'] is TT.ws:
return self._cur_token['value']
elif next_token['type'] is TT.comma:
return self._parse_comma_list()
elif next_token['type'] is TT.lbreak:
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
temp_position += 1
if self.tokens[temp_position]['type'] in self._literals:
return self._parse_newline_list(indent)
else:
rval = self._cur_token['value']
self._increment()
return rval
else:
rval = self._cur_token['value']
self._increment()
return rval
def _parse_comma_list(self):
"""Parse a comma seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array
def _parse_newline_list(self, indent):
"""Parse a newline seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_newline_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
new_indent = indent
while not self._finished:
if new_indent < indent:
break
elif new_indent == indent:
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
# look ahead to see if it's a comma seperated list
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
(
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
if self.tokens[temp_position]['type'] is TT.comma:
array.append(self._parse_comma_list())
else:
if self._cur_token['type'] is not TT.hyphen:
array.append(self._cur_token['value'])
elif self._nth_token()['type'] is TT.hyphen:
# two consecutive '-'s
array.append([])
self._increment()
self._increment()
else: # new_indent > indent
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
array.append(self._parse_newline_list(new_indent))
self._skip_whitespace()
if (
not self._finished and
self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
):
raise ParseError('newline', self._cur_token)
temp_position = self._cur_position
new_indent = 0
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
if self.tokens[temp_position]['type'] is TT.lbreak:
new_indent = 0
else:
new_indent += 1
temp_position += 1
return array
|
Sean1708/HipPy
|
hippy/parser.py
|
Parser._skip_newlines
|
python
|
def _skip_newlines(self):
while self._cur_token['type'] is TT.lbreak and not self._finished:
self._increment()
|
Increment over newlines.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L88-L91
| null |
class Parser:
"""Parses an iterable of tokens into a data structure."""
# .==. .==.
# //`^\\ //^`\\
# // ^ ^\(\__/)/^ ^^\\
# //^ ^^ ^/6 6\ ^^ ^ \\
# //^ ^^ ^/( .. )\^ ^ ^ \\
# // ^^ ^/\| v""v |/\^ ^ ^\\
# // ^^/\/ / `~~` \ \/\^ ^\\
# -----------------------------
# HERE BE DRAGONS
def __init__(self, tokens):
"""Initialize tokens excluding comments."""
self.tokens = [t for t in tokens if t['type'] is not TT.comment]
self.num_tokens = len(self.tokens)
self._cur_position = 0
self._finished = False
self._data = None
self._literals = (TT.str, TT.int, TT.float, TT.bool, TT.null)
@property
def data(self):
"""Return parsed data structure."""
if self._data is None:
# reset after possible parsing failure
self.__init__(self.tokens)
return self._parse()
else:
return self._data
@property
def _cur_token(self):
"""Return the current token."""
if self._finished:
return {'value': None, 'type': None, 'line': -1}
else:
return self.tokens[self._cur_position]
def _nth_token(self, n=1):
"""Return token n tokens ahead of the current token."""
try:
return self.tokens[self._cur_position + n]
except IndexError:
return {'value': None, 'type': None, 'line': -1}
def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
self._cur_positon = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n
def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i
def _parse(self):
"""Parse the token stream into a nice dictionary data structure."""
while self._cur_token['type'] in (TT.ws, TT.lbreak):
self._skip_whitespace()
self._skip_newlines()
self._data = self._parse_value()
return self._data
def _parse_value(self):
"""Parse the value of a key-value pair."""
indent = 0
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
if self._cur_token['type'] is TT.id:
return self._parse_key(indent)
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is TT.hyphen:
self._increment()
return []
else:
return self._parse_object_list()
else:
# TODO: single comma gives empty list
return self._parse_literal_list(indent)
def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
)
def _parse_object_list(self):
"""Parse a list of data structures."""
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token)
    def _parse_literal_list(self, indent):
        """Parse a literal, or the list of literals it begins.

        Looks ahead (without consuming tokens) past whitespace and
        literals to decide what the current literal starts:

        * next significant token is a comma -> comma-separated list
        * a literal follows on a later line -> newline-separated list
        * otherwise                         -> the bare literal value

        Args:
            indent (int): Indentation level of the current line.

        Returns:
            The single literal value, or a list of values.
        """
        if self._cur_token['type'] not in self._literals:
            # internal sanity check: the dispatcher must only call this
            # with the cursor on a literal token
            raise Exception(
                "Parser failed, _parse_literal_list was called on non-literal"
                " {} on line {}.".format(
                    repr(self._cur_token['value']), self._cur_token['line']
                )
            )
        # find next token after whitespace without incrementing
        temp_position = self._cur_position
        while (
            temp_position < self.num_tokens-1 and (
                self.tokens[temp_position]['type'] is TT.ws or
                self.tokens[temp_position]['type'] in self._literals
            )
        ):
            temp_position += 1
        next_token = self.tokens[temp_position]
        # end of stream
        if next_token['type'] is TT.ws:
            return self._cur_token['value']
        elif next_token['type'] is TT.comma:
            return self._parse_comma_list()
        elif next_token['type'] is TT.lbreak:
            # peek past blank space: does another literal follow on a
            # later line (newline-separated list) or not?
            while (
                temp_position < self.num_tokens-1 and
                self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
            ):
                temp_position += 1
            if self.tokens[temp_position]['type'] in self._literals:
                return self._parse_newline_list(indent)
            else:
                rval = self._cur_token['value']
                self._increment()
                return rval
        else:
            rval = self._cur_token['value']
            self._increment()
            return rval
    def _parse_comma_list(self):
        """Parse a comma-separated list of literals.

        Returns:
            list: The literal values, in source order.

        Raises:
            ParseError: If a literal is followed by anything other than a
                comma, whitespace or a newline.
        """
        if self._cur_token['type'] not in self._literals:
            # internal sanity check: caller must position us on a literal
            raise Exception(
                "Parser failed, _parse_comma_list was called on non-literal"
                " {} on line {}.".format(
                    repr(self._cur_token['value']), self._cur_token['line']
                )
            )
        array = []
        while self._cur_token['type'] in self._literals and not self._finished:
            array.append(self._cur_token['value'])
            self._increment()
            self._skip_whitespace()
            if self._cur_token['type'] is TT.comma:
                self._increment()
                self._skip_whitespace()
            elif (
                not self._finished and
                self._cur_token['type'] not in (TT.ws, TT.lbreak)
            ):
                raise ParseError('comma or newline', self._cur_token)
        return array
    def _parse_newline_list(self, indent):
        """Parse a newline-separated list of literals.

        Elements sit one per line at the same indentation; a deeper
        indent starts a nested list (parsed recursively) and a shallower
        one ends this list.  A comma-separated run on one line becomes a
        nested list as well, and '--' yields an empty nested list.

        Args:
            indent (int): Indentation level this list's elements live at.

        Returns:
            list: The parsed (possibly nested) values.

        Raises:
            ParseError: If an element is not followed by a newline or '-'.
        """
        if self._cur_token['type'] not in self._literals:
            # internal sanity check: caller must position us on a literal
            raise Exception(
                "Parser failed, _parse_newline_list was called on non-literal"
                " {} on line {}.".format(
                    repr(self._cur_token['value']), self._cur_token['line']
                )
            )
        array = []
        new_indent = indent
        while not self._finished:
            if new_indent < indent:
                # dedent: this list is finished
                break
            elif new_indent == indent:
                while self._cur_token['type'] is TT.lbreak:
                    self._skip_newlines()
                    self._skip_whitespace()
                # look ahead to see if it's a comma separated list
                temp_position = self._cur_position
                while (
                    temp_position < self.num_tokens-1 and
                    (
                        self.tokens[temp_position]['type'] is TT.ws or
                        self.tokens[temp_position]['type'] in self._literals
                    )
                ):
                    temp_position += 1
                if self.tokens[temp_position]['type'] is TT.comma:
                    array.append(self._parse_comma_list())
                else:
                    if self._cur_token['type'] is not TT.hyphen:
                        array.append(self._cur_token['value'])
                    elif self._nth_token()['type'] is TT.hyphen:
                        # two consecutive '-'s
                        array.append([])
                        self._increment()
                    self._increment()
            else:  # new_indent > indent
                while self._cur_token['type'] is TT.lbreak:
                    self._skip_newlines()
                    self._skip_whitespace()
                # deeper indentation: recurse for the nested list
                array.append(self._parse_newline_list(new_indent))
            self._skip_whitespace()
            if (
                not self._finished and
                self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
            ):
                raise ParseError('newline', self._cur_token)
            # measure the indentation of the next non-blank line without
            # consuming tokens; blank lines reset the count
            temp_position = self._cur_position
            new_indent = 0
            while (
                temp_position < self.num_tokens-1 and
                self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
            ):
                if self.tokens[temp_position]['type'] is TT.lbreak:
                    new_indent = 0
                else:
                    new_indent += 1
                temp_position += 1
        return array
|
Sean1708/HipPy
|
hippy/parser.py
|
Parser._parse
|
python
|
def _parse(self):
while self._cur_token['type'] in (TT.ws, TT.lbreak):
self._skip_whitespace()
self._skip_newlines()
self._data = self._parse_value()
return self._data
|
Parse the token stream into a nice dictionary data structure.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L93-L101
|
[
"def _skip_whitespace(self):\n \"\"\"Increment over whitespace, counting characters.\"\"\"\n i = 0\n while self._cur_token['type'] is TT.ws and not self._finished:\n self._increment()\n i += 1\n\n return i\n",
"def _skip_newlines(self):\n \"\"\"Increment over newlines.\"\"\"\n while self._cur_token['type'] is TT.lbreak and not self._finished:\n self._increment()\n",
"def _parse_value(self):\n \"\"\"Parse the value of a key-value pair.\"\"\"\n indent = 0\n while self._cur_token['type'] is TT.ws:\n indent = self._skip_whitespace()\n self._skip_newlines()\n\n if self._cur_token['type'] is TT.id:\n return self._parse_key(indent)\n elif self._cur_token['type'] is TT.hyphen:\n self._increment()\n if self._cur_token['type'] is TT.hyphen:\n self._increment()\n return []\n else:\n return self._parse_object_list()\n else:\n # TODO: single comma gives empty list\n return self._parse_literal_list(indent)\n"
] |
class Parser:
"""Parses an iterable of tokens into a data structure."""
# .==. .==.
# //`^\\ //^`\\
# // ^ ^\(\__/)/^ ^^\\
# //^ ^^ ^/6 6\ ^^ ^ \\
# //^ ^^ ^/( .. )\^ ^ ^ \\
# // ^^ ^/\| v""v |/\^ ^ ^\\
# // ^^/\/ / `~~` \ \/\^ ^\\
# -----------------------------
# HERE BE DRAGONS
    def __init__(self, tokens):
        """Initialize tokens excluding comments.

        Args:
            tokens: Iterable of token dicts (with 'value', 'type' and
                'line' keys); comment tokens are dropped up front since
                they never affect parsing.
        """
        self.tokens = [t for t in tokens if t['type'] is not TT.comment]
        self.num_tokens = len(self.tokens)
        # index of the token currently under the cursor
        self._cur_position = 0
        # True once the stream has been fully consumed
        self._finished = False
        # cached parse result; None until _parse() has run
        self._data = None
        # token types that may appear as bare values / list elements
        self._literals = (TT.str, TT.int, TT.float, TT.bool, TT.null)
    @property
    def data(self):
        """Return parsed data structure.

        Parses lazily on first access and caches the result.  On a cache
        miss the parser state is reset (via ``__init__``) so a previously
        failed parse attempt does not leave stale cursor state behind.
        """
        if self._data is None:
            # reset after possible parsing failure
            self.__init__(self.tokens)
            return self._parse()
        else:
            return self._data
    @property
    def _cur_token(self):
        """Return the current token.

        Once the stream is exhausted a placeholder "null" token is
        returned so callers can test ``['type']`` without bounds checks.
        """
        if self._finished:
            return {'value': None, 'type': None, 'line': -1}
        else:
            return self.tokens[self._cur_position]
    def _nth_token(self, n=1):
        """Return token n tokens ahead of the current token.

        Past the end of the stream a placeholder "null" token is returned
        instead of raising IndexError.

        Args:
            n (int): Lookahead distance (default 1).
        """
        try:
            return self.tokens[self._cur_position + n]
        except IndexError:
            return {'value': None, 'type': None, 'line': -1}
def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
self._cur_positon = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n
def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i
    def _skip_newlines(self):
        """Increment over consecutive line-break tokens."""
        while self._cur_token['type'] is TT.lbreak and not self._finished:
            self._increment()
    def _parse_value(self):
        """Parse the value of a key-value pair.

        Measures the indentation of the value's line, then dispatches on
        the first significant token: an identifier starts a nested
        mapping, a hyphen starts a list ('--' being the empty list), and
        anything else is handled as a literal (or literal list).

        Returns:
            The parsed value: dict, list, or a literal.
        """
        indent = 0
        while self._cur_token['type'] is TT.ws:
            indent = self._skip_whitespace()
            self._skip_newlines()
        if self._cur_token['type'] is TT.id:
            return self._parse_key(indent)
        elif self._cur_token['type'] is TT.hyphen:
            self._increment()
            if self._cur_token['type'] is TT.hyphen:
                # '--' with nothing between is the empty list
                self._increment()
                return []
            else:
                return self._parse_object_list()
        else:
            # TODO: single comma gives empty list
            return self._parse_literal_list(indent)
def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
)
def _parse_object_list(self):
"""Parse a list of data structures."""
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token)
def _parse_literal_list(self, indent):
"""Parse a list of literals."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_literal_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
# find next token after whitespace without incrementing
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and (
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
next_token = self.tokens[temp_position]
# end of stream
if next_token['type'] is TT.ws:
return self._cur_token['value']
elif next_token['type'] is TT.comma:
return self._parse_comma_list()
elif next_token['type'] is TT.lbreak:
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
temp_position += 1
if self.tokens[temp_position]['type'] in self._literals:
return self._parse_newline_list(indent)
else:
rval = self._cur_token['value']
self._increment()
return rval
else:
rval = self._cur_token['value']
self._increment()
return rval
def _parse_comma_list(self):
"""Parse a comma seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array
def _parse_newline_list(self, indent):
"""Parse a newline seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_newline_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
new_indent = indent
while not self._finished:
if new_indent < indent:
break
elif new_indent == indent:
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
# look ahead to see if it's a comma seperated list
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
(
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
if self.tokens[temp_position]['type'] is TT.comma:
array.append(self._parse_comma_list())
else:
if self._cur_token['type'] is not TT.hyphen:
array.append(self._cur_token['value'])
elif self._nth_token()['type'] is TT.hyphen:
# two consecutive '-'s
array.append([])
self._increment()
self._increment()
else: # new_indent > indent
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
array.append(self._parse_newline_list(new_indent))
self._skip_whitespace()
if (
not self._finished and
self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
):
raise ParseError('newline', self._cur_token)
temp_position = self._cur_position
new_indent = 0
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
if self.tokens[temp_position]['type'] is TT.lbreak:
new_indent = 0
else:
new_indent += 1
temp_position += 1
return array
|
Sean1708/HipPy
|
hippy/parser.py
|
Parser._parse_value
|
python
|
def _parse_value(self):
indent = 0
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
if self._cur_token['type'] is TT.id:
return self._parse_key(indent)
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is TT.hyphen:
self._increment()
return []
else:
return self._parse_object_list()
else:
# TODO: single comma gives empty list
return self._parse_literal_list(indent)
|
Parse the value of a key-value pair.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L103-L121
| null |
class Parser:
"""Parses an iterable of tokens into a data structure."""
# .==. .==.
# //`^\\ //^`\\
# // ^ ^\(\__/)/^ ^^\\
# //^ ^^ ^/6 6\ ^^ ^ \\
# //^ ^^ ^/( .. )\^ ^ ^ \\
# // ^^ ^/\| v""v |/\^ ^ ^\\
# // ^^/\/ / `~~` \ \/\^ ^\\
# -----------------------------
# HERE BE DRAGONS
def __init__(self, tokens):
"""Initialize tokens excluding comments."""
self.tokens = [t for t in tokens if t['type'] is not TT.comment]
self.num_tokens = len(self.tokens)
self._cur_position = 0
self._finished = False
self._data = None
self._literals = (TT.str, TT.int, TT.float, TT.bool, TT.null)
@property
def data(self):
"""Return parsed data structure."""
if self._data is None:
# reset after possible parsing failure
self.__init__(self.tokens)
return self._parse()
else:
return self._data
@property
def _cur_token(self):
"""Return the current token."""
if self._finished:
return {'value': None, 'type': None, 'line': -1}
else:
return self.tokens[self._cur_position]
def _nth_token(self, n=1):
"""Return token n tokens ahead of the current token."""
try:
return self.tokens[self._cur_position + n]
except IndexError:
return {'value': None, 'type': None, 'line': -1}
def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
self._cur_positon = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n
def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i
def _skip_newlines(self):
"""Increment over newlines."""
while self._cur_token['type'] is TT.lbreak and not self._finished:
self._increment()
    def _parse(self):
        """Parse the token stream into a nice dictionary data structure.

        Skips any leading whitespace/newlines, parses the top-level
        value and caches it on ``self._data`` before returning it.
        """
        while self._cur_token['type'] in (TT.ws, TT.lbreak):
            self._skip_whitespace()
            self._skip_newlines()
        self._data = self._parse_value()
        return self._data
def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
)
def _parse_object_list(self):
"""Parse a list of data structures."""
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token)
def _parse_literal_list(self, indent):
"""Parse a list of literals."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_literal_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
# find next token after whitespace without incrementing
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and (
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
next_token = self.tokens[temp_position]
# end of stream
if next_token['type'] is TT.ws:
return self._cur_token['value']
elif next_token['type'] is TT.comma:
return self._parse_comma_list()
elif next_token['type'] is TT.lbreak:
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
temp_position += 1
if self.tokens[temp_position]['type'] in self._literals:
return self._parse_newline_list(indent)
else:
rval = self._cur_token['value']
self._increment()
return rval
else:
rval = self._cur_token['value']
self._increment()
return rval
def _parse_comma_list(self):
"""Parse a comma seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array
def _parse_newline_list(self, indent):
"""Parse a newline seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_newline_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
new_indent = indent
while not self._finished:
if new_indent < indent:
break
elif new_indent == indent:
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
# look ahead to see if it's a comma seperated list
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
(
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
if self.tokens[temp_position]['type'] is TT.comma:
array.append(self._parse_comma_list())
else:
if self._cur_token['type'] is not TT.hyphen:
array.append(self._cur_token['value'])
elif self._nth_token()['type'] is TT.hyphen:
# two consecutive '-'s
array.append([])
self._increment()
self._increment()
else: # new_indent > indent
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
array.append(self._parse_newline_list(new_indent))
self._skip_whitespace()
if (
not self._finished and
self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
):
raise ParseError('newline', self._cur_token)
temp_position = self._cur_position
new_indent = 0
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
if self.tokens[temp_position]['type'] is TT.lbreak:
new_indent = 0
else:
new_indent += 1
temp_position += 1
return array
|
Sean1708/HipPy
|
hippy/parser.py
|
Parser._parse_key
|
python
|
def _parse_key(self, indent):
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
)
|
Parse a series of key-value pairs.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L123-L172
| null |
class Parser:
"""Parses an iterable of tokens into a data structure."""
# .==. .==.
# //`^\\ //^`\\
# // ^ ^\(\__/)/^ ^^\\
# //^ ^^ ^/6 6\ ^^ ^ \\
# //^ ^^ ^/( .. )\^ ^ ^ \\
# // ^^ ^/\| v""v |/\^ ^ ^\\
# // ^^/\/ / `~~` \ \/\^ ^\\
# -----------------------------
# HERE BE DRAGONS
def __init__(self, tokens):
"""Initialize tokens excluding comments."""
self.tokens = [t for t in tokens if t['type'] is not TT.comment]
self.num_tokens = len(self.tokens)
self._cur_position = 0
self._finished = False
self._data = None
self._literals = (TT.str, TT.int, TT.float, TT.bool, TT.null)
@property
def data(self):
"""Return parsed data structure."""
if self._data is None:
# reset after possible parsing failure
self.__init__(self.tokens)
return self._parse()
else:
return self._data
@property
def _cur_token(self):
"""Return the current token."""
if self._finished:
return {'value': None, 'type': None, 'line': -1}
else:
return self.tokens[self._cur_position]
def _nth_token(self, n=1):
"""Return token n tokens ahead of the current token."""
try:
return self.tokens[self._cur_position + n]
except IndexError:
return {'value': None, 'type': None, 'line': -1}
def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
self._cur_positon = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n
def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i
def _skip_newlines(self):
"""Increment over newlines."""
while self._cur_token['type'] is TT.lbreak and not self._finished:
self._increment()
def _parse(self):
"""Parse the token stream into a nice dictionary data structure."""
while self._cur_token['type'] in (TT.ws, TT.lbreak):
self._skip_whitespace()
self._skip_newlines()
self._data = self._parse_value()
return self._data
def _parse_value(self):
"""Parse the value of a key-value pair."""
indent = 0
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
if self._cur_token['type'] is TT.id:
return self._parse_key(indent)
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is TT.hyphen:
self._increment()
return []
else:
return self._parse_object_list()
else:
# TODO: single comma gives empty list
return self._parse_literal_list(indent)
def _parse_object_list(self):
"""Parse a list of data structures."""
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token)
def _parse_literal_list(self, indent):
"""Parse a list of literals."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_literal_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
# find next token after whitespace without incrementing
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and (
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
next_token = self.tokens[temp_position]
# end of stream
if next_token['type'] is TT.ws:
return self._cur_token['value']
elif next_token['type'] is TT.comma:
return self._parse_comma_list()
elif next_token['type'] is TT.lbreak:
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
temp_position += 1
if self.tokens[temp_position]['type'] in self._literals:
return self._parse_newline_list(indent)
else:
rval = self._cur_token['value']
self._increment()
return rval
else:
rval = self._cur_token['value']
self._increment()
return rval
def _parse_comma_list(self):
"""Parse a comma seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array
def _parse_newline_list(self, indent):
"""Parse a newline seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_newline_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
new_indent = indent
while not self._finished:
if new_indent < indent:
break
elif new_indent == indent:
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
# look ahead to see if it's a comma seperated list
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
(
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
if self.tokens[temp_position]['type'] is TT.comma:
array.append(self._parse_comma_list())
else:
if self._cur_token['type'] is not TT.hyphen:
array.append(self._cur_token['value'])
elif self._nth_token()['type'] is TT.hyphen:
# two consecutive '-'s
array.append([])
self._increment()
self._increment()
else: # new_indent > indent
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
array.append(self._parse_newline_list(new_indent))
self._skip_whitespace()
if (
not self._finished and
self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
):
raise ParseError('newline', self._cur_token)
temp_position = self._cur_position
new_indent = 0
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
if self.tokens[temp_position]['type'] is TT.lbreak:
new_indent = 0
else:
new_indent += 1
temp_position += 1
return array
|
Sean1708/HipPy
|
hippy/parser.py
|
Parser._parse_object_list
|
python
|
def _parse_object_list(self):
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token)
|
Parse a list of data structures.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L174-L194
| null |
class Parser:
"""Parses an iterable of tokens into a data structure."""
# .==. .==.
# //`^\\ //^`\\
# // ^ ^\(\__/)/^ ^^\\
# //^ ^^ ^/6 6\ ^^ ^ \\
# //^ ^^ ^/( .. )\^ ^ ^ \\
# // ^^ ^/\| v""v |/\^ ^ ^\\
# // ^^/\/ / `~~` \ \/\^ ^\\
# -----------------------------
# HERE BE DRAGONS
def __init__(self, tokens):
"""Initialize tokens excluding comments."""
self.tokens = [t for t in tokens if t['type'] is not TT.comment]
self.num_tokens = len(self.tokens)
self._cur_position = 0
self._finished = False
self._data = None
self._literals = (TT.str, TT.int, TT.float, TT.bool, TT.null)
@property
def data(self):
"""Return parsed data structure."""
if self._data is None:
# reset after possible parsing failure
self.__init__(self.tokens)
return self._parse()
else:
return self._data
@property
def _cur_token(self):
"""Return the current token."""
if self._finished:
return {'value': None, 'type': None, 'line': -1}
else:
return self.tokens[self._cur_position]
def _nth_token(self, n=1):
"""Return token n tokens ahead of the current token."""
try:
return self.tokens[self._cur_position + n]
except IndexError:
return {'value': None, 'type': None, 'line': -1}
def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
self._cur_positon = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n
def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i
def _skip_newlines(self):
"""Increment over newlines."""
while self._cur_token['type'] is TT.lbreak and not self._finished:
self._increment()
def _parse(self):
"""Parse the token stream into a nice dictionary data structure."""
while self._cur_token['type'] in (TT.ws, TT.lbreak):
self._skip_whitespace()
self._skip_newlines()
self._data = self._parse_value()
return self._data
def _parse_value(self):
"""Parse the value of a key-value pair."""
indent = 0
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
if self._cur_token['type'] is TT.id:
return self._parse_key(indent)
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is TT.hyphen:
self._increment()
return []
else:
return self._parse_object_list()
else:
# TODO: single comma gives empty list
return self._parse_literal_list(indent)
def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
)
def _parse_literal_list(self, indent):
"""Parse a list of literals."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_literal_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
# find next token after whitespace without incrementing
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and (
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
next_token = self.tokens[temp_position]
# end of stream
if next_token['type'] is TT.ws:
return self._cur_token['value']
elif next_token['type'] is TT.comma:
return self._parse_comma_list()
elif next_token['type'] is TT.lbreak:
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
temp_position += 1
if self.tokens[temp_position]['type'] in self._literals:
return self._parse_newline_list(indent)
else:
rval = self._cur_token['value']
self._increment()
return rval
else:
rval = self._cur_token['value']
self._increment()
return rval
def _parse_comma_list(self):
"""Parse a comma seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array
def _parse_newline_list(self, indent):
"""Parse a newline seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_newline_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
new_indent = indent
while not self._finished:
if new_indent < indent:
break
elif new_indent == indent:
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
# look ahead to see if it's a comma seperated list
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
(
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
if self.tokens[temp_position]['type'] is TT.comma:
array.append(self._parse_comma_list())
else:
if self._cur_token['type'] is not TT.hyphen:
array.append(self._cur_token['value'])
elif self._nth_token()['type'] is TT.hyphen:
# two consecutive '-'s
array.append([])
self._increment()
self._increment()
else: # new_indent > indent
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
array.append(self._parse_newline_list(new_indent))
self._skip_whitespace()
if (
not self._finished and
self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
):
raise ParseError('newline', self._cur_token)
temp_position = self._cur_position
new_indent = 0
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
if self.tokens[temp_position]['type'] is TT.lbreak:
new_indent = 0
else:
new_indent += 1
temp_position += 1
return array
|
Sean1708/HipPy
|
hippy/parser.py
|
Parser._parse_literal_list
|
python
|
def _parse_literal_list(self, indent):
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_literal_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
# find next token after whitespace without incrementing
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and (
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
next_token = self.tokens[temp_position]
# end of stream
if next_token['type'] is TT.ws:
return self._cur_token['value']
elif next_token['type'] is TT.comma:
return self._parse_comma_list()
elif next_token['type'] is TT.lbreak:
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
temp_position += 1
if self.tokens[temp_position]['type'] in self._literals:
return self._parse_newline_list(indent)
else:
rval = self._cur_token['value']
self._increment()
return rval
else:
rval = self._cur_token['value']
self._increment()
return rval
|
Parse a list of literals.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L196-L237
| null |
class Parser:
"""Parses an iterable of tokens into a data structure."""
# .==. .==.
# //`^\\ //^`\\
# // ^ ^\(\__/)/^ ^^\\
# //^ ^^ ^/6 6\ ^^ ^ \\
# //^ ^^ ^/( .. )\^ ^ ^ \\
# // ^^ ^/\| v""v |/\^ ^ ^\\
# // ^^/\/ / `~~` \ \/\^ ^\\
# -----------------------------
# HERE BE DRAGONS
def __init__(self, tokens):
"""Initialize tokens excluding comments."""
self.tokens = [t for t in tokens if t['type'] is not TT.comment]
self.num_tokens = len(self.tokens)
self._cur_position = 0
self._finished = False
self._data = None
self._literals = (TT.str, TT.int, TT.float, TT.bool, TT.null)
@property
def data(self):
"""Return parsed data structure."""
if self._data is None:
# reset after possible parsing failure
self.__init__(self.tokens)
return self._parse()
else:
return self._data
@property
def _cur_token(self):
"""Return the current token."""
if self._finished:
return {'value': None, 'type': None, 'line': -1}
else:
return self.tokens[self._cur_position]
def _nth_token(self, n=1):
"""Return token n tokens ahead of the current token."""
try:
return self.tokens[self._cur_position + n]
except IndexError:
return {'value': None, 'type': None, 'line': -1}
def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
self._cur_positon = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n
def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i
def _skip_newlines(self):
"""Increment over newlines."""
while self._cur_token['type'] is TT.lbreak and not self._finished:
self._increment()
def _parse(self):
"""Parse the token stream into a nice dictionary data structure."""
while self._cur_token['type'] in (TT.ws, TT.lbreak):
self._skip_whitespace()
self._skip_newlines()
self._data = self._parse_value()
return self._data
def _parse_value(self):
"""Parse the value of a key-value pair."""
indent = 0
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
if self._cur_token['type'] is TT.id:
return self._parse_key(indent)
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is TT.hyphen:
self._increment()
return []
else:
return self._parse_object_list()
else:
# TODO: single comma gives empty list
return self._parse_literal_list(indent)
def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
)
def _parse_object_list(self):
"""Parse a list of data structures."""
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token)
def _parse_comma_list(self):
"""Parse a comma seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array
def _parse_newline_list(self, indent):
"""Parse a newline seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_newline_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
new_indent = indent
while not self._finished:
if new_indent < indent:
break
elif new_indent == indent:
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
# look ahead to see if it's a comma seperated list
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
(
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
if self.tokens[temp_position]['type'] is TT.comma:
array.append(self._parse_comma_list())
else:
if self._cur_token['type'] is not TT.hyphen:
array.append(self._cur_token['value'])
elif self._nth_token()['type'] is TT.hyphen:
# two consecutive '-'s
array.append([])
self._increment()
self._increment()
else: # new_indent > indent
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
array.append(self._parse_newline_list(new_indent))
self._skip_whitespace()
if (
not self._finished and
self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
):
raise ParseError('newline', self._cur_token)
temp_position = self._cur_position
new_indent = 0
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
if self.tokens[temp_position]['type'] is TT.lbreak:
new_indent = 0
else:
new_indent += 1
temp_position += 1
return array
|
Sean1708/HipPy
|
hippy/parser.py
|
Parser._parse_comma_list
|
python
|
def _parse_comma_list(self):
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array
|
Parse a comma seperated list.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L239-L263
| null |
class Parser:
"""Parses an iterable of tokens into a data structure."""
# .==. .==.
# //`^\\ //^`\\
# // ^ ^\(\__/)/^ ^^\\
# //^ ^^ ^/6 6\ ^^ ^ \\
# //^ ^^ ^/( .. )\^ ^ ^ \\
# // ^^ ^/\| v""v |/\^ ^ ^\\
# // ^^/\/ / `~~` \ \/\^ ^\\
# -----------------------------
# HERE BE DRAGONS
def __init__(self, tokens):
"""Initialize tokens excluding comments."""
self.tokens = [t for t in tokens if t['type'] is not TT.comment]
self.num_tokens = len(self.tokens)
self._cur_position = 0
self._finished = False
self._data = None
self._literals = (TT.str, TT.int, TT.float, TT.bool, TT.null)
@property
def data(self):
"""Return parsed data structure."""
if self._data is None:
# reset after possible parsing failure
self.__init__(self.tokens)
return self._parse()
else:
return self._data
@property
def _cur_token(self):
"""Return the current token."""
if self._finished:
return {'value': None, 'type': None, 'line': -1}
else:
return self.tokens[self._cur_position]
def _nth_token(self, n=1):
"""Return token n tokens ahead of the current token."""
try:
return self.tokens[self._cur_position + n]
except IndexError:
return {'value': None, 'type': None, 'line': -1}
def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
self._cur_positon = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n
def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i
def _skip_newlines(self):
"""Increment over newlines."""
while self._cur_token['type'] is TT.lbreak and not self._finished:
self._increment()
def _parse(self):
"""Parse the token stream into a nice dictionary data structure."""
while self._cur_token['type'] in (TT.ws, TT.lbreak):
self._skip_whitespace()
self._skip_newlines()
self._data = self._parse_value()
return self._data
def _parse_value(self):
"""Parse the value of a key-value pair."""
indent = 0
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
if self._cur_token['type'] is TT.id:
return self._parse_key(indent)
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is TT.hyphen:
self._increment()
return []
else:
return self._parse_object_list()
else:
# TODO: single comma gives empty list
return self._parse_literal_list(indent)
def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
)
def _parse_object_list(self):
"""Parse a list of data structures."""
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token)
def _parse_literal_list(self, indent):
"""Parse a list of literals."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_literal_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
# find next token after whitespace without incrementing
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and (
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
next_token = self.tokens[temp_position]
# end of stream
if next_token['type'] is TT.ws:
return self._cur_token['value']
elif next_token['type'] is TT.comma:
return self._parse_comma_list()
elif next_token['type'] is TT.lbreak:
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
temp_position += 1
if self.tokens[temp_position]['type'] in self._literals:
return self._parse_newline_list(indent)
else:
rval = self._cur_token['value']
self._increment()
return rval
else:
rval = self._cur_token['value']
self._increment()
return rval
def _parse_newline_list(self, indent):
"""Parse a newline seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_newline_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
new_indent = indent
while not self._finished:
if new_indent < indent:
break
elif new_indent == indent:
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
# look ahead to see if it's a comma seperated list
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
(
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
if self.tokens[temp_position]['type'] is TT.comma:
array.append(self._parse_comma_list())
else:
if self._cur_token['type'] is not TT.hyphen:
array.append(self._cur_token['value'])
elif self._nth_token()['type'] is TT.hyphen:
# two consecutive '-'s
array.append([])
self._increment()
self._increment()
else: # new_indent > indent
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
array.append(self._parse_newline_list(new_indent))
self._skip_whitespace()
if (
not self._finished and
self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
):
raise ParseError('newline', self._cur_token)
temp_position = self._cur_position
new_indent = 0
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
if self.tokens[temp_position]['type'] is TT.lbreak:
new_indent = 0
else:
new_indent += 1
temp_position += 1
return array
|
Sean1708/HipPy
|
hippy/parser.py
|
Parser._parse_newline_list
|
python
|
def _parse_newline_list(self, indent):
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_newline_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
new_indent = indent
while not self._finished:
if new_indent < indent:
break
elif new_indent == indent:
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
# look ahead to see if it's a comma seperated list
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
(
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
if self.tokens[temp_position]['type'] is TT.comma:
array.append(self._parse_comma_list())
else:
if self._cur_token['type'] is not TT.hyphen:
array.append(self._cur_token['value'])
elif self._nth_token()['type'] is TT.hyphen:
# two consecutive '-'s
array.append([])
self._increment()
self._increment()
else: # new_indent > indent
while self._cur_token['type'] is TT.lbreak:
self._skip_newlines()
self._skip_whitespace()
array.append(self._parse_newline_list(new_indent))
self._skip_whitespace()
if (
not self._finished and
self._cur_token['type'] not in (TT.lbreak, TT.hyphen)
):
raise ParseError('newline', self._cur_token)
temp_position = self._cur_position
new_indent = 0
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
if self.tokens[temp_position]['type'] is TT.lbreak:
new_indent = 0
else:
new_indent += 1
temp_position += 1
return array
|
Parse a newline seperated list.
|
train
|
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/parser.py#L265-L330
| null |
class Parser:
"""Parses an iterable of tokens into a data structure."""
# .==. .==.
# //`^\\ //^`\\
# // ^ ^\(\__/)/^ ^^\\
# //^ ^^ ^/6 6\ ^^ ^ \\
# //^ ^^ ^/( .. )\^ ^ ^ \\
# // ^^ ^/\| v""v |/\^ ^ ^\\
# // ^^/\/ / `~~` \ \/\^ ^\\
# -----------------------------
# HERE BE DRAGONS
def __init__(self, tokens):
"""Initialize tokens excluding comments."""
self.tokens = [t for t in tokens if t['type'] is not TT.comment]
self.num_tokens = len(self.tokens)
self._cur_position = 0
self._finished = False
self._data = None
self._literals = (TT.str, TT.int, TT.float, TT.bool, TT.null)
@property
def data(self):
"""Return parsed data structure."""
if self._data is None:
# reset after possible parsing failure
self.__init__(self.tokens)
return self._parse()
else:
return self._data
@property
def _cur_token(self):
"""Return the current token."""
if self._finished:
return {'value': None, 'type': None, 'line': -1}
else:
return self.tokens[self._cur_position]
def _nth_token(self, n=1):
"""Return token n tokens ahead of the current token."""
try:
return self.tokens[self._cur_position + n]
except IndexError:
return {'value': None, 'type': None, 'line': -1}
def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
self._cur_positon = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n
def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i
def _skip_newlines(self):
"""Increment over newlines."""
while self._cur_token['type'] is TT.lbreak and not self._finished:
self._increment()
def _parse(self):
"""Parse the token stream into a nice dictionary data structure."""
while self._cur_token['type'] in (TT.ws, TT.lbreak):
self._skip_whitespace()
self._skip_newlines()
self._data = self._parse_value()
return self._data
def _parse_value(self):
"""Parse the value of a key-value pair."""
indent = 0
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
if self._cur_token['type'] is TT.id:
return self._parse_key(indent)
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is TT.hyphen:
self._increment()
return []
else:
return self._parse_object_list()
else:
# TODO: single comma gives empty list
return self._parse_literal_list(indent)
def _parse_key(self, indent):
"""Parse a series of key-value pairs."""
data = {}
new_indent = indent
while not self._finished and new_indent == indent:
self._skip_whitespace()
cur_token = self._cur_token
if cur_token['type'] is TT.id:
key = cur_token['value']
next_token = self._nth_token()
if next_token['type'] is TT.colon:
self._increment(2) # move past the ':'
# whitespace before a newline is not important
# whitespace after a newline is important
self._skip_whitespace()
self._skip_newlines()
data[key] = self._parse_value()
else:
raise ParseError("':'", next_token)
else:
if cur_token['type'] is TT.hyphen:
return data
else:
raise ParseError("identifier or '-'", cur_token)
if self.tokens[self._cur_position - 1]['type'] is not TT.lbreak:
# skip whitespace at the end of the line
self._skip_whitespace()
self._skip_newlines()
# find next indentation level without incrementing
new_indent = 0
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] is TT.ws
):
temp_position += 1
new_indent += 1
if indent == 0 or new_indent < indent:
return data
else:
raise Exception(
"Parser screwed up, increase of indent on line {} should "
"have been caught by _parse_value().".format(
cur_token['line']
)
)
def _parse_object_list(self):
"""Parse a list of data structures."""
array = []
indent = 0
while not self._finished:
self._skip_newlines()
if self._cur_token['type'] is TT.ws:
while self._cur_token['type'] is TT.ws:
indent = self._skip_whitespace()
self._skip_newlines()
elif self._cur_token['type'] is TT.id:
array.append(self._parse_key(indent))
elif self._cur_token['type'] is TT.hyphen:
self._increment()
if self._cur_token['type'] is not TT.hyphen or self._finished:
return array
else:
self._increment()
else:
raise ParseError('something different', self._cur_token)
def _parse_literal_list(self, indent):
"""Parse a list of literals."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_literal_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
# find next token after whitespace without incrementing
temp_position = self._cur_position
while (
temp_position < self.num_tokens-1 and (
self.tokens[temp_position]['type'] is TT.ws or
self.tokens[temp_position]['type'] in self._literals
)
):
temp_position += 1
next_token = self.tokens[temp_position]
# end of stream
if next_token['type'] is TT.ws:
return self._cur_token['value']
elif next_token['type'] is TT.comma:
return self._parse_comma_list()
elif next_token['type'] is TT.lbreak:
while (
temp_position < self.num_tokens-1 and
self.tokens[temp_position]['type'] in (TT.lbreak, TT.ws)
):
temp_position += 1
if self.tokens[temp_position]['type'] in self._literals:
return self._parse_newline_list(indent)
else:
rval = self._cur_token['value']
self._increment()
return rval
else:
rval = self._cur_token['value']
self._increment()
return rval
def _parse_comma_list(self):
"""Parse a comma seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array
|
wdbm/shijian
|
shijian.py
|
tail
|
python
|
def tail(
filepath = "log.txt",
lines = 50
):
try:
filepath = os.path.expanduser(os.path.expandvars(filepath))
if os.path.isfile(filepath):
text = subprocess.check_output(["tail", "-" + str(lines), filepath])
if text:
return text
else:
return False
else:
return False
except:
return False
|
Return a specified number of last lines of a specified file. If there is an
error or the file does not exist, return False.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L657-L676
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
name = "shijian"
version = "2018-06-02T1644Z"
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
global clocks
clocks = Clocks()
def time_UNIX(
style = "UNIX time S"
):
return style_datetime_object(
datetime_object = datetime.datetime.utcnow(),
style = style
)
def time_UTC(
style = None
):
return style_datetime_object(
datetime_object = datetime.datetime.utcnow(),
style = style
)
def filename_time_UNIX(
style = "UNIX time S.SSSSSS",
extension = None
):
filename = str(
time_UNIX(
style = style
)
)
if extension:
filename = filename + extension
filename_proposed = propose_filename(
filename = filename
)
return filename_proposed
def filename_time_UTC(
style = "YYYY-MM-DDTHHMMSSZ",
extension = None
):
filename = style_datetime_object(
datetime_object = datetime.datetime.utcnow(),
style = style
)
if extension:
filename = filename + extension
filename_proposed = propose_filename(
filename = filename
)
return filename_proposed
def style_minimal_seconds(seconds):
time_intervals = ["days", "hours", "minutes", "seconds"]
dateutil_object = dateutil.relativedelta.relativedelta(seconds = seconds)
return " ".join("{} {}".format(
int(getattr(dateutil_object, interval)), interval
) for interval in time_intervals if getattr(dateutil_object, interval))
def style_UNIX_timestamp(
timestamp = None,
style = "YYYY-MM-DDTHHMMZ"
):
return style_datetime_object(
datetime_object = datetime.datetime.utcfromtimestamp(timestamp),
style = style
)
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Render a datetime.datetime or datetime.timedelta in a named style.

    For datetime.datetime the style string selects a strftime-based
    representation (filename-safe, elegant, UNIX time, human-readable or
    human-audible); an unrecognised style falls back to the filename-safe
    default. For datetime.timedelta the style is a format template with
    placeholders {YYYY}, {DD}, {HH}, {MM}, {SS} (zero-padded remainders)
    and {Y}, {D}, {H}, {M}, {S} (cumulative totals). Any other input type
    returns None.
    """
    if type(datetime_object) is datetime.datetime:
        # filename safe
        if style == "YYYY-MM-DDTHHMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
        # filename safe with seconds
        elif style == "YYYY-MM-DDTHHMMSSZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%SZ")
        # filename safe with seconds and microseconds
        elif style == "YYYY-MM-DDTHHMMSSMMMMMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%S%fZ")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS UTC":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S UTC")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS Z":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S Z")
        # UNIX time in seconds with second fraction
        elif style == "UNIX time S.SSSSSS":
            return (datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds()
        # UNIX time in seconds rounded
        elif style == "UNIX time S":
            return int((datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds())
        # human-readable date
        elif style == "day DD month YYYY":
            return datetime_object.strftime("%A %d %B %Y")
        # human-readable time and date
        elif style == "HH:MM day DD month YYYY":
            return datetime_object.strftime("%H:%M %A %d %B %Y")
        # human-readable time with seconds and date
        elif style == "HH:MM:SS day DD month YYYY":
            return datetime_object.strftime("%H:%M:%S %A %d %B %Y")
        # human-readable date with time with seconds
        elif style == "day DD month YYYY HH:MM:SS":
            return datetime_object.strftime("%A %d %B %Y %H:%M:%S")
        # human-readable-audible time with seconds and date
        elif style == "HH hours MM minutes SS sounds day DD month YYYY":
            return datetime_object.strftime("%H hours %M minutes %S seconds %A %d %B %Y")
        # human-readable days, hours and minutes
        elif style == "DD:HH:MM":
            return datetime_object.strftime("%d:%H:%M")
        # human-readable days, hours, minutes and seconds
        elif style == "DD:HH:MM:SS":
            return datetime_object.strftime("%d:%H:%M:%S")
        # human-readable time with seconds
        elif style == "HH:MM:SS":
            return datetime_object.strftime("%H:%M:%S")
        # human-readable-audible time with seconds
        elif style == "HH hours MM minutes SS seconds":
            return datetime_object.strftime("%H hours %M minutes %S seconds")
        # filename safe
        else:
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
    if type(datetime_object) is datetime.timedelta:
        # The datetime default style is meaningless for a timedelta, so a
        # sensible template default is substituted.
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        # Decompose the total seconds into years/days/hours/minutes/seconds,
        # retaining the running totals before each carry for the {Y}/{D}/{H}/
        # {M}/{S} placeholders.
        seconds_total = seconds
        minutes = int(math.floor(seconds / 60))
        minutes_total = minutes
        seconds -= minutes * 60
        hours = int(math.floor(minutes / 60))
        hours_total = hours
        minutes -= hours * 60
        days = int(math.floor(hours / 24))
        days_total = days
        hours -= days * 24
        years = int(math.floor(days / 365))
        years_total = years
        days -= years * 365
        return style.format(**{
            "Y"   : years_total,
            "D"   : days_total,
            "H"   : hours_total,
            "M"   : minutes_total,
            "S"   : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD"  : str(days).zfill(2),
            "HH"  : str(hours).zfill(2),
            "MM"  : str(minutes).zfill(2),
            "SS"  : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert a 24-hour "HHMM" string to minutes since midnight."""
    return int(HHMM[:2]) * 60 + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time of day as minutes since midnight."""
    moment = datetime.datetime.utcnow()
    return moment.hour * 60 + moment.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop  = None  # string "HHMM" e.g. "1000"
):
    """
    Return True when the current UTC time falls within a daily time range
    (which may wrap past midnight, e.g. "1700--1000"), False when it does
    not, or None when no range is specified.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        time_start, time_stop = time_range.split("--")[0], time_range.split("--")[1]
    minutes_per_day = 1440
    start = HHMM_to_minutes(time_start)
    stop  = HHMM_to_minutes(time_stop)
    # modular arithmetic handles ranges that wrap past midnight
    return (now_in_minutes() - start) % minutes_per_day <= \
        (stop - start) % minutes_per_day
def timer(function):
    """
    Decorator that times each call of the wrapped function using a Clock
    named after the function; the clock registers itself with the global
    clocks registry when one exists.

    The unused ``arguments = inspect.getcallargs(...)`` line of the
    previous implementation has been removed as dead code.
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        clock = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    A stopwatch-style timer accumulating elapsed wall-clock (UTC) time.

    A clock can be started, stopped and reset; it is given a UUID name
    when none is specified and registers itself with the module-level
    ``clocks`` registry when one exists.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        """Create a clock, optionally naming it and starting it immediately."""
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Record the current UTC time as the start time."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Fold outstanding elapsed time into the accumulator and record the stop time."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        """Add the time elapsed since the last update (or since start) to the accumulator."""
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the working start time."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        """Return the accumulated elapsed time as a datetime.timedelta."""
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the accumulated elapsed time in seconds."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" when never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" when never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multiline text report of the clock attributes."""
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the clock report."""
        print(self.report())
class Clocks(object):
    """
    A registry of Clock instances that can produce aggregate reports:
    per-clock-name mean times ("statistics") or every clock's time
    ("full").
    """
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a Clock instance with this registry."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """Return a text report of the registered clocks in the given style ("statistics" or "full")."""
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks != []:
            if style == "statistics":
                # Create a dictionary of clock types with corresponding lists of
                # times for all instances.
                dictionary_of_clock_types = {}
                # Get the names of all clocks and add them to the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()] = []
                # Record the values of all clocks for their respective names in
                # the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()].append(clock.time())
                # Create a report, calculating the average value for each clock
                # type.
                string = "clock type".ljust(39) + "mean time (s)"
                for name, values in list(dictionary_of_clock_types.items()):
                    string += "\n" +\
                        str(name).ljust(39) + str(sum(values)/len(values))
                string += "\n"
            elif style == "full":
                # Create a report, listing the values of all clocks.
                string = "clock".ljust(39) + "time (s)"
                for clock in self._list_of_clocks:
                    string += "\n" +\
                        str(clock.name()).ljust(39) + str(clock.time())
                string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report in the given style (default: the registry default style)."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track the progress of a long-running task from (fraction, UNIX time)
    data points, estimating completion time by a linear fit.

    In quick-calculation mode, data points are recorded at most once per
    ``update_rate`` seconds and the linear model is fitted to a spread of
    the recorded data only.
    """
    def __init__(
        self
    ):
        self.data = []
        self.quick_calculation = False
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Record data sparsely and fit the model to a spread of points."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Return to recording and fitting every data point."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """Record a completion fraction with a UNIX timestamp and return a status message."""
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            # in quick mode, record at most one datum per update_rate seconds
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """Extrapolate the linear fit to fraction 1 and return the estimated completion datetime (0 with <= 1 datum)."""
        if len(self.data) <= 1:
            return 0
        else:
            try:
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            except:
                # a degenerate fit falls back to the epoch
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the styled estimated completion time (the current time with <= 1 datum)."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated seconds remaining (never negative; 0 with <= 1 datum)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded completion fraction."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recently recorded completion percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a one-line status message (only the default style None is implemented)."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """Return a random universally-unique identifier as a string."""
    identifier = uuid.uuid4()
    return str(identifier)
def unique_number(
    style = None
):
    """
    Return a number guaranteed unique within the running session.

    With style "integer 3 significant figures" the sequence is 100, 101,
    ... and an Exception is raised past 999; any other style counts 1, 2,
    3, ... State is held in module-level lists so that uniqueness spans
    all callers.
    """
    if style == "integer 3 significant figures":
        global unique_numbers_3_significant_figures
        if "unique_numbers_3_significant_figures" not in globals():
            unique_numbers_3_significant_figures = []
        if unique_numbers_3_significant_figures:
            next_number = unique_numbers_3_significant_figures[-1] + 1
        else:
            next_number = 100
        unique_numbers_3_significant_figures.append(next_number)
        # three significant figures are exhausted beyond 999
        if unique_numbers_3_significant_figures[-1] > 999:
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    else:
        global unique_numbers
        if "unique_numbers" not in globals():
            unique_numbers = []
        if unique_numbers:
            next_number = unique_numbers[-1] + 1
        else:
            next_number = 1
        unique_numbers.append(next_number)
        return unique_numbers[-1]
def unique_3_digit_number():
    """Return the next session-unique three-digit integer (100-999); raises Exception when exhausted."""
    return unique_number(style = "integer 3 significant figures")
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Make text safe for use as a filename or a URL.

    Non-ASCII characters are transliterated or dropped, characters other
    than word characters, whitespace and hyphens are removed, and
    whitespace is collapsed to underscores (filename mode, the default)
    or to hyphens with lowercasing (URL mode).

    The regular expression patterns are now raw strings; the previous
    plain strings contained escapes such as "\\w" that raise
    SyntaxWarning on Python >= 3.12.
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
# @detail This function returns a filename string. If a default filename is not
# specified, the function generates one based on the current time. If a default
# filename is specified, the function uses it as the default filename. By
# default, the function then checks to see if using the filename would cause
# overwriting of an existing file. If overwriting is possible, the function
# appends an integer to the filename in a loop in order to generate a filename
# that would not cause overwriting of an existing file. The function can be set
# to overwrite instead of using the default overwrite protection behaviour.
# @return filename string
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    """
    Return a safe filename string.

    When no filename is given, one is generated from the current UTC
    time. The name is optionally slugified (by default leaving the
    extension untouched). Unless overwrite is True, an integer suffix is
    appended as needed so the returned name does not collide with an
    existing file.
    """
    if not filename:
        filename = time_UTC()
    proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base      = os.path.splitext(os.path.basename(filename))[0]
            extension = os.path.splitext(os.path.basename(filename))[1]
            proposed  = slugify(text = base) + extension
        else:
            proposed = slugify(text = filename)
    if not overwrite:
        count = 0
        while os.path.exists(proposed):
            count += 1
            # collision candidates derive from the original (unslugified) name
            directory = os.path.dirname(filename)
            base      = os.path.splitext(os.path.basename(filename))[0]
            extension = os.path.splitext(os.path.basename(filename))[1]
            candidate = base + "_" + str(count) + extension
            if directory:
                proposed = directory + "/" + candidate
            else:
                proposed = candidate
    return proposed
def tmp_filepath():
    """
    Return an extensionless filepath in the system temporary directory
    without creating a file at the filepath.

    The previous implementation relied on the private CPython helper
    tempfile._get_candidate_names and a hard-coded "/tmp" prefix; a UUID
    basename under tempfile.gettempdir() is portable and collision-safe.
    """
    return os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Check that the platform release string contains a keyphrase.

    When it does not, optionally log a warning (warn) and, when require
    is True, log a fatal message and raise EnvironmentError.
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message = \
        "inappropriate environment: " + \
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release   = release
        )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """Raise EnvironmentError when the named program is not found via which; log the outcome either way."""
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is not None:
        log.debug("program {program} available".format(
            program = program
        ))
    else:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
def which(
    program
):
    """
    Return the full path of an executable, mimicking the shell command
    "which": a program given with a directory component is returned
    as-is when executable, otherwise each directory on PATH is searched;
    None is returned when nothing is found.
    """
    def executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    directory, _basename = os.path.split(program)
    if directory:
        if executable(program):
            return(program)
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(path.strip('"'), program)
            if executable(candidate):
                return candidate
    return None
def running(
    program
):
    """Return True when "ps -A" lists a non-defunct process whose line contains the program name."""
    needle = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    for line in listing:
        if needle in line and b"defunct" not in line:
            return True
    return False
def ensure_file_existence(
    filename
):
    """
    Raise IOError when the specified file (with environment variables
    expanded) does not exist; log the check and its outcome.

    The log format strings previously contained no placeholder (the
    literal text "(unknown)" where "{filename}" belongs), so the messages
    never included the filename; the placeholders are restored.
    """
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the specified file from the filesystem."""
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Find filenames in a directory that contain a number and end with the
    given extension, grouped by the pattern obtained when digit runs are
    replaced by "XXX".

    Return a naturally-sorted list for the first sequence found, or the
    full pattern-to-filenames dictionary when return_first_sequence_only
    is False. The substitution pattern is now a raw string; the previous
    plain "\\d+" raises SyntaxWarning on Python >= 3.12.
    """
    filenames_of_directory = os.listdir(directory)
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        pattern = re.sub(r"\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        first_key_identified = next(iter(filename_sequences.keys()))
        filename_sequence = \
            natural_sort(filename_sequences[first_key_identified])
        return filename_sequence
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the names of regular files (not directories) directly within the given directory."""
    entries = os.listdir(directory)
    return [
        entry for entry in entries
        if os.path.isfile(os.path.join(directory, entry))
    ]
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """Return the paths of all files under the given directory, recursing into subdirectories."""
    listing = []
    for root, _directories, filenames in os.walk(directory):
        listing.extend(os.path.join(root, filename) for filename in filenames)
    return listing
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return absolute paths of the files at a directory, optionally keeping
    only those whose extension contains extension_required; raise IOError
    when the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for entry in os.listdir(directory):
        candidate = os.path.join(directory, entry)
        if os.path.isfile(candidate):
            filepaths.append(os.path.abspath(candidate))
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command = None,
    background = True,
    timeout = None
):
    """
    Run a shell command via Bash.

    With background True (the default) the command is launched detached
    and None is returned immediately (any timeout is ignored, with a
    warning). With background False the call blocks and the command's
    STDOUT is returned as bytes, or False when the command exceeds the
    timeout or otherwise fails to complete.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash"
        )
        return None
    elif not background:
        process = subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash",
            stdout = subprocess.PIPE
        )
        try:
            process.wait(timeout = timeout)
            output, errors = process.communicate(timeout = timeout)
            return output
        except:
            # a timeout (or any other failure) kills the process and
            # yields False rather than raising
            process.kill()
            return False
    else:
        # unreachable for Boolean background values
        return None
def percentage_power():
    """
    Return the battery charge percentage as reported by upower (e.g.
    "87%"), "100%" when only a line-power device is present, or None on
    any failure.

    The upower query command previously contained the literal text
    "(unknown)" where the device path placeholder "{filename}" belongs,
    so the per-device query could never succeed; the placeholder is
    restored.
    NOTE(review): engage_command defaults to background = True, which
    returns None — confirm whether these calls are expected to rely on
    the except fallback or should pass background = False.
    """
    try:
        filenames_power = engage_command(command = "upower -e")
        filenames_power = [line for line in filenames_power.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery))
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    except:
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a
    list to a specified type and return the new list.

    Previously only element_type str was handled and any other type
    silently returned None; the conversion is now applied for any
    callable type, remaining backward-compatible for the default str
    case.
    """
    return [
        element_type(element) if not isinstance(element, list)
        else convert_type_list_elements(
            list_object  = element,
            element_type = element_type
        )
        for element in list_object
    ]
class List_Consensus(list):
    """
    This class is designed to instantiate a list of elements. It features
    functionality that limits approximately the memory usage of the list. On
    estimating the size of the list as greater than the specified or default
    size limit, the list reduces the number of elements it contains. The list
    provides functionality to return its most frequent element, which can be
    used to determine its "consensus" element.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        # approximate size limit in bytes (sys.getsizeof of the list object)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate size limit in bytes (None leaves it unchanged)."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        This function removes the least frequent elements until the size
        constraint is met.
        """
        if size is None:
            size = self.size_constraint
        # NOTE: sys.getsizeof is a shallow measure of the list object only,
        # not of the elements it references.
        while sys.getsizeof(self) > size:
            element_frequencies = collections.Counter(self)
            infrequent_element = element_frequencies.most_common()[-1:][0][0]
            self.remove(infrequent_element)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append an element, then (by default) trim least frequent elements to respect the size constraint."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None when the list is empty or elements are unhashable."""
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        except:
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
):
    """Return the list sorted naturally, so "a2" precedes "a10" (case-insensitive)."""
    def key_function(text):
        return [
            int(token) if token.isdigit() else token.lower()
            for token in re.split("([0-9]+)", text)
        ]
    return sorted(list_object, key = key_function)
def indices_of_list_element_duplicates(
    x
):
    """
    Yield the index of every element that repeats an earlier element;
    lists and dictionaries are converted to tuples so they can be hashed.
    """
    observed = set()
    for index, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
):
    """
    Return the indices of the number greatest values of x, ordered by
    ascending value; when x has fewer elements, all indices are returned.
    """
    number = min(number, len(x))
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _value in ranked[-number:]]
def unique_list_elements(x):
    """Return the elements of x with duplicates removed, first-occurrence order preserved (works for unhashable elements)."""
    result = []
    for element in x:
        if element in result:
            continue
        result.append(element)
    return result
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    This function returns the specified number of elements of a list spread
    approximately evenly.
    """
    # When the request is not smaller than the list, return everything.
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    # A single element is taken from the middle of the list.
    if number_of_elements == 1:
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # Otherwise take one element a half-stride into the list and recurse on
    # the remainder of the list for the remaining elements.
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def split_list(
    list_object = None,
    granularity = None
):
    """
    Split a list into approximately granularity equal consecutive parts
    and return them as a list of lists. Negative granularity raises an
    Exception; granularity greater than the list length yields one
    single-element list per element.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    mean_length = len(list_object) / float(granularity)
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    parts = []
    cursor = float(0)
    while cursor < len(list_object):
        parts.append(list_object[int(cursor):int(cursor + mean_length)])
        cursor += mean_length
    return parts
def ranges_edge_pairs(
    extent = None,
    range_length = None
):
    """
    Return the edges of ranges within an extent of some length. For example, to
    separate 76 variables into groups of at most 20 variables, the ranges of the
    variables could be 0 to 20, 21 to 41, 42 to 62 and 63 to 76. These range
    edges could be returned by this function as a list of tuples:
    >>> ranges_edge_pairs(
    ...     extent       = 76, # number of variables
    ...     range_length = 20  # maximum number of variables per plot
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    number_of_ranges = int(math.ceil(extent / range_length))
    pairs = []
    for index in range(number_of_ranges):
        # each range starts one past the previous stop
        start = index * (range_length + 1)
        stop  = min(start + range_length, extent)
        pairs.append((start, stop))
    return pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """
    Parse a Markdown bullet list of "- name: value" entries into nested
    dictionaries; an entry without a value opens a new branch whose
    children are the more-indented entries that follow.
    """
    entry = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [{}]
    for spaces, name, value in entry.findall(Markdown_list):
        level = len(spaces)
        if level > depth:
            assert not stack[-1], "unexpected indent"
        elif level < depth:
            stack.pop()
        if value:
            stack[-1][name] = value
        else:
            branch = {}
            stack[-1][name] = branch
            stack.append(branch)
        depth = level
    return stack[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """
    Parse a Markdown bullet list of "- name: value" entries into nested
    OrderedDicts, preserving entry order; an entry without a value opens
    a new branch whose children are the more-indented entries that
    follow.
    """
    entry = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [collections.OrderedDict()]
    for spaces, name, value in entry.findall(Markdown_list):
        level = len(spaces)
        if level > depth:
            assert not stack[-1], "unexpected indent"
        elif level < depth:
            stack.pop()
        if value:
            stack[-1][name] = value
        else:
            branch = collections.OrderedDict()
            stack[-1][name] = branch
            stack.append(branch)
        depth = level
    return stack[0]
def open_configuration(
    filename = None
):
    """
    Read a Markdown-list configuration file and return it parsed as an
    OrderedDict (see Markdown_list_to_OrderedDict).

    The file handle is now closed deterministically via a context
    manager; the previous implementation leaked it.
    """
    with open(filename, "r") as file_configuration:
        content = file_configuration.read()
    return Markdown_list_to_OrderedDict(content)
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
):
    """
    Resample a list of values to a new length by interpolating over its
    indices; return the resampled values, or the (x, y) lists when
    dimensions is 2.
    """
    x_original = list(range(0, len(values)))
    interpolate = scipy.interpolate.interp1d(
        x_original,
        values,
        kind = interpolation_type
    )
    x_new = list(numpy.linspace(min(x_original), max(x_original), length))
    y_new = [float(interpolate(x)) for x in x_new]
    if dimensions == 2:
        return (x_new, y_new)
    elif dimensions == 1:
        return y_new
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """
    Convert a waveform (an array of samples supporting Boolean-mask
    assignment, e.g. a NumPy array) in place to a rectangle waveform and
    return it: non-negative samples become fraction_amplitude of the
    maximum, negative samples fraction_amplitude of the minimum, and all
    samples are then scaled back up by 1 / fraction_amplitude.

    NOTE(review): min(values) on the second line is evaluated after the
    non-negative samples have already been overwritten, so the negative
    level derives from the partially-mutated array — confirm this order
    dependence is intended.
    """
    values[values >= 0] = fraction_amplitude * max(values)
    values[values < 0] = fraction_amplitude * min(values)
    values[:] = [x * (1 / fraction_amplitude) for x in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
):
    """
    Read a WAV file, convert its waveform to a rectangle waveform (see
    change_waveform_to_rectangle_waveform) and write the result to a WAV
    file, defaulting to a non-clobbering variant of the input filename.

    The previous implementation repeated the three rectangle-transform
    statements inline after already calling the helper, applying the
    transformation twice; the redundant duplicate lines are removed.
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename  = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    values = change_waveform_to_rectangle_waveform(
        values             = values,
        fraction_amplitude = fraction_amplitude
    )
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """Divide each element of x by summation (default: the sum of x, normalizing to unity)."""
    divisor = sum(x) if summation is None else summation
    return [element / divisor for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """Linearly rescale the elements of x so they span [minimum, maximum]."""
    low  = min(x)
    high = max(x)
    return [
        minimum + (element - low) * ((maximum - minimum) / (high - low))
        for element in x
    ]
def composite_variable(
    x
):
    """
    Combine the elements of x into a single number using positional
    weights of base k = len(x) + 1 (the first element is weighted by
    k ** -1); an empty list yields 0.
    """
    k = len(x) + 1
    total = 0
    for index, element in enumerate(x):
        total += k ** (index - 1) * element
    return total
def model_linear(
    data = None,
    quick_calculation = False
):
    """
    Fit a least-squares line to (x, y) data points and return (b0, b1),
    the intercept and slope. With quick_calculation True, the fit uses a
    spread of only 10 points selected via select_spread.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x  = sum(x for x, _ in data)
    sum_y  = sum(y for _, y in data)
    sum_xx = sum(x ** 2 for x, _ in data)
    sum_xy = sum(x * y for x, y in data)
    b1 = (sum_xy - (sum_x * sum_y) / n) / \
         (sum_xx - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
):
    """
    Load and return a pickled object from the specified file.

    The file handle is now closed deterministically via a context manager
    (the previous implementation leaked it). NOTE: unpickling executes
    arbitrary code — only use on trusted files.
    """
    with open(filename, "rb") as file_object:
        return pickle.load(file_object)
def export_object(
    x,
    filename = None,
    overwrite = False
):
    """
    Pickle object x to the specified file.

    The filename is passed through propose_filename, which (unless
    overwrite is True) appends an integer to avoid clobbering an
    existing file. The file handle is now closed deterministically via a
    context manager (the previous implementation leaked it).
    """
    filename = propose_filename(
        filename  = filename,
        overwrite = overwrite
    )
    with open(filename, "wb") as file_object:
        pickle.dump(x, file_object)
def string_to_bool(x):
    """Return True when the string is a truthy token: "yes", "true", "t" or "1" (case-insensitive)."""
    truthy_tokens = ("yes", "true", "t", "1")
    return x.lower() in truthy_tokens
def ustr(text):
    """
    Convert text to the native unicode string type (str on Python 3,
    unicode on Python 2); None passes through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
):
    """
    Return English text for an integer (or its string representation),
    e.g. 123 -> "one hundred twenty three". Names up to vigintillions
    are supported; 0 yields an empty string. Assumes a non-negative
    integer — negative input is not handled.
    """
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            # Keep only the leading 1, 2 or 3 digits of the trailing slice,
            # depending on how many digits remain.
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """
    Replace every run of digits in text with its English representation
    (via number_to_English_text), leaving the surrounding text unchanged.

    The split pattern is now a raw string; the previous plain "(\\d+)"
    raises SyntaxWarning on Python >= 3.12.
    """
    # Split the text into alternating text and digit segments.
    text = re.split(r"(\d+)", text)
    if text[-1] == "":
        text = text[:-1]
    text_translated = []
    # Replace digit segments with English text.
    for text_segment in text:
        if all(character.isdigit() for character in text_segment):
            text_translated.append(number_to_English_text(number = text_segment))
        else:
            text_translated.append(text_segment)
    return "".join(text_translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
):
    """
    This function replaces contractions with full words and replaces numbers
    with digits in specified text. There is the option to remove articles.

    The contraction and number lookup tables are now built once before the
    loop; the previous implementation rebuilt both dictionaries on every
    word iteration.
    """
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    words = text.split()
    text_translated = ""
    for word in words:
        if remove_articles and word in ["a", "an", "the"]:
            continue
        if word in contractions_expansions:
            word = contractions_expansions[word]
        if word in numbers_digits:
            word = numbers_digits[word]
        text_translated += " " + word
    text_translated = text_translated.strip()
    return text_translated
def split_into_sentences(
    text = None
):
    """
    Split text into a list of sentence strings.

    The approach is regex-based: periods that do not terminate a sentence
    (titles such as "Mr.", acronyms, single-letter initials, website domains)
    are temporarily replaced by the token "<prd>", genuine sentence-ending
    punctuation is tagged with the token "<stop>", and the text is then split
    on "<stop>". The order of the substitutions below matters; do not reorder.
    """
    capitals = "([A-Z])"
    prefixes = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    starters = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = "[.](com|gov|io|net|org|pro)"
    # Pad the text so boundary patterns that rely on surrounding spaces also
    # match at the very start and end.
    text = " " + text + " "
    text = text.replace("\n", " ")
    # Protect periods in titles (e.g. "Mr.") and website top-level domains.
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    # Protect single-letter initials such as " J. Smith".
    text = re.sub("\s" + capitals + "[.] ", " \\1<prd> ", text)
    # An acronym followed by a typical sentence starter is a real break.
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    # Protect three- and two-letter acronyms, e.g. "U.S.A." and "U.S.".
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + capitals + "[.]", " \\1<prd>", text)
    # Move terminal punctuation outside closing quotation marks so the stop
    # token is placed after the quote.
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    # Tag the remaining (genuine) terminators, then restore protected periods.
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    # The final fragment after the last terminator is discarded; it is either
    # the trailing padding or an unterminated fragment.
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
):
    """
    Return the text with its first detected sentence removed and the
    remaining sentences joined by single spaces.
    """
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
return "{aa:02x}:{bb:02x}:{cc:02x}:{dd:02x}:{ee:02x}:{ff:02x}".format(
aa = random.randint(0, 255),
bb = random.randint(0, 255),
cc = random.randint(0, 255),
dd = random.randint(0, 255),
ee = random.randint(0, 255),
ff = random.randint(0, 255)
)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
):
    """
    Return the value of the named attribute of an object, or a default value
    if the attribute cannot be accessed.

    The name may include a single integer index in square brackets (e.g.
    "scores[2]"), in which case the named attribute is indexed before being
    returned.

    arguments:
        object_instance:          object from which to read the attribute
        name:                     attribute name, optionally with "[index]"
        imputation_default_value: value returned on any access failure
    """
    try:
        if "[" in name and "]" in name:
            # e.g. "scores[2]" --> getattr(object_instance, "scores")[2]
            index = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    except Exception:
        # Narrowed from a bare except: a bare except would also swallow
        # KeyboardInterrupt and SystemExit; only genuine access failures
        # (missing attribute, bad index, unparsable name) trigger imputation.
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
):
    """
    Return a list of valid Python variable names, each a 32 character
    hexadecimal UUID string whose first character happens to be a letter.
    """
    names = []
    while len(names) < number:
        candidate = uuid.uuid4().hex
        if candidate[0].isalpha():
            names.append(candidate)
    return names
def add_time_variables(df, reindex = True):
    """
    Return a DataFrame with variables for month, month name, weekday index,
    weekday name, timedelta through day, fraction through day, hour, hours
    through day, days through week and days through year added, with the
    index set to datetime. It is assumed that the variable `datetime` exists;
    False is returned if it does not. (The `reindex` argument is retained for
    backward compatibility but is not currently used.)
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # Series.dt.weekday_name was removed in pandas 1.0; strftime("%A") is the
    # portable equivalent (matching the month_name approach above).
    df["weekday_name"] = df["datetime"].dt.strftime("%A")
    # timedelta elapsed since that day's midnight
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    # continuous position in the week: Monday 00:00 = 0.0, Sunday 24:00 = 7.0
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    df.index = df["datetime"]
    #del df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create daily plots of a variable in a DataFrame, optionally renormalized. It
    is assumed that the DataFrame index is datetime and that the variable
    `hours_through_day` (as added by add_time_variables) exists.

    One line and/or scatter series is drawn per day on a shared
    hours-through-day horizontal axis. With renormalize True, each day's
    values are min-max scaled to [0, 1] independently.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # one group per day-of-month number -- NOTE(review): days with the same
    # day number in different months are grouped together; confirm intended
    days = []
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        if renormalize:
            # per-day min-max scaling to [0, 1]
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create weekly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the variable `days_through_week` (as added by
    add_time_variables) exists and that the DataFrame index is datetime.

    One line and/or scatter series is drawn per week on a shared
    days-through-week horizontal axis labelled with weekday names. With
    renormalize True, each week's values are min-max scaled to [0, 1]
    independently.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    # one group per ISO week number -- NOTE(review): DatetimeIndex.week is
    # removed in pandas 2.0 (isocalendar().week is the replacement); confirm
    # the pandas version this module is pinned to
    weeks = []
    for group in df.groupby(df.index.week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            # per-week min-max scaling to [0, 1]
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    # ticks at each day's midpoint, labelled with the weekday name
    plt.xticks(
        [ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create yearly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the DataFrame index is datetime and that the variable
    `days_through_year` (as added by add_time_variables) exists.

    One line and/or scatter series is drawn per calendar year on a shared
    days-through-year horizontal axis, with a legend entry per year. With
    renormalize True, each year's values are min-max scaled to [0, 1]
    independently. (The horizontal_axis_labels_days argument is accepted but
    not currently used.)
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # one group per calendar year
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year in years:
        if renormalize:
            # per-year min-max scaling to [0, 1]
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        if plot:
            plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # ticks at the approximate day-of-year midpoint of each month
        plt.xticks(
            [ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df = None,
    variable = None,
    window = 20,
    upper_factor = 2,
    lower_factor = 2
):
    """
    Add rolling statistics variables derived from a specified variable in a
    DataFrame.

    Four variables are added: the rolling mean and rolling standard deviation
    of the specified variable over the specified window, plus Bollinger-style
    upper and lower bounds placed upper_factor/lower_factor standard
    deviations above/below the rolling mean.

    arguments:
        df:           DataFrame to extend in place (also returned)
        variable:     name of the source variable
        window:       rolling window size in rows
        upper_factor: standard deviations above the mean for the upper bound
        lower_factor: standard deviations below the mean for the lower bound
    """
    # pandas.stats.moments.rolling_mean/rolling_std were removed from pandas;
    # the Series.rolling accessor is the modern equivalent.
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"] = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = [],
    variables_exclude = []
):
    """
    Rescale variables in a DataFrame to the unit interval (min-max scaling),
    excluding variables with NaNs and strings, excluding specified variables,
    and including specified variables.

    arguments:
        df:                DataFrame whose variables are rescaled in place
        variables_include: variables to rescale regardless of exclusions
        variables_exclude: variables to leave unscaled

    The include/exclude lists passed by the caller are not modified.
    """
    # Copy the exclusion list: the previous implementation aliased and then
    # extended the caller's list (and the shared mutable default argument),
    # so exclusions accumulated across calls and leaked back to the caller.
    variables_not_rescale = list(variables_exclude)
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Create a day-long histogram (bar plot) of counts of the variable for each
    hour of the day, using the hour component of the datetime index. It is
    assumed that the DataFrame index is datetime; False is returned otherwise.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # count non-null values of the variable per hour of day (0--23)
    counts = df.groupby(df.index.hour)[variable].count()
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
):
    """
    Create a week-long histogram (bar plot) of counts of the variable for each
    day, ordered Monday to Sunday. It is assumed that the DataFrame index is
    datetime; False is returned otherwise.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # DatetimeIndex.weekday_name was removed in pandas 1.0; strftime("%A") is
    # the portable way to get day names (matching histogram_month_counts).
    counts = df.groupby(df.index.strftime("%A"))[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
):
    """
    Create a year-long histogram (bar plot) of counts of the variable for each
    month, ordered January to December using the month names of the datetime
    index. It is assumed that the DataFrame index is datetime; False is
    returned otherwise.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # count non-null values per month name, reindexed to calendar order
    counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Set up a Jupyter notebook with a few defaults: paper-context seaborn
    styling with a monospaced font, suppressed warnings, generous pandas
    display limits and a large default figure size.
    """
    sns.set(context = "paper", font = "monospace")
    # silence library warnings in notebook output
    warnings.filterwarnings("ignore")
    # show up to 500 rows/columns when displaying DataFrames
    pd.set_option("display.max_rows", 500)
    pd.set_option("display.max_columns", 500)
    # large default figure size (inches)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
):
    """
    Display a progress bar widget in a Jupyter notebook. Its dependencies must
    be enabled on launching Jupyter, such as in the following way:
    jupyter nbextension enable --py widgetsnbextension
    The progress bar can be used in a way like the following:
    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)

    arguments:
        sequence: iterable to wrap; its items are yielded unchanged
        every:    update the widget every this many items (default: every
                  item for small sequences, ~0.5 % of the size otherwise)
        size:     length of the sequence (required for iterators that do not
                  support len)
        name:     label displayed next to the progress counter
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            # len() unavailable: fall back to indeterminate-progress mode
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200) # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        # an always-full bar in "info" style indicates unknown total size
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            # refresh the widget on the first item and every `every` items
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name = name,
                        index = index,
                        size = size
                    )
            yield record
    except:
        # mark the bar red if the consuming loop raised, then re-raise
        progress.bar_style = "danger"
        raise
    else:
        # mark the bar green and show the final count on clean completion
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name = name,
            index = str(index or "?")
        )
# Module initialisation on import -- _main is defined earlier in this file;
# presumably it sets up module-level state (e.g. the global clock registry).
_main()
|
wdbm/shijian
|
shijian.py
|
convert_type_list_elements
|
python
|
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a
    list to a specified type and return the new list.

    arguments:
        list_object:  list (possibly nested) whose elements are converted
        element_type: callable type applied to each non-list element
    """
    # Generalized: the previous implementation handled only element_type str
    # and implicitly returned None for any other requested type.
    return [
        convert_type_list_elements(
            list_object = element,
            element_type = element_type
        ) if isinstance(element, list) else element_type(element)
        for element in list_object
    ]
|
Recursively convert all elements and all elements of all sublists of a list
to a specified type and return the new list.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L878-L890
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
name = "shijian"
version = "2018-06-02T1644Z"
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    # Create the module-level clock registry that Clock instances register
    # with on construction (Clock.__init__ checks for a global named "clocks").
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
):
    """
    Return the current UTC time styled by style_datetime_object, as whole
    UNIX seconds by default.
    """
    current_time = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = current_time, style = style)
def time_UTC(
    style = None
):
    """
    Return the current UTC time styled by style_datetime_object (style None
    selects that function's default style).
    """
    current_time = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = current_time, style = style)
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
):
    """
    Return a proposed filename based on the current UNIX time, with an
    optional extension appended, adjusted by propose_filename to avoid
    overwriting existing files.
    """
    filename = str(time_UNIX(style = style))
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
):
    """
    Return a proposed filename based on the current UTC time, with an
    optional extension appended, adjusted by propose_filename to avoid
    overwriting existing files.
    """
    filename = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def style_minimal_seconds(seconds):
    """
    Style a duration in seconds as a minimal human-readable string, omitting
    any interval whose value is zero, e.g. "1 days 2 hours 5 seconds".
    """
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for interval in ("days", "hours", "minutes", "seconds"):
        value = getattr(delta, interval)
        if value:
            parts.append("{value} {interval}".format(
                value = int(value), interval = interval
            ))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Style a UNIX timestamp (seconds, interpreted as UTC) using the specified
    style of style_datetime_object.
    """
    datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(datetime_object = datetime_object, style = style)
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Style a datetime.datetime or datetime.timedelta object as a string.

    For datetime objects, the style is a descriptive key (e.g.
    "YYYY-MM-DDTHHMMZ", "UNIX time S") selecting either a strftime pattern or
    a UNIX epoch conversion; unrecognised styles fall back to the
    filename-safe default "YYYY-MM-DDTHHMMZ". For timedelta objects, the
    style is a str.format template with fields for total and component years,
    days, hours, minutes and seconds. Objects of any other type yield None.
    """
    if type(datetime_object) is datetime.datetime:
        # Epoch conversions return numbers rather than strftime strings.
        if style == "UNIX time S.SSSSSS":
            return (datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds()
        if style == "UNIX time S":
            return int((datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds())
        strftime_patterns = {
            # filename safe
            "YYYY-MM-DDTHHMMZ": "%Y-%m-%dT%H%MZ",
            # filename safe with seconds
            "YYYY-MM-DDTHHMMSSZ": "%Y-%m-%dT%H%M%SZ",
            # filename safe with seconds and microseconds
            "YYYY-MM-DDTHHMMSSMMMMMMZ": "%Y-%m-%dT%H%M%S%fZ",
            # elegant
            "YYYY-MM-DD HH:MM:SS UTC": "%Y-%m-%d %H:%M:%S UTC",
            "YYYY-MM-DD HH:MM:SS Z": "%Y-%m-%d %H:%M:%S Z",
            # human-readable date
            "day DD month YYYY": "%A %d %B %Y",
            # human-readable time and date
            "HH:MM day DD month YYYY": "%H:%M %A %d %B %Y",
            "HH:MM:SS day DD month YYYY": "%H:%M:%S %A %d %B %Y",
            "day DD month YYYY HH:MM:SS": "%A %d %B %Y %H:%M:%S",
            # human-readable-audible (the key historically says "sounds")
            "HH hours MM minutes SS sounds day DD month YYYY": "%H hours %M minutes %S seconds %A %d %B %Y",
            "DD:HH:MM": "%d:%H:%M",
            "DD:HH:MM:SS": "%d:%H:%M:%S",
            "HH:MM:SS": "%H:%M:%S",
            "HH hours MM minutes SS seconds": "%H hours %M minutes %S seconds"
        }
        # Unrecognised styles fall back to the filename-safe default.
        pattern = strftime_patterns.get(style, "%Y-%m-%dT%H%MZ")
        return datetime_object.strftime(pattern)
    if type(datetime_object) is datetime.timedelta:
        if style == "YYYY-MM-DDTHHMMZ":
            # the datetime default style has a timedelta-appropriate template
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        # Decompose the total seconds into components, recording the running
        # totals for each unit along the way.
        seconds_total = seconds
        minutes, seconds = divmod(seconds, 60)
        minutes_total = minutes
        hours, minutes = divmod(minutes, 60)
        hours_total = hours
        days, hours = divmod(hours, 24)
        days_total = days
        years, days = divmod(days, 365)
        years_total = years
        return style.format(**{
            "Y"   : years_total,
            "D"   : days_total,
            "H"   : hours_total,
            "M"   : minutes_total,
            "S"   : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD"  : str(days).zfill(2),
            "HH"  : str(hours).zfill(2),
            "MM"  : str(minutes).zfill(2),
            "SS"  : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """
    Convert a 24 hour time string of the form "HHMM" to the number of minutes
    since midnight, e.g. HHMM_to_minutes("0130") --> 90.
    """
    return int(HHMM[:2]) * 60 + int(HHMM[2:])
def now_in_minutes():
    """
    Return the current UTC time as the number of minutes since midnight.
    """
    current_time = datetime.datetime.utcnow()
    return current_time.hour * 60 + current_time.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop = None # string "HHMM" e.g. "1000"
):
    """
    Return True if the current UTC time falls within a daily time range,
    which may wrap around midnight (e.g. "1700--1000"). The range is given
    either as a single "HHMM--HHMM" string or as separate start and stop
    "HHMM" strings; with no arguments, return None.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        time_start, time_stop = time_range.split("--")[0], time_range.split("--")[1]
    minutes_per_day = 1440
    start = HHMM_to_minutes(time_start)
    stop = HHMM_to_minutes(time_stop)
    # Modular arithmetic handles ranges that wrap around midnight.
    elapsed_since_start = (now_in_minutes() - start) % minutes_per_day
    range_length = (stop - start) % minutes_per_day
    return elapsed_since_start <= range_length
def timer(function):
    """
    Decorator that times each call of the decorated function with a Clock
    named after the function; the clock registers itself with the global
    clocks registry (when one exists) for later reporting.
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # The previous implementation also computed
        # inspect.getcallargs(function, *args, **kwargs) on every call and
        # discarded the result; that dead (and deprecated) work is removed.
        clock = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    A wall-clock stopwatch based on UTC datetimes. A clock accumulates
    elapsed time between start and stop calls, can be updated incrementally,
    and registers itself with the module-level registry (the global `clocks`
    created by _main) when that registry exists.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        # record the start both for reporting and for elapsed calculations
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        # Fold any outstanding running time into the accumulator, clear the
        # running-state markers, then record the stop time.
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        # zero the accumulated time and forget the running start marker
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        # clock name (a UUID string unless one was specified)
        return self._name
    def time(self):
        # elapsed time in seconds, as a float
        return self.elapsed().total_seconds()
    def start_time(self):
        # styled start time, or "none" if the clock was never started
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        # styled stop time, or "none" if the clock was never stopped
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        # tabular single-clock report: name, start, stop and elapsed times
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        print(self.report())
class Clocks(object):
    """
    A registry of Clock instances with aggregate reporting. Clock objects add
    themselves to the module-level instance (the global `clocks`) on
    construction; report() summarises either mean times per clock name
    ("statistics" style, the default) or every individual clock ("full").
    """
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        # NOTE(review): the "no clocks" branch below covers only an empty
        # registry; a non-empty registry with an unknown style leaves
        # "string" unassigned and the return raises UnboundLocalError --
        # confirm whether that is intended.
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks != []:
            if style == "statistics":
                # Create a dictionary of clock types with corresponding lists of
                # times for all instances.
                dictionary_of_clock_types = {}
                # Get the names of all clocks and add them to the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()] = []
                # Record the values of all clocks for their respective names in
                # the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()].append(clock.time())
                # Create a report, calculating the average value for each clock
                # type.
                string = "clock type".ljust(39) + "mean time (s)"
                for name, values in list(dictionary_of_clock_types.items()):
                    string += "\n" +\
                        str(name).ljust(39) + str(sum(values)/len(values))
                string += "\n"
            elif style == "full":
                # Create a report, listing the values of all clocks.
                string = "clock".ljust(39) + "time (s)"
                for clock in self._list_of_clocks:
                    string += "\n" +\
                        str(clock.name()).ljust(39) + str(clock.time())
                string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track the progress of a long-running task as (fraction complete, UNIX
    time) data and estimate its completion time by linear extrapolation.
    Quick-calculation mode rate-limits how often data are recorded (at most
    once per update_rate seconds) using an internal Clock.
    """
    def __init__(
        self
    ):
        self.data = []
        self.quick_calculation = False
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        # Record a (fraction, UNIX time) datum and return a status string.
        # In quick-calculation mode, data are recorded at most once per
        # update_rate seconds (the first datum is always recorded).
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        # Fit a line to the recorded data and evaluate it at fraction = 1.
        # NOTE(review): model_linear is defined elsewhere in this module;
        # presumably it returns (intercept, slope) -- confirm. Returns the
        # integer 0 (not a datetime) when fewer than two data exist.
        if len(self.data) <= 1:
            return 0
        else:
            try:
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            except:
                # on any fitting failure, fall back to the epoch
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        # styled completion time; with insufficient data, the styled current time
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        # seconds until the estimated completion time, clamped at zero
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        # most recently recorded fraction complete
        return self.data[-1][0]
    def percentage(
        self
    ):
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        # default style: percentage plus ETA/ETR, terminated with a carriage
        # return so repeated prints overwrite the same console line
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """
    Return a universally unique identifier as a version 4 UUID string.
    """
    identifier = uuid.uuid4()
    return str(identifier)
def unique_number(
    style = None
):
    """
    Return an integer that is unique within the current process, using
    module-level global lists as the registry of previously-issued numbers.

    With style "integer 3 significant figures", numbers are issued from 100
    upwards and an exception is raised once the pool (100--999) is exhausted;
    for any other style, numbers are issued from 1 upwards without bound.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        initial_number = 100
        # lazily create the process-wide registry on first use
        if "unique_numbers_3_significant_figures" not in globals():
            global unique_numbers_3_significant_figures
            unique_numbers_3_significant_figures = []
        if not unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(initial_number)
        else:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        # 3 significant figure pool exhausted
        if\
            style == "integer 3 significant figures" and \
            unique_numbers_3_significant_figures[-1] > 999:
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        initial_number = 1
        # lazily create the process-wide registry on first use
        if "unique_numbers" not in globals():
            global unique_numbers
            unique_numbers = []
        if not unique_numbers:
            unique_numbers.append(initial_number)
        else:
            unique_numbers.append(unique_numbers[-1] + 1)
        return unique_numbers[-1]
def unique_3_digit_number():
    # Convenience wrapper: next unique integer in the range 100--999 (raises
    # once the 3 significant figure pool is exhausted).
    return unique_number(style = "integer 3 significant figures")
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Make text safe for use as a filename or URL by normalising it to ASCII,
    removing characters that are not alphanumerics, underscores, hyphens or
    whitespace, and then replacing whitespace runs with underscores (filename
    mode, the default) or lowercasing and replacing whitespace/hyphen runs
    with hyphens (URL mode).

    arguments:
        text:       text to make safe
        filename:   replace whitespace with underscores (ignored if URL)
        URL:        lowercase and replace whitespace/hyphen runs with hyphens
        return_str: coerce the result to str before returning
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    # Raw strings for the regex patterns: "\w" and "\s" in plain string
    # literals are invalid escape sequences (a warning in recent Python).
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
# @details This function returns a filename string. If a default filename is not
# specified, the function generates one based on the current time. If a default
# filename is specified, the function uses it as the default filename. By
# default, the function then checks to see if using the filename would cause
# overwriting of an existing file. If overwriting is possible, the function
# appends an integer to the filename in a loop in order to generate a filename
# that would not cause overwriting of an existing file. The function can be set
# to overwrite instead of using the default overwrite protection behaviour.
# @return filename string
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    """
    Propose a filename, generating one from the current UTC time if none is
    given, optionally slugifying it (by default leaving the extension
    untouched) and, unless overwriting is enabled, appending an increasing
    integer suffix until the proposal does not collide with an existing file.
    """
    if not filename:
        filename = time_UTC()
    filename_proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base = os.path.splitext(os.path.basename(filename))[0]
            extension = os.path.splitext(os.path.basename(filename))[1]
            filename_proposed = slugify(text = base) + extension
        else:
            filename_proposed = slugify(text = filename)
    if not overwrite:
        # Collision suffixes are built from the original (pre-slugify) name,
        # matching the historical behavior.
        directory = os.path.dirname(filename)
        base = os.path.splitext(os.path.basename(filename))[0]
        extension = os.path.splitext(os.path.basename(filename))[1]
        count = 0
        while os.path.exists(filename_proposed):
            count = count + 1
            candidate = base + "_" + str(count) + extension
            if directory:
                filename_proposed = directory + "/" + candidate
            else:
                filename_proposed = candidate
    return filename_proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating a
    file at the filepath.
    """
    # Use uuid rather than tempfile._get_candidate_names, which is a private
    # API of the tempfile module with no stability guarantee.
    return "/tmp/" + uuid.uuid4().hex
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return a specified number of last lines of a specified file as bytes (via
    the system tail utility, after user/variable expansion of the path). If
    there is an error, the file does not exist or the file is empty, return
    False.
    """
    try:
        filepath = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(filepath):
            return False
        text = subprocess.check_output(["tail", "-" + str(lines), filepath])
        return text if text else False
    except:
        # best-effort: any failure (e.g. tail unavailable) yields False
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Ensure that the platform release string contains a keyphrase (e.g. "el7").
    On a mismatch, optionally log a warning and, if required, log a fatal
    message and raise EnvironmentError. Returns None on a match.
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message =\
        "inappropriate environment: " +\
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release = release
        )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """
    Ensure that a program is available in the environment (per which),
    raising EnvironmentError if it is not.
    """
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is not None:
        log.debug("program {program} available".format(
            program = program
        ))
        return
    log.error("program {program} not available".format(
        program = program
    ))
    raise(EnvironmentError)
def which(
    program
):
    """
    Return the path of a program: the program itself if it is a path to an
    executable file, the first matching executable on the environment PATH if
    the program is a bare name, or None if it is not available.
    """
    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    fpath, fname = os.path.split(program)
    if fpath:
        # program given as a path: accept it only if directly executable
        if is_exe(program):
            return(program)
    else:
        # bare name: search each PATH entry in order
        candidates = (
            os.path.join(path.strip('"'), program)
            for path in os.environ["PATH"].split(os.pathsep)
        )
        for exe_file in candidates:
            if is_exe(exe_file):
                return exe_file
    return None
def running(
    program
):
    """
    Return True if the ps -A listing contains a non-defunct line matching the
    program name, False otherwise.
    """
    pattern = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    return any(
        pattern in line and b"defunct" not in line for line in listing
    )
def ensure_file_existence(
    filename
):
    """
    Ensure that a file exists at the specified path (with environment
    variables expanded), raising IOError if it does not.
    """
    # The log messages previously had no {filename} placeholder, so the
    # .format calls were silent no-ops; the placeholders are restored here.
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """
    Remove the file at the specified path.
    """
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Find filenames in a directory that contain digits and end with the
    specified extension, grouped into sequences by the pattern obtained when
    digit runs are masked out (e.g. "frame_001.png" and "frame_002.png" share
    the pattern "frame_XXX.png").

    By default only the first sequence identified is returned, as a
    naturally-sorted list of filenames; otherwise a defaultdict mapping each
    masked pattern to its (unsorted) list of filenames is returned.
    """
    filenames_of_directory = os.listdir(directory)
    # filenames containing at least one digit and the extension
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        # mask digit runs with "XXX" so members of a sequence share a key
        pattern = re.sub("\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        # NOTE(review): "first" is the first key in dict iteration order;
        # natural_sort is defined elsewhere in this module -- presumably it
        # sorts embedded numbers numerically; confirm.
        first_key_identified = next(iter(filename_sequences.keys()))
        filename_sequence = \
            natural_sort(filename_sequences[first_key_identified])
        return filename_sequence
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """
    Return a list of the names of the files (not directories) at the
    specified directory, in os.listdir order.
    """
    filenames = []
    for entry in os.listdir(directory):
        if os.path.isfile(os.path.join(directory, entry)):
            filenames.append(entry)
    return(filenames)
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """
    Return a list of the paths of all files under the specified directory,
    recursing into subdirectories via os.walk.
    """
    paths = []
    for root, _subdirectories, filenames in os.walk(directory):
        paths.extend(os.path.join(root, filename) for filename in filenames)
    return paths
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return a list of the absolute paths of the files at a directory,
    optionally keeping only those whose extension contains a required string.
    Raise IOError (after logging) if the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for filename in os.listdir(directory):
        filepath = os.path.join(directory, filename)
        if os.path.isfile(filepath):
            filepaths.append(os.path.abspath(filepath))
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command    = None,
    background = True,
    timeout    = None
    ):
    """
    Run *command* in Bash.

    With *background* (default), launch it detached and return None
    (*timeout* is ignored with a warning). Otherwise wait up to *timeout*
    seconds and return the raw stdout bytes, or False if the command timed
    out (in which case the process is killed).
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell      = True,
            executable = "/bin/bash"
        )
        return None
    process = subprocess.Popen(
        [command],
        shell      = True,
        executable = "/bin/bash",
        stdout     = subprocess.PIPE
    )
    try:
        process.wait(timeout = timeout)
        output, errors = process.communicate(timeout = timeout)
        return output
    # previously a bare except: hid all errors; only a timeout should
    # result in the kill-and-return-False path
    except subprocess.TimeoutExpired:
        process.kill()
        return False
def percentage_power():
    """
    Return the battery charge percentage reported by UPower as a string such
    as "87%", "100%" when running from line power without a battery, or None
    when power information cannot be determined.
    """
    try:
        filenames_power = engage_command(command = "upower -e")
        filenames_power = [line for line in filenames_power.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # fix: the format string previously had no {filename} placeholder,
            # so the device path was never inserted into the upower call
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery))
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    except Exception:
        return None
class List_Consensus(list):
    """
    A list subclass with an approximate memory cap: whenever the estimated
    size exceeds the configured constraint, the least frequent elements are
    removed. The most frequent element can be retrieved as a "consensus".
    """
    def __init__(
        self,
        *args
    ):
        # initialise the underlying list
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        # approximate memory limit for the list, in bytes
        self.size_constraint = 150
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate memory limit in bytes, if *size* is given."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        Remove the least frequent elements until the estimated size of the
        list is within the limit (*size* or the stored constraint).
        """
        limit = self.size_constraint if size is None else size
        while sys.getsizeof(self) > limit:
            element_counts = collections.Counter(self)
            least_frequent = element_counts.most_common()[-1:][0][0]
            self.remove(least_frequent)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append *element*, then optionally enforce the size limit."""
        limit = self.size_constraint if size is None else size
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = limit
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None when that fails (e.g. empty list)."""
        try:
            return collections.Counter(self).most_common(1)[0][0]
        except:
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
    ):
    """Return *list_object* sorted naturally (so "a2" sorts before "a10")."""
    def alphanumeric_key(key):
        # split on digit runs; compare numbers numerically, text case-insensitively
        return [
            int(chunk) if chunk.isdigit() else chunk.lower()
            for chunk in re.split("([0-9]+)", key)
        ]
    return sorted(list_object, key = alphanumeric_key)
def indices_of_list_element_duplicates(
    x
    ):
    """
    Yield the indices of elements of *x* already seen earlier in *x*.
    Lists and dicts are compared by content (converted to tuples).
    """
    observed = set()
    for index, element in enumerate(x):
        # make unhashable containers hashable for the seen-set
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
    ):
    """
    Return the indices of the *number* greatest values of *x*, ordered from
    smallest to greatest value.
    """
    count = min(number, len(x))
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _value in ranked[-count:]]
def unique_list_elements(x):
    """Return the elements of *x* in order, keeping only first occurrences."""
    collected = []
    for item in x:
        # `not in` works for unhashable items too (uses equality scan)
        if item not in collected:
            collected.append(item)
    return collected
def select_spread(
    list_of_elements = None,
    number_of_elements = None
    ):
    """
    Return *number_of_elements* elements of *list_of_elements* spread
    approximately evenly across the list.

    If the list has no more elements than requested, the whole list is
    returned. Selection is recursive: one element is taken near the start of
    the current window, then the remainder is processed for one fewer
    element.
    """
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    if number_of_elements == 1:
        # a single element: take the middle one
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # take an element ~1/(2n) of the way into the window, then recurse on the
    # tail (starting ~1/n of the way in) for the remaining n - 1 elements
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def split_list(
    list_object = None,
    granularity = None
    ):
    """
    Split *list_object* into *granularity* sublists of approximately equal
    length and return them as a list of lists. A negative granularity raises
    an exception; a granularity greater than the list length yields one
    single-element list per element.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    # computed before branching so granularity 0 fails the same way as before
    step = len(list_object) / float(granularity)
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    parts = []
    position = float(0)
    while position < len(list_object):
        parts.append(list_object[int(position):int(position + step)])
        position += step
    return parts
def ranges_edge_pairs(
    extent       = None,
    range_length = None
    ):
    """
    Return (start, stop) pairs of range edges covering an extent in chunks
    of at most *range_length*. For example, to separate 76 variables into
    groups of at most 20:

    >>> ranges_edge_pairs(
    ...     extent       = 76,
    ...     range_length = 20
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    number_of_ranges = int(math.ceil(extent / range_length))
    pairs = []
    for index in range(number_of_ranges):
        start = index * range_length + index
        stop  = min((index + 1) * range_length + index, extent)
        pairs.append((start, stop))
    return pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
    ):
    """
    Parse a Markdown bullet list of "- name: value" lines into a nested
    dictionary; a bullet without a value opens a new nested branch, with
    indentation (spaces) giving the nesting level.
    """
    bullet = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [{}]
    for spaces, name, value in bullet.findall(Markdown_list):
        level = len(spaces)
        if level > depth:
            assert not stack[-1], "unexpected indent"
        elif level < depth:
            stack.pop()
        stack[-1][name] = value or {}
        if not value:
            # branch bullet: descend into the freshly-created dictionary
            stack.append(stack[-1][name])
        depth = level
    return stack[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
    ):
    """
    Parse a Markdown bullet list of "- name: value" lines into a nested
    OrderedDict, preserving the order of bullets; a bullet without a value
    opens a new nested branch, with indentation giving the nesting level.
    """
    bullet = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [collections.OrderedDict()]
    for spaces, name, value in bullet.findall(Markdown_list):
        level = len(spaces)
        if level > depth:
            assert not stack[-1], "unexpected indent"
        elif level < depth:
            stack.pop()
        stack[-1][name] = value or collections.OrderedDict()
        if not value:
            # branch bullet: descend into the freshly-created OrderedDict
            stack.append(stack[-1][name])
        depth = level
    return stack[0]
def open_configuration(
    filename = None
    ):
    """
    Read the Markdown-list configuration file at *filename* and return it
    parsed as an OrderedDict.
    """
    # previously the file handle was never closed
    with open(filename, "r") as file_configuration:
        text = file_configuration.read()
    return Markdown_list_to_OrderedDict(text)
def change_list_resolution(
    values             = None,
    length             = None,
    interpolation_type = "linear",
    dimensions         = 1
    ):
    """
    Resample *values* to *length* points by interpolation over their index
    positions. Return the new y values (dimensions = 1) or an (x, y) tuple
    of lists (dimensions = 2).
    """
    x_original = list(range(0, len(values)))
    interpolation = scipy.interpolate.interp1d(
        x_original,
        values,
        kind = interpolation_type
    )
    x_resampled = list(numpy.linspace(min(x_original), max(x_original), length))
    y_resampled = [float(interpolation(x)) for x in x_resampled]
    if dimensions == 1:
        return y_resampled
    elif dimensions == 2:
        return (x_resampled, y_resampled)
def change_waveform_to_rectangle_waveform(
    values             = None,
    fraction_amplitude = 0.01
    ):
    """
    Convert *values* (a NumPy array waveform) in place to a rectangle
    waveform: non-negative samples are set to the array maximum and negative
    samples to the array minimum. The same array is returned.
    """
    # levels scaled down first and back up afterwards, as in the original
    positive_level = fraction_amplitude * max(values)
    values[values >= 0] = positive_level
    negative_level = fraction_amplitude * min(values)
    values[values < 0] = negative_level
    values[:] = [sample * (1 / fraction_amplitude) for sample in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform           = None,
    filename_rectangle_waveform = None,
    overwrite                   = False,
    fraction_amplitude          = 0.01
    ):
    """
    Read the WAV file *filename_waveform*, convert its waveform to a
    rectangle waveform and write the result to *filename_rectangle_waveform*
    (by default a proposed variant of the input filename; with *overwrite*
    the proposed name may clobber an existing file).
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename  = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    values = change_waveform_to_rectangle_waveform(
        values             = values,
        fraction_amplitude = fraction_amplitude
    )
    # fix: the transform was previously re-applied a second time inline here
    # (duplicated masking and rescaling statements); the duplicate was
    # redundant and has been removed
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
    ):
    """Return *x* divided elementwise by *summation* (default: sum of *x*, i.e. normalize to unity)."""
    divisor = sum(x) if summation is None else summation
    return [element / divisor for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
    ):
    """
    Return *x* linearly rescaled so its smallest element maps to *minimum*
    and its largest to *maximum*.
    """
    lowest = min(x)
    scale = (maximum - minimum) / (max(x) - lowest)
    return [minimum + (element - lowest) * scale for element in x]
def composite_variable(
    x
    ):
    """
    Combine the elements of *x* into a single number by weighting element i
    with (len(x) + 1) ** (i - 1).
    """
    base = len(x) + 1
    return sum(base ** (index - 1) * element for index, element in enumerate(x))
def model_linear(
    data              = None,
    quick_calculation = False
    ):
    """
    Fit a least-squares line to *data* (a sequence of (x, y) pairs) and
    return (b0, b1), the intercept and slope. With *quick_calculation*, only
    a spread of 10 points is used.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    x_values         = [datum[0] for datum in data]
    y_values         = [datum[1] for datum in data]
    x_squared_values = [datum[0] ** 2 for datum in data]
    xy_values        = [datum[0] * datum[1] for datum in data]
    # standard least-squares closed form
    b1 = (sum(xy_values) - (sum(x_values) * sum(y_values)) / n) / \
         (sum(x_squared_values) - (sum(x_values) ** 2) / n)
    b0 = (sum(y_values) - b1 * sum(x_values)) / n
    return (b0, b1)
def import_object(
    filename = None
    ):
    """Load and return the pickled object stored at *filename*."""
    # previously the file handle was never closed
    with open(filename, "rb") as file_object:
        return pickle.load(file_object)
def export_object(
    x,
    filename  = None,
    overwrite = False
    ):
    """
    Pickle *x* to *filename*; unless *overwrite* is enabled a non-clobbering
    variant of the filename is proposed.
    """
    filename = propose_filename(
        filename  = filename,
        overwrite = overwrite
    )
    # previously the file handle was never closed
    with open(filename, "wb") as file_object:
        pickle.dump(x, file_object)
def string_to_bool(x):
    """Return True when *x* (case-insensitive) is "yes", "true", "t" or "1"."""
    return x.lower() in {"yes", "true", "t", "1"}
def ustr(text):
    """
    Convert *text* to a text string (Python 3 str or Python 2 unicode as
    appropriate to the interpreter); None is passed through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
    ):
    """
    Return the English words for the non-negative integer *number* (an int
    or a string of digits), e.g. 123 -> "one hundred twenty three".
    Supports magnitudes up to vigintillions; 0 yields an empty string.
    """
    # word tables for digit groups
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            # take 3, 2 or 1 leading digits depending on what remains
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            # all-zero groups contribute no words
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
    ):
    """
    Return *text* with every run of digits replaced by its English words,
    e.g. "take 2" -> "take two".
    """
    # Split the text into alternating non-digit and digit segments, keeping
    # both (raw string fixes the previous invalid "\d" escape).
    segments = re.split(r"(\d+)", text)
    if segments[-1] == "":
        segments = segments[:-1]
    text_translated = []
    # Replace digit segments with English text.
    for segment in segments:
        if segment.isdigit():
            text_translated.append(number_to_English_text(number = segment))
        else:
            text_translated.append(segment)
    return "".join(text_translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text            = None,
    remove_articles = True
    ):
    """
    Return *text* with contractions expanded to full words and small number
    words (zero--twenty) replaced by digits; with *remove_articles* the
    articles "a", "an" and "the" are dropped.
    """
    # Lookup tables built once per call; previously both dictionaries were
    # rebuilt from their literals on every word of the input.
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    words_translated = []
    for word in text.split():
        if remove_articles and word in ("a", "an", "the"):
            continue
        word = contractions_expansions.get(word, word)
        word = numbers_digits.get(word, word)
        words_translated.append(word)
    return " ".join(words_translated)
def split_into_sentences(
    text = None
    ):
    """
    Split *text* into a list of sentence strings using regular-expression
    heuristics for abbreviations, acronyms, websites and quoted punctuation.
    Any trailing fragment after the last terminal punctuation is discarded.
    """
    capitals = "([A-Z])"
    prefixes = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    starters = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = "[.](com|gov|io|net|org|pro)"
    text = " " + text + " "
    text = text.replace("\n", " ")
    # protect full stops that do not end sentences by replacing them with
    # the <prd> placeholder; mark real sentence boundaries with <stop>
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub("\s" + capitals + "[.] ", " \\1<prd> ", text)
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + capitals + "[.]", " \\1<prd>", text)
    # move terminal punctuation outside closing quotation marks
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    # the final split element is the text after the last boundary: drop it
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
    ):
    """
    Split *text* into sentences, drop the first one (assumed to be an
    incomplete fragment) and return the remainder joined by spaces.
    """
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """Return a pseudorandom MAC address string such as "0a:1b:2c:3d:4e:5f"."""
    octets = [random.randint(0, 255) for _ in range(6)]
    return "{aa:02x}:{bb:02x}:{cc:02x}:{dd:02x}:{ee:02x}:{ff:02x}".format(
        aa = octets[0],
        bb = octets[1],
        cc = octets[2],
        dd = octets[3],
        ee = octets[4],
        ff = octets[5]
    )
def get_attribute(
    object_instance          = None,
    name                     = None,
    imputation_default_value = None
    ):
    """
    Return the attribute *name* of *object_instance*, supporting an index
    suffix such as "values[2]". On any lookup failure,
    *imputation_default_value* is returned instead.
    """
    try:
        if "[" in name and "]" in name:
            index = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    # previously a bare except:; Exception still covers missing attributes,
    # bad indices and unparsable names without trapping KeyboardInterrupt
    except Exception:
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
    ):
    """
    Return *number* pseudorandom identifier strings (UUID4 hex), each
    starting with a letter so they are valid Python names.
    """
    names = []
    while True:
        if len(names) == number:
            return names
        candidate = uuid.uuid4().hex
        if candidate[0].isalpha():
            names.append(candidate)
def add_time_variables(df, reindex = True):
    """
    Return a DataFrame with variables for weekday index, weekday name,
    timedelta through day, fraction through day, hours through day and days
    through week added, with the index set to datetime. The variable
    `datetime` must exist; returns False (and logs an error) otherwise.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # fix: Series.dt.weekday_name was removed in pandas 1.0; day_name() is
    # the supported replacement
    df["weekday_name"] = df["datetime"].dt.day_name()
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    df.index = df["datetime"]
    #del df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
    ):
    """
    Create daily plots of a variable in a DataFrame, optionally renormalized
    to [0, 1] with MinMaxScaler. It is assumed that the DataFrame index is
    datetime and that the variable `hours_through_day` exists; returns False
    (and logs an error) when the index is not datetime. Draws on the current
    matplotlib figure.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # one sub-frame per calendar day-of-month
    # NOTE(review): grouping by index.day merges the same day number across
    # different months -- confirm intended
    days = []
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        if renormalize:
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
    ):
    """
    Create weekly plots of a variable in a DataFrame, optionally
    renormalized to [0, 1] with MinMaxScaler. It is assumed that the
    variable `days_through_week` exists; returns False (and logs an error)
    otherwise. Draws on the current matplotlib figure with weekday-name
    x-axis ticks.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    # one sub-frame per ISO week number
    # NOTE(review): DatetimeIndex.week was removed in pandas 2.0
    # (isocalendar().week is the replacement) -- confirm pandas version
    weeks = []
    for group in df.groupby(df.index.week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    plt.xticks(
        [      0.5,       1.5,         2.5,        3.5,      4.5,        5.5,      6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
    ):
    """
    Create yearly plots of a variable in a DataFrame, optionally
    renormalized to [0, 1] with MinMaxScaler, one line per year. It is
    assumed that the DataFrame index is datetime and that the variable
    `days_through_year` exists; returns False (and logs an error) when the
    index is not datetime. Draws on the current matplotlib figure.

    NOTE(review): `horizontal_axis_labels_days` is accepted but never used
    in this implementation.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # one sub-frame per calendar year
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year in years:
        if renormalize:
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        if plot:
            plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # tick positions are approximate month midpoints in day-of-year units
        plt.xticks(
            [     15.5,         45,    74.5,     105,   135.5,    166,  196.5,    227.5,         258,     288.5,        319,      349.5],
            ["January", "February", "March", "April",   "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df           = None,
    variable     = None,
    window       = 20,
    upper_factor = 2,
    lower_factor = 2
    ):
    """
    Add rolling mean, rolling standard deviation and mean +/- factor *
    standard-deviation bound variables derived from *variable* to the
    DataFrame and return it.
    """
    # fix: pd.stats.moments.rolling_mean/rolling_std were removed from
    # pandas long ago; Series.rolling is the supported interface
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"] = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = [],
    variables_exclude = []
    ):
    """
    Rescale variables of the DataFrame to [0, 1] with MinMaxScaler,
    excluding variables containing NaNs, strings, datetimes or timedeltas,
    excluding *variables_exclude*, and always including
    *variables_include*. Returns the modified DataFrame.
    """
    # fix: copy the exclusion list; previously the caller's list (and the
    # shared mutable default) was extended in place on every call
    variables_not_rescale = list(variables_exclude)
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings etc.
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
    ):
    """
    Create a day-long histogram of counts of the variable for each hour,
    drawn as a bar plot on the current matplotlib figure. It is assumed that
    the DataFrame index is datetime; returns False (and logs an error)
    otherwise.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # count non-null values of the variable per hour of day (0--23)
    counts = df.groupby(df.index.hour)[variable].count()
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
    ):
    """
    Create a week-long histogram of counts of the variable for each day,
    drawn as a bar plot on the current matplotlib figure in Monday-to-Sunday
    order. It is assumed that the DataFrame index is datetime; returns False
    (and logs an error) otherwise.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # fix: DatetimeIndex.weekday_name was removed in pandas 1.0; day_name()
    # is the supported replacement
    counts = df.groupby(df.index.day_name())[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
    ):
    """
    Create a year-long histogram of counts of the variable for each month,
    drawn as a bar plot on the current matplotlib figure in calendar order.
    It is assumed that the DataFrame index is datetime; returns False (and
    logs an error) otherwise.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # count non-null values per month name, reindexed January--December
    counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Set up a Jupyter notebook with a few defaults: seaborn paper context
    with monospace font, warnings suppressed, generous pandas display
    limits, and a large default figure size.
    """
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    pd.set_option("display.max_rows", 500)
    pd.set_option("display.max_columns", 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
    ):
    """
    Display a progress bar widget in a Jupyter notebook while yielding the
    elements of *sequence*. Its dependencies must be enabled on launching
    Jupyter, such as in the following way:

    jupyter nbextension enable --py widgetsnbextension

    The progress bar can be used in a way like the following:

    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)

    *every* controls how often the label/bar is refreshed (default: ~every
    0.5 % of *size*); *size* defaults to len(sequence) and must be given via
    *every* for pure iterators; *name* labels the counter.
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            # no len(): treat as an iterator of unknown size
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200)     # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        # unknown total: show an "info"-styled indeterminate bar
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name  = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name  = name,
                        index = index,
                        size  = size
                    )
            yield record
    except:
        # mark the bar red on any failure, then propagate the exception
        progress.bar_style = "danger"
        raise
    else:
        # completed: mark the bar green and show the final count
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name  = name,
            index = str(index or "?")
        )
# invoked at import time: initialise the module-level clocks global
_main()
|
wdbm/shijian
|
shijian.py
|
select_spread
|
python
|
def select_spread(
list_of_elements = None,
number_of_elements = None
):
if len(list_of_elements) <= number_of_elements:
return list_of_elements
if number_of_elements == 0:
return []
if number_of_elements == 1:
return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
return \
[list_of_elements[int(round((len(list_of_elements) - 1) /\
(2 * number_of_elements)))]] +\
select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
(number_of_elements))):], number_of_elements - 1)
|
This function returns the specified number of elements of a list spread
approximately evenly.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L997-L1015
|
[
"def select_spread(\n list_of_elements = None,\n number_of_elements = None\n ):\n \"\"\"\n This function returns the specified number of elements of a list spread\n approximately evenly.\n \"\"\"\n if len(list_of_elements) <= number_of_elements:\n return list_of_elements\n if number_of_elements == 0:\n return []\n if number_of_elements == 1:\n return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]\n return \\\n [list_of_elements[int(round((len(list_of_elements) - 1) /\\\n (2 * number_of_elements)))]] +\\\n select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\\\n (number_of_elements))):], number_of_elements - 1)\n"
] |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
name = "shijian"
version = "2018-06-02T1644Z"
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    """Initialise the module-level `clocks` global with a Clocks instance."""
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
    ):
    """Return the current UTC time styled per *style* (UNIX seconds by default)."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def time_UTC(
    style = None
    ):
    """Return the current UTC time styled per *style* (the default style of style_datetime_object)."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def filename_time_UNIX(
    style     = "UNIX time S.SSSSSS",
    extension = None
    ):
    """
    Return a proposed (non-clobbering) filename based on the current UNIX
    time, optionally with *extension* appended.
    """
    filename = str(time_UNIX(style = style))
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def filename_time_UTC(
    style     = "YYYY-MM-DDTHHMMSSZ",
    extension = None
    ):
    """
    Return a proposed (non-clobbering) filename based on the current UTC
    time, optionally with *extension* appended.
    """
    filename = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style           = style
    )
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def style_minimal_seconds(seconds):
    """
    Return *seconds* as a compact "days hours minutes seconds" string,
    omitting zero components.

    NOTE(review): relativedelta does not normalize raw seconds into larger
    units by itself -- confirm intended for values >= 60.
    """
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for interval in ("days", "hours", "minutes", "seconds"):
        quantity = getattr(delta, interval)
        if quantity:
            parts.append("{} {}".format(int(quantity), interval))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style     = "YYYY-MM-DDTHHMMZ"
    ):
    """Return the UNIX *timestamp* (seconds since the epoch, UTC) styled per *style*."""
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(datetime_object = moment, style = style)
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Render a datetime.datetime or datetime.timedelta in a named style.

    For datetime objects the style selects an strftime format; unrecognised
    styles fall back to the filename-safe "YYYY-MM-DDTHHMMZ". For timedelta
    objects the style is a format template with zero-padded fields {YYYY},
    {DD}, {HH}, {MM}, {SS} and running totals {Y}, {D}, {H}, {M}, {S}.
    Any other input type returns None implicitly.
    """
    if type(datetime_object) is datetime.datetime:
        # filename safe
        if style == "YYYY-MM-DDTHHMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
        # filename safe with seconds
        elif style == "YYYY-MM-DDTHHMMSSZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%SZ")
        # filename safe with seconds and microseconds
        elif style == "YYYY-MM-DDTHHMMSSMMMMMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%S%fZ")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS UTC":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S UTC")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS Z":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S Z")
        # UNIX time in seconds with second fraction
        elif style == "UNIX time S.SSSSSS":
            return (datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds()
        # UNIX time in seconds rounded
        elif style == "UNIX time S":
            return int((datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds())
        # human-readable date
        elif style == "day DD month YYYY":
            return datetime_object.strftime("%A %d %B %Y")
        # human-readable time and date
        elif style == "HH:MM day DD month YYYY":
            return datetime_object.strftime("%H:%M %A %d %B %Y")
        # human-readable time with seconds and date
        elif style == "HH:MM:SS day DD month YYYY":
            return datetime_object.strftime("%H:%M:%S %A %d %B %Y")
        # human-readable date with time with seconds
        elif style == "day DD month YYYY HH:MM:SS":
            return datetime_object.strftime("%A %d %B %Y %H:%M:%S")
        # human-readable-audible time with seconds and date
        elif style == "HH hours MM minutes SS sounds day DD month YYYY":
            return datetime_object.strftime("%H hours %M minutes %S seconds %A %d %B %Y")
        # human-readable days, hours and minutes
        elif style == "DD:HH:MM":
            return datetime_object.strftime("%d:%H:%M")
        # human-readable days, hours, minutes and seconds
        elif style == "DD:HH:MM:SS":
            return datetime_object.strftime("%d:%H:%M:%S")
        # human-readable time with seconds
        elif style == "HH:MM:SS":
            return datetime_object.strftime("%H:%M:%S")
        # human-readable-audible time with seconds
        elif style == "HH hours MM minutes SS seconds":
            return datetime_object.strftime("%H hours %M minutes %S seconds")
        # filename safe
        else:
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
    if type(datetime_object) is datetime.timedelta:
        # Map the datetime-style default keyword to a timedelta template.
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        # NOTE(review): the else branch looks unreachable here, because a
        # datetime.timedelta always has a .seconds attribute — confirm.
        if hasattr(datetime_object, "seconds"):
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        # Decompose total seconds into years/days/hours/minutes/seconds,
        # keeping the running totals for the {Y}/{D}/{H}/{M}/{S} fields.
        seconds_total = seconds
        minutes = int(math.floor(seconds / 60))
        minutes_total = minutes
        seconds -= minutes * 60
        hours = int(math.floor(minutes / 60))
        hours_total = hours
        minutes -= hours * 60
        days = int(math.floor(hours / 24))
        days_total = days
        hours -= days * 24
        years = int(math.floor(days / 365))
        years_total = years
        days -= years * 365
        return style.format(**{
            "Y" : years_total,
            "D" : days_total,
            "H" : hours_total,
            "M" : minutes_total,
            "S" : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD" : str(days).zfill(2),
            "HH" : str(hours).zfill(2),
            "MM" : str(minutes).zfill(2),
            "SS" : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert a 24-hour "HHMM" string to minutes after midnight."""
    return int(HHMM[:2]) * 60 + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time as minutes after midnight."""
    moment = datetime.datetime.utcnow()
    return moment.hour * 60 + moment.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop = None # string "HHMM" e.g. "1000"
):
    """
    Return True when the current UTC time falls within a daily window (which
    may wrap past midnight); return None when no window is specified.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        parts = time_range.split("--")
        time_start = parts[0]
        time_stop = parts[1]
    minutes_per_day = 1440
    start = HHMM_to_minutes(time_start)
    stop = HHMM_to_minutes(time_stop)
    # Modular arithmetic handles windows that cross midnight.
    return (now_in_minutes() - start) % minutes_per_day <=\
           (stop - start) % minutes_per_day
def timer(function):
    """
    Decorator that times each call of the wrapped function using a ``Clock``
    named after the function, so timings aggregate in the module ``clocks``
    registry when one exists.
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # bug fix: a previous inspect.getcallargs call computed the bound
        # arguments on every invocation and discarded them (dead work; the
        # function is deprecated in modern Python).
        clock = Clock(name = function.__name__)  # starts on instantiation
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    A stopwatch-style timer. Elapsed time is folded into an accumulator so a
    clock can be stopped, restarted and queried incrementally. Instances
    auto-register with a module-level ``clocks`` registry when one exists.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Begin (or restart) timing from now (UTC)."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Stop timing, folding the open interval into the accumulator."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        """Add the time since the last update (or since start) to the accumulator."""
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the working start time."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        """Return the accumulated elapsed time as a datetime.timedelta."""
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the accumulated elapsed time in seconds."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multiline textual report of the clock attributes."""
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the report."""
        print(self.report())
class Clocks(object):
    """
    Registry of ``Clock`` instances with aggregated reporting in two styles:
    "statistics" (mean time per clock name) and "full" (one line per clock).
    """
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock instance."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """
        Return a textual report of registered clocks; return "no clocks" when
        the registry is empty or the style is unrecognised.

        bug fix: previously an empty registry left the report string unbound
        and raised UnboundLocalError instead of reporting "no clocks".
        """
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks and style == "statistics":
            # Group the times of all clocks by clock name.
            times_by_name = {}
            for clock in self._list_of_clocks:
                times_by_name.setdefault(clock.name(), []).append(clock.time())
            # Report the mean time for each clock name.
            string = "clock type".ljust(39) + "mean time (s)"
            for name, values in list(times_by_name.items()):
                string += "\n" +\
                    str(name).ljust(39) + str(sum(values)/len(values))
            string += "\n"
        elif self._list_of_clocks and style == "full":
            # Report every clock instance individually.
            string = "clock".ljust(39) + "time (s)"
            for clock in self._list_of_clocks:
                string += "\n" +\
                    str(clock.name()).ljust(39) + str(clock.time())
            string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track fractional task progress over time and estimate the completion
    time by linear extrapolation of the recorded (fraction, UNIX time) data.
    """
    def __init__(
        self
    ):
        self.data = []                  # list of tuples (fraction, UNIX time)
        self.quick_calculation = False  # subsample fit data, throttle updates
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Throttle datum recording and subsample data for the fit."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Record every datum and fit on all data."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """Record a completion fraction and return a status message."""
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            # In quick mode, record at most one datum per update_rate period.
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """Extrapolate the datetime at which the fraction reaches 1."""
        if len(self.data) <= 1:
            return 0
        else:
            try:
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            # bug fix: narrowed from a bare except, which also swallowed
            # KeyboardInterrupt and SystemExit; a degenerate fit falls back
            # to the epoch
            except Exception:
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the estimated completion time as a styled string."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated seconds remaining (never negative)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded completion fraction."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recently recorded completion percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """
        Return a one-line status message for the default style.
        NOTE(review): any non-None style returns None implicitly — behaviour
        preserved from the original.
        """
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """Return a universally unique identifier as a string."""
    return "{identifier}".format(identifier = uuid.uuid4())
def unique_number(
    style = None
):
    """
    Return an integer unique within this process, tracked via module-level
    global lists. Style "integer 3 significant figures" issues 100-999 and
    raises Exception when exhausted; the default style counts from 1 upwards.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        initial_number = 100
        # Lazily create the module-level registry of issued numbers.
        if "unique_numbers_3_significant_figures" not in globals():
            global unique_numbers_3_significant_figures
            unique_numbers_3_significant_figures = []
        if not unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(initial_number)
        else:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        # Exhaustion guard: only 3-digit numbers may be issued in this style.
        if\
            style == "integer 3 significant figures" and \
            unique_numbers_3_significant_figures[-1] > 999:
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        initial_number = 1
        # Lazily create the module-level registry of issued numbers.
        if "unique_numbers" not in globals():
            global unique_numbers
            unique_numbers = []
        if not unique_numbers:
            unique_numbers.append(initial_number)
        else:
            unique_numbers.append(unique_numbers[-1] + 1)
        return unique_numbers[-1]
def unique_3_digit_number():
    """Return the next process-unique 3-significant-figure integer (100-999)."""
    style = "integer 3 significant figures"
    return unique_number(style = style)
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Make text safe for use as a filename or URL.

    Non-ASCII characters are transliterated or dropped, punctuation is
    removed, and whitespace is collapsed to underscores (filename mode) or
    hyphens (URL mode, which also lowercases).
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    # bug fix: raw strings for the regex patterns; the previous plain strings
    # containing \w and \s emit invalid-escape-sequence warnings.
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
# @details This function returns a filename string. If a default filename is not
# specified, the function generates one based on the current time. If a default
# filename is specified, the function uses it as the default filename. By
# default, the function then checks to see if using the filename would cause
# overwriting of an existing file. If overwriting is possible, the function
# appends an integer to the filename in a loop in order to generate a filename
# that would not cause overwriting of an existing file. The function can be set
# to overwrite instead of using the default overwrite protection behaviour.
# @return filename string
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    """
    Return a filename, optionally slugified, that by default is adjusted by
    appending an integer so that no existing file would be overwritten.
    """
    if not filename:
        # Default to a UTC-timestamp-based name.
        filename = time_UTC()
    proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base, extension = os.path.splitext(os.path.basename(filename))
            proposed = slugify(text = base) + extension
        else:
            proposed = slugify(text = filename)
    if not overwrite:
        # NOTE(review): on collision, candidates derive from the original
        # (un-slugified) filename — behaviour preserved from the original.
        directory = os.path.dirname(filename)
        base, extension = os.path.splitext(os.path.basename(filename))
        count = 0
        while os.path.exists(proposed):
            count += 1
            candidate = base + "_" + str(count) + extension
            if directory:
                proposed = directory + "/" + candidate
            else:
                proposed = candidate
    return proposed
def tmp_filepath():
    """
    Return an extensionless filepath in the system temporary directory
    without creating a file at the filepath.
    """
    # bug fix: previously relied on tempfile._get_candidate_names, a private
    # API that can change without notice; a UUID-based name is equally
    # collision-safe, and gettempdir respects TMPDIR instead of hard-coding
    # "/tmp".
    return os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return the last ``lines`` lines of the specified file as bytes. Return
    False if the file does not exist, is empty, or cannot be read.
    """
    try:
        # Expand ~ and environment variables in the path.
        filepath = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(filepath):
            return False
        text = subprocess.check_output(["tail", "-" + str(lines), filepath])
        # An empty result is reported as failure, matching the original API.
        return text if text else False
    # bug fix: narrowed from a bare except, which also swallowed SystemExit
    # and KeyboardInterrupt; any read failure still reports False.
    except Exception:
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Check that the kernel release string contains a keyphrase; optionally
    warn and/or raise EnvironmentError when it does not.
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message =\
        "inappropriate environment: " +\
        "\"{keyphrase}\" required; \"{release}\" available".format(
        keyphrase = keyphrase,
        release = release
    )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """Raise EnvironmentError if a program is not available on PATH."""
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is not None:
        log.debug("program {program} available".format(
            program = program
        ))
        return
    log.error("program {program} not available".format(
        program = program
    ))
    raise(EnvironmentError)
def which(
    program
):
    """
    Return the full path of an executable found on PATH, the program itself
    when it is directly an executable path, or None when not found.
    """
    def executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    directory, _basename = os.path.split(program)
    if directory:
        # An explicit path is accepted only if it is itself executable.
        if executable(program):
            return program
        return None
    for path in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(path.strip('"'), program)
        if executable(candidate):
            return candidate
    return None
def running(
    program
):
    """Return True when a non-defunct process matching the name appears in ps."""
    needle = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    for line in listing:
        if needle in line and b"defunct" not in line:
            return True
    return False
def ensure_file_existence(
    filename
):
    """Raise IOError if the file (after variable expansion) does not exist."""
    # bug fix: the log messages previously lacked the {filename} placeholder,
    # so the filename keyword passed to format was silently dropped and the
    # messages never named the file.
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the specified file."""
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Find filenames containing digits with the given extension and group them
    into sequences keyed by their digit pattern. Return the naturally-sorted
    first sequence found, or the dictionary of all sequences.
    """
    candidates = [
        filename for filename in os.listdir(directory)
        if re.match(r".*\d+.*\." + extension, filename)
    ]
    sequences = collections.defaultdict(list)
    for filename in candidates:
        # Digits collapse to "XXX" so that e.g. frame_1.png and frame_2.png
        # share one sequence key.
        sequences[re.sub(r"\d+", "XXX", filename)].append(filename)
    if return_first_sequence_only is True:
        first_key = next(iter(sequences.keys()))
        return natural_sort(sequences[first_key])
    return sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the names of regular files (not directories) at a directory."""
    entries = os.listdir(directory)
    return [
        entry for entry in entries
        if os.path.isfile(os.path.join(directory, entry))
    ]
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """Return the paths of all files under a directory, recursively."""
    paths = []
    for root, _subdirectories, filenames in os.walk(directory):
        paths.extend(os.path.join(root, filename) for filename in filenames)
    return paths
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return absolute paths of files at a directory, optionally filtered to
    those whose extension contains a substring. Raise IOError when the
    directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for filename in os.listdir(directory):
        candidate = os.path.join(directory, filename)
        if os.path.isfile(candidate):
            filepaths.append(os.path.abspath(candidate))
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command = None,
    background = True,
    timeout = None
):
    """
    Run a shell command via Bash. In background mode, launch it and return
    None immediately (any timeout is ignored, with a warning). In foreground
    mode, return the command's standard output as bytes, or False on timeout
    or failure.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash"
        )
        return None
    process = subprocess.Popen(
        [command],
        shell = True,
        executable = "/bin/bash",
        stdout = subprocess.PIPE
    )
    try:
        # bug fix: the original called process.wait() before communicate()
        # while stdout=PIPE was set, which can deadlock when the pipe buffer
        # fills; communicate() alone both waits and drains the pipe.
        output, errors = process.communicate(timeout = timeout)
        return output
    # bug fix: narrowed from a bare except; a timeout (or other failure)
    # kills the process and reports False. An unreachable trailing else
    # branch was also removed.
    except Exception:
        process.kill()
        return False
def percentage_power():
    """
    Return the battery charge percentage as a string such as "89%" using
    upower, "100%" when only line power is detected, or None on failure.
    """
    try:
        filenames_power = engage_command(command = "upower -e")
        filenames_power = [line for line in filenames_power.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # bug fix: the command string previously lacked the {filename}
            # placeholder, so upower was invoked without a device path and
            # the format keyword was silently dropped.
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery))
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    # Narrowed from a bare except; any failure reports None (best-effort).
    except Exception:
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements (and elements of all sublists) of a
    list to a specified type and return the new list.

    Generalized: previously only element_type=str was handled and any other
    type silently returned None; now any callable converter works, with the
    default str behaviour unchanged.
    """
    return [
        convert_type_list_elements(
            list_object = element,
            element_type = element_type
        ) if isinstance(element, list) else element_type(element)
        for element in list_object
    ]
class List_Consensus(list):
    """
    A list with an approximate memory-size constraint: when appending makes
    the estimated size of the list object exceed the limit, the least
    frequent elements are removed. ``consensus`` returns the most frequent
    element, which can be used to determine the list's "consensus" value.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation, compatible with Python 2 and 3
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate size limit in bytes (None leaves it unchanged)."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        Remove the least frequent elements until the size constraint is met.
        """
        if size is None:
            size = self.size_constraint
        # sys.getsizeof is shallow: the constraint tracks the list object
        # itself, not the sizes of the referenced elements.
        while sys.getsizeof(self) > size:
            element_frequencies = collections.Counter(self)
            infrequent_element = element_frequencies.most_common()[-1:][0][0]
            self.remove(infrequent_element)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append an element, by default then enforcing the size constraint."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None when the list is empty."""
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        # bug fix: narrowed from a bare except; only an empty list is
        # expected here (IndexError on most_common(1)[0])
        except IndexError:
            return None
## @brief return a naturally-sorted list
# @details This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
):
    """Return the list sorted in natural (human) order, case-insensitively."""
    def key(text):
        # Digit runs compare numerically; everything else case-insensitively.
        return [
            int(token) if token.isdigit() else token.lower()
            for token in re.split("([0-9]+)", text)
        ]
    return sorted(list_object, key = key)
def indices_of_list_element_duplicates(
    x
):
    """
    Yield the indices of elements that duplicate earlier elements; lists and
    dicts are converted to hashable tuples for the comparison.
    """
    observed = set()
    for index, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
):
    """Return the indices of the ``number`` greatest values, ascending by value."""
    number = min(number, len(x))
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _value in ranked[-number:]]
def unique_list_elements(x):
    """Return the unique elements of a list, preserving first-seen order."""
    # Membership is tested on a list (not a set) so that unhashable
    # elements remain supported.
    result = []
    for element in x:
        if result.count(element) == 0:
            result.append(element)
    return result
def split_list(
    list_object = None,
    granularity = None
):
    """
    Split a list into approximately ``granularity`` contiguous parts and
    return a list of those parts. When granularity exceeds the list length,
    each element becomes its own part. Negative granularity raises an
    exception.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    step = len(list_object) / float(granularity)
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    parts = []
    position = float(0)
    while position < len(list_object):
        parts.append(list_object[int(position):int(position + step)])
        position += step
    return parts
def ranges_edge_pairs(
    extent = None,
    range_length = None
):
    """
    Return (start, stop) edge pairs covering an extent in chunks of at most
    range_length, for example:

    >>> ranges_edge_pairs(
    ...     extent       = 76, # number of variables
    ...     range_length = 20  # maximum number of variables per plot
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    count = int(math.ceil(extent / range_length))
    pairs = []
    for index in range(count):
        # Each successive range starts one past the previous range's stop.
        start = index * (range_length + 1)
        stop = min(start + range_length, extent)
        pairs.append((start, stop))
    return pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """
    Parse a Markdown bullet list of "- name: value" entries, where deeper
    indentation opens a nested dictionary, and return the resulting dict.
    """
    entry = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branches = [{}]
    for indentation, entry_name, entry_value in entry.findall(Markdown_list):
        indentation = len(indentation)
        if indentation > current_depth:
            assert not branches[-1], "unexpected indent"
        elif indentation < current_depth:
            branches.pop()
        branches[-1][entry_name] = entry_value or {}
        if not entry_value:
            # A valueless entry opens a new nested branch.
            branches.append(branches[-1][entry_name])
        current_depth = indentation
    return branches[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """
    Parse a Markdown bullet list of "- name: value" entries, where deeper
    indentation opens a nested OrderedDict, and return the resulting
    OrderedDict (entry order preserved).
    """
    entry = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branches = [collections.OrderedDict()]
    for indentation, entry_name, entry_value in entry.findall(Markdown_list):
        indentation = len(indentation)
        if indentation > current_depth:
            assert not branches[-1], "unexpected indent"
        elif indentation < current_depth:
            branches.pop()
        branches[-1][entry_name] = entry_value or collections.OrderedDict()
        if not entry_value:
            # A valueless entry opens a new nested branch.
            branches.append(branches[-1][entry_name])
        current_depth = indentation
    return branches[0]
def open_configuration(
    filename = None
):
    """Read a Markdown-list configuration file and parse it to an OrderedDict."""
    # bug fix: the file handle was previously opened without being closed.
    with open(filename, "r") as configuration_file:
        file_configuration = configuration_file.read()
    return Markdown_list_to_OrderedDict(file_configuration)
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
):
    """
    Interpolate a list of values to a new length. Return the interpolated
    y values (dimensions = 1) or the tuple (x, y) (dimensions = 2).
    """
    x_original = list(range(0, len(values)))
    interpolate = scipy.interpolate.interp1d(
        x_original,
        values,
        kind = interpolation_type
    )
    x_new = list(numpy.linspace(min(x_original), max(x_original), length))
    y_new = [float(interpolate(x)) for x in x_new]
    if dimensions == 1:
        return y_new
    elif dimensions == 2:
        return (x_new, y_new)
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """
    Clip a NumPy waveform array in place to a rectangle waveform:
    non-negative samples become the scaled maximum, negative samples the
    scaled minimum, then the array is rescaled back to full amplitude.

    NOTE(review): the maximum is taken before and the minimum after the
    first in-place assignment — statement order preserved from the original.
    """
    values[values >= 0] = fraction_amplitude * max(values)
    values[values < 0] = fraction_amplitude * min(values)
    values[:] = values * (1 / fraction_amplitude)
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
):
    """
    Read a WAV file, convert its waveform to a rectangle waveform and write
    the result, by default to a non-clobbering proposed filename.
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    # bug fix: the clip-and-rescale steps were previously duplicated inline
    # after this call, redundantly re-applying the transform.
    values = change_waveform_to_rectangle_waveform(
        values = values,
        fraction_amplitude = fraction_amplitude
    )
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """Scale a list by 1/summation; by default normalize it to unity sum."""
    divisor = sum(x) if summation is None else summation
    return [element / divisor for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """Linearly rescale the list values to the range [minimum, maximum]."""
    lowest = min(x)
    scale = (maximum - minimum) / (max(x) - lowest)
    return [minimum + (element - lowest) * scale for element in x]
def composite_variable(
    x
):
    """
    Combine the list values into a single number using powers of a base one
    greater than the list length.
    """
    base = len(x) + 1
    total = 0
    for index, element in enumerate(x):
        total += base ** (index - 1) * element
    return total
def model_linear(
    data = None,
    quick_calculation = False
):
    """
    Fit a least-squares line to (x, y) tuples and return the coefficients
    (intercept b0, slope b1). Quick mode fits on a spread subsample.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x = sum(datum[0] for datum in data)
    sum_y = sum(datum[1] for datum in data)
    sum_x_squared = sum(datum[0] ** 2 for datum in data)
    sum_xy = sum(datum[0] * datum[1] for datum in data)
    b1 = (sum_xy - (sum_x * sum_y) / n) / \
         (sum_x_squared - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
):
    """
    Load a pickled object from a file.

    WARNING: pickle deserialisation can execute arbitrary code; only load
    files from trusted sources.
    """
    # bug fix: the file handle was previously opened without being closed.
    with open(filename, "rb") as pickle_file:
        return pickle.load(pickle_file)
def export_object(
    x,
    filename = None,
    overwrite = False
):
    """Pickle an object to a (by default non-clobbering) proposed filename."""
    filename = propose_filename(
        filename = filename,
        overwrite = overwrite
    )
    # bug fix: the file handle was previously opened without being closed.
    with open(filename, "wb") as pickle_file:
        pickle.dump(x, pickle_file)
def string_to_bool(x):
    """Interpret "yes"/"true"/"t"/"1" (case-insensitive) as True."""
    return x.lower() in {"yes", "true", "t", "1"}
def ustr(text):
    """
    Convert a value to Python 2 unicode or Python 3 str as appropriate to
    the interpreter version; pass None through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
):
    """
    Convert an integer (or its string form) to English words, for example
    123 to "one hundred twenty three". Supports magnitudes up to
    vigintillions.
    """
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        # bug fix: "quindecillion" previously lacked its trailing space,
        # which fused it with the following word in the output.
        "quindecillion ",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """Replace each run of digits in the text with its English words."""
    # bug fix: raw string for the digit pattern; the previous plain string
    # containing \d emits an invalid-escape-sequence warning.
    segments = re.split(r"(\d+)", text)
    if segments[-1] == "":
        segments = segments[:-1]
    text_translated = []
    # Replace numbers with English text; keep other segments unchanged.
    for segment in segments:
        if all(character.isdigit() for character in segment):
            text_translated.append(number_to_English_text(number = segment))
        else:
            text_translated.append(segment)
    return "".join(text_translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
):
    """
    Replace contractions with full words and number words (zero to twenty)
    with digits in the specified text. There is the option to remove
    articles ("a", "an", "the").
    """
    # performance fix: both lookup tables were previously rebuilt inside the
    # per-word loop; they are invariant, so they are built once here.
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    words = text.split()
    text_translated = ""
    for word in words:
        if remove_articles and word in ["a", "an", "the"]:
            continue
        if word in contractions_expansions:
            word = contractions_expansions[word]
        if word in numbers_digits:
            word = numbers_digits[word]
        text_translated += " " + word
    return text_translated.strip()
def split_into_sentences(
    text = None
):
    """
    Split text into a list of sentence strings.

    Periods that do not terminate sentences -- title prefixes ("Mr."),
    suffixes ("Inc."), acronyms ("U.S.A."), website TLDs (".com"), "Ph.D."
    and single-capital initials -- are protected by temporarily replacing
    them with the placeholder "<prd>". Sentence boundaries are marked with
    "<stop>" and the text is split on that marker. Any trailing text after
    the final sentence terminator is discarded.
    """
    # raw strings: the previous non-raw patterns ("\s", "\\1") relied on
    # Python leaving unknown escapes intact, which raises deprecation
    # warnings on modern interpreters
    capitals = r"([A-Z])"
    prefixes = r"(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = r"(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    starters = r"(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = r"([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = r"[.](com|gov|io|net|org|pro)"
    text = " " + text + " "
    text = text.replace("\n", " ")
    # protect non-terminating periods with the <prd> placeholder
    text = re.sub(prefixes, r"\1<prd>", text)
    text = re.sub(websites, r"<prd>\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub(r"\s" + capitals + "[.] ", r" \1<prd> ", text)
    text = re.sub(acronyms + " " + starters, r"\1<stop> \2", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]", r"\1<prd>\2<prd>\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", r"\1<prd>\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, r" \1<stop> \2", text)
    text = re.sub(" " + suffixes + "[.]", r" \1<prd>", text)
    text = re.sub(" " + capitals + "[.]", r" \1<prd>", text)
    # move terminators outside closing quotes so they mark the boundary
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    # the final fragment is whatever followed the last terminator
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
):
    """
    Return `text` with its first sentence removed, joining the remaining
    sentences with single spaces. (split_into_sentences also discards any
    trailing text that lacks a sentence terminator.)
    """
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """
    Return a pseudorandom MAC address: six two-digit lowercase hexadecimal
    octets delimited by colons.
    """
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{octet:02x}".format(octet = octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
):
    """
    Return attribute `name` of `object_instance`, falling back to
    `imputation_default_value` if the lookup fails for any reason.

    A name of the form "attr[i]" is interpreted as element i of the
    indexable attribute "attr".
    """
    try:
        if "[" in name and "]" in name:
            # "attr[3]" -> element 3 of attribute "attr"
            index = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    except Exception:
        # deliberate best-effort: any lookup failure (missing attribute, bad
        # index, unparsable name) yields the imputation default; previously a
        # bare except also swallowed KeyboardInterrupt/SystemExit
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
):
    """
    Return a list of the specified number of valid Python identifiers, each
    a 32-character hexadecimal UUID string whose first character is a
    letter.
    """
    names = []
    while len(names) < number:
        candidate = str(uuid.uuid4()).replace("-", "")
        if candidate[0].isalpha():
            names.append(candidate)
    return names
def add_time_variables(df, reindex = True):
    """
    Return a DataFrame with variables for month, month name, weekday index,
    weekday name, timedelta through day, fraction through day, hour, hours
    through day, days through week and days through year added, optionally
    (`reindex`, default True) with the index set to the datetime variable.
    It is assumed that the variable `datetime` exists; returns False if it
    does not.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # Series.dt.weekday_name was removed in pandas 1.0; day_name() is the
    # supported replacement and returns the same English day names
    df["weekday_name"] = df["datetime"].dt.day_name()
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    if reindex:
        # previously the index was set unconditionally; honour the
        # documented `reindex` flag
        df.index = df["datetime"]
    return df
def daily_plots(
    df,
    variable,            # column to plot
    renormalize = True,  # rescale each day's values to [0, 1]
    plot = True,         # draw line plots
    scatter = False,     # draw scatter points
    linestyle = "-",
    linewidth = 1,
    s = 1                # scatter marker size
):
    """
    Create daily plots of a variable in a DataFrame, optionally renormalized. It
    is assumed that the DataFrame index is datetime.

    Each day's values are plotted against the `hours_through_day` column on
    the current matplotlib figure. Returns False if the index is not
    datetime; otherwise returns None.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    days = []
    # NOTE(review): grouping is by day-of-month, so equal day numbers from
    # different months fall into the same group -- confirm this is intended
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        if renormalize:
            # fit per day: each day is scaled independently to [0, 1]
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,            # column to plot
    renormalize = True,  # rescale each week's values to [0, 1]
    plot = True,         # draw line plots
    scatter = False,     # draw scatter points
    linestyle = "-",
    linewidth = 1,
    s = 1                # scatter marker size
):
    """
    Create weekly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the variable `days_through_week` exists.

    Each week's values are plotted against `days_through_week` on the
    current matplotlib figure, with weekday names as horizontal axis labels.
    Returns False if `days_through_week` is missing; otherwise returns None.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    weeks = []
    # NOTE(review): grouping is by ISO week number, so the same week number
    # from different years is merged into one group -- confirm intended
    for group in df.groupby(df.index.week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            # fit per week: each week is scaled independently to [0, 1]
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    # label day centres (x = n + 0.5) with weekday names
    plt.xticks(
        [ 0.5,      1.5,       2.5,         3.5,        4.5,      5.5,        6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,                             # column to plot
    renormalize = True,                   # rescale each year's values to [0, 1]
    horizontal_axis_labels_days = False,  # NOTE(review): accepted but unused
    horizontal_axis_labels_months = True, # label the axis with month names
    plot = True,                          # draw line plots
    scatter = False,                      # draw scatter points
    linestyle = "-",
    linewidth = 1,
    s = 1                                 # scatter marker size
):
    """
    Create yearly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the DataFrame index is datetime.

    Each year's values are plotted against the `days_through_year` column on
    the current matplotlib figure, labelled by year in the legend. Returns
    False if the index is not datetime; otherwise returns None.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year in years:
        if renormalize:
            # fit per year: each year is scaled independently to [0, 1]
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        if plot:
            plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # tick positions are approximate month centres in day-of-year units
        plt.xticks(
            [ 15.5,      45,         74.5,    105,     135.5, 166,    196.5,  227.5,    258,         288.5,     319,        349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df = None,
    variable = None,
    window = 20,       # rolling window length in rows
    upper_factor = 2,  # standard deviations above the rolling mean
    lower_factor = 2   # standard deviations below the rolling mean
):
    """
    Add rolling statistics variables derived from a specified variable in a
    DataFrame: rolling mean, rolling standard deviation and upper/lower
    bounds at the specified numbers of standard deviations from the mean.
    The first `window - 1` rows of the new variables are NaN.
    """
    # pd.stats.moments.rolling_mean/rolling_std were removed from pandas;
    # the Series.rolling API is the supported equivalent (same default
    # ddof=1 for the standard deviation)
    df[variable + "_rolling_mean"] = df[variable].rolling(window).mean()
    df[variable + "_rolling_standard_deviation"] = df[variable].rolling(window).std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = None,
    variables_exclude = None
):
    """
    Rescale variables in a DataFrame to the unit interval with MinMaxScaler,
    excluding variables with NaNs and non-numeric variables (strings,
    datetimes, timedeltas), excluding any specified variables, and forcing
    inclusion of any specified variables.
    """
    # Copy the caller's lists: the previous implementation used mutable
    # default arguments and extended `variables_exclude` in place, mutating
    # both the shared default list and any list passed by the caller.
    variables_include = list(variables_include) if variables_include else []
    variables_exclude = list(variables_exclude) if variables_exclude else []
    variables_not_rescale = variables_exclude
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Create a day-long histogram of counts of the variable for each hour of
    the day, drawn as a bar plot on the current matplotlib figure. It is
    assumed that the DataFrame index is datetime; returns False otherwise.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    hourly_counts = df.groupby(df.index.hour)[variable].count()
    hourly_counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
):
    """
    Create a week-long histogram of counts of the variable for each day of
    the week (Monday through Sunday), drawn as a bar plot on the current
    matplotlib figure. It is assumed that the DataFrame index is datetime;
    returns False otherwise.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # DatetimeIndex.weekday_name was removed in pandas 1.0; day_name() is
    # the supported replacement and returns the same English day names
    counts = df.groupby(df.index.day_name())[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
):
    """
    Create a year-long histogram of counts of the variable for each calendar
    month (January through December), drawn as a bar plot on the current
    matplotlib figure. It is assumed that the DataFrame index is datetime;
    returns False otherwise.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    monthly_counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    monthly_counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Set up a Jupyter notebook with a few defaults.

    Applies a paper-context monospace seaborn style, silences all warnings,
    raises the pandas display limits to 500 rows and 500 columns and sets a
    large default matplotlib figure size.
    """
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    pd.set_option("display.max_rows", 500)
    pd.set_option("display.max_columns", 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,  # update the widget every `every` items (default ~0.5 %)
    size = None,   # total number of items, if the sequence has no len()
    name = "items"
):
    """
    Display a progress bar widget in a Jupyter notebook. Its dependencies must
    be enabled on launching Jupyter, such as in the following way:

    jupyter nbextension enable --py widgetsnbextension

    The progress bar can be used in a way like the following:

    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            # the sequence has no length; fall back to iterator mode with an
            # indeterminate ("info") progress bar
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200) # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        # re-yield each item, refreshing the widget every `every` items
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name  = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name  = name,
                        index = index,
                        size  = size
                    )
            yield record
    except:
        # mark the bar red if iteration raised, then propagate
        progress.bar_style = "danger"
        raise
    else:
        # iteration completed: mark the bar green and show the final count
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name  = name,
            index = str(index or "?")
        )
# module initialisation: create the global clock registry on import
_main()
|
wdbm/shijian
|
shijian.py
|
split_list
|
python
|
def split_list(
    list_object = None,
    granularity = None
):
    """
    Split a list into approximately `granularity` sublists and return the
    list of sublists. When the list has no more elements than `granularity`,
    each element is returned in its own single-element sublist. A negative
    granularity raises an exception.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    mean_length = len(list_object) / float(granularity)
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    sublists = []
    cursor = float(0)
    # advance a fractional cursor so the sublists differ in length by at
    # most one element
    while cursor < len(list_object):
        sublists.append(list_object[int(cursor):int(cursor + mean_length)])
        cursor += mean_length
    return sublists
|
This function splits a list into a specified number of lists. It returns a
list of lists that correspond to these parts. Negative numbers of parts are
not accepted and numbers of parts greater than the number of elements in the
list result in the maximum possible number of lists being returned.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1017-L1040
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
name    = "shijian"
version = "2018-06-02T1644Z"
# module-level logger with colorised output via technicolor
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    """Create the global clock registry used by Clock instances and timers."""
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
):
    """
    Return the current UTC time styled per `style` (by default, UNIX time in
    whole seconds).
    """
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def time_UTC(
    style = None
):
    """
    Return the current UTC time styled per `style` (None selects the
    default filename-safe style of style_datetime_object).
    """
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
):
    """
    Return a collision-safe proposed filename based on the current UNIX
    time, with the specified extension appended if one is given.
    """
    name = str(time_UNIX(style = style))
    if extension:
        name = name + extension
    return propose_filename(filename = name)
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
):
    """
    Return a collision-safe proposed filename based on the current UTC time,
    with the specified extension appended if one is given.
    """
    name = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
    if extension:
        name = name + extension
    return propose_filename(filename = name)
def style_minimal_seconds(seconds):
    """
    Return a human-readable representation of a number of seconds, listing
    only the nonzero days, hours, minutes and seconds components, e.g.
    "1 days 2 hours 5 seconds".
    """
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for interval in ["days", "hours", "minutes", "seconds"]:
        amount = getattr(delta, interval)
        if amount:
            parts.append("{} {}".format(int(amount), interval))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Return the specified UNIX timestamp (interpreted as UTC) styled per
    `style`.
    """
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(datetime_object = moment, style = style)
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Return a string representation of a datetime.datetime or a
    datetime.timedelta, selected by the `style` keyphrase.

    For datetime objects, `style` names one of a fixed set of formats (see
    the branches below); unrecognised styles fall back to the filename-safe
    "YYYY-MM-DDTHHMMZ" form. For timedelta objects (or numbers of seconds),
    `style` is a str.format template with zero-padded component fields
    {YYYY} {DD} {HH} {MM} {SS} and total fields {Y} {D} {H} {M} {S}; the
    default keyphrase is mapped to "{DD} days, {HH}:{MM}:{SS}".

    Returns None if `datetime_object` is of neither handled type.
    """
    if type(datetime_object) is datetime.datetime:
        # filename safe
        if style == "YYYY-MM-DDTHHMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
        # filename safe with seconds
        elif style == "YYYY-MM-DDTHHMMSSZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%SZ")
        # filename safe with seconds and microseconds
        elif style == "YYYY-MM-DDTHHMMSSMMMMMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%S%fZ")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS UTC":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S UTC")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS Z":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S Z")
        # UNIX time in seconds with second fraction
        elif style == "UNIX time S.SSSSSS":
            return (datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds()
        # UNIX time in seconds rounded
        elif style == "UNIX time S":
            return int((datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds())
        # human-readable date
        elif style == "day DD month YYYY":
            return datetime_object.strftime("%A %d %B %Y")
        # human-readable time and date
        elif style == "HH:MM day DD month YYYY":
            return datetime_object.strftime("%H:%M %A %d %B %Y")
        # human-readable time with seconds and date
        elif style == "HH:MM:SS day DD month YYYY":
            return datetime_object.strftime("%H:%M:%S %A %d %B %Y")
        # human-readable date with time with seconds
        elif style == "day DD month YYYY HH:MM:SS":
            return datetime_object.strftime("%A %d %B %Y %H:%M:%S")
        # human-readable-audible time with seconds and date
        # NOTE(review): keyphrase contains "sounds", likely a typo for
        # "seconds" -- kept for backward compatibility with existing callers
        elif style == "HH hours MM minutes SS sounds day DD month YYYY":
            return datetime_object.strftime("%H hours %M minutes %S seconds %A %d %B %Y")
        # human-readable days, hours and minutes
        elif style == "DD:HH:MM":
            return datetime_object.strftime("%d:%H:%M")
        # human-readable days, hours, minutes and seconds
        elif style == "DD:HH:MM:SS":
            return datetime_object.strftime("%d:%H:%M:%S")
        # human-readable time with seconds
        elif style == "HH:MM:SS":
            return datetime_object.strftime("%H:%M:%S")
        # human-readable-audible time with seconds
        elif style == "HH hours MM minutes SS seconds":
            return datetime_object.strftime("%H hours %M minutes %S seconds")
        # filename safe
        else:
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
    if type(datetime_object) is datetime.timedelta:
        # map the default datetime keyphrase to a timedelta template
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        # decompose total seconds into years/days/hours/minutes/seconds,
        # keeping the running totals for the {Y} {D} {H} {M} {S} fields
        seconds_total = seconds
        minutes       = int(math.floor(seconds / 60))
        minutes_total = minutes
        seconds      -= minutes * 60
        hours         = int(math.floor(minutes / 60))
        hours_total   = hours
        minutes      -= hours * 60
        days          = int(math.floor(hours / 24))
        days_total    = days
        hours        -= days * 24
        years         = int(math.floor(days / 365))
        years_total   = years
        days         -= years * 365
        return style.format(**{
            "Y"   : years_total,
            "D"   : days_total,
            "H"   : hours_total,
            "M"   : minutes_total,
            "S"   : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD"  : str(days).zfill(2),
            "HH"  : str(hours).zfill(2),
            "MM"  : str(minutes).zfill(2),
            "SS"  : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert a time string "HHMM" to minutes past midnight."""
    return 60 * int(HHMM[:2]) + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time as minutes past midnight."""
    moment = datetime.datetime.utcnow()
    return moment.hour * 60 + moment.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop  = None  # string "HHMM" e.g. "1000"
):
    """
    Return True if the current UTC time falls within the specified daily
    time range (which may wrap past midnight, e.g. "1700--1000"), False
    otherwise, or None if no range is specified. The range is given either
    as a single "HHMM--HHMM" string or as separate start/stop strings.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        time_start, time_stop = time_range.split("--")[0], time_range.split("--")[1]
    minutes_per_day = 1440
    start = HHMM_to_minutes(time_start)
    stop  = HHMM_to_minutes(time_stop)
    # modular arithmetic handles ranges that wrap past midnight
    elapsed = (now_in_minutes() - start) % minutes_per_day
    span    = (stop - start) % minutes_per_day
    return elapsed <= span
def timer(function):
    """
    Decorator that times each call of `function` with a Clock named after
    the function. The clock registers itself with the global clocks
    registry when one exists, so call durations appear in clocks.report().
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # the previous version computed inspect.getcallargs(...) into an
        # unused local on every call; removed
        clock = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    A stopwatch that accumulates elapsed UTC wall-clock time.

    A clock can be started, stopped, updated and reset; `elapsed` returns
    the accumulation as a timedelta and `time` as seconds. If the
    module-level `clocks` registry exists, every new clock registers itself
    with it. By default a clock starts on instantiation.
    """
    def __init__(
        self,
        name  = None,
        start = True
    ):
        self._name            = name
        self._start           = start # Boolean start clock on instantiation
        self._start_time      = None  # internal (value to return)
        self._start_time_tmp  = None  # internal (value for calculations)
        self._stop_time       = None  # internal (value to return)
        self._update_time     = None  # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Record the start time (UTC)."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time     = datetime.datetime.utcnow()
    def stop(self):
        """Fold outstanding time into the accumulator and record the stop time."""
        self.update()
        self._update_time    = None
        self._start_time_tmp = None
        self._stop_time      = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        """Add the time elapsed since the last update (or start) to the accumulator."""
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the working start time."""
        self.accumulator     = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        """Return the accumulated elapsed time as a datetime.timedelta."""
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the accumulated elapsed time in seconds."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if the clock never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if the clock never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multiline report of the clock's name, start/stop and elapsed times."""
        string = "clock attribute".ljust(39)       + "value"
        string += "\nname".ljust(40)               + self.name()
        string += "\ntime start (s)".ljust(40)     + self.start_time()
        string += "\ntime stop (s)".ljust(40)      + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40)   + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the clock report."""
        print(self.report())
class Clocks(object):
    """
    A registry of Clock instances with aggregate reporting: either mean
    time per clock name ("statistics", the default) or every clock listed
    individually ("full").
    """
    def __init__(
        self
    ):
        self._list_of_clocks       = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock with this registry."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """Return a report string in the specified style ("statistics" or "full")."""
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks != []:
            if style == "statistics":
                # Create a dictionary of clock types with corresponding lists of
                # times for all instances.
                dictionary_of_clock_types = {}
                # Get the names of all clocks and add them to the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()] = []
                # Record the values of all clocks for their respective names in
                # the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()].append(clock.time())
                # Create a report, calculating the average value for each clock
                # type.
                string = "clock type".ljust(39) + "mean time (s)"
                for name, values in list(dictionary_of_clock_types.items()):
                    string += "\n" +\
                        str(name).ljust(39) + str(sum(values)/len(values))
                string += "\n"
            elif style == "full":
                # Create a report, listing the values of all clocks.
                string = "clock".ljust(39) + "time (s)"
                for clock in self._list_of_clocks:
                    string += "\n" +\
                        str(clock.name()).ljust(39) + str(clock.time())
                string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report in the specified style."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track the progress of a task as (fraction complete, UNIX time) data and
    report status including an estimated completion time, extrapolated by
    fitting a linear model (model_linear, defined elsewhere in this module)
    to the recorded data.
    """
    def __init__(
        self
    ):
        self.data              = []
        self.quick_calculation = False
        self.update_rate       = 1 # s
        self.clock             = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Record at most one datum per update_rate seconds."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Record every datum."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style    = None
    ):
        """Record a (fraction, UNIX time) datum and return a status string."""
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            # throttle recording to at most one datum per update_rate seconds
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """
        Return the extrapolated completion time as a datetime; returns 0
        when fewer than two data have been recorded, and the epoch when the
        linear fit fails.
        """
        if len(self.data) <= 1:
            return 0
        else:
            try:
                # linear fit time = b0 + b1 * fraction; completion at fraction 1
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            except:
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the styled estimated completion time (now if too few data)."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated time remaining in seconds (never negative)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded fraction complete."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recently recorded progress as a percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a status line (percentage, ETA, ETR); None for non-default styles."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA        = self.ETA(),
                ETR        = self.ETR()
            )
def UID():
    """Return a random version-4 UUID as a string."""
    identifier = uuid.uuid4()
    return str(identifier)
def unique_number(
    style = None
):
    """
    Return an integer unique within the current process, drawn from a
    module-global counter.

    With style "integer 3 significant figures" the sequence starts at 100
    and an exception is raised beyond 999; any other style yields an
    unbounded sequence starting at 1. The counters live in module globals,
    so uniqueness holds per process, not across runs.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        initial_number = 100
        # lazily create the module-global counter list on first use
        if "unique_numbers_3_significant_figures" not in globals():
            global unique_numbers_3_significant_figures
            unique_numbers_3_significant_figures = []
        if not unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(initial_number)
        else:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        # the 3-significant-figure sequence is exhausted beyond 999
        if\
            style == "integer 3 significant figures" and \
            unique_numbers_3_significant_figures[-1] > 999:
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        initial_number = 1
        # lazily create the module-global counter list on first use
        if "unique_numbers" not in globals():
            global unique_numbers
            unique_numbers = []
        if not unique_numbers:
            unique_numbers.append(initial_number)
        else:
            unique_numbers.append(unique_numbers[-1] + 1)
        return unique_numbers[-1]
def unique_3_digit_number():
    """Return the next unique 3 significant figure integer (100--999)."""
    return unique_number(style = "integer 3 significant figures")
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Make text safe for use as a filename or a URL.

    The text is normalised to ASCII (accents stripped via NFKD) and
    characters other than word characters, whitespace and hyphens are
    removed. In filename mode (default) whitespace runs become underscores;
    in URL mode the text is lowercased and whitespace/hyphen runs become
    single hyphens.
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    # raw strings: the previous non-raw patterns triggered invalid-escape
    # deprecation warnings on modern interpreters
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    """
    Return a proposed filename string.

    If no filename is specified, one is generated from the current UTC
    time. The filename is optionally slugified, by default excluding its
    extension from slugification. Unless `overwrite` is True, an integer
    suffix ("_1", "_2", ...) is appended as needed so that the proposed
    filename does not collide with an existing file.

    NOTE(review): when slugification is applied, any directory component of
    `filename` is dropped from the initial proposal and only reattached
    while resolving collisions -- confirm this is intended.
    """
    if not filename:
        filename = time_UTC()
    proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base = os.path.splitext(os.path.basename(filename))[0]
            extension = os.path.splitext(os.path.basename(filename))[1]
            proposed = slugify(text = base) + extension
        else:
            proposed = slugify(text = filename)
    if not overwrite:
        count = 0
        # append an increasing integer suffix until no existing file clashes
        while os.path.exists(proposed):
            count += 1
            directory = os.path.dirname(filename)
            base = os.path.splitext(os.path.basename(filename))[0]
            extension = os.path.splitext(os.path.basename(filename))[1]
            if directory:
                proposed = directory + "/" + base + "_" + str(count) + extension
            else:
                proposed = base + "_" + str(count) + extension
    return proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating a
    file at the filepath.
    """
    # a UUID replaces the previous use of tempfile._get_candidate_names,
    # which is a private API not guaranteed to exist across Python versions
    return "/tmp/" + uuid.uuid4().hex
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return the last `lines` lines of the specified file (as bytes, via the
    system `tail` program), or False if the file does not exist, is empty
    or cannot be read. User home and environment variables in the filepath
    are expanded.
    """
    try:
        path = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(path):
            return False
        text = subprocess.check_output(["tail", "-" + str(lines), path])
        return text if text else False
    except:
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Check that the platform release string contains `keyphrase`. When it
    does not, optionally log a warning (`warn`) and, if `require`, log a
    fatal message and raise EnvironmentError.
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message =\
        "inappropriate environment: " +\
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release   = release
        )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """
    Log and raise EnvironmentError if the specified program is not
    available on PATH (as determined by `which`).
    """
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is not None:
        log.debug("program {program} available".format(
            program = program
        ))
    else:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
def which(
    program
):
    """
    Return the full path of the specified executable, searching PATH when
    `program` has no directory component, or `program` itself when it is a
    direct path to an executable; return None if nothing is found.
    """
    def _is_executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        if _is_executable(program):
            return(program)
    else:
        for entry in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(entry.strip('"'), program)
            if _is_executable(candidate):
                return candidate
    return None
def running(
    program
):
    """
    Return True if a currently-running, non-defunct process listed by
    `ps -A` matches the specified program name, False otherwise.
    """
    target = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    return any(
        target in line and b"defunct" not in line for line in listing
    )
def ensure_file_existence(
    filename
):
    """
    Log and raise IOError if the specified file does not exist; environment
    variables in the filename are expanded before checking.
    """
    # NOTE(review): the format strings below had lost their "{filename}"
    # placeholders, so the .format(filename = ...) calls had no effect;
    # restored so the log messages name the file
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Remove the file at the specified path."""
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Find filenames at `directory` that contain digits and end with the
    specified extension, grouping them into sequences by their
    digit-independent pattern (digit runs replaced by "XXX").

    With `return_first_sequence_only` (default) return the naturally-sorted
    first sequence found; otherwise return the dict mapping each pattern to
    its list of filenames.
    """
    filenames_of_directory = os.listdir(directory)
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        # the digit-independent pattern identifies the sequence
        pattern = re.sub("\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        # NOTE(review): "first" is the first key in dict insertion order,
        # which follows os.listdir ordering -- confirm deterministic enough
        first_key_identified = next(iter(filename_sequences.keys()))
        # natural_sort is defined elsewhere in this module
        filename_sequence = \
            natural_sort(filename_sequences[first_key_identified])
        return filename_sequence
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """
    Return a list of the names of the files (not directories) at the
    specified directory.
    """
    entries = os.listdir(directory)
    return([
        entry for entry in entries
        if os.path.isfile(os.path.join(directory, entry))
    ])
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """
    Return a list of the paths of all files under the specified directory,
    descending recursively into subdirectories.
    """
    paths = []
    for root, _, filenames in os.walk(directory):
        for filename in filenames:
            paths.append(os.path.join(root, filename))
    return paths
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return absolute paths of the files in *directory*, optionally keeping only
    those whose extension contains *extension_required*. Raise IOError if the
    directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    paths = []
    for entry in os.listdir(directory):
        candidate = os.path.join(directory, entry)
        if os.path.isfile(candidate):
            paths.append(os.path.abspath(candidate))
    if extension_required:
        paths = [
            path for path in paths
            if extension_required in os.path.splitext(path)[1]
        ]
    return paths
def engage_command(
    command = None,
    background = True,
    timeout = None
):
    """
    Run a shell command via Bash.

    If *background* is True, launch the command detached and return None
    (*timeout* is ignored, with a warning). Otherwise block until the command
    completes and return its standard output as bytes, or False if the
    timeout expires (the process is killed in that case).
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash"
        )
        return None
    elif not background:
        process = subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash",
            stdout = subprocess.PIPE
        )
        try:
            process.wait(timeout = timeout)
            output, errors = process.communicate(timeout = timeout)
            return output
        # catch only the timeout; the previous bare except also swallowed
        # KeyboardInterrupt and SystemExit
        except subprocess.TimeoutExpired:
            process.kill()
            return False
    else:
        return None
def percentage_power():
    """
    Return the battery charge percentage reported by UPower as a string (e.g.
    "85%"), "100%" when only line power is detected, or None on any failure.
    """
    try:
        filenames_power = engage_command(command = "upower -e")
        filenames_power = [line for line in filenames_power.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # interpolate the device path (the {filename} placeholder was
            # missing, so upower was queried with a literal, invalid path)
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery))
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    # best-effort by design: any failure yields None, but do not swallow
    # KeyboardInterrupt/SystemExit as the previous bare except did
    except Exception:
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a list
    to a specified type and return the new list.

    Generalized: *element_type* may be any unary callable (str, int, float,
    ...); previously any type other than str silently returned None.
    """
    return [
        element_type(element) if not isinstance(element, list)
        else convert_type_list_elements(
            list_object = element,
            element_type = element_type
        )
        for element in list_object
    ]
class List_Consensus(list):
    """
    This class is designed to instantiate a list of elements. It features
    functionality that limits approximately the memory usage of the list. On
    estimating the size of the list as greater than the specified or default
    size limit, the list reduces the number of elements it contains. The list
    provides functionality to return its most frequent element, which can be
    used to determine its "consensus" element.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation (Python 2 needs the explicit super form)
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate size limit in bytes (ignored if None)."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        This function removes the least frequent elements until the size
        constraint is met.
        """
        if size is None:
            size = self.size_constraint
        while sys.getsizeof(self) > size:
            element_frequencies = collections.Counter(self)
            infrequent_element = element_frequencies.most_common()[-1:][0][0]
            self.remove(infrequent_element)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append an element, then optionally trim the list to the size limit."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None if it cannot be found."""
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        # IndexError: empty list; TypeError: unhashable elements; the
        # previous bare except also hid KeyboardInterrupt/SystemExit
        except (IndexError, TypeError):
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
):
    """Return *list_object* sorted so that embedded numbers order numerically."""
    def token_value(token):
        # numeric runs compare as integers; everything else case-insensitively
        return int(token) if token.isdigit() else token.lower()
    def sort_key(entry):
        return [token_value(token) for token in re.split("([0-9]+)", entry)]
    return sorted(list_object, key = sort_key)
def indices_of_list_element_duplicates(
    x
):
    """
    Yield the index of every element that repeats an earlier element. Lists
    and dictionaries are converted to tuples so they can be tracked in a set.
    """
    observed = set()
    for position, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield position
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
):
    """
    Return the indices of the *number* greatest values of *x*, ordered from
    smallest to greatest of those values.
    """
    number = min(number, len(x))
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _value in ranked[-number:]]
def unique_list_elements(x):
    """Return the elements of *x* with duplicates removed, order preserved."""
    deduplicated = []
    for element in x:
        # membership is tested by equality, so unhashable elements also work
        if element not in deduplicated:
            deduplicated.append(element)
    return deduplicated
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    This function returns the specified number of elements of a list spread
    approximately evenly.
    """
    # A request for the whole list (or more) needs no selection.
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    # A single element is taken from approximately the middle of the list.
    if number_of_elements == 1:
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # Recursive case: take one element roughly half a stride into the list,
    # then recurse on the remainder for the remaining elements.
    # NOTE(review): the spacing depends on this exact rounding arithmetic —
    # verify outputs before refactoring.
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def ranges_edge_pairs(
    extent = None,
    range_length = None
):
    """
    Return (start, stop) pairs that partition an extent into consecutive
    ranges of at most *range_length*; for example, to separate 76 variables
    into groups of at most 20 variables:

    >>> ranges_edge_pairs(
    ...     extent       = 76, # number of variables
    ...     range_length = 20  # maximum number of variables per plot
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    pairs = []
    number_of_ranges = int(math.ceil(extent / range_length))
    for position in range(number_of_ranges):
        start = position * range_length + position
        stop = min((position + 1) * range_length + position, extent)
        pairs.append((start, stop))
    return pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """
    Parse a Markdown bullet list of "- name: value" entries (nesting expressed
    by indentation) into nested dictionaries; an entry without a value opens a
    new branch.
    """
    entry_pattern = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branch_stack = [{}]
    for indentation, entry_name, entry_value in entry_pattern.findall(Markdown_list):
        indentation = len(indentation)
        if indentation > current_depth:
            assert not branch_stack[-1], "unexpected indent"
        elif indentation < current_depth:
            branch_stack.pop()
        branch_stack[-1][entry_name] = entry_value or {}
        if not entry_value:
            # descend into the newly-created branch
            branch_stack.append(branch_stack[-1][entry_name])
        current_depth = indentation
    return branch_stack[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """
    Parse a Markdown bullet list of "- name: value" entries (nesting expressed
    by indentation) into nested OrderedDicts, preserving entry order; an entry
    without a value opens a new branch.
    """
    entry_pattern = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branch_stack = [collections.OrderedDict()]
    for indentation, entry_name, entry_value in entry_pattern.findall(Markdown_list):
        indentation = len(indentation)
        if indentation > current_depth:
            assert not branch_stack[-1], "unexpected indent"
        elif indentation < current_depth:
            branch_stack.pop()
        branch_stack[-1][entry_name] = entry_value or collections.OrderedDict()
        if not entry_value:
            # descend into the newly-created branch
            branch_stack.append(branch_stack[-1][entry_name])
        current_depth = indentation
    return branch_stack[0]
def open_configuration(
    filename = None
):
    """
    Read a Markdown-list configuration file and return it parsed as nested
    OrderedDicts.
    """
    # context manager closes the file handle (it previously leaked)
    with open(filename, "r") as file_configuration:
        content = file_configuration.read()
    return Markdown_list_to_OrderedDict(content)
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
):
    """
    Resample a list of values to *length* points by interpolation over an
    implicit 0..len-1 axis. Return the new values (dimensions = 1) or an
    (x, y) tuple (dimensions = 2).
    """
    source_x = list(range(0, len(values)))
    interpolant = scipy.interpolate.interp1d(
        source_x,
        values,
        kind = interpolation_type
    )
    target_x = list(numpy.linspace(min(source_x), max(source_x), length))
    target_y = [float(interpolant(point)) for point in target_x]
    if dimensions == 1:
        return target_y
    elif dimensions == 2:
        return (target_x, target_y)
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """
    Convert a waveform (a NumPy array) in place to a rectangle waveform whose
    high and low levels are the waveform's original maximum and minimum, and
    return the mutated array.
    """
    peak_high = max(values)
    peak_low = min(values)
    values[values >= 0] = fraction_amplitude * peak_high
    values[values < 0] = fraction_amplitude * peak_low
    # undo the fractional scaling so levels return to full amplitude
    values[:] = [element * (1 / fraction_amplitude) for element in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
):
    """
    Read a WAV file, convert its waveform to a rectangle waveform, and write
    the result. The output filename defaults to the input filename and may be
    adjusted by propose_filename to avoid overwriting.
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    # the conversion is performed once by the helper; a duplicated inline
    # copy of the same transformation was removed here
    values = change_waveform_to_rectangle_waveform(
        values = values,
        fraction_amplitude = fraction_amplitude
    )
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """Scale the elements of *x* by 1/summation (default: the sum of *x*, i.e. normalize to unity)."""
    divisor = sum(x) if summation is None else summation
    return [element / divisor for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """Linearly map the values of *x* onto the range [minimum, maximum]."""
    lowest = min(x)
    highest = max(x)
    scale = (maximum - minimum) / (highest - lowest)
    return [minimum + (element - lowest) * scale for element in x]
def composite_variable(
    x
):
    """
    Combine the elements of *x* into a single number by weighting element i
    with (len(x) + 1) ** (i - 1).
    """
    base = len(x) + 1
    total = 0
    for position, element in enumerate(x):
        total += base ** (position - 1) * element
    return total
def model_linear(
    data = None,
    quick_calculation = False
):
    """
    Fit a least-squares line to a list of (x, y) pairs and return
    (intercept, slope). With quick_calculation, fit only on a spread of up to
    10 of the points.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x = 0
    sum_y = 0
    sum_x_squared = 0
    sum_xy = 0
    for x, y in data:
        sum_x += x
        sum_y += y
        sum_x_squared += x ** 2
        sum_xy += x * y
    slope = (sum_xy - (sum_x * sum_y) / n) / \
            (sum_x_squared - (sum_x ** 2) / n)
    intercept = (sum_y - slope * sum_x) / n
    return (intercept, slope)
def import_object(
    filename = None
):
    """
    Load and return a pickled object from *filename*.

    The file handle is closed deterministically (it previously leaked).
    """
    with open(filename, "rb") as file_object:
        return pickle.load(file_object)
def export_object(
    x,
    filename = None,
    overwrite = False
):
    """
    Pickle *x* to *filename*; the name may be adjusted by propose_filename to
    avoid overwriting. The file handle is closed deterministically (it
    previously leaked).
    """
    filename = propose_filename(
        filename = filename,
        overwrite = overwrite
    )
    with open(filename, "wb") as file_object:
        pickle.dump(x, file_object)
def string_to_bool(x):
    """Interpret common affirmative strings ("yes", "true", "t", "1"), case-insensitively, as True."""
    return x.lower() in {"yes", "true", "t", "1"}
def ustr(text):
    """
    Convert *text* to the natural text type of the running interpreter
    (unicode on Python 2, str on Python 3). None passes through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
):
    """
    Return the English-language text of a non-negative integer (given as an
    int or as a string of digits), e.g. 123 -> "one hundred twenty three".
    Numbers up to vigintillions (10^63 scale) are supported; 0 yields "".
    """
    ones = [
        "", "one ", "two ", "three ", "four ",
        "five ", "six ", "seven ", "eight ", "nine "
    ]
    teens = [
        "ten ", "eleven ", "twelve ", "thirteen ", "fourteen ",
        "fifteen ", "sixteen ", "seventeen ", "eighteen ", "nineteen "
    ]
    tens = [
        "", "", "twenty ", "thirty ", "forty ",
        "fifty ", "sixty ", "seventy ", "eighty ", "ninety "
    ]
    thousands = [
        "", "thousand ", "million ", "billion ", "trillion ",
        "quadrillion ", "quintillion ", "sextillion ", "septillion ",
        "octillion ", "nonillion ", "decillion ", "undecillion ",
        "duodecillion ", "tredecillion ", "quattuordecillion ",
        "quindecillion ", # trailing space restored for consistent word spacing
        "sexdecillion ", "septendecillion ", "octodecillion ",
        "novemdecillion ", "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """
    Return *text* with every run of digits replaced by its English-language
    equivalent.
    """
    # Split the text into text and numbers (raw string avoids the
    # invalid-escape warning "\d" raises on modern Python).
    text = re.split(r"(\d+)", text)
    if text[-1] == "":
        text = text[:-1]
    text_translated = []
    # Replace numbers with English text.
    for text_segment in text:
        if all(character.isdigit() for character in text_segment):
            text_translated.append(number_to_English_text(number = text_segment))
        else:
            text_translated.append(text_segment)
    return "".join(text_translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
):
    """
    This function replaces contractions with full words and replaces numbers
    with digits in specified text. There is the option to remove articles.
    """
    # The lookup tables are built once, before the loop; they were previously
    # reconstructed on every word iteration.
    contractions_expansions = {
        "ain't": "is not", "aren't": "are not", "can't": "can not",
        "could've": "could have", "couldn't": "could not",
        "didn't": "did not", "doesn't": "does not", "don't": "do not",
        "gonna": "going to", "gotta": "got to", "hadn't": "had not",
        "hasn't": "has not", "haven't": "have not", "he'd": "he would",
        "he'll": "he will", "he's": "he is", "how'd": "how did",
        "how'll": "how will", "how's": "how is", "I'd": "I would",
        "I'll": "I will", "I'm": "I am", "I've": "I have",
        "isn't": "is not", "it'd": "it would", "it'll": "it will",
        "it's": "it is", "mightn't": "might not", "might've": "might have",
        "mustn't": "must not", "must've": "must have",
        "needn't": "need not", "oughtn't": "ought not",
        "shan't": "shall not", "she'd": "she would", "she'll": "she will",
        "she's": "she is", "shouldn't": "should not",
        "should've": "should have", "somebody's": "somebody is",
        "someone'd": "someone would", "someone'll": "someone will",
        "someone's": "someone is", "that'll": "that will",
        "that's": "that is", "that'd": "that would",
        "there'd": "there would", "there're": "there are",
        "there's": "there is", "they'd": "they would",
        "they'll": "they will", "they're": "they are",
        "they've": "they have", "wasn't": "was not", "we'd": "we would",
        "we'll": "we will", "we're": "we are", "we've": "we have",
        "weren't": "were not", "what'd": "what did",
        "what'll": "what will", "what're": "what are", "what's": "what is",
        "whats": "what is", "what've": "what have", "when's": "when is",
        "when'd": "when did", "where'd": "where did",
        "where's": "where is", "where've": "where have",
        "who'd": "who would", "who'd've": "who would have",
        "who'll": "who will", "who're": "who are", "who's": "who is",
        "who've": "who have", "why'd": "why did", "why're": "why are",
        "why's": "why is", "won't": "will not",
        "won't've": "will not have", "would've": "would have",
        "wouldn't": "would not", "wouldn't've": "would not have",
        "y'all": "you all", "ya'll": "you all", "you'd": "you would",
        "you'd've": "you would have", "you'll": "you will",
        "y'aint": "you are not", "y'ain't": "you are not",
        "you're": "you are", "you've": "you have"
    }
    numbers_digits = {
        "zero": "0", "one": "1", "two": "2", "three": "3", "four": "4",
        "five": "5", "six": "6", "seven": "7", "eight": "8", "nine": "9",
        "ten": "10", "eleven": "11", "twelve": "12", "thirteen": "13",
        "fourteen": "14", "fifteen": "15", "sixteen": "16",
        "seventeen": "17", "eighteen": "18", "nineteen": "19",
        "twenty": "20"
    }
    words = text.split()
    text_translated = ""
    for word in words:
        if remove_articles and word in ["a", "an", "the"]:
            continue
        if word in contractions_expansions:
            word = contractions_expansions[word]
        if word in numbers_digits:
            word = numbers_digits[word]
        text_translated += " " + word
    return text_translated.strip()
def split_into_sentences(
    text = None
):
    """
    Split prose into a list of sentences, protecting periods that belong to
    honorifics, company suffixes, acronyms, single-capital initials, "Ph.D."
    and web domains via <prd>/<stop> placeholder substitution.
    """
    capitals = "([A-Z])"
    prefixes = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    # raw string: the former plain "\s" escapes trigger invalid-escape
    # warnings on modern Python (same string value either way)
    starters = r"(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = "[.](com|gov|io|net|org|pro)"
    text = " " + text + " "
    text = text.replace("\n", " ")
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub(r"\s" + capitals + "[.] ", " \\1<prd> ", text)
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + capitals + "[.]", " \\1<prd>", text)
    # move terminal punctuation outside closing quotation marks
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
):
    """Drop the (possibly incomplete) first sentence and rejoin the rest."""
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """Return a pseudorandom MAC address such as "0f:3a:57:9c:21:be"."""
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{octet:02x}".format(octet = octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
):
    """
    Fetch an attribute of an object by name; a name like "values[2]" indexes
    into the attribute. On any failure the imputation default is returned.
    """
    # deliberately broad: any failure (missing attribute, bad index, ...)
    # yields the imputation default
    try:
        if "[" in name and "]" in name:
            attribute_name, remainder = name.split("[", 1)
            position = int(remainder.split("]")[0])
            return getattr(object_instance, attribute_name)[position]
        return getattr(object_instance, name)
    except:
        return imputation_default_value
def generate_Python_variable_names(
    number = 10
):
    """Return *number* 32-character UUID-derived names that start with a letter."""
    generated = []
    while len(generated) < number:
        candidate = uuid.uuid4().hex
        # reject candidates that start with a digit (invalid identifiers)
        if candidate[0].isalpha():
            generated.append(candidate)
    return generated
def add_time_variables(df, reindex = True):
    """
    Return a DataFrame with variables for weekday index, weekday name, timedelta
    through day, fraction through day, hours through day and days through week
    added, with the index set to datetime. It is assumed that the variable
    `datetime` exists; False is returned if it does not.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # .dt.day_name() replaces .dt.weekday_name, which was removed in pandas 1.0
    df["weekday_name"] = df["datetime"].dt.day_name()
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    df.index = df["datetime"]
    #del df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create daily plots of a variable in a DataFrame, optionally renormalized. It
    is assumed that the DataFrame index is datetime.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # NOTE(review): groups by day-of-month, so rows with equal day numbers
    # from different months land in the same group — confirm intended.
    days = []
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        # optional per-day min-max rescaling to [0, 1]
        if renormalize:
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        # requires the `hours_through_day` column (see add_time_variables)
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create weekly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the variable `days_through_week` exists.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    # one group per ISO week number of the datetime index
    weeks = []
    for group in df.groupby(df.index.week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        # optional per-week min-max rescaling to [0, 1]
        if renormalize:
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    # label horizontal-axis ticks at each day's midpoint
    plt.xticks(
        [ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create yearly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the DataFrame index is datetime.
    """
    # NOTE(review): horizontal_axis_labels_days is accepted but never used.
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year in years:
        # optional per-year min-max rescaling to [0, 1]
        if renormalize:
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        # requires the `days_through_year` column (see add_time_variables)
        if plot:
            plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # label ticks at the approximate midpoint of each month
        plt.xticks(
            [ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df = None,
    variable = None,
    window = 20,
    upper_factor = 2,
    lower_factor = 2
):
    """
    Add rolling statistics variables derived from a specified variable in a
    DataFrame: rolling mean, rolling standard deviation, and mean plus/minus
    a factor of the standard deviation as upper/lower bounds.
    """
    # the pd.stats.moments API was removed from pandas; use the equivalent
    # Series.rolling interface
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"] = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = [],
    variables_exclude = []
):
    """
    Rescale variables in a DataFrame, excluding variables with NaNs and strings,
    excluding specified variables, and including specified variables.
    """
    # copy instead of aliasing: the previous code extended variables_exclude
    # in place, mutating both the caller's list and the shared mutable
    # default, so exclusions accumulated across calls
    variables_not_rescale = list(variables_exclude)
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Create a day-long histogram of counts of the variable for each hour. It is
    assumed that the DataFrame index is datetime and that the variable
    `hour` exists.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # count non-null values of the variable per hour-of-day of the index
    # (NOTE(review): the docstring mentions a `hour` column, but the
    # index's hour attribute is what is actually used)
    counts = df.groupby(df.index.hour)[variable].count()
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
):
    """
    Create a week-long histogram of counts of the variable for each weekday,
    ordered Monday through Sunday. It is assumed that the DataFrame index is
    datetime.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # DatetimeIndex.day_name() replaces .weekday_name, removed in pandas 1.0
    counts = df.groupby(df.index.day_name())[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
):
    """
    Create a year-long histogram of counts of the variable for each month. It is
    assumed that the DataFrame index is datetime and that the variable
    `month_name` exists.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # count non-null values of the variable per month name of the index,
    # reindexed so bars appear in calendar order (January..December)
    counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Set up a Jupyter notebook with a few defaults.
    """
    # seaborn paper styling, suppressed warnings, wide pandas displays and a
    # large default figure size
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    pd.set_option("display.max_rows", 500)
    pd.set_option("display.max_columns", 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
):
    """
    Display a progress bar widget in a Jupyter notebook. Its dependencies must
    be enabled on launching Jupyter, such as in the following way:

    jupyter nbextension enable --py widgetsnbextension

    The progress bar can be used in a way like the following:

    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    # a sequence without a length is treated as an iterator of unknown size
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200)     # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        # unknown total: show an indeterminate ("info") bar
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        # update the widget every `every` items while yielding each record
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name = name,
                        index = index,
                        size = size
                    )
            yield record
    except:
        # colour the bar red on any error, then re-raise it unchanged
        progress.bar_style = "danger"
        raise
    else:
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name = name,
            index = str(index or "?")
        )
_main() # create the module-level clocks registry at import time (see _main)
|
wdbm/shijian
|
shijian.py
|
ranges_edge_pairs
|
python
|
def ranges_edge_pairs(
extent = None,
range_length = None
):
number_of_ranges = int(math.ceil(extent / range_length))
return [
(
index * range_length + index,
min((index + 1) * range_length + index, extent)
)
for index in range(0, number_of_ranges)
]
|
Return the edges of ranges within an extent of some length. For example, to
separate 76 variables into groups of at most 20 variables, the ranges of the
variables could be 0 to 20, 21 to 41, 42 to 62 and 63 to 76. These range
edges could be returned by this function as a list of tuples:
>>> ranges_edge_pairs(
... extent = 76, # number of variables
... range_length = 20 # maximum number of variables per plot
... )
[(0, 20), (21, 41), (42, 62), (63, 76)]
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1042-L1065
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
# module identity and a module-level, colourised logger
name = "shijian"
version = "2018-06-02T1644Z"
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    """Create the module-level Clocks registry (run once at import time)."""
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
):
    """Return the current UTC time styled as UNIX time (whole seconds by default)."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def time_UTC(
    style = None
):
    """Return the current UTC time styled per *style* (see style_datetime_object)."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
):
    """
    Return a proposed filename based on the current UNIX time, optionally with
    an extension appended.
    """
    proposed = str(time_UNIX(style = style))
    if extension:
        proposed = proposed + extension
    return propose_filename(filename = proposed)
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
):
    """
    Return a proposed filename based on the current UTC time, optionally with
    an extension appended.
    """
    proposed = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
    if extension:
        proposed = proposed + extension
    return propose_filename(filename = proposed)
def style_minimal_seconds(seconds):
    """
    Express a number of seconds as e.g. "2 days 3 hours 4 minutes 5 seconds",
    omitting units whose magnitude is zero.
    """
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for unit in ["days", "hours", "minutes", "seconds"]:
        magnitude = getattr(delta, unit)
        if magnitude:
            parts.append("{} {}".format(int(magnitude), unit))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """Style a UNIX timestamp (seconds) as a UTC datetime string."""
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(datetime_object = moment, style = style)
def style_datetime_object(
    datetime_object = None,
    style           = "YYYY-MM-DDTHHMMZ"
):
    """
    Render a datetime.datetime or datetime.timedelta as text.

    For datetime objects *style* names one of a fixed set of formats
    (filename-safe, human-readable, UNIX time); unknown styles fall back to
    the filename-safe "YYYY-MM-DDTHHMMZ" format. For timedelta objects
    *style* is a format template with placeholders {Y}/{D}/{H}/{M}/{S}
    (totals) and {YYYY}/{DD}/{HH}/{MM}/{SS} (zero-padded components).
    Objects of any other type yield None.
    """
    if type(datetime_object) is datetime.datetime:
        epoch = datetime.datetime.utcfromtimestamp(0)
        # UNIX time in seconds with second fraction
        if style == "UNIX time S.SSSSSS":
            return (datetime_object - epoch).total_seconds()
        # UNIX time in seconds rounded
        if style == "UNIX time S":
            return int((datetime_object - epoch).total_seconds())
        # Styles that map directly to strftime format strings.
        strftime_formats = {
            "YYYY-MM-DDTHHMMZ":           "%Y-%m-%dT%H%MZ",
            "YYYY-MM-DDTHHMMSSZ":         "%Y-%m-%dT%H%M%SZ",
            "YYYY-MM-DDTHHMMSSMMMMMMZ":   "%Y-%m-%dT%H%M%S%fZ",
            "YYYY-MM-DD HH:MM:SS UTC":    "%Y-%m-%d %H:%M:%S UTC",
            "YYYY-MM-DD HH:MM:SS Z":      "%Y-%m-%d %H:%M:%S Z",
            "day DD month YYYY":          "%A %d %B %Y",
            "HH:MM day DD month YYYY":    "%H:%M %A %d %B %Y",
            "HH:MM:SS day DD month YYYY": "%H:%M:%S %A %d %B %Y",
            "day DD month YYYY HH:MM:SS": "%A %d %B %Y %H:%M:%S",
            "HH hours MM minutes SS sounds day DD month YYYY":
                "%H hours %M minutes %S seconds %A %d %B %Y",
            "DD:HH:MM":                   "%d:%H:%M",
            "DD:HH:MM:SS":                "%d:%H:%M:%S",
            "HH:MM:SS":                   "%H:%M:%S",
            "HH hours MM minutes SS seconds":
                "%H hours %M minutes %S seconds"
        }
        # Unknown styles fall back to the filename-safe format.
        return datetime_object.strftime(
            strftime_formats.get(style, "%Y-%m-%dT%H%MZ")
        )
    if type(datetime_object) is datetime.timedelta:
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            remaining = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            remaining = int(datetime_object)
        seconds_total = remaining
        minutes       = int(math.floor(remaining / 60))
        minutes_total = minutes
        remaining    -= minutes * 60
        hours         = int(math.floor(minutes / 60))
        hours_total   = hours
        minutes      -= hours * 60
        days          = int(math.floor(hours / 24))
        days_total    = days
        hours        -= days * 24
        years         = int(math.floor(days / 365))
        years_total   = years
        days         -= years * 365
        return style.format(**{
            "Y":    years_total,
            "D":    days_total,
            "H":    hours_total,
            "M":    minutes_total,
            "S":    seconds_total,
            "YYYY": str(years).zfill(4),
            "DD":   str(days).zfill(2),
            "HH":   str(hours).zfill(2),
            "MM":   str(minutes).zfill(2),
            "SS":   str(remaining).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert a 24-hour "HHMM" time string to minutes past midnight."""
    return int(HHMM[:2]) * 60 + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time as minutes past midnight."""
    current = datetime.datetime.utcnow()
    return current.hour * 60 + current.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop  = None  # string "HHMM" e.g. "1000"
):
    """
    Return True if the current UTC time falls within a daily time window,
    which may wrap past midnight; return None when no window is specified.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        time_start, time_stop = time_range.split("--")[0], time_range.split("--")[1]
    minutes_per_day = 1440
    start = HHMM_to_minutes(time_start)
    stop  = HHMM_to_minutes(time_stop)
    now   = now_in_minutes()
    # Modular arithmetic handles windows that wrap past midnight.
    return (now - start) % minutes_per_day <= (stop - start) % minutes_per_day
def timer(function):
    """
    Decorator that times each call of *function* using a Clock named after the
    function; the clock registers itself with the global clock registry when
    one exists.
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # The original computed inspect.getcallargs(...) into an unused local;
        # that dead work is removed.
        clock = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    Stopwatch-style timer accumulating elapsed wall-clock (UTC) time.

    Elapsed time accumulates across start/stop cycles in ``self.accumulator``
    (a datetime.timedelta). A new clock registers itself with the module-level
    ``clocks`` registry when one exists and, by default, starts immediately.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Start (or restart) timing from now."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Stop timing, folding the running interval into the accumulator."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        """Fold time since the last update (or since start) into the accumulator."""
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the working start time."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        """Return accumulated elapsed time as a datetime.timedelta."""
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return accumulated elapsed time in seconds (float)."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multi-line textual report of the clock's attributes."""
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the clock report."""
        print(self.report())
class Clocks(object):
    """
    Registry of Clock instances with aggregate reporting.

    Two report styles are available: "statistics" (mean time per clock name)
    and "full" (the time of every registered clock).
    """
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock instance."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """
        Return a textual report of the registered clocks.

        Returns "no clocks" when the registry is empty or the style is
        unrecognised. (The original raised NameError -- unbound ``string`` --
        when asked to report an empty registry.)
        """
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks and style == "statistics":
            # Group clock times by clock name, preserving first-seen order.
            times_by_name = {}
            for clock in self._list_of_clocks:
                times_by_name.setdefault(clock.name(), []).append(clock.time())
            # Report the mean time for each clock name.
            string = "clock type".ljust(39) + "mean time (s)"
            for name, values in list(times_by_name.items()):
                string += "\n" +\
                    str(name).ljust(39) + str(sum(values) / len(values))
            string += "\n"
        elif self._list_of_clocks and style == "full":
            # Report the time of every clock individually.
            string = "clock".ljust(39) + "time (s)"
            for clock in self._list_of_clocks:
                string += "\n" +\
                    str(clock.name()).ljust(39) + str(clock.time())
            string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print a report of the registered clocks."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track the progressing fraction of a long-running task and estimate its
    completion time by fitting a linear model to (fraction, UNIX time) data.

    Quick-calculation mode throttles datum recording to at most one per
    ``update_rate`` seconds (after the first datum) and fits the model on a
    spread of points only.
    """
    def __init__(
        self
    ):
        self.data = [] # list of (fraction complete, UNIX timestamp) tuples
        self.quick_calculation = False
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Enable throttled datum recording and spread-based fitting."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Disable quick-calculation mode."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """
        Record a (fraction, time) datum and return a status message.

        In quick-calculation mode, data after the first are recorded at most
        once per update_rate seconds.
        """
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """
        Extrapolate the linear fit (time versus fraction) to fraction = 1 and
        return the predicted completion time as a datetime. Returns 0 with
        fewer than two data; falls back to the epoch when the fit fails.
        """
        if len(self.data) <= 1:
            return 0
        else:
            try:
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            except:
                # Degenerate data (e.g. identical fractions) make the fit
                # fail; fall back to the epoch.
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the styled estimated completion time (now, with fewer than two data)."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated seconds remaining (0 with insufficient data or when overdue)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded fraction complete."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recently recorded percentage complete."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a one-line progress status message (only the default style is defined)."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """Return a random universally-unique identifier as a string."""
    identifier = uuid.uuid4()
    return str(identifier)
def unique_number(
    style = None
):
    """
    Return a number unique within the current process.

    With style "integer 3 significant figures", numbers count up from 100 and
    an exception is raised once 999 is exceeded; otherwise numbers count up
    from 1. Issued numbers are tracked in module-level lists.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        if "unique_numbers_3_significant_figures" not in globals():
            global unique_numbers_3_significant_figures
            unique_numbers_3_significant_figures = []
        if unique_numbers_3_significant_figures:
            next_number = unique_numbers_3_significant_figures[-1] + 1
        else:
            next_number = 100
        unique_numbers_3_significant_figures.append(next_number)
        if next_number > 999:
            raise Exception
        return next_number
    # mode: integer
    else:
        if "unique_numbers" not in globals():
            global unique_numbers
            unique_numbers = []
        if unique_numbers:
            next_number = unique_numbers[-1] + 1
        else:
            next_number = 1
        unique_numbers.append(next_number)
        return next_number
def unique_3_digit_number():
    """Return a process-unique integer in the range 100-999."""
    return unique_number(style = "integer 3 significant figures")
## @brief make text filename or URL safe
def slugify(
    text       = None,
    filename   = True,
    URL        = False,
    return_str = True
):
    """
    Normalise *text* to ASCII and make it safe for use as a filename
    (whitespace becomes underscores) or as a URL (lowercased; whitespace and
    hyphens collapse to single hyphens).
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    # Transliterate to ASCII, dropping characters with no ASCII equivalent.
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    # Remove everything except word characters, whitespace and hyphens.
    # (Raw strings fix the invalid escape sequences of the original.)
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
# @detail Returns a filename string, generated from the current time when no
# default is given, optionally slugified, and (unless overwrite is allowed)
# suffixed with an incrementing integer until it no longer collides with an
# existing file.
def propose_filename(
    filename                       = None,
    overwrite                      = False,
    slugify_filename               = True,
    exclude_extension_from_slugify = True
):
    """Return a filename, slugified and made collision-safe unless overwrite is True."""
    # If no file name is specified, generate one.
    if not filename:
        filename = time_UTC()
    proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base      = os.path.splitext(os.path.basename(filename))[0]
            extension = os.path.splitext(os.path.basename(filename))[1]
            proposed  = slugify(text = base) + extension
        else:
            proposed = slugify(text = filename)
    if not overwrite:
        # Append an incrementing counter (derived from the original, not the
        # slugified, name) until the filename does not already exist.
        count = 0
        while os.path.exists(proposed):
            count += 1
            directory = os.path.dirname(filename)
            base      = os.path.splitext(os.path.basename(filename))[0]
            extension = os.path.splitext(os.path.basename(filename))[1]
            candidate = base + "_" + str(count) + extension
            if directory:
                proposed = directory + "/" + candidate
            else:
                proposed = candidate
    return proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating a
    file at the filepath.
    """
    # Use uuid rather than the private tempfile._get_candidate_names API,
    # which is an undocumented implementation detail and may change between
    # Python versions.
    return "/tmp/" + uuid.uuid4().hex
def tail(
    filepath = "log.txt",
    lines    = 50
):
    """
    Return a specified number of last lines of a specified file. If there is an
    error or the file does not exist, return False.
    """
    try:
        resolved = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(resolved):
            return False
        text = subprocess.check_output(["tail", "-" + str(lines), resolved])
        return text if text else False
    except:
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require   = True,
    warn      = False
):
    """
    Check that the platform release string contains *keyphrase*; on mismatch,
    optionally warn and/or raise EnvironmentError.
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message =\
        "inappropriate environment: " +\
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release = release
        )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """Raise EnvironmentError when *program* cannot be found on the executable path."""
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is not None:
        log.debug("program {program} available".format(
            program = program
        ))
    else:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
def which(
    program
):
    """
    Return the full path of *program* if it is an executable file, searching
    PATH for bare names; return None when not found.
    """
    def _executable(candidate):
        # A usable program is a regular file with the execute bit set.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    directory, _basename = os.path.split(program)
    if directory:
        if _executable(program):
            return(program)
    else:
        for path_entry in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(path_entry.strip('"'), program)
            if _executable(candidate):
                return candidate
    return None
def running(
    program
):
    """Return True when a non-defunct process matching *program* appears in ``ps -A``."""
    pattern = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    for line in listing:
        if pattern in line and b"defunct" not in line:
            return True
    return False
def ensure_file_existence(
    filename
):
    """
    Raise IOError when *filename* (with environment variables expanded) does
    not exist as a regular file.
    """
    # The log messages had lost their {filename} placeholder (the .format
    # calls passed a filename keyword that was never interpolated); restored.
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the file at *filename*."""
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension                  = "png",
    directory                  = ".",
    return_first_sequence_only = True,
):
    """
    Find filenames in *directory* that contain digits and have *extension*,
    grouping filenames that differ only in their digit runs into sequences.

    Return the naturally-sorted first sequence found, or, when
    return_first_sequence_only is False, a dictionary mapping each digit-run
    pattern to its list of filenames.
    """
    filenames_of_directory = os.listdir(directory)
    # Keep only filenames containing at least one digit run before the
    # extension. (Raw strings fix the invalid escape sequences of the
    # original.)
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        # Filenames that differ only in their digit runs share a pattern and
        # hence belong to the same sequence.
        pattern = re.sub(r"\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        first_key_identified = next(iter(filename_sequences.keys()))
        filename_sequence = \
            natural_sort(filename_sequences[first_key_identified])
        return filename_sequence
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the names of the regular files (not directories) in *directory*."""
    entries = os.listdir(directory)
    return([
        entry for entry in entries
        if os.path.isfile(os.path.join(directory, entry))
    ])
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """Return the paths of all files under *directory*, descending recursively."""
    collected = []
    for root, _subdirectories, filenames in os.walk(directory):
        collected.extend(os.path.join(root, filename) for filename in filenames)
    return collected
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory          = None,
    extension_required = None
):
    """
    Return the absolute paths of the files in *directory*, optionally keeping
    only those whose extension contains *extension_required*. Raise IOError
    when the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for entry in os.listdir(directory):
        candidate = os.path.join(directory, entry)
        if os.path.isfile(candidate):
            filepaths.append(os.path.abspath(candidate))
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command    = None,
    background = True,
    timeout    = None
):
    """
    Run *command* in Bash. In background mode the process is detached and None
    is returned (any timeout is ignored); in foreground mode the command's
    standard output is returned, or False when it does not finish within
    *timeout* seconds.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell      = True,
            executable = "/bin/bash"
        )
        return None
    process = subprocess.Popen(
        [command],
        shell      = True,
        executable = "/bin/bash",
        stdout     = subprocess.PIPE
    )
    try:
        process.wait(timeout = timeout)
        output, _errors = process.communicate(timeout = timeout)
        return output
    except:
        process.kill()
        return False
def percentage_power():
    """
    Return the battery charge percentage as a string (e.g. "85%"), "100%" when
    only line power is detected, or None when power status cannot be
    determined.
    """
    try:
        filenames_power = engage_command(command = "upower -e")
        filenames_power = [line for line in filenames_power.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # The device path placeholder had been lost from this command
            # string ("upower -i " was queried with no device); restored.
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery))
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    except:
        return None
def convert_type_list_elements(
    list_object  = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a list
    to a specified type and return the new list.

    Generalised: *element_type* may be any callable type (str, int, float,
    ...); previously any type other than str silently returned None.
    """
    return [
        element_type(element) if not isinstance(element, list)
        else convert_type_list_elements(
            list_object  = element,
            element_type = element_type
        )
        for element in list_object
    ]
class List_Consensus(list):
    """
    This class is designed to instantiate a list of elements. It features
    functionality that limits approximately the memory usage of the list. On
    estimating the size of the list as greater than the specified or default
    size limit, the list reduces the number of elements it contains. The list
    provides functionality to return its most frequent element, which can be
    used to determine its "consensus" element.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        # approximate maximum size of this list object in bytes (measured
        # with sys.getsizeof, which is shallow and does not count elements)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate maximum size (bytes); None leaves it unchanged."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        This function removes the least frequent elements until the size
        constraint is met.
        """
        if size is None:
            size = self.size_constraint
        while sys.getsizeof(self) > size:
            element_frequencies = collections.Counter(self)
            # Drop one of the least common elements per pass.
            infrequent_element = element_frequencies.most_common()[-1:][0][0]
            self.remove(infrequent_element)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append *element*, then optionally trim to the size constraint."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None when the list is empty."""
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        except:
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
):
    """Sort strings so that embedded numbers order numerically ("a2" before "a10")."""
    def _natural_key(text):
        return [
            int(token) if token.isdigit() else token.lower()
            for token in re.split("([0-9]+)", text)
        ]
    return sorted(list_object, key = _natural_key)
def indices_of_list_element_duplicates(
    x
):
    """
    Yield the index of every element that has appeared earlier in *x*;
    lists and dicts are compared via tuples of their contents/items.
    """
    observed = set()
    for index, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
):
    """Return the indices of the (up to) *number* greatest values of *x*, ascending by value."""
    limit = len(x) if len(x) <= number else number
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _value in ranked[-limit:]]
def unique_list_elements(x):
    """
    Return the elements of *x* with duplicates removed, keeping first-seen
    order. Uses list membership, so unhashable elements are supported.
    """
    deduplicated = []
    for element in x:
        if element in deduplicated:
            continue
        deduplicated.append(element)
    return deduplicated
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    This function returns the specified number of elements of a list spread
    approximately evenly.
    """
    # Fewer elements than requested: return them all.
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    # A single element is taken from (approximately) the middle of the list.
    if number_of_elements == 1:
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # Take one element near the start of the first of number_of_elements
    # roughly-equal spans, then recurse on the remainder of the list with one
    # fewer element to select.
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def split_list(
    list_object = None,
    granularity = None
):
    """
    This function splits a list into a specified number of lists. It returns a
    list of lists that correspond to these parts. Negative numbers of parts are
    not accepted and numbers of parts greater than the number of elements in the
    list result in the maximum possible number of lists being returned.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    # When there are no more elements than parts, every element gets its own
    # list.
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    part_length = len(list_object) / float(granularity)
    parts = []
    position = float(0)
    while position < len(list_object):
        parts.append(list_object[int(position):int(position + part_length)])
        position += part_length
    return parts
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list of "- name: value" entries into a
    nested dictionary; an entry without a value starts a nested branch.
    """
    entry = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branches = [{}]
    for indentation, name, value in entry.findall(Markdown_list):
        level = len(indentation)
        if level > current_depth:
            assert not branches[-1], "unexpected indent"
        elif level < current_depth:
            branches.pop()
        branches[-1][name] = value or {}
        if not value:
            # A value-less entry opens a new nested branch.
            branches.append(branches[-1][name])
        current_depth = level
    return branches[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list of "- name: value" entries into a
    nested collections.OrderedDict, preserving entry order; an entry without a
    value starts a nested branch.
    """
    entry = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branches = [collections.OrderedDict()]
    for indentation, name, value in entry.findall(Markdown_list):
        level = len(indentation)
        if level > current_depth:
            assert not branches[-1], "unexpected indent"
        elif level < current_depth:
            branches.pop()
        branches[-1][name] = value or collections.OrderedDict()
        if not value:
            # A value-less entry opens a new nested branch.
            branches.append(branches[-1][name])
        current_depth = level
    return branches[0]
def open_configuration(
    filename = None
):
    """Read a Markdown-list configuration file and parse it into an OrderedDict."""
    # A context manager closes the file handle (the original leaked it).
    with open(filename, "r") as configuration_file:
        file_configuration = configuration_file.read()
    return Markdown_list_to_OrderedDict(file_configuration)
def change_list_resolution(
    values             = None,
    length             = None,
    interpolation_type = "linear",
    dimensions         = 1
):
    """
    Resample *values* to *length* points by interpolation over their indices.
    Return the new y values (dimensions = 1) or an (x, y) tuple
    (dimensions = 2).
    """
    x_original = list(range(0, len(values)))
    interpolate = scipy.interpolate.interp1d(
        x_original,
        values,
        kind = interpolation_type
    )
    x_new = list(numpy.linspace(min(x_original), max(x_original), length))
    y_new = [float(interpolate(x)) for x in x_new]
    if dimensions == 1:
        return y_new
    elif dimensions == 2:
        return (x_new, y_new)
def change_waveform_to_rectangle_waveform(
    values             = None,
    fraction_amplitude = 0.01
):
    """
    Convert a waveform in place to a rectangle waveform: non-negative samples
    become the waveform maximum, negative samples become the minimum. Returns
    the mutated array. Assumes *values* supports NumPy-style boolean-mask
    indexing (e.g. a numpy array).
    """
    # Clamp to small positive/negative levels first, then scale back up,
    # matching the original two-step procedure.
    positive_level = fraction_amplitude * max(values)
    values[values >= 0] = positive_level
    negative_level = fraction_amplitude * min(values)
    values[values < 0] = negative_level
    values[:] = [sample * (1 / fraction_amplitude) for sample in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform           = None,
    filename_rectangle_waveform = None,
    overwrite                   = False,
    fraction_amplitude          = 0.01
):
    """
    Read a WAV file, convert its waveform to a rectangle waveform and write
    the result to a (collision-safe, unless overwrite) output WAV file.
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename  = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    # The conversion helper clamps and rescales the samples; the original
    # redundantly re-applied the same three operations here a second time.
    values = change_waveform_to_rectangle_waveform(
        values             = values,
        fraction_amplitude = fraction_amplitude
    )
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """Divide every element of *x* by *summation* (default: sum(x), normalising to unity)."""
    divisor = sum(x) if summation is None else summation
    return [element / divisor for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """Linearly rescale the elements of *x* so they span [minimum, maximum]."""
    lowest = min(x)
    scale = (maximum - minimum) / (max(x) - lowest)
    return [minimum + (element - lowest) * scale for element in x]
def composite_variable(
    x
):
    """
    Combine the values of *x* into one number using powers of
    k = len(x) + 1: the sum over i of k**(i - 1) * x[i].
    """
    base = len(x) + 1
    return sum(base ** (index - 1) * element for index, element in enumerate(x))
def model_linear(
    data              = None,
    quick_calculation = False
):
    """
    Fit y = b0 + b1 * x by least squares to *data*, a sequence of (x, y)
    pairs, and return (b0, b1). With quick_calculation, fit on a spread of at
    most 10 points.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x = 0
    sum_y = 0
    sum_x_squared = 0
    sum_xy = 0
    for x, y in data:
        sum_x += x
        sum_y += y
        sum_x_squared += x ** 2
        sum_xy += x * y
    b1 = (sum_xy - (sum_x * sum_y) / n) /\
         (sum_x_squared - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
):
    """
    Unpickle and return the object stored in *filename*.

    NOTE: pickle is unsafe on untrusted input; only load trusted files.
    """
    # A context manager closes the file handle (the original leaked it).
    with open(filename, "rb") as object_file:
        return pickle.load(object_file)
def export_object(
    x,
    filename  = None,
    overwrite = False
):
    """Pickle *x* to *filename*, made collision-safe unless overwrite is True."""
    filename = propose_filename(
        filename  = filename,
        overwrite = overwrite
    )
    # A context manager closes the file handle (the original leaked it).
    with open(filename, "wb") as object_file:
        pickle.dump(x, object_file)
def string_to_bool(x):
    """Return True when *x* is a truthy word: "yes", "true", "t" or "1" (case-insensitive)."""
    return x.lower() in {"yes", "true", "t", "1"}
def ustr(text):
    """
    Convert a string to Python 2 unicode or Python 3 string as appropriate to
    the version of Python in use. None passes through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
):
    """
    Return the English words for a non-negative integer (or digit string).

    Zero now yields "zero" (the original returned an empty string); an empty
    string still yields an empty string.
    """
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    number_as_string = str(number)
    # Previously zero produced an empty string; an empty input still does.
    if number_as_string and int(number_as_string) == 0:
        return "zero"
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """Replace every digit run in *text* with its English-words equivalent."""
    # Split the text into alternating non-digit and digit segments. (A raw
    # string fixes the invalid escape sequence of the original.)
    segments = re.split(r"(\d+)", text)
    if segments[-1] == "":
        segments = segments[:-1]
    text_translated = []
    # Replace digit-only segments with English text; keep the rest verbatim.
    for segment in segments:
        if all(character.isdigit() for character in segment):
            text_translated.append(number_to_English_text(number = segment))
        else:
            text_translated.append(segment)
    return "".join(text_translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text            = None,
    remove_articles = True
):
    """
    This function replaces contractions with full words and replaces numbers
    with digits in specified text. There is the option to remove articles.
    """
    # The lookup tables are built once; the original rebuilt both
    # dictionaries on every loop iteration.
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    words = text.split()
    text_translated = ""
    for word in words:
        if remove_articles and word in ["a", "an", "the"]:
            continue
        if word in contractions_expansions:
            word = contractions_expansions[word]
        if word in numbers_digits:
            word = numbers_digits[word]
        text_translated += " " + word
    text_translated = text_translated.strip()
    return text_translated
def split_into_sentences(
    text = None
):
    """
    Split text into a list of sentence strings, shielding common
    abbreviations, acronyms, single-capital initials and website suffixes
    from being treated as sentence boundaries. Any trailing fragment after
    the final terminal punctuation mark is discarded.
    """
    # regular expression fragments used to detect boundaries
    cap     = "([A-Z])"
    prefix  = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffix  = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    starter = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronym = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    website = "[.](com|gov|io|net|org|pro)"
    working = " " + text + " "
    working = working.replace("\n", " ")
    # shield periods that are not sentence boundaries with <prd>
    working = re.sub(prefix, "\\1<prd>", working)
    working = re.sub(website, "<prd>\\1", working)
    if "Ph.D" in working:
        working = working.replace("Ph.D.", "Ph<prd>D<prd>")
    working = re.sub("\s" + cap + "[.] ", " \\1<prd> ", working)
    working = re.sub(acronym + " " + starter, "\\1<stop> \\2", working)
    working = re.sub(cap + "[.]" + cap + "[.]" + cap + "[.]", "\\1<prd>\\2<prd>\\3<prd>", working)
    working = re.sub(cap + "[.]" + cap + "[.]", "\\1<prd>\\2<prd>", working)
    working = re.sub(" " + suffix + "[.] " + starter, " \\1<stop> \\2", working)
    working = re.sub(" " + suffix + "[.]", " \\1<prd>", working)
    working = re.sub(" " + cap + "[.]", " \\1<prd>", working)
    # move terminal punctuation outside closing quotation marks
    for inside, outside in ((".”", "”."), (".\"", "\"."), ("!\"", "\"!"), ("?\"", "\"?")):
        working = working.replace(inside, outside)
    # mark the true sentence boundaries, then restore shielded periods
    for mark in (".", "?", "!"):
        working = working.replace(mark, mark + "<stop>")
    working = working.replace("<prd>", ".")
    pieces = working.split("<stop>")[:-1]
    return [piece.strip() for piece in pieces]
def trim_incomplete_sentences(
    text = None
):
    """
    Drop the first sentence of the text (assumed to be a leading fragment)
    and return the remaining sentences joined by single spaces. Any
    trailing fragment is discarded by split_into_sentences.
    """
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """
    Return a pseudorandom MAC address as six colon-separated lowercase
    hexadecimal octets, e.g. "0a:1b:2c:3d:4e:5f".
    """
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{octet:02x}".format(octet = octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
):
    """
    Return the named attribute of an object, falling back to a default.

    The name may include a single integer index in square brackets (e.g.
    "values[2]"), in which case the attribute is indexed before being
    returned. On any lookup failure (missing attribute, bad index,
    unparsable name) the imputation default value is returned instead.
    """
    try:
        if "[" in name and "]" in name:
            # e.g. "values[2]" --> attribute "values", index 2
            index = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    except Exception:
        # deliberate best-effort imputation, but no longer a bare except
        # that would also swallow KeyboardInterrupt and SystemExit
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
):
    """
    Return a list of valid Python variable names, each a dash-stripped
    UUID4 hexadecimal string that happens to start with a letter.
    """
    names = []
    while True:
        if len(names) >= number:
            return names
        candidate = str(uuid.uuid4()).replace("-", "")
        if candidate[0].isalpha():
            names.append(candidate)
def add_time_variables(df, reindex = True):
    """
    Return the DataFrame with time-derived variables added: month, month
    name, weekday index, weekday name, hour, timedelta/fraction/hours
    through day, days through week and days through year. The variable
    `datetime` must exist; it is parsed in place. If `reindex` is True
    (default) the index is set to the parsed datetimes (previously this
    parameter was accepted but ignored). Return False if the `datetime`
    variable is missing.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # dt.day_name() replaces dt.weekday_name, which was removed in pandas 1.0
    df["weekday_name"] = df["datetime"].dt.day_name()
    # timedelta since midnight of the same day
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    # continuous position within the week: weekday index plus day fraction
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    if reindex:
        df.index = df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Overlay day-by-day traces of a variable on a single axes whose
    horizontal axis is hours through the day, optionally renormalizing each
    day's values. The DataFrame index must be datetime; return False if it
    is not. The variable `hours_through_day` must exist.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # NOTE(review): grouping is by day-of-month, so identical day numbers
    # from different months are drawn as a single trace -- confirm intended
    daily_frames = [frame for _, frame in df.groupby(df.index.day)]
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable)
    for frame in daily_frames:
        values = scaler.fit_transform(frame[[variable]]) if renormalize else frame[variable]
        if plot:
            plt.plot(frame["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(frame["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Overlay week-by-week traces of a variable on a single axes whose
    horizontal axis is days through the week (labelled Monday--Sunday),
    optionally renormalizing each week's values. The variable
    `days_through_week` must exist; return False if it does not.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    weekly_frames = [frame for _, frame in df.groupby(df.index.week)]
    scaler = MinMaxScaler()
    plt.ylabel(variable)
    for frame in weekly_frames:
        values = scaler.fit_transform(frame[[variable]]) if renormalize else frame[variable]
        if plot:
            plt.plot(frame["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(frame["days_through_week"], values, s = s)
    plt.xticks(
        [ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Overlay year-by-year traces of a variable versus days through the year,
    optionally renormalizing each year's values and labelling the
    horizontal axis by month. The DataFrame index must be datetime; return
    False if it is not. (horizontal_axis_labels_days is accepted for
    interface compatibility but currently unused.)
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    yearly_frames = [frame for _, frame in df.groupby(df.index.year)]
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable)
    for frame in yearly_frames:
        values = scaler.fit_transform(frame[[variable]]) if renormalize else frame[variable]
        if plot:
            plt.plot(frame["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = frame.index.year.values[0])
        if scatter:
            plt.scatter(frame["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # tick positions are approximate month midpoints in day-of-year units
        plt.xticks(
            [ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df = None,
    variable = None,
    window = 20,
    upper_factor = 2,
    lower_factor = 2
):
    """
    Add rolling mean, rolling standard deviation and rolling upper/lower
    band variables (mean +/- factor * standard deviation) derived from the
    specified variable, and return the DataFrame.
    """
    # DataFrame.rolling replaces pd.stats.moments.rolling_mean/rolling_std,
    # which were removed from pandas (deprecated in 0.18, removed in 0.23)
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"] = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = None,
    variables_exclude = None
):
    """
    Min-max rescale variables of a DataFrame in place and return it.

    Variables containing NaNs and variables of object, datetime or
    timedelta dtype are excluded automatically, as are any explicitly
    excluded variables; explicitly included variables are rescaled
    regardless.
    """
    # Copy the exclusions so the caller's list is never mutated. The
    # previous implementation aliased and extended its mutable default
    # arguments, silently accumulating exclusions across calls.
    variables_not_rescale = list(variables_exclude) if variables_exclude else []
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    if variables_include:
        variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Plot a day-long bar histogram of counts of the variable for each hour
    of the day. The DataFrame index must be datetime; return False if it
    is not.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    hourly_counts = df.groupby(df.index.hour)[variable].count()
    hourly_counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
):
    """
    Plot a week-long bar histogram of counts of the variable for each day
    of the week, ordered Monday through Sunday. The DataFrame index must
    be datetime; return False if it is not.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # DatetimeIndex.day_name() replaces .weekday_name, removed in pandas 1.0
    counts = df.groupby(df.index.day_name())[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
):
    """
    Plot a year-long bar histogram of counts of the variable for each
    month, ordered January through December. The DataFrame index must be
    datetime; return False if it is not.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    monthly_counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    monthly_counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Apply notebook defaults: seaborn paper context with a monospace font,
    warnings suppressed, generous pandas display limits and a large default
    figure size.
    """
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    for option in ("display.max_rows", "display.max_columns"):
        pd.set_option(option, 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
):
    """
    Display a progress bar widget in a Jupyter notebook while yielding the
    elements of a sequence. Its dependencies must be enabled on launching
    Jupyter, such as in the following way:

    jupyter nbextension enable --py widgetsnbextension

    The progress bar can be used in a way like the following:

    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)

    The widget is updated every `every` elements (by default every 0.5 % of
    `size`); the bar turns red if iteration raises and green on completion.
    For unsized iterators, `every` must be specified and an indeterminate
    bar is shown.
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    # Try to measure the sequence; fall back to indeterminate-iterator mode.
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200) # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        # indeterminate animated bar: the total is unknown
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            # update the widget only every `every` elements to limit traffic
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name = name,
                        index = index,
                        size = size
                    )
            yield record
    except:
        # mark the bar red if the consumer or the sequence raises
        progress.bar_style = "danger"
        raise
    else:
        # mark the bar green and display the final count
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name = name,
            index = str(index or "?")
        )
# Instantiate the global clock registry at import time.
_main()
|
wdbm/shijian
|
shijian.py
|
ustr
|
python
|
def ustr(text):
    """
    Convert text to Python 2 unicode or Python 3 str, as appropriate to the
    running interpreter; pass None through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
|
Convert a string to Python 2 unicode or Python 3 string as appropriate to
the version of Python in use.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1231-L1242
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
# package identity and version timestamp; `name` is also the logger name
name = "shijian"
version = "2018-06-02T1644Z"
# module-level logger with colorised terminal output via technicolor
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    # Create the global clock registry; Clock instances detect this global
    # and register themselves with it on instantiation.
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
):
    """
    Return the current UTC time styled, by default, as whole UNIX seconds.
    """
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def time_UTC(
    style = None
):
    """
    Return the current UTC time styled as specified (style_datetime_object
    falls back to the filename-safe "YYYY-MM-DDTHHMMZ" form for None).
    """
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
):
    """
    Return a proposed filename based on the current UNIX time, with an
    optional extension appended, avoiding collision with existing files.
    """
    filename = str(time_UNIX(style = style))
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
):
    """
    Return a proposed filename based on the current UTC time, with an
    optional extension appended, avoiding collision with existing files.
    """
    filename = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def style_minimal_seconds(seconds):
    """
    Style a number of seconds as a minimal human-readable string, e.g.
    "2 hours 5 seconds", omitting intervals whose value is zero.
    """
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for interval in ("days", "hours", "minutes", "seconds"):
        value = getattr(delta, interval)
        if value:
            parts.append("{} {}".format(int(value), interval))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Convert a UNIX timestamp to a UTC datetime and style it as specified.
    """
    datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(datetime_object = datetime_object, style = style)
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Style a datetime.datetime or datetime.timedelta object as a string.

    For datetime objects the style keyphrase selects a strftime format,
    defaulting to the filename-safe "YYYY-MM-DDTHHMMZ" for unrecognised
    keyphrases. For timedelta objects the style is a format string with
    the placeholders {YYYY}, {DD}, {HH}, {MM}, {SS} (zero-padded
    remainders) and {Y}, {D}, {H}, {M}, {S} (totals). Other input types
    fall through and return None.
    """
    if type(datetime_object) is datetime.datetime:
        # filename safe
        if style == "YYYY-MM-DDTHHMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
        # filename safe with seconds
        elif style == "YYYY-MM-DDTHHMMSSZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%SZ")
        # filename safe with seconds and microseconds
        elif style == "YYYY-MM-DDTHHMMSSMMMMMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%S%fZ")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS UTC":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S UTC")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS Z":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S Z")
        # UNIX time in seconds with second fraction
        elif style == "UNIX time S.SSSSSS":
            return (datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds()
        # UNIX time in seconds rounded
        elif style == "UNIX time S":
            return int((datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds())
        # human-readable date
        elif style == "day DD month YYYY":
            return datetime_object.strftime("%A %d %B %Y")
        # human-readable time and date
        elif style == "HH:MM day DD month YYYY":
            return datetime_object.strftime("%H:%M %A %d %B %Y")
        # human-readable time with seconds and date
        elif style == "HH:MM:SS day DD month YYYY":
            return datetime_object.strftime("%H:%M:%S %A %d %B %Y")
        # human-readable date with time with seconds
        elif style == "day DD month YYYY HH:MM:SS":
            return datetime_object.strftime("%A %d %B %Y %H:%M:%S")
        # human-readable-audible time with seconds and date
        # NOTE(review): the keyphrase says "sounds" but the output correctly
        # reads "seconds"; the keyphrase is kept for caller compatibility
        elif style == "HH hours MM minutes SS sounds day DD month YYYY":
            return datetime_object.strftime("%H hours %M minutes %S seconds %A %d %B %Y")
        # human-readable days, hours and minutes
        elif style == "DD:HH:MM":
            return datetime_object.strftime("%d:%H:%M")
        # human-readable days, hours, minutes and seconds
        elif style == "DD:HH:MM:SS":
            return datetime_object.strftime("%d:%H:%M:%S")
        # human-readable time with seconds
        elif style == "HH:MM:SS":
            return datetime_object.strftime("%H:%M:%S")
        # human-readable-audible time with seconds
        elif style == "HH hours MM minutes SS seconds":
            return datetime_object.strftime("%H hours %M minutes %S seconds")
        # filename safe
        else:
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
    if type(datetime_object) is datetime.timedelta:
        # map the default datetime keyphrase to a timedelta format string
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        # decompose total seconds into remainders, retaining each total
        seconds_total = seconds
        minutes = int(math.floor(seconds / 60))
        minutes_total = minutes
        seconds -= minutes * 60
        hours = int(math.floor(minutes / 60))
        hours_total = hours
        minutes -= hours * 60
        days = int(math.floor(hours / 24))
        days_total = days
        hours -= days * 24
        years = int(math.floor(days / 365))
        years_total = years
        days -= years * 365
        return style.format(**{
            "Y" : years_total,
            "D" : days_total,
            "H" : hours_total,
            "M" : minutes_total,
            "S" : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD" : str(days).zfill(2),
            "HH" : str(hours).zfill(2),
            "MM" : str(minutes).zfill(2),
            "SS" : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """
    Convert a 24 hour time string of the form "HHMM" to minutes past
    midnight, e.g. "0130" --> 90.
    """
    return int(HHMM[:2]) * 60 + int(HHMM[2:])
def now_in_minutes():
    """
    Return the current UTC time as minutes past midnight.
    """
    current = datetime.datetime.utcnow()
    return current.hour * 60 + current.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop = None # string "HHMM" e.g. "1000"
):
    """
    Return True if the current UTC time falls within a daily time range
    (which may wrap past midnight, e.g. "1700--1000"), False if it does
    not, or None if no range is specified. The range is given either as a
    single "HHMM--HHMM" string or as separate start and stop strings.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        parts = time_range.split("--")
        time_start = parts[0]
        time_stop = parts[1]
    minutes_per_day = 1440
    start = HHMM_to_minutes(time_start)
    stop = HHMM_to_minutes(time_stop)
    # modular arithmetic handles ranges that wrap past midnight
    return (now_in_minutes() - start) % minutes_per_day <=\
           (stop - start) % minutes_per_day
def timer(function):
    """
    Decorator that times each call of the decorated function using a Clock
    named after the function; the clock registers itself with the global
    clock registry, from which aggregate timings can be reported.
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # removed: an inspect.getcallargs result was computed on every call
        # and then discarded, adding overhead with no effect
        clock = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    A stopwatch that accumulates elapsed wall-clock time (UTC). A clock is
    named (a UUID is generated if no name is given), registers itself with
    the module-global `clocks` registry when one exists, and by default
    starts timing on instantiation. It can be stopped and restarted;
    elapsed time is available as a timedelta (elapsed) or seconds (time).
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Start (or restart) timing from now."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Fold the running interval into the accumulator and stop timing."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        # Accumulate since the last update if one occurred, otherwise since
        # the working start time.
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulated time and clear the working start time."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the elapsed time in seconds."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multiline attribute/value report for this clock."""
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the clock report."""
        print(self.report())
class Clocks(object):
    """
    A registry of Clock instances that can report either mean times grouped
    by clock name ("statistics" style, the default) or every clock's time
    individually ("full" style).
    """
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock with the registry."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """
        Return a multiline report of the registered clocks; style is
        "statistics" (mean time per clock name) or "full" (every clock),
        or "no clocks" when the registry is empty.
        """
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks != []:
            if style == "statistics":
                # Create a dictionary of clock types with corresponding lists of
                # times for all instances.
                dictionary_of_clock_types = {}
                # Get the names of all clocks and add them to the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()] = []
                # Record the values of all clocks for their respective names in
                # the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()].append(clock.time())
                # Create a report, calculating the average value for each clock
                # type.
                string = "clock type".ljust(39) + "mean time (s)"
                for name, values in list(dictionary_of_clock_types.items()):
                    string += "\n" +\
                        str(name).ljust(39) + str(sum(values)/len(values))
                string += "\n"
            elif style == "full":
                # Create a report, listing the values of all clocks.
                string = "clock".ljust(39) + "time (s)"
                for clock in self._list_of_clocks:
                    string += "\n" +\
                        str(clock.name()).ljust(39) + str(clock.time())
                string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the clocks report in the specified (or default) style."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track the progress of a task from (fraction complete, UNIX time) data
    points and linearly extrapolate an estimated completion time via the
    module-level model_linear fit. In quick calculation mode, data are
    recorded at most once per update_rate seconds to limit fitting cost.
    """
    def __init__(
        self
    ):
        self.data = []
        self.quick_calculation = False
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Record data on a decimated basis (at most once per update_rate s)."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Record every datum passed to add_datum."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """
        Record a (fraction complete, UNIX time) datum (subject to
        decimation in quick calculation mode) and return a status string.
        """
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """
        Linearly extrapolate the recorded data to fraction = 1 and return
        the corresponding datetime; fall back to the epoch on fit failure
        or 0 with fewer than two data.
        """
        if len(self.data) <= 1:
            return 0
        else:
            try:
                # model_linear is a module-level helper fitting time versus
                # fraction; intercept b0, slope b1
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            except:
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the styled estimated completion time (now if unknown)."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated remaining seconds, clamped to >= 0."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded fraction complete."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recently recorded completion percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a one-line progress status string (default style only)."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """
    Return a version 4 UUID as a string.
    """
    return "{identifier}".format(identifier = uuid.uuid4())
def unique_number(
    style = None
):
    """
    Return an integer that is unique for the lifetime of the process,
    counting up from 1 (default style) or from 100 (style "integer 3
    significant figures", which raises an Exception once 999 is exceeded).
    State is kept in module-level global lists.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        initial_number = 100
        if "unique_numbers_3_significant_figures" not in globals():
            global unique_numbers_3_significant_figures
            unique_numbers_3_significant_figures = []
        if not unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(initial_number)
        else:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        # the 3 significant figure range is exhausted beyond 999
        if\
            style == "integer 3 significant figures" and \
            unique_numbers_3_significant_figures[-1] > 999:
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        initial_number = 1
        if "unique_numbers" not in globals():
            global unique_numbers
            unique_numbers = []
        if not unique_numbers:
            unique_numbers.append(initial_number)
        else:
            unique_numbers.append(unique_numbers[-1] + 1)
        return unique_numbers[-1]
def unique_3_digit_number():
    """
    Return a unique 3 digit integer, starting at 100; an Exception is
    raised once the 3 digit range is exhausted.
    """
    return unique_number(style = "integer 3 significant figures")
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Normalise text to ASCII and strip characters unsafe for filenames or
    URLs. By default (filename mode) whitespace runs become underscores;
    in URL mode the text is lowercased and whitespace/hyphen runs become
    single hyphens.
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    # decompose accented characters, then drop anything non-ASCII
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore").decode("utf-8")
    # keep only word characters, whitespace and hyphens
    text = re.sub("[^\w\s-]", "", text).strip()
    if filename and not URL:
        text = re.sub("[\s]+", "_", text)
    elif URL:
        text = re.sub("[-\s]+", "-", text.lower())
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    """
    Return a filename string that is safe to use.

    If no filename is specified, one is generated from the current UTC
    time. The filename is slugified by default, optionally excluding its
    extension from slugification. Unless overwriting is requested, an
    integer suffix is appended as needed so that the proposed filename
    does not collide with an existing file.
    """
    # If no file name is specified, generate one.
    if not filename:
        filename = time_UTC()
    proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base, extension = os.path.splitext(os.path.basename(filename))
            proposed = slugify(text = base) + extension
        else:
            proposed = slugify(text = filename)
    if not overwrite:
        count = 0
        while os.path.exists(proposed):
            # collision: rebuild from the original (un-slugified) filename
            # with an incrementing numeric suffix, as before
            count = count + 1
            directory = os.path.dirname(filename)
            base, extension = os.path.splitext(os.path.basename(filename))
            candidate = base + "_" + str(count) + extension
            proposed = directory + "/" + candidate if directory else candidate
    return proposed
def tmp_filepath():
    """
    Return an extensionless filepath under the directory /tmp without
    creating any file at that filepath.
    """
    # NOTE(review): relies on the private tempfile candidate-name generator
    candidate = next(tempfile._get_candidate_names())
    return "/tmp/" + candidate
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return the last lines of the specified file as bytes, or False if the
    file does not exist, is empty, or cannot be read.
    """
    try:
        path = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(path):
            return False
        text = subprocess.check_output(["tail", "-" + str(lines), path])
        return text if text else False
    except:
        # deliberate best-effort: any failure is reported as False
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Check that the platform release string contains a keyphrase; if it
    does not, optionally log a warning and, when required, log a fatal
    message and raise EnvironmentError.
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message =\
        "inappropriate environment: " +\
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release = release
        )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """
    Check that a program is available via the PATH environment variable;
    raise EnvironmentError if it is not.
    """
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is None:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
    log.debug("program {program} available".format(
        program = program
    ))
def which(
    program
):
    """
    Return the full path of a program found via the PATH environment
    variable (or the program itself if it is given as an executable path);
    return None if no executable is found.
    """
    def executable(candidate):
        # a usable program is a regular file with the execute bit set
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        if executable(program):
            return(program)
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(path.strip('"'), program)
            if executable(candidate):
                return candidate
    return None
def running(
    program
):
    """
    Return True if a process whose `ps -A` line contains the program name
    is running (defunct processes excluded), False otherwise.
    """
    needle = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    for line in listing:
        if needle in line and b"defunct" not in line:
            return True
    return False
def ensure_file_existence(
    filename
):
    """
    Check that the specified file exists (after environment-variable
    expansion); raise IOError if it does not.
    """
    # fixed: the log messages contained the literal text "(unknown)" where
    # the {filename} placeholder belongs, so .format(filename=...) was a
    # no-op and the filename never appeared in the logs
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """
    Remove the specified file.
    """
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Find numbered filename sequences with the given extension in a
    directory. By default return the first sequence found, naturally
    sorted; otherwise return a dictionary mapping each sequence pattern
    (digit runs replaced by "XXX") to its list of filenames.
    """
    candidates = [
        filename for filename in os.listdir(directory) if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    sequences = collections.defaultdict(list)
    for filename in candidates:
        sequences[re.sub("\d+", "XXX", filename)].append(filename)
    if return_first_sequence_only is True:
        first_pattern = next(iter(sequences.keys()))
        return natural_sort(sequences[first_pattern])
    return sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """
    Return a list of the names of regular files directly within the
    specified directory.
    """
    filenames = []
    for filename in os.listdir(directory):
        if os.path.isfile(os.path.join(directory, filename)):
            filenames.append(filename)
    return(filenames)
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """
    Return a list of the paths of all files under the specified directory,
    recursing into subdirectories.
    """
    filepaths = []
    for root, _, filenames in os.walk(directory):
        filepaths.extend(os.path.join(root, filename) for filename in filenames)
    return filepaths
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return a list of absolute paths of the files at a directory, optionally
    keeping only paths whose extension contains the specified text; raise
    IOError if the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for filename in os.listdir(directory):
        filepath = os.path.join(directory, filename)
        if os.path.isfile(filepath):
            filepaths.append(os.path.abspath(filepath))
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command    = None,
    background = True,
    timeout    = None
):
    """
    Run a Bash command. With background True, launch it detached and return
    None (timeout is ignored, with a warning). With background False, run the
    command to completion and return its standard output (bytes), or False on
    timeout; any other value of background returns None.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell      = True,
            executable = "/bin/bash"
        )
        return None
    elif not background:
        process = subprocess.Popen(
            [command],
            shell      = True,
            executable = "/bin/bash",
            stdout     = subprocess.PIPE
        )
        try:
            # communicate handles the timeout itself; a prior wait() with a
            # PIPE could deadlock if the command fills the pipe buffer
            output, errors = process.communicate(timeout = timeout)
            return output
        except subprocess.TimeoutExpired:
            # narrow exception: a bare except here would also swallow
            # KeyboardInterrupt and unrelated programming errors
            process.kill()
            return False
    else:
        return None
def percentage_power():
    """
    Return the battery charge percentage as a string (e.g. "85%"), "100%"
    when only line power is detected, or None when power status cannot be
    determined. Best-effort: any failure (e.g. upower unavailable) yields
    None.
    """
    try:
        filenames_power = engage_command(command = "upower -e")
        if isinstance(filenames_power, bytes):
            # engage_command returns bytes under Python 3
            filenames_power = filenames_power.decode()
        filenames_power = [line for line in filenames_power.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # the battery device path must be interpolated into the query;
            # the previous format string had no placeholder
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery))
            if isinstance(power_data, bytes):
                power_data = power_data.decode()
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    except Exception:
        return None
def convert_type_list_elements(
    list_object  = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a
    list to a specified type and return the new list. Only conversion to str
    is currently supported; other types yield None.
    """
    if element_type is str:
        converted = []
        for element in list_object:
            if isinstance(element, list):
                converted.append(convert_type_list_elements(
                    list_object  = element,
                    element_type = str
                ))
            else:
                converted.append(str(element))
        return converted
class List_Consensus(list):
    """
    A list that approximately bounds its own memory usage: whenever its
    estimated size (sys.getsizeof) exceeds the size constraint in bytes, the
    least frequent elements are discarded. consensus() returns the most
    frequent element, which can be used as the list's "consensus" value.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation (Python 2 requires the explicit super form)
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate memory limit in bytes (ignored if None)."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        Remove the least frequent elements until the size constraint is met.
        """
        if size is None:
            size = self.size_constraint
        while sys.getsizeof(self) > size:
            element_frequencies = collections.Counter(self)
            # most_common() is ordered most-to-least frequent, so the final
            # entry is the least frequent element
            infrequent_element = element_frequencies.most_common()[-1:][0][0]
            self.remove(infrequent_element)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append an element, then optionally enforce the size constraint."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None if the list is empty."""
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        except IndexError:
            # empty list: most_common(1) returns no entries; narrowed from a
            # bare except so unrelated errors are not silently swallowed
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
):
    """Sort strings so that embedded numbers compare numerically, e.g.
    "a2" sorts before "a10"."""
    def _natural_key(text):
        return [
            int(part) if part.isdigit() else part.lower()
            for part in re.split("([0-9]+)", text)
        ]
    return sorted(list_object, key = _natural_key)
def indices_of_list_element_duplicates(
    x
):
    """Yield the index of every element that duplicates an earlier element;
    lists and dicts are compared via hashable tuple forms."""
    observed = set()
    for index, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
):
    """Return the indices of the `number` greatest values of x, ordered by
    ascending value."""
    if len(x) <= number:
        number = len(x)
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _value in ranked[-number:]]
def unique_list_elements(x):
    """Return the elements of x in first-seen order with duplicates removed.

    Membership is tested by equality, so unhashable elements are supported.
    """
    result = []
    for element in x:
        if element in result:
            continue
        result.append(element)
    return result
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    This function returns the specified number of elements of a list spread
    approximately evenly.
    """
    # If the request covers the whole list, no selection is needed.
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    # Base case: a single element is taken from (approximately) the middle.
    if number_of_elements == 1:
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # Recursive case: take one element from near the start of the current
    # span, then recurse on the tail of the list for the remaining elements.
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def split_list(
list_object = None,
granularity = None
):
"""
This function splits a list into a specified number of lists. It returns a
list of lists that correspond to these parts. Negative numbers of parts are
not accepted and numbers of parts greater than the number of elements in the
list result in the maximum possible number of lists being returned.
"""
if granularity < 0:
raise Exception("negative granularity")
mean_length = len(list_object) / float(granularity)
split_list_object = []
last_length = float(0)
if len(list_object) > granularity:
while last_length < len(list_object):
split_list_object.append(
list_object[int(last_length):int(last_length + mean_length)]
)
last_length += mean_length
else:
split_list_object = [[element] for element in list_object]
return split_list_object
def ranges_edge_pairs(
    extent = None,
    range_length = None
):
    """
    Return the edges of ranges within an extent of some length, e.g. to
    separate 76 variables into groups of at most 20 variables:

    >>> ranges_edge_pairs(
    ...     extent       = 76, # number of variables
    ...     range_length = 20  # maximum number of variables per plot
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    number_of_ranges = int(math.ceil(extent / range_length))
    pairs = []
    for index in range(number_of_ranges):
        # consecutive ranges start one past the previous stop
        start = index * (range_length + 1)
        stop  = min(start + range_length, extent)
        pairs.append((start, stop))
    return pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """Parse a Markdown bullet list of the form "- name: value" (indentation
    indicating nesting) into a dictionary of dictionaries."""
    entry_pattern = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branches = [{}]
    for indentation, name, value in entry_pattern.findall(Markdown_list):
        indentation = len(indentation)
        if indentation > current_depth:
            assert not branches[-1], "unexpected indent"
        elif indentation < current_depth:
            branches.pop()
        branches[-1][name] = value or {}
        if not value:
            # an entry without a value opens a new branch
            branches.append(branches[-1][name])
        current_depth = indentation
    return branches[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """Parse a Markdown bullet list of the form "- name: value" (indentation
    indicating nesting) into an OrderedDict of OrderedDicts, preserving the
    order of entries."""
    entry_pattern = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branches = [collections.OrderedDict()]
    for indentation, name, value in entry_pattern.findall(Markdown_list):
        indentation = len(indentation)
        if indentation > current_depth:
            assert not branches[-1], "unexpected indent"
        elif indentation < current_depth:
            branches.pop()
        branches[-1][name] = value or collections.OrderedDict()
        if not value:
            # an entry without a value opens a new branch
            branches.append(branches[-1][name])
        current_depth = indentation
    return branches[0]
def open_configuration(
    filename = None
):
    """
    Read a configuration file (a Markdown bullet list) and return it parsed
    as an OrderedDict.
    """
    # context manager: the previous open(...).read() leaked the file handle
    with open(filename, "r") as file_configuration:
        return Markdown_list_to_OrderedDict(file_configuration.read())
def change_list_resolution(
    values             = None,
    length             = None,
    interpolation_type = "linear",
    dimensions         = 1
):
    """Resample a list of values to a new length by interpolation. Return the
    new values (dimensions = 1) or a tuple (positions, values)
    (dimensions = 2)."""
    positions = list(range(0, len(values)))
    interpolate = scipy.interpolate.interp1d(
        positions,
        values,
        kind = interpolation_type
    )
    new_positions = list(numpy.linspace(min(positions), max(positions), length))
    new_values = [float(interpolate(position)) for position in new_positions]
    if dimensions == 1:
        return new_values
    elif dimensions == 2:
        return (new_positions, new_values)
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """Convert a waveform (NumPy array) in place to a rectangle waveform:
    samples >= 0 take the maximum value and samples < 0 the minimum value.
    Return the (modified) array."""
    values[values >= 0] = fraction_amplitude * max(values)
    values[values < 0] = fraction_amplitude * min(values)
    # scale back up to restore the original amplitude
    scale = 1 / fraction_amplitude
    values[:] = [sample * scale for sample in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
):
    """
    Read a WAV file, convert its waveform to a rectangle waveform and write
    the result. With no output filename specified, the input filename is used
    as the basis for a proposed (non-clobbering) output name unless overwrite
    is True.
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    values = change_waveform_to_rectangle_waveform(
        values = values,
        fraction_amplitude = fraction_amplitude
    )
    # NOTE(review): the three lines below repeat the transformation already
    # performed by change_waveform_to_rectangle_waveform above; this looks
    # redundant -- confirm before removing, as integer truncation on typical
    # int16 WAV data could make a second pass differ slightly from one pass
    values[values >= 0] = fraction_amplitude * max(values)
    values[values < 0] = fraction_amplitude * min(values)
    values[:] = [x * (1 / fraction_amplitude) for x in values]
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """Divide each element of x by `summation` (default: sum(x), normalizing
    to unity) and return the result as a new list."""
    divisor = sum(x) if summation is None else summation
    return [element / divisor for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """Linearly rescale the elements of x to the range [minimum, maximum]."""
    low = min(x)
    high = max(x)
    factor = (maximum - minimum) / (high - low)
    return [minimum + (element - low) * factor for element in x]
def composite_variable(
    x
):
    """Combine the elements of x into a single scalar using powers of
    k = len(x) + 1 as positional weights (the first element is weighted by
    k ** -1)."""
    base = len(x) + 1
    total = 0
    for position, element in enumerate(x):
        total += base ** (position - 1) * element
    return total
def model_linear(
    data = None,
    quick_calculation = False
):
    """Fit a least-squares line to (x, y) pairs and return a tuple
    (intercept, slope). With quick_calculation True, fit to only a spread of
    10 of the points."""
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x         = sum(datum[0] for datum in data)
    sum_y         = sum(datum[1] for datum in data)
    sum_x_squared = sum(datum[0] ** 2 for datum in data)
    sum_xy        = sum(datum[0] * datum[1] for datum in data)
    slope = (sum_xy - (sum_x * sum_y) / n) / \
            (sum_x_squared - (sum_x ** 2) / n)
    intercept = (sum_y - slope * sum_x) / n
    return (intercept, slope)
def import_object(
    filename = None
):
    """Load and return a pickled object from a file."""
    # context manager: the previous open(...) leaked the file handle
    with open(filename, "rb") as file_object:
        return pickle.load(file_object)
def export_object(
    x,
    filename = None,
    overwrite = False
):
    """Pickle object x to a file, proposing a non-clobbering filename unless
    overwrite is True."""
    filename = propose_filename(
        filename = filename,
        overwrite = overwrite
    )
    # context manager: ensures the handle is flushed and closed (previously
    # the open file object was leaked)
    with open(filename, "wb") as file_object:
        pickle.dump(x, file_object)
def string_to_bool(x):
    """Interpret common affirmative strings (case-insensitive) as True."""
    return x.lower() in {"yes", "true", "t", "1"}
def number_to_English_text(
    number = None
):
    """
    Return English words for a non-negative integer (or its decimal string
    form), e.g. 123 -> "one hundred twenty three". Scales up to the
    vigintillions are supported; an empty string input yields "".
    """
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        # slice three more digits from the right on each pass; `progression`
        # tracks how far the slice start has moved past the string's start
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            # take only the leading 1-3 digits of this slice, depending on
            # how many digits remain to the left
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """Replace each run of digits in the text with its English words."""
    # split into alternating non-digit and digit segments
    segments = re.split("(\d+)", text)
    if segments[-1] == "":
        segments = segments[:-1]
    translated = []
    for segment in segments:
        if all(character.isdigit() for character in segment):
            translated.append(number_to_English_text(number = segment))
        else:
            translated.append(segment)
    return "".join(translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
):
    """
    This function replaces contractions with full words and replaces numbers
    with digits in specified text. There is the option to remove articles.
    """
    # the lookup tables are constant: build them once rather than rebuilding
    # both dictionaries on every word of the input
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    words = text.split()
    text_translated = ""
    for word in words:
        # optionally drop articles entirely
        if remove_articles and word in ["a", "an", "the"]:
            continue
        if word in contractions_expansions:
            word = contractions_expansions[word]
        if word in numbers_digits:
            word = numbers_digits[word]
        text_translated += " " + word
    text_translated = text_translated.strip()
    return text_translated
def split_into_sentences(
    text = None
):
    """
    Split text into a list of sentence strings, protecting periods in common
    honourifics, company suffixes, acronyms, websites and "Ph.D." so that
    they do not terminate sentences.
    """
    capital        = "([A-Z])"
    honourifics    = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    company_suffix = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    sentence_start = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronym        = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    website_suffix = "[.](com|gov|io|net|org|pro)"
    text = " " + text + " "
    text = text.replace("\n", " ")
    # mark non-terminating periods with <prd> so they survive the split
    text = re.sub(honourifics, "\\1<prd>", text)
    text = re.sub(website_suffix, "<prd>\\1", text)
    if "Ph.D" in text:
        text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub("\s" + capital + "[.] ", " \\1<prd> ", text)
    text = re.sub(acronym + " " + sentence_start, "\\1<stop> \\2", text)
    text = re.sub(capital + "[.]" + capital + "[.]" + capital + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(capital + "[.]" + capital + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + company_suffix + "[.] " + sentence_start, " \\1<stop> \\2", text)
    text = re.sub(" " + company_suffix + "[.]", " \\1<prd>", text)
    text = re.sub(" " + capital + "[.]", " \\1<prd>", text)
    # move sentence-final punctuation outside closing quotation marks
    if "”" in text:
        text = text.replace(".”", "”.")
    if "\"" in text:
        text = text.replace(".\"", "\".")
    if "!" in text:
        text = text.replace("!\"", "\"!")
    if "?" in text:
        text = text.replace("?\"", "\"?")
    # mark sentence boundaries, then restore the protected periods
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    return [sentence.strip() for sentence in text.split("<stop>")[:-1]]
def trim_incomplete_sentences(
    text = None
):
    """Drop the first (potentially incomplete) sentence of the text and
    rejoin the remaining sentences."""
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """Return a pseudorandom MAC address as six colon-separated hex octets."""
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{octet:02x}".format(octet = octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
):
    """
    Return an attribute of an object by name, supporting an indexed form like
    "values[2]". Return `imputation_default_value` if the attribute or index
    does not exist or cannot be accessed.
    """
    try:
        if "[" in name and "]" in name:
            # indexed access: split "attr[3]" into attribute name and index
            index = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    except (AttributeError, IndexError, KeyError, TypeError, ValueError):
        # narrowed from a bare except so that e.g. KeyboardInterrupt is not
        # silently converted into the default value
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
):
    """Return `number` unique hex strings usable as Python variable names;
    each is a UUID4 with hyphens removed whose first character is a letter."""
    names = []
    while len(names) < number:
        candidate = str(uuid.uuid4()).replace("-", "")
        # a valid identifier cannot begin with a digit
        if candidate[0].isalpha():
            names.append(candidate)
    return names
def add_time_variables(df, reindex = True):
    """
    Return a DataFrame with variables for weekday index, weekday name, timedelta
    through day, fraction through day, hours through day and days through week
    added, with the index set to datetime. It is assumed that the variable
    `datetime` exists; return False if it does not.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"]     = pd.to_datetime(df["datetime"])
    df["month"]        = df["datetime"].dt.month
    df["month_name"]   = df["datetime"].dt.strftime("%B")
    df["weekday"]      = df["datetime"].dt.weekday
    # Series.dt.weekday_name was removed in pandas 1.0; day_name() is the
    # modern equivalent and yields the same labels
    df["weekday_name"] = df["datetime"].dt.day_name()
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"]              = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    df.index = df["datetime"]
    #del df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create daily plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the DataFrame index is datetime.
    """
    if df.index.dtype not in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    daily_frames = [group for _, group in df.groupby(df.index.day)]
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for frame in daily_frames:
        values = scaler.fit_transform(frame[[variable]]) if renormalize else frame[variable]
        if plot:
            plt.plot(frame["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(frame["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create weekly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the variable `days_through_week` exists.
    """
    if "days_through_week" not in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    weekly_frames = [group for _, group in df.groupby(df.index.week)]
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for frame in weekly_frames:
        values = scaler.fit_transform(frame[[variable]]) if renormalize else frame[variable]
        if plot:
            plt.plot(frame["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(frame["days_through_week"], values, s = s)
    plt.xticks(
        [ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create yearly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the DataFrame index is datetime.
    """
    if df.index.dtype not in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    yearly_frames = [group for _, group in df.groupby(df.index.year)]
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for frame in yearly_frames:
        values = scaler.fit_transform(frame[[variable]]) if renormalize else frame[variable]
        if plot:
            plt.plot(frame["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = frame.index.year.values[0])
        if scatter:
            plt.scatter(frame["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # tick positions are the approximate midpoints of each month in days
        plt.xticks(
            [ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df = None,
    variable = None,
    window = 20,
    upper_factor = 2,
    lower_factor = 2
):
    """
    Add rolling mean, rolling standard deviation and rolling upper/lower
    bound variables derived from a specified variable in a DataFrame, and
    return the DataFrame.
    """
    # pd.stats.moments.rolling_mean/rolling_std were removed from pandas;
    # Series.rolling(...).mean()/.std() are the modern equivalents
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"] = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = [],
    variables_exclude = []
):
    """
    Rescale variables in a DataFrame to [0, 1], excluding variables with NaNs
    and strings, excluding specified variables, and including specified
    variables. The passed lists are not modified.
    """
    # copy first: extending the parameter directly mutated the caller's list
    # and the shared mutable default argument, so exclusions accumulated
    # across calls
    variables_not_rescale = list(variables_exclude)
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Create a day-long histogram of counts of the variable for each hour. It is
    assumed that the DataFrame index is datetime.
    """
    if df.index.dtype not in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    hourly_counts = df.groupby(df.index.hour)[variable].count()
    hourly_counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
):
    """
    Create a week-long histogram of counts of the variable for each day. It is
    assumed that the DataFrame index is datetime.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # DatetimeIndex.weekday_name was removed in pandas 1.0; day_name() yields
    # the same labels and keeps the reindex against calendar.day_name valid
    counts = df.groupby(df.index.day_name())[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
):
    """
    Create a year-long histogram of counts of the variable for each month. It
    is assumed that the DataFrame index is datetime.
    """
    if df.index.dtype not in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    monthly_counts = df.groupby(df.index.strftime("%B"))[variable].count()
    monthly_counts = monthly_counts.reindex(calendar.month_name[1:])
    monthly_counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Set up a Jupyter notebook with a few defaults: suppressed warnings, a
    paper-context monospace seaborn theme, wide pandas display limits and
    large figures.
    """
    warnings.filterwarnings("ignore")
    sns.set(context = "paper", font = "monospace")
    pd.set_option("display.max_rows", 500)
    pd.set_option("display.max_columns", 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
):
    """
    Display a progress bar widget in a Jupyter notebook. Its dependencies must
    be enabled on launching Jupyter, such as in the following way:

    jupyter nbextension enable --py widgetsnbextension

    The progress bar can be used in a way like the following:

    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    # Determine whether the sequence has a known length; a TypeError from
    # len() indicates an iterator of unknown size.
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200)     # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    # An unknown-size iterator is shown as an indeterminate "info" bar.
    if is_iterator:
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        # Update the widget only every `every` records to limit redraws.
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name  = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name  = name,
                        index = index,
                        size  = size
                    )
            yield record
    except:
        # Mark the bar red if iteration raised, then re-raise for the caller.
        progress.bar_style = "danger"
        raise
    else:
        # Mark the bar green and show the final count on clean completion.
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name  = name,
            index = str(index or "?")
        )
# module initialisation on import
_main()
|
wdbm/shijian
|
shijian.py
|
replace_contractions_with_full_words_and_replace_numbers_with_digits
|
python
|
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
text = None,
remove_articles = True
):
words = text.split()
text_translated = ""
for word in words:
if remove_articles and word in ["a", "an", "the"]:
continue
contractions_expansions = {
"ain't": "is not",
"aren't": "are not",
"can't": "can not",
"could've": "could have",
"couldn't": "could not",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"gonna": "going to",
"gotta": "got to",
"hadn't": "had not",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'll": "he will",
"he's": "he is",
"how'd": "how did",
"how'll": "how will",
"how's": "how is",
"I'd": "I would",
"I'll": "I will",
"I'm": "I am",
"I've": "I have",
"isn't": "is not",
"it'd": "it would",
"it'll": "it will",
"it's": "it is",
"mightn't": "might not",
"might've": "might have",
"mustn't": "must not",
"must've": "must have",
"needn't": "need not",
"oughtn't": "ought not",
"shan't": "shall not",
"she'd": "she would",
"she'll": "she will",
"she's": "she is",
"shouldn't": "should not",
"should've": "should have",
"somebody's": "somebody is",
"someone'd": "someone would",
"someone'll": "someone will",
"someone's": "someone is",
"that'll": "that will",
"that's": "that is",
"that'd": "that would",
"there'd": "there would",
"there're": "there are",
"there's": "there is",
"they'd": "they would",
"they'll": "they will",
"they're": "they are",
"they've": "they have",
"wasn't": "was not",
"we'd": "we would",
"we'll": "we will",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'd": "what did",
"what'll": "what will",
"what're": "what are",
"what's": "what is",
"whats": "what is",
"what've": "what have",
"when's": "when is",
"when'd": "when did",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'd": "who would",
"who'd've": "who would have",
"who'll": "who will",
"who're": "who are",
"who's": "who is",
"who've": "who have",
"why'd": "why did",
"why're": "why are",
"why's": "why is",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"ya'll": "you all",
"you'd": "you would",
"you'd've": "you would have",
"you'll": "you will",
"y'aint": "you are not",
"y'ain't": "you are not",
"you're": "you are",
"you've": "you have"
}
if word in list(contractions_expansions.keys()):
word = contractions_expansions[word]
numbers_digits = {
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
"eleven": "11",
"twelve": "12",
"thirteen": "13",
"fourteen": "14",
"fifteen": "15",
"sixteen": "16",
"seventeen": "17",
"eighteen": "18",
"nineteen": "19",
"twenty": "20"
}
if word in list(numbers_digits.keys()):
word = numbers_digits[word]
text_translated += " " + word
text_translated = text_translated.strip()
return text_translated
|
This function replaces contractions with full words and replaces numbers
with digits in specified text. There is the option to remove articles.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1362-L1499
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
# module identity and version (UTC timestamp style)
name = "shijian"
version = "2018-06-02T1644Z"
# module logger with a colourising stream handler, INFO level by default
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    # Module initialisation: create the module-level clock registry used by
    # the timing utilities. Clocks is defined elsewhere in this module.
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
):
    """Return the current UTC time styled (default: UNIX time in whole seconds)."""
    return style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
def time_UTC(
    style = None
):
    """Return the current UTC time styled (default style of style_datetime_object)."""
    return style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
):
    """
    Return a collision-avoiding filename based on the current UNIX time,
    optionally with a file extension appended.
    """
    name = str(time_UNIX(style = style))
    if extension:
        name += extension
    return propose_filename(filename = name)
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
):
    """
    Return a collision-avoiding filename based on the current UTC time,
    optionally with a file extension appended.
    """
    name = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style           = style
    )
    if extension:
        name += extension
    return propose_filename(filename = name)
def style_minimal_seconds(seconds):
    """
    Style a duration in seconds minimally, e.g. "2 hours 5 minutes 3 seconds",
    omitting any interval whose value is zero.
    """
    intervals = ["days", "hours", "minutes", "seconds"]
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = [
        "{} {}".format(int(getattr(delta, interval)), interval)
        for interval in intervals
        if getattr(delta, interval)
    ]
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """Style a UNIX timestamp (seconds since epoch) as a UTC time string."""
    return style_datetime_object(
        datetime_object = datetime.datetime.utcfromtimestamp(timestamp),
        style = style
    )
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Style a datetime.datetime or datetime.timedelta object as a string (or,
    for the UNIX-time styles, a number). Unrecognised styles fall back to the
    filename-safe "YYYY-MM-DDTHHMMZ" form for datetimes. For timedeltas, the
    style string is a format template with keys such as {DD}, {HH}, {MM},
    {SS} (zero-padded components) and {D}, {H}, {M}, {S} (totals).
    Returns None for any other input type.
    """
    if type(datetime_object) is datetime.datetime:
        # filename safe
        if style == "YYYY-MM-DDTHHMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
        # filename safe with seconds
        elif style == "YYYY-MM-DDTHHMMSSZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%SZ")
        # filename safe with seconds and microseconds
        elif style == "YYYY-MM-DDTHHMMSSMMMMMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%S%fZ")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS UTC":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S UTC")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS Z":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S Z")
        # UNIX time in seconds with second fraction
        elif style == "UNIX time S.SSSSSS":
            return (datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds()
        # UNIX time in seconds rounded
        elif style == "UNIX time S":
            return int((datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds())
        # human-readable date
        elif style == "day DD month YYYY":
            return datetime_object.strftime("%A %d %B %Y")
        # human-readable time and date
        elif style == "HH:MM day DD month YYYY":
            return datetime_object.strftime("%H:%M %A %d %B %Y")
        # human-readable time with seconds and date
        elif style == "HH:MM:SS day DD month YYYY":
            return datetime_object.strftime("%H:%M:%S %A %d %B %Y")
        # human-readable date with time with seconds
        elif style == "day DD month YYYY HH:MM:SS":
            return datetime_object.strftime("%A %d %B %Y %H:%M:%S")
        # human-readable-audible time with seconds and date
        elif style == "HH hours MM minutes SS sounds day DD month YYYY":
            return datetime_object.strftime("%H hours %M minutes %S seconds %A %d %B %Y")
        # human-readable days, hours and minutes
        elif style == "DD:HH:MM":
            return datetime_object.strftime("%d:%H:%M")
        # human-readable days, hours, minutes and seconds
        elif style == "DD:HH:MM:SS":
            return datetime_object.strftime("%d:%H:%M:%S")
        # human-readable time with seconds
        elif style == "HH:MM:SS":
            return datetime_object.strftime("%H:%M:%S")
        # human-readable-audible time with seconds
        elif style == "HH hours MM minutes SS seconds":
            return datetime_object.strftime("%H hours %M minutes %S seconds")
        # filename safe
        else:
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
    if type(datetime_object) is datetime.timedelta:
        # The default datetime style is not a valid timedelta template;
        # substitute a sensible days/hours/minutes/seconds template.
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        # Decompose the total seconds into years/days/hours/minutes/seconds,
        # keeping both the running totals and the per-unit remainders.
        seconds_total = seconds
        minutes = int(math.floor(seconds / 60))
        minutes_total = minutes
        seconds -= minutes * 60
        hours = int(math.floor(minutes / 60))
        hours_total = hours
        minutes -= hours * 60
        days = int(math.floor(hours / 24))
        days_total = days
        hours -= days * 24
        years = int(math.floor(days / 365))
        years_total = years
        days -= years * 365
        return style.format(**{
            "Y" : years_total,
            "D" : days_total,
            "H" : hours_total,
            "M" : minutes_total,
            "S" : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD" : str(days).zfill(2),
            "HH" : str(hours).zfill(2),
            "MM" : str(minutes).zfill(2),
            "SS" : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert a "HHMM" time string to minutes after midnight."""
    return 60 * int(HHMM[:2]) + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time as minutes after midnight."""
    moment = datetime.datetime.utcnow()
    return 60 * moment.hour + moment.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop = None # string "HHMM" e.g. "1000"
):
    """
    Return whether the current UTC time falls within a daily time range
    (which may wrap past midnight, e.g. "1700--1000"), or None when no range
    is specified at all.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        parts = time_range.split("--")
        time_start, time_stop = parts[0], parts[1]
    minutes_per_day = 1440
    now = now_in_minutes()
    start = HHMM_to_minutes(time_start)
    stop = HHMM_to_minutes(time_stop)
    # modular arithmetic handles ranges that wrap around midnight
    return (now - start) % minutes_per_day <= (stop - start) % minutes_per_day
def timer(function):
    """
    Decorator that times every call of the decorated function using a Clock
    named after the function; the clock registers itself with the global
    Clocks registry when one exists.
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # The original computed inspect.getcallargs(function, ...) here and
        # never used the result; that dead call has been removed.
        clock = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    A stopwatch-style timer.

    Elapsed time between start() and stop() (or "now", while running) is
    folded into an accumulator timedelta. If no name is given, a UUID is
    used; if a module-global `clocks` registry exists (created by _main),
    the clock registers itself with it on instantiation.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Start (or restart) the clock running from now."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Fold outstanding running time into the accumulator and stop."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the running state."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the elapsed time in seconds."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a text report of name, start/stop times and elapsed time."""
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the report."""
        print(self.report())
class Clocks(object):
    """
    Registry of Clock objects with aggregate reporting.

    report(style = "statistics") gives the mean elapsed time per clock name;
    report(style = "full") lists every clock individually. An unrecognised
    style now falls back to "statistics" (the original raised
    UnboundLocalError for unknown styles with a non-empty clock list).
    """
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock with this registry."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """Return a text report of registered clocks ("no clocks" if empty)."""
        if style is None:
            style = self._default_report_style
        if not self._list_of_clocks:
            return "no clocks"
        if style == "full":
            # list every clock with its elapsed time
            string = "clock".ljust(39) + "time (s)"
            for clock in self._list_of_clocks:
                string += "\n" +\
                    str(clock.name()).ljust(39) + str(clock.time())
            string += "\n"
            return string
        # "statistics" (and fallback for unknown styles): mean time per name
        times_by_name = collections.defaultdict(list)
        for clock in self._list_of_clocks:
            times_by_name[clock.name()].append(clock.time())
        string = "clock type".ljust(39) + "mean time (s)"
        for name, values in list(times_by_name.items()):
            string += "\n" +\
                str(name).ljust(39) + str(sum(values)/len(values))
        string += "\n"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Progress tracker.

    Completion fractions (0-1) are recorded with UNIX timestamps via
    add_datum(); a linear fit over the (fraction, time) data extrapolates the
    estimated time of completion (ETA) and the estimated time remaining
    (ETR). Quick-calculation mode rate-limits data recording and fits on a
    spread subsample of the data.
    """
    def __init__(
        self
    ):
        self.data = []
        self.quick_calculation = False
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Enable rate-limited recording and subsampled fitting."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Disable quick-calculation mode."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """Record a completion fraction with the current time; return status()."""
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            # in quick mode, only record if at least update_rate seconds passed
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """Extrapolate the linear fit to fraction = 1; return a datetime."""
        if len(self.data) <= 1:
            return 0
        else:
            try:
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            except:
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the styled estimated completion time."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated remaining seconds (never negative)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded completion fraction."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recent completion fraction as a percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a one-line progress status message (carriage-returned)."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """Return a random version-4 UUID as a string."""
    return str(uuid.uuid4())
def unique_number(
    style = None
):
    """
    Return a process-wide unique integer counter value, stored in module
    globals. The default style counts up from 1; the style
    "integer 3 significant figures" counts up from 100 and raises once the
    counter would exceed 999.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        initial_number = 100
        if "unique_numbers_3_significant_figures" not in globals():
            global unique_numbers_3_significant_figures
            unique_numbers_3_significant_figures = []
        if not unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(initial_number)
        else:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        # 3-significant-figure numbers are exhausted above 999
        if\
            style == "integer 3 significant figures" and \
            unique_numbers_3_significant_figures[-1] > 999:
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        initial_number = 1
        if "unique_numbers" not in globals():
            global unique_numbers
            unique_numbers = []
        if not unique_numbers:
            unique_numbers.append(initial_number)
        else:
            unique_numbers.append(unique_numbers[-1] + 1)
        return unique_numbers[-1]
def unique_3_digit_number():
    """Return the next process-wide unique integer in the range 100-999."""
    return unique_number(style = "integer 3 significant figures")
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Reduce text to ASCII and make it safe for use as a filename (whitespace
    becomes underscores) or as a URL (lowercased, whitespace and dashes
    collapsed to single dashes).
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    # transliterate to ASCII, dropping characters that do not decompose
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore").decode("utf-8")
    # keep only word characters, whitespace and dashes
    text = re.sub("[^\w\s-]", "", text).strip()
    if URL:
        text = re.sub("[-\s]+", "-", text.lower())
    elif filename:
        text = re.sub("[\s]+", "_", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
# @detail Return a filename string. Without a specified filename, one is
# generated from the current UTC time. The filename is optionally slugified
# (excluding the extension by default). Unless overwriting is enabled, an
# integer suffix is appended and incremented until the proposed filename does
# not collide with an existing file.
# @return filename string
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    # If no file name is specified, generate one from the current time.
    if not filename:
        filename = time_UTC()
    filename_proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base, extension = os.path.splitext(os.path.basename(filename))
            filename_proposed = slugify(text = base) + extension
        else:
            filename_proposed = slugify(text = filename)
    if not overwrite:
        directory = os.path.dirname(filename)
        base, extension = os.path.splitext(os.path.basename(filename))
        count = 0
        # append an increasing suffix until no existing file is clobbered
        while os.path.exists(filename_proposed):
            count += 1
            candidate = base + "_" + str(count) + extension
            if directory:
                filename_proposed = directory + "/" + candidate
            else:
                filename_proposed = candidate
    return filename_proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating a
    file at the filepath.
    """
    # NOTE(review): relies on the private tempfile._get_candidate_names API
    return os.path.join("/tmp", next(tempfile._get_candidate_names()))
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return the last specified number of lines of a specified file (as bytes,
    via the `tail` command). Return False on error or when the file does not
    exist.
    """
    try:
        path = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(path):
            return False
        text = subprocess.check_output(["tail", "-" + str(lines), path])
        return text if text else False
    except:
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Check that the platform release string contains a keyphrase; optionally
    warn (log) and/or require (log fatally and raise EnvironmentError) when
    it does not.
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message =\
        "inappropriate environment: " +\
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release = release
        )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """Log and raise EnvironmentError when `program` is not available on PATH."""
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is not None:
        log.debug("program {program} available".format(
            program = program
        ))
    else:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
def which(
    program
):
    """
    Return the full path of an executable (like the `which` command), or None
    when it cannot be found.
    """
    def _is_executable(path):
        return os.path.isfile(path) and os.access(path, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        # a path was given explicitly; check it directly
        if _is_executable(program):
            return(program)
    else:
        # search each PATH directory in order
        for path_directory in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(path_directory.strip('"'), program)
            if _is_executable(candidate):
                return candidate
    return None
def running(
    program
):
    """
    Return True when a non-defunct process listed by `ps -A` contains the
    program name, otherwise False.
    """
    needle = str.encode(program)
    process_lines = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    return any(
        needle in line and b"defunct" not in line for line in process_lines
    )
def ensure_file_existence(
    filename
):
    """
    Log and raise IOError when the file (after environment-variable
    expansion) does not exist.
    """
    # The log format strings previously lacked their {filename} placeholder,
    # so the filename never appeared in the messages; restored here.
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the file at filename."""
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Group digit-containing filenames with the given extension by their
    non-digit pattern (digit runs replaced by "XXX"). Return the
    naturally-sorted first sequence found, or, when
    return_first_sequence_only is False, the whole pattern-to-filenames dict.
    NOTE(review): raises StopIteration when no matching filenames exist.
    """
    filenames_of_directory = os.listdir(directory)
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        pattern = re.sub("\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        first_key_identified = next(iter(filename_sequences.keys()))
        filename_sequence = \
            natural_sort(filename_sequences[first_key_identified])
        return filename_sequence
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the names of the regular files directly inside a directory."""
    return [
        entry for entry in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, entry))
    ]
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """Return the paths of all files under a directory, recursively."""
    paths = []
    for root, _, filenames in os.walk(directory):
        for name in filenames:
            paths.append(os.path.join(root, name))
    return paths
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return absolute filepaths of files in a directory, optionally keeping
    only those whose extension contains a required substring. Raise IOError
    when the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for entry in os.listdir(directory):
        full_path = os.path.join(directory, entry)
        if os.path.isfile(full_path):
            filepaths.append(os.path.abspath(full_path))
    if extension_required:
        filepaths = [
            path for path in filepaths
            if extension_required in os.path.splitext(path)[1]
        ]
    return filepaths
def engage_command(
    command = None,
    background = True,
    timeout = None
):
    """
    Run a Bash command. In background mode the process is spawned and None is
    returned immediately (any timeout is ignored with a warning). In
    foreground mode, return the command's standard output as bytes, or False
    when the timeout expires or communication fails (the process is then
    killed).
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash"
        )
        return None
    elif not background:
        process = subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash",
            stdout = subprocess.PIPE
        )
        try:
            process.wait(timeout = timeout)
            output, errors = process.communicate(timeout = timeout)
            return output
        except:
            # timeout (or other failure): terminate and signal failure
            process.kill()
            return False
    else:
        return None
def percentage_power():
    """
    Return the battery charge percentage as a string such as "85%" (via
    upower), "100%" when only line power is present, or None on any failure.
    """
    try:
        # Run upower in the foreground (the original used the background
        # default, which returns None) and decode its bytes output.
        output = engage_command(command = "upower -e", background = False)
        if not output:
            return None
        filenames_power = [line for line in output.decode("utf-8").split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # The format string previously lacked its {filename} placeholder,
            # so the device path was never passed to upower; restored here.
            power_data = engage_command(
                command = "upower -i {filename}".format(filename = filename_power_battery),
                background = False
            ).decode("utf-8")
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    except:
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a
    list to a specified type and return the new list.

    Generalized: any callable type (e.g. int, float) is applied, where the
    original supported only str and returned None for other types.
    """
    return [
        element_type(element) if not isinstance(element, list)
        else convert_type_list_elements(
            list_object  = element,
            element_type = element_type
        )
        for element in list_object
    ]
class List_Consensus(list):
    """
    A list with an approximate memory cap and a "consensus" query.

    When the estimated size of the list (sys.getsizeof) exceeds the size
    constraint, the least frequent elements are removed until the constraint
    is met. consensus() returns the most frequent element, or None when the
    list is empty.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation (Python 2 and 3 compatible)
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the default size constraint (bytes), if a size is given."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """Remove the least frequent elements until the size constraint holds."""
        limit = self.size_constraint if size is None else size
        while sys.getsizeof(self) > limit:
            counts = collections.Counter(self)
            least_frequent = counts.most_common()[-1][0]
            self.remove(least_frequent)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append an element, optionally enforcing the size constraint."""
        limit = self.size_constraint if size is None else size
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = limit
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None when the list is empty."""
        try:
            return collections.Counter(self).most_common(1)[0][0]
        except:
            return None
## @brief return a naturally-sorted list
# @detail Return a naturally-sorted list from an input list: runs of digits
# are compared numerically and other text case-insensitively.
def natural_sort(
    list_object
):
    def _natural_key(text):
        return [
            int(chunk) if chunk.isdigit() else chunk.lower()
            for chunk in re.split("([0-9]+)", text)
        ]
    return sorted(list_object, key = _natural_key)
def indices_of_list_element_duplicates(
    x
):
    """
    Yield the indices of elements that have appeared earlier in x; list and
    dict elements are compared by content (converted to tuples for hashing).
    """
    observed = set()
    for index, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
):
    """Return indices of the `number` greatest values of x, in ascending value order."""
    count = min(number, len(x))
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _ in ranked[-count:]]
def unique_list_elements(x):
    """
    Return the elements of x with duplicates removed, preserving first-seen
    order (works for unhashable elements, which rules out dict.fromkeys).
    """
    result = []
    for element in x:
        if element not in result:
            result.append(element)
    return result
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    Return the specified number of elements of a list, spread approximately
    evenly across it (recursive selection).
    """
    elements = list_of_elements
    count = number_of_elements
    if len(elements) <= count:
        return elements
    if count == 0:
        return []
    if count == 1:
        return [elements[int(round((len(elements) - 1) / 2))]]
    first_index = int(round((len(elements) - 1) / (2 * count)))
    remainder_start = int(round((len(elements) - 1) / (count)))
    return [elements[first_index]] + select_spread(
        elements[remainder_start:], count - 1
    )
def split_list(
    list_object = None,
    granularity = None
):
    """
    Split a list into a specified number of sublists and return them as a
    list of lists. Negative granularities raise an exception; granularities
    greater than the list length yield one single-element list per element.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    part_length = len(list_object) / float(granularity)
    parts = []
    position = float(0)
    while position < len(list_object):
        parts.append(list_object[int(position):int(position + part_length)])
        position += part_length
    return parts
def ranges_edge_pairs(
    extent = None,
    range_length = None
):
    """
    Return (start, stop) edge pairs partitioning an extent into ranges of at
    most range_length. For example, to separate 76 variables into groups of
    at most 20:
    >>> ranges_edge_pairs(
    ...     extent       = 76, # number of variables
    ...     range_length = 20  # maximum number of variables per plot
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    number_of_ranges = int(math.ceil(extent / range_length))
    pairs = []
    for index in range(number_of_ranges):
        start = index * range_length + index
        stop = min((index + 1) * range_length + index, extent)
        pairs.append((start, stop))
    return pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list ("- name: value" lines) into a
    nested dict; a bullet without a value opens a new sub-dictionary.
    NOTE(review): only one level is popped per dedent, so dedenting by more
    than one level at once is not handled — confirm intended input shape.
    """
    line = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [{}]
    for indent, name, value in line.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            assert not stack[-1], "unexpected indent"
        elif indent < depth:
            stack.pop()
        stack[-1][name] = value or {}
        if not value:
            # new branch
            stack.append(stack[-1][name])
        depth = indent
    return stack[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list ("- name: value" lines) into a
    nested OrderedDict; a bullet without a value opens a new sub-dictionary.
    NOTE(review): only one level is popped per dedent, so dedenting by more
    than one level at once is not handled — confirm intended input shape.
    """
    line = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [collections.OrderedDict()]
    for indent, name, value in line.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            assert not stack[-1], "unexpected indent"
        elif indent < depth:
            stack.pop()
        stack[-1][name] = value or collections.OrderedDict()
        if not value:
            # new branch
            stack.append(stack[-1][name])
        depth = indent
    return stack[0]
def open_configuration(
    filename = None
):
    """
    Read a Markdown-list configuration file and return it as a nested
    OrderedDict.
    """
    # Context manager closes the handle (the original left the file open).
    with open(filename, "r") as file_configuration:
        content = file_configuration.read()
    return Markdown_list_to_OrderedDict(content)
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
):
    """
    Resample a list of values to a specified length by interpolation over the
    index range. Return the new y values (dimensions = 1) or the (x, y)
    tuple (dimensions = 2).
    """
    x_old = list(range(0, len(values)))
    interpolant = scipy.interpolate.interp1d(
        x_old,
        values,
        kind = interpolation_type
    )
    x_new = list(numpy.linspace(min(x_old), max(x_old), length))
    y_new = [float(interpolant(x)) for x in x_new]
    if dimensions == 2:
        return (x_new, y_new)
    elif dimensions == 1:
        return y_new
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """
    Clip a waveform (numpy array, modified in place and returned) to a
    rectangle waveform at its original maximum/minimum amplitudes.
    """
    peak = max(values)
    values[values >= 0] = fraction_amplitude * peak
    trough = min(values)
    values[values < 0] = fraction_amplitude * trough
    scale = 1 / fraction_amplitude
    values[:] = [sample * scale for sample in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
):
    """
    Read a WAV file, convert its waveform to a rectangle waveform and write
    the result to a (collision-avoiding unless overwrite) output filename,
    which defaults to one derived from the input filename.
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    # The original re-applied the rectangle transform a second time inline
    # after calling the helper; the repeated application was redundant
    # (idempotent) and has been removed.
    values = change_waveform_to_rectangle_waveform(
        values = values,
        fraction_amplitude = fraction_amplitude
    )
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """Divide each element of x by a summation (default: sum(x), i.e. unity norm)."""
    divisor = sum(x) if summation is None else summation
    return [element / divisor for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """Linearly rescale the elements of x to the range [minimum, maximum]."""
    low = min(x)
    span = (maximum - minimum) / (max(x) - low)
    return [minimum + (element - low) * span for element in x]
def composite_variable(
    x
):
    """
    Combine the elements of x into a single number using powers of
    k = len(x) + 1 (element i is weighted by k ** (i - 1)).
    """
    base = len(x) + 1
    return sum(
        base ** (index - 1) * element for index, element in enumerate(x)
    )
def model_linear(
    data = None,
    quick_calculation = False
):
    """
    Ordinary least squares fit of (x, y) pairs; return the tuple
    (intercept b0, slope b1). Quick-calculation mode fits on a spread
    subsample of at most 10 points.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x = sum(datum[0] for datum in data)
    sum_y = sum(datum[1] for datum in data)
    sum_x_squared = sum(datum[0] ** 2 for datum in data)
    sum_xy = sum(datum[0] * datum[1] for datum in data)
    b1 = (sum_xy - (sum_x * sum_y) / n) / \
         (sum_x_squared - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
):
    """Load and return a pickled object from a file."""
    # Context manager closes the handle (the original left the file open).
    with open(filename, "rb") as file_object:
        return pickle.load(file_object)
def export_object(
    x,
    filename = None,
    overwrite = False
):
    """
    Pickle x to a file, deduplicating the filename via propose_filename
    unless overwrite is enabled.
    """
    filename = propose_filename(
        filename = filename,
        overwrite = overwrite
    )
    # Context manager closes the handle (the original left the file open).
    with open(filename, "wb") as file_object:
        pickle.dump(x, file_object)
def string_to_bool(x):
    """Interpret a string as a boolean (case-insensitive yes/true/t/1)."""
    return x.lower() in {"yes", "true", "t", "1"}
def ustr(text):
    """
    Convert text to Python 2 unicode or Python 3 str as appropriate to the
    running interpreter; None passes through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
):
    """
    Return the English words for a non-negative integer (or its string form),
    e.g. 123 -> "one hundred twenty three". Supports up to vigintillions.
    Returns "" for 0 (no words are emitted for zero groups).
    """
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """Replace each run of digits in text with its English-words form."""
    # split into alternating non-digit and digit segments
    segments = re.split("(\d+)", text)
    if segments and segments[-1] == "":
        segments = segments[:-1]
    translated = []
    for segment in segments:
        if all(character.isdigit() for character in segment):
            translated.append(number_to_English_text(number = segment))
        else:
            translated.append(segment)
    return "".join(translated)
def split_into_sentences(
text = None
):
capitals = "([A-Z])"
prefixes = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
suffixes = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
starters = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|gov|io|net|org|pro)"
text = " " + text + " "
text = text.replace("\n", " ")
text = re.sub(prefixes, "\\1<prd>", text)
text = re.sub(websites, "<prd>\\1", text)
if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
text = re.sub("\s" + capitals + "[.] ", " \\1<prd> ", text)
text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
text = re.sub(capitals + "[.]" + capitals + "[.]", "\\1<prd>\\2<prd>", text)
text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
text = re.sub(" " + capitals + "[.]", " \\1<prd>", text)
if "”" in text: text = text.replace(".”", "”.")
if "\"" in text: text = text.replace(".\"", "\".")
if "!" in text: text = text.replace("!\"", "\"!")
if "?" in text: text = text.replace("?\"", "\"?")
text = text.replace(".", ".<stop>")
text = text.replace("?", "?<stop>")
text = text.replace("!", "!<stop>")
text = text.replace("<prd>", ".")
sentences = text.split("<stop>")
sentences = sentences[:-1]
sentences = [sentence.strip() for sentence in sentences]
return sentences
def trim_incomplete_sentences(
    text = None
    ):
    """
    Drop the first sentence of the text (assumed to be a possibly-incomplete
    leading fragment) and return the remaining sentences joined by spaces.
    """
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """
    Return a pseudorandom MAC address as six colon-separated, zero-padded,
    lowercase hexadecimal octets, e.g. "0a:1b:2c:3d:4e:5f".
    """
    # Six draws from random.randint(0, 255), one per octet, joined in order.
    octets = ["{octet:02x}".format(octet = random.randint(0, 255)) for _ in range(6)]
    return ":".join(octets)
def get_attribute(
    object_instance          = None,
    name                     = None,
    imputation_default_value = None
    ):
    """
    Return the value of the named attribute of an object, imputing a default
    value if the attribute is inaccessible.

    The name may include a single integer index in square brackets (e.g.
    "scores[2]"), in which case the indexed element of the attribute is
    returned. Any lookup failure (missing attribute, bad index, unparsable
    name) yields the imputation default instead of an exception.
    """
    try:
        if "[" in name and "]" in name:
            # Parse an index expression of the form "attribute[index]".
            index     = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value     = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    # Narrowed from a bare except, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception still covers all expected lookup failures
    # (AttributeError, IndexError, KeyError, TypeError, ValueError).
    except Exception:
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
    ):
    """
    Return a list of the requested number of valid Python variable names,
    each a 32-character hexadecimal UUID string whose first character is a
    letter (so the name is a legal identifier).
    """
    names = []
    while len(names) < number:
        # uuid4().hex is the dashless lowercase hex form of the UUID.
        candidate = uuid.uuid4().hex
        if candidate[0].isalpha():
            names.append(candidate)
    return names
def add_time_variables(df, reindex = True):
    """
    Return a DataFrame with variables for month index and name, weekday index
    and name, timedelta through day, fraction through day, hour, hours
    through day, days through week and days through year added, optionally
    (reindex = True) with the index set to the datetime variable. It is
    assumed that the variable `datetime` exists; if it does not, an error is
    logged and False is returned. The DataFrame is modified in place and
    also returned.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"]   = pd.to_datetime(df["datetime"])
    df["month"]      = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"]    = df["datetime"].dt.weekday
    # dt.weekday_name was removed in pandas 0.25; dt.day_name() is the
    # supported replacement and returns the same English day names.
    df["weekday_name"] = df["datetime"].dt.day_name()
    # Time elapsed since that row's midnight, as a timedelta.
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"]              = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    # Monday 00:00 is 0.0; Sunday 24:00 approaches 7.0.
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    # Honour the reindex flag, which was previously accepted but ignored
    # (the index was always set). The default preserves old behaviour.
    if reindex:
        df.index = df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Overlay one line and/or scatter trace per calendar day of the specified
    variable on the current matplotlib axes, with hours through the day on
    the horizontal axis. If renormalize is True each day's values are
    rescaled independently to [0, 1] with MinMaxScaler. It is assumed that
    the DataFrame index is datetime and that the variable
    `hours_through_day` exists. Returns False (after logging an error) if
    the index is not datetime; otherwise returns None.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # Group observations by day-of-month; each group becomes one trace.
    # NOTE(review): equal day numbers from different months fall into the
    # same group — confirm this merging is intended.
    days = []
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        if renormalize:
            # Per-day [0, 1] rescaling; requires the 2-D [[variable]] frame.
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Overlay one line and/or scatter trace per week of the specified variable
    on the current matplotlib axes, with days through the week
    (Monday-origin) on the horizontal axis labelled by weekday name. If
    renormalize is True each week's values are rescaled independently to
    [0, 1] with MinMaxScaler. It is assumed that the DataFrame index is
    datetime and that the variable `days_through_week` exists. Returns False
    (after logging an error) if `days_through_week` is missing; otherwise
    returns None.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    # One group (and hence one plotted trace) per week-of-year number.
    weeks = []
    for group in df.groupby(df.index.week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    # Centre each weekday label on the middle of that day's interval.
    plt.xticks(
        [ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Overlay one line and/or scatter trace per calendar year of the specified
    variable on the current matplotlib axes, with days through the year on
    the horizontal axis (optionally labelled by month name) and a legend of
    years. If renormalize is True each year's values are rescaled
    independently to [0, 1] with MinMaxScaler. It is assumed that the
    DataFrame index is datetime and that the variable `days_through_year`
    exists. Returns False (after logging an error) if the index is not
    datetime; otherwise returns None.

    NOTE(review): the horizontal_axis_labels_days parameter is accepted but
    never used — confirm whether day labelling was intended.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # One group (and hence one plotted, legend-labelled trace) per year.
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year in years:
        if renormalize:
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        if plot:
            plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # Tick positions approximate the midpoint day-of-year of each month.
        plt.xticks(
            [ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df           = None,
    variable     = None,
    window       = 20,
    upper_factor = 2,
    lower_factor = 2
    ):
    """
    Add rolling statistics variables derived from a specified variable in a
    DataFrame: a rolling mean, a rolling standard deviation, and upper/lower
    bounds at the mean plus/minus the specified multiples of the standard
    deviation. The first window - 1 rows of the new variables are NaN. The
    DataFrame is modified in place and also returned.
    """
    # pandas.stats.moments.rolling_mean/rolling_std were removed from pandas
    # (deprecated in 0.18, removed by 0.23); Series.rolling is the supported
    # replacement and computes the same statistics.
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"]               = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = [],
    variables_exclude = []
    ):
    """
    Rescale variables of a DataFrame to [0, 1] using MinMaxScaler, excluding
    variables containing NaNs, strings, datetimes or timedeltas, excluding
    specified variables, and additionally including specified variables. The
    DataFrame is modified in place and also returned.
    """
    # Copy the exclusion list before extending it: the previous code
    # extended the caller's list directly and — because the default argument
    # is a shared mutable list — accumulated exclusions across calls.
    variables_not_rescale = list(variables_exclude)
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
    ):
    """
    Create a day-long histogram of counts of the variable for each hour of
    the day (0-23) on the current matplotlib axes. It is assumed that the
    DataFrame index is datetime. Returns False (after logging an error) if
    the index is not datetime; otherwise returns None.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # Count non-null observations of the variable per hour of the day.
    counts = df.groupby(df.index.hour)[variable].count()
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
    ):
    """
    Create a week-long histogram of counts of the variable for each day,
    ordered Monday through Sunday, on the current matplotlib axes. It is
    assumed that the DataFrame index is datetime. Returns False (after
    logging an error) if the index is not datetime; otherwise returns None.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # DatetimeIndex.weekday_name was removed in pandas 0.25; day_name()
    # returns the same English day names. reindex imposes Monday-first
    # calendar order on the counts.
    counts = df.groupby(df.index.day_name())[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
    ):
    """
    Create a year-long histogram of counts of the variable for each month,
    ordered January through December, on the current matplotlib axes. It is
    assumed that the DataFrame index is datetime. Returns False (after
    logging an error) if the index is not datetime; otherwise returns None.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # Group by full month name; reindex imposes calendar order (month_name[0]
    # is the empty string, hence the [1:] slice).
    counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Set up a Jupyter notebook with a few defaults: paper-context seaborn
    styling with a monospace font, all warnings suppressed, up to 500 rows
    and 500 columns displayed by pandas, and a 17 x 10 inch default
    matplotlib figure size. Global side effects only; returns None.
    """
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    pd.set_option("display.max_rows", 500)
    pd.set_option("display.max_columns", 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
):
    """
    Display a progress bar widget in a Jupyter notebook while iterating over
    a sequence, yielding its elements unchanged.

    Arguments:
    - sequence: iterable to wrap; may be a sized sequence or a bare iterator
    - every: update the widget every `every` items (defaults to every item
      for sequences of up to 200 elements, otherwise ~0.5 % of the size;
      required when the sequence is an iterator of unknown size)
    - size: length of the sequence when it cannot be determined with len()
    - name: label displayed next to the counter

    The widget dependencies must be enabled on launching Jupyter, such as in
    the following way:

    jupyter nbextension enable --py widgetsnbextension

    The progress bar can be used in a way like the following:

    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            # len() unsupported: treat the sequence as an unsized iterator.
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200) # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        # Unknown total: render an indeterminate ("info") bar.
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            # Refresh the widget on the first item and every `every` items.
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name = name,
                        index = index,
                        size = size
                    )
            yield record
    except:
        # Mark the bar red if the consuming loop raises, then re-raise.
        progress.bar_style = "danger"
        raise
    else:
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name = name,
            index = str(index or "?")
        )
# Instantiate the module-level Clocks registry at import time so that Clock
# instances created later can register themselves with it.
_main()
|
wdbm/shijian
|
shijian.py
|
add_time_variables
|
python
|
def add_time_variables(df, reindex = True):
if not "datetime" in df.columns:
log.error("field datetime not found in DataFrame")
return False
df["datetime"] = pd.to_datetime(df["datetime"])
df["month"] = df["datetime"].dt.month
df["month_name"] = df["datetime"].dt.strftime("%B")
df["weekday"] = df["datetime"].dt.weekday
df["weekday_name"] = df["datetime"].dt.weekday_name
df["time_through_day"] = df["datetime"].map(
lambda x: x - datetime.datetime.combine(
x.date(),
datetime.time()
)
)
df["fraction_through_day"] = df["time_through_day"].map(
lambda x: x / datetime.timedelta(hours = 24)
)
df["hour"] = df["datetime"].dt.hour
df["hours_through_day"] = df["fraction_through_day"] * 24
df["days_through_week"] = df.apply(
lambda row: row["weekday"] + row["fraction_through_day"],
axis = 1
)
df["days_through_year"] = df["datetime"].dt.dayofyear
df.index = df["datetime"]
#del df["datetime"]
return df
|
Return a DataFrame with variables for weekday index, weekday name, timedelta
through day, fraction through day, hours through day and days through week
added, optionally with the index set to datetime and the variable `datetime`
removed. It is assumed that the variable `datetime` exists.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1578-L1611
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
name = "shijian"
version = "2018-06-02T1644Z"
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    """Create the module-level Clocks registry used by all Clock instances."""
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
    ):
    """
    Return the current UTC time styled as UNIX time (whole seconds by
    default).
    """
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def time_UTC(
    style = None
    ):
    """
    Return the current UTC time styled as specified (None selects the
    filename-safe default of style_datetime_object).
    """
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def filename_time_UNIX(
    style     = "UNIX time S.SSSSSS",
    extension = None
    ):
    """
    Return a proposed filename based on the current UNIX time, with an
    optional extension appended, guarded against overwriting existing files.
    """
    filename = str(time_UNIX(style = style))
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def filename_time_UTC(
    style     = "YYYY-MM-DDTHHMMSSZ",
    extension = None
    ):
    """
    Return a proposed filename based on the current UTC time, with an
    optional extension appended, guarded against overwriting existing files.
    """
    filename = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style           = style
    )
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def style_minimal_seconds(seconds):
    """
    Return a human-readable description of a duration given in seconds,
    e.g. "2 hours 5 minutes 3 seconds", omitting any zero-valued intervals.
    """
    time_intervals = ["days", "hours", "minutes", "seconds"]
    # relativedelta normalises raw seconds into days/hours/minutes/seconds;
    # zero-valued components are dropped by the truthiness filter below.
    dateutil_object = dateutil.relativedelta.relativedelta(seconds = seconds)
    return " ".join("{} {}".format(
        int(getattr(dateutil_object, interval)), interval
    ) for interval in time_intervals if getattr(dateutil_object, interval))
def style_UNIX_timestamp(
    timestamp = None,
    style     = "YYYY-MM-DDTHHMMZ"
    ):
    """
    Convert a UNIX timestamp (seconds since the epoch, interpreted as UTC)
    to a styled string.
    """
    datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(datetime_object = datetime_object, style = style)
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
    ):
    """
    Style a datetime.datetime or datetime.timedelta object as a string.

    For datetime objects most styles map directly to strftime formats; the
    two "UNIX time" styles instead return the number of seconds since the
    UNIX epoch (as a float, or rounded to an int). Unrecognised styles fall
    back to the filename-safe "YYYY-MM-DDTHHMMZ" form. For timedelta
    objects the style is a str.format template in which Y/D/H/M/S totals
    and zero-padded YYYY/DD/HH/MM/SS fields are available. Objects of any
    other type yield None.
    """
    strftime_formats = {
        # filename safe
        "YYYY-MM-DDTHHMMZ":         "%Y-%m-%dT%H%MZ",
        # filename safe with seconds
        "YYYY-MM-DDTHHMMSSZ":       "%Y-%m-%dT%H%M%SZ",
        # filename safe with seconds and microseconds
        "YYYY-MM-DDTHHMMSSMMMMMMZ": "%Y-%m-%dT%H%M%S%fZ",
        # elegant
        "YYYY-MM-DD HH:MM:SS UTC":  "%Y-%m-%d %H:%M:%S UTC",
        "YYYY-MM-DD HH:MM:SS Z":    "%Y-%m-%d %H:%M:%S Z",
        # human-readable dates and times
        "day DD month YYYY":          "%A %d %B %Y",
        "HH:MM day DD month YYYY":    "%H:%M %A %d %B %Y",
        "HH:MM:SS day DD month YYYY": "%H:%M:%S %A %d %B %Y",
        "day DD month YYYY HH:MM:SS": "%A %d %B %Y %H:%M:%S",
        # human-readable-audible (the "sounds" key is preserved verbatim)
        "HH hours MM minutes SS sounds day DD month YYYY": "%H hours %M minutes %S seconds %A %d %B %Y",
        "DD:HH:MM":    "%d:%H:%M",
        "DD:HH:MM:SS": "%d:%H:%M:%S",
        "HH:MM:SS":    "%H:%M:%S",
        "HH hours MM minutes SS seconds": "%H hours %M minutes %S seconds",
    }
    if type(datetime_object) is datetime.datetime:
        epoch = datetime.datetime.utcfromtimestamp(0)
        # UNIX time in seconds with second fraction
        if style == "UNIX time S.SSSSSS":
            return (datetime_object - epoch).total_seconds()
        # UNIX time in seconds rounded
        if style == "UNIX time S":
            return int((datetime_object - epoch).total_seconds())
        # Everything else is a strftime style; unknown styles default to the
        # filename-safe form.
        return datetime_object.strftime(
            strftime_formats.get(style, "%Y-%m-%dT%H%MZ")
        )
    if type(datetime_object) is datetime.timedelta:
        if style == "YYYY-MM-DDTHHMMZ":
            # The datetime default style is meaningless for durations;
            # substitute a duration template.
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            seconds_total = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds_total = int(datetime_object)
        # Successive divmods split the total into per-unit remainders while
        # retaining the running totals for the single-letter fields.
        minutes_total, seconds = divmod(seconds_total, 60)
        hours_total, minutes   = divmod(minutes_total, 60)
        days_total, hours      = divmod(hours_total, 24)
        years_total, days      = divmod(days_total, 365)
        return style.format(**{
            "Y":    years_total,
            "D":    days_total,
            "H":    hours_total,
            "M":    minutes_total,
            "S":    seconds_total,
            "YYYY": str(years_total).zfill(4),
            "DD":   str(days).zfill(2),
            "HH":   str(hours).zfill(2),
            "MM":   str(minutes).zfill(2),
            "SS":   str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
    ):
    """
    Convert a 24-hour "HHMM" time string to minutes past midnight.
    """
    return 60 * int(HHMM[:2]) + int(HHMM[2:])
def now_in_minutes():
    """
    Return the current UTC time as minutes past midnight.
    """
    current_time = datetime.datetime.utcnow()
    return current_time.hour * 60 + current_time.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop  = None  # string "HHMM" e.g. "1000"
    ):
    """
    Return True when the current UTC time falls within a daily time range,
    which may wrap past midnight. The range is given either as a single
    "HHMM--HHMM" string or as separate start and stop "HHMM" strings.
    Return None when no range is specified at all.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        time_start, time_stop = time_range.split("--")[:2]
    minutes_per_day = 1440
    start_minutes = HHMM_to_minutes(time_start)
    stop_minutes  = HHMM_to_minutes(time_stop)
    # Modular arithmetic handles ranges that wrap around midnight.
    return (now_in_minutes() - start_minutes) % minutes_per_day <= \
           (stop_minutes - start_minutes) % minutes_per_day
def timer(function):
    """
    Decorator that times each call of the decorated function using a Clock
    named after the function (registered with the global clock registry),
    then returns the function's result unchanged.
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
        ):
        # The previous implementation also called inspect.getcallargs here
        # and discarded the result; that unused (and deprecated) call has
        # been removed.
        clock  = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    A stopwatch-style clock that accumulates elapsed UTC wall-clock time.

    On instantiation the clock registers itself with the module-level
    ``clocks`` registry when one exists, and starts automatically unless
    ``start = False`` is passed. Elapsed time keeps accumulating across
    stop/start cycles until reset() is called.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Record the start time (UTC now); the accumulator is not cleared."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Fold outstanding time into the accumulator and record the stop time."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        """Add the time since the last update (or since start) to the accumulator."""
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the working start time."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        """Return the accumulated elapsed time as a datetime.timedelta."""
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the accumulated elapsed time in seconds (float)."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if the clock never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if the clock never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multiline text report of the clock's attributes."""
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the clock report."""
        print(self.report())
class Clocks(object):
    """
    Registry of Clock instances able to produce aggregate text reports,
    either as per-name mean times ("statistics", the default) or as a full
    per-clock listing ("full").
    """
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock with the registry."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """
        Return a text report of registered clocks in the requested style
        ("statistics" or "full"; None selects the default).

        NOTE(review): an unrecognised style with a non-empty registry leaves
        `string` unbound and raises NameError — confirm whether a fallback
        is intended.
        """
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks != []:
            if style == "statistics":
                # Create a dictionary of clock types with corresponding lists of
                # times for all instances.
                dictionary_of_clock_types = {}
                # Get the names of all clocks and add them to the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()] = []
                # Record the values of all clocks for their respective names in
                # the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()].append(clock.time())
                # Create a report, calculating the average value for each clock
                # type.
                string = "clock type".ljust(39) + "mean time (s)"
                for name, values in list(dictionary_of_clock_types.items()):
                    string += "\n" +\
                        str(name).ljust(39) + str(sum(values)/len(values))
                string += "\n"
            elif style == "full":
                # Create a report, listing the values of all clocks.
                string = "clock".ljust(39) + "time (s)"
                for clock in self._list_of_clocks:
                    string += "\n" +\
                        str(clock.name()).ljust(39) + str(clock.time())
                string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report in the requested style."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track the progress of a long-running task and estimate its completion
    time by fitting a linear model to recorded (fraction complete, UNIX
    timestamp) data points.
    """
    def __init__(
        self
    ):
        self.data = [] # list of (fraction complete, UNIX timestamp) tuples
        self.quick_calculation = False # rate-limit recording when True
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Rate-limit datum recording to at most one per update_rate seconds."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Record every datum passed to add_datum."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """
        Record a progress datum (fraction complete) together with the
        current UNIX time, then return a status message.
        """
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            # In quick-calculation mode, drop data arriving faster than
            # update_rate to keep the model fit cheap.
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """
        Extrapolate the linear fit of timestamp versus fraction to
        fraction = 1 and return the result as a datetime (the epoch when the
        fit fails, and the int 0 with fewer than two data points).

        NOTE(review): model_linear is defined elsewhere in this module;
        assumed to return (intercept, slope) — confirm.
        """
        if len(self.data) <= 1:
            return 0
        else:
            try:
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            except:
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the styled estimated completion time (now, when data is insufficient)."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return estimated seconds remaining (0 when insufficient data or overdue)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded fraction complete."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recently recorded progress as a percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """
        Return a one-line progress status string; only the default style
        (None) is implemented, and other styles return None implicitly.
        """
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """
    Return a random version-4 UUID as its canonical 36-character string.
    """
    return "{}".format(uuid.uuid4())
def unique_number(
    style = None
    ):
    """
    Return the next number in a module-wide monotonically increasing
    sequence.

    With style "integer 3 significant figures" the sequence starts at 100
    and raises an exception once it would exceed 999; otherwise the
    sequence starts at 1 and is unbounded. State is kept in module-level
    globals, so the sequences are shared by all callers (not thread-safe).
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        initial_number = 100
        # Lazily create the shared sequence on first use.
        if "unique_numbers_3_significant_figures" not in globals():
            global unique_numbers_3_significant_figures
            unique_numbers_3_significant_figures = []
        if not unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(initial_number)
        else:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        # Guard against rolling over into 4-digit numbers.
        if\
            style == "integer 3 significant figures" and \
            unique_numbers_3_significant_figures[-1] > 999:
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        initial_number = 1
        if "unique_numbers" not in globals():
            global unique_numbers
            unique_numbers = []
        if not unique_numbers:
            unique_numbers.append(initial_number)
        else:
            unique_numbers.append(unique_numbers[-1] + 1)
        return unique_numbers[-1]
def unique_3_digit_number():
    """Return the next unique integer in the module-wide 100-999 sequence."""
    return unique_number(style = "integer 3 significant figures")
## @brief make text filename or URL safe
def slugify(
    text       = None,
    filename   = True,
    URL        = False,
    return_str = True
    ):
    """
    Normalise text to an ASCII slug safe for use as a filename (whitespace
    replaced by underscores) or, with URL = True, as a URL component
    (lowercased, whitespace and hyphens collapsed to single hyphens).
    Accented characters are transliterated to their ASCII equivalents and
    other non-word characters are removed.
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    # Decompose accented characters, then drop the non-ASCII combining marks.
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    # Raw strings replace the previous plain-string regexes ("[^\w\s-]"
    # etc.), whose "\w"/"\s" escapes are deprecated in string literals.
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
# @detail This function returns a filename string. If a default filename is not
# specified, the function generates one based on the current time. If a default
# filename is specified, the function uses it as the default filename. By
# default, the function then checks to see if using the filename would cause
# overwriting of an existing file. If overwriting is possible, the function
# appends an integer to the filename in a loop in order to generate a filename
# that would not cause overwriting of an existing file. The function can be set
# to overwrite instead of using the default overwrite protection behaviour.
# @return filename string
def propose_filename(
    filename                       = None,
    overwrite                      = False,
    slugify_filename               = True,
    exclude_extension_from_slugify = True
    ):
    """
    Return a proposed filename. When no filename is given, one is generated
    from the current UTC time. The name is optionally slugified (by default
    leaving the extension untouched). Unless overwrite is True, an integer
    suffix is appended repeatedly until the proposal does not collide with
    an existing file. Note that collision proposals are built from the
    original (unslugified) filename, preserving historical behaviour.
    """
    if not filename:
        filename = time_UTC()
    filename_proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base, extension = os.path.splitext(os.path.basename(filename))
            filename_proposed = slugify(text = base) + extension
        else:
            filename_proposed = slugify(text = filename)
    if not overwrite:
        directory = os.path.dirname(filename)
        base, extension = os.path.splitext(os.path.basename(filename))
        count = 0
        while os.path.exists(filename_proposed):
            count += 1
            candidate = base + "_" + str(count) + extension
            if directory:
                filename_proposed = directory + "/" + candidate
            else:
                filename_proposed = candidate
    return filename_proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating
    a file at the filepath.
    """
    # uuid4 replaces the previous use of tempfile._get_candidate_names, a
    # private API that can change or disappear between Python releases.
    return "/tmp/" + uuid.uuid4().hex
def tail(
    filepath = "log.txt",
    lines    = 50
    ):
    """
    Return (as bytes) up to the specified number of last lines of the
    specified file. Return False if the file does not exist, is empty or
    cannot be read. User/environment variables in the path are expanded.
    """
    try:
        filepath = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(filepath):
            return False
        # A bounded deque keeps only the last `lines` lines in memory; this
        # replaces the previous shell-out to the external tail utility,
        # removing the subprocess dependency. Output matches tail's: the
        # last lines concatenated, as bytes.
        with open(filepath, "rb") as opened_file:
            text = b"".join(collections.deque(opened_file, maxlen = int(lines)))
        return text if text else False
    # Narrowed from a bare except; the function deliberately degrades to
    # False on any read failure.
    except Exception:
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require   = True,
    warn      = False
    ):
    """
    Check that the platform release string contains the keyphrase. When it
    does not, optionally log a warning (warn = True) and/or log fatally and
    raise EnvironmentError (require = True). Returns None when the check
    passes or is non-fatal.
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message =\
        "inappropriate environment: " +\
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release   = release
        )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
    ):
    """
    Verify that the named program is available as an executable on PATH
    (via which), raising EnvironmentError if it is not.
    """
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is None:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
    else:
        log.debug("program {program} available".format(
            program = program
        ))
def which(
    program
    ):
    """
    Return the full path of the named executable by searching PATH, or the
    program itself when it is given as a path to an executable; return None
    when no executable is found.
    """
    def _is_executable(candidate):
        # Must be a regular file with the execute permission bit set.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        # An explicit path was supplied: accept it only if it is executable.
        if _is_executable(program):
            return(program)
    else:
        for path_entry in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(path_entry.strip('"'), program)
            if _is_executable(candidate):
                return candidate
    return None
def running(
    program
    ):
    """
    Return True when a process whose `ps -A` listing line contains the
    program name (and is not defunct) is currently running, False otherwise.
    """
    needle = str.encode(program)
    # ps emits bytes; split into per-process lines for scanning.
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    for line in listing:
        if needle in line and b"defunct" not in line:
            return True
    return False
def ensure_file_existence(
    filename
    ):
    """
    Verify that the specified file exists, raising IOError if it does not.
    Environment variables in the filename are expanded before the check.
    """
    # The log messages previously contained no placeholder (the literal
    # "(unknown)" appeared where the filename should be), so the
    # .format(filename = ...) calls had no effect; "{filename}" restores
    # the intended interpolation, matching the sibling
    # ensure_program_available.
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the file at the specified path (raises OSError if missing)."""
    os.remove(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension                  = "png",
    directory                  = ".",
    return_first_sequence_only = True,
    ):
    """
    Find filenames at the directory that contain digits and end in the
    extension, group them by the pattern obtained when every digit run is
    replaced by "XXX", and return either the naturally-sorted first sequence
    found (default) or a dictionary mapping each pattern to its list of
    filenames.
    """
    filenames_of_directory = os.listdir(directory)
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        # Raw string: the previous plain "\d+" relied on a deprecated
        # string escape sequence.
        pattern = re.sub(r"\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        first_key_identified = next(iter(filename_sequences.keys()))
        filename_sequence = \
            natural_sort(filename_sequences[first_key_identified])
        return filename_sequence
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
    ):
    """
    Return a list of the names of the regular files at the specified
    directory (non-recursive; subdirectories are excluded).
    """
    entries = os.listdir(directory)
    return [entry for entry in entries if os.path.isfile(os.path.join(directory, entry))]
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
    ):
    """Return the paths of every file under *directory*, walked recursively."""
    paths = []
    for root, _subdirectories, filenames in os.walk(directory):
        paths.extend(os.path.join(root, filename) for filename in filenames)
    return paths
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
    ):
    """
    Return the absolute paths of the files in *directory*, optionally keeping
    only those whose extension contains *extension_required*. Raise IOError if
    the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for filename in os.listdir(directory):
        candidate = os.path.join(directory, filename)
        if os.path.isfile(candidate):
            filepaths.append(os.path.abspath(candidate))
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command = None,
    background = True,
    timeout = None
    ):
    """
    Run a Bash command. If *background* is True, launch it detached and return
    None (ignoring *timeout*). Otherwise wait up to *timeout* seconds for
    completion and return the command's standard output (bytes), or False if
    it times out or fails.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash"
        )
        return None
    elif not background:
        process = subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash",
            stdout = subprocess.PIPE
        )
        try:
            process.wait(timeout = timeout)
            output, errors = process.communicate(timeout = timeout)
            return output
        # bug fix: narrowed from a bare "except:", which also swallowed
        # SystemExit and KeyboardInterrupt; Exception still covers
        # subprocess.TimeoutExpired and other runtime failures
        except Exception:
            process.kill()
            return False
    else:
        return None
def percentage_power():
    """
    Return the battery charge percentage reported by upower as a string such
    as "82%", "100%" when on line power without a battery, or None on failure.
    """
    try:
        output = engage_command(command = "upower -e")
        # bug fix: engage_command yields bytes from subprocess; decode before
        # splitting (previously str.split on bytes raised, so this function
        # always fell through to None under Python 3)
        if isinstance(output, bytes):
            output = output.decode()
        filenames_power = [line for line in output.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # bug fix: the format string previously read "upower -i (unknown)"
            # with no {filename} placeholder, producing an invalid command
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery))
            if isinstance(power_data, bytes):
                power_data = power_data.decode()
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    # deliberate best-effort: any failure (no upower, parse error) yields None
    except:
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
    ):
    """
    Recursively convert all elements of a list (including elements of nested
    sublists) to a specified type and return the resulting new list. Only
    conversion to str is currently supported; other types return None.
    """
    if element_type is str:
        converted = []
        for element in list_object:
            if isinstance(element, list):
                converted.append(convert_type_list_elements(
                    list_object = element,
                    element_type = str
                ))
            else:
                converted.append(str(element))
        return converted
class List_Consensus(list):
    """
    A list that approximately bounds its own memory usage: whenever its
    estimated size exceeds a constraint (bytes), its least frequent elements
    are discarded. The most frequent element -- the "consensus" -- can be
    requested at any time.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation, compatible with both Python 2 and 3
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        # approximate memory limit in bytes
        self.size_constraint = 150
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate memory constraint (bytes); None leaves it unchanged."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """Remove the least frequent elements until the size constraint is met."""
        limit = self.size_constraint if size is None else size
        while sys.getsizeof(self) > limit:
            tallies = collections.Counter(self)
            rarest = tallies.most_common()[-1:][0][0]
            self.remove(rarest)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append *element*, optionally enforcing the size constraint afterwards."""
        limit = self.size_constraint if size is None else size
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = limit
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None if that cannot be determined."""
        try:
            tallies = collections.Counter(self)
            return tallies.most_common(1)[0][0]
        except:
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
    ):
    """Return *list_object* sorted in natural (human) order, e.g. a2 < a10."""
    def _key(text):
        # split into digit and non-digit tokens; compare digits numerically
        return [
            int(token) if token.isdigit() else token.lower()
            for token in re.split("([0-9]+)", text)
        ]
    return sorted(list_object, key = _key)
def indices_of_list_element_duplicates(
    x
    ):
    """
    Yield the index of every element of *x* that was seen earlier in the list;
    lists and dicts are compared by content (converted to tuples).
    """
    observed = set()
    for index, element in enumerate(x):
        # make unhashable containers hashable for membership testing
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
    ):
    """Return indices of up to *number* greatest values of *x*, ascending by value."""
    if len(x) <= number:
        number = len(x)
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _value in ranked[-number:]]
def unique_list_elements(x):
    """Return the elements of *x* with duplicates removed, preserving order."""
    # membership is tested against the output list (not a set) so that
    # unhashable elements such as lists are supported
    result = []
    for item in x:
        if item not in result:
            result.append(item)
    return result
def select_spread(
    list_of_elements = None,
    number_of_elements = None
    ):
    """
    This function returns the specified number of elements of a list spread
    approximately evenly.
    """
    # A list that is already small enough is returned unchanged.
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    # A single requested element is taken from the middle of the list.
    if number_of_elements == 1:
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # Otherwise take one element from near the start of the current stride and
    # recurse on the remainder of the list for the remaining elements.
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def split_list(
    list_object = None,
    granularity = None
    ):
    """
    This function splits a list into a specified number of lists. It returns a
    list of lists that correspond to these parts. Negative numbers of parts are
    not accepted and numbers of parts greater than the number of elements in the
    list result in the maximum possible number of lists being returned.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    # mean sublist length (computed before branching so a zero granularity
    # fails in the same way regardless of list length)
    stride = len(list_object) / float(granularity)
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    parts = []
    position = float(0)
    while position < len(list_object):
        parts.append(list_object[int(position):int(position + stride)])
        position += stride
    return parts
def ranges_edge_pairs(
    extent = None,
    range_length = None
    ):
    """
    Return the edges of ranges within an extent of some length. For example, to
    separate 76 variables into groups of at most 20 variables, the ranges of the
    variables could be 0 to 20, 21 to 41, 42 to 62 and 63 to 76. These range
    edges could be returned by this function as a list of tuples:

    >>> ranges_edge_pairs(
    ...     extent       = 76, # number of variables
    ...     range_length = 20  # maximum number of variables per plot
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    count = int(math.ceil(extent / range_length))
    pairs = []
    for index in range(0, count):
        start = index * range_length + index
        stop = min((index + 1) * range_length + index, extent)
        pairs.append((start, stop))
    return pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
    ):
    """
    Parse an indented Markdown bullet list of the form "- name: value" into a
    nested dictionary; a bullet without a value starts a nested branch.
    """
    # captures: (leading indent spaces, name, optional value after ": ")
    line = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [{}]
    for indent, name, value in line.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            # deeper indentation is only valid right after an empty new branch
            assert not stack[-1], "unexpected indent"
        elif indent < depth:
            # NOTE(review): a dedent pops exactly one level regardless of how
            # many indentation levels were skipped -- confirm intended
            stack.pop()
        stack[-1][name] = value or {}
        if not value:
            # new branch
            stack.append(stack[-1][name])
        depth = indent
    return stack[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
    ):
    """
    Parse an indented Markdown bullet list of the form "- name: value" into a
    nested OrderedDict (preserving entry order); a bullet without a value
    starts a nested branch.
    """
    # captures: (leading indent spaces, name, optional value after ": ")
    line = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [collections.OrderedDict()]
    for indent, name, value in line.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            # deeper indentation is only valid right after an empty new branch
            assert not stack[-1], "unexpected indent"
        elif indent < depth:
            # NOTE(review): a dedent pops exactly one level regardless of how
            # many indentation levels were skipped -- confirm intended
            stack.pop()
        stack[-1][name] = value or collections.OrderedDict()
        if not value:
            # new branch
            stack.append(stack[-1][name])
        depth = indent
    return stack[0]
def open_configuration(
    filename = None
    ):
    """Read a Markdown-list configuration file and return it as an OrderedDict."""
    # use a context manager so the file handle is closed deterministically
    # (previously the handle from open() was never closed)
    with open(filename, "r") as file_configuration:
        content = file_configuration.read()
    return Markdown_list_to_OrderedDict(content)
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
    ):
    """
    Resample *values* to *length* points by interpolation over an implicit
    integer x-axis. Return the new y-values as a list, or an (x, y) tuple of
    lists if *dimensions* is 2.
    """
    x_original = list(range(0, len(values)))
    interpolation = scipy.interpolate.interp1d(
        x_original,
        values,
        kind = interpolation_type
    )
    x_new = list(numpy.linspace(min(x_original), max(x_original), length))
    y_new = [float(interpolation(x)) for x in x_new]
    if dimensions == 1:
        return y_new
    elif dimensions == 2:
        return (x_new, y_new)
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
    ):
    """
    Clip a waveform in place to a rectangle waveform: non-negative samples are
    set to the maximum level and negative samples to the minimum level, then
    all samples are rescaled by 1/fraction_amplitude. Returns the same array.
    """
    # assumes *values* supports boolean-mask indexing (e.g. a NumPy array) --
    # TODO confirm
    # NOTE(review): max() and min() are evaluated sequentially while the array
    # is being modified, so the negative clip level depends on the values
    # already clipped by the previous statement -- statement order matters
    values[values >= 0] = fraction_amplitude * max(values)
    values[values < 0] = fraction_amplitude * min(values)
    values[:] = [x * (1 / fraction_amplitude) for x in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
    ):
    """
    Read a WAV file, clip its waveform to a rectangle waveform and write the
    result to *filename_rectangle_waveform* (defaulting to a proposed variant
    of the input filename, respecting *overwrite*).
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    values = change_waveform_to_rectangle_waveform(
        values = values,
        fraction_amplitude = fraction_amplitude
    )
    # bug fix: the clip/rescale statements were previously duplicated inline
    # here, applying the transformation a second time after the helper call
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
    ):
    """Divide each element of *x* by *summation* (default: sum of *x*, i.e. normalize to unity)."""
    divisor = sum(x) if summation is None else summation
    return [element / divisor for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
    ):
    """Linearly rescale the elements of *x* so they span [minimum, maximum]."""
    low = min(x)
    high = max(x)
    return [
        minimum + (element - low) * ((maximum - minimum) / (high - low))
        for element in x
    ]
def composite_variable(
    x
    ):
    """Combine the values of *x* into one scalar via a base-(len(x)+1) positional encoding."""
    base = len(x) + 1
    total = 0
    for index, element in enumerate(x):
        total += base ** (index - 1) * element
    return total
def model_linear(
    data = None,
    quick_calculation = False
    ):
    """
    Fit a least-squares line to (x, y) pairs and return (intercept, gradient)
    as (b0, b1). If quick_calculation is True, fit only a spread of 10 points.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x = sum(datum[0] for datum in data)
    sum_y = sum(datum[1] for datum in data)
    sum_x_squared = sum(datum[0] ** 2 for datum in data)
    sum_xy = sum(datum[0] * datum[1] for datum in data)
    # standard simple linear regression normal equations
    b1 = (sum_xy - (sum_x * sum_y) / n) / \
         (sum_x_squared - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
    ):
    """Load and return a pickled object from *filename*."""
    # use a context manager so the file handle is closed deterministically
    # (previously the handle from open() was never closed)
    with open(filename, "rb") as file_object:
        return pickle.load(file_object)
def export_object(
    x,
    filename = None,
    overwrite = False
    ):
    """Pickle object *x* to *filename* (a fresh filename is proposed unless overwrite)."""
    filename = propose_filename(
        filename = filename,
        overwrite = overwrite
    )
    # use a context manager so the file handle is flushed and closed
    # (previously the handle from open() was never closed)
    with open(filename, "wb") as file_object:
        pickle.dump(x, file_object)
def string_to_bool(x):
    """Interpret a string as a boolean: true for "yes", "true", "t" or "1" (case-insensitive)."""
    return x.lower() in {"yes", "true", "t", "1"}
def ustr(text):
    """
    Convert a string to Python 2 unicode or Python 3 string as appropriate to
    the version of Python in use; None is passed through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
    ):
    """
    Return an English-text representation of a non-negative integer (given as
    an int or a digit string), e.g. 42 -> "forty two". Scale words up to
    vigintillion (10^63) are supported.
    """
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    # Walk leftwards from the end of the string in 3-digit steps, collecting
    # each group as an int (least significant group first).
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            # A partial leading group may hold 3, 2 or 1 digits.
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        # A group of zero contributes no words (its scale word is skipped).
        if group == 0:
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                # "teen" numbers are irregular and handled as a unit
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
    ):
    """Replace every run of digits in *text* with its English-text equivalent."""
    # Split into alternating non-digit and digit segments, keeping the digits.
    segments = re.split(r"(\d+)", text)
    if segments[-1] == "":
        segments = segments[:-1]
    translated = []
    # Translate digit segments; copy all other segments through unchanged.
    for segment in segments:
        if all(character.isdigit() for character in segment):
            translated.append(number_to_English_text(number = segment))
        else:
            translated.append(segment)
    return "".join(translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
    ):
    """
    This function replaces contractions with full words and replaces numbers
    with digits in specified text. There is the option to remove articles.
    """
    # performance fix: both translation tables were previously rebuilt inside
    # the per-word loop on every iteration; they are constants and are now
    # constructed once
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    words = text.split()
    text_translated = ""
    for word in words:
        # optionally drop articles entirely
        if remove_articles and word in ["a", "an", "the"]:
            continue
        if word in contractions_expansions:
            word = contractions_expansions[word]
        if word in numbers_digits:
            word = numbers_digits[word]
        text_translated += " " + word
    text_translated = text_translated.strip()
    return text_translated
def split_into_sentences(
    text = None
    ):
    """
    Split *text* into a list of sentences using heuristic regex rewrites that
    protect abbreviations, acronyms and website suffixes from being treated as
    sentence boundaries. The trailing fragment after the last terminator is
    discarded.
    """
    capitals = "([A-Z])"
    prefixes = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    starters = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = "[.](com|gov|io|net|org|pro)"
    # Temporarily mark protected periods as <prd> and true sentence
    # terminators as <stop>; the markers are resolved at the end.
    text = " " + text + " "
    text = text.replace("\n", " ")
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub("\s" + capitals + "[.] ", " \\1<prd> ", text)
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + capitals + "[.]", " \\1<prd>", text)
    # Move terminators outside closing quotation marks so they are detected.
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    # the final split element is whatever follows the last terminator
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
    ):
    """Drop the first (possibly incomplete) sentence of *text* and return the rest joined by spaces."""
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """Return a pseudorandom MAC address as a colon-delimited lowercase hex string."""
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{octet:02x}".format(octet = octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
    ):
    """
    Return the named attribute of an object, supporting a single index suffix
    such as "values[2]"; on any failure return the imputation default value.
    """
    try:
        if "[" in name and "]" in name:
            # e.g. "values[2]" -> attribute "values", index 2
            attribute_name, _separator, remainder = name.partition("[")
            index = int(remainder.split("]")[0])
            return getattr(object_instance, attribute_name)[index]
        return getattr(object_instance, name)
    except:
        return imputation_default_value
def generate_Python_variable_names(
    number = 10
    ):
    """Return *number* random hexadecimal identifiers that start with a letter."""
    names = []
    while len(names) < number:
        candidate = str(uuid.uuid4()).replace("-", "")
        # identifiers must not start with a digit
        if candidate[0].isalpha():
            names.append(candidate)
    return names
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
    ):
    """
    Create daily plots of a variable in a DataFrame, optionally renormalized
    (per day, to the unit interval). It is assumed that the DataFrame index is
    datetime and that a numeric "hours_through_day" column exists.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # NOTE(review): grouping by df.index.day merges identical day numbers from
    # different months -- confirm intended
    days = []
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        if renormalize:
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
    ):
    """
    Create weekly plots of a variable in a DataFrame, optionally renormalized
    (per week, to the unit interval). It is assumed that the DataFrame index
    is datetime and that the variable `days_through_week` exists.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    # group the data by ISO week number of the datetime index
    weeks = []
    for group in df.groupby(df.index.week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    # label day-of-week positions at half-day offsets
    plt.xticks(
        [     0.5,       1.5,         2.5,        3.5,      4.5,        5.5,      6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
    ):
    """
    Create yearly plots of a variable in a DataFrame, optionally renormalized
    (per year, to the unit interval). It is assumed that the DataFrame index
    is datetime and that a numeric "days_through_year" column exists.
    """
    # NOTE(review): horizontal_axis_labels_days is currently unused
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # group the data by calendar year
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year in years:
        if renormalize:
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        if plot:
            plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # month labels placed at approximate mid-month day positions
        plt.xticks(
            [     15.5,         45,    74.5,     105,  135.5,    166,  196.5,    227.5,         258,     288.5,        319,      349.5],
            ["January", "February", "March", "April",  "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df = None,
    variable = None,
    window = 20,
    upper_factor = 2,
    lower_factor = 2
    ):
    """
    Add rolling statistics variables derived from a specified variable in a
    DataFrame: a rolling mean and standard deviation over *window* samples,
    plus upper and lower bands at +-factor standard deviations.
    """
    # fix: pd.stats.moments.rolling_mean/rolling_std were deprecated in
    # pandas 0.18 and later removed; use the Series.rolling API instead
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"] = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = [],
    variables_exclude = []
    ):
    """
    Rescale variables in a DataFrame to the unit interval, excluding variables
    with NaNs and strings, excluding specified variables, and including
    specified variables.
    """
    # bug fix: previously the mutable default argument variables_exclude was
    # aliased and extended, leaking exclusions into every subsequent call;
    # copy it instead
    variables_not_rescale = list(variables_exclude)
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
    ):
    """
    Create a day-long histogram of counts of the variable for each hour. It is
    assumed that the DataFrame index is datetime (the hour is taken from the
    index itself).
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # count non-null observations of the variable per hour of day (0--23)
    counts = df.groupby(df.index.hour)[variable].count()
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
    ):
    """
    Create a week-long histogram of counts of the variable for each day. It is
    assumed that the DataFrame index is datetime (the weekday name is taken
    from the index itself).
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # count observations per weekday, reordered Monday-first via the calendar module
    counts = df.groupby(df.index.weekday_name)[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
    ):
    """
    Create a year-long histogram of counts of the variable for each month. It
    is assumed that the DataFrame index is datetime (the month name is taken
    from the index itself via strftime).
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # count observations per month name, reordered January-first
    # (calendar.month_name[0] is an empty string, hence the [1:] slice)
    counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Set up a Jupyter notebook with a few defaults: seaborn "paper" context
    with a monospace font, all warnings suppressed, wide pandas display limits
    and a large default matplotlib figure size.
    """
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    pd.set_option("display.max_rows", 500)
    pd.set_option("display.max_columns", 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size  = None,
    name  = "items"
    ):
    """
    Display a progress bar widget in a Jupyter notebook. Its dependencies must
    be enabled on launching Jupyter, such as in the following way:

    jupyter nbextension enable --py widgetsnbextension

    The progress bar can be used in a way like the following:

    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    # Determine the sequence size; an object without len() is an iterator and
    # is rendered as an indeterminate ("info") bar.
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200)     # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        # Yield the records through, updating the widget every *every* items.
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name  = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name  = name,
                        index = index,
                        size  = size
                    )
            yield record
    except:
        # Any exception from the consumer turns the bar red, then propagates.
        progress.bar_style = "danger"
        raise
    else:
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name  = name,
            index = str(index or "?")
        )
_main()
|
wdbm/shijian
|
shijian.py
|
daily_plots
|
python
|
def daily_plots(
df,
variable,
renormalize = True,
plot = True,
scatter = False,
linestyle = "-",
linewidth = 1,
s = 1
):
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
days = []
for group in df.groupby(df.index.day):
days.append(group[1])
scaler = MinMaxScaler()
plt.xlabel("hours")
plt.ylabel(variable);
for day in days:
if renormalize:
values = scaler.fit_transform(day[[variable]])
else:
values = day[variable]
if plot:
plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
if scatter:
plt.scatter(day["hours_through_day"], values, s = s)
|
Create daily plots of a variable in a DataFrame, optionally renormalized. It
is assumed that the DataFrame index is datetime.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1613-L1644
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
name = "shijian"
version = "2018-06-02T1644Z"
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    # Instantiate the module-level clock registry used by the timing
    # utilities. NOTE(review): Clocks is defined elsewhere in this module
    # (not visible in this chunk) -- confirm.
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
    ):
    """Return the current UTC time rendered in *style* (default: UNIX seconds)."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def time_UTC(
    style = None
    ):
    """Return the current UTC time rendered in *style*."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
    ):
    """Propose a filename based on the current UNIX time, with an optional extension."""
    filename = str(time_UNIX(style = style))
    if extension:
        filename = filename + extension
    return propose_filename(filename = filename)
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
    ):
    """Propose a filename based on the current UTC time, with an optional extension."""
    filename = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
    if extension:
        filename = filename + extension
    return propose_filename(filename = filename)
def style_minimal_seconds(seconds):
    """
    Render a duration in seconds as e.g. "2 hours 3 minutes 4 seconds",
    omitting intervals whose value is zero.
    """
    dateutil_object = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for interval in ("days", "hours", "minutes", "seconds"):
        quantity = getattr(dateutil_object, interval)
        if quantity:
            parts.append("{} {}".format(int(quantity), interval))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style = "YYYY-MM-DDTHHMMZ"
    ):
    """Render a UNIX *timestamp* (seconds) as a styled UTC time string."""
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(datetime_object = moment, style = style)
# strftime formats keyed by supported style names; unrecognised styles fall
# back to the filename-safe default "%Y-%m-%dT%H%MZ".
_DATETIME_STRFTIME_STYLES = {
    "YYYY-MM-DDTHHMMZ":                                "%Y-%m-%dT%H%MZ",
    "YYYY-MM-DDTHHMMSSZ":                              "%Y-%m-%dT%H%M%SZ",
    "YYYY-MM-DDTHHMMSSMMMMMMZ":                        "%Y-%m-%dT%H%M%S%fZ",
    "YYYY-MM-DD HH:MM:SS UTC":                         "%Y-%m-%d %H:%M:%S UTC",
    "YYYY-MM-DD HH:MM:SS Z":                           "%Y-%m-%d %H:%M:%S Z",
    "day DD month YYYY":                               "%A %d %B %Y",
    "HH:MM day DD month YYYY":                         "%H:%M %A %d %B %Y",
    "HH:MM:SS day DD month YYYY":                      "%H:%M:%S %A %d %B %Y",
    "day DD month YYYY HH:MM:SS":                      "%A %d %B %Y %H:%M:%S",
    # "sounds" [sic]: the key is preserved because callers look it up verbatim
    "HH hours MM minutes SS sounds day DD month YYYY": "%H hours %M minutes %S seconds %A %d %B %Y",
    "DD:HH:MM":                                        "%d:%H:%M",
    "DD:HH:MM:SS":                                     "%d:%H:%M:%S",
    "HH:MM:SS":                                        "%H:%M:%S",
    "HH hours MM minutes SS seconds":                  "%H hours %M minutes %S seconds",
}

def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """Style a datetime.datetime or datetime.timedelta as text.

    For datetimes, style selects an strftime format from the table above, or
    "UNIX time S.SSSSSS" / "UNIX time S" for epoch seconds (float / int).
    For timedeltas, style is a format string with keys Y/D/H/M/S (totals) and
    YYYY/DD/HH/MM/SS (zero-padded remainders); the default datetime style is
    remapped to "{DD} days, {HH}:{MM}:{SS}". Any other input type yields None.
    """
    # type() is used deliberately (not isinstance) so subclasses of datetime
    # are not silently accepted, matching the original behaviour.
    if type(datetime_object) is datetime.datetime:
        epoch = datetime.datetime.utcfromtimestamp(0)
        if style == "UNIX time S.SSSSSS":
            return (datetime_object - epoch).total_seconds()
        if style == "UNIX time S":
            return int((datetime_object - epoch).total_seconds())
        return datetime_object.strftime(
            _DATETIME_STRFTIME_STYLES.get(style, "%Y-%m-%dT%H%MZ")
        )
    if type(datetime_object) is datetime.timedelta:
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            # NOTE: .seconds excludes microseconds; they are dropped here
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        seconds_total = seconds
        minutes = int(math.floor(seconds / 60))
        minutes_total = minutes
        seconds -= minutes * 60
        hours = int(math.floor(minutes / 60))
        hours_total = hours
        minutes -= hours * 60
        days = int(math.floor(hours / 24))
        days_total = days
        hours -= days * 24
        years = int(math.floor(days / 365))
        years_total = years
        days -= years * 365
        return style.format(**{
            "Y" : years_total,
            "D" : days_total,
            "H" : hours_total,
            "M" : minutes_total,
            "S" : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD" : str(days).zfill(2),
            "HH" : str(hours).zfill(2),
            "MM" : str(minutes).zfill(2),
            "SS" : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert a 24-hour "HHMM" string to minutes after midnight."""
    return int(HHMM[:2]) * 60 + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time as minutes after midnight."""
    moment = datetime.datetime.utcnow()
    return moment.hour * 60 + moment.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop = None # string "HHMM" e.g. "1000"
):
    """Return True when the current UTC time lies inside the daily range
    (which may wrap past midnight), False otherwise, or None when no range
    is specified."""
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        parts = time_range.split("--")
        time_start, time_stop = parts[0], parts[1]
    minutes_per_day = 1440
    start = HHMM_to_minutes(time_start)
    stop = HHMM_to_minutes(time_stop)
    # modular arithmetic handles ranges that wrap past midnight
    return (now_in_minutes() - start) % minutes_per_day <=\
        (stop - start) % minutes_per_day
def timer(function):
    """Decorator that times each call of *function* with a Clock named after
    the function; the Clock registers itself with the global ``clocks``
    registry when one exists."""
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # the previous inspect.getcallargs call computed an unused local and
        # has been removed
        clock = Clock(name = function.__name__)
        try:
            return function(*args, **kwargs)
        finally:
            # stop the clock even when the wrapped call raises
            clock.stop()
    return decoration
class Clock(object):
    """Stopwatch-style clock accumulating elapsed wall-clock time (UTC).

    A clock registers itself with the module-level ``clocks`` registry when
    one exists, and by default starts running on construction.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Record the start time (UTC); the accumulator is not cleared."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Fold outstanding time into the accumulator and record a stop
        time; the clock can be started again later."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        """Add the time elapsed since the previous update (or since start)
        to the accumulator."""
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and forget the working start time."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        """Return the accumulated elapsed time as a timedelta."""
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the elapsed time in seconds (float)."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multi-line text report of the clock attributes."""
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the report."""
        print(self.report())
class Clocks(object):
    """Registry of Clock instances with aggregate text reporting."""
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock instance."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """Return a text report of the registered clocks.

        style "statistics": mean time per clock name. style "full": one line
        per clock. Any other style — or an empty registry — yields
        "no clocks".
        """
        if style is None:
            style = self._default_report_style
        # BUG FIX: an empty registry previously fell through without ever
        # assigning the report string, raising UnboundLocalError on return.
        if not self._list_of_clocks:
            return "no clocks"
        if style == "statistics":
            # Group the recorded times of all clocks by clock name.
            times_by_name = collections.defaultdict(list)
            for clock in self._list_of_clocks:
                times_by_name[clock.name()].append(clock.time())
            # Report the mean time for each clock name.
            string = "clock type".ljust(39) + "mean time (s)"
            for name, values in list(times_by_name.items()):
                string += "\n" +\
                    str(name).ljust(39) + str(sum(values) / len(values))
            string += "\n"
        elif style == "full":
            # Report the time of every clock individually.
            string = "clock".ljust(39) + "time (s)"
            for clock in self._list_of_clocks:
                string += "\n" +\
                    str(clock.name()).ljust(39) + str(clock.time())
            string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """Track fractional progress over time and estimate completion by
    fitting a linear model (model_linear) to (fraction, UNIX time) data."""
    def __init__(
        self
    ):
        self.data = []
        self.quick_calculation = False
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Rate-limit datum recording and fit on a spread of points only."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Record every datum again."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """Record (fraction complete, current UNIX time) and return the
        current status message. In quick-calculation mode data are recorded
        at most once per update_rate seconds."""
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            # rate-limit recording using the internal update clock
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """Extrapolate the linear fit to fraction = 1 and return the
        predicted completion time as a datetime; returns 0 when fewer than
        two data exist, and the epoch when the fit fails."""
        if len(self.data) <= 1:
            return 0
        else:
            try:
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            except:
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the styled estimated completion time (the current time when
        fewer than two data exist)."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated seconds remaining (never negative)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded fraction complete."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recent fraction complete as a percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a progress message (percentage, ETA, ETR) when style is
        None; NOTE: any non-None style currently yields None."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """Return a fresh version-4 UUID as a string."""
    identifier = uuid.uuid4()
    return str(identifier)
def unique_number(
    style = None
):
    """Return a number unique within this process.

    style "integer 3 significant figures": integers counting up from 100;
    raises Exception once the 3-digit space (100-999) is exhausted. Any other
    style: integers counting up from 1. Counters persist in module globals.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        initial_number = 100
        if "unique_numbers_3_significant_figures" not in globals():
            global unique_numbers_3_significant_figures
            unique_numbers_3_significant_figures = []
        if not unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(initial_number)
        else:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        # the style is already known here, so only the overflow needs checking
        if unique_numbers_3_significant_figures[-1] > 999:
            raise Exception("3 significant figure number space exhausted")
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        initial_number = 1
        if "unique_numbers" not in globals():
            global unique_numbers
            unique_numbers = []
        if not unique_numbers:
            unique_numbers.append(initial_number)
        else:
            unique_numbers.append(unique_numbers[-1] + 1)
        return unique_numbers[-1]
def unique_3_digit_number():
    """Return the next process-unique integer in the range 100-999."""
    return unique_number(style = "integer 3 significant figures")
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """Normalise *text* to ASCII and make it safe for a filename (spaces
    become underscores) or, when URL is True, for a URL (lowercased,
    spaces/hyphens collapsed to single hyphens)."""
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    # decompose accented characters, then drop anything non-ASCII
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    # raw strings avoid invalid escape sequences in the regex patterns
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
# @detail Return a filename string. When no filename is specified, one is
# generated from the current UTC time. The proposal is optionally slugified
# (by default excluding the extension). Unless overwriting is enabled, an
# integer suffix is appended repeatedly until the proposed name would not
# overwrite an existing file.
# @return filename string
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    if not filename:
        filename = time_UTC()
    filename_proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base, extension = os.path.splitext(os.path.basename(filename))
            filename_proposed = slugify(text = base) + extension
        else:
            filename_proposed = slugify(text = filename)
    if not overwrite:
        # collision suffixes are built from the ORIGINAL (unslugified) name,
        # matching the historic behaviour
        directory = os.path.dirname(filename)
        base, extension = os.path.splitext(os.path.basename(filename))
        count = 0
        while os.path.exists(filename_proposed):
            count += 1
            suffixed = base + "_" + str(count) + extension
            if directory:
                filename_proposed = directory + "/" + suffixed
            else:
                filename_proposed = suffixed
    return filename_proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating a
    file at the filepath.
    """
    # uuid4 replaces the private tempfile._get_candidate_names API, which is
    # undocumented and may change between Python versions
    return "/tmp/" + uuid.uuid4().hex
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return the last *lines* lines of *filepath* (after user/environment
    variable expansion) as bytes. Return False when the file does not exist,
    is empty, or cannot be read.
    """
    try:
        filepath = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(filepath):
            return False
        text = subprocess.check_output(["tail", "-" + str(lines), filepath])
        return text if text else False
    # narrowed from a bare except so e.g. KeyboardInterrupt is not swallowed
    except (OSError, subprocess.CalledProcessError, ValueError):
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """Check that platform.release() contains *keyphrase*; optionally log a
    warning and/or raise EnvironmentError when it does not."""
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message =\
        "inappropriate environment: " +\
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release = release
        )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """Raise EnvironmentError unless *program* is found on PATH (via which)."""
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is None:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
    else:
        log.debug("program {program} available".format(
            program = program
        ))
def which(
    program
):
    """Return the full path of *program* when it is an executable file
    reachable directly or via PATH; otherwise return None."""
    def executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        # an explicit path: accept it only if it is itself executable
        if executable(program):
            return(program)
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(path.strip('"'), program)
            if executable(candidate):
                return candidate
    return None
def running(
    program
):
    """Return True when a non-defunct process whose ``ps -A`` line contains
    *program* exists."""
    needle = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    for line in listing:
        if needle in line and b"defunct" not in line:
            return True
    return False
def ensure_file_existence(
    filename
):
    """Raise IOError unless *filename* (after environment-variable expansion)
    exists as a file."""
    # BUG FIX: the log messages contained the literal text "(unknown)" while
    # passing an unused format keyword; the {filename} placeholder is restored
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the file at *filename*."""
    os.remove(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """Group numbered filenames with *extension* under *directory* into
    sequences keyed by their digit pattern; return the first sequence
    naturally sorted, or all sequences as a dict of lists."""
    filenames_of_directory = os.listdir(directory)
    # raw strings avoid invalid escape sequences in the regex patterns
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        # filenames differing only in their digits share one pattern key
        pattern = re.sub(r"\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        first_key_identified = next(iter(filename_sequences.keys()))
        filename_sequence = \
            natural_sort(filename_sequences[first_key_identified])
        return filename_sequence
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the names of the regular files directly under *directory*."""
    entries = os.listdir(directory)
    return([
        entry for entry in entries
        if os.path.isfile(os.path.join(directory, entry))
    ])
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ",".replace(",", "."),
):
    """Return the paths of all files beneath *directory*, walking it
    recursively."""
    collected = []
    for root, _, filenames in os.walk(directory):
        collected.extend(os.path.join(root, name) for name in filenames)
    return collected
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """Return absolute paths of the files directly under *directory*,
    optionally keeping only those whose extension contains
    *extension_required*; raise IOError when the directory is missing."""
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for entry in os.listdir(directory):
        candidate = os.path.join(directory, entry)
        if os.path.isfile(candidate):
            filepaths.append(os.path.abspath(candidate))
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command = None,
    background = True,
    timeout = None
):
    """Run *command* through bash.

    background True: spawn and return None immediately (timeout ignored, with
    a warning). background False: wait (optionally up to *timeout* seconds)
    and return the command's stdout, or False when the command times out.
    NOTE: command is passed to a shell; do not feed it untrusted input.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash"
        )
        return None
    elif not background:
        process = subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash",
            stdout = subprocess.PIPE
        )
        try:
            process.wait(timeout = timeout)
            output, errors = process.communicate(timeout = timeout)
            return output
        # narrowed from a bare except: only a timeout triggers the kill path
        except subprocess.TimeoutExpired:
            process.kill()
            return False
    else:
        return None
def percentage_power():
    """Return the battery charge percentage reported by upower (e.g. "85%"),
    "100%" when only line power is found, or None when power information is
    unavailable."""
    try:
        filenames_power = engage_command(command = "upower -e")
        filenames_power = [line for line in filenames_power.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # BUG FIX: the command previously contained the literal text
            # "(unknown)" while passing an unused format keyword; the
            # {filename} placeholder is restored
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery))
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    except Exception:
        # any failure (no upower, unexpected output) means "unknown"
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a
    list to a specified type and return the new list. NOTE: only str is
    currently supported; other types yield None.
    """
    if element_type is str:
        converted = []
        for element in list_object:
            if isinstance(element, list):
                converted.append(convert_type_list_elements(
                    list_object = element,
                    element_type = str
                ))
            else:
                converted.append(str(element))
        return converted
class List_Consensus(list):
    """
    A list with an approximate memory budget and a "consensus" query.

    Appending may evict the least frequent elements until the list's own size
    estimate (sys.getsizeof) falls within ``size_constraint`` bytes;
    ``consensus`` returns the most frequent element.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation (Python 2 needs the explicit super arguments)
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Change the memory budget in bytes (ignored when size is None)."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        Evict the least frequent element repeatedly until the size estimate
        is within the budget.
        """
        budget = self.size_constraint if size is None else size
        while sys.getsizeof(self) > budget:
            frequencies = collections.Counter(self)
            least_frequent = frequencies.most_common()[-1][0]
            self.remove(least_frequent)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append an element, then optionally enforce the size budget."""
        budget = self.size_constraint if size is None else size
        list.append(self, element)
        if ensure_size:
            self.ensure_size(size = budget)
    def consensus(
        self
    ):
        """Return the most frequent element, or None when the list is empty."""
        try:
            return collections.Counter(self).most_common(1)[0][0]
        except:
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
):
    """Sort strings so that embedded numbers compare numerically, e.g.
    "a2" sorts before "a10"; letters compare case-insensitively."""
    def sort_key(text):
        return [
            int(token) if token.isdigit() else token.lower()
            for token in re.split("([0-9]+)", text)
        ]
    return sorted(list_object, key = sort_key)
def indices_of_list_element_duplicates(
    x
):
    """Yield the index of every element that repeats an earlier element;
    lists and dicts are compared via their hashable tuple forms."""
    observed = set()
    for index, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
):
    """Return the indices of the *number* greatest values of x, ordered by
    ascending value."""
    count = min(number, len(x))
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _ in ranked[-count:]]
def unique_list_elements(x):
    """Return the elements of x with duplicates removed, preserving the order
    of first occurrence; works for unhashable elements too."""
    result = []
    for element in x:
        if element in result:
            continue
        result.append(element)
    return result
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    This function returns the specified number of elements of a list spread
    approximately evenly.
    """
    # Fewer (or equally many) elements than requested: return everything.
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    # A single requested element is taken from the middle of the list.
    if number_of_elements == 1:
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # Otherwise pick one element near the centre of the first of
    # number_of_elements equal segments, then recurse on the remainder of the
    # list with one fewer element requested.
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def split_list(
    list_object = None,
    granularity = None
):
    """
    Split a list into *granularity* contiguous parts of roughly equal length
    and return them as a list of lists. Negative granularities raise an
    Exception; a granularity greater than the list length yields one
    singleton list per element.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    part_length = len(list_object) / float(granularity)
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    parts = []
    cursor = float(0)
    while cursor < len(list_object):
        parts.append(list_object[int(cursor):int(cursor + part_length)])
        cursor += part_length
    return parts
def ranges_edge_pairs(
    extent = None,
    range_length = None
):
    """
    Return the edges of ranges within an extent of some length, as a list of
    (start, stop) tuples. For example, splitting an extent of 76 into ranges
    of at most 20 yields [(0, 20), (21, 41), (42, 62), (63, 76)].
    """
    number_of_ranges = int(math.ceil(extent / range_length))
    pairs = []
    for index in range(number_of_ranges):
        start = index * range_length + index
        stop = min((index + 1) * range_length + index, extent)
        pairs.append((start, stop))
    return pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """Parse an indented Markdown bullet list ("- name: value") into nested
    dictionaries; a bullet without a value opens a nested branch."""
    bullet = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branches = [{}]
    for spaces, name, value in bullet.findall(Markdown_list):
        level = len(spaces)
        if level > current_depth:
            assert not branches[-1], "unexpected indent"
        elif level < current_depth:
            branches.pop()
        branches[-1][name] = value or {}
        if not value:
            # a value-less bullet starts a new branch
            branches.append(branches[-1][name])
        current_depth = level
    return branches[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """Parse an indented Markdown bullet list ("- name: value") into nested
    OrderedDicts; a bullet without a value opens a nested branch."""
    bullet = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branches = [collections.OrderedDict()]
    for spaces, name, value in bullet.findall(Markdown_list):
        level = len(spaces)
        if level > current_depth:
            assert not branches[-1], "unexpected indent"
        elif level < current_depth:
            branches.pop()
        branches[-1][name] = value or collections.OrderedDict()
        if not value:
            # a value-less bullet starts a new branch
            branches.append(branches[-1][name])
        current_depth = level
    return branches[0]
def open_configuration(
    filename = None
):
    """Read a Markdown-list configuration file and parse it to an
    OrderedDict."""
    # with-statement closes the file handle (previously leaked)
    with open(filename, "r") as configuration_file:
        file_configuration = configuration_file.read()
    return Markdown_list_to_OrderedDict(file_configuration)
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
):
    """Resample *values* to *length* points by interpolation over their
    indices; return the new y values (dimensions 1) or an (x, y) tuple
    (dimensions 2)."""
    x_original = list(range(0, len(values)))
    interpolate = scipy.interpolate.interp1d(
        x_original,
        values,
        kind = interpolation_type
    )
    x_resampled = list(numpy.linspace(min(x_original), max(x_original), length))
    y_resampled = [float(interpolate(x)) for x in x_resampled]
    if dimensions == 1:
        return y_resampled
    elif dimensions == 2:
        return (x_resampled, y_resampled)
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """Clip a waveform array in place to a rectangle wave: non-negative
    samples take the maximum amplitude, negative samples the minimum; the
    modified array is also returned."""
    ceiling = fraction_amplitude * max(values)
    values[values >= 0] = ceiling
    # min is taken after the positives are clipped; negatives are unchanged
    # at this point, so this is the original minimum
    floor = fraction_amplitude * min(values)
    values[values < 0] = floor
    values[:] = [sample * (1 / fraction_amplitude) for sample in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
):
    """Read a WAV file, convert its waveform to a rectangle wave and write
    the result to a collision-safe output filename (the input filename by
    default) unless overwriting is enabled."""
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    # change_waveform_to_rectangle_waveform already clips and rescales the
    # samples; the previous in-line repetition of those three steps was a
    # redundant no-op on the already-rectangular data and has been removed
    values = change_waveform_to_rectangle_waveform(
        values = values,
        fraction_amplitude = fraction_amplitude
    )
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """Scale every element of x by 1/summation; by default summation is
    sum(x), so the result sums to unity."""
    total = sum(x) if summation is None else summation
    return [element / total for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """Linearly map the values of x onto the interval [minimum, maximum]."""
    low, high = min(x), max(x)
    scale = (maximum - minimum) / (high - low)
    return [minimum + (element - low) * scale for element in x]
def composite_variable(
    x
):
    """Fold the sequence x into a single number using powers of
    (len(x) + 1) as positional weights, starting at power -1."""
    base = len(x) + 1
    return sum(
        base ** (index - 1) * element for index, element in enumerate(x)
    )
def model_linear(
    data = None,
    quick_calculation = False
):
    """Ordinary least-squares fit of y = b0 + b1*x to (x, y) pairs; return
    (b0, b1). With quick_calculation, fit only a spread of 10 points."""
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x = sum(x for x, _ in data)
    sum_y = sum(y for _, y in data)
    sum_xx = sum(x ** 2 for x, _ in data)
    sum_xy = sum(x * y for x, y in data)
    b1 = (sum_xy - (sum_x * sum_y) / n) / (sum_xx - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
):
    """Unpickle and return the object stored in *filename*.

    NOTE: pickle is unsafe on untrusted input; only load trusted files.
    """
    # with-statement closes the file handle (previously leaked)
    with open(filename, "rb") as object_file:
        return pickle.load(object_file)
def export_object(
    x,
    filename = None,
    overwrite = False
):
    """Pickle *x* to *filename*, routed through propose_filename for
    collision-safe naming unless overwriting is enabled."""
    filename = propose_filename(
        filename = filename,
        overwrite = overwrite
    )
    # with-statement closes the file handle (previously leaked)
    with open(filename, "wb") as object_file:
        pickle.dump(x, object_file)
def string_to_bool(x):
    """Interpret common affirmative strings ("yes", "true", "t", "1"),
    case-insensitively, as True; anything else as False."""
    affirmatives = ("yes", "true", "t", "1")
    return x.lower() in affirmatives
def ustr(text):
    """
    Convert *text* to unicode (Python 2) or str (Python 3), as appropriate to
    the running interpreter; None passes through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
# Word tables for number_to_English_text; previously these lists were rebuilt
# on every call, so they are hoisted to module level.
_ENGLISH_ONES = [
    "",
    "one ",
    "two ",
    "three ",
    "four ",
    "five ",
    "six ",
    "seven ",
    "eight ",
    "nine "
]
_ENGLISH_TEENS = [
    "ten ",
    "eleven ",
    "twelve ",
    "thirteen ",
    "fourteen ",
    "fifteen ",
    "sixteen ",
    "seventeen ",
    "eighteen ",
    "nineteen "
]
_ENGLISH_TENS = [
    "",
    "",
    "twenty ",
    "thirty ",
    "forty ",
    "fifty ",
    "sixty ",
    "seventy ",
    "eighty ",
    "ninety "
]
_ENGLISH_THOUSANDS = [
    "",
    "thousand ",
    "million ",
    "billion ",
    "trillion ",
    "quadrillion ",
    "quintillion ",
    "sextillion ",
    "septillion ",
    "octillion ",
    "nonillion ",
    "decillion ",
    "undecillion ",
    "duodecillion ",
    "tredecillion ",
    "quattuordecillion ",
    "quindecillion", # sic: no trailing space in the original table
    "sexdecillion ",
    "septendecillion ",
    "octodecillion ",
    "novemdecillion ",
    "vigintillion "
]

def number_to_English_text(
    number = None
):
    """Spell out a non-negative integer (given as int or digit string) in
    English words, e.g. 123 -> "one hundred twenty three"; zero yields an
    empty string."""
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            thousand = _ENGLISH_THOUSANDS[index]
            if number_2 == 0:
                number_words = _ENGLISH_ONES[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = _ENGLISH_TEENS[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = _ENGLISH_TENS[number_2] + _ENGLISH_ONES[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = _ENGLISH_ONES[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """Replace every run of digits in *text* with its English spelling."""
    # Split the text into alternating non-digit and digit segments; the raw
    # string avoids an invalid escape sequence in the regex.
    text = re.split(r"(\d+)", text)
    if text[-1] == "":
        text = text[:-1]
    text_translated = []
    # Replace numbers with English text.
    for text_segment in text:
        if all(character.isdigit() for character in text_segment):
            text_translated.append(number_to_English_text(number = text_segment))
        else:
            text_translated.append(text_segment)
    return "".join(text_translated)
# Look-up tables for the function below; previously both dictionaries were
# rebuilt on every loop iteration for every word, so they are hoisted to
# module level.
_CONTRACTION_EXPANSIONS = {
    "ain't": "is not",
    "aren't": "are not",
    "can't": "can not",
    "could've": "could have",
    "couldn't": "could not",
    "didn't": "did not",
    "doesn't": "does not",
    "don't": "do not",
    "gonna": "going to",
    "gotta": "got to",
    "hadn't": "had not",
    "hasn't": "has not",
    "haven't": "have not",
    "he'd": "he would",
    "he'll": "he will",
    "he's": "he is",
    "how'd": "how did",
    "how'll": "how will",
    "how's": "how is",
    "I'd": "I would",
    "I'll": "I will",
    "I'm": "I am",
    "I've": "I have",
    "isn't": "is not",
    "it'd": "it would",
    "it'll": "it will",
    "it's": "it is",
    "mightn't": "might not",
    "might've": "might have",
    "mustn't": "must not",
    "must've": "must have",
    "needn't": "need not",
    "oughtn't": "ought not",
    "shan't": "shall not",
    "she'd": "she would",
    "she'll": "she will",
    "she's": "she is",
    "shouldn't": "should not",
    "should've": "should have",
    "somebody's": "somebody is",
    "someone'd": "someone would",
    "someone'll": "someone will",
    "someone's": "someone is",
    "that'll": "that will",
    "that's": "that is",
    "that'd": "that would",
    "there'd": "there would",
    "there're": "there are",
    "there's": "there is",
    "they'd": "they would",
    "they'll": "they will",
    "they're": "they are",
    "they've": "they have",
    "wasn't": "was not",
    "we'd": "we would",
    "we'll": "we will",
    "we're": "we are",
    "we've": "we have",
    "weren't": "were not",
    "what'd": "what did",
    "what'll": "what will",
    "what're": "what are",
    "what's": "what is",
    "whats": "what is",
    "what've": "what have",
    "when's": "when is",
    "when'd": "when did",
    "where'd": "where did",
    "where's": "where is",
    "where've": "where have",
    "who'd": "who would",
    "who'd've": "who would have",
    "who'll": "who will",
    "who're": "who are",
    "who's": "who is",
    "who've": "who have",
    "why'd": "why did",
    "why're": "why are",
    "why's": "why is",
    "won't": "will not",
    "won't've": "will not have",
    "would've": "would have",
    "wouldn't": "would not",
    "wouldn't've": "would not have",
    "y'all": "you all",
    "ya'll": "you all",
    "you'd": "you would",
    "you'd've": "you would have",
    "you'll": "you will",
    "y'aint": "you are not",
    "y'ain't": "you are not",
    "you're": "you are",
    "you've": "you have"
}
_NUMBER_WORD_DIGITS = {
    "zero": "0",
    "one": "1",
    "two": "2",
    "three": "3",
    "four": "4",
    "five": "5",
    "six": "6",
    "seven": "7",
    "eight": "8",
    "nine": "9",
    "ten": "10",
    "eleven": "11",
    "twelve": "12",
    "thirteen": "13",
    "fourteen": "14",
    "fifteen": "15",
    "sixteen": "16",
    "seventeen": "17",
    "eighteen": "18",
    "nineteen": "19",
    "twenty": "20"
}
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
):
    """
    This function replaces contractions with full words and replaces number
    words (zero through twenty) with digits in specified text. There is the
    option to remove articles ("a", "an", "the").
    """
    words = text.split()
    text_translated = ""
    for word in words:
        if remove_articles and word in ["a", "an", "the"]:
            continue
        if word in _CONTRACTION_EXPANSIONS:
            word = _CONTRACTION_EXPANSIONS[word]
        if word in _NUMBER_WORD_DIGITS:
            word = _NUMBER_WORD_DIGITS[word]
        text_translated += " " + word
    text_translated = text_translated.strip()
    return text_translated
def split_into_sentences(
    text = None
):
    """
    Split text into a list of sentences. Periods belonging to common
    abbreviations (titles, company suffixes, acronyms, website domains,
    "Ph.D.") are shielded so they are not treated as sentence terminators.
    Any trailing text not ending in sentence punctuation is discarded.
    """
    re_capital = "([A-Z])"
    re_prefix  = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    re_suffix  = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    re_starter = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    re_acronym = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    re_website = "[.](com|gov|io|net|org|pro)"
    working = " " + text + " "
    working = working.replace("\n", " ")
    # Shield periods in known non-terminal positions with a <prd> placeholder.
    working = re.sub(re_prefix, "\\1<prd>", working)
    working = re.sub(re_website, "<prd>\\1", working)
    if "Ph.D" in working:
        working = working.replace("Ph.D.", "Ph<prd>D<prd>")
    working = re.sub("\s" + re_capital + "[.] ", " \\1<prd> ", working)
    working = re.sub(re_acronym + " " + re_starter, "\\1<stop> \\2", working)
    working = re.sub(re_capital + "[.]" + re_capital + "[.]" + re_capital + "[.]", "\\1<prd>\\2<prd>\\3<prd>", working)
    working = re.sub(re_capital + "[.]" + re_capital + "[.]", "\\1<prd>\\2<prd>", working)
    working = re.sub(" " + re_suffix + "[.] " + re_starter, " \\1<stop> \\2", working)
    working = re.sub(" " + re_suffix + "[.]", " \\1<prd>", working)
    working = re.sub(" " + re_capital + "[.]", " \\1<prd>", working)
    # Move terminal punctuation outside closing quotation marks.
    working = working.replace(".”", "”.")
    working = working.replace(".\"", "\".")
    working = working.replace("!\"", "\"!")
    working = working.replace("?\"", "\"?")
    # Mark sentence boundaries, then restore the shielded periods.
    for punctuation in (".", "?", "!"):
        working = working.replace(punctuation, punctuation + "<stop>")
    working = working.replace("<prd>", ".")
    pieces = working.split("<stop>")[:-1]
    return [piece.strip() for piece in pieces]
def trim_incomplete_sentences(
    text = None
):
    """
    Return the text with its first sentence removed and with any trailing
    text that does not end in sentence punctuation discarded (both are
    treated as incomplete fragments by split_into_sentences).
    """
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """Return a pseudorandom MAC address string such as "0a:1b:2c:3d:4e:5f"."""
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{:02x}".format(octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
):
    """
    Return the attribute `name` of `object_instance`. A name of the form
    "attribute[index]" indexes into the attribute. If the lookup fails for
    any reason, return `imputation_default_value`.
    """
    try:
        if "[" in name and "]" in name:
            # Parse an indexed access such as "scores[2]".
            index = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit are no
        # longer swallowed; any ordinary failure imputes the default.
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
):
    """
    Return a list of the specified number of valid Python variable names,
    each a 32-character hexadecimal UUID string starting with a letter.
    """
    names = []
    while len(names) < number:
        candidate = uuid.uuid4().hex
        # Identifiers cannot start with a digit, so discard such candidates.
        if candidate[0].isalpha():
            names.append(candidate)
    return names
def add_time_variables(df, reindex = True):
    """
    Return the DataFrame with datetime-derived variables added: month, month
    name, weekday index, weekday name, timedelta through day, fraction
    through day, hour, hours through day, days through week and days through
    year; the index is set to the `datetime` variable, which must exist
    (return False if it does not). The `reindex` parameter is accepted for
    backward compatibility but the index is always set.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # Series.dt.weekday_name was removed from pandas; dt.day_name() is the
    # supported equivalent.
    df["weekday_name"] = df["datetime"].dt.day_name()
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    # Vectorized equivalent of the previous row-wise apply.
    df["days_through_week"] = df["weekday"] + df["fraction_through_day"]
    df["days_through_year"] = df["datetime"].dt.dayofyear
    df.index = df["datetime"]
    return df
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create weekly plots of a variable in a DataFrame on the current
    matplotlib axes, one line and/or scatter series per week, optionally
    renormalized to the unit interval per week using MinMaxScaler. The
    variable `days_through_week` must exist (for example added by
    `add_time_variables`); return False if it does not.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    weeks = []
    # NOTE(review): DatetimeIndex.week was deprecated and removed in newer
    # pandas (df.index.isocalendar().week is the modern equivalent) — confirm
    # the pandas version this project pins.
    for group in df.groupby(df.index.week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            # Scale each week independently to [0, 1].
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    # Label the horizontal axis at each day's midpoint.
    plt.xticks(
        [ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create yearly plots of a variable in a DataFrame on the current
    matplotlib axes, one series per calendar year, optionally renormalized to
    the unit interval per year using MinMaxScaler. The DataFrame index must
    be datetime (return False otherwise) and the `days_through_year`
    variable must exist.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for _, year_frame in df.groupby(df.index.year):
        if renormalize:
            # Scale each year independently to [0, 1].
            series = scaler.fit_transform(year_frame[[variable]])
        else:
            series = year_frame[variable]
        if plot:
            plt.plot(year_frame["days_through_year"], series, linestyle = linestyle, linewidth = linewidth, label = year_frame.index.year.values[0])
        if scatter:
            plt.scatter(year_frame["days_through_year"], series, s = s)
    if horizontal_axis_labels_months:
        # Label the horizontal axis at each month's midpoint.
        plt.xticks(
            [ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df = None,
    variable = None,
    window = 20,
    upper_factor = 2,
    lower_factor = 2
):
    """
    Add rolling statistics variables derived from a specified variable in a
    DataFrame: the rolling mean, rolling standard deviation and
    mean ± factor × standard deviation bounds over the specified window.
    Return the modified DataFrame.
    """
    # pd.stats.moments.rolling_mean/rolling_std were removed from pandas;
    # Series.rolling is the supported equivalent.
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"] = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = None,
    variables_exclude = None
):
    """
    Rescale variables of a DataFrame to the unit interval with MinMaxScaler,
    excluding variables that contain NaNs, string/datetime/timedelta
    variables and any explicitly excluded variables, while forcing inclusion
    of any explicitly included variables. Return the modified DataFrame.
    """
    # None defaults fix the shared-mutable-default bug: the previous
    # implementation aliased and extended the default [] in place, so the
    # exclusions of one call leaked into every subsequent call. Copying also
    # avoids mutating a caller-supplied list.
    variables_include = list(variables_include) if variables_include else []
    variables_not_rescale = list(variables_exclude) if variables_exclude else []
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Draw a day-long bar histogram of counts of the variable for each hour on
    the current matplotlib axes. The DataFrame index must be datetime;
    return False if it is not.
    """
    datetime_dtypes = ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]
    if df.index.dtype not in datetime_dtypes:
        log.error("index is not datetime")
        return False
    hourly_counts = df.groupby(df.index.hour)[variable].count()
    hourly_counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
):
    """
    Draw a week-long bar histogram of counts of the variable for each day of
    the week, ordered Monday to Sunday, on the current matplotlib axes. The
    DataFrame index must be datetime; return False if it is not.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # DatetimeIndex.weekday_name was removed from pandas; day_name() is the
    # supported equivalent.
    counts = df.groupby(df.index.day_name())[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
):
    """
    Draw a year-long bar histogram of counts of the variable for each month,
    ordered January to December, on the current matplotlib axes. The
    DataFrame index must be datetime; return False if it is not.
    """
    datetime_dtypes = ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]
    if df.index.dtype not in datetime_dtypes:
        log.error("index is not datetime")
        return False
    monthly_counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    monthly_counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Apply convenient defaults for working in a Jupyter notebook: seaborn
    paper context with a monospace font, warnings suppressed, generous
    pandas display limits and a large default figure size.
    """
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    for option in ("display.max_rows", "display.max_columns"):
        pd.set_option(option, 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
):
    """
    Display a progress bar widget in a Jupyter notebook while iterating over
    a sequence, yielding its elements. Its dependencies must be enabled on
    launching Jupyter, such as in the following way:

    jupyter nbextension enable --py widgetsnbextension

    The progress bar can be used in a way like the following:

    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)

    The display is refreshed every `every` elements (default: every element
    for sequences of up to 200 elements, otherwise roughly every 0.5 %).
    Unsized iterators display a running count instead of a bar and require
    `every` to be set; on an exception the bar turns red before re-raising.
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            # The sequence has no length: treat it as an open-ended iterator.
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200) # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        # Indeterminate mode: a filled "info" bar with a running count.
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            # Refresh the widget on the first element and every `every`th.
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name = name,
                        index = index,
                        size = size
                    )
            yield record
    except:
        # Mark the bar red on any error, then re-raise to the caller.
        progress.bar_style = "danger"
        raise
    else:
        # Completed: mark the bar green and show the final count.
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name = name,
            index = str(index or "?")
        )
# Create the module-level clocks registry at import time (see _main).
_main()
|
wdbm/shijian
|
shijian.py
|
weekly_plots
|
python
|
def weekly_plots(
df,
variable,
renormalize = True,
plot = True,
scatter = False,
linestyle = "-",
linewidth = 1,
s = 1
):
if not "days_through_week" in df.columns:
log.error("field days_through_week not found in DataFrame")
return False
weeks = []
for group in df.groupby(df.index.week):
weeks.append(group[1])
scaler = MinMaxScaler()
plt.ylabel(variable);
for week in weeks:
if renormalize:
values = scaler.fit_transform(week[[variable]])
else:
values = week[variable]
if plot:
plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
if scatter:
plt.scatter(week["days_through_week"], values, s = s)
plt.xticks(
[ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
)
|
Create weekly plots of a variable in a DataFrame, optionally renormalized.
It is assumed that the variable `days_through_week` exists.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1646-L1680
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
name = "shijian"
version = "2018-06-02T1644Z"
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    """Create the module-level `clocks` registry that Clock instances join."""
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
):
    """Return the current UTC time, by default as integer UNIX seconds."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def time_UTC(
    style = None
):
    """Return the current UTC time styled per style_datetime_object."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
):
    """
    Return a proposed overwrite-safe filename based on the current UNIX
    time, with the optional extension appended.
    """
    filename = str(time_UNIX(style = style))
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
):
    """
    Return a proposed overwrite-safe filename based on the current UTC time,
    with the optional extension appended.
    """
    filename = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def style_minimal_seconds(seconds):
    """
    Render a duration in seconds as e.g. "2 days 3 hours 5 seconds",
    omitting any unit whose value is zero.
    """
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for unit in ("days", "hours", "minutes", "seconds"):
        amount = getattr(delta, unit)
        if amount:
            parts.append("{} {}".format(int(amount), unit))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """Style the given UNIX timestamp (seconds) as a UTC datetime string."""
    datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(datetime_object = datetime_object, style = style)
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Style a datetime.datetime or a datetime.timedelta object.

    For a datetime, the style selects either a strftime-based string
    rendering or a UNIX-seconds number ("UNIX time S.SSSSSS" for a float,
    "UNIX time S" for a rounded integer); unknown styles fall back to the
    filename-safe "YYYY-MM-DDTHHMMZ" rendering. For a timedelta, the style
    is a format string with {Y}/{D}/{H}/{M}/{S} totals and zero-padded
    {YYYY}/{DD}/{HH}/{MM}/{SS} remainders; the default style maps to
    "{DD} days, {HH}:{MM}:{SS}". Other object types return None.
    """
    if type(datetime_object) is datetime.datetime:
        epoch = datetime.datetime.utcfromtimestamp(0)
        # UNIX time in seconds with second fraction
        if style == "UNIX time S.SSSSSS":
            return (datetime_object - epoch).total_seconds()
        # UNIX time in seconds rounded
        if style == "UNIX time S":
            return int((datetime_object - epoch).total_seconds())
        strftime_formats = {
            # filename safe
            "YYYY-MM-DDTHHMMZ":                                "%Y-%m-%dT%H%MZ",
            # filename safe with seconds
            "YYYY-MM-DDTHHMMSSZ":                              "%Y-%m-%dT%H%M%SZ",
            # filename safe with seconds and microseconds
            "YYYY-MM-DDTHHMMSSMMMMMMZ":                        "%Y-%m-%dT%H%M%S%fZ",
            # elegant
            "YYYY-MM-DD HH:MM:SS UTC":                         "%Y-%m-%d %H:%M:%S UTC",
            "YYYY-MM-DD HH:MM:SS Z":                           "%Y-%m-%d %H:%M:%S Z",
            # human-readable date and time variants
            "day DD month YYYY":                               "%A %d %B %Y",
            "HH:MM day DD month YYYY":                         "%H:%M %A %d %B %Y",
            "HH:MM:SS day DD month YYYY":                      "%H:%M:%S %A %d %B %Y",
            "day DD month YYYY HH:MM:SS":                      "%A %d %B %Y %H:%M:%S",
            "HH hours MM minutes SS sounds day DD month YYYY": "%H hours %M minutes %S seconds %A %d %B %Y",
            "DD:HH:MM":                                        "%d:%H:%M",
            "DD:HH:MM:SS":                                     "%d:%H:%M:%S",
            "HH:MM:SS":                                        "%H:%M:%S",
            "HH hours MM minutes SS seconds":                  "%H hours %M minutes %S seconds"
        }
        # Unknown styles fall back to the filename-safe rendering.
        return datetime_object.strftime(strftime_formats.get(style, "%Y-%m-%dT%H%MZ"))
    if type(datetime_object) is datetime.timedelta:
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        # Successive floor divisions yield remainders and running totals.
        seconds_total = seconds
        minutes, seconds = divmod(seconds, 60)
        minutes_total = minutes
        hours, minutes = divmod(minutes, 60)
        hours_total = hours
        days, hours = divmod(hours, 24)
        days_total = days
        years, days = divmod(days, 365)
        years_total = years
        return style.format(**{
            "Y"   : years_total,
            "D"   : days_total,
            "H"   : hours_total,
            "M"   : minutes_total,
            "S"   : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD"  : str(days).zfill(2),
            "HH"  : str(hours).zfill(2),
            "MM"  : str(minutes).zfill(2),
            "SS"  : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert an "HHMM" time string to minutes past midnight."""
    return 60 * int(HHMM[:2]) + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time as minutes past midnight."""
    utc_now = datetime.datetime.utcnow()
    return utc_now.hour * 60 + utc_now.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop = None # string "HHMM" e.g. "1000"
):
    """
    Return True if the current UTC time falls within the specified daily
    time range, which may wrap past midnight; return None if no range is
    given. The range may be one "HHMM--HHMM" string or two "HHMM" strings.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        time_start, time_stop = time_range.split("--")[0], time_range.split("--")[1]
    minutes_per_day = 1440
    start = HHMM_to_minutes(time_start)
    stop = HHMM_to_minutes(time_stop)
    # Modular arithmetic handles ranges that wrap past midnight.
    return (now_in_minutes() - start) % minutes_per_day <=\
        (stop - start) % minutes_per_day
def timer(function):
    """
    Decorator that times each call of the wrapped function using a Clock
    named after the function (the clock joins the global clocks registry
    when one exists). The clock is stopped even when the call raises.
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # The previous implementation computed inspect.getcallargs(...) into
        # an unused local (and getcallargs is deprecated); that work is
        # removed.
        clock = Clock(name = function.__name__)
        try:
            return function(*args, **kwargs)
        finally:
            # Stop the clock on both normal return and exception so the
            # elapsed time is always recorded.
            clock.stop()
    return decoration
class Clock(object):
    """
    A named stopwatch that accumulates elapsed wall-clock (UTC) time and can
    be started, stopped, updated and reset. If a module-level `clocks`
    registry exists, each new clock registers itself with it on creation.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        """Create a clock, optionally named, and by default start it."""
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Record the start time (UTC now) and begin accumulating."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Fold outstanding elapsed time into the accumulator and record the stop time."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        """Add the time since the last update (or since start) to the accumulator."""
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the working start time."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        """Return the accumulated elapsed time as a timedelta."""
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the accumulated elapsed time in seconds."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multi-line textual report of the clock attributes."""
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the clock report."""
        print(self.report())
class Clocks(object):
    """
    A registry of Clock instances with aggregate reporting: the
    "statistics" style reports the mean elapsed time per clock name and the
    "full" style lists every clock individually.
    """
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock with this collection."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """Return a textual report in the given style (default "statistics")."""
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks != []:
            if style == "statistics":
                # Group elapsed times by clock name, then report the mean
                # time for each name.
                times_by_name = {}
                for clock in self._list_of_clocks:
                    times_by_name.setdefault(clock.name(), []).append(clock.time())
                string = "clock type".ljust(39) + "mean time (s)"
                for name, values in list(times_by_name.items()):
                    string += "\n" +\
                        str(name).ljust(39) + str(sum(values)/len(values))
                string += "\n"
            elif style == "full":
                # List every clock with its own elapsed time.
                string = "clock".ljust(39) + "time (s)"
                for clock in self._list_of_clocks:
                    string += "\n" +\
                        str(clock.name()).ljust(39) + str(clock.time())
                string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report for the given style (default "statistics")."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track the progress of a long-running task as (fraction complete, UNIX
    time) data points and estimate the completion time with a linear fit of
    those points. Quick-calculation mode throttles data collection to at
    most one point per update interval.
    """
    def __init__(
        self
    ):
        self.data = []
        self.quick_calculation = False
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Throttle datum collection to the update rate."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Collect every datum offered."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """Record a completion fraction and return the status message."""
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            # In quick-calculation mode, drop the datum unless at least
            # update_rate seconds have passed since the last stored datum.
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """
        Return the estimated completion time as a datetime, extrapolated by
        a linear fit of time against fraction; return 0 with fewer than two
        data points.
        """
        if len(self.data) <= 1:
            return 0
        else:
            try:
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                # Extrapolate the fitted line to fraction = 1 (completion).
                x = 1
                y = b0 + b1 * x
            except:
                # best-effort: fall back to the epoch when the fit fails
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the styled estimated completion time (now when there are too few data)."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated seconds remaining (0 when unknown or overdue)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded completion fraction."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recently recorded completion percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a status message; the default style shows percentage, ETA and ETR."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """Return a version-4 UUID as a string."""
    identifier = uuid.uuid4()
    return str(identifier)
def unique_number(
    style = None
):
    """
    Return a number guaranteed unique for the lifetime of the process by
    appending to a module-level history list. The default mode counts up
    from 1; style "integer 3 significant figures" counts up from 100 and
    raises Exception once the value exceeds 999.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        global unique_numbers_3_significant_figures
        if "unique_numbers_3_significant_figures" not in globals():
            unique_numbers_3_significant_figures = []
        history = unique_numbers_3_significant_figures
        history.append(history[-1] + 1 if history else 100)
        if history[-1] > 999:
            raise Exception
        return history[-1]
    # mode: integer
    else:
        global unique_numbers
        if "unique_numbers" not in globals():
            unique_numbers = []
        history = unique_numbers
        history.append(history[-1] + 1 if history else 1)
        return history[-1]
def unique_3_digit_number():
    """Return the next unique integer with 3 significant figures (100-999)."""
    requested_style = "integer 3 significant figures"
    return unique_number(style = requested_style)
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Normalize text to ASCII and strip characters unsafe for filenames or
    URLs. In filename mode (the default) runs of whitespace become
    underscores; in URL mode the text is lowercased and runs of whitespace
    or hyphens become single hyphens.
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    # Decompose accented characters and drop anything outside ASCII.
    normalized = unicodedata.normalize("NFKD", text)
    normalized = normalized.encode("ascii", "ignore").decode("utf-8")
    normalized = re.sub("[^\w\s-]", "", normalized).strip()
    if filename and not URL:
        normalized = re.sub("[\s]+", "_", normalized)
    elif URL:
        normalized = re.sub("[-\s]+", "-", normalized.lower())
    if return_str:
        normalized = str(normalized)
    return normalized
## @brief propose a filename
# @detail This function returns a filename string. If a default filename is not
# specified, the function generates one based on the current time. If a default
# filename is specified, the function uses it as the default filename. By
# default, the function then checks to see if using the filename would cause
# overwriting of an existing file. If overwriting is possible, the function
# appends an integer to the filename in a loop in order to generate a filename
# that would not cause overwriting of an existing file. The function can be set
# to overwrite instead of using the default overwrite protection behaviour.
# @return filename string
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    """
    Return a proposed filename. If no filename is given, one is generated
    from the current UTC time. The name is optionally slugified, by default
    excluding the extension. Unless overwrite is requested, an incrementing
    integer suffix is appended as needed so that the proposal does not
    collide with an existing file.
    """
    if not filename:
        filename = time_UTC()
    proposal = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base, extension = os.path.splitext(os.path.basename(filename))
            proposal = slugify(text = base) + extension
        else:
            proposal = slugify(text = filename)
    if not overwrite:
        # NOTE(review): collision suffixes are derived from the original
        # (pre-slugify) name, matching the historical behavior.
        counter = 0
        directory = os.path.dirname(filename)
        base, extension = os.path.splitext(os.path.basename(filename))
        while os.path.exists(proposal):
            counter += 1
            candidate = base + "_" + str(counter) + extension
            if directory:
                proposal = directory + "/" + candidate
            else:
                proposal = candidate
    return proposal
def tmp_filepath():
    """
    Return an extensionless filepath under /tmp without creating a file at
    that path.
    """
    candidate_name = next(tempfile._get_candidate_names())
    return "/tmp/" + candidate_name
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return the last `lines` lines of the specified file as bytes via the
    tail utility, or False if the file does not exist, is empty or an error
    occurs.
    """
    try:
        filepath = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(filepath):
            return False
        text = subprocess.check_output(["tail", "-" + str(lines), filepath])
        # An empty file yields empty output, reported as False.
        return text if text else False
    except:
        # best-effort: any failure (e.g. tail unavailable) yields False
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Check that the kernel release string contains the keyphrase. On a
    mismatch, optionally log a warning and, when required (the default),
    log a fatal message and raise EnvironmentError.
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message = (
        "inappropriate environment: "
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release = release
        )
    )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """
    Log a check for availability of the specified program on PATH and raise
    EnvironmentError if it is not found.
    """
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is None:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
    log.debug("program {program} available".format(
        program = program
    ))
def which(
    program
):
    """
    Return the path of the specified executable program, searching PATH
    when no directory component is given, or None if it is not found.
    """
    def _is_executable(filepath):
        return os.path.isfile(filepath) and os.access(filepath, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        # An explicit path is returned only if it is itself executable.
        if _is_executable(program):
            return(program)
    else:
        for search_directory in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(search_directory.strip('"'), program)
            if _is_executable(candidate):
                return candidate
    return None
def running(
    program
):
    """
    Return True if the ps listing contains a non-defunct line matching the
    program name, otherwise False.
    """
    needle = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    return any(
        needle in line and b"defunct" not in line for line in listing
    )
def ensure_file_existence(
    filename
):
    """
    Log a check for the existence of the specified file (with environment
    variables expanded) and raise IOError if it is not a regular file.
    """
    # The format strings previously lacked the {filename} placeholder, so
    # the filename was never interpolated into the log messages.
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the specified file; raises OSError if it does not exist."""
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Scan a directory for filenames containing digits and ending in the given
    extension, grouped into sequences keyed by the filename pattern with
    digit runs replaced by "XXX". By default only the first sequence found
    is returned, naturally sorted; otherwise the full pattern-to-filenames
    mapping is returned.
    """
    candidates = [
        filename for filename in os.listdir(directory)
        if re.match(r".*\d+.*\." + extension, filename)
    ]
    sequences = collections.defaultdict(list)
    for filename in candidates:
        sequences[re.sub("\d+", "XXX", filename)].append(filename)
    if return_first_sequence_only is True:
        first_pattern = next(iter(sequences.keys()))
        return natural_sort(sequences[first_pattern])
    return sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the names of regular files (not directories) in the directory."""
    entries = os.listdir(directory)
    return [
        entry for entry in entries
        if os.path.isfile(os.path.join(directory, entry))
    ]
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """Return the paths of all files under the directory, recursively."""
    found = []
    for root, _, filenames in os.walk(directory):
        found.extend(os.path.join(root, filename) for filename in filenames)
    return found
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return the absolute paths of the files in the directory, optionally only
    those whose extension contains the specified string. Raise IOError if
    the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for entry in os.listdir(directory):
        full_path = os.path.join(directory, entry)
        if os.path.isfile(full_path):
            filepaths.append(os.path.abspath(full_path))
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command    = None,
    background = True,
    timeout    = None
):
    """
    Run a Bash command.

    If background is True, launch the command as a detached process and return
    None immediately (any timeout is ignored, with a warning). Otherwise block
    until the command completes and return its standard output (bytes), or
    return False if it does not complete within timeout seconds.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell      = True,
            executable = "/bin/bash"
        )
        return None
    process = subprocess.Popen(
        [command],
        shell      = True,
        executable = "/bin/bash",
        stdout     = subprocess.PIPE
    )
    try:
        # communicate both waits for termination and collects output; the
        # previous separate wait call doubled the effective timeout
        output, errors = process.communicate(timeout = timeout)
        return output
    except subprocess.TimeoutExpired:
        # catch only timeouts; previously a bare except also swallowed
        # KeyboardInterrupt and hid genuine errors
        process.kill()
        process.communicate()
        return False
def percentage_power():
    """
    Return the battery charge percentage reported by upower (e.g. "65%"),
    "100%" when only line power is detected, or None when power information is
    unavailable for any reason (best effort).
    """
    try:
        # run upower in the foreground so its output is captured
        # (engage_command returns None when background is True)
        filenames_power = engage_command(command = "upower -e", background = False)
        if isinstance(filenames_power, bytes):
            # subprocess pipes yield bytes under Python 3
            filenames_power = filenames_power.decode()
        filenames_power = [line for line in filenames_power.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # bug fix: the format string previously lacked the {filename}
            # placeholder, so the device path was never passed to upower
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery), background = False)
            if isinstance(power_data, bytes):
                power_data = power_data.decode()
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    except Exception:
        # deliberate best-effort: swallow errors, but no longer swallow
        # KeyboardInterrupt/SystemExit as the previous bare except did
        return None
def convert_type_list_elements(
    list_object  = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a list
    to a specified type and return the new list.

    Generalized: previously only element_type = str was honoured (any other
    type silently returned None); any callable type such as int or float is
    now supported.
    """
    return [
        convert_type_list_elements(
            list_object  = element,
            element_type = element_type
        ) if isinstance(element, list) else element_type(element)
        for element in list_object
    ]
class List_Consensus(list):
    """
    A list that approximately limits its own memory usage. When the estimated
    size of the list object (as reported by sys.getsizeof, which does not
    include the sizes of the elements themselves) exceeds the specified or
    default size constraint, the least frequent elements are removed. The most
    frequent element can be returned, which can be used to determine the list
    "consensus" element.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        # approximate maximum size of the list object, in bytes
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate size constraint of the list in bytes."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        This function removes the least frequent elements until the size
        constraint is met.
        """
        if size is None:
            size = self.size_constraint
        while sys.getsizeof(self) > size:
            # remove one occurrence of the least common element
            element_frequencies = collections.Counter(self)
            infrequent_element = element_frequencies.most_common()[-1:][0][0]
            self.remove(infrequent_element)
    def append(
        self,
        element,
        ensure_size = True,
        size        = None
    ):
        """Append an element, optionally enforcing the size constraint."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """
        Return the most frequent element, or None when the list is empty or
        its elements are unhashable.
        """
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        except (IndexError, TypeError):
            # narrowed from a bare except, which also swallowed
            # KeyboardInterrupt and SystemExit
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
):
    """Sort so that embedded runs of digits are ordered numerically."""
    def _natural_key(text):
        return [
            int(chunk) if chunk.isdigit() else chunk.lower()
            for chunk in re.split("([0-9]+)", text)
        ]
    return sorted(list_object, key = _natural_key)
def indices_of_list_element_duplicates(
    x
):
    """Yield the index of every element that has appeared earlier in x."""
    observed = set()
    for position, item in enumerate(x):
        # convert unhashable containers to hashable equivalents
        if isinstance(item, list):
            item = tuple(item)
        if isinstance(item, dict):
            item = tuple(item.items())
        if item in observed:
            yield position
        else:
            observed.add(item)
def indices_of_greatest_values(
    x,
    number = 5
):
    """
    Return the indices of the greatest values of x, listed in ascending order
    of value, limited to at most *number* indices.
    """
    if len(x) <= number:
        number = len(x)
    ordered_pairs = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _ in ordered_pairs[-number:]]
def unique_list_elements(x):
    """Return the elements of x with duplicates removed, order preserved."""
    # membership is tested against the output list rather than a set so that
    # unhashable elements (e.g. lists) remain supported
    result = []
    for item in x:
        if item not in result:
            result.append(item)
    return result
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    This function returns the specified number of elements of a list spread
    approximately evenly.
    """
    # fewer elements than requested: return everything
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    # one element requested: take the middle element
    if number_of_elements == 1:
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # take one element near the start of the first stride, then recurse on the
    # remainder of the list with one fewer element to select
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def split_list(
    list_object = None,
    granularity = None
):
    """
    This function splits a list into a specified number of lists. It returns a
    list of lists that correspond to these parts. Negative numbers of parts are
    not accepted and numbers of parts greater than the number of elements in the
    list result in the maximum possible number of lists being returned.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    # target (possibly fractional) number of elements per part
    mean_length = len(list_object) / float(granularity)
    split_list_object = []
    last_length = float(0)
    if len(list_object) > granularity:
        # walk through the list in steps of mean_length, truncating the
        # fractional boundaries to integer slice indices
        while last_length < len(list_object):
            split_list_object.append(
                list_object[int(last_length):int(last_length + mean_length)]
            )
            last_length += mean_length
    else:
        # more parts requested than elements available: one element per part
        split_list_object = [[element] for element in list_object]
    return split_list_object
def ranges_edge_pairs(
    extent       = None,
    range_length = None
):
    """
    Return the edges of ranges within an extent of some length. For example, to
    separate 76 variables into groups of at most 20 variables, the ranges of the
    variables could be 0 to 20, 21 to 41, 42 to 62 and 63 to 76. These range
    edges could be returned by this function as a list of tuples:
    >>> ranges_edge_pairs(
    ...     extent       = 76, # number of variables
    ...     range_length = 20  # maximum number of variables per plot
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    number_of_ranges = int(math.ceil(extent / range_length))
    pairs = []
    for index in range(number_of_ranges):
        start = index * range_length + index
        stop  = min((index + 1) * range_length + index, extent)
        pairs.append((start, stop))
    return pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """
    Parse an indented Markdown list of "- name: value" lines into nested
    dictionaries; a line without a value opens a new branch (sub-dictionary).
    """
    line = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [{}]
    for indent, name, value in line.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            # deeper indentation is only valid immediately after a new branch
            assert not stack[-1], "unexpected indent"
        elif indent < depth:
            # dedent: return to the parent branch
            stack.pop()
        stack[-1][name] = value or {}
        if not value:
            # new branch
            stack.append(stack[-1][name])
        depth = indent
    return stack[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """
    Parse an indented Markdown list of "- name: value" lines into nested
    OrderedDicts, preserving entry order; a line without a value opens a new
    branch (sub-OrderedDict).
    """
    line = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [collections.OrderedDict()]
    for indent, name, value in line.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            # deeper indentation is only valid immediately after a new branch
            assert not stack[-1], "unexpected indent"
        elif indent < depth:
            # dedent: return to the parent branch
            stack.pop()
        stack[-1][name] = value or collections.OrderedDict()
        if not value:
            # new branch
            stack.append(stack[-1][name])
        depth = indent
    return stack[0]
def open_configuration(
    filename = None
):
    """Read a Markdown-list configuration file and return it as an OrderedDict."""
    # context manager ensures the file handle is closed (previously leaked)
    with open(filename, "r") as file_configuration:
        content = file_configuration.read()
    return Markdown_list_to_OrderedDict(content)
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
):
    """
    Resample a list of values to a new length by interpolation over an implied
    integer x-axis. Return the resampled y values (dimensions = 1) or a tuple
    of the new x values and y values (dimensions = 2); any other value of
    dimensions returns None implicitly.
    """
    y1 = values
    # implied x-axis: one unit per original sample
    x1 = list(range(0, len(values)))
    interpolation = scipy.interpolate.interp1d(
        x1,
        y1,
        kind = interpolation_type
    )
    # evaluate the interpolant at the requested number of evenly-spaced points
    x2 = list(numpy.linspace(min(x1), max(x1), length))
    y2 = [float(interpolation(x)) for x in x2]
    if dimensions == 1:
        return y2
    elif dimensions == 2:
        return (x2, y2)
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """
    Convert a waveform to a rectangle waveform of the same amplitude, clipping
    non-negative samples to the maximum and negative samples to the minimum.
    The input is mutated in place and also returned.
    # assumes values supports boolean-mask assignment (a NumPy array) — TODO
    # confirm against callers
    """
    values[values >= 0] = fraction_amplitude * max(values)
    values[values < 0] = fraction_amplitude * min(values)
    # undo the fractional scaling applied above, restoring full amplitude
    values[:] = [x * (1 / fraction_amplitude) for x in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform           = None,
    filename_rectangle_waveform = None,
    overwrite                   = False,
    fraction_amplitude          = 0.01
):
    """
    Read a WAV file, convert its waveform to a rectangle waveform of the same
    amplitude and write the result, proposing a non-conflicting output
    filename unless overwrite is requested.
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename  = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    values = change_waveform_to_rectangle_waveform(
        values             = values,
        fraction_amplitude = fraction_amplitude
    )
    # the conversion above already clips and renormalises the waveform; the
    # previous version redundantly repeated those three steps a second time
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """Divide each element of x by summation (by default the sum of x, normalizing to unity)."""
    total = sum(x) if summation is None else summation
    return [element / total for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """
    Rescale the elements of x linearly to the range [minimum, maximum].

    Raises ZeroDivisionError when all elements of x are equal (as before).
    """
    lowest  = min(x)
    highest = max(x)
    # hoist min/max and the scale factor out of the loop; previously they
    # were recomputed for every element (accidental O(n^2))
    scale = (maximum - minimum) / (highest - lowest)
    return [minimum + (element - lowest) * scale for element in x]
def composite_variable(
    x
):
    """Combine the elements of x into one scalar using positional weights of base len(x) + 1."""
    base = len(x) + 1
    return sum(base ** (position - 1) * value for position, value in enumerate(x))
def model_linear(
    data              = None,
    quick_calculation = False
):
    """
    Fit a least-squares line to (x, y) data points and return the intercept
    and slope as the tuple (b0, b1). If quick_calculation is True, fit using
    only 10 points spread through the data.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x  = sum(x for x, _ in data)
    sum_y  = sum(y for _, y in data)
    sum_xx = sum(x * x for x, _ in data)
    sum_xy = sum(x * y for x, y in data)
    b1 = (sum_xy - (sum_x * sum_y) / n) / (sum_xx - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
):
    """Load and return a pickled object from the specified file."""
    # context manager closes the file handle promptly (previously leaked)
    with open(filename, "rb") as file_object:
        return pickle.load(file_object)
def export_object(
    x,
    filename  = None,
    overwrite = False
):
    """
    Pickle an object to a file, proposing a non-conflicting filename unless
    overwrite is requested.
    """
    filename = propose_filename(
        filename  = filename,
        overwrite = overwrite
    )
    # close the file handle deterministically; previously it was leaked and
    # the pickle could remain unflushed until interpreter exit
    with open(filename, "wb") as file_object:
        pickle.dump(x, file_object)
def string_to_bool(x):
    """Interpret the affirmative strings "yes", "true", "t" and "1" (any case) as True."""
    return x.lower() in {"yes", "true", "t", "1"}
def ustr(text):
    """
    Convert a string to Python 2 unicode or Python 3 string as appropriate to
    the version of Python in use. None is passed through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
number = None
):
ones = [
"",
"one ",
"two ",
"three ",
"four ",
"five ",
"six ",
"seven ",
"eight ",
"nine "
]
teens = [
"ten ",
"eleven ",
"twelve ",
"thirteen ",
"fourteen ",
"fifteen ",
"sixteen ",
"seventeen ",
"eighteen ",
"nineteen "
]
tens = [
"",
"",
"twenty ",
"thirty ",
"forty ",
"fifty ",
"sixty ",
"seventy ",
"eighty ",
"ninety "
]
thousands = [
"",
"thousand ",
"million ",
"billion ",
"trillion ",
"quadrillion ",
"quintillion ",
"sextillion ",
"septillion ",
"octillion ",
"nonillion ",
"decillion ",
"undecillion ",
"duodecillion ",
"tredecillion ",
"quattuordecillion ",
"quindecillion",
"sexdecillion ",
"septendecillion ",
"octodecillion ",
"novemdecillion ",
"vigintillion "
]
# Split the number into 3-digit groups with each group representing
# hundreds, thousands etc.
number_in_groups_of_3 = []
number_as_string = str(number)
for position in range(3, 33, 3):
progressive_number_string = number_as_string[-position:]
progression = len(number_as_string) - position
# Break if the end of the number string is encountered.
if progression < -2:
break
else:
if progression >= 0:
number_in_groups_of_3.append(int(progressive_number_string[:3]))
elif progression >= -1:
number_in_groups_of_3.append(int(progressive_number_string[:2]))
elif progression >= -2:
number_in_groups_of_3.append(int(progressive_number_string[:1]))
# Split the number 3-digit groups into groups of ones, tens etc. and build
# an English text representation of the number.
number_words = ""
for index, group in enumerate(number_in_groups_of_3):
number_1 = group % 10
number_2 = (group % 100) // 10
number_3 = (group % 1000) // 100
if group == 0:
continue
else:
thousand = thousands[index]
if number_2 == 0:
number_words = ones[number_1] + thousand + number_words
elif number_2 == 1:
number_words = teens[number_1] + thousand + number_words
elif number_2 > 1:
number_words = tens[number_2] + ones[number_1] + thousand + number_words
if number_3 > 0:
number_words = ones[number_3] + "hundred " + number_words
return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """
    Replace every run of digits in text with its English words, e.g. "10 dogs"
    becomes "ten dogs".
    """
    # Split the text into alternating non-digit and digit segments; the raw
    # string avoids an invalid-escape-sequence warning for \d.
    text = re.split(r"(\d+)", text)
    if text[-1] == "":
        text = text[:-1]
    text_translated = []
    # Replace digit segments with English text.
    for text_segment in text:
        if all(character.isdigit() for character in text_segment):
            text_translated.append(number_to_English_text(number = text_segment))
        else:
            text_translated.append(text_segment)
    return "".join(text_translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
text = None,
remove_articles = True
):
"""
This function replaces contractions with full words and replaces numbers
with digits in specified text. There is the option to remove articles.
"""
words = text.split()
text_translated = ""
for word in words:
if remove_articles and word in ["a", "an", "the"]:
continue
contractions_expansions = {
"ain't": "is not",
"aren't": "are not",
"can't": "can not",
"could've": "could have",
"couldn't": "could not",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"gonna": "going to",
"gotta": "got to",
"hadn't": "had not",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'll": "he will",
"he's": "he is",
"how'd": "how did",
"how'll": "how will",
"how's": "how is",
"I'd": "I would",
"I'll": "I will",
"I'm": "I am",
"I've": "I have",
"isn't": "is not",
"it'd": "it would",
"it'll": "it will",
"it's": "it is",
"mightn't": "might not",
"might've": "might have",
"mustn't": "must not",
"must've": "must have",
"needn't": "need not",
"oughtn't": "ought not",
"shan't": "shall not",
"she'd": "she would",
"she'll": "she will",
"she's": "she is",
"shouldn't": "should not",
"should've": "should have",
"somebody's": "somebody is",
"someone'd": "someone would",
"someone'll": "someone will",
"someone's": "someone is",
"that'll": "that will",
"that's": "that is",
"that'd": "that would",
"there'd": "there would",
"there're": "there are",
"there's": "there is",
"they'd": "they would",
"they'll": "they will",
"they're": "they are",
"they've": "they have",
"wasn't": "was not",
"we'd": "we would",
"we'll": "we will",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'd": "what did",
"what'll": "what will",
"what're": "what are",
"what's": "what is",
"whats": "what is",
"what've": "what have",
"when's": "when is",
"when'd": "when did",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'd": "who would",
"who'd've": "who would have",
"who'll": "who will",
"who're": "who are",
"who's": "who is",
"who've": "who have",
"why'd": "why did",
"why're": "why are",
"why's": "why is",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"ya'll": "you all",
"you'd": "you would",
"you'd've": "you would have",
"you'll": "you will",
"y'aint": "you are not",
"y'ain't": "you are not",
"you're": "you are",
"you've": "you have"
}
if word in list(contractions_expansions.keys()):
word = contractions_expansions[word]
numbers_digits = {
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
"eleven": "11",
"twelve": "12",
"thirteen": "13",
"fourteen": "14",
"fifteen": "15",
"sixteen": "16",
"seventeen": "17",
"eighteen": "18",
"nineteen": "19",
"twenty": "20"
}
if word in list(numbers_digits.keys()):
word = numbers_digits[word]
text_translated += " " + word
text_translated = text_translated.strip()
return text_translated
def split_into_sentences(
    text = None
):
    """
    Split text into a list of sentences, protecting common abbreviations,
    acronyms, websites and quoted sentence endings from being treated as
    sentence boundaries.
    """
    capitals = "([A-Z])"
    prefixes = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    # raw string: \s in a plain string is an invalid escape sequence and
    # raises a SyntaxWarning on modern Python
    starters = r"(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = "[.](com|gov|io|net|org|pro)"
    text = " " + text + " "
    text = text.replace("\n", " ")
    # mask periods that do not end sentences with <prd>
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub(r"\s" + capitals + "[.] ", " \\1<prd> ", text)
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + capitals + "[.]", " \\1<prd>", text)
    # move sentence-ending punctuation outside closing quotation marks
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    # mark genuine sentence boundaries, then restore masked periods
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
):
    """Drop the first (possibly incomplete) sentence of text and rejoin the rest."""
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """Return a pseudorandom MAC address as six colon-separated lowercase hex octets."""
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{octet:02x}".format(octet = octet) for octet in octets)
def get_attribute(
    object_instance          = None,
    name                     = None,
    imputation_default_value = None
):
    """
    Return the named attribute of an object, with optional indexing.

    A name of the form "attr[2]" returns element 2 of the attribute attr. If
    the attribute (or index) cannot be accessed for any reason, return the
    imputation default value instead of raising.
    """
    try:
        if "[" in name and "]" in name:
            index     = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value     = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    except Exception:
        # except Exception rather than the previous bare except, so that
        # KeyboardInterrupt and SystemExit are not swallowed
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
):
    """Return a list of pseudorandom valid Python variable names."""
    names = []
    while len(names) < number:
        candidate = str(uuid.uuid4()).replace("-", "")
        # a variable name must not start with a digit
        if candidate[0].isalpha():
            names.append(candidate)
    return names
def add_time_variables(df, reindex = True):
    """
    Return a DataFrame with variables for weekday index, weekday name, timedelta
    through day, fraction through day, hours through day and days through week
    added, optionally with the index set to datetime and the variable `datetime`
    removed. It is assumed that the variable `datetime` exists.

    NOTE(review): the reindex parameter is currently unused — the index is
    always set to datetime and the datetime column is never removed.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # NOTE(review): Series.dt.weekday_name was removed in pandas 1.0; on
    # modern pandas this line raises — confirm the supported pandas version
    df["weekday_name"] = df["datetime"].dt.weekday_name
    # timedelta elapsed since midnight of the same day
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    # weekday index plus fraction of the current day, e.g. Tuesday noon = 1.5
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    df.index = df["datetime"]
    #del df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot        = True,
    scatter     = False,
    linestyle   = "-",
    linewidth   = 1,
    s           = 1
):
    """
    Create daily plots of a variable in a DataFrame, optionally renormalized
    to [0, 1] per day. It is assumed that the DataFrame index is datetime and
    that the variable `hours_through_day` exists. Draws onto the current
    matplotlib figure as a side effect.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # NOTE(review): grouping is by day-of-month, so the same day number from
    # different months falls into one group — confirm this is intended
    days = []
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        if renormalize:
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def yearly_plots(
    df,
    variable,
    renormalize                   = True,
    horizontal_axis_labels_days   = False,
    horizontal_axis_labels_months = True,
    plot                          = True,
    scatter                       = False,
    linestyle                     = "-",
    linewidth                     = 1,
    s                             = 1
):
    """
    Create yearly plots of a variable in a DataFrame, optionally renormalized
    to [0, 1] per year. It is assumed that the DataFrame index is datetime and
    that the variable `days_through_year` exists. Draws onto the current
    matplotlib figure as a side effect.

    NOTE(review): horizontal_axis_labels_days is currently unused.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year in years:
        if renormalize:
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        if plot:
            plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s = s)
    # label the horizontal axis with month names placed at mid-month days
    if horizontal_axis_labels_months:
        plt.xticks(
            [ 15.5,      45,         74.5,    105,     135.5, 166,    196.5,  227.5,    258,         288.5,     319,        349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df           = None,
    variable     = None,
    window       = 20,
    upper_factor = 2,
    lower_factor = 2
):
    """
    Add rolling statistics variables derived from a specified variable in a
    DataFrame: rolling mean, rolling standard deviation, and upper and lower
    bounds placed a number of standard deviations either side of the mean.
    """
    # pandas.stats.moments was removed from pandas; use the rolling-window API
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"]               = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"]        = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"]        = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = [],
    variables_exclude = []
):
    """
    Rescale variables of a DataFrame to [0, 1], excluding variables that
    contain NaNs or non-numeric data, excluding specified variables, and
    including specified variables.
    """
    # copy rather than alias: previously the mutable default exclusion list
    # was extended in place, polluting both the default and any caller list
    variables_not_rescale = list(variables_exclude)
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Create a day-long histogram of counts of the variable for each hour. It is
    assumed that the DataFrame index is datetime and that the variable
    `hour` exists. Draws onto the current matplotlib figure as a side effect.
    """
    # guard: pandas datetime index dtype strings
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    counts = df.groupby(df.index.hour)[variable].count()
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
):
    """
    Create a week-long histogram of counts of the variable for each day. It is
    assumed that the DataFrame index is datetime and that the variable
    `weekday_name` exists. Draws onto the current matplotlib figure as a side
    effect.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # reindex by calendar day names so bars appear Monday-first
    counts = df.groupby(df.index.weekday_name)[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
):
    """
    Create a year-long histogram of counts of the variable for each month. It is
    assumed that the DataFrame index is datetime and that the variable
    `month_name` exists. Draws onto the current matplotlib figure as a side
    effect.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # reindex by calendar month names (slice skips the empty leading entry)
    counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Set up a Jupyter notebook with a few defaults: seaborn paper style with a
    monospace font, warnings suppressed, wide pandas display limits and a
    large default figure size.
    """
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    pd.set_option("display.max_rows", 500)
    pd.set_option("display.max_columns", 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
sequence,
every = None,
size = None,
name = "items"
):
"""
Display a progress bar widget in a Jupyter notebook. Its dependencies must
be enabled on launching Jupyter, such as in the following way:
jupyter nbextension enable --py widgetsnbextension
The progress bar can be used in a way like the following:
for item in shijian.log_progress([1, 2, 3, 4, 5]):
time.sleep(5)
"""
from ipywidgets import IntProgress, HTML, VBox
from IPython.display import display
is_iterator = False
if size is None:
try:
size = len(sequence)
except TypeError:
is_iterator = True
if size is not None:
if every is None:
if size <= 200:
every = 1
else:
every = int(size / 200) # every 0.5 %
else:
assert every is not None, "sequence is iterator, set every"
if is_iterator:
progress = IntProgress(min = 0, max = 1, value = 1)
progress.bar_style = "info"
else:
progress = IntProgress(min = 0, max = size, value = 0)
label = HTML()
box = VBox(children = [label, progress])
display(box)
index = 0
try:
for index, record in enumerate(sequence, 1):
if index == 1 or index % every == 0:
if is_iterator:
label.value = "{name}: {index} / ?".format(
name = name,
index = index
)
else:
progress.value = index
label.value = u"{name}: {index} / {size}".format(
name = name,
index = index,
size = size
)
yield record
except:
progress.bar_style = "danger"
raise
else:
progress.bar_style = "success"
progress.value = index
label.value = "{name}: {index}".format(
name = name,
index = str(index or "?")
)
_main()
|
wdbm/shijian
|
shijian.py
|
yearly_plots
|
python
|
def yearly_plots(
df,
variable,
renormalize = True,
horizontal_axis_labels_days = False,
horizontal_axis_labels_months = True,
plot = True,
scatter = False,
linestyle = "-",
linewidth = 1,
s = 1
):
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
years = []
for group in df.groupby(df.index.year):
years.append(group[1])
scaler = MinMaxScaler()
plt.xlabel("days")
plt.ylabel(variable);
for year in years:
if renormalize:
values = scaler.fit_transform(year[[variable]])
else:
values = year[variable]
if plot:
plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
if scatter:
plt.scatter(year["days_through_year"], values, s = s)
if horizontal_axis_labels_months:
plt.xticks(
[ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
)
plt.legend()
|
Create yearly plots of a variable in a DataFrame, optionally renormalized.
It is assumed that the DataFrame index is datetime.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1682-L1721
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
name = "shijian"
version = "2018-06-02T1644Z"
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    # create the module-level clock registry at import time
    # NOTE(review): Clocks is defined elsewhere in this module (not visible in
    # this chunk) — presumably a registry of named timers; confirm
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
):
    """Return the current UTC time styled as UNIX time (whole seconds by default)."""
    current_moment = datetime.datetime.utcnow()
    return style_datetime_object(
        datetime_object = current_moment,
        style           = style
    )
def time_UTC(
    style = None
):
    """Return the current UTC time styled per *style* (passed through to style_datetime_object)."""
    current_moment = datetime.datetime.utcnow()
    return style_datetime_object(
        datetime_object = current_moment,
        style           = style
    )
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
):
    """
    Return a filename based on the current UNIX time, with an optional
    extension appended, passed through propose_filename to avoid clashing
    with an existing file.
    """
    filename = str(
        time_UNIX(
            style = style
        )
    )
    if extension:
        filename = filename + extension
    filename_proposed = propose_filename(
        filename = filename
    )
    return filename_proposed
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
):
    """
    Return a filename based on the current UTC time, with an optional
    extension appended, passed through propose_filename to avoid clashing
    with an existing file.
    """
    filename = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
    if extension:
        filename = filename + extension
    filename_proposed = propose_filename(
        filename = filename
    )
    return filename_proposed
def style_minimal_seconds(seconds):
    """Express seconds as e.g. "1 days 2 hours 5 seconds", omitting zero-valued units."""
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for unit in ("days", "hours", "minutes", "seconds"):
        quantity = getattr(delta, unit)
        if quantity:
            parts.append("{} {}".format(int(quantity), unit))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style     = "YYYY-MM-DDTHHMMZ"
):
    """Style a UNIX timestamp (seconds since the epoch) using the given datetime style."""
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(
        datetime_object = moment,
        style           = style
    )
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Render a datetime.datetime or datetime.timedelta as a string.

    For datetimes, *style* names one of the formats below (unknown styles fall
    back to the filename-safe default); the two "UNIX time" styles instead
    return a numeric timestamp.  For timedeltas, *style* is a str.format
    template with Y/D/H/M/S totals and zero-filled YYYY/DD/HH/MM/SS fields.
    Returns None for any other object type.
    """
    if type(datetime_object) is datetime.datetime:
        epoch = datetime.datetime.utcfromtimestamp(0)
        # UNIX time in seconds with second fraction
        if style == "UNIX time S.SSSSSS":
            return (datetime_object - epoch).total_seconds()
        # UNIX time in seconds rounded
        if style == "UNIX time S":
            return int((datetime_object - epoch).total_seconds())
        # strftime formats keyed by style name
        strftime_styles = {
            "YYYY-MM-DDTHHMMZ":           "%Y-%m-%dT%H%MZ",
            "YYYY-MM-DDTHHMMSSZ":         "%Y-%m-%dT%H%M%SZ",
            "YYYY-MM-DDTHHMMSSMMMMMMZ":   "%Y-%m-%dT%H%M%S%fZ",
            "YYYY-MM-DD HH:MM:SS UTC":    "%Y-%m-%d %H:%M:%S UTC",
            "YYYY-MM-DD HH:MM:SS Z":      "%Y-%m-%d %H:%M:%S Z",
            "day DD month YYYY":          "%A %d %B %Y",
            "HH:MM day DD month YYYY":    "%H:%M %A %d %B %Y",
            "HH:MM:SS day DD month YYYY": "%H:%M:%S %A %d %B %Y",
            "day DD month YYYY HH:MM:SS": "%A %d %B %Y %H:%M:%S",
            "HH hours MM minutes SS sounds day DD month YYYY":
                "%H hours %M minutes %S seconds %A %d %B %Y",
            "DD:HH:MM":                   "%d:%H:%M",
            "DD:HH:MM:SS":                "%d:%H:%M:%S",
            "HH:MM:SS":                   "%H:%M:%S",
            "HH hours MM minutes SS seconds":
                "%H hours %M minutes %S seconds",
        }
        # unknown styles fall back to the filename-safe format
        return datetime_object.strftime(
            strftime_styles.get(style, "%Y-%m-%dT%H%MZ")
        )
    if type(datetime_object) is datetime.timedelta:
        if style == "YYYY-MM-DDTHHMMZ":
            # the datetime default is meaningless for a duration
            style = "{DD} days, {HH}:{MM}:{SS}"
        seconds = datetime_object.days * 24 * 3600 + datetime_object.seconds
        seconds_total = seconds
        minutes = int(math.floor(seconds / 60))
        minutes_total = minutes
        seconds -= minutes * 60
        hours = int(math.floor(minutes / 60))
        hours_total = hours
        minutes -= hours * 60
        days = int(math.floor(hours / 24))
        days_total = days
        hours -= days * 24
        years = int(math.floor(days / 365))
        years_total = years
        days -= years * 365
        return style.format(
            Y    = years_total,
            D    = days_total,
            H    = hours_total,
            M    = minutes_total,
            S    = seconds_total,
            YYYY = str(years).zfill(4),
            DD   = str(days).zfill(2),
            HH   = str(hours).zfill(2),
            MM   = str(minutes).zfill(2),
            SS   = str(seconds).zfill(2)
        )
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert an "HHMM" time string to minutes after midnight."""
    return int(HHMM[:2]) * 60 + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time as minutes after midnight."""
    now = datetime.datetime.utcnow()
    return now.hour * 60 + now.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop = None # string "HHMM" e.g. "1000"
):
    """
    Return True when the current UTC time falls inside the daily range (the
    range may wrap midnight, e.g. 1700--1000); return None when no range or
    endpoints are specified.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        parts = time_range.split("--")
        time_start = parts[0]
        time_stop = parts[1]
    minutes_per_day = 1440
    start = HHMM_to_minutes(time_start)
    stop = HHMM_to_minutes(time_stop)
    # modular arithmetic handles ranges that wrap past midnight
    return (now_in_minutes() - start) % minutes_per_day <=\
           (stop - start) % minutes_per_day
def timer(function):
    """
    Decorator that times each call of *function* with a Clock named after it
    (the clock registers itself with the global registry when one exists).
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # the previous version also computed the call arguments via the
        # deprecated inspect.getcallargs and never used them; removed
        clock = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    A stopwatch-style timer.  Elapsed time is held in ``accumulator`` (a
    timedelta), so a clock can be stopped and restarted without losing time.
    If a module-level ``clocks`` registry exists, each new instance registers
    itself there on construction.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Record the (re)start time; the accumulator is not cleared."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Fold the current run into the accumulator and record the stop time."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        # Add the time since the previous update — or, on the first update of
        # a run, since the run started — to the accumulator.
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and forget any in-progress run."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the total elapsed time in seconds."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multi-line textual report of the clock's attributes."""
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the report."""
        print(self.report())
class Clocks(object):
    """Registry of Clock instances with aggregate textual reporting."""
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock instance."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """
        Return a report: "statistics" averages the elapsed times per clock
        name, "full" lists every registered clock individually.
        """
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks:
            if style == "statistics":
                # Gather every clock's elapsed time under its name, then
                # report the mean per name.
                times_by_name = collections.defaultdict(list)
                for clock in self._list_of_clocks:
                    times_by_name[clock.name()].append(clock.time())
                lines = ["clock type".ljust(39) + "mean time (s)"]
                for name, times in times_by_name.items():
                    lines.append(str(name).ljust(39) + str(sum(times) / len(times)))
                string = "\n".join(lines) + "\n"
            elif style == "full":
                lines = ["clock".ljust(39) + "time (s)"]
                for clock in self._list_of_clocks:
                    lines.append(str(clock.name()).ljust(39) + str(clock.time()))
                string = "\n".join(lines) + "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track task progress as (fraction-complete, UNIX-time) data points and
    estimate the completion time by linear extrapolation (via model_linear).
    """
    def __init__(
        self
    ):
        self.data = []                 # list of (fraction, UNIX timestamp)
        self.quick_calculation = False # rate-limit recording and thin the fit
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Record at most one datum per update_rate seconds; fit on a subset."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Return to recording every datum."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """Record a progress fraction (0-1) and return the styled status line."""
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            # in quick mode, only record when update_rate seconds have passed
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """
        Fit timestamp as a linear function of fraction and return the
        predicted moment at fraction 1 as a datetime (0 with too little data).
        """
        if len(self.data) <= 1:
            return 0
        else:
            try:
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                # predict the timestamp (y) at fraction (x) = 1
                x = 1
                y = b0 + b1 * x
            except:
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the styled estimated completion time (now, with little data)."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated seconds remaining (0 when unknown or overdue)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded fraction."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recent fraction as a percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a carriage-return-terminated single-line status summary."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """Return a random UUID4 as a string."""
    return str(uuid.uuid4())
def unique_number(
    style = None
):
    """
    Return a process-wide unique integer.  The default mode counts from 1;
    the "integer 3 significant figures" mode counts from 100 and raises once
    the value would exceed 999.  State is kept in module-level lists.
    """
    if style == "integer 3 significant figures":
        global unique_numbers_3_significant_figures
        if "unique_numbers_3_significant_figures" not in globals():
            unique_numbers_3_significant_figures = []
        if unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        else:
            unique_numbers_3_significant_figures.append(100)
        if unique_numbers_3_significant_figures[-1] > 999:
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    global unique_numbers
    if "unique_numbers" not in globals():
        unique_numbers = []
    if unique_numbers:
        unique_numbers.append(unique_numbers[-1] + 1)
    else:
        unique_numbers.append(1)
    return unique_numbers[-1]
def unique_3_digit_number():
    """Return a process-wide unique integer in the range 100-999."""
    return unique_number(style = "integer 3 significant figures")
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Normalise *text* to ASCII and make it safe for use as a filename
    (whitespace becomes underscores) or a URL slug (lowercased, runs of
    whitespace and hyphens become single hyphens).
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    # raw strings: the previous patterns used invalid escape sequences
    # (e.g. "\w", "\s"), which are warnings in modern Python
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    """
    Return a filename string.  With no filename specified, one is generated
    from the current UTC time.  The name is optionally slugified (by default
    leaving the extension untouched).  Unless *overwrite* is set, an integer
    suffix is appended repeatedly until the name would not clobber an
    existing file.
    """
    if not filename:
        filename = time_UTC()
    proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base = os.path.splitext(os.path.basename(filename))[0]
            extension = os.path.splitext(os.path.basename(filename))[1]
            proposed = slugify(text = base) + extension
        else:
            proposed = slugify(text = filename)
    if not overwrite:
        # collision renaming is based on the original (unslugified) name
        directory = os.path.dirname(filename)
        base = os.path.splitext(os.path.basename(filename))[0]
        extension = os.path.splitext(os.path.basename(filename))[1]
        count = 0
        while os.path.exists(proposed):
            count += 1
            candidate = base + "_" + str(count) + extension
            if directory:
                proposed = directory + "/" + candidate
            else:
                proposed = candidate
    return proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating a
    file at the filepath.
    """
    # uuid4 replaces the previous reliance on the private CPython API
    # tempfile._get_candidate_names, which may change without notice
    return "/tmp/" + uuid.uuid4().hex
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return a specified number of last lines of a specified file. If there is an
    error or the file does not exist, return False.
    """
    try:
        filepath = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(filepath):
            return False
        text = subprocess.check_output(["tail", "-" + str(lines), filepath])
        # empty output is reported as failure, matching the file-missing case
        return text if text else False
    except Exception:
        # narrowed from a bare except, which would also swallow
        # SystemExit and KeyboardInterrupt
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Check that the platform release string contains *keyphrase*; on mismatch
    optionally log a warning and, when *require*, log fatally and raise
    EnvironmentError.
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message =\
        "inappropriate environment: " +\
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release = release
        )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """Raise EnvironmentError if *program* cannot be found on PATH."""
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is None:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
    log.debug("program {program} available".format(
        program = program
    ))
def which(
    program
):
    """
    Return the path of *program* if it is an executable file (when given with
    a directory component) or is found on PATH; otherwise return None.
    """
    def _executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        if _executable(program):
            return(program)
    else:
        for path_entry in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(path_entry.strip('"'), program)
            if _executable(candidate):
                return candidate
    return None
def running(
    program
):
    """Return True if ``ps -A`` shows a non-defunct process line containing *program*."""
    needle = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    return any(
        needle in line and b"defunct" not in line for line in listing
    )
def ensure_file_existence(
    filename
):
    """Raise IOError if *filename* (after environment-variable expansion) is not an existing file."""
    # the {filename} placeholder was missing from these format strings,
    # so the filename keyword argument was silently ignored
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the file at *filename*."""
    os.remove(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Find filenames in *directory* that contain digits and end in *extension*,
    grouped by the pattern formed by replacing each digit run with "XXX".
    Return the naturally-sorted first sequence, or, when
    *return_first_sequence_only* is False, all sequences keyed by pattern.
    """
    filenames_found = [
        filename for filename in os.listdir(directory) if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        # raw string: "\d+" was previously an invalid escape sequence
        pattern = re.sub(r"\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        first_key_identified = next(iter(filename_sequences.keys()))
        return natural_sort(filename_sequences[first_key_identified])
    return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the names of the regular files directly inside *directory*."""
    return [
        entry for entry in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, entry))
    ]
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """Return the paths of all files under *directory*, searched recursively."""
    paths = []
    for root, _, filenames in os.walk(directory):
        paths.extend(os.path.join(root, filename) for filename in filenames)
    return paths
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return absolute paths of the files in *directory*, optionally keeping
    only those whose extension contains *extension_required*; raise IOError
    when the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = [
        os.path.abspath(os.path.join(directory, entry))
        for entry in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, entry))
    ]
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command = None,
    background = True,
    timeout = None
):
    """
    Run *command* through Bash.  In the background (default) return None
    immediately; in the foreground return the command's stdout bytes, or
    False if it times out or fails, in which case the process is killed.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash"
        )
        return None
    # foreground (the previous trailing else was unreachable: any value of
    # background lands in one of the two branches)
    process = subprocess.Popen(
        [command],
        shell = True,
        executable = "/bin/bash",
        stdout = subprocess.PIPE
    )
    try:
        process.wait(timeout = timeout)
        output, _ = process.communicate(timeout = timeout)
        return output
    except Exception:
        # narrowed from a bare except; the process is killed on failure
        process.kill()
        return False
def percentage_power():
    """
    Return the battery charge percentage string reported by upower, "100%"
    when only a line power supply is present, or None when power information
    is unavailable.
    """
    try:
        # background=False is required to actually capture upower's output
        # (the default background mode returns None immediately)
        filenames_power = engage_command(command = "upower -e", background = False)
        filenames_power = [line for line in filenames_power.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # the {filename} placeholder was missing from this format string,
            # so upower was previously invoked with a literal junk argument
            power_data = engage_command(
                command = "upower -i {filename}".format(filename = filename_power_battery),
                background = False
            )
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    except Exception:
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a list
    to a specified type and return the new list.
    """
    # previously only element_type=str was handled and any other type made
    # the function return None; the conversion is now applied generally
    return [
        element_type(element) if not isinstance(element, list)
        else convert_type_list_elements(
            list_object = element,
            element_type = element_type
        )
        for element in list_object
    ]
class List_Consensus(list):
    """
    A list that approximately bounds its own memory usage: whenever its
    estimated size exceeds a byte constraint, the least frequent elements are
    discarded.  The most frequent ("consensus") element can be queried, which
    makes the list usable as a bounded-memory voting buffer.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the size constraint in bytes (ignored when None)."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        This function removes the least frequent elements until the size
        constraint is met.
        """
        if size is None:
            size = self.size_constraint
        while sys.getsizeof(self) > size:
            counts = collections.Counter(self)
            self.remove(counts.most_common()[-1:][0][0])
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append *element*, then optionally enforce the size constraint."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(size = size)
    def consensus(
        self
    ):
        """Return the most frequent element, or None for an empty list."""
        try:
            return collections.Counter(self).most_common(1)[0][0]
        except:
            return None
## @brief return a naturally-sorted list
def natural_sort(
    list_object
):
    """Sort strings so that embedded numbers compare numerically (case-insensitive)."""
    def _key(text):
        return [
            int(token) if token.isdigit() else token.lower()
            for token in re.split("([0-9]+)", text)
        ]
    return sorted(list_object, key = _key)
def indices_of_list_element_duplicates(
    x
):
    """
    Yield the index of every element that has appeared earlier in *x*; lists
    and dicts are converted to tuples so they can be compared by content.
    """
    observed = set()
    for index, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
):
    """Return the indices of the *number* greatest values of *x*, ascending by value."""
    if len(x) <= number:
        number = len(x)
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _ in ranked[-number:]]
def unique_list_elements(x):
    """
    Return the elements of *x* with duplicates removed, keeping first-seen
    order.  Membership is tested with ``in`` so unhashable elements (e.g.
    lists) are supported, at O(n^2) cost.
    """
    seen = []
    for element in x:
        if element not in seen:
            seen.append(element)
    return seen
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    Return *number_of_elements* elements of a list, spread approximately
    evenly across it (recursive selection).
    """
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    last_index = len(list_of_elements) - 1
    if number_of_elements == 1:
        return [list_of_elements[int(round(last_index / 2))]]
    # take one element near the start of the first stride, then recurse on
    # the remainder of the list for the remaining selections
    first = list_of_elements[int(round(last_index / (2 * number_of_elements)))]
    remainder = list_of_elements[int(round(last_index / number_of_elements)):]
    return [first] + select_spread(remainder, number_of_elements - 1)
def split_list(
    list_object = None,
    granularity = None
):
    """
    Split a list into *granularity* roughly equal consecutive parts and
    return them as a list of lists.  Requesting more parts than elements
    yields one single-element part per element; a negative granularity
    raises an Exception.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    step = len(list_object) / float(granularity)
    parts = []
    position = float(0)
    while position < len(list_object):
        parts.append(list_object[int(position):int(position + step)])
        position += step
    return parts
def ranges_edge_pairs(
    extent = None,
    range_length = None
):
    """
    Return (start, stop) pairs partitioning *extent* into ranges of at most
    *range_length*.  For example, separating 76 variables into groups of at
    most 20:

    >>> ranges_edge_pairs(
    ...     extent = 76,       # number of variables
    ...     range_length = 20  # maximum number of variables per group
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    edge_pairs = []
    for index in range(int(math.ceil(extent / range_length))):
        start = index * range_length + index
        stop = min((index + 1) * range_length + index, extent)
        edge_pairs.append((start, stop))
    return edge_pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list of "- name: value" lines into
    nested dictionaries; a bullet without a value opens a new branch.
    """
    pattern = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    branches = [{}]
    for indent, name, value in pattern.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            assert not branches[-1], "unexpected indent"
        elif indent < depth:
            branches.pop()
        branches[-1][name] = value or {}
        if not value:
            # descend into the newly created branch
            branches.append(branches[-1][name])
        depth = indent
    return branches[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list of "- name: value" lines into
    nested OrderedDicts (preserving bullet order); a bullet without a value
    opens a new branch.
    """
    pattern = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    branches = [collections.OrderedDict()]
    for indent, name, value in pattern.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            assert not branches[-1], "unexpected indent"
        elif indent < depth:
            branches.pop()
        branches[-1][name] = value or collections.OrderedDict()
        if not value:
            # descend into the newly created branch
            branches.append(branches[-1][name])
        depth = indent
    return branches[0]
def open_configuration(
    filename = None
):
    """Read a Markdown bullet-list configuration file into an OrderedDict."""
    # with-statement closes the file handle (previously leaked)
    with open(filename, "r") as configuration_file:
        return Markdown_list_to_OrderedDict(configuration_file.read())
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
):
    """
    Resample *values* to *length* points by interpolation over their indices;
    return the new y-values, or (x, y) lists when *dimensions* is 2.
    """
    x_old = list(range(0, len(values)))
    interpolate = scipy.interpolate.interp1d(
        x_old,
        values,
        kind = interpolation_type
    )
    x_new = list(numpy.linspace(min(x_old), max(x_old), length))
    y_new = [float(interpolate(x)) for x in x_new]
    if dimensions == 1:
        return y_new
    if dimensions == 2:
        return (x_new, y_new)
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """
    Square off a waveform in place: non-negative samples become the maximum
    amplitude and negative samples the minimum amplitude; the mutated array
    is also returned.

    NOTE(review): the boolean-mask assignments assume *values* is a NumPy
    array (e.g. scipy.io.wavfile output) — a plain list would raise.  For
    integer dtypes the scaled intermediate values truncate; confirm callers
    pass float data if exact amplitudes matter.
    """
    # max(values) on the right-hand side is evaluated before the assignment
    # mutates the array, so it is the original maximum
    values[values >= 0] = fraction_amplitude * max(values)
    values[values < 0] = fraction_amplitude * min(values)
    # undo the fraction_amplitude scaling, restoring full amplitude in place
    values[:] = [x * (1 / fraction_amplitude) for x in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
):
    """
    Read a WAV file, square off its waveform and write the result to
    *filename_rectangle_waveform* (default: a non-clobbering variant of the
    input filename).
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    # the helper performs the full transformation; a copy-pasted duplicate of
    # its three statements previously re-applied it here a second time
    values = change_waveform_to_rectangle_waveform(
        values = values,
        fraction_amplitude = fraction_amplitude
    )
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """Divide each element of *x* by *summation* (default: sum(x), i.e. normalise to unity)."""
    divisor = sum(x) if summation is None else summation
    return [element / divisor for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """Linearly map the values of *x* onto the interval [minimum, maximum]."""
    result = []
    for element in x:
        result.append(
            minimum + (element - min(x)) * ((maximum - minimum)\
            / (max(x) - min(x)))
        )
    return result
def composite_variable(
    x
):
    """Combine the values of *x* into a single number using powers of k = len(x) + 1."""
    k = len(x) + 1
    return sum(
        k ** (index - 1) * element for index, element in enumerate(x)
    )
def model_linear(
    data = None,
    quick_calculation = False
):
    """
    Fit y = b0 + b1 * x by least squares to (x, y) pairs; when
    *quick_calculation* is set, thin the data to 10 spread points first.
    Return (b0, b1).
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x = sum(x for x, _ in data)
    sum_y = sum(y for _, y in data)
    sum_x_squared = sum(x ** 2 for x, _ in data)
    sum_xy = sum(x * y for x, y in data)
    b1 = (sum_xy - (sum_x * sum_y) / n) /\
         (sum_x_squared - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
):
    """Unpickle and return the object stored in *filename*."""
    # with-statement closes the file handle (previously leaked)
    with open(filename, "rb") as object_file:
        return pickle.load(object_file)
def export_object(
    x,
    filename = None,
    overwrite = False
):
    """Pickle *x* to *filename* (made collision-safe unless *overwrite* is set)."""
    filename = propose_filename(
        filename = filename,
        overwrite = overwrite
    )
    # with-statement closes the file handle (previously leaked)
    with open(filename, "wb") as object_file:
        pickle.dump(x, object_file)
def string_to_bool(x):
    """Interpret "yes", "true", "t" and "1" (case-insensitive) as True."""
    return x.lower() in {"yes", "true", "t", "1"}
def ustr(text):
    """
    Convert a string to Python 2 unicode or Python 3 string as appropriate to
    the version of Python in use.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
):
    """
    Convert a non-negative integer (or its string form) to English words,
    e.g. 123 -> "one hundred twenty three".  Handles magnitudes up to the
    vigintillions; 0 yields an empty string.
    """
    # word tables; trailing spaces let the words concatenate directly
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        # progression is how many digits extend beyond this 3-digit window;
        # negative values mean the window is only partially filled
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            thousand = thousands[index]
        # groups are ordered least to most significant, so each group's text
        # is prepended to the running result
        if number_2 == 0:
            number_words = ones[number_1] + thousand + number_words
        elif number_2 == 1:
            number_words = teens[number_1] + thousand + number_words
        elif number_2 > 1:
            number_words = tens[number_2] + ones[number_1] + thousand + number_words
        if number_3 > 0:
            number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """Replace each run of digits in *text* with its English-word rendering."""
    # Split the text into alternating non-digit and digit segments.
    segments = re.split(r"(\d+)", text)
    if segments[-1] == "":
        segments = segments[:-1]
    translated = []
    for segment in segments:
        if all(character.isdigit() for character in segment):
            translated.append(number_to_English_text(number = segment))
        else:
            translated.append(segment)
    return "".join(translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
):
    """
    This function replaces contractions with full words and replaces numbers
    with digits in specified text. There is the option to remove articles.
    """
    # both lookup tables are constant; they were previously rebuilt as
    # literals on every iteration of the per-word loop
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    words = text.split()
    text_translated = ""
    for word in words:
        if remove_articles and word in ["a", "an", "the"]:
            continue
        if word in contractions_expansions:
            word = contractions_expansions[word]
        if word in numbers_digits:
            word = numbers_digits[word]
        text_translated += " " + word
    return text_translated.strip()
def split_into_sentences(
    text = None
):
    """
    Split `text` into a list of sentence strings using a rule-based approach:
    known abbreviations, acronyms and website TLD dots are protected with a
    <prd> placeholder, sentence-final punctuation is tagged with <stop>, and
    the text is then split on <stop>. The final fragment (assumed incomplete,
    since splitting leaves it after the last terminator) is discarded.
    """
    capitals = "([A-Z])"
    prefixes = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    starters = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = "[.](com|gov|io|net|org|pro)"
    # Pad with spaces so the space-anchored patterns below can match at the
    # ends of the text.
    text = " " + text + " "
    text = text.replace("\n", " ")
    # Protect dots that are not sentence boundaries by replacing them with
    # the <prd> placeholder.
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub("\s" + capitals + "[.] ", " \\1<prd> ", text)
    # An acronym followed by a sentence starter is treated as a boundary.
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + capitals + "[.]", " \\1<prd>", text)
    # Move terminal punctuation outside closing quotation marks so the <stop>
    # tag lands after the quote.
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    # Tag remaining terminators, then restore protected dots.
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
):
    """
    Return `text` rejoined from its sentences with the first sentence removed.
    NOTE(review): split_into_sentences already drops the trailing incomplete
    fragment; the [1:] additionally drops the first sentence — presumably
    assumed incomplete as well. Confirm intent with callers.
    """
    return " ".join(split_into_sentences(text)[1:])
def pseudorandom_MAC_address():
    """
    Return a pseudorandom MAC address as a colon-separated string of six
    two-digit lowercase hexadecimal octets, e.g. "3a:0f:51:c2:9e:07".
    Uses the `random` module, so it is reproducible under `random.seed`.
    """
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{0:02x}".format(octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
):
    """
    Return the attribute `name` of `object_instance`. A name of the form
    "attribute[i]" is interpreted as element i of the attribute. On any
    failure (missing attribute, bad index, unparsable name), return
    `imputation_default_value` instead of raising.
    """
    try:
        if "[" in name and "]" in name:
            attribute_name, _, remainder = name.partition("[")
            element_index = int(remainder.split("]")[0])
            return getattr(object_instance, attribute_name)[element_index]
        return getattr(object_instance, name)
    # Bare except preserved deliberately: any lookup failure falls back to the
    # imputation default.
    except:
        return imputation_default_value
def generate_Python_variable_names(
    number = 10
):
    """
    Return a list of `number` random 32-character names that are valid Python
    identifiers (UUID4 hex strings whose first character is a letter).
    """
    names = []
    while True:
        if len(names) == number:
            return names
        # uuid4().hex is the 32-character hex form, i.e. str(uuid4()) with the
        # hyphens removed.
        candidate = uuid.uuid4().hex
        if candidate[0].isalpha():
            names.append(candidate)
def add_time_variables(df, reindex = True):
    """
    Return a DataFrame with variables for weekday index, weekday name, timedelta
    through day, fraction through day, hours through day and days through week
    added, optionally with the index set to datetime and the variable `datetime`
    removed. It is assumed that the variable `datetime` exists.

    Returns False (after logging an error) if no `datetime` column is present.
    NOTE(review): the `reindex` parameter is currently unused — the index is
    always set to `datetime`; confirm whether it should gate that step.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # bug fix: Series.dt.weekday_name was removed in pandas 1.0;
    # dt.day_name() is the supported replacement with identical output.
    df["weekday_name"] = df["datetime"].dt.day_name()
    # Timedelta since midnight of the same day.
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    # Continuous position in the week: Monday 00:00 is 0.0, Sunday 24:00 is 7.0.
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    df.index = df["datetime"]
    #del df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create daily plots of a variable in a DataFrame, optionally renormalized. It
    is assumed that the DataFrame index is datetime.

    One line (and/or scatter) per day is drawn onto the current matplotlib
    axes against the `hours_through_day` variable. Returns False (after
    logging an error) if the index is not datetime.
    NOTE(review): grouping is by day-of-month, so the same day number from
    different months is merged into one group — confirm this is intended.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    days = []
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        if renormalize:
            # Rescale each day independently to [0, 1].
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create weekly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the variable `days_through_week` exists.

    One line (and/or scatter) per ISO week is drawn onto the current
    matplotlib axes against `days_through_week`, with weekday names as x tick
    labels. Returns False (after logging an error) if `days_through_week` is
    missing.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    weeks = []
    # bug fix: DatetimeIndex.week was deprecated in pandas 1.1 and removed in
    # 2.0; isocalendar().week yields the same ISO week numbers for grouping.
    for group in df.groupby(df.index.isocalendar().week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            # Rescale each week independently to [0, 1].
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    plt.xticks(
        [ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def add_rolling_statistics_variables(
    df = None,
    variable = None,
    window = 20,
    upper_factor = 2,
    lower_factor = 2
):
    """
    Add rolling statistics variables derived from a specified variable in a
    DataFrame.

    Adds four columns named "<variable>_rolling_mean",
    "<variable>_rolling_standard_deviation", "<variable>_rolling_upper_bound"
    and "<variable>_rolling_lower_bound", where the bounds are the rolling
    mean plus/minus the specified factors times the rolling standard
    deviation. The first `window` - 1 entries are NaN. Returns the mutated
    DataFrame.
    """
    # bug fix: pd.stats.moments.rolling_mean/rolling_std were removed from
    # pandas (0.23); Series.rolling(...).mean()/.std() are the supported
    # equivalents and produce identical values (sample std, ddof = 1).
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"] = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = None,
    variables_exclude = None
):
    """
    Rescale variables in a DataFrame, excluding variables with NaNs and strings,
    excluding specified variables, and including specified variables.

    Rescaling is min-max to [0, 1] via MinMaxScaler; the DataFrame is mutated
    and returned. `variables_include` / `variables_exclude` accept lists of
    column names (default: none).
    """
    # bug fix: the previous implementation used mutable default arguments and
    # extended the passed exclusion list in place, so exclusions leaked back
    # to the caller and accumulated across calls on the shared default list.
    # Copying the list (and defaulting to None) fixes both.
    variables_not_rescale = list(variables_exclude) if variables_exclude else []
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    if variables_include:
        variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Create a day-long histogram of counts of the variable for each hour. It is
    assumed that the DataFrame index is datetime and that the variable
    `hour` exists.

    Draws a bar chart of per-hour non-null counts onto the current matplotlib
    axes. Returns False (after logging an error) if the index is not datetime.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # Non-null count of `variable` grouped by hour of day (0-23).
    counts = df.groupby(df.index.hour)[variable].count()
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
):
    """
    Create a week-long histogram of counts of the variable for each day. It is
    assumed that the DataFrame index is datetime and that the variable
    `weekday_name` exists.

    Draws a bar chart of per-weekday non-null counts (Monday through Sunday)
    onto the current matplotlib axes. Returns False (after logging an error)
    if the index is not datetime.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # bug fix: DatetimeIndex.weekday_name was removed in pandas 1.0;
    # day_name() returns the same English weekday names.
    counts = df.groupby(df.index.day_name())[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
):
    """
    Create a year-long histogram of counts of the variable for each month. It is
    assumed that the DataFrame index is datetime and that the variable
    `month_name` exists.

    Draws a bar chart of per-month non-null counts (January through December)
    onto the current matplotlib axes. Returns False (after logging an error)
    if the index is not datetime.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # Group by English month name and reorder into calendar order (month_name[0]
    # is the empty string, hence the [1:] slice).
    counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Set up a Jupyter notebook with a few defaults.

    Applies a seaborn paper/monospace theme, silences warnings, raises the
    pandas display limits to 500 rows/columns and enlarges the default
    matplotlib figure size.
    """
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    pd.set_option("display.max_rows", 500)
    pd.set_option("display.max_columns", 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
):
    """
    Display a progress bar widget in a Jupyter notebook. Its dependencies must
    be enabled on launching Jupyter, such as in the following way:
    jupyter nbextension enable --py widgetsnbextension
    The progress bar can be used in a way like the following:
    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)

    This is a generator: it yields the elements of `sequence` unchanged while
    updating an ipywidgets progress bar roughly `every` items. If `size` is
    not given it is taken from len(sequence); for sized sequences `every`
    defaults to roughly 0.5% of the size, while unsized iterators require an
    explicit `every`. The bar turns red if iteration raises, green on success.
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            # No len(): an unsized iterator; an indeterminate bar is shown.
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200) # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            # Only refresh the widget on every `every`-th item to limit
            # notebook traffic.
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name = name,
                        index = index,
                        size = size
                    )
            yield record
    except:
        # Mark the bar as failed, then propagate the original exception.
        progress.bar_style = "danger"
        raise
    else:
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name = name,
            index = str(index or "?")
        )
_main()  # import-time side effect: create the module-level clocks registry
|
wdbm/shijian
|
shijian.py
|
add_rolling_statistics_variables
|
python
|
def add_rolling_statistics_variables(
df = None,
variable = None,
window = 20,
upper_factor = 2,
lower_factor = 2
):
df[variable + "_rolling_mean"] = pd.stats.moments.rolling_mean(df[variable], window)
df[variable + "_rolling_standard_deviation"] = pd.stats.moments.rolling_std(df[variable], window)
df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
return df
|
Add rolling statistics variables derived from a specified variable in a
DataFrame.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1723-L1738
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
name = "shijian"
version = "2018-06-02T1644Z"
# Module-level logger, colourised via the technicolor stream handler.
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    """Create the module-level `clocks` registry that Clock instances join."""
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
):
    """
    Return the current UTC time styled as UNIX time (whole seconds by
    default; see style_datetime_object for the available styles).
    """
    return style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
def time_UTC(
    style = None
):
    """
    Return the current UTC time styled via style_datetime_object (its
    filename-safe default style when `style` is None).
    """
    return style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
):
    """
    Return a collision-safe filename based on the current UNIX time, with an
    optional extension appended; propose_filename appends a counter if the
    name already exists.
    """
    filename = str(
        time_UNIX(
            style = style
        )
    )
    if extension:
        filename = filename + extension
    filename_proposed = propose_filename(
        filename = filename
    )
    return filename_proposed
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
):
    """
    Return a collision-safe filename based on the current UTC time, with an
    optional extension appended; propose_filename appends a counter if the
    name already exists.
    """
    filename = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
    if extension:
        filename = filename + extension
    filename_proposed = propose_filename(
        filename = filename
    )
    return filename_proposed
def style_minimal_seconds(seconds):
    """
    Return a compact human-readable duration for a number of seconds, e.g.
    "1 days 2 hours 5 seconds", omitting any unit whose value is zero.
    """
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for unit in ("days", "hours", "minutes", "seconds"):
        value = getattr(delta, unit)
        if value:
            parts.append("{0} {1}".format(int(value), unit))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Return a UNIX timestamp (seconds) styled via style_datetime_object,
    interpreting the timestamp as UTC.
    """
    return style_datetime_object(
        datetime_object = datetime.datetime.utcfromtimestamp(timestamp),
        style = style
    )
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Return a string (or, for UNIX-time styles, a number) representation of a
    datetime.datetime or datetime.timedelta object in the named style.
    Unrecognised styles fall back to the filename-safe "YYYY-MM-DDTHHMMZ"
    form. For timedelta input, `style` is treated as a format template with
    the placeholders Y/D/H/M/S (totals) and YYYY/DD/HH/MM/SS (zero-padded
    broken-down values).
    """
    if type(datetime_object) is datetime.datetime:
        # filename safe
        if style == "YYYY-MM-DDTHHMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
        # filename safe with seconds
        elif style == "YYYY-MM-DDTHHMMSSZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%SZ")
        # filename safe with seconds and microseconds
        elif style == "YYYY-MM-DDTHHMMSSMMMMMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%S%fZ")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS UTC":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S UTC")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS Z":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S Z")
        # UNIX time in seconds with second fraction
        elif style == "UNIX time S.SSSSSS":
            return (datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds()
        # UNIX time in seconds rounded
        elif style == "UNIX time S":
            return int((datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds())
        # human-readable date
        elif style == "day DD month YYYY":
            return datetime_object.strftime("%A %d %B %Y")
        # human-readable time and date
        elif style == "HH:MM day DD month YYYY":
            return datetime_object.strftime("%H:%M %A %d %B %Y")
        # human-readable time with seconds and date
        elif style == "HH:MM:SS day DD month YYYY":
            return datetime_object.strftime("%H:%M:%S %A %d %B %Y")
        # human-readable date with time with seconds
        elif style == "day DD month YYYY HH:MM:SS":
            return datetime_object.strftime("%A %d %B %Y %H:%M:%S")
        # human-readable-audible time with seconds and date
        elif style == "HH hours MM minutes SS sounds day DD month YYYY":
            return datetime_object.strftime("%H hours %M minutes %S seconds %A %d %B %Y")
        # human-readable days, hours and minutes
        elif style == "DD:HH:MM":
            return datetime_object.strftime("%d:%H:%M")
        # human-readable days, hours, minutes and seconds
        elif style == "DD:HH:MM:SS":
            return datetime_object.strftime("%d:%H:%M:%S")
        # human-readable time with seconds
        elif style == "HH:MM:SS":
            return datetime_object.strftime("%H:%M:%S")
        # human-readable-audible time with seconds
        elif style == "HH hours MM minutes SS seconds":
            return datetime_object.strftime("%H hours %M minutes %S seconds")
        # filename safe
        else:
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
    if type(datetime_object) is datetime.timedelta:
        # For timedeltas the datetime default style makes no sense; substitute
        # a days/hours/minutes/seconds template.
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        # Break total seconds down into units, keeping running totals for the
        # single-letter placeholders.
        seconds_total = seconds
        minutes = int(math.floor(seconds / 60))
        minutes_total = minutes
        seconds -= minutes * 60
        hours = int(math.floor(minutes / 60))
        hours_total = hours
        minutes -= hours * 60
        days = int(math.floor(hours / 24))
        days_total = days
        hours -= days * 24
        years = int(math.floor(days / 365))
        years_total = years
        days -= years * 365
        return style.format(**{
            "Y"   : years_total,
            "D"   : days_total,
            "H"   : hours_total,
            "M"   : minutes_total,
            "S"   : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD"  : str(days).zfill(2),
            "HH"  : str(hours).zfill(2),
            "MM"  : str(minutes).zfill(2),
            "SS"  : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert a 24-hour "HHMM" time string to minutes after midnight."""
    return int(HHMM[:2]) * 60 + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time of day as whole minutes after midnight."""
    utc_now = datetime.datetime.utcnow()
    return utc_now.hour * 60 + utc_now.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop = None # string "HHMM" e.g. "1000"
):
    """
    Return True if the current UTC time falls within a daily time range and
    False otherwise; the range may wrap past midnight (e.g. "1700--1000").
    Returns None when no range is specified at all. The range can be given
    either as one "HHMM--HHMM" string or as separate start/stop strings.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        parts = time_range.split("--")
        time_start = parts[0]
        time_stop = parts[1]
    minutes_per_day = 1440
    minutes_now = now_in_minutes()
    start_minutes = HHMM_to_minutes(time_start)
    stop_minutes = HHMM_to_minutes(time_stop)
    # Modular arithmetic handles ranges that wrap past midnight: the time is
    # inside the range when less of the day has elapsed since the start than
    # the total length of the range.
    elapsed_since_start = (minutes_now - start_minutes) % minutes_per_day
    range_length = (stop_minutes - start_minutes) % minutes_per_day
    return elapsed_since_start <= range_length
def timer(function):
    """
    Decorator that times each call of `function` with a Clock named after the
    function; the clock registers itself with the global clocks registry, so
    timings appear in clocks.report().
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # NOTE(review): `arguments` is computed but never used — presumably
        # left over for debugging; confirm before removing.
        arguments = inspect.getcallargs(function, *args, **kwargs)
        clock = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    An accumulating stopwatch. A clock starts on instantiation by default,
    registers itself with the module-level `clocks` registry when one exists,
    and accumulates elapsed time across start/stop cycles in `accumulator`.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Start (or restart) timing from now."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Stop timing, folding the elapsed interval into the accumulator."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the running start marker."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the accumulated elapsed time in seconds (float)."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a small aligned text table of the clock attributes."""
        # NOTE(review): ljust is applied to strings that start with "\n", so
        # continuation rows are padded one character differently from the
        # header row — presumably cosmetic; confirm before changing.
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the report."""
        print(self.report())
class Clocks(object):
    """
    A registry of Clock instances with aggregate reporting: "statistics"
    style averages times per clock name, "full" style lists every clock.
    """
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a Clock instance."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """Return a text report in the given style ("statistics" or "full")."""
        if style is None:
            style = self._default_report_style
        # NOTE(review): an unrecognised style with a non-empty registry leaves
        # `string` unbound and raises NameError — confirm intended handling.
        if self._list_of_clocks != []:
            if style == "statistics":
                # Create a dictionary of clock types with corresponding lists of
                # times for all instances.
                dictionary_of_clock_types = {}
                # Get the names of all clocks and add them to the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()] = []
                # Record the values of all clocks for their respective names in
                # the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()].append(clock.time())
                # Create a report, calculating the average value for each clock
                # type.
                string = "clock type".ljust(39) + "mean time (s)"
                for name, values in list(dictionary_of_clock_types.items()):
                    string += "\n" +\
                        str(name).ljust(39) + str(sum(values)/len(values))
                string += "\n"
            elif style == "full":
                # Create a report, listing the values of all clocks.
                string = "clock".ljust(39) + "time (s)"
                for clock in self._list_of_clocks:
                    string += "\n" +\
                        str(clock.name()).ljust(39) + str(clock.time())
                string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report in the given style."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track progress of a task from (fraction, UNIX time) data points and
    extrapolate linearly to an estimated completion time. In quick-calculation
    mode, data points are recorded at most once per `update_rate` seconds.
    """
    def __init__(
        self
    ):
        self.data = []                   # list of (fraction, UNIX time) tuples
        self.quick_calculation = False   # throttle datum recording when True
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Throttle datum recording to at most one per update_rate seconds."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Record every datum again."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """Record a completion fraction (0-1) and return a status string."""
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """
        Return the extrapolated completion time as a datetime (the time at
        which the linear fit of time versus fraction reaches fraction 1), or
        0 when fewer than two data points exist.
        """
        if len(self.data) <= 1:
            return 0
        else:
            try:
                # model_linear is defined elsewhere in this module; it returns
                # the intercept and slope of a linear fit.
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            except:
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the styled estimated completion time."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated seconds remaining (never negative)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded completion fraction."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recent completion fraction as a percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a one-line status string (default style only; other styles return None)."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """Return a random version-4 UUID in its canonical 36-character string form."""
    return "{0}".format(uuid.uuid4())
def unique_number(
    style = None
):
    """
    Return the next number in a module-global monotonically-increasing
    sequence. With style "integer 3 significant figures" the sequence runs
    100-999 (an Exception is raised beyond 999); otherwise it starts at 1.
    The sequences persist in module globals for the lifetime of the process.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        initial_number = 100
        # Lazily create the global history list on first use.
        if "unique_numbers_3_significant_figures" not in globals():
            global unique_numbers_3_significant_figures
            unique_numbers_3_significant_figures = []
        if not unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(initial_number)
        else:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        if\
            style == "integer 3 significant figures" and \
            unique_numbers_3_significant_figures[-1] > 999:
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        initial_number = 1
        # Lazily create the global history list on first use.
        if "unique_numbers" not in globals():
            global unique_numbers
            unique_numbers = []
        if not unique_numbers:
            unique_numbers.append(initial_number)
        else:
            unique_numbers.append(unique_numbers[-1] + 1)
        return unique_numbers[-1]
def unique_3_digit_number():
    """Return the next unique integer in the 100-999 range (see unique_number)."""
    return unique_number(style = "integer 3 significant figures")
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Return `text` made safe for use as a filename or URL: accents are
    stripped to ASCII, punctuation is removed, and whitespace runs become
    underscores (filename mode, the default) or hyphens with lowercasing
    (URL mode).
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    # Decompose accented characters and discard the non-ASCII parts.
    decomposed = unicodedata.normalize("NFKD", text)
    ascii_text = decomposed.encode("ascii", "ignore").decode("utf-8")
    # Keep only word characters, whitespace and hyphens.
    cleaned = re.sub("[^\w\s-]", "", ascii_text).strip()
    if filename and not URL:
        cleaned = re.sub("[\s]+", "_", cleaned)
    elif URL:
        cleaned = re.sub("[-\s]+", "-", cleaned.lower())
    return str(cleaned) if return_str else cleaned
## @brief propose a filename
# @detail This function returns a filename string. If a default filename is not
# specified, the function generates one based on the current time. If a default
# filename is specified, the function uses it as the default filename. By
# default, the function then checks to see if using the filename would cause
# overwriting of an existing file. If overwriting is possible, the function
# appends an integer to the filename in a loop in order to generate a filename
# that would not cause overwriting of an existing file. The function can be set
# to overwrite instead of using the default overwrite protection behaviour.
# @return filename string
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    """
    Return a filename string, generated from the current UTC time if none is
    given, optionally slugified (by default excluding the extension), and —
    unless `overwrite` is set — suffixed with "_<n>" as needed so that it does
    not collide with an existing file.
    """
    # If no file name is specified, generate one.
    if not filename:
        filename = time_UTC()
    filename_proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            filename_base = os.path.splitext(os.path.basename(filename))[0]
            filename_extension = os.path.splitext(os.path.basename(filename))[1]
            filename_base = slugify(text = filename_base)
            filename_proposed = filename_base + filename_extension
        else:
            filename_proposed = slugify(text = filename)
    if not overwrite:
        count = 0
        # NOTE(review): on collision the candidate is rebuilt from the
        # *unslugified* `filename`, so a slugified proposal can revert —
        # confirm whether this is intended.
        while os.path.exists(filename_proposed):
            count = count + 1
            filename_directory = os.path.dirname(filename)
            filename_base = os.path.splitext(os.path.basename(filename))[0]
            filename_extension = os.path.splitext(os.path.basename(filename))[1]
            if filename_directory:
                filename_proposed = filename_directory + \
                    "/" + \
                    filename_base + \
                    "_" + \
                    str(count) + \
                    filename_extension
            else:
                filename_proposed = filename_base + \
                    "_" + \
                    str(count) + \
                    filename_extension
    return filename_proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating a
    file at the filepath.
    """
    # tempfile._get_candidate_names is a private API that has changed across
    # Python versions; a random UUID hex string gives the same guarantee (a
    # random, extensionless, practically collision-free name) via public APIs.
    return "/tmp/" + uuid.uuid4().hex
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return a specified number of last lines of a specified file. If there is an
    error or the file does not exist, return False.

    The result is the raw bytes output of the external `tail` utility (which
    must be available on PATH); "~" and environment variables in the filepath
    are expanded first. The bare except is a deliberate best-effort fallback.
    """
    try:
        filepath = os.path.expanduser(os.path.expandvars(filepath))
        if os.path.isfile(filepath):
            text = subprocess.check_output(["tail", "-" + str(lines), filepath])
            if text:
                return text
            else:
                # Empty file: treated the same as failure.
                return False
        else:
            return False
    except:
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Check that the kernel release string (platform.release()) contains
    `keyphrase`. If it does not: log a warning when `warn` is True, and log a
    fatal message and raise EnvironmentError when `require` is True.
    """
    import platform
    release = platform.release()
    if keyphrase not in release:
        message =\
            "inappropriate environment: " +\
            "\"{keyphrase}\" required; \"{release}\" available".format(
                keyphrase = keyphrase,
                release = release
            )
        if warn is True:
            log.warning(message)
        if require is True:
            log.fatal(message)
            raise(EnvironmentError)
def ensure_program_available(
    program
):
    """
    Ensure that `program` is available on PATH (via `which`), logging the
    outcome and raising EnvironmentError when it is not found.
    """
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is None:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
    else:
        log.debug("program {program} available".format(
            program = program
        ))
def which(
    program
):
    """
    Return the full path of the executable `program`, or None. A program
    given with a directory component is checked directly; a bare name is
    searched for on PATH.
    """
    def executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        if executable(program):
            return program
    else:
        for search_path in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(search_path.strip('"'), program)
            if executable(candidate):
                return candidate
    return None
def running(
    program
):
    """
    Return True if `ps -A` lists a non-defunct process whose line contains
    `program`, otherwise False. Matching is a raw substring match on the
    bytes of the process listing.
    """
    pattern = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0]
    for line in listing.split(b"\n"):
        if pattern in line and b"defunct" not in line:
            return True
    return False
def ensure_file_existence(
    filename
):
    """
    Ensure that the specified file exists, raising IOError if it does not.
    Environment variables in the filename are expanded before checking.
    """
    # bug fix: the log format strings were missing the {filename} placeholder,
    # so the messages never included the filename (cf. the {program} messages
    # in ensure_program_available).
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the file at the specified path (raises OSError if absent)."""
    os.remove(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Find filenames in `directory` that contain a number and end in
    `extension`, grouped into sequences by their non-numeric pattern. By
    default return the first sequence naturally sorted; otherwise return a
    dict mapping each pattern (digits replaced by "XXX") to its filenames.
    """
    filenames_of_directory = os.listdir(directory)
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        # Files whose names differ only in their digits share one pattern key.
        pattern = re.sub("\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        # NOTE(review): raises StopIteration when no sequence is found, and
        # "first" key order depends on dict iteration order — confirm intent.
        first_key_identified = next(iter(filename_sequences.keys()))
        # natural_sort is defined elsewhere in this module.
        filename_sequence = \
            natural_sort(filename_sequences[first_key_identified])
        return filename_sequence
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the names of the regular files (not directories) at the specified directory."""
    entries = os.listdir(directory)
    return [entry for entry in entries if os.path.isfile(os.path.join(directory, entry))]
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """Return the paths of all files under the specified directory, recursing into subdirectories."""
    collected = []
    for root, _, filenames in os.walk(directory):
        collected.extend(os.path.join(root, filename) for filename in filenames)
    return collected
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return the absolute paths of the files at `directory`, optionally
    filtered by extension; raise IOError if the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = [os.path.abspath(os.path.join(directory, filename)) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
    if extension_required:
        # NOTE(review): this is a substring test on the extension, so ".c"
        # also matches ".cpp" — confirm whether exact matching is wanted.
        filepaths = [filepath for filepath in filepaths if extension_required in os.path.splitext(filepath)[1]]
    return filepaths
def engage_command(
    command = None,
    background = True,
    timeout = None
):
    """
    Run a shell command via /bin/bash. In background mode (the default) the
    process is launched and None is returned immediately (any timeout is
    ignored with a warning). In foreground mode the call blocks and returns
    the command's stdout as bytes, or False if it times out or fails, in
    which case the process is killed.

    NOTE(review): the command is interpolated into a shell — never pass
    untrusted input to this function.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash"
        )
        return None
    elif not background:
        process = subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash",
            stdout = subprocess.PIPE
        )
        try:
            process.wait(timeout = timeout)
            output, errors = process.communicate(timeout = timeout)
            return output
        except:
            # Best-effort: any failure (including TimeoutExpired) kills the
            # process and reports False.
            process.kill()
            return False
    else:
        return None
def percentage_power():
    """
    Return the battery charge percentage reported by UPower (e.g. "87%"),
    "100%" when only line power is detected, or None if the power status
    cannot be determined.
    """
    try:
        filenames_power = engage_command(command = "upower -e")
        filenames_power = [line for line in filenames_power.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # Query the battery device and extract its "percentage" field.
            # (The format placeholder was previously the literal text
            # "(unknown)", so the device path was never substituted.)
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery))
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    except Exception:
        # Best-effort by design: no UPower or unexpected output yields None.
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a list
    to a specified type and return the new list.

    Sublists are recursed into; every other element is passed to the
    element_type callable. (Previously only element_type=str was supported and
    any other type silently returned None.)
    """
    return [
        convert_type_list_elements(
            list_object  = element,
            element_type = element_type
        ) if isinstance(element, list) else element_type(element)
        for element in list_object
    ]
class List_Consensus(list):
    """
    This class is designed to instantiate a list of elements. It features
    functionality that limits approximately the memory usage of the list. On
    estimating the size of the list as greater than the specified or default
    size limit, the list reduces the number of elements it contains. The list
    provides functionality to return its most frequent element, which can be
    used to determine its "consensus" element.
    """
    def __init__(
        self,
        *args
    ):
        """Initialise as a normal list with a default size constraint."""
        # list initialisation
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        # Approximate memory budget checked with sys.getsizeof on append.
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate memory budget in bytes (ignored when None)."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        This function removes the least frequent elements until the size
        constraint is met.
        """
        if size is None:
            size = self.size_constraint
        # NOTE(review): sys.getsizeof measures the list object shallowly (it
        # does not follow references to the elements), so the budget is only
        # approximate.
        while sys.getsizeof(self) > size:
            element_frequencies = collections.Counter(self)
            # most_common() orders by decreasing frequency; take the last
            # (least frequent) element and remove its first occurrence.
            infrequent_element = element_frequencies.most_common()[-1:][0][0]
            self.remove(infrequent_element)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append an element, then optionally enforce the size constraint."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None (e.g. empty list)."""
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        except:
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
):
    """Sort strings so embedded numbers order numerically, not lexically."""
    def _key(text):
        return [
            int(token) if token.isdigit() else token.lower()
            for token in re.split("([0-9]+)", text)
        ]
    return sorted(list_object, key = _key)
def indices_of_list_element_duplicates(
    x
):
    """Yield the index of every element that repeats an earlier element."""
    observed = set()
    for position, item in enumerate(x):
        # Convert unhashable containers to hashable equivalents.
        if isinstance(item, list):
            item = tuple(item)
        if isinstance(item, dict):
            item = tuple(item.items())
        if item in observed:
            yield position
        else:
            observed.add(item)
def indices_of_greatest_values(
    x,
    number = 5
):
    """Return indices of the greatest values of x, in ascending value order."""
    count = min(number, len(x))
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _value in ranked[-count:]]
def unique_list_elements(x):
    """Return the first occurrence of each element of x, preserving order."""
    seen = []
    for candidate in x:
        # Linear membership test (not a set) so unhashable elements work too.
        if candidate in seen:
            continue
        seen.append(candidate)
    return seen
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    This function returns the specified number of elements of a list spread
    approximately evenly.
    """
    # A list that is already small enough is returned unchanged.
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    # A single element is taken from the middle of the list.
    if number_of_elements == 1:
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # Otherwise take one element near the start of the first stride, then
    # recurse on the remainder of the list for the remaining elements.
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def split_list(
    list_object = None,
    granularity = None
):
    """
    This function splits a list into a specified number of lists. It returns a
    list of lists that correspond to these parts. Nonpositive numbers of parts
    are not accepted and numbers of parts greater than the number of elements
    in the list result in the maximum possible number of lists being returned.

    Raises an Exception for nonpositive granularity (a granularity of zero
    previously caused an uncontrolled ZeroDivisionError).
    """
    if granularity < 0:
        raise Exception("negative granularity")
    if granularity == 0:
        raise Exception("zero granularity")
    mean_length = len(list_object) / float(granularity)
    split_list_object = []
    last_length = float(0)
    if len(list_object) > granularity:
        # Walk the list in strides of the (possibly fractional) mean length.
        while last_length < len(list_object):
            split_list_object.append(
                list_object[int(last_length):int(last_length + mean_length)]
            )
            last_length += mean_length
    else:
        # More parts requested than elements: one singleton list per element.
        split_list_object = [[element] for element in list_object]
    return split_list_object
def ranges_edge_pairs(
    extent = None,
    range_length = None
):
    """
    Return edge pairs that partition an extent into ranges of at most a given
    length. For example, splitting an extent of 76 into ranges of length 20:

    >>> ranges_edge_pairs(
    ... extent       = 76,
    ... range_length = 20
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    pair_count = int(math.ceil(extent / range_length))
    edges = []
    for ordinal in range(pair_count):
        lower = ordinal * range_length + ordinal
        upper = min((ordinal + 1) * range_length + ordinal, extent)
        edges.append((lower, upper))
    return edges
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list ("- name: value" lines) into a
    nested dictionary; a bullet without a value opens a nested branch.
    """
    # One match per bullet: (indentation, name, optional value).
    line = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [{}]
    for indent, name, value in line.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            # Deeper indentation is only legal directly under a fresh branch.
            assert not stack[-1], "unexpected indent"
        elif indent < depth:
            # De-indentation closes the current branch.
            # NOTE(review): only one level is popped here — presumably inputs
            # de-indent one level at a time; confirm for multi-level dedents.
            stack.pop()
        stack[-1][name] = value or {}
        if not value:
            # new branch
            stack.append(stack[-1][name])
        depth = indent
    return stack[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list ("- name: value" lines) into a
    nested OrderedDict; a bullet without a value opens a nested branch.
    """
    # One match per bullet: (indentation, name, optional value).
    line = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [collections.OrderedDict()]
    for indent, name, value in line.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            # Deeper indentation is only legal directly under a fresh branch.
            assert not stack[-1], "unexpected indent"
        elif indent < depth:
            # De-indentation closes the current branch (one level at a time).
            stack.pop()
        stack[-1][name] = value or collections.OrderedDict()
        if not value:
            # new branch
            stack.append(stack[-1][name])
        depth = indent
    return stack[0]
def open_configuration(
    filename = None
):
    """
    Read a Markdown-list configuration file and return it parsed as an
    OrderedDict.
    """
    # Context manager closes the handle promptly (previously leaked).
    with open(filename, "r") as file_configuration:
        text_configuration = file_configuration.read()
    return Markdown_list_to_OrderedDict(text_configuration)
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
):
    """
    Resample a list of values to a specified length by interpolation; return
    the new values, or an (x, y) tuple of lists when dimensions is 2.
    """
    abscissae = list(range(len(values)))
    interpolate = scipy.interpolate.interp1d(
        abscissae,
        values,
        kind = interpolation_type
    )
    resampled_x = list(numpy.linspace(min(abscissae), max(abscissae), length))
    resampled_y = [float(interpolate(point)) for point in resampled_x]
    if dimensions == 2:
        return (resampled_x, resampled_y)
    if dimensions == 1:
        return resampled_y
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """
    Clip a waveform to a full-amplitude rectangle waveform and return it.

    The boolean-mask assignments imply values is a numpy-style array
    (presumably the samples from scipy.io.wavfile.read — TODO confirm); the
    array is modified in place as well as returned.
    """
    # Collapse samples to +/- fraction_amplitude of the extremes ...
    values[values >= 0] = fraction_amplitude * max(values)
    # NOTE(review): min(values) is evaluated after positives were replaced;
    # the negative samples are still unchanged at that point, so this uses
    # the original minimum.
    values[values < 0] = fraction_amplitude * min(values)
    # ... then scale everything back up to the original amplitude.
    values[:] = [x * (1 / fraction_amplitude) for x in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
):
    """
    Read a WAV file, convert its waveform to a rectangle waveform and write
    the result, by default to a non-clobbering proposed filename.
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    # The conversion is delegated entirely to the helper; the previous
    # implementation erroneously re-applied the same clip-and-rescale
    # transformation a second time after the helper returned.
    values = change_waveform_to_rectangle_waveform(
        values = values,
        fraction_amplitude = fraction_amplitude
    )
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """Scale the elements of x by a sum (by default the sum of x itself)."""
    divisor = sum(x) if summation is None else summation
    return [value / divisor for value in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """Linearly map the elements of x onto the range [minimum, maximum]."""
    lowest = min(x)
    scale = (maximum - minimum) / (max(x) - lowest)
    return [minimum + (value - lowest) * scale for value in x]
def composite_variable(
    x
):
    """Combine the elements of x into one positionally-weighted value."""
    base = len(x) + 1
    return sum(
        base ** (position - 1) * value
        for position, value in enumerate(x)
    )
def model_linear(
    data = None,
    quick_calculation = False
):
    """
    Fit a least-squares line to (x, y) pairs and return its intercept and
    gradient as the tuple (b0, b1).
    """
    if quick_calculation is True:
        # Trade accuracy for speed by fitting to a spread subsample.
        data = select_spread(data, 10)
    n = len(data)
    sum_x = sum(point[0] for point in data)
    sum_y = sum(point[1] for point in data)
    sum_xx = sum(point[0] ** 2 for point in data)
    sum_xy = sum(point[0] * point[1] for point in data)
    # Standard simple-linear-regression normal equations.
    b1 = (sum_xy - (sum_x * sum_y) / n) / (sum_xx - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
):
    """Load and return a pickled object from a file."""
    # Context manager closes the handle promptly (previously leaked).
    with open(filename, "rb") as file_object:
        return pickle.load(file_object)
def export_object(
    x,
    filename = None,
    overwrite = False
):
    """Pickle an object to a file, by default to a non-clobbering filename."""
    filename = propose_filename(
        filename = filename,
        overwrite = overwrite
    )
    # Context manager closes the handle promptly (previously leaked).
    with open(filename, "wb") as file_object:
        pickle.dump(x, file_object)
def string_to_bool(x):
    """Interpret a string as True when it is an affirmative word or "1"."""
    affirmatives = ("yes", "true", "t", "1")
    return x.lower() in affirmatives
def ustr(text):
    """
    Convert a string to Python 2 unicode or Python 3 string as appropriate to
    the version of Python in use; None passes through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
):
    """
    Return English words for a nonnegative integer (given as an int or a
    digit string), e.g. 21 -> "twenty one". Groups of three digits are
    handled up to the vigintillions; zero yields an empty string.
    """
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            # Scale word ("thousand", "million", ...) for this group.
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """
    Return the text with every run of digits replaced by its English words
    (e.g. "take 2" -> "take two").
    """
    # Split the text into text and numbers; the raw string avoids the invalid
    # "\d" escape-sequence warning raised by newer Python versions.
    segments = re.split(r"(\d+)", text)
    if segments[-1] == "":
        segments = segments[:-1]
    text_translated = []
    # Replace numbers with English text.
    for segment in segments:
        # (An empty leading segment also satisfies all(), matching the
        # original behavior: number_to_English_text("") returns "".)
        if all(character.isdigit() for character in segment):
            text_translated.append(number_to_English_text(number = segment))
        else:
            text_translated.append(segment)
    return "".join(text_translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
):
    """
    This function replaces contractions with full words and replaces numbers
    with digits in specified text. There is the option to remove articles.
    """
    # Both lookup tables are built once; previously the dictionary literals
    # were reconstructed on every word of the input text.
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    words = text.split()
    text_translated = ""
    for word in words:
        # Optionally drop articles entirely.
        if remove_articles and word in ["a", "an", "the"]:
            continue
        if word in contractions_expansions:
            word = contractions_expansions[word]
        if word in numbers_digits:
            word = numbers_digits[word]
        text_translated += " " + word
    text_translated = text_translated.strip()
    return text_translated
def split_into_sentences(
    text = None
):
    """
    Split text into a list of sentences, protecting common abbreviations,
    acronyms and website suffixes from being treated as sentence boundaries.
    Works by masking non-terminal periods as "<prd>", marking real sentence
    ends with "<stop>", then splitting on "<stop>".
    """
    capitals = "([A-Z])"
    prefixes = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    starters = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = "[.](com|gov|io|net|org|pro)"
    text = " " + text + " "
    text = text.replace("\n", " ")
    # Mask periods that do not end a sentence.
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub("\s" + capitals + "[.] ", " \\1<prd> ", text)
    # An acronym followed by a sentence starter does end a sentence.
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + capitals + "[.]", " \\1<prd>", text)
    # Move terminal punctuation inside closing quotes outside of them.
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    # Remaining ., ? and ! are genuine sentence boundaries.
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
):
    """Drop the first (possibly partial) sentence and rejoin the remainder."""
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """Return a pseudorandom MAC address as six colon-separated hex octets."""
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{octet:02x}".format(octet = octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
):
    """
    Return the named attribute of an object, supporting an optional index
    suffix in the name (e.g. "values[2]"); return a default value when the
    lookup fails.
    """
    try:
        if "[" in name and "]" in name:
            # "attribute[index]" style access.
            index = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    except (AttributeError, IndexError, KeyError, TypeError, ValueError):
        # Narrowed from a bare except so programming errors and interrupts
        # are no longer silently swallowed.
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
):
    """Return UUID-derived identifier strings that each begin with a letter."""
    names = []
    while len(names) < number:
        # uuid4().hex is the dashless lowercase form of str(uuid4()).
        candidate = uuid.uuid4().hex
        if candidate[0].isalpha():
            names.append(candidate)
    return names
def add_time_variables(df, reindex = True):
    """
    Return a DataFrame with variables for weekday index, weekday name, timedelta
    through day, fraction through day, hours through day and days through week
    added, with the index set to datetime. It is assumed that the variable
    `datetime` exists; False is returned when it does not.

    The `reindex` parameter is currently unused and is kept for interface
    compatibility.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # day_name() replaces dt.weekday_name, which was removed in pandas 1.0;
    # the produced values are identical.
    df["weekday_name"] = df["datetime"].dt.day_name()
    # Time elapsed since each timestamp's own midnight.
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    df.index = df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create daily plots of a variable in a DataFrame, optionally renormalized. It
    is assumed that the DataFrame index is datetime. Draws onto the current
    matplotlib figure as a side effect; returns False on a non-datetime index.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    days = []
    # NOTE(review): groups by day-of-month, so the same day number from
    # different months is merged into one group — confirm data spans at most
    # one month.
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        if renormalize:
            # Rescale each day's values to [0, 1] independently.
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create weekly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the variable `days_through_week` exists. Draws onto the
    current matplotlib figure as a side effect; returns False when the
    required variable is missing.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    weeks = []
    # NOTE(review): DatetimeIndex.week was removed in pandas 2.0; on modern
    # pandas this needs df.index.isocalendar().week — confirm target version.
    for group in df.groupby(df.index.week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            # Rescale each week's values to [0, 1] independently.
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    # Label the horizontal axis at each day's midpoint.
    plt.xticks(
        [ 0.5,      1.5,       2.5,         3.5,        4.5,      5.5,        6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create yearly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the DataFrame index is datetime. Draws onto the current
    matplotlib figure as a side effect; returns False on a non-datetime index.

    NOTE(review): horizontal_axis_labels_days is accepted but never used.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year in years:
        if renormalize:
            # Rescale each year's values to [0, 1] independently.
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        if plot:
            plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # Label the horizontal axis at approximate month midpoints.
        plt.xticks(
            [ 15.5,      45,         74.5,    105,     135.5, 166,    196.5,  227.5,    258,         288.5,     319,        349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def rescale_variables(
    df,
    variables_include = [],
    variables_exclude = []
):
    """
    Rescale variables in a DataFrame (in place, via MinMaxScaler), excluding
    variables with NaNs and strings, excluding specified variables, and
    including specified variables.
    """
    # Work on a copy: the previous implementation extended variables_exclude
    # in place, mutating both the caller's list and the shared default
    # argument across calls.
    variables_not_rescale = list(variables_exclude)
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Create a day-long histogram of counts of the variable for each hour of the
    DataFrame's datetime index. Draws onto the current matplotlib figure as a
    side effect; returns False on a non-datetime index.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # Group by the hour component of the index (0-23) and count non-null
    # values of the variable.
    counts = df.groupby(df.index.hour)[variable].count()
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
):
    """
    Create a week-long histogram of counts of the variable for each weekday of
    the DataFrame's datetime index. Draws onto the current matplotlib figure
    as a side effect; returns False on a non-datetime index.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # day_name() replaces DatetimeIndex.weekday_name, which was removed in
    # pandas 1.0; reindexing by calendar.day_name keeps Monday first.
    counts = df.groupby(df.index.day_name())[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
):
    """
    Create a year-long histogram of counts of the variable for each month of
    the DataFrame's datetime index. Draws onto the current matplotlib figure
    as a side effect; returns False on a non-datetime index.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # Group by month name and reindex so months appear in calendar order
    # (calendar.month_name[0] is the empty string, hence the [1:] slice).
    counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Set up a Jupyter notebook with a few defaults.

    Mutates global state: seaborn style, the warnings filter, pandas display
    options and the default matplotlib figure size.
    """
    sns.set(context = "paper", font = "monospace")
    # Silence all warnings for the notebook session.
    warnings.filterwarnings("ignore")
    pd.set_option("display.max_rows", 500)
    pd.set_option("display.max_columns", 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
):
    """
    Display a progress bar widget in a Jupyter notebook. Its dependencies must
    be enabled on launching Jupyter, such as in the following way:

    jupyter nbextension enable --py widgetsnbextension

    The progress bar can be used in a way like the following:

    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)

    Yields the elements of sequence unchanged, updating the widget every
    `every` elements (by default roughly every 0.5 % of `size`).
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    # A sequence without a length is treated as an iterator of unknown size.
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200)     # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        # Indeterminate bar: no known maximum.
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name  = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name  = name,
                        index = index,
                        size  = size
                    )
            yield record
    except:
        # Turn the bar red on any failure, then re-raise.
        progress.bar_style = "danger"
        raise
    else:
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name  = name,
            index = str(index or "?")
        )
_main()
|
wdbm/shijian
|
shijian.py
|
rescale_variables
|
python
|
def rescale_variables(
df,
variables_include = [],
variables_exclude = []
):
variables_not_rescale = variables_exclude
variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
variables_rescale = list(set(df.columns) - set(variables_not_rescale))
variables_rescale.extend(variables_include)
scaler = MinMaxScaler()
df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
return df
|
Rescale variables in a DataFrame, excluding variables with NaNs and strings,
excluding specified variables, and including specified variables.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1740-L1756
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
name = "shijian"
version = "2018-06-02T1644Z"
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    """Initialise the module-level clock registry used by Clock instances."""
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
):
    """Return the current UTC time styled as UNIX time (seconds by default)."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def time_UTC(
    style = None
):
    """Return the current UTC time styled per style_datetime_object."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
):
    """Return a proposed (non-clobbering) filename from current UNIX time."""
    name = str(time_UNIX(style = style))
    if extension:
        name += extension
    return propose_filename(filename = name)
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
):
    """Return a proposed (non-clobbering) filename from current UTC time."""
    name = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
    if extension:
        name += extension
    return propose_filename(filename = name)
def style_minimal_seconds(seconds):
    """Express seconds as e.g. "2 days 3 minutes", omitting zero-valued units."""
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for unit in ("days", "hours", "minutes", "seconds"):
        quantity = getattr(delta, unit)
        if quantity:
            parts.append("{} {}".format(int(quantity), unit))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """Style a UNIX timestamp (seconds) as a UTC datetime string."""
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(datetime_object = moment, style = style)
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Style a datetime.datetime or datetime.timedelta object as a string (or,
    for the UNIX-time styles, a number) according to a named style. Unknown
    styles fall back to the filename-safe "YYYY-MM-DDTHHMMZ" form; unknown
    object types yield None implicitly.
    """
    if type(datetime_object) is datetime.datetime:
        # filename safe
        if style == "YYYY-MM-DDTHHMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
        # filename safe with seconds
        elif style == "YYYY-MM-DDTHHMMSSZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%SZ")
        # filename safe with seconds and microseconds
        elif style == "YYYY-MM-DDTHHMMSSMMMMMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%S%fZ")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS UTC":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S UTC")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS Z":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S Z")
        # UNIX time in seconds with second fraction
        elif style == "UNIX time S.SSSSSS":
            return (datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds()
        # UNIX time in seconds rounded
        elif style == "UNIX time S":
            return int((datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds())
        # human-readable date
        elif style == "day DD month YYYY":
            return datetime_object.strftime("%A %d %B %Y")
        # human-readable time and date
        elif style == "HH:MM day DD month YYYY":
            return datetime_object.strftime("%H:%M %A %d %B %Y")
        # human-readable time with seconds and date
        elif style == "HH:MM:SS day DD month YYYY":
            return datetime_object.strftime("%H:%M:%S %A %d %B %Y")
        # human-readable date with time with seconds
        elif style == "day DD month YYYY HH:MM:SS":
            return datetime_object.strftime("%A %d %B %Y %H:%M:%S")
        # human-readable-audible time with seconds and date
        elif style == "HH hours MM minutes SS sounds day DD month YYYY":
            return datetime_object.strftime("%H hours %M minutes %S seconds %A %d %B %Y")
        # human-readable days, hours and minutes
        elif style == "DD:HH:MM":
            return datetime_object.strftime("%d:%H:%M")
        # human-readable days, hours, minutes and seconds
        elif style == "DD:HH:MM:SS":
            return datetime_object.strftime("%d:%H:%M:%S")
        # human-readable time with seconds
        elif style == "HH:MM:SS":
            return datetime_object.strftime("%H:%M:%S")
        # human-readable-audible time with seconds
        elif style == "HH hours MM minutes SS seconds":
            return datetime_object.strftime("%H hours %M minutes %S seconds")
        # filename safe
        else:
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
    if type(datetime_object) is datetime.timedelta:
        # For timedeltas, style is a format template with placeholder fields
        # such as {DD} and {HH} (see the format call below).
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        # NOTE(review): a timedelta always has a seconds attribute, so the
        # else branch below appears unreachable for timedelta input — confirm.
        if hasattr(datetime_object, "seconds"):
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        # Decompose total seconds into years/days/hours/minutes/seconds,
        # keeping both the running totals and the per-unit remainders.
        seconds_total = seconds
        minutes = int(math.floor(seconds / 60))
        minutes_total = minutes
        seconds -= minutes * 60
        hours = int(math.floor(minutes / 60))
        hours_total = hours
        minutes -= hours * 60
        days = int(math.floor(hours / 24))
        days_total = days
        hours -= days * 24
        years = int(math.floor(days / 365))
        years_total = years
        days -= years * 365
        return style.format(**{
            "Y"   : years_total,
            "D"   : days_total,
            "H"   : hours_total,
            "M"   : minutes_total,
            "S"   : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD"  : str(days).zfill(2),
            "HH"  : str(hours).zfill(2),
            "MM"  : str(minutes).zfill(2),
            "SS"  : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert a 24-hour "HHMM" time string to minutes past midnight."""
    return int(HHMM[:2]) * 60 + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time as minutes past midnight."""
    current = datetime.datetime.utcnow()
    return current.hour * 60 + current.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop  = None  # string "HHMM" e.g. "1000"
):
    """
    Return True when the current UTC time falls within a daily time range
    (which may wrap past midnight, e.g. "1700--1000"), False otherwise.
    Return None when no range is specified at all.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        parts = time_range.split("--")
        time_start, time_stop = parts[0], parts[1]
    minutes_per_day = 1440
    start = HHMM_to_minutes(time_start)
    stop = HHMM_to_minutes(time_stop)
    current = now_in_minutes()
    # Modular arithmetic handles ranges that wrap past midnight.
    return (current - start) % minutes_per_day <= \
           (stop - start) % minutes_per_day
def timer(function):
    """
    Decorator that times each call of the decorated function using a Clock
    named after the function. Each Clock registers itself with the global
    clocks collection (if one exists), so timings appear in clock reports.
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # The original also built a call-arguments mapping via the
        # deprecated inspect.getcallargs and never used it; removed.
        clock = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    A stopwatch that accumulates elapsed time.

    A clock starts on instantiation by default, can be stopped, updated and
    reset, and reports its start time, stop time and total elapsed seconds.
    If a module-level ``clocks`` collection exists, each new clock registers
    itself with it on construction.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Record the start time (UTC) and begin accumulating elapsed time."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Stop the clock, folding any outstanding time into the accumulator."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        """Fold the time since the last update (or since start) into the
        accumulator and remember this update time."""
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the working start time."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        """Return the accumulated elapsed time as a timedelta."""
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock's name."""
        return self._name
    def time(self):
        """Return the accumulated elapsed time in seconds."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multi-line text report of the clock's attributes."""
        # NOTE(review): ljust(40) is applied to strings that begin with a
        # newline, so the visible label column is effectively 39 wide.
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the clock's report."""
        print(self.report())
class Clocks(object):
    """
    A registry of Clock instances that can produce aggregate timing reports.

    Report styles:
    - "statistics": the mean recorded time per clock name
    - "full": one line per registered clock instance
    """
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock instance for inclusion in reports."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """
        Return a text report of the registered clocks in the given style
        (default: the registry's default style). Return "no clocks" when no
        clocks are registered; raise ValueError for an unrecognized style.
        """
        if style is None:
            style = self._default_report_style
        if not self._list_of_clocks:
            return "no clocks"
        if style == "statistics":
            # Group the recorded times of all clocks by clock name.
            times_by_name = collections.defaultdict(list)
            for clock in self._list_of_clocks:
                times_by_name[clock.name()].append(clock.time())
            # Report the mean time for each clock name.
            string = "clock type".ljust(39) + "mean time (s)"
            for name, values in times_by_name.items():
                string += "\n" +\
                    str(name).ljust(39) + str(sum(values) / len(values))
            string += "\n"
            return string
        if style == "full":
            # Report the time of every individual clock.
            string = "clock".ljust(39) + "time (s)"
            for clock in self._list_of_clocks:
                string += "\n" +\
                    str(clock.name()).ljust(39) + str(clock.time())
            string += "\n"
            return string
        # The original left ``string`` unassigned for an unknown style with
        # clocks registered, crashing with UnboundLocalError; fail clearly.
        raise ValueError("unknown report style: {style}".format(style = style))
    def printout(
        self,
        style = None
    ):
        """Print a report of the registered clocks."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track the progress of a long-running task as (fraction complete, UNIX
    time) data and estimate its completion time by fitting a linear model
    to the recorded data.
    """
    def __init__(
        self
    ):
        # recorded (fraction complete, UNIX time) tuples
        self.data = []
        # when True, rate-limit recording and thin the data used for fitting
        self.quick_calculation = False
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Rate-limit datum recording and fit on a spread of the data."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Record every datum and fit on all recorded data."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """
        Record a fraction-complete datum timestamped with the current UNIX
        time and return a status message. In quick-calculation mode, data
        are recorded at most once per ``update_rate`` seconds (the very
        first datum is always recorded).
        """
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """
        Fit time as a linear function of fraction complete and extrapolate
        to fraction 1. Return the estimate as a datetime; return 0 with
        fewer than two data, or the epoch datetime on a fitting failure.
        """
        if len(self.data) <= 1:
            return 0
        else:
            try:
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                # Evaluate the fitted line at fraction x = 1 (completion).
                x = 1
                y = b0 + b1 * x
            except:
                # Fitting failed (e.g. degenerate data); fall back to 0.
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the estimated completion time as a styled string (the
        current time when fewer than two data have been recorded)."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated number of seconds remaining (never negative,
        0 when fewer than two data have been recorded)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded fraction complete."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recently recorded completion percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a one-line progress status message (only the default
        style is implemented; other styles return None)."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """Return a universally unique identifier as a string."""
    identifier = uuid.uuid4()
    return str(identifier)
def unique_number(
    style = None
):
    """
    Return a number guaranteed unique within the running program by
    recording previously-issued numbers in a module-level global list.

    With style "integer 3 significant figures", numbers start at 100 and an
    Exception is raised once the 3-digit space (maximum 999) is exhausted.
    With any other style, numbers start at 1 and count upwards indefinitely.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        initial_number = 100
        if "unique_numbers_3_significant_figures" not in globals():
            global unique_numbers_3_significant_figures
            unique_numbers_3_significant_figures = []
        if not unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(initial_number)
        else:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        # The original re-checked the style here redundantly (we are already
        # inside that branch) and raised a bare Exception with no message.
        if unique_numbers_3_significant_figures[-1] > 999:
            raise Exception("3 significant figure numbers exhausted")
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        initial_number = 1
        if "unique_numbers" not in globals():
            global unique_numbers
            unique_numbers = []
        if not unique_numbers:
            unique_numbers.append(initial_number)
        else:
            unique_numbers.append(unique_numbers[-1] + 1)
        return unique_numbers[-1]
def unique_3_digit_number():
    """Return a program-unique 3-digit integer (100-999)."""
    requested_style = "integer 3 significant figures"
    return unique_number(style = requested_style)
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Make text safe for use as a filename or URL: normalize it to a close
    ASCII representation, drop unsafe characters and replace whitespace.
    For filenames, whitespace runs become underscores; for URLs, the text is
    lowercased and whitespace/hyphen runs become single hyphens.
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    # Raw strings: the original used "\w"/"\s" escapes in plain string
    # literals, which are invalid escape sequences (DeprecationWarning).
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
# @detail This function returns a filename string. If a default filename is not
# specified, the function generates one based on the current time. If a default
# filename is specified, the function uses it as the default filename. By
# default, the function then checks to see if using the filename would cause
# overwriting of an existing file. If overwriting is possible, the function
# appends an integer to the filename in a loop in order to generate a filename
# that would not cause overwriting of an existing file. The function can be set
# to overwrite instead of using the default overwrite protection behaviour.
# @return filename string
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    # If no file name is specified, generate one from the current UTC time.
    if not filename:
        filename = time_UTC()
    filename_proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            # Slugify only the base name, keeping the extension unchanged.
            base = os.path.splitext(os.path.basename(filename))[0]
            extension = os.path.splitext(os.path.basename(filename))[1]
            filename_proposed = slugify(text = base) + extension
        else:
            filename_proposed = slugify(text = filename)
    if not overwrite:
        # Loop-invariant parts of the candidate names are computed once (the
        # original recomputed dirname/splitext on every loop iteration).
        # Note: collision suffixes are derived from the original (unslugified)
        # filename, matching the original behaviour.
        directory = os.path.dirname(filename)
        base = os.path.splitext(os.path.basename(filename))[0]
        extension = os.path.splitext(os.path.basename(filename))[1]
        count = 0
        while os.path.exists(filename_proposed):
            count = count + 1
            candidate = base + "_" + str(count) + extension
            if directory:
                filename_proposed = directory + "/" + candidate
            else:
                filename_proposed = candidate
    return filename_proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating a
    file at the filepath.
    """
    # Avoid the private tempfile._get_candidate_names API the original used;
    # a UUID hex string is collision-resistant and uses only public APIs.
    return "/tmp/" + uuid.uuid4().hex
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return the last ``lines`` lines of the specified file as bytes (via the
    system ``tail`` utility). Return False if the file does not exist, is
    empty, or an error occurs.
    """
    try:
        filepath = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(filepath):
            return False
        text = subprocess.check_output(["tail", "-" + str(lines), filepath])
        # An empty file yields empty bytes, which is reported as False.
        return text if text else False
    # Narrowed from a bare except, which also hid programming errors such
    # as NameError; these cover path expansion and subprocess failures.
    except (OSError, subprocess.CalledProcessError, TypeError, ValueError):
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Check that the platform release string contains ``keyphrase``. On a
    mismatch, optionally log a warning and, when required, log a fatal
    message and raise EnvironmentError.
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message = (
        "inappropriate environment: " +
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release = release
        )
    )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """Log and raise EnvironmentError when ``program`` is not on the PATH."""
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is not None:
        log.debug("program {program} available".format(
            program = program
        ))
        return
    log.error("program {program} not available".format(
        program = program
    ))
    raise(EnvironmentError)
def which(
    program
):
    """
    Locate an executable like the shell ``which`` command: return the full
    path of ``program`` if executable, searching PATH when no directory
    component is given; otherwise return None.
    """
    def _executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        # An explicit path is accepted only if it is directly executable.
        if _executable(program):
            return program
    else:
        for path_entry in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(path_entry.strip('"'), program)
            if _executable(candidate):
                return candidate
    return None
def running(
    program
):
    """
    Return True when a non-defunct process whose ``ps -A`` line contains
    ``program`` is running, otherwise False.
    """
    pattern = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    return any(
        pattern in line and b"defunct" not in line for line in listing
    )
def ensure_file_existence(
    filename
):
    """Log and raise IOError when the specified file does not exist."""
    # The original log messages contained "(unknown)" where the {filename}
    # placeholder belonged, so .format had no effect; placeholders restored.
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the file at the specified path."""
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Find filenames at ``directory`` that contain digits and end with
    ``extension``, grouped into sequences by their non-digit pattern (digit
    runs replaced by "XXX"). Return the first sequence naturally sorted, or
    a dictionary of all sequences keyed by pattern.
    """
    filenames_of_directory = os.listdir(directory)
    # Raw strings: the original "\d" escapes were invalid escape sequences
    # in ordinary string literals.
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        pattern = re.sub(r"\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        first_key_identified = next(iter(filename_sequences.keys()))
        return natural_sort(filename_sequences[first_key_identified])
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the names of the regular files at ``directory``."""
    entries = os.listdir(directory)
    return [
        entry for entry in entries
        if os.path.isfile(os.path.join(directory, entry))
    ]
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """Return the paths of all files under ``directory``, recursively."""
    collected = []
    for root, _, filenames in os.walk(directory):
        collected.extend(os.path.join(root, name) for name in filenames)
    return collected
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return the absolute paths of the regular files at ``directory``,
    optionally keeping only those whose extension contains
    ``extension_required``. Raise IOError when the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for entry in os.listdir(directory):
        candidate = os.path.abspath(os.path.join(directory, entry))
        if os.path.isfile(candidate):
            filepaths.append(candidate)
    if extension_required:
        filepaths = [
            path for path in filepaths
            if extension_required in os.path.splitext(path)[1]
        ]
    return filepaths
def engage_command(
    command = None,
    background = True,
    timeout = None
):
    """
    Run a shell command via Bash. In background mode, launch the command and
    return None immediately (timeout is ignored, with a warning). In
    foreground mode, return the command's standard output as bytes, or False
    when it fails to complete within ``timeout`` seconds.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash"
        )
        return None
    process = subprocess.Popen(
        [command],
        shell = True,
        executable = "/bin/bash",
        stdout = subprocess.PIPE
    )
    try:
        # communicate() both waits and drains stdout; the original called
        # wait() first, which can deadlock once the pipe buffer fills. The
        # original bare except has also been narrowed to the timeout case.
        output, errors = process.communicate(timeout = timeout)
        return output
    except subprocess.TimeoutExpired:
        process.kill()
        # Reap the killed process to avoid leaving a zombie behind.
        process.communicate()
        return False
def percentage_power():
    """
    Return the battery charge percentage as a string (e.g. "85%") using
    upower, "100%" when only line power is detected, or None when power
    information is unavailable.
    """
    try:
        # Run in the foreground so output is actually captured: the original
        # used engage_command's background default, which returns None, so
        # parsing always failed and this function always returned None.
        output = engage_command(command = "upower -e", background = False)
        if isinstance(output, bytes):
            output = output.decode("utf-8")
        device_names = [line for line in output.split("\n") if line]
        battery_names = [name for name in device_names if "battery" in name]
        line_power_names = [name for name in device_names if "line" in name]
        if battery_names:
            # The original format string had its {filename} placeholder
            # corrupted to "(unknown)"; restored here.
            power_data = engage_command(
                command = "upower -i {filename}".format(filename = battery_names[0]),
                background = False
            )
            if isinstance(power_data, bytes):
                power_data = power_data.decode("utf-8")
            percentage_lines = [line for line in power_data.split("\n") if "percentage" in line]
            return percentage_lines[0].split()[1]
        elif line_power_names:
            return "100%"
        else:
            return None
    except Exception:
        # Best-effort: any failure (no upower, unexpected output) yields None.
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a
    list to a specified type and return the new list.
    """
    # Generalized: the original only honoured element_type=str and silently
    # returned None for any other type; any callable converter now works.
    return [
        convert_type_list_elements(
            list_object = element,
            element_type = element_type
        ) if isinstance(element, list) else element_type(element)
        for element in list_object
    ]
class List_Consensus(list):
    """
    This class is designed to instantiate a list of elements. It features
    functionality that limits approximately the memory usage of the list. On
    estimating the size of the list as greater than the specified or default
    size limit, the list reduces the number of elements it contains. The list
    provides functionality to return its most frequent element, which can be
    used to determine its "consensus" element.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        # approximate size limit enforced by ensure_size
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate size limit (bytes) enforced by ensure_size."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        This function removes the least frequent elements until the size
        constraint is met.
        """
        if size is None:
            size = self.size_constraint
        # NOTE(review): sys.getsizeof is shallow -- it measures the list
        # structure only, not the contained elements, so the constraint is
        # approximate by design.
        while sys.getsizeof(self) > size:
            element_frequencies = collections.Counter(self)
            # Remove one occurrence of the least frequent element.
            infrequent_element = element_frequencies.most_common()[-1:][0][0]
            self.remove(infrequent_element)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append an element, then optionally enforce the size constraint."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """
        Return the most frequent element, or None when the list is empty or
        contains unhashable elements.
        """
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        except:
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
):
    """Sort strings so that embedded numbers order numerically, e.g. a2 < a10."""
    def _token(text):
        return int(text) if text.isdigit() else text.lower()
    def _key(entry):
        return [_token(part) for part in re.split("([0-9]+)", entry)]
    return sorted(list_object, key = _key)
def indices_of_list_element_duplicates(
    x
):
    """
    Yield the indices of elements that duplicate an earlier element. Lists
    and dictionaries are converted to tuples so they can be compared.
    """
    observed = set()
    for index, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
):
    """
    Return the indices of the ``number`` greatest values of ``x``, ordered
    from smaller to greater value.
    """
    if len(x) <= number:
        number = len(x)
    ranked = sorted(range(len(x)), key = lambda index: x[index])
    # Note: a negative-index slice preserves the original's behaviour,
    # including the number == 0 edge case.
    return ranked[-number:]
def unique_list_elements(x):
    """Return the elements of ``x`` without duplicates, preserving order."""
    # Membership is tested against the output list itself so that
    # unhashable elements (e.g. lists) are supported.
    deduplicated = []
    for element in x:
        if element in deduplicated:
            continue
        deduplicated.append(element)
    return deduplicated
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    This function returns the specified number of elements of a list spread
    approximately evenly.
    """
    # Fewer (or equally many) elements than requested: return them all.
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    # One element requested: take the middle element of the list.
    if number_of_elements == 1:
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # Take an element near the centre of the first of number_of_elements
    # equal segments, then recurse on the remainder of the list for the
    # remaining number_of_elements - 1 selections.
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def split_list(
    list_object = None,
    granularity = None
):
    """
    Split a list into approximately ``granularity`` parts and return a list
    of these sublists. A granularity greater than or equal to the number of
    elements yields one single-element list per element; a non-positive
    granularity raises an Exception.
    """
    # The original rejected only negative granularities and then crashed on
    # granularity 0 with an obscure ZeroDivisionError; reject both up front.
    if granularity <= 0:
        raise Exception("granularity must be positive")
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    mean_length = len(list_object) / float(granularity)
    split_list_object = []
    last_length = float(0)
    # Advance by the (fractional) mean part length, truncating the slice
    # bounds, so the parts cover the list without overlap.
    while last_length < len(list_object):
        split_list_object.append(
            list_object[int(last_length):int(last_length + mean_length)]
        )
        last_length += mean_length
    return split_list_object
def ranges_edge_pairs(
    extent = None,
    range_length = None
):
    """
    Return the edges of ranges within an extent of some length. For example,
    to separate 76 variables into groups of at most 20 variables, the ranges
    of the variables could be 0 to 20, 21 to 41, 42 to 62 and 63 to 76:
    >>> ranges_edge_pairs(extent = 76, range_length = 20)
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    number_of_ranges = int(math.ceil(extent / range_length))
    edge_pairs = []
    for index in range(number_of_ranges):
        start = index * (range_length + 1)
        # The final range is clipped to the extent.
        stop = min(start + range_length, extent)
        edge_pairs.append((start, stop))
    return edge_pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list of "- name: value" entries into a
    nested dictionary; an entry without a value starts a nested branch.
    """
    entry = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [{}]
    for indentation, name, value in entry.findall(Markdown_list):
        level = len(indentation)
        if level > depth:
            assert not stack[-1], "unexpected indent"
        elif level < depth:
            stack.pop()
        if value:
            stack[-1][name] = value
        else:
            # No value: open a nested branch and descend into it.
            branch = {}
            stack[-1][name] = branch
            stack.append(branch)
        depth = level
    return stack[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list of "- name: value" entries into a
    nested OrderedDict; an entry without a value starts a nested branch.
    """
    entry = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [collections.OrderedDict()]
    for indentation, name, value in entry.findall(Markdown_list):
        level = len(indentation)
        if level > depth:
            assert not stack[-1], "unexpected indent"
        elif level < depth:
            stack.pop()
        if value:
            stack[-1][name] = value
        else:
            # No value: open a nested branch and descend into it.
            branch = collections.OrderedDict()
            stack[-1][name] = branch
            stack.append(branch)
        depth = level
    return stack[0]
def open_configuration(
    filename = None
):
    """
    Read a Markdown bullet-list configuration file and return it parsed as a
    nested OrderedDict.
    """
    # Context manager closes the file handle (the original leaked it).
    with open(filename, "r") as configuration_file:
        content = configuration_file.read()
    return Markdown_list_to_OrderedDict(content)
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
):
    """
    Resample a list of values to a specified length by interpolation over
    the integer index positions. Return the resampled values, or an (x, y)
    pair when dimensions is 2.
    """
    x_original = list(range(0, len(values)))
    interpolate = scipy.interpolate.interp1d(
        x_original,
        values,
        kind = interpolation_type
    )
    x_resampled = list(numpy.linspace(min(x_original), max(x_original), length))
    y_resampled = [float(interpolate(x)) for x in x_resampled]
    if dimensions == 1:
        return y_resampled
    elif dimensions == 2:
        return (x_resampled, y_resampled)
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """
    Convert a waveform (a numeric array) in place to a rectangle waveform:
    non-negative samples become the maximum value and negative samples the
    minimum value. The mutated array is also returned.
    """
    scale = 1 / fraction_amplitude
    positive_level = fraction_amplitude * max(values)
    values[values >= 0] = positive_level
    # min is evaluated after the positive samples were replaced, matching
    # the original evaluation order.
    negative_level = fraction_amplitude * min(values)
    values[values < 0] = negative_level
    values[:] = [sample * scale for sample in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
):
    """
    Read a WAV file, convert its waveform to a rectangle waveform and write
    the result, defaulting to an overwrite-protected variant of the input
    filename.
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    # The original applied the rectangle transformation a second time by
    # repeating its statements inline after the helper call, which degrades
    # integer sample data through truncation; one application suffices.
    values = change_waveform_to_rectangle_waveform(
        values = values,
        fraction_amplitude = fraction_amplitude
    )
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """
    Divide each element of ``x`` by ``summation`` (by default the sum of
    ``x``, normalizing to unity) and return the resulting list.
    """
    divisor = sum(x) if summation is None else summation
    return [element / divisor for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """
    Linearly rescale the elements of ``x`` to span [minimum, maximum] and
    return the resulting list.
    """
    lowest = min(x)
    scale = (maximum - minimum) / (max(x) - lowest)
    return [minimum + (element - lowest) * scale for element in x]
def composite_variable(
    x
):
    """
    Combine the elements of ``x`` into a single number, weighting element i
    by (len(x) + 1) ** (i - 1).
    """
    base = len(x) + 1
    return sum(
        base ** (index - 1) * element for index, element in enumerate(x)
    )
def model_linear(
    data = None,
    quick_calculation = False
):
    """
    Fit a least-squares line to (x, y) pairs and return its intercept and
    slope as a tuple (b0, b1). With quick_calculation, fit on a spread of
    only 10 of the points.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x = sum(x for x, _ in data)
    sum_y = sum(y for _, y in data)
    sum_x_squared = sum(x ** 2 for x, _ in data)
    sum_xy = sum(x * y for x, y in data)
    # standard least-squares normal equations
    b1 = (sum_xy - (sum_x * sum_y) / n) / \
         (sum_x_squared - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
):
    """
    Load and return a pickled object from the specified file.

    NOTE(security): pickle deserialization can execute arbitrary code; use
    only with trusted files.
    """
    # Context manager closes the file handle (the original leaked it).
    with open(filename, "rb") as pickle_file:
        return pickle.load(pickle_file)
def export_object(
    x,
    filename = None,
    overwrite = False
):
    """
    Pickle object ``x`` to a file, by default refusing to overwrite an
    existing file (an integer suffix is appended instead).
    """
    filename = propose_filename(
        filename = filename,
        overwrite = overwrite
    )
    # Context manager closes the file handle (the original leaked it).
    with open(filename, "wb") as pickle_file:
        pickle.dump(x, pickle_file)
def string_to_bool(x):
    """Return True when ``x`` is a truthy string such as "yes" or "1"."""
    truthy = ("yes", "true", "t", "1")
    return x.lower() in truthy
def ustr(text):
    """
    Convert ``text`` to Python 2 unicode or Python 3 str as appropriate to
    the running interpreter; None passes through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
):
    """
    Return the English-text representation of a non-negative integer (given
    as an int or a digit string), e.g. 123 --> "one hundred twenty three".
    Supports numbers up to the vigintillions (the 10**63 group).
    """
    # word tables, indexed by digit value
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    # scale words, indexed by 3-digit group position (thousands[1] for the
    # 10**3 group, thousands[2] for the 10**6 group, ...)
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    # Walk the digit string from the right in steps of 3 digits.
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        # progression < 0 means the leading group has fewer than 3 digits.
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        # digits of this group: units, tens and hundreds respectively
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                # 10-19 use the dedicated teens table.
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """
    Replace every run of digits in ``text`` with its English-text
    representation and return the resulting string.
    """
    # Split the text into alternating non-digit and digit segments. The raw
    # string fixes the original's invalid escape sequence "(\d+)".
    segments = re.split(r"(\d+)", text)
    if segments[-1] == "":
        segments = segments[:-1]
    translated = []
    # Replace digit segments with English text; keep the rest unchanged.
    for segment in segments:
        if all(character.isdigit() for character in segment):
            translated.append(number_to_English_text(number = segment))
        else:
            translated.append(segment)
    return "".join(translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
):
    """
    This function replaces contractions with full words and replaces numbers
    with digits in specified text. There is the option to remove articles.
    """
    # The lookup tables are built once per call; the original rebuilt both
    # dictionaries on every word of the text.
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    translated_words = []
    for word in text.split():
        if remove_articles and word in ("a", "an", "the"):
            continue
        # Expand contractions first, then (as in the original) attempt the
        # digit replacement on the possibly-expanded word.
        word = contractions_expansions.get(word, word)
        word = numbers_digits.get(word, word)
        translated_words.append(word)
    return " ".join(translated_words)
def split_into_sentences(
    text = None
):
    """
    Split text into a list of sentences by protecting periods that do not
    end sentences (honorifics, acronyms, websites, single initials) with a
    <prd> placeholder, marking genuine sentence boundaries with <stop>, and
    then splitting on <stop>. Any text after the final terminator is dropped.
    """
    capitals = "([A-Z])"
    prefixes = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    starters = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = "[.](com|gov|io|net|org|pro)"
    # Pad the text so boundary patterns match at the ends; flatten newlines.
    text = " " + text + " "
    text = text.replace("\n", " ")
    # Protect non-terminal periods with the <prd> placeholder.
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub("\s" + capitals + "[.] ", " \\1<prd> ", text)
    # An acronym followed by a sentence starter does end a sentence.
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + capitals + "[.]", " \\1<prd>", text)
    # Move terminators inside closing quotes so the split keeps them together.
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    # Remaining terminators are genuine sentence boundaries.
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    # Restore the protected periods.
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    # Drop the trailing segment after the final terminator.
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
):
    """
    Drop the leading (possibly incomplete) sentence of ``text`` and return
    the remaining complete sentences joined by spaces.
    """
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """Return a pseudorandom MAC address string such as "01:23:45:67:89:ab"."""
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{octet:02x}".format(octet = octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
):
    """
    Return the named attribute of an object, imputing a default value if the
    attribute is missing or inaccessible.

    The name may include a single integer index in square brackets (e.g.
    "scores[2]"), in which case the attribute is indexed before being
    returned.
    """
    try:
        if "[" in name and "]" in name:
            # "attribute[3]" --> getattr(object_instance, "attribute")[3]
            index = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    except (AttributeError, IndexError, KeyError, TypeError, ValueError):
        # missing attribute, malformed index or out-of-range index: impute
        # (previously a bare except, which also hid unrelated errors)
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
):
    """
    Return a list of valid Python identifiers generated from UUIDs; only
    hyphen-free 32-character hexadecimal UUID strings whose first character
    is alphabetic are kept.
    """
    names = []
    while len(names) < number:
        candidate = uuid.uuid4().hex
        if candidate[0].isalpha():
            names.append(candidate)
    return names
def add_time_variables(df, reindex = True):
    """
    Return the DataFrame with derived time variables added: month, month
    name, weekday index, weekday name, timedelta/fraction/hours through the
    day, days through the week and days through the year. The variable
    `datetime` must exist; it is parsed to datetime if necessary. If
    `reindex` is True, the index is set to the datetime values (the
    `datetime` column is kept). Return False if `datetime` is missing.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # .dt.weekday_name was removed in pandas 1.0; .dt.day_name() is the
    # equivalent replacement.
    df["weekday_name"] = df["datetime"].dt.day_name()
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    # vectorized fractional weekday position (0 = Monday 00:00); the previous
    # row-wise apply computed the same value
    df["days_through_week"] = df["weekday"] + df["fraction_through_day"]
    df["days_through_year"] = df["datetime"].dt.dayofyear
    # the reindex parameter was previously accepted but ignored
    if reindex:
        df.index = df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Plot a variable day-by-day on a shared hours-of-day axis, optionally
    renormalized to [0, 1] per day. The DataFrame index must be datetime;
    return False if it is not.

    NOTE(review): days are grouped by day-of-month (df.index.day), so equal
    day numbers from different months share one group -- confirm intended.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    day_frames = [group for _, group in df.groupby(df.index.day)]
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day_frame in day_frames:
        if renormalize:
            values = scaler.fit_transform(day_frame[[variable]])
        else:
            values = day_frame[variable]
        if plot:
            plt.plot(day_frame["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day_frame["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Plot a variable week-by-week on a shared days-of-week axis, optionally
    renormalized to [0, 1] per week. The variable `days_through_week` must
    exist and the index is assumed to be datetime; return False if the
    variable is missing.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    # DatetimeIndex.week was removed in pandas 2.0; isocalendar().week is the
    # equivalent ISO week number.
    weeks = []
    for group in df.groupby(df.index.isocalendar().week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    plt.xticks(
        [ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Plot a variable year-by-year on a shared days-of-year axis, optionally
    renormalized to [0, 1] per year and with month labels on the horizontal
    axis. The DataFrame index must be datetime; return False if it is not.

    NOTE(review): horizontal_axis_labels_days is accepted but currently has
    no effect.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    year_frames = [group for _, group in df.groupby(df.index.year)]
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year_frame in year_frames:
        if renormalize:
            values = scaler.fit_transform(year_frame[[variable]])
        else:
            values = year_frame[variable]
        if plot:
            plt.plot(year_frame["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year_frame.index.year.values[0])
        if scatter:
            plt.scatter(year_frame["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        plt.xticks(
            [ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df = None,
    variable = None,
    window = 20,
    upper_factor = 2,
    lower_factor = 2
):
    """
    Add rolling mean, rolling standard deviation and mean +/- factor * sigma
    band variables derived from the specified variable, and return the
    DataFrame.
    """
    # pd.stats.moments.rolling_mean/rolling_std were removed from pandas;
    # Series.rolling is the modern equivalent.
    df[variable + "_rolling_mean"] = df[variable].rolling(window).mean()
    df[variable + "_rolling_standard_deviation"] = df[variable].rolling(window).std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Plot a day-long histogram of counts of the variable per hour of day. The
    DataFrame index must be datetime; return False if it is not.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    hourly_counts = df.groupby(df.index.hour)[variable].count()
    hourly_counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
):
    """
    Plot a week-long histogram of counts of the variable per day of week,
    ordered Monday through Sunday. The DataFrame index must be datetime;
    return False if it is not.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # DatetimeIndex.weekday_name was removed in pandas 1.0; day_name() is the
    # equivalent replacement.
    counts = df.groupby(df.index.day_name())[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
):
    """
    Plot a year-long histogram of counts of the variable per month, ordered
    January through December. The DataFrame index must be datetime; return
    False if it is not.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    monthly_counts = df.groupby(df.index.strftime("%B"))[variable].count()
    monthly_counts = monthly_counts.reindex(calendar.month_name[1:])
    monthly_counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Apply default settings for working in a Jupyter notebook: seaborn paper
    context with a monospace font, warnings suppressed, pandas display limits
    raised to 500 rows/columns and a large default figure size.
    """
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    for option in ("display.max_rows", "display.max_columns"):
        pd.set_option(option, 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
):
    """
    Display a progress bar widget in a Jupyter notebook while yielding the
    elements of a sequence. Its dependencies must be enabled on launching
    Jupyter, such as in the following way:

    jupyter nbextension enable --py widgetsnbextension

    The progress bar can be used in a way like the following:

    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            # sequences without len() are treated as open-ended iterators
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200) # every 0.5 %
    else:
        # an open-ended iterator requires an explicit update interval
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        # indeterminate bar, styled "info"
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        # update the widget every `every` iterations while yielding elements
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name = name,
                        index = index,
                        size = size
                    )
            yield record
    except:
        # mark the bar red on any error in the consuming loop, then re-raise
        progress.bar_style = "danger"
        raise
    else:
        # mark the bar green on successful completion
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name = name,
            index = str(index or "?")
        )
# Instantiate the module-level clock registry on import.
_main()
|
wdbm/shijian
|
shijian.py
|
histogram_hour_counts
|
python
|
def histogram_hour_counts(
df,
variable
):
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
counts = df.groupby(df.index.hour)[variable].count()
counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
|
Create a day-long histogram of counts of the variable for each hour. It is
assumed that the DataFrame index is datetime and that the variable
`hour` exists.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1758-L1771
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
# module metadata
name = "shijian"
version = "2018-06-02T1644Z"
# module-level logger with colorised console output
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    """Create the module-level global clock registry `clocks`."""
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
):
    """Return the current UTC time styled, by default as rounded UNIX seconds."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def time_UTC(
    style = None
):
    """Return the current UTC time styled (default style of style_datetime_object applies to None)."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
):
    """
    Return a proposed collision-free filename based on the current UNIX time,
    with an optional extension appended.
    """
    filename = str(time_UNIX(style = style))
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
):
    """
    Return a proposed collision-free filename based on the current UTC time,
    with an optional extension appended.
    """
    filename = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def style_minimal_seconds(seconds):
    """
    Return a seconds count as a minimal human-readable string of days, hours,
    minutes and seconds, omitting zero-valued intervals
    (e.g. 3661 --> "1 hours 1 minutes 1 seconds").
    """
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for interval in ("days", "hours", "minutes", "seconds"):
        amount = getattr(delta, interval)
        if amount:
            parts.append("{amount} {interval}".format(amount = int(amount), interval = interval))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """Return a UNIX timestamp (seconds) styled as a UTC datetime string."""
    datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(datetime_object = datetime_object, style = style)
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Return a datetime.datetime or datetime.timedelta styled as a string (or,
    for the UNIX styles, as a number).

    For datetime objects, named styles select an strftime format, the UNIX
    styles return seconds since the epoch (float, or rounded int), and any
    unrecognised style falls back to the filename-safe "YYYY-MM-DDTHHMMZ"
    format. For timedelta objects, the style string is formatted with
    zero-padded {YYYY}/{DD}/{HH}/{MM}/{SS} fields and unpadded totals
    {Y}/{D}/{H}/{M}/{S}; the default style maps to "{DD} days, {HH}:{MM}:{SS}".
    Other input types yield None.
    """
    if type(datetime_object) is datetime.datetime:
        epoch = datetime.datetime.utcfromtimestamp(0)
        # UNIX time in seconds with second fraction
        if style == "UNIX time S.SSSSSS":
            return (datetime_object - epoch).total_seconds()
        # UNIX time in seconds rounded
        if style == "UNIX time S":
            return int((datetime_object - epoch).total_seconds())
        # styles that map directly to strftime formats
        strftime_formats = {
            "YYYY-MM-DDTHHMMZ":           "%Y-%m-%dT%H%MZ",       # filename safe
            "YYYY-MM-DDTHHMMSSZ":         "%Y-%m-%dT%H%M%SZ",     # with seconds
            "YYYY-MM-DDTHHMMSSMMMMMMZ":   "%Y-%m-%dT%H%M%S%fZ",   # with microseconds
            "YYYY-MM-DD HH:MM:SS UTC":    "%Y-%m-%d %H:%M:%S UTC",
            "YYYY-MM-DD HH:MM:SS Z":      "%Y-%m-%d %H:%M:%S Z",
            "day DD month YYYY":          "%A %d %B %Y",
            "HH:MM day DD month YYYY":    "%H:%M %A %d %B %Y",
            "HH:MM:SS day DD month YYYY": "%H:%M:%S %A %d %B %Y",
            "day DD month YYYY HH:MM:SS": "%A %d %B %Y %H:%M:%S",
            "HH hours MM minutes SS sounds day DD month YYYY":
                "%H hours %M minutes %S seconds %A %d %B %Y",
            "DD:HH:MM":                   "%d:%H:%M",
            "DD:HH:MM:SS":                "%d:%H:%M:%S",
            "HH:MM:SS":                   "%H:%M:%S",
            "HH hours MM minutes SS seconds":
                "%H hours %M minutes %S seconds",
        }
        # unknown styles fall back to the filename-safe format
        return datetime_object.strftime(strftime_formats.get(style, "%Y-%m-%dT%H%MZ"))
    if type(datetime_object) is datetime.timedelta:
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            seconds_total = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds_total = int(datetime_object)
        # successive divmods produce remainders plus running totals
        minutes_total, seconds = divmod(seconds_total, 60)
        hours_total, minutes = divmod(minutes_total, 60)
        days_total, hours = divmod(hours_total, 24)
        years_total, days = divmod(days_total, 365)
        return style.format(**{
            "Y":    years_total,
            "D":    days_total,
            "H":    hours_total,
            "M":    minutes_total,
            "S":    seconds_total,
            "YYYY": str(years_total).zfill(4),
            "DD":   str(days).zfill(2),
            "HH":   str(hours).zfill(2),
            "MM":   str(minutes).zfill(2),
            "SS":   str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert a "HHMM" time string to minutes past midnight."""
    return 60 * int(HHMM[:2]) + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time as minutes past midnight."""
    current = datetime.datetime.utcnow()
    return current.hour * 60 + current.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop = None   # string "HHMM" e.g. "1000"
):
    """
    Return True if the current UTC time falls within the daily time range,
    which may wrap past midnight. The range may be given either as a single
    "HHMM--HHMM" string or as separate start and stop "HHMM" strings; return
    None if no range is specified.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        time_start, time_stop = time_range.split("--")[0], time_range.split("--")[1]
    minutes_per_day = 1440
    start = HHMM_to_minutes(time_start)
    stop = HHMM_to_minutes(time_stop)
    # modular arithmetic handles ranges that wrap past midnight
    return (now_in_minutes() - start) % minutes_per_day <= (stop - start) % minutes_per_day
def timer(function):
    """
    Decorator that times each call of the decorated function with a Clock
    named after the function; timings accumulate in the global clock
    registry when one exists.
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # The previous implementation also computed inspect.getcallargs and
        # discarded the result; that call is deprecated and has been removed.
        clock = Clock(name = function.__name__)  # starts on instantiation
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    Timer that accumulates elapsed wall-clock time (UTC) between start and
    stop. A clock with no explicit name receives a UUID name, and a clock
    self-registers in the module-level `clocks` registry when one exists.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Start (or restart) timing from now."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Stop timing, folding the open interval into the accumulator."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        """Add the time since the last update (or the start) to the accumulator."""
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the working start time."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the accumulated elapsed time in seconds."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multi-line textual report of the clock state."""
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the report."""
        print(self.report())
class Clocks(object):
    """Registry of Clock instances with aggregate reporting."""
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock instance."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """
        Return a textual report of the registered clocks: "statistics" shows
        the mean time per clock name, "full" lists every clock individually,
        and an empty registry reports "no clocks".
        """
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks != []:
            if style == "statistics":
                # Group clock times by clock name (insertion order preserved).
                times_by_name = {}
                for clock in self._list_of_clocks:
                    times_by_name.setdefault(clock.name(), []).append(clock.time())
                # Report the mean time for each clock name.
                string = "clock type".ljust(39) + "mean time (s)"
                for clock_name, times in list(times_by_name.items()):
                    string += "\n" +\
                        str(clock_name).ljust(39) + str(sum(times) / len(times))
                string += "\n"
            elif style == "full":
                # Report every clock individually.
                string = "clock".ljust(39) + "time (s)"
                for clock in self._list_of_clocks:
                    string += "\n" +\
                        str(clock.name()).ljust(39) + str(clock.time())
                string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Tracks completion fraction over time and estimates a completion time by
    fitting a line (via `model_linear`) to fraction-versus-UNIX-time data.
    """
    def __init__(
        self
    ):
        self.data = []                  # list of (fraction, UNIX time) tuples
        self.quick_calculation = False  # rate-limit recording when True
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Rate-limit datum recording to roughly one per update_rate second."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Record every datum."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """
        Record a (fraction, UNIX time) datum (rate-limited in quick
        calculation mode) and return the current status message.
        """
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """
        Return the estimated completion time as a datetime (the time at which
        the fitted line reaches fraction 1); return 0 with fewer than two
        data, or the epoch datetime if the fit fails.
        """
        if len(self.data) <= 1:
            return 0
        else:
            try:
                # linear fit: time = b0 + b1 * fraction; evaluate at fraction 1
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            except:
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the styled estimated completion time string."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated remaining seconds (0 with insufficient data)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded completion fraction."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recent completion fraction as a percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a one-line status message with percentage, ETA and ETR."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """Return a universally unique identifier as a string."""
    return "{identifier}".format(identifier = uuid.uuid4())
def unique_number(
    style = None
):
    """
    Return a number unique for the lifetime of the process. The default mode
    yields successive integers starting at 1; the mode "integer 3 significant
    figures" yields successive integers starting at 100 and raises an
    exception beyond 999. State is kept in lazily-created module-level lists.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        global unique_numbers_3_significant_figures
        if "unique_numbers_3_significant_figures" not in globals():
            unique_numbers_3_significant_figures = []
        if unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        else:
            unique_numbers_3_significant_figures.append(100)
        if unique_numbers_3_significant_figures[-1] > 999:
            # 3-significant-figure numbers are exhausted
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        global unique_numbers
        if "unique_numbers" not in globals():
            unique_numbers = []
        if unique_numbers:
            unique_numbers.append(unique_numbers[-1] + 1)
        else:
            unique_numbers.append(1)
        return unique_numbers[-1]
def unique_3_digit_number():
    """Return the next unique integer in the range 100-999."""
    return unique_number(style = "integer 3 significant figures")
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Make text safe for use as a filename or URL: normalise Unicode to ASCII,
    strip characters other than word characters, whitespace and hyphens, then
    collapse whitespace to underscores (filename mode) or lowercase and
    collapse whitespace/hyphens to single hyphens (URL mode).
    """
    if not sys.version_info >= (3, 0):
        # Python 2: decode bytes before Unicode normalisation
        text = unicode(text, "utf-8")
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    # raw strings avoid invalid-escape-sequence warnings in the regexes
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
# @detail This function returns a filename string. If a default filename is not
# specified, the function generates one based on the current time. If a default
# filename is specified, the function uses it as the default filename. By
# default, the function then checks to see if using the filename would cause
# overwriting of an existing file. If overwriting is possible, the function
# appends an integer to the filename in a loop in order to generate a filename
# that would not cause overwriting of an existing file. The function can be set
# to overwrite instead of using the default overwrite protection behaviour.
# @return filename string
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    """
    Return a proposed filename, generated from the current UTC time if none
    is given, optionally slugified (by default excluding the extension), and
    -- unless overwriting is enabled -- suffixed with an incrementing counter
    until it no longer collides with an existing file.
    """
    # If no file name is specified, generate one.
    if not filename:
        filename = time_UTC()
    filename_proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base, extension = os.path.splitext(os.path.basename(filename))
            filename_proposed = slugify(text = base) + extension
        else:
            filename_proposed = slugify(text = filename)
    if not overwrite:
        # append _1, _2, ... until the proposed name does not exist
        count = 0
        directory = os.path.dirname(filename)
        base, extension = os.path.splitext(os.path.basename(filename))
        while os.path.exists(filename_proposed):
            count += 1
            candidate = base + "_" + str(count) + extension
            if directory:
                filename_proposed = directory + "/" + candidate
            else:
                filename_proposed = candidate
    return filename_proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating a
    file at the filepath.
    """
    # uuid4 replaces tempfile._get_candidate_names, a private API that has
    # changed between Python versions
    return "/tmp/" + uuid.uuid4().hex
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return the last `lines` lines of the specified file (as bytes, via the
    `tail` utility), or False if the file does not exist, is empty, or
    cannot be read.
    """
    try:
        filepath = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(filepath):
            return False
        text = subprocess.check_output(["tail", "-" + str(lines), filepath])
        return text if text else False
    except:
        # deliberately best-effort: any failure is reported as False
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Check that the platform release string contains the keyphrase;
    optionally warn on mismatch, and raise EnvironmentError when the
    keyphrase is required but absent.
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message =\
        "inappropriate environment: " +\
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release = release
        )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """
    Verify that a program is available (per `which`), raising
    EnvironmentError if it is not.
    """
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is None:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
    log.debug("program {program} available".format(
        program = program
    ))
def which(
    program
):
    """
    Return the full path of an executable program, found either directly
    (when a path component is given) or by searching PATH; return None if it
    is not found (analogous to the shell command `which`).
    """
    def is_executable(filepath):
        return os.path.isfile(filepath) and os.access(filepath, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        if is_executable(program):
            return(program)
    else:
        for path_entry in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(path_entry.strip('"'), program)
            if is_executable(candidate):
                return candidate
    return None
def running(
    program
):
    """
    Return True if a process whose `ps -A` entry contains the program name is
    running (defunct processes excluded), otherwise False.
    """
    needle = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    return any(needle in line and b"defunct" not in line for line in listing)
def ensure_file_existence(
    filename
):
    """
    Verify that the specified file exists (after environment-variable
    expansion), raising IOError if it does not.
    """
    # The log messages previously contained "(unknown)" where the {filename}
    # placeholder belonged, so .format(filename = ...) had no effect; the
    # placeholders are restored here.
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the specified file."""
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Find filenames in a directory that contain digits and have the given
    extension, grouped into sequences by their digit-masked pattern. By
    default return the first sequence found, naturally sorted (an empty list
    if none are found); otherwise return the dictionary of all sequences
    keyed by pattern.
    """
    filenames_of_directory = os.listdir(directory)
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        # filenames differing only in their digit runs share one pattern key
        pattern = re.sub(r"\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        if not filename_sequences:
            # no sequences found (previously raised StopIteration on the
            # empty dict)
            return []
        first_key_identified = next(iter(filename_sequences.keys()))
        filename_sequence = \
            natural_sort(filename_sequences[first_key_identified])
        return filename_sequence
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the list of filenames (not directories) at a directory."""
    return [
        entry for entry in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, entry))
    ]
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """
    Return the list of filepaths of all files under a directory, descending
    into subdirectories recursively.
    """
    filepaths = []
    for root, _, filenames in os.walk(directory):
        filepaths.extend(os.path.join(root, filename) for filename in filenames)
    return filepaths
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return absolute filepaths of files at a directory, optionally filtered to
    those whose extension contains a specified string. Raise IOError if the
    directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for entry in os.listdir(directory):
        candidate = os.path.join(directory, entry)
        if os.path.isfile(candidate):
            filepaths.append(os.path.abspath(candidate))
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command = None,
    background = True,
    timeout = None
):
    """
    Run a shell (Bash) command. In background mode (the default) the command
    is launched without waiting and None is returned; in foreground mode the
    command's standard output is returned (as bytes on Python 3), or False
    on timeout or communication failure.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash"
        )
        return None
    elif not background:
        process = subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash",
            stdout = subprocess.PIPE
        )
        try:
            process.wait(timeout = timeout)
            output, errors = process.communicate(timeout = timeout)
            return output
        except:
            # timeout or communication failure: terminate and report failure
            process.kill()
            return False
    else:
        return None
def percentage_power():
    """
    Return the battery charge percentage as a string (e.g. "85%") using
    upower, "100%" when only line power is present, or None on failure.
    """
    try:
        # upower output must be captured, so run in the foreground (the
        # previous call used the default background mode, which returns None)
        output = engage_command(command = "upower -e", background = False)
        if isinstance(output, bytes):
            output = output.decode()
        filenames_power = [line for line in output.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # the device path placeholder was previously corrupted, so upower
            # was queried without a target device
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery), background = False)
            if isinstance(power_data, bytes):
                power_data = power_data.decode()
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    except:
        # deliberately best-effort: any failure is reported as None
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a
    list to a specified type and return the new list.

    Generalized: any callable type (e.g. str, int, float) is supported, not
    only str (for which other types previously returned None); sublists are
    recursed into rather than converted.
    """
    return [
        element_type(element) if not isinstance(element, list)
        else convert_type_list_elements(
            list_object = element,
            element_type = element_type
        )
        for element in list_object
    ]
class List_Consensus(list):
    """
    This class is designed to instantiate a list of elements. It features
    functionality that limits approximately the memory usage of the list. On
    estimating the size of the list as greater than the specified or default
    size limit, the list reduces the number of elements it contains. The list
    provides functionality to return its most frequent element, which can be
    used to determine its "consensus" element.

    NOTE(review): sys.getsizeof on a list is shallow (the elements' own sizes
    are not counted), so the constraint is approximate.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate size limit in bytes; None leaves it unchanged."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        This function removes the least frequent elements until the size
        constraint is met.
        """
        if size is None:
            size = self.size_constraint
        while sys.getsizeof(self) > size:
            element_frequencies = collections.Counter(self)
            infrequent_element = element_frequencies.most_common()[-1:][0][0]
            self.remove(infrequent_element)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append an element, then optionally enforce the size constraint."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None if the list is empty."""
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        except:
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list,
# comparing embedded digit runs numerically and text case-insensitively.
def natural_sort(
    list_object
    ):
    def _token(text):
        # digit runs compare as integers; other text compares case-insensitively
        return int(text) if text.isdigit() else text.lower()
    def _key(entry):
        return [_token(chunk) for chunk in re.split("([0-9]+)", entry)]
    return sorted(list_object, key = _key)
def indices_of_list_element_duplicates(
    x
    ):
    """
    Yield the index of each element that has appeared earlier in the list.
    Unhashable lists and dicts are converted to tuples for comparison.
    """
    observed = set()
    for index, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
    ):
    """
    Return the indices of the greatest values of a list, in ascending order of
    value, limited to at most the specified number of indices.
    """
    count = min(number, len(x))
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [pair[0] for pair in ranked[-count:]]
def unique_list_elements(x):
    """
    Return the elements of a list with duplicates removed, preserving the
    order of first appearance. Works with unhashable elements.
    """
    seen = []
    for element in x:
        if element in seen:
            continue
        seen.append(element)
    return seen
def select_spread(
    list_of_elements = None,
    number_of_elements = None
    ):
    """
    This function returns the specified number of elements of a list spread
    approximately evenly.
    """
    # If the list is already small enough, return it unchanged.
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    # A single requested element is taken from approximately the middle.
    if number_of_elements == 1:
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # Otherwise take one element from near the start of the list, then recurse
    # on the remainder with one fewer element required.
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def split_list(
    list_object = None,
    granularity = None
    ):
    """
    Split a list into a specified number of sublists and return a list of
    those sublists. Negative numbers of parts raise an exception; numbers of
    parts greater than the number of elements yield the maximum possible
    number of sublists (one element each).
    """
    if granularity < 0:
        raise Exception("negative granularity")
    step = len(list_object) / float(granularity)
    if len(list_object) <= granularity:
        # more parts requested than elements available: one sublist per element
        return [[element] for element in list_object]
    parts = []
    position = float(0)
    while position < len(list_object):
        parts.append(list_object[int(position):int(position + step)])
        position += step
    return parts
def ranges_edge_pairs(
    extent = None,
    range_length = None
    ):
    """
    Return the edges of ranges within an extent of some length. For example, to
    separate 76 variables into groups of at most 20 variables, the ranges of the
    variables could be 0 to 20, 21 to 41, 42 to 62 and 63 to 76. These range
    edges could be returned by this function as a list of tuples:

    >>> ranges_edge_pairs(
    ...     extent       = 76, # number of variables
    ...     range_length = 20  # maximum number of variables per plot
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    count = int(math.ceil(extent / range_length))
    pairs = []
    for index in range(count):
        lower = index * range_length + index
        upper = min((index + 1) * range_length + index, extent)
        pairs.append((lower, upper))
    return pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
    ):
    """
    Parse a Markdown bullet list of the form "- name: value" (nesting by
    indentation) into a nested dictionary. Items with no value become
    sub-dictionaries.
    """
    pattern = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branches = [{}]
    for indentation, key, value in pattern.findall(Markdown_list):
        level = len(indentation)
        if level > current_depth:
            assert not branches[-1], "unexpected indent"
        elif level < current_depth:
            branches.pop()
        branches[-1][key] = value or {}
        if not value:
            # descend into the newly-created branch
            branches.append(branches[-1][key])
        current_depth = level
    return branches[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
    ):
    """
    Parse a Markdown bullet list of the form "- name: value" (nesting by
    indentation) into a nested OrderedDict, preserving item order. Items with
    no value become sub-OrderedDicts.
    """
    pattern = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branches = [collections.OrderedDict()]
    for indentation, key, value in pattern.findall(Markdown_list):
        level = len(indentation)
        if level > current_depth:
            assert not branches[-1], "unexpected indent"
        elif level < current_depth:
            branches.pop()
        branches[-1][key] = value or collections.OrderedDict()
        if not value:
            # descend into the newly-created branch
            branches.append(branches[-1][key])
        current_depth = level
    return branches[0]
def open_configuration(
    filename = None
    ):
    """
    Read a Markdown-list configuration file and return its contents parsed as
    an OrderedDict.
    """
    # bug fix: use a context manager so the file handle is closed promptly
    # (previously open(...).read() leaked the handle)
    with open(filename, "r") as file_configuration:
        return Markdown_list_to_OrderedDict(file_configuration.read())
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
    ):
    """
    Resample a list of values to a specified length by interpolation over
    implicit integer abscissae. With dimensions = 1 return the resampled
    values; with dimensions = 2 return a tuple of the new abscissae and the
    resampled values.
    """
    abscissae = list(range(0, len(values)))
    interpolate = scipy.interpolate.interp1d(
        abscissae,
        values,
        kind = interpolation_type
    )
    new_abscissae = list(numpy.linspace(min(abscissae), max(abscissae), length))
    new_values = [float(interpolate(point)) for point in new_abscissae]
    if dimensions == 1:
        return new_values
    elif dimensions == 2:
        return (new_abscissae, new_values)
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
    ):
    """
    Convert a waveform (a NumPy array) in place to a rectangle waveform at the
    original extremes: non-negative samples become the maximum sample value
    and negative samples become the minimum sample value. The array is also
    returned. (Values are first clipped to a small fraction of the extremes,
    then scaled back up.)
    """
    peak = fraction_amplitude * max(values)
    values[values >= 0] = peak
    # note: the minimum is taken after the first assignment, matching the
    # original evaluation order (negative samples are unaffected by it)
    trough = fraction_amplitude * min(values)
    values[values < 0] = trough
    values[:] = [sample * (1 / fraction_amplitude) for sample in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform           = None,
    filename_rectangle_waveform = None,
    overwrite                   = False,
    fraction_amplitude          = 0.01
    ):
    """
    Read a WAV file, convert its waveform to a rectangle waveform and write
    the result to a new WAV file. When no output filename is given, one is
    proposed from the input filename (respecting the overwrite option).
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename  = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    values = change_waveform_to_rectangle_waveform(
        values             = values,
        fraction_amplitude = fraction_amplitude
    )
    # bug fix: a duplicated copy-paste of the clipping and rescaling steps was
    # removed here; the conversion above already produces the rectangle
    # waveform, so re-applying the steps was redundant.
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
    ):
    """
    Divide each element of a list by a normalisation factor and return the
    result. By default the factor is the sum of the elements (normalisation
    to unity).
    """
    factor = sum(x) if summation is None else summation
    return [value / factor for value in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
    ):
    """
    Linearly rescale the elements of a list to the range [minimum, maximum]
    and return the result. (A list whose elements are all equal raises
    ZeroDivisionError, as before.)
    """
    lowest = min(x)
    scale = (maximum - minimum) / (max(x) - lowest)
    return [minimum + (value - lowest) * scale for value in x]
def composite_variable(
    x
    ):
    """
    Combine the elements of a list into a single number by weighting each
    element with powers of the base k = len(x) + 1 (starting at k ** -1) and
    summing the weighted elements.
    """
    base = len(x) + 1
    return sum(
        base ** (index - 1) * element
        for index, element in enumerate(x)
    )
def model_linear(
    data = None,
    quick_calculation = False
    ):
    """
    Fit a simple least-squares line to a list of (x, y) pairs and return the
    intercept and slope as a tuple (b0, b1). With quick_calculation, the fit
    uses only a spread of 10 points taken from the data.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x         = sum(datum[0] for datum in data)
    sum_y         = sum(datum[1] for datum in data)
    sum_x_squared = sum(datum[0] ** 2 for datum in data)
    sum_xy        = sum(datum[0] * datum[1] for datum in data)
    # standard least-squares closed-form solution
    b1 = (sum_xy - (sum_x * sum_y) / n) / (sum_x_squared - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
    ):
    """Unpickle and return an object from a file."""
    # bug fix: use a context manager so the file handle is closed promptly
    # (previously pickle.load(open(...)) leaked the handle)
    with open(filename, "rb") as file_object:
        return pickle.load(file_object)
def export_object(
    x,
    filename  = None,
    overwrite = False
    ):
    """
    Pickle an object to a file, proposing a non-clobbering filename unless
    overwriting is requested.
    """
    filename = propose_filename(
        filename  = filename,
        overwrite = overwrite
    )
    # bug fix: use a context manager so the file handle is closed promptly
    # (previously pickle.dump(x, open(...)) leaked the handle)
    with open(filename, "wb") as file_object:
        pickle.dump(x, file_object)
def string_to_bool(x):
    """Return True when the string is a common affirmative ("yes", "true", "t" or "1"), case-insensitively."""
    return x.lower() in {"yes", "true", "t", "1"}
def ustr(text):
    """
    Convert a value to a text string appropriate to the running Python version
    (unicode on Python 2, str on Python 3). None is passed through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
    ):
    """
    Convert a non-negative integer (or a string of digits) to its English
    wording, e.g. 123 becomes "one hundred twenty three". Numbers up to the
    vigintillions (10 ** 63) are supported.
    """
    # Word tables; each entry carries a trailing space so words concatenate.
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            # Take 3, 2 or 1 digits depending on how much of the string remains.
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
    ):
    """
    Return the text with every run of digits replaced by its English wording
    (e.g. "10" becomes "ten").
    """
    # Split the text into alternating non-digit and digit segments.
    segments = re.split("(\d+)", text)
    if segments and segments[-1] == "":
        segments = segments[:-1]
    translated = []
    for segment in segments:
        if all(character.isdigit() for character in segment):
            translated.append(number_to_English_text(number = segment))
        else:
            translated.append(segment)
    return "".join(translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
    ):
    """
    This function replaces contractions with full words and replaces numbers
    with digits in specified text. There is the option to remove articles.
    """
    # performance fix: these translation tables are constants; define them
    # once, rather than rebuilding both dictionaries for every word as before
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    translated_words = []
    for word in text.split():
        # optionally drop articles entirely
        if remove_articles and word in ["a", "an", "the"]:
            continue
        if word in contractions_expansions:
            word = contractions_expansions[word]
        if word in numbers_digits:
            word = numbers_digits[word]
        translated_words.append(word)
    # performance fix: join once instead of quadratic string concatenation
    return " ".join(translated_words)
def split_into_sentences(
    text = None
    ):
    """
    Split text into a list of sentences using regular-expression heuristics.
    Full stops belonging to abbreviations, acronyms and website suffixes are
    protected with a temporary <prd> marker, genuine sentence boundaries are
    marked with <stop>, and the text is finally split on the <stop> markers.
    """
    # Character classes and common abbreviation patterns.
    capitals = "([A-Z])"
    prefixes = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    starters = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = "[.](com|gov|io|net|org|pro)"
    text = " " + text + " "
    text = text.replace("\n", " ")
    # Protect dots in titles, website suffixes and initials with <prd>.
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub("\s" + capitals + "[.] ", " \\1<prd> ", text)
    # An acronym followed by a sentence starter is a genuine boundary.
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + capitals + "[.]", " \\1<prd>", text)
    # Move terminal punctuation outside closing quotation marks.
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    # Mark the remaining terminal punctuation as boundaries and restore dots.
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
    ):
    """
    Return the text with its first (possibly incomplete) sentence removed,
    rejoining the remaining sentences with single spaces.
    """
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """
    Return a pseudorandom MAC address as six colon-separated two-digit
    lowercase hexadecimal octets, e.g. "3a:0f:91:c2:7d:04".
    """
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{octet:02x}".format(octet = octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
    ):
    """
    Return the named attribute of an object, with optional single-index access
    using the syntax "attribute[index]". On any failure (missing attribute,
    bad index, unparsable name), return the imputation default value.
    """
    try:
        if "[" in name and "]" in name:
            # e.g. name = "values[2]": split into attribute name and index
            index = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    except Exception:
        # bug fix: narrowed from a bare except so SystemExit and
        # KeyboardInterrupt are not silently converted to the default value
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
    ):
    """
    Return a list of valid Python variable names: 32-character UUID hex
    strings whose first character is alphabetic.
    """
    names = []
    while len(names) < number:
        candidate = str(uuid.uuid4()).replace("-", "")
        # only accept candidates that start with a letter so the name is a
        # valid identifier
        if candidate[0].isalpha():
            names.append(candidate)
    return names
def add_time_variables(df, reindex = True):
    """
    Return a DataFrame with variables for month, month name, weekday index,
    weekday name, timedelta through day, fraction through day, hour, hours
    through day, days through week and days through year added, and with the
    index set to datetime. It is assumed that the variable `datetime` exists.
    Returns False when the `datetime` variable is missing.

    NOTE(review): the `reindex` parameter is accepted but currently unused
    (the index is always set) — kept for backward compatibility.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"]   = pd.to_datetime(df["datetime"])
    df["month"]      = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"]    = df["datetime"].dt.weekday
    # bug fix: Series.dt.weekday_name was removed in modern pandas; prefer
    # Series.dt.day_name() and fall back for very old pandas versions
    try:
        df["weekday_name"] = df["datetime"].dt.day_name()
    except AttributeError:
        df["weekday_name"] = df["datetime"].dt.weekday_name
    # time elapsed since midnight of each timestamp's day
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    df.index = df["datetime"]
    #del df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
    ):
    """
    Create daily plots of a variable in a DataFrame, optionally renormalized
    to [0, 1] per day (via MinMaxScaler). One line and/or scatter trace is
    drawn per day on the current matplotlib figure. It is assumed that the
    DataFrame index is datetime and that the variable `hours_through_day`
    exists. Returns False when the index is not datetime.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # group the rows by day-of-month; each group becomes one trace
    days = []
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        if renormalize:
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
    ):
    """
    Create weekly plots of a variable in a DataFrame, optionally renormalized
    to [0, 1] per week (via MinMaxScaler). One line and/or scatter trace is
    drawn per week on the current matplotlib figure, with the horizontal axis
    labelled by weekday. It is assumed that the variable `days_through_week`
    exists and that the DataFrame index is datetime. Returns False when
    `days_through_week` is missing.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    # group the rows by week-of-year; each group becomes one trace
    weeks = []
    for group in df.groupby(df.index.week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    # label mid-day positions with weekday names
    plt.xticks(
        [ 0.5,      1.5,       2.5,         3.5,        4.5,      5.5,        6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
    ):
    """
    Create yearly plots of a variable in a DataFrame, optionally renormalized
    to [0, 1] per year (via MinMaxScaler). One line and/or scatter trace is
    drawn per year on the current matplotlib figure, labelled by year, with
    optional month labels on the horizontal axis. It is assumed that the
    DataFrame index is datetime and that the variable `days_through_year`
    exists. Returns False when the index is not datetime.

    NOTE(review): the `horizontal_axis_labels_days` parameter is accepted but
    currently unused — kept for backward compatibility.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # group the rows by year; each group becomes one trace
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year in years:
        if renormalize:
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        if plot:
            plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # label approximate month-centre day positions with month names
        plt.xticks(
            [ 15.5,      45,         74.5,    105,     135.5, 166,    196.5,  227.5,    258,         288.5,     319,        349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df           = None,
    variable     = None,
    window       = 20,
    upper_factor = 2,
    lower_factor = 2
    ):
    """
    Add rolling statistics variables (rolling mean, rolling sample standard
    deviation and upper/lower bounds at the specified numbers of standard
    deviations) derived from a specified variable in a DataFrame, and return
    the DataFrame.
    """
    # bug fix: pandas.stats.moments (rolling_mean/rolling_std) was removed
    # from pandas; the Series.rolling interface computes the same statistics
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"]               = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = [],
    variables_exclude = []
    ):
    """
    Rescale variables in a DataFrame to [0, 1] (via MinMaxScaler), excluding
    variables with NaNs, strings, datetimes and timedeltas, excluding
    specified variables, and including specified variables. Return the
    DataFrame.
    """
    # bug fix: copy the exclusion list instead of aliasing it; previously the
    # caller's list (and the shared mutable default argument) was mutated by
    # the extend() calls below, accumulating entries across calls
    variables_not_rescale = list(variables_exclude)
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_day_counts(
    df,
    variable
    ):
    """
    Create a week-long histogram of counts of the variable for each day on the
    current matplotlib figure. It is assumed that the DataFrame index is
    datetime. Returns False when the index is not datetime.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # bug fix: DatetimeIndex.weekday_name was removed in modern pandas;
    # prefer day_name() and fall back for very old pandas versions
    try:
        day_names = df.index.day_name()
    except AttributeError:
        day_names = df.index.weekday_name
    counts = df.groupby(day_names)[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
    ):
    """
    Create a year-long histogram of counts of the variable for each month on
    the current matplotlib figure. It is assumed that the DataFrame index is
    datetime. Returns False when the index is not datetime.
    """
    if df.index.dtype not in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # group counts by full month name, reindexed into calendar order
    month_names = df.index.strftime("%B")
    counts = df.groupby(month_names)[variable].count().reindex(calendar.month_name[1:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Configure a Jupyter notebook with sensible defaults: a paper-context
    seaborn theme with a monospace font, suppressed warnings, generous pandas
    display limits and a large default figure size.
    """
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    for option in ("display.max_rows", "display.max_columns"):
        pd.set_option(option, 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size  = None,
    name  = "items"
    ):
    """
    Display a progress bar widget in a Jupyter notebook. Its dependencies must
    be enabled on launching Jupyter, such as in the following way:

    jupyter nbextension enable --py widgetsnbextension

    The progress bar can be used in a way like the following:

    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    # Determine the sequence size; sequences without a length are treated as
    # iterators of unknown size.
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200)     # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    # For iterators of unknown size, show an indeterminate ("info") bar.
    if is_iterator:
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        # Yield the elements through, updating the widget every `every` items.
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name  = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name  = name,
                        index = index,
                        size  = size
                    )
            yield record
    except:
        # On any error, colour the bar red and re-raise.
        progress.bar_style = "danger"
        raise
    else:
        # On completion, colour the bar green and show the final count.
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name  = name,
            index = str(index or "?")
        )
# Instantiate the module-level clocks registry on import.
_main()
|
wdbm/shijian
|
shijian.py
|
histogram_day_counts
|
python
|
def histogram_day_counts(
df,
variable
):
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
counts = df.groupby(df.index.weekday_name)[variable].count().reindex(calendar.day_name[0:])
counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
|
Create a week-long histogram of counts of the variable for each day. It is
assumed that the DataFrame index is datetime and that the variable
`weekday_name` exists.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1773-L1786
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
# module metadata
name = "shijian"
version = "2018-06-02T1644Z"
# module-level logger with colourised stream output
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    # Create the global registry to which Clock instances add themselves on
    # construction (Clock.__init__ checks for a "clocks" global).
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
    ):
    """Return the current UTC time styled as UNIX time (whole seconds by default)."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(
        datetime_object = now,
        style           = style
    )
def time_UTC(
    style = None
    ):
    """Return the current UTC time styled as specified (default style when None)."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(
        datetime_object = now,
        style           = style
    )
def filename_time_UNIX(
    style     = "UNIX time S.SSSSSS",
    extension = None
    ):
    """
    Return a proposed (non-clobbering) filename based on the current UNIX
    time, with an optional extension appended.
    """
    filename = str(time_UNIX(style = style))
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def filename_time_UTC(
    style     = "YYYY-MM-DDTHHMMSSZ",
    extension = None
    ):
    """
    Return a proposed (non-clobbering) filename based on the current UTC time,
    with an optional extension appended.
    """
    filename = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style           = style
    )
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def style_minimal_seconds(seconds):
    """
    Return a human-readable description of a duration in seconds, mentioning
    only the non-zero units, e.g. "1 hours 2 minutes 5 seconds".
    """
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for unit in ["days", "hours", "minutes", "seconds"]:
        quantity = getattr(delta, unit)
        if quantity:
            parts.append("{} {}".format(int(quantity), unit))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style     = "YYYY-MM-DDTHHMMZ"
    ):
    """Convert a UNIX timestamp to a styled UTC datetime string."""
    datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(
        datetime_object = datetime_object,
        style           = style
    )
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
    ):
    """
    Style a datetime.datetime as a string according to a named style, or style
    a datetime.timedelta (or a value castable to int seconds) using a format
    template with placeholders such as {DD}, {HH}, {MM}, {SS} (zero-padded)
    and {D}, {H}, {M}, {S} (totals). Unrecognised styles fall back to the
    filename-safe "YYYY-MM-DDTHHMMZ" form. Returns None for other input types.
    """
    if type(datetime_object) is datetime.datetime:
        # filename safe
        if style == "YYYY-MM-DDTHHMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
        # filename safe with seconds
        elif style == "YYYY-MM-DDTHHMMSSZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%SZ")
        # filename safe with seconds and microseconds
        elif style == "YYYY-MM-DDTHHMMSSMMMMMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%S%fZ")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS UTC":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S UTC")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS Z":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S Z")
        # UNIX time in seconds with second fraction
        elif style == "UNIX time S.SSSSSS":
            return (datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds()
        # UNIX time in seconds rounded
        elif style == "UNIX time S":
            return int((datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds())
        # human-readable date
        elif style == "day DD month YYYY":
            return datetime_object.strftime("%A %d %B %Y")
        # human-readable time and date
        elif style == "HH:MM day DD month YYYY":
            return datetime_object.strftime("%H:%M %A %d %B %Y")
        # human-readable time with seconds and date
        elif style == "HH:MM:SS day DD month YYYY":
            return datetime_object.strftime("%H:%M:%S %A %d %B %Y")
        # human-readable date with time with seconds
        elif style == "day DD month YYYY HH:MM:SS":
            return datetime_object.strftime("%A %d %B %Y %H:%M:%S")
        # human-readable-audible time with seconds and date
        elif style == "HH hours MM minutes SS sounds day DD month YYYY":
            return datetime_object.strftime("%H hours %M minutes %S seconds %A %d %B %Y")
        # human-readable days, hours and minutes
        elif style == "DD:HH:MM":
            return datetime_object.strftime("%d:%H:%M")
        # human-readable days, hours, minutes and seconds
        elif style == "DD:HH:MM:SS":
            return datetime_object.strftime("%d:%H:%M:%S")
        # human-readable time with seconds
        elif style == "HH:MM:SS":
            return datetime_object.strftime("%H:%M:%S")
        # human-readable-audible time with seconds
        elif style == "HH hours MM minutes SS seconds":
            return datetime_object.strftime("%H hours %M minutes %S seconds")
        # filename safe
        else:
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
    if type(datetime_object) is datetime.timedelta:
        # For timedeltas the datetime default style is replaced by a template.
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        # Decompose total seconds into years/days/hours/minutes/seconds,
        # keeping both the running totals and the per-unit remainders.
        seconds_total = seconds
        minutes       = int(math.floor(seconds / 60))
        minutes_total = minutes
        seconds      -= minutes * 60
        hours         = int(math.floor(minutes / 60))
        hours_total   = hours
        minutes      -= hours * 60
        days          = int(math.floor(hours / 24))
        days_total    = days
        hours        -= days * 24
        years         = int(math.floor(days / 365))
        years_total   = years
        days         -= years * 365
        return style.format(**{
            "Y"   : years_total,
            "D"   : days_total,
            "H"   : hours_total,
            "M"   : minutes_total,
            "S"   : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD"  : str(days).zfill(2),
            "HH"  : str(hours).zfill(2),
            "MM"  : str(minutes).zfill(2),
            "SS"  : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
    ):
    """Convert a time string of the form "HHMM" to minutes since midnight."""
    return int(HHMM[:2]) * 60 + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time expressed as minutes since midnight."""
    moment = datetime.datetime.utcnow()
    return moment.hour * 60 + moment.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop  = None  # string "HHMM" e.g. "1000"
    ):
    """
    Return True when the current UTC time falls within the specified daily
    time range (which may wrap past midnight), False when it does not, or
    None when no range is specified.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        time_start = time_range.split("--")[0]
        time_stop  = time_range.split("--")[1]
    minutes_per_day = 1440
    now   = now_in_minutes()
    start = HHMM_to_minutes(time_start)
    stop  = HHMM_to_minutes(time_stop)
    # modular arithmetic handles ranges that wrap past midnight
    return (now - start) % minutes_per_day <= (stop - start) % minutes_per_day
def timer(function):
    """
    Decorator that times each call of the decorated function using a Clock
    named after the function; the clock is stopped after the call returns.
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # removed an unused inspect.getcallargs(...) call that inspected the
        # call arguments on every invocation without using the result
        clock = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    A stopwatch-style timer. A clock accumulates elapsed wall-clock (UTC)
    time between start() and stop(), can be reset and restarted, and
    registers itself with a global `clocks` registry when one exists.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Record the start time and begin accumulating elapsed time."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Fold outstanding elapsed time into the accumulator, then record
        the stop time and clear the working timestamps."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        """Add the time elapsed since the last update (or since start, if
        never updated) to the accumulator."""
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the working start time."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the name of the clock."""
        return self._name
    def time(self):
        """Return the elapsed time in seconds."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multiline text report of the clock's attributes."""
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the clock's report."""
        print(self.report())
class Clocks(object):
    """
    A registry of Clock instances with aggregate reporting: the "statistics"
    style reports the mean time per clock name, while the "full" style lists
    every registered clock individually.
    """
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock instance."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """Return a text report of the registered clocks in the specified
        style ("statistics" or "full")."""
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks != []:
            if style == "statistics":
                # Create a dictionary of clock types with corresponding lists of
                # times for all instances.
                dictionary_of_clock_types = {}
                # Get the names of all clocks and add them to the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()] = []
                # Record the values of all clocks for their respective names in
                # the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()].append(clock.time())
                # Create a report, calculating the average value for each clock
                # type.
                string = "clock type".ljust(39) + "mean time (s)"
                for name, values in list(dictionary_of_clock_types.items()):
                    string += "\n" +\
                        str(name).ljust(39) + str(sum(values)/len(values))
                string += "\n"
            elif style == "full":
                # Create a report, listing the values of all clocks.
                string = "clock".ljust(39) + "time (s)"
                for clock in self._list_of_clocks:
                    string += "\n" +\
                        str(clock.name()).ljust(39) + str(clock.time())
                string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report for the registered clocks."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track the progress of a long-running task. Each datum is a tuple of
    (fraction complete, UNIX time); a linear fit over the data is
    extrapolated to fraction = 1 to estimate the completion time. In
    quick-calculation mode data are recorded at most once per update_rate
    seconds and the fit uses only a spread of the data.
    """
    def __init__(
        self
    ):
        self.data = []
        self.quick_calculation = False
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Rate-limit data recording and use approximate fitting."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Restore full data recording and exact fitting."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """Record a fraction-complete datum (rate-limited in
        quick-calculation mode) and return a status message."""
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """Extrapolate the linear fit to fraction = 1 and return the
        estimated completion time as a datetime (the epoch on fit failure,
        0 when fewer than two data exist)."""
        if len(self.data) <= 1:
            return 0
        else:
            try:
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            except:
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the estimated completion time as styled text (the current
        time when fewer than two data exist)."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated number of seconds remaining (0 when unknown
        or when the estimated completion time is already past)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded fraction complete."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recently recorded percentage complete."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a one-line progress message with percentage, ETA and
        estimated time remaining."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """Return a version-4 UUID as a string, for use as a unique identifier."""
    identifier = uuid.uuid4()
    return str(identifier)
def unique_number(
    style = None
):
    """
    Return an integer unique within the current process, tracked in a global
    list. With style "integer 3 significant figures" numbers count up from
    100 and an exception is raised beyond 999; otherwise numbers count up
    from 1.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        initial_number = 100
        if "unique_numbers_3_significant_figures" not in globals():
            global unique_numbers_3_significant_figures
            unique_numbers_3_significant_figures = []
        if not unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(initial_number)
        else:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        # More than three significant figures would be needed beyond 999.
        if\
            style == "integer 3 significant figures" and \
            unique_numbers_3_significant_figures[-1] > 999:
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        initial_number = 1
        if "unique_numbers" not in globals():
            global unique_numbers
            unique_numbers = []
        if not unique_numbers:
            unique_numbers.append(initial_number)
        else:
            unique_numbers.append(unique_numbers[-1] + 1)
        return unique_numbers[-1]
def unique_3_digit_number():
    """Return the next unique 3-digit integer (100--999); see unique_number."""
    return unique_number(style = "integer 3 significant figures")
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Normalize text to ASCII and strip characters that are unsafe in filenames
    or URLs. With filename = True (the default) runs of whitespace become
    underscores; with URL = True the text is lowercased and runs of
    whitespace and hyphens become single hyphens.
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    # Raw strings for the regex patterns avoid invalid-escape-sequence
    # warnings on Python >= 3.6 (the previous patterns used "\w", "\s").
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
# @detail This function returns a filename string. If a default filename is not
# specified, the function generates one based on the current time. If a default
# filename is specified, the function uses it as the default filename. By
# default, the function then checks to see if using the filename would cause
# overwriting of an existing file. If overwriting is possible, the function
# appends an integer to the filename in a loop in order to generate a filename
# that would not cause overwriting of an existing file. The function can be set
# to overwrite instead of using the default overwrite protection behaviour.
# @return filename string
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    # If no file name is specified, generate one.
    if not filename:
        filename = time_UTC()
    filename_proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            # Slugify the base name only, then reattach the extension.
            filename_base = os.path.splitext(os.path.basename(filename))[0]
            filename_extension = os.path.splitext(os.path.basename(filename))[1]
            filename_base = slugify(text = filename_base)
            filename_proposed = filename_base + filename_extension
        else:
            filename_proposed = slugify(text = filename)
    if not overwrite:
        # Append an increasing counter to the base name until the proposed
        # filename no longer collides with an existing file.
        # NOTE(review): the collision loop rebuilds names from the original
        # (unslugified) filename, discarding the slugified form -- confirm
        # this is intended.
        count = 0
        while os.path.exists(filename_proposed):
            count = count + 1
            filename_directory = os.path.dirname(filename)
            filename_base = os.path.splitext(os.path.basename(filename))[0]
            filename_extension = os.path.splitext(os.path.basename(filename))[1]
            if filename_directory:
                filename_proposed = filename_directory + \
                    "/" + \
                    filename_base + \
                    "_" + \
                    str(count) + \
                    filename_extension
            else:
                filename_proposed = filename_base + \
                    "_" + \
                    str(count) + \
                    filename_extension
    return filename_proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating a
    file at the filepath.
    """
    candidate = next(tempfile._get_candidate_names())
    return "/tmp/" + candidate
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return the last `lines` lines of the specified file as bytes (via the
    external `tail` command). Return False when the file does not exist, is
    empty, or cannot be read.
    """
    try:
        filepath = os.path.expanduser(os.path.expandvars(filepath))
        if os.path.isfile(filepath):
            text = subprocess.check_output(["tail", "-" + str(lines), filepath])
            # An empty file yields empty bytes, reported as False.
            if text:
                return text
            else:
                return False
        else:
            return False
    # Previously a bare except, which would also swallow KeyboardInterrupt
    # and SystemExit; best effort is preserved for ordinary failures only.
    except Exception:
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Check that the kernel release string contains the specified keyphrase
    (e.g. "el7" for EL 7 platforms). When it does not, optionally log a
    warning (warn = True) and, when required (require = True, the default),
    log a fatal message and raise EnvironmentError.
    """
    import platform
    release = platform.release()
    if keyphrase not in release:
        message =\
            "inappropriate environment: " +\
            "\"{keyphrase}\" required; \"{release}\" available".format(
                keyphrase = keyphrase,
                release = release
            )
        if warn is True:
            log.warning(message)
        if require is True:
            log.fatal(message)
            raise(EnvironmentError)
def ensure_program_available(
    program
):
    """
    Log and raise EnvironmentError if the specified program is not available
    on the PATH (as determined by `which`).
    """
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is None:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
    else:
        log.debug("program {program} available".format(
            program = program
        ))
def which(
    program
):
    """
    Locate an executable like the shell `which` command: return the full
    filepath of the program when it is found and executable, otherwise None.
    """
    def executable(filepath):
        return os.path.isfile(filepath) and os.access(filepath, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        # An explicit path is returned only when it is itself executable.
        if executable(program):
            return program
    else:
        # Search every directory on the PATH for the program.
        for path in os.environ["PATH"].split(os.pathsep):
            path = path.strip('"')
            candidate = os.path.join(path, program)
            if executable(candidate):
                return candidate
    return None
def running(
    program
):
    """
    Return True when `ps -A` lists a process whose line contains the
    specified program name (defunct processes are ignored), otherwise False.
    """
    # ps output is bytes, so the program name is encoded before matching.
    program = str.encode(program)
    results = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    matches_current = [
        line for line in results if program in line and b"defunct" not in line
    ]
    if matches_current:
        return True
    else:
        return False
def ensure_file_existence(
    filename
):
    """
    Log and raise IOError if the specified file does not exist; environment
    variables in the filename are expanded before the check.

    The log format strings previously lacked the {filename} placeholder, so
    the messages never contained the filename.
    """
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the file at the specified filepath."""
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Find filenames at the specified directory that contain a number and have
    the specified extension, grouping filenames that differ only in their
    numbers into sequences. Return a naturally-sorted list of filenames for
    the first sequence found, or a dictionary of all sequences keyed by
    pattern when return_first_sequence_only is False.

    The regex patterns are now raw strings, avoiding invalid-escape-sequence
    warnings on Python >= 3.6.
    """
    filenames_of_directory = os.listdir(directory)
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        # Replace every run of digits with a placeholder to derive the
        # sequence pattern.
        pattern = re.sub(r"\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        first_key_identified = next(iter(filename_sequences.keys()))
        filename_sequence = \
            natural_sort(filename_sequences[first_key_identified])
        return filename_sequence
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the names of the regular files at the specified directory."""
    filenames = []
    for entry in os.listdir(directory):
        if os.path.isfile(os.path.join(directory, entry)):
            filenames.append(entry)
    return filenames
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """Return the paths of all files below the specified directory, walking
    into subdirectories recursively."""
    return [
        os.path.join(root, filename)
        for root, directories, filenames in os.walk(directory)
        for filename in filenames
    ]
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return the absolute filepaths of the files at the specified directory,
    optionally keeping only those whose extension contains the specified
    text. Log an error and raise IOError when the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for entry in os.listdir(directory):
        candidate = os.path.join(directory, entry)
        if os.path.isfile(candidate):
            filepaths.append(os.path.abspath(candidate))
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command = None,
    background = True,
    timeout = None
):
    """
    Run a shell command. With background = True (the default) the command is
    launched without waiting and None is returned (any timeout is ignored,
    with a warning). With background = False the command's standard output is
    returned as bytes, or False when the command does not complete within the
    timeout.

    NOTE(security): the command is executed with shell = True; this must not
    be called with untrusted input.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash"
        )
        return None
    elif not background:
        process = subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash",
            stdout = subprocess.PIPE
        )
        try:
            process.wait(timeout = timeout)
            output, errors = process.communicate(timeout = timeout)
            return output
        # Previously a bare except, which would also swallow
        # KeyboardInterrupt; catch only ordinary exceptions such as
        # subprocess.TimeoutExpired.
        except Exception:
            process.kill()
            return False
    else:
        return None
def percentage_power():
    """
    Return the battery charge percentage reported by upower as a string
    (e.g. "85%"), "100%" when only line power is detected, or None when the
    power status cannot be determined.
    """
    try:
        filenames_power = engage_command(command = "upower -e")
        filenames_power = [line for line in filenames_power.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # The {filename} placeholder was previously missing, so upower
            # was queried with a literal, nonexistent device name.
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery))
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    # best effort: report None when upower is unavailable or unparsable
    except Exception:
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a list
    to a specified type and return the new list.

    Generalized: previously only element_type = str was handled and any other
    type silently returned None; now any callable type is applied
    recursively, with unchanged behavior for the str case.
    """
    return [
        element_type(element) if not isinstance(element, list)
        else convert_type_list_elements(
            list_object = element,
            element_type = element_type
        )
        for element in list_object
    ]
class List_Consensus(list):
    """
    This class is designed to instantiate a list of elements. It features
    functionality that limits approximately the memory usage of the list. On
    estimating the size of the list as greater than the specified or default
    size limit, the list reduces the number of elements it contains. The list
    provides functionality to return its most frequent element, which can be
    used to determine its "consensus" element.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        # approximate maximum size of the list (as measured by sys.getsizeof)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate maximum size of the list in bytes."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        This function removes the least frequent elements until the size
        constraint is met.
        """
        if size is None:
            size = self.size_constraint
        # NOTE(review): sys.getsizeof is shallow -- it measures the list
        # object itself, not the elements it references.
        while sys.getsizeof(self) > size:
            element_frequencies = collections.Counter(self)
            infrequent_element = element_frequencies.most_common()[-1:][0][0]
            self.remove(infrequent_element)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append an element, then optionally enforce the size constraint."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None for an empty list or
        when the elements cannot be counted (e.g. unhashable elements)."""
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        except:
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
):
    """Sort strings so that embedded numbers compare numerically and the
    non-numeric parts compare case-insensitively."""
    def tokenize(key):
        return [
            int(text) if text.isdigit() else text.lower()
            for text in re.split("([0-9]+)", key)
        ]
    return sorted(list_object, key = tokenize)
def indices_of_list_element_duplicates(
    x
):
    """
    Yield the index of every element that has already appeared earlier in the
    sequence; unhashable lists and dictionaries are compared by converting
    them to tuples.
    """
    observed = set()
    for index, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
):
    """
    Return the indices of the greatest values of a list, at most `number` of
    them, ordered from smaller to greater value.
    """
    count = min(number, len(x))
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, value in ranked[-count:]]
def unique_list_elements(x):
    """Return the elements of x with duplicates removed, keeping first-seen
    order; elements need not be hashable."""
    deduplicated = []
    for element in x:
        if element in deduplicated:
            continue
        deduplicated.append(element)
    return deduplicated
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    This function returns the specified number of elements of a list spread
    approximately evenly.
    """
    # Fewer (or equally many) elements than requested: return them all.
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    # A single element is taken from the middle of the list.
    if number_of_elements == 1:
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # Take one element near the start of the current window, then recurse on
    # the remainder of the list for the remaining elements.
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def split_list(
    list_object = None,
    granularity = None
):
    """
    This function splits a list into a specified number of lists. It returns a
    list of lists that correspond to these parts. Negative numbers of parts are
    not accepted and numbers of parts greater than the number of elements in the
    list result in the maximum possible number of lists being returned.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    mean_length = len(list_object) / float(granularity)
    if len(list_object) <= granularity:
        # More parts requested than elements: one single-element list each.
        return [[element] for element in list_object]
    parts = []
    position = float(0)
    while position < len(list_object):
        parts.append(list_object[int(position):int(position + mean_length)])
        position += mean_length
    return parts
def ranges_edge_pairs(
    extent = None,
    range_length = None
):
    """
    Return the edges of ranges within an extent of some length. For example, to
    separate 76 variables into groups of at most 20 variables, the ranges of the
    variables could be 0 to 20, 21 to 41, 42 to 62 and 63 to 76. These range
    edges could be returned by this function as a list of tuples:
    >>> ranges_edge_pairs(
    ...     extent       = 76, # number of variables
    ...     range_length = 20  # maximum number of variables per plot
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    number_of_ranges = int(math.ceil(extent / range_length))
    pairs = []
    for index in range(0, number_of_ranges):
        start = index * range_length + index
        stop = min((index + 1) * range_length + index, extent)
        pairs.append((start, stop))
    return pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list of lines of the form "- name" or
    "- name: value" into a nested dictionary; bullets without a value start a
    nested branch.

    NOTE(review): on dedent only one level is popped from the branch stack,
    so a dedent across multiple levels at once may misplace entries --
    confirm against expected inputs.
    """
    line = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [{}]
    for indent, name, value in line.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            assert not stack[-1], "unexpected indent"
        elif indent < depth:
            stack.pop()
        stack[-1][name] = value or {}
        if not value:
            # new branch
            stack.append(stack[-1][name])
        depth = indent
    return stack[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list of lines of the form "- name" or
    "- name: value" into a nested OrderedDict, preserving entry order;
    bullets without a value start a nested branch.

    NOTE(review): on dedent only one level is popped from the branch stack,
    so a dedent across multiple levels at once may misplace entries --
    confirm against expected inputs.
    """
    line = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [collections.OrderedDict()]
    for indent, name, value in line.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            assert not stack[-1], "unexpected indent"
        elif indent < depth:
            stack.pop()
        stack[-1][name] = value or collections.OrderedDict()
        if not value:
            # new branch
            stack.append(stack[-1][name])
        depth = indent
    return stack[0]
def open_configuration(
    filename = None
):
    """
    Read a Markdown-list configuration file and return it parsed as an
    OrderedDict.
    """
    # The with-statement ensures the file handle is closed (it was
    # previously leaked).
    with open(filename, "r") as file_configuration:
        content = file_configuration.read()
    return Markdown_list_to_OrderedDict(content)
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
):
    """
    Resample a list of values to a specified length by interpolation over
    integer abscissae. Return the resampled values, or the tuple of
    (abscissae, resampled values) when dimensions = 2.
    """
    abscissae = list(range(0, len(values)))
    interpolation = scipy.interpolate.interp1d(
        abscissae,
        values,
        kind = interpolation_type
    )
    resampled_abscissae = list(numpy.linspace(min(abscissae), max(abscissae), length))
    resampled_values = [float(interpolation(abscissa)) for abscissa in resampled_abscissae]
    if dimensions == 1:
        return resampled_values
    elif dimensions == 2:
        return (resampled_abscissae, resampled_values)
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """
    Convert a waveform (a NumPy array of samples) in place to a rectangle
    waveform: non-negative samples become the maximum sample value and
    negative samples become the minimum sample value. Return the array.
    """
    peak = fraction_amplitude * max(values)
    values[values >= 0] = peak
    trough = fraction_amplitude * min(values)
    values[values < 0] = trough
    # scale back up to the original amplitude
    values[:] = [sample * (1 / fraction_amplitude) for sample in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
):
    """
    Read a WAV file, convert its waveform to a rectangle waveform and write
    the result to a WAV file. When no output filename is specified, a
    non-clobbering variant of the input filename is proposed.

    The conversion was previously re-applied inline after the call to
    change_waveform_to_rectangle_waveform; the duplicated (idempotent)
    statements have been removed.
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    values = change_waveform_to_rectangle_waveform(
        values = values,
        fraction_amplitude = fraction_amplitude
    )
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """Scale the elements of x by 1/summation; by default the elements are
    normalized to sum to unity."""
    total = sum(x) if summation is None else summation
    return [element / total for element in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """Linearly rescale the elements of x to the range [minimum, maximum]."""
    low = min(x)
    scale = (maximum - minimum) / (max(x) - low)
    return [minimum + (element - low) * scale for element in x]
def composite_variable(
    x
):
    """
    Combine the elements of x into a single number by weighting element i by
    k ** (i - 1), where k is len(x) + 1.
    """
    base = len(x) + 1
    return sum(
        base ** (index - 1) * element for index, element in enumerate(x)
    )
def model_linear(
    data = None,
    quick_calculation = False
):
    """
    Fit a simple least-squares line to (x, y) pairs and return the tuple of
    coefficients (intercept b0, slope b1). With quick_calculation = True,
    only a spread of 10 of the data are used.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x = sum(datum[0] for datum in data)
    sum_y = sum(datum[1] for datum in data)
    sum_xx = sum(datum[0] ** 2 for datum in data)
    sum_xy = sum(datum[0] * datum[1] for datum in data)
    b1 = (sum_xy - (sum_x * sum_y) / n) / (sum_xx - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
):
    """
    Deserialize and return the object pickled at the specified filepath.

    NOTE(security): pickle can execute arbitrary code during load; only load
    trusted files.
    """
    # The with-statement ensures the file handle is closed (it was
    # previously leaked).
    with open(filename, "rb") as file_object:
        return pickle.load(file_object)
def export_object(
    x,
    filename = None,
    overwrite = False
):
    """
    Pickle the object x to the specified filepath; unless overwrite is
    requested, a non-clobbering variant of the filename is proposed.
    """
    filename = propose_filename(
        filename = filename,
        overwrite = overwrite
    )
    # The with-statement ensures the file handle is closed (it was
    # previously leaked).
    with open(filename, "wb") as file_object:
        pickle.dump(x, file_object)
def string_to_bool(x):
    """Interpret a string as a boolean: "yes", "true", "t" and "1" (in any
    letter case) are True; everything else is False."""
    truthy = ("yes", "true", "t", "1")
    return x.lower() in truthy
def ustr(text):
    """
    Convert a string to Python 2 unicode or Python 3 string as appropriate to
    the version of Python in use. None is passed through unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
):
    """
    Return the English-language expression of a non-negative integer (e.g.
    342 -> "three hundred forty two"); scale words up to vigintillion are
    supported. The number is processed as its decimal string representation.
    """
    # words for the ones place (index = digit)
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    # words for 10--19 (index = ones digit)
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    # words for the tens place (index = tens digit; 0 and 1 are unused)
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    # scale words per 3-digit group (index = group position from the right)
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """
    Return the text with every run of digits replaced by its English-language
    expression (e.g. "2 cats" becomes "two cats").
    """
    # Split the text into alternating non-digit and digit segments; the raw
    # string avoids an invalid-escape-sequence warning on Python >= 3.6.
    segments = re.split(r"(\d+)", text)
    if segments[-1] == "":
        segments = segments[:-1]
    text_translated = []
    # Replace digit segments with English text.
    for text_segment in segments:
        if all(character.isdigit() for character in text_segment):
            text_translated.append(number_to_English_text(number = text_segment))
        else:
            text_translated.append(text_segment)
    return "".join(text_translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
):
    """
    This function replaces contractions with full words and replaces numbers
    with digits in specified text. There is the option to remove articles.
    """
    # The lookup tables are constructed once here rather than being rebuilt
    # on every loop iteration as previously.
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    words = text.split()
    text_translated = ""
    for word in words:
        # Optionally drop articles entirely.
        if remove_articles and word in ["a", "an", "the"]:
            continue
        if word in contractions_expansions:
            word = contractions_expansions[word]
        if word in numbers_digits:
            word = numbers_digits[word]
        text_translated += " " + word
    text_translated = text_translated.strip()
    return text_translated
def split_into_sentences(
    text = None
):
    """
    Split text into a list of sentence strings. Periods that are part of
    common titles, suffixes, acronyms, single initials, "Ph.D." and website
    addresses are protected from being treated as sentence boundaries by a
    temporary <prd> placeholder; any trailing text after the final
    terminator is discarded. The substitution order below is significant.
    """
    capitals = "([A-Z])"
    prefixes = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    starters = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = "[.](com|gov|io|net|org|pro)"
    text = " " + text + " "
    text = text.replace("\n", " ")
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub("\s" + capitals + "[.] ", " \\1<prd> ", text)
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + capitals + "[.]", " \\1<prd>", text)
    # Move terminators outside closing quotation marks so that the quoted
    # sentence is kept whole.
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    # The final segment is the remainder after the last terminator.
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
):
    """Return the text with its first (possibly incomplete) sentence removed;
    note that split_into_sentences also discards any trailing text after the
    final sentence terminator."""
    return " ".join(split_into_sentences(text)[1:])
def pseudorandom_MAC_address():
    """Return a pseudorandom MAC address as six colon-separated lowercase
    hexadecimal octets (not cryptographically secure)."""
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{octet:02x}".format(octet = octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
):
    """
    Return the named attribute of an object, supporting simple indexed names
    of the form "attribute[index]"; return the imputation default value when
    the attribute (or index) does not exist or the name cannot be parsed.
    """
    try:
        if "[" in name and "]" in name:
            index = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    # Previously a bare except, which would also swallow KeyboardInterrupt
    # and SystemExit.
    except Exception:
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
):
    """Return a list of UUID-derived strings usable as Python variable names
    (each begins with a letter)."""
    names = []
    while len(names) < number:
        candidate = str(uuid.uuid4()).replace("-", "")
        if not candidate[0].isalpha():
            continue
        names.append(candidate)
    return names
def add_time_variables(df, reindex = True):
    """
    Return a DataFrame with variables for weekday index, weekday name, timedelta
    through day, fraction through day, hours through day and days through week
    added, optionally with the index set to datetime and the variable `datetime`
    removed. It is assumed that the variable `datetime` exists.

    Returns False if no `datetime` column is present.
    NOTE(review): the `reindex` parameter is currently unused — the index is
    always set to `datetime`; confirm with callers before removing it.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # `Series.dt.weekday_name` was removed in pandas 1.0; prefer `day_name()`
    # and fall back for very old pandas versions.
    try:
        df["weekday_name"] = df["datetime"].dt.day_name()
    except AttributeError:
        df["weekday_name"] = df["datetime"].dt.weekday_name
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    df.index = df["datetime"]
    #del df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create daily plots of a variable in a DataFrame, optionally renormalized. It
    is assumed that the DataFrame index is datetime.

    One series is drawn per day group against `hours_through_day` (as added by
    `add_time_variables`); `renormalize` rescales each day's values to [0, 1]
    with MinMaxScaler. Returns False if the index is not datetime.

    NOTE(review): grouping is by `df.index.day` (day of month), so identical
    day numbers from different months share a group — confirm this is intended
    for multi-month data.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    days = []
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        if renormalize:
            # rescale this day's values to [0, 1]
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create weekly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the variable `days_through_week` exists.

    One series is drawn per ISO week group (grouped by `df.index.week`, so the
    index must be datetime) against `days_through_week`; `renormalize` rescales
    each week's values to [0, 1] with MinMaxScaler. The horizontal axis is
    labelled with weekday names at day midpoints. Returns False if
    `days_through_week` is missing.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    weeks = []
    for group in df.groupby(df.index.week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            # rescale this week's values to [0, 1]
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    # place weekday labels at the midpoint of each day
    plt.xticks(
        [ 0.5,      1.5,       2.5,         3.5,        4.5,      5.5,        6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create yearly plots of a variable in a DataFrame, optionally renormalized.
    It is assumed that the DataFrame index is datetime.

    One labelled series is drawn per calendar year against `days_through_year`;
    `renormalize` rescales each year's values to [0, 1] with MinMaxScaler.
    Month names can be placed on the horizontal axis. Returns False if the
    index is not datetime.

    NOTE(review): `horizontal_axis_labels_days` is currently unused — confirm
    with callers before removing it.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year in years:
        if renormalize:
            # rescale this year's values to [0, 1]
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        if plot:
            plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # place month labels at the approximate midpoint of each month
        plt.xticks(
            [ 15.5,      45,         74.5,    105,     135.5, 166,    196.5,  227.5,    258,         288.5,     319,        349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df = None,
    variable = None,
    window = 20,
    upper_factor = 2,
    lower_factor = 2
):
    """
    Add rolling statistics variables derived from a specified variable in a
    DataFrame: a rolling mean and standard deviation over *window* samples,
    plus upper/lower bounds at the mean +/- a factor of the standard
    deviation. Return the modified DataFrame.
    """
    # `pd.stats.moments.rolling_mean`/`rolling_std` were removed from pandas
    # (deprecated in 0.18, gone by 0.23); use the `.rolling()` API instead.
    rolling = df[variable].rolling(window = window)
    df[variable + "_rolling_mean"] = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = None,
    variables_exclude = None
):
    """
    Rescale variables in a DataFrame to the range [0, 1], excluding variables
    with NaNs and strings, excluding specified variables, and including
    specified variables. Return the modified DataFrame.
    """
    # Copy the caller's lists. The previous implementation aliased
    # `variables_exclude` (a shared mutable default) and extended it in place,
    # mutating both the caller's argument and the default across calls.
    variables_include = list(variables_include) if variables_include else []
    variables_not_rescale = list(variables_exclude) if variables_exclude else []
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Create a day-long histogram of counts of the variable for each hour. It is
    assumed that the DataFrame index is datetime; counts are grouped by the
    index's hour. Returns False if the index is not datetime.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    counts = df.groupby(df.index.hour)[variable].count()
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
):
    """
    Create a year-long histogram of counts of the variable for each month. It
    is assumed that the DataFrame index is datetime; counts are grouped by the
    index's month name and reindexed into calendar order. Returns False if the
    index is not datetime.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # reindex by calendar.month_name to restore January..December ordering
    counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Set up a Jupyter notebook with a few defaults: seaborn paper style with a
    monospace font, warnings suppressed, wide pandas display limits and a
    large default figure size.
    """
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    pd.set_option("display.max_rows", 500)
    pd.set_option("display.max_columns", 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
):
    """
    Display a progress bar widget in a Jupyter notebook while yielding the
    elements of *sequence*. Its dependencies must be enabled on launching
    Jupyter, such as in the following way:
    jupyter nbextension enable --py widgetsnbextension
    The progress bar can be used in a way like the following:
    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)

    The label is refreshed every *every* elements (by default every element,
    or every 0.5 % for sequences longer than 200). The bar turns red if the
    loop raises and green on completion.
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    # sequences without a length (generators) get an indeterminate bar
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200) # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name = name,
                        index = index,
                        size = size
                    )
            yield record
    except:
        # mark the bar red on any error, then propagate it
        progress.bar_style = "danger"
        raise
    else:
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name = name,
            index = str(index or "?")
        )
_main()
|
wdbm/shijian
|
shijian.py
|
histogram_month_counts
|
python
|
def histogram_month_counts(
df,
variable
):
if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
log.error("index is not datetime")
return False
counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
|
Create a year-long histogram of counts of the variable for each month. It is
assumed that the DataFrame index is datetime and that the variable
`month_name` exists.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1788-L1801
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
name = "shijian"
version = "2018-06-02T1644Z"
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    """Instantiate the global clock collection that `Clock` instances join."""
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
):
    """Return the current UTC time styled as UNIX time (whole seconds by default)."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def time_UTC(
    style = None
):
    """Return the current UTC time styled according to *style* (default style when None)."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
):
    """
    Return a filename based on the current UNIX time, optionally with an
    *extension* appended, passed through `propose_filename` to avoid
    clobbering an existing file.
    """
    filename = str(
        time_UNIX(
            style = style
        )
    )
    if extension:
        filename = filename + extension
    filename_proposed = propose_filename(
        filename = filename
    )
    return filename_proposed
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
):
    """
    Return a filename based on the current UTC time, optionally with an
    *extension* appended, passed through `propose_filename` to avoid
    clobbering an existing file.
    """
    filename = style_datetime_object(
        datetime_object = datetime.datetime.utcnow(),
        style = style
    )
    if extension:
        filename = filename + extension
    filename_proposed = propose_filename(
        filename = filename
    )
    return filename_proposed
def style_minimal_seconds(seconds):
    """
    Return *seconds* as a compact human-readable string such as
    "1 hours 2 minutes 3 seconds", omitting intervals whose value is zero.
    """
    time_intervals = ["days", "hours", "minutes", "seconds"]
    # relativedelta normalises raw seconds into days/hours/minutes/seconds
    dateutil_object = dateutil.relativedelta.relativedelta(seconds = seconds)
    return " ".join("{} {}".format(
        int(getattr(dateutil_object, interval)), interval
    ) for interval in time_intervals if getattr(dateutil_object, interval))
def style_UNIX_timestamp(
    timestamp = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """Convert a UNIX *timestamp* (seconds since epoch) to a styled UTC time string."""
    datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(datetime_object = datetime_object, style = style)
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Render *datetime_object* according to the named *style*.

    For `datetime.datetime` objects, the style names select strftime formats
    or epoch-seconds representations (see the branch comments); an unknown
    style falls back to the filename-safe "YYYY-MM-DDTHHMMZ" form. For
    `datetime.timedelta` objects (or plain second counts), the style is a
    format string with the placeholders {Y}/{D}/{H}/{M}/{S} (totals) and
    {YYYY}/{DD}/{HH}/{MM}/{SS} (zero-padded remainders). Other input types
    return None implicitly.
    """
    if type(datetime_object) is datetime.datetime:
        # filename safe
        if style == "YYYY-MM-DDTHHMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
        # filename safe with seconds
        elif style == "YYYY-MM-DDTHHMMSSZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%SZ")
        # filename safe with seconds and microseconds
        elif style == "YYYY-MM-DDTHHMMSSMMMMMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%S%fZ")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS UTC":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S UTC")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS Z":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S Z")
        # UNIX time in seconds with second fraction
        elif style == "UNIX time S.SSSSSS":
            return (datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds()
        # UNIX time in seconds rounded
        elif style == "UNIX time S":
            return int((datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds())
        # human-readable date
        elif style == "day DD month YYYY":
            return datetime_object.strftime("%A %d %B %Y")
        # human-readable time and date
        elif style == "HH:MM day DD month YYYY":
            return datetime_object.strftime("%H:%M %A %d %B %Y")
        # human-readable time with seconds and date
        elif style == "HH:MM:SS day DD month YYYY":
            return datetime_object.strftime("%H:%M:%S %A %d %B %Y")
        # human-readable date with time with seconds
        elif style == "day DD month YYYY HH:MM:SS":
            return datetime_object.strftime("%A %d %B %Y %H:%M:%S")
        # human-readable-audible time with seconds and date
        elif style == "HH hours MM minutes SS sounds day DD month YYYY":
            return datetime_object.strftime("%H hours %M minutes %S seconds %A %d %B %Y")
        # human-readable days, hours and minutes
        elif style == "DD:HH:MM":
            return datetime_object.strftime("%d:%H:%M")
        # human-readable days, hours, minutes and seconds
        elif style == "DD:HH:MM:SS":
            return datetime_object.strftime("%d:%H:%M:%S")
        # human-readable time with seconds
        elif style == "HH:MM:SS":
            return datetime_object.strftime("%H:%M:%S")
        # human-readable-audible time with seconds
        elif style == "HH hours MM minutes SS seconds":
            return datetime_object.strftime("%H hours %M minutes %S seconds")
        # filename safe
        else:
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
    if type(datetime_object) is datetime.timedelta:
        if style == "YYYY-MM-DDTHHMMZ":
            # the datetime default style does not apply to durations
            style = "{DD} days, {HH}:{MM}:{SS}"
        if hasattr(datetime_object, "seconds"):
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        # decompose total seconds into remainders per unit, keeping the totals
        seconds_total = seconds
        minutes       = int(math.floor(seconds / 60))
        minutes_total = minutes
        seconds       -= minutes * 60
        hours         = int(math.floor(minutes / 60))
        hours_total   = hours
        minutes       -= hours * 60
        days          = int(math.floor(hours / 24))
        days_total    = days
        hours         -= days * 24
        years         = int(math.floor(days / 365))
        years_total   = years
        days          -= years * 365
        return style.format(**{
            "Y"   : years_total,
            "D"   : days_total,
            "H"   : hours_total,
            "M"   : minutes_total,
            "S"   : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD"  : str(days).zfill(2),
            "HH"  : str(hours).zfill(2),
            "MM"  : str(minutes).zfill(2),
            "SS"  : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert a 24-hour "HHMM" time string to minutes since midnight."""
    return int(HHMM[:2]) * 60 + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time as minutes since midnight."""
    utc_now = datetime.datetime.utcnow()
    return utc_now.hour * 60 + utc_now.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop  = None  # string "HHMM" e.g. "1000"
):
    """
    Return True if the current UTC time falls within the specified daily time
    range (which may wrap past midnight), False otherwise, or None when no
    range is specified.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        # "HHMM--HHMM" overrides separate start/stop arguments
        time_start, time_stop = time_range.split("--")
    minutes_per_day = 1440
    now = now_in_minutes()
    start = HHMM_to_minutes(time_start)
    stop = HHMM_to_minutes(time_stop)
    # modular arithmetic handles ranges that wrap past midnight
    return (now - start) % minutes_per_day <= (stop - start) % minutes_per_day
def timer(function):
    """Decorator that times each call of *function* with a `Clock` named after it."""
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # introspect the bound call arguments (currently unused beyond this)
        arguments = inspect.getcallargs(function, *args, **kwargs)
        clock = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    A stopwatch-style clock that accumulates elapsed time between start/stop
    (and intermediate update) calls. On instantiation it registers itself
    with the module-global `clocks` collection when that exists.
    """
    def __init__(
        self,
        name  = None,
        start = True
    ):
        self._name           = name
        self._start          = start # Boolean start clock on instantiation
        self._start_time     = None  # internal (value to return)
        self._start_time_tmp = None  # internal (value for calculations)
        self._stop_time      = None  # internal (value to return)
        self._update_time    = None  # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Record the start time and begin accumulating."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time     = datetime.datetime.utcnow()
    def stop(self):
        """Fold outstanding time into the accumulator and record the stop time."""
        self.update()
        self._update_time    = None
        self._start_time_tmp = None
        self._stop_time      = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the working start time."""
        self.accumulator     = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the accumulated elapsed time in seconds (float)."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """Return a multi-line text report of the clock's attributes."""
        string = "clock attribute".ljust(39)       + "value"
        string += "\nname".ljust(40)               + self.name()
        string += "\ntime start (s)".ljust(40)     + self.start_time()
        string += "\ntime stop (s)".ljust(40)      + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40)   + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the clock report."""
        print(self.report())
class Clocks(object):
    """
    A collection of `Clock` instances with aggregate reporting: either mean
    times per clock name ("statistics") or every clock's time ("full").
    """
    def __init__(
        self
    ):
        self._list_of_clocks       = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock with this collection."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """Return a text report of the registered clocks in the given style."""
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks != []:
            if style == "statistics":
                # Create a dictionary of clock types with corresponding lists of
                # times for all instances.
                dictionary_of_clock_types = {}
                # Get the names of all clocks and add them to the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()] = []
                # Record the values of all clocks for their respective names in
                # the dictionary.
                for clock in self._list_of_clocks:
                    dictionary_of_clock_types[clock.name()].append(clock.time())
                # Create a report, calculating the average value for each clock
                # type.
                string = "clock type".ljust(39) + "mean time (s)"
                for name, values in list(dictionary_of_clock_types.items()):
                    string += "\n" +\
                        str(name).ljust(39) + str(sum(values)/len(values))
                string += "\n"
            elif style == "full":
                # Create a report, listing the values of all clocks.
                string = "clock".ljust(39) + "time (s)"
                for clock in self._list_of_clocks:
                    string += "\n" +\
                        str(clock.name()).ljust(39) + str(clock.time())
                string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report in the given style."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track completion fractions over time and estimate time of completion by
    linear extrapolation (via `model_linear`). In quick-calculation mode,
    data points are recorded at most once per `update_rate` seconds.
    """
    def __init__(
        self
    ):
        self.data              = []    # list of (fraction, UNIX time) tuples
        self.quick_calculation = False
        self.update_rate       = 1 # s
        self.clock             = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style    = None
    ):
        """Record a completion *fraction* and return a status message."""
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            # rate-limit recording to one datum per update_rate seconds
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """Extrapolate the datetime at which the fraction reaches 1."""
        if len(self.data) <= 1:
            return 0
        else:
            try:
                # fit time as a linear function of fraction; evaluate at 1
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            except:
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded completion fraction."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recent completion fraction as a percentage."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a one-line status message with percentage, ETA and ETR."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA        = self.ETA(),
                ETR        = self.ETR()
            )
def UID():
    """Return a random universally unique identifier as a string."""
    identifier = uuid.uuid4()
    return str(identifier)
def unique_number(
    style = None
):
    """
    Return a process-wide unique, monotonically increasing integer, backed by
    a module-level global list per style. With style "integer 3 significant
    figures" the sequence starts at 100 and raises once it would exceed 999;
    otherwise it starts at 1 and is unbounded.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        initial_number = 100
        if "unique_numbers_3_significant_figures" not in globals():
            global unique_numbers_3_significant_figures
            unique_numbers_3_significant_figures = []
        if not unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(initial_number)
        else:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        # the 3-significant-figure space is exhausted beyond 999
        if\
            style == "integer 3 significant figures" and \
            unique_numbers_3_significant_figures[-1] > 999:
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        initial_number = 1
        if "unique_numbers" not in globals():
            global unique_numbers
            unique_numbers = []
        if not unique_numbers:
            unique_numbers.append(initial_number)
        else:
            unique_numbers.append(unique_numbers[-1] + 1)
        return unique_numbers[-1]
def unique_3_digit_number():
    """Return the next unique integer in the range 100-999 (raises past 999)."""
    return unique_number(style = "integer 3 significant figures")
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Normalise *text* to ASCII and strip characters unsafe in filenames or
    URLs. For filenames (default), whitespace runs become underscores; for
    URLs, the text is lowercased and whitespace/hyphen runs become single
    hyphens.
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    # decompose accented characters, then drop anything non-ASCII
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    # raw strings avoid DeprecationWarning for invalid escape sequences
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
# @detail This function returns a filename string. If a default filename is not
# specified, the function generates one based on the current time. If a default
# filename is specified, the function uses it as the default filename. By
# default, the function then checks to see if using the filename would cause
# overwriting of an existing file. If overwriting is possible, the function
# appends an integer to the filename in a loop in order to generate a filename
# that would not cause overwriting of an existing file. The function can be set
# to overwrite instead of using the default overwrite protection behaviour.
# @return filename string
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    """
    Return a safe filename based on *filename* (or the current UTC time),
    optionally slugified, appending "_<n>" before the extension as needed to
    avoid overwriting an existing file unless *overwrite* is True.
    """
    # If no file name is specified, generate one.
    if not filename:
        filename = time_UTC()
    filename_proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            # slugify only the base name so the extension's dot survives
            filename_base      = os.path.splitext(os.path.basename(filename))[0]
            filename_extension = os.path.splitext(os.path.basename(filename))[1]
            filename_base      = slugify(text = filename_base)
            filename_proposed  = filename_base + filename_extension
        else:
            filename_proposed = slugify(text = filename)
    if not overwrite:
        count = 0
        # append an increasing counter until a non-existent filename is found
        while os.path.exists(filename_proposed):
            count = count + 1
            filename_directory = os.path.dirname(filename)
            filename_base      = os.path.splitext(os.path.basename(filename))[0]
            filename_extension = os.path.splitext(os.path.basename(filename))[1]
            if filename_directory:
                filename_proposed = filename_directory + \
                                    "/"               + \
                                    filename_base      + \
                                    "_"               + \
                                    str(count)         + \
                                    filename_extension
            else:
                filename_proposed = filename_base      + \
                                    "_"               + \
                                    str(count)         + \
                                    filename_extension
    return filename_proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating a
    file at the filepath.
    """
    # avoid tempfile._get_candidate_names, a private API with no stability
    # guarantee; a UUID hex string is equally collision-resistant
    return "/tmp/" + uuid.uuid4().hex
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return a specified number of last lines of a specified file. If there is an
    error or the file does not exist, return False.

    NOTE(review): the result is the raw output of the `tail` command — bytes
    on Python 3 — and requires `tail` on the PATH; an empty file also returns
    False because empty output is falsy.
    """
    try:
        filepath = os.path.expanduser(os.path.expandvars(filepath))
        if os.path.isfile(filepath):
            text = subprocess.check_output(["tail", "-" + str(lines), filepath])
            if text:
                return text
            else:
                return False
        else:
            return False
    except:
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Check that *keyphrase* appears in the kernel release string. When absent,
    optionally log a warning (*warn*) and, when *require* is set, log a fatal
    message and raise EnvironmentError.
    """
    import platform
    release = platform.release()
    if keyphrase not in release:
        message =\
            "inappropriate environment: " +\
            "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release   = release
        )
        if warn is True:
            log.warning(message)
        if require is True:
            log.fatal(message)
            raise(EnvironmentError)
def ensure_program_available(
    program
):
    """
    Verify that *program* is on the PATH (via `which`); log and raise
    EnvironmentError if it is not.
    """
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is None:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
    else:
        log.debug("program {program} available".format(
            program = program
        ))
def which(
    program
):
    """
    Locate *program*: if given with a directory component, verify that path
    is an executable file; otherwise search each PATH directory. Return the
    full filepath if found, otherwise None.
    """
    def _executable(filepath):
        return os.path.isfile(filepath) and os.access(filepath, os.X_OK)
    directory, _basename = os.path.split(program)
    if directory:
        if _executable(program):
            return(program)
    else:
        for path_directory in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(path_directory.strip('"'), program)
            if _executable(candidate):
                return candidate
    return None
def running(
    program
):
    """
    Return True if any non-defunct process listed by `ps -A` matches
    *program* (substring match on the process listing line), else False.
    """
    # ps output is bytes on Python 3, so match against an encoded needle
    program = str.encode(program)
    results = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    matches_current = [
        line for line in results if program in line and b"defunct" not in line
    ]
    if matches_current:
        return True
    else:
        return False
def ensure_file_existence(
    filename
):
    """
    Raise IOError if *filename* (with environment variables expanded) does
    not exist as a file; otherwise log that it was found.
    """
    # the log messages previously contained "(unknown)" literals instead of a
    # {filename} placeholder, so the filename was never interpolated
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the file at *filename*."""
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Group filenames in *directory* that contain digits and end in *extension*
    into sequences keyed by their digit-masked pattern. Return either the
    naturally-sorted first sequence found, or the full pattern-to-filenames
    dictionary.
    """
    filenames_of_directory = os.listdir(directory)
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        # mask digit runs so e.g. frame_001.png and frame_002.png share a key
        pattern = re.sub("\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        # NOTE(review): "first" depends on dict iteration order of patterns
        first_key_identified = next(iter(filename_sequences.keys()))
        filename_sequence = \
            natural_sort(filename_sequences[first_key_identified])
        return filename_sequence
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the names of regular files directly inside *directory*."""
    entries = os.listdir(directory)
    return [
        entry for entry in entries
        if os.path.isfile(os.path.join(directory, entry))
    ]
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """Return filepaths of all files found recursively under *directory*."""
    listing = []
    for root, _, filenames in os.walk(directory):
        listing.extend(os.path.join(root, filename) for filename in filenames)
    return listing
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return absolute filepaths of the files at *directory*, optionally keeping
    only those whose extension contains *extension_required*. Raise IOError if
    the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for entry in os.listdir(directory):
        candidate = os.path.abspath(os.path.join(directory, entry))
        if os.path.isfile(candidate):
            filepaths.append(candidate)
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command = None,
    background = True,
    timeout = None
):
    """
    Run a shell *command* via Bash. In background mode (default), launch it
    and return None immediately (*timeout* is ignored, with a warning). In
    foreground mode, wait up to *timeout* seconds and return the captured
    stdout (bytes), or False if the command times out or fails to complete.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell      = True,
            executable = "/bin/bash"
        )
        return None
    elif not background:
        process = subprocess.Popen(
            [command],
            shell      = True,
            executable = "/bin/bash",
            stdout     = subprocess.PIPE
        )
        try:
            process.wait(timeout = timeout)
            output, errors = process.communicate(timeout = timeout)
            return output
        except:
            # on timeout (or any wait/communicate error), kill the process
            process.kill()
            return False
    else:
        return None
def percentage_power():
    """
    Return the battery charge percentage as a string like "85%" using upower,
    "100%" when only line power is detected, or None when power information
    is unavailable (any failure is swallowed deliberately as best-effort).
    """
    try:
        # NOTE(review): engage_command defaults to background mode, which
        # returns None — confirm these calls should pass background = False
        filenames_power = engage_command(command = "upower -e")
        filenames_power = [line for line in filenames_power.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            # the device filename must be interpolated into the upower query;
            # previously a literal "(unknown)" was passed instead
            power_data = engage_command(command = "upower -i {filename}".format(filename = filename_power_battery))
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    except:
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a
    list to a specified type and return the new list.

    Generalized: any callable *element_type* is applied, where previously only
    `str` was supported and other types silently returned None.
    """
    return [
        element_type(element) if not isinstance(element, list)
        else convert_type_list_elements(
            list_object  = element,
            element_type = element_type
        )
        for element in list_object
    ]
class List_Consensus(list):
    """
    This class is designed to instantiate a list of elements. It features
    functionality that limits approximately the memory usage of the list. On
    estimating the size of the list as greater than the specified or default
    size limit, the list reduces the number of elements it contains. The list
    provides functionality to return its most frequent element, which can be
    used to determine its "consensus" element.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate size limit in bytes (no-op when size is None)."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        This function removes the least frequent elements until the size
        constraint is met.
        """
        if size is None:
            size = self.size_constraint
        # NOTE(review): sys.getsizeof is shallow — it measures the list object,
        # not its elements, so the constraint is approximate
        while sys.getsizeof(self) > size:
            element_frequencies = collections.Counter(self)
            infrequent_element = element_frequencies.most_common()[-1:][0][0]
            self.remove(infrequent_element)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append *element*, then optionally trim back to the size constraint."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None if the list is empty."""
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        except:
            return None
def natural_sort(
    list_object
):
    """
    Return a naturally-sorted copy of a list: embedded digit runs are
    compared by numeric value and text is compared case-insensitively.
    """
    def alphanumeric_key(text):
        return [
            int(chunk) if chunk.isdigit() else chunk.lower()
            for chunk in re.split("([0-9]+)", text)
        ]
    return sorted(list_object, key = alphanumeric_key)
def indices_of_list_element_duplicates(
    x
):
    """
    Yield the index of each element that appeared earlier in the list.
    Unhashable list and dictionary elements are compared by converting them
    to tuples.
    """
    observed = set()
    for index, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
):
    """
    Return the indices of the greatest values of a list, in ascending order
    of value, capped at the length of the list.
    """
    count = min(number, len(x))
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _value in ranked[-count:]]
def unique_list_elements(x):
    """
    Return the unique elements of a list, preserving first-appearance order.
    A membership scan is used rather than a set so unhashable elements are
    supported.
    """
    result = []
    for element in x:
        if element in result:
            continue
        result.append(element)
    return result
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    Return the specified number of elements of a list, spread approximately
    evenly across the list. If the list has no more elements than requested,
    the whole list is returned.
    """
    length = len(list_of_elements)
    if length <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    if number_of_elements == 1:
        middle_index = int(round((length - 1) / 2))
        return [list_of_elements[middle_index]]
    # Take one element near the start of the first even subdivision, then
    # recurse on the remainder of the list for the remaining elements.
    first_index = int(round((length - 1) / (2 * number_of_elements)))
    rest_start  = int(round((length - 1) / (number_of_elements)))
    return [list_of_elements[first_index]] + select_spread(
        list_of_elements[rest_start:],
        number_of_elements - 1
    )
def split_list(
    list_object = None,
    granularity = None
):
    """
    Split a list into a specified number of parts and return a list of those
    parts. A negative number of parts raises an exception; a number of parts
    greater than or equal to the number of elements yields one
    single-element list per element.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    mean_length = len(list_object) / float(granularity)
    parts = []
    position = float(0)
    while position < len(list_object):
        parts.append(
            list_object[int(position):int(position + mean_length)]
        )
        position += mean_length
    return parts
def ranges_edge_pairs(
    extent = None,
    range_length = None
):
    """
    Return a list of (start, stop) edge pairs covering an extent in ranges of
    at most the specified length.

    >>> ranges_edge_pairs(extent = 76, range_length = 20)
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    number_of_ranges = int(math.ceil(extent / range_length))
    edge_pairs = []
    for index in range(number_of_ranges):
        start = index * (range_length + 1)
        stop  = min(start + range_length, extent)
        edge_pairs.append((start, stop))
    return edge_pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """
    Parse a Markdown bullet list of "- name: value" entries, where deeper
    indentation nests dictionaries, and return the resulting dictionary.
    Entries without a value open a new nested branch.
    """
    entry_pattern = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branches = [{}]
    for indentation, entry_name, value in entry_pattern.findall(Markdown_list):
        indentation = len(indentation)
        if indentation > current_depth:
            # descending: the branch just opened must still be empty
            assert not branches[-1], "unexpected indent"
        elif indentation < current_depth:
            branches.pop()
        branches[-1][entry_name] = value if value else {}
        if not value:
            # a valueless entry opens a new nested branch
            branches.append(branches[-1][entry_name])
        current_depth = indentation
    return branches[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """
    Parse a Markdown bullet list of "- name: value" entries, where deeper
    indentation nests dictionaries, and return the result as an OrderedDict
    that preserves entry order. Entries without a value open a new nested
    branch.
    """
    entry_pattern = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    current_depth = 0
    branches = [collections.OrderedDict()]
    for indentation, entry_name, value in entry_pattern.findall(Markdown_list):
        indentation = len(indentation)
        if indentation > current_depth:
            # descending: the branch just opened must still be empty
            assert not branches[-1], "unexpected indent"
        elif indentation < current_depth:
            branches.pop()
        branches[-1][entry_name] = value if value else collections.OrderedDict()
        if not value:
            # a valueless entry opens a new nested branch
            branches.append(branches[-1][entry_name])
        current_depth = indentation
    return branches[0]
def open_configuration(
    filename = None
):
    """
    Read a Markdown-list configuration file and return its content parsed as
    an ordered dictionary.
    """
    # Use a context manager so the file handle is closed deterministically
    # (previously the handle was left open until garbage collection).
    with open(filename, "r") as file_configuration:
        return Markdown_list_to_OrderedDict(file_configuration.read())
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
):
    """
    Resample a list of values to a specified length by interpolation. With
    dimensions = 1 return the resampled values; with dimensions = 2 return a
    tuple of the resample positions and the resampled values.
    """
    positions = list(range(0, len(values)))
    interpolate = scipy.interpolate.interp1d(
        positions,
        values,
        kind = interpolation_type
    )
    new_positions = list(numpy.linspace(min(positions), max(positions), length))
    new_values = [float(interpolate(position)) for position in new_positions]
    if dimensions == 2:
        return (new_positions, new_values)
    elif dimensions == 1:
        return new_values
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """
    Convert a waveform array in place to a rectangular waveform: samples at
    or above zero are clamped high and samples below zero are clamped low,
    scaled through the specified fraction of the waveform's extrema. The
    array is modified in place and also returned.

    NOTE(review): values is presumably a NumPy array — boolean-mask
    assignment is used; confirm against callers.
    """
    high = fraction_amplitude * max(values)
    values[values >= 0] = high
    # min is taken after the positive samples are clamped; any original
    # negative minimum is unchanged by that step
    low = fraction_amplitude * min(values)
    values[values < 0] = low
    scale = 1 / fraction_amplitude
    values[:] = [sample * scale for sample in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
):
    """
    Read a WAV file, convert its waveform to a rectangular waveform and save
    the result, by default to a proposed filename based on the input filename
    (avoiding clobbering unless overwrite is set).
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    # The helper converts the waveform in place and returns it; previously
    # the same three transformation statements were redundantly repeated here
    # after the helper call.
    values = change_waveform_to_rectangle_waveform(
        values             = values,
        fraction_amplitude = fraction_amplitude
    )
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """
    Return the list divided elementwise by a summation value; by default the
    list's own sum is used, normalising the list to unity.
    """
    divisor = sum(x) if summation is None else summation
    return [value / divisor for value in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """
    Return the list linearly rescaled so its smallest value maps to minimum
    and its largest value maps to maximum.
    """
    low = min(x)
    factor = (maximum - minimum) / (max(x) - low)
    return [minimum + (value - low) * factor for value in x]
def composite_variable(
    x
):
    """
    Combine the values of a list into a single scalar by weighting each
    element by powers of (len(x) + 1), starting at power -1 for the first
    element.
    """
    base = len(x) + 1
    total = 0
    for index, element in enumerate(x):
        total += base ** (index - 1) * element
    return total
def model_linear(
    data = None,
    quick_calculation = False
):
    """
    Fit an ordinary least squares line to (x, y) data pairs and return the
    intercept and gradient as a tuple (b0, b1). With quick_calculation, fit
    on an even spread of only 10 of the data points.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x  = sum(x for x, y in data)
    sum_y  = sum(y for x, y in data)
    sum_xx = sum(x ** 2 for x, y in data)
    sum_xy = sum(x * y for x, y in data)
    b1 = (sum_xy - (sum_x * sum_y) / n) / \
         (sum_xx - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
):
    """
    Load and return an object from a pickle file.

    Note: pickle deserialisation can execute arbitrary code; use only on
    trusted files.
    """
    # Use a context manager so the file handle is closed deterministically
    # (previously the handle was left open until garbage collection).
    with open(filename, "rb") as file_object:
        return pickle.load(file_object)
def export_object(
    x,
    filename = None,
    overwrite = False
):
    """
    Save an object to a pickle file, by default proposing an alternative
    filename rather than overwriting an existing file.
    """
    filename = propose_filename(
        filename = filename,
        overwrite = overwrite
    )
    # Use a context manager so the file handle is closed deterministically
    # (previously the handle was left open until garbage collection).
    with open(filename, "wb") as file_object:
        pickle.dump(x, file_object)
def string_to_bool(x):
    """Return True if the string is a truthy word: yes, true, t or 1."""
    truthy_words = ("yes", "true", "t", "1")
    return x.lower() in truthy_words
def ustr(text):
    """
    Convert a value to a text string appropriate to the running version of
    Python (unicode on Python 2, str on Python 3), passing None through
    unchanged.
    """
    if text is None:
        return text
    if sys.version_info >= (3, 0):
        return str(text)
    return unicode(text)
def number_to_English_text(
    number = None
):
    """
    Return the English-language representation of a non-negative integer
    (given as an int or a digit string), e.g. 123 -> "one hundred twenty
    three". Zero yields an empty string.
    """
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion ",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Note: "quindecillion " previously lacked its trailing space, which
    # would have run words together for numbers in that range.
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            # The remaining leading group may have 3, 2 or 1 digits.
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """
    Return the text with each run of digits replaced by its English-language
    representation.
    """
    # Split the text so that digit runs become separate segments.
    segments = re.split(r"(\d+)", text)
    if segments[-1] == "":
        segments = segments[:-1]
    translated_segments = []
    for segment in segments:
        if all(character.isdigit() for character in segment):
            translated_segments.append(number_to_English_text(number = segment))
        else:
            translated_segments.append(segment)
    return "".join(translated_segments)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
):
    """
    Replace contractions with full words and replace small number words
    (zero to twenty) with digits in the specified text, optionally removing
    the articles a, an and the.
    """
    # Both lookup tables are constants; previously they were rebuilt on every
    # iteration of the word loop.
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    translated_words = []
    for word in text.split():
        if remove_articles and word in ["a", "an", "the"]:
            continue
        word = contractions_expansions.get(word, word)
        word = numbers_digits.get(word, word)
        translated_words.append(word)
    # join instead of repeated string concatenation
    return " ".join(translated_words)
def split_into_sentences(
    text = None
):
    """
    Split text into a list of sentences using regular-expression heuristics.

    Periods that do not end sentences (titles, acronyms, websites, "Ph.D.",
    single-capital initials) are temporarily protected with a <prd> marker,
    sentence boundaries are marked with <stop>, and the text is then split on
    those markers. Any trailing text after the final sentence terminator is
    dropped.
    """
    capitals = "([A-Z])"
    prefixes = "(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = "(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    starters = "(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = "[.](com|gov|io|net|org|pro)"
    # pad so the edge-anchored substitutions below also match at the ends
    text = " " + text + " "
    text = text.replace("\n", " ")
    # protect non-terminating periods with the <prd> marker
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub("\s" + capitals + "[.] ", " \\1<prd> ", text)
    # an acronym followed by a sentence starter is a real sentence boundary
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]","\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + capitals + "[.]", " \\1<prd>", text)
    # move terminators outside closing quotation marks
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    # mark sentence boundaries and restore protected periods
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    # drop any trailing text after the final terminator
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
):
    """
    Return the text with its first sentence dropped (guarding against a
    leading fragment); trailing unterminated text is discarded by the
    sentence splitter.
    """
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """
    Return a pseudorandom MAC address as a lowercase colon-delimited
    hexadecimal string, e.g. "0a:1b:2c:3d:4e:5f". (Not suitable for
    security-sensitive use; the random module is not cryptographic.)
    """
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{octet:02x}".format(octet = octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
):
    """
    Return the named attribute of an object, supporting an optional integer
    index suffix such as "scores[2]". If the lookup fails for any reason,
    return the imputation default value.
    """
    try:
        if "[" in name and "]" in name:
            attribute_name = name.split("[")[0]
            index = int(name.split("[")[1].split("]")[0])
            return getattr(object_instance, attribute_name)[index]
        return getattr(object_instance, name)
    except:
        return imputation_default_value
def generate_Python_variable_names(
    number = 10
):
    """
    Return a list of pseudorandom names usable as Python variable names:
    32-character UUID hexadecimal strings whose first character is
    alphabetical.
    """
    names = []
    while len(names) < number:
        candidate = str(uuid.uuid4()).replace("-", "")
        if candidate[0].isalpha():
            names.append(candidate)
    return names
def add_time_variables(df, reindex = True):
    """
    Return the DataFrame with variables added that are derived from its
    `datetime` variable: month, month name, weekday index and name, timedelta
    and fraction and hours through the day, hour, days through the week and
    days through the year. With reindex (the default), the index is set to
    the datetime variable. Return False if the variable `datetime` is absent.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # dt.day_name() replaces dt.weekday_name, which was removed in pandas 1.0.
    df["weekday_name"] = df["datetime"].dt.day_name()
    # timedelta since that day's midnight
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    # Previously the reindex parameter was accepted but ignored; it now
    # controls setting the index, with the default preserving old behaviour.
    if reindex:
        df.index = df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create daily plots of a variable in a DataFrame on the current matplotlib
    figure, one line and/or scatter per day, optionally renormalizing each
    day's values to the range 0-1. It is assumed that the DataFrame index is
    datetime and that the variable `hours_through_day` exists (see
    add_time_variables). Return False if the index is not datetime.

    NOTE(review): grouping is by day-of-month (df.index.day), so data from
    the same day number of different months would be merged — confirm this
    is intended for datasets spanning more than one month.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # one sub-DataFrame per day-of-month group
    days = []
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        if renormalize:
            # rescale this day's values to the range 0-1
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create weekly plots of a variable in a DataFrame on the current
    matplotlib figure, one line and/or scatter per week, optionally
    renormalizing each week's values to the range 0-1. It is assumed that
    the DataFrame index is datetime and that the variable `days_through_week`
    exists (see add_time_variables). The horizontal axis is labelled with
    weekday names. Return False if `days_through_week` is absent.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    # one sub-DataFrame per ISO week group
    weeks = []
    for group in df.groupby(df.index.week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            # rescale this week's values to the range 0-1
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    # weekday names centred on each day of the week
    plt.xticks(
        [ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create yearly plots of a variable in a DataFrame on the current
    matplotlib figure, one labelled line and/or scatter per year, optionally
    renormalizing each year's values to the range 0-1. It is assumed that the
    DataFrame index is datetime and that the variable `days_through_year`
    exists (see add_time_variables). Return False if the index is not
    datetime.

    NOTE(review): the parameter horizontal_axis_labels_days is accepted but
    never used in this function — confirm whether day labelling was intended.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # one sub-DataFrame per year group
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year in years:
        if renormalize:
            # rescale this year's values to the range 0-1
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        if plot:
            plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # month names centred approximately on each month of the year
        plt.xticks(
            [ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df = None,
    variable = None,
    window = 20,
    upper_factor = 2,
    lower_factor = 2
):
    """
    Return the DataFrame with rolling statistics variables added that are
    derived from the specified variable: a rolling mean, a rolling standard
    deviation, and upper and lower bounds placed the specified numbers of
    rolling standard deviations above and below the rolling mean.
    """
    # DataFrame.rolling replaces pd.stats.moments.rolling_mean/rolling_std,
    # which were removed from pandas.
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"] = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = None,
    variables_exclude = None
):
    """
    Rescale variables of a DataFrame to the range 0-1, excluding variables
    containing NaNs and variables of object, datetime or timedelta dtype,
    excluding specified variables, and including specified variables.
    """
    # Copy the exclusion list: previously the caller's list (and the shared
    # mutable default argument []) was extended in place, accumulating
    # exclusions across calls.
    variables_not_rescale = list(variables_exclude) if variables_exclude else []
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    if variables_include:
        variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Create a day-long histogram of counts of the variable for each hour on
    the current matplotlib figure. The DataFrame index is assumed to be
    datetime. Return False if the index is not datetime.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    hourly_counts = df.groupby(df.index.hour)[variable].count()
    hourly_counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
):
    """
    Create a week-long histogram of counts of the variable for each weekday
    on the current matplotlib figure, ordered Monday through Sunday. The
    DataFrame index is assumed to be datetime. Return False if the index is
    not datetime.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # index.day_name() replaces index.weekday_name, which was removed in
    # pandas 1.0.
    counts = df.groupby(df.index.day_name())[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def setup_Jupyter():
    """
    Configure a Jupyter notebook with a few defaults: a paper-context
    monospace seaborn theme, suppressed warnings, generous pandas display
    limits and a large default figure size.
    """
    sns.set(context = "paper", font = "monospace")
    warnings.filterwarnings("ignore")
    for display_option in ("display.max_rows", "display.max_columns"):
        pd.set_option(display_option, 500)
    plt.rcParams["figure.figsize"] = (17, 10)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
):
    """
    Wrap an iterable with a progress bar widget in a Jupyter notebook,
    yielding the wrapped items. Its dependencies must be enabled on launching
    Jupyter, such as in the following way:

    jupyter nbextension enable --py widgetsnbextension

    The progress bar can be used in a way like the following:

    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)

    every: update the display every this many items (defaults to every item
    for short sequences, otherwise roughly every 0.5 %); size: the sequence
    length, required via every for iterators without len(); name: label text.
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            # no len(): treat the sequence as an iterator of unknown length
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200) # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        # unknown total: show an indeterminate-style bar
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            # refresh the widget only every `every` items
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name = name,
                        index = index,
                        size = size
                    )
            yield record
    except:
        # mark the bar red on any error, then propagate it
        progress.bar_style = "danger"
        raise
    else:
        # mark the bar green on completion and show the final count
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name = name,
            index = str(index or "?")
        )
# Instantiate the module-level registry of clocks on import.
_main()
|
wdbm/shijian
|
shijian.py
|
setup_Jupyter
|
python
|
def setup_Jupyter():
sns.set(context = "paper", font = "monospace")
warnings.filterwarnings("ignore")
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
plt.rcParams["figure.figsize"] = (17, 10)
|
Set up a Jupyter notebook with a few defaults.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L1803-L1811
| null |
# -*- coding: utf-8 -*-
"""
################################################################################
# #
# shijian #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program provides change, time, file, list, statistics, language and #
# other utilities. #
# #
# copyright (C) 2014 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
from __future__ import division
import calendar
import collections
import datetime
import functools
import inspect
import logging
import math
import os
import pickle
import random
import re
import sys
import tempfile
import time
import unicodedata
import uuid
import warnings
if sys.version_info[0] < 3:
import subprocess32 as subprocess
else:
import subprocess
import dateutil.relativedelta
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import scipy.interpolate
import scipy.io.wavfile
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import technicolor
# module identity and version stamp
name = "shijian"
version = "2018-06-02T1644Z"
# module logger with colourised stream output via technicolor
log = logging.getLogger(name)
log.addHandler(technicolor.ColorisingStreamHandler())
log.setLevel(logging.INFO)
def _main():
    """Instantiate the module-level registry of clocks."""
    global clocks
    clocks = Clocks()
def time_UNIX(
    style = "UNIX time S"
):
    """Return the current UTC time styled, by default as UNIX seconds."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def time_UTC(
    style = None
):
    """Return the current UTC time in the specified style."""
    now = datetime.datetime.utcnow()
    return style_datetime_object(datetime_object = now, style = style)
def filename_time_UNIX(
    style = "UNIX time S.SSSSSS",
    extension = None
):
    """
    Return a proposed filename based on the current UNIX time, with an
    optional extension appended.
    """
    filename = str(time_UNIX(style = style))
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def filename_time_UTC(
    style = "YYYY-MM-DDTHHMMSSZ",
    extension = None
):
    """
    Return a proposed filename based on the current UTC time, with an
    optional extension appended.
    """
    now = datetime.datetime.utcnow()
    filename = style_datetime_object(datetime_object = now, style = style)
    if extension:
        filename += extension
    return propose_filename(filename = filename)
def style_minimal_seconds(seconds):
    """
    Return a minimal human-readable representation of a number of seconds,
    e.g. "2 hours 5 minutes", omitting zero-valued intervals.
    """
    delta = dateutil.relativedelta.relativedelta(seconds = seconds)
    parts = []
    for interval in ("days", "hours", "minutes", "seconds"):
        amount = getattr(delta, interval)
        if amount:
            parts.append("{} {}".format(int(amount), interval))
    return " ".join(parts)
def style_UNIX_timestamp(
    timestamp = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """Return a UNIX timestamp styled as a datetime string."""
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return style_datetime_object(datetime_object = moment, style = style)
def style_datetime_object(
    datetime_object = None,
    style = "YYYY-MM-DDTHHMMZ"
):
    """
    Return a string representation of a datetime.datetime or a
    datetime.timedelta object in a named style.

    For datetime objects the style names select strftime formats, with
    unknown styles falling back to the filename-safe "YYYY-MM-DDTHHMMZ"
    form. For timedelta objects the style is a format template with fields
    Y, D, H, M, S (running totals) and YYYY, DD, HH, MM, SS (zero-padded
    components). None is returned for any other input type.
    """
    if type(datetime_object) is datetime.datetime:
        # filename safe
        if style == "YYYY-MM-DDTHHMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
        # filename safe with seconds
        elif style == "YYYY-MM-DDTHHMMSSZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%SZ")
        # filename safe with seconds and microseconds
        elif style == "YYYY-MM-DDTHHMMSSMMMMMMZ":
            return datetime_object.strftime("%Y-%m-%dT%H%M%S%fZ")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS UTC":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S UTC")
        # elegant
        elif style == "YYYY-MM-DD HH:MM:SS Z":
            return datetime_object.strftime("%Y-%m-%d %H:%M:%S Z")
        # UNIX time in seconds with second fraction
        elif style == "UNIX time S.SSSSSS":
            return (datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds()
        # UNIX time in seconds rounded
        elif style == "UNIX time S":
            return int((datetime_object -\
                datetime.datetime.utcfromtimestamp(0)).total_seconds())
        # human-readable date
        elif style == "day DD month YYYY":
            return datetime_object.strftime("%A %d %B %Y")
        # human-readable time and date
        elif style == "HH:MM day DD month YYYY":
            return datetime_object.strftime("%H:%M %A %d %B %Y")
        # human-readable time with seconds and date
        elif style == "HH:MM:SS day DD month YYYY":
            return datetime_object.strftime("%H:%M:%S %A %d %B %Y")
        # human-readable date with time with seconds
        elif style == "day DD month YYYY HH:MM:SS":
            return datetime_object.strftime("%A %d %B %Y %H:%M:%S")
        # human-readable-audible time with seconds and date
        # NOTE(review): "SS sounds" in this style key looks like a typo for
        # "SS seconds", but the key is part of the public style vocabulary
        # and is preserved.
        elif style == "HH hours MM minutes SS sounds day DD month YYYY":
            return datetime_object.strftime("%H hours %M minutes %S seconds %A %d %B %Y")
        # human-readable days, hours and minutes
        elif style == "DD:HH:MM":
            return datetime_object.strftime("%d:%H:%M")
        # human-readable days, hours, minutes and seconds
        elif style == "DD:HH:MM:SS":
            return datetime_object.strftime("%d:%H:%M:%S")
        # human-readable time with seconds
        elif style == "HH:MM:SS":
            return datetime_object.strftime("%H:%M:%S")
        # human-readable-audible time with seconds
        elif style == "HH hours MM minutes SS seconds":
            return datetime_object.strftime("%H hours %M minutes %S seconds")
        # filename safe
        else:
            return datetime_object.strftime("%Y-%m-%dT%H%MZ")
    if type(datetime_object) is datetime.timedelta:
        # The datetime default style is not meaningful for a timedelta, so a
        # days/hours/minutes/seconds template is substituted.
        if style == "YYYY-MM-DDTHHMMZ":
            style = "{DD} days, {HH}:{MM}:{SS}"
        # NOTE(review): timedelta objects always have a "seconds" attribute,
        # so the else branch looks unreachable for timedelta input — confirm.
        if hasattr(datetime_object, "seconds"):
            seconds = datetime_object.seconds + datetime_object.days * 24 * 3600
        else:
            seconds = int(datetime_object)
        # Decompose total seconds into years/days/hours/minutes/seconds,
        # recording the running totals before each reduction.
        seconds_total = seconds
        minutes = int(math.floor(seconds / 60))
        minutes_total = minutes
        seconds -= minutes * 60
        hours = int(math.floor(minutes / 60))
        hours_total = hours
        minutes -= hours * 60
        days = int(math.floor(hours / 24))
        days_total = days
        hours -= days * 24
        years = int(math.floor(days / 365))
        years_total = years
        days -= years * 365
        return style.format(**{
            "Y" : years_total,
            "D" : days_total,
            "H" : hours_total,
            "M" : minutes_total,
            "S" : seconds_total,
            "YYYY": str(years).zfill(4),
            "DD" : str(days).zfill(2),
            "HH" : str(hours).zfill(2),
            "MM" : str(minutes).zfill(2),
            "SS" : str(seconds).zfill(2)
        })
def HHMM_to_minutes(
    HHMM # string "HHMM"
):
    """Convert a 24-hour "HHMM" time string to minutes past midnight."""
    return int(HHMM[:2]) * 60 + int(HHMM[2:])
def now_in_minutes():
    """Return the current UTC time as minutes past midnight."""
    moment = datetime.datetime.utcnow()
    return moment.hour * 60 + moment.minute
def in_daily_time_range(
    time_range = None, # string "HHMM--HHMM" e.g. "1700--1000"
    time_start = None, # string "HHMM" e.g. "1700"
    time_stop  = None  # string "HHMM" e.g. "1000"
):
    """
    Return True if the current UTC time is within a daily time range that may
    span midnight, specified either as a single "HHMM--HHMM" string or as
    separate start and stop "HHMM" strings; return None if no range is
    specified.
    """
    if time_range is None and time_start is None and time_stop is None:
        return None
    if time_range is not None:
        parts      = time_range.split("--")
        time_start = parts[0]
        time_stop  = parts[1]
    minutes_per_day = 1440
    start = HHMM_to_minutes(time_start)
    stop  = HHMM_to_minutes(time_stop)
    # modular arithmetic handles ranges that wrap past midnight
    return (now_in_minutes() - start) % minutes_per_day <=\
           (stop - start) % minutes_per_day
def timer(function):
    """
    Decorator that times each call of the decorated function with a Clock
    named after the function and registered with the module clocks registry.
    """
    @functools.wraps(function)
    def decoration(
        *args,
        **kwargs
    ):
        # Previously the call arguments were introspected with
        # inspect.getcallargs and discarded; that dead work is removed.
        clock = Clock(name = function.__name__)
        result = function(*args, **kwargs)
        clock.stop()
        return result
    return decoration
class Clock(object):
    """
    A stopwatch-style timer accumulating elapsed wall-clock time (UTC).

    A clock can be started, stopped, updated and reset; elapsed time is held
    in a timedelta accumulator so repeated start/stop cycles add up. On
    instantiation a clock registers itself with a module-level ``clocks``
    collection when one exists and, by default, starts immediately.
    """
    def __init__(
        self,
        name = None,
        start = True
    ):
        self._name = name
        self._start = start # Boolean start clock on instantiation
        self._start_time = None # internal (value to return)
        self._start_time_tmp = None # internal (value for calculations)
        self._stop_time = None # internal (value to return)
        self._update_time = None # internal
        # If no name is specified, generate a unique one.
        if self._name is None:
            self._name = UID()
        # If a global clock list is detected, add a clock instance to it.
        if "clocks" in globals():
            clocks.add(self)
        self.reset()
        if self._start:
            self.start()
    def start(self):
        """Record the start time (UTC) and begin timing."""
        self._start_time_tmp = datetime.datetime.utcnow()
        self._start_time = datetime.datetime.utcnow()
    def stop(self):
        """Fold the running interval into the accumulator and record the stop time."""
        self.update()
        self._update_time = None
        self._start_time_tmp = None
        self._stop_time = datetime.datetime.utcnow()
    # Update the clock accumulator.
    def update(self):
        # Add the interval since the last update (or since the start, if no
        # update has happened yet) to the accumulator.
        if self._update_time:
            self.accumulator += (
                datetime.datetime.utcnow() - self._update_time
            )
        else:
            self.accumulator += (
                datetime.datetime.utcnow() - self._start_time_tmp
            )
        self._update_time = datetime.datetime.utcnow()
    def reset(self):
        """Zero the accumulator and clear the internal running start time."""
        self.accumulator = datetime.timedelta(0)
        self._start_time_tmp = None
    # If the clock has a start time, add the difference between now and the
    # start time to the accumulator and return the accumulation. If the clock
    # does not have a start time, return the accumulation.
    def elapsed(self):
        if self._start_time_tmp:
            self.update()
        return self.accumulator
    def name(self):
        """Return the clock name."""
        return self._name
    def time(self):
        """Return the accumulated elapsed time in seconds."""
        return self.elapsed().total_seconds()
    def start_time(self):
        """Return the styled start time, or "none" if the clock never started."""
        if self._start_time:
            return style_datetime_object(datetime_object = self._start_time)
        else:
            return "none"
    def stop_time(self):
        """Return the styled stop time, or "none" if the clock never stopped."""
        if self._stop_time:
            return style_datetime_object(datetime_object = self._stop_time)
        else:
            return "none"
    def report(self):
        """
        Return a multi-line attribute/value report.

        NOTE(review): ljust(40) is applied to the strings including their
        leading "\n", so the visible label columns are one character narrower
        than the 39-wide header — confirm whether this alignment is intended.
        """
        string = "clock attribute".ljust(39) + "value"
        string += "\nname".ljust(40) + self.name()
        string += "\ntime start (s)".ljust(40) + self.start_time()
        string += "\ntime stop (s)".ljust(40) + self.stop_time()
        string += "\ntime elapsed (s)".ljust(40) + str(self.time())
        string += "\n"
        return string
    def printout(self):
        """Print the report."""
        print(self.report())
class Clocks(object):
    """
    A registry of Clock objects that can report their timings, either as mean
    times grouped by clock name ("statistics", the default) or one line per
    registered clock ("full").
    """
    def __init__(
        self
    ):
        self._list_of_clocks = []
        self._default_report_style = "statistics"
    def add(
        self,
        clock
    ):
        """Register a clock with this registry."""
        self._list_of_clocks.append(clock)
    def report(
        self,
        style = None
    ):
        """Return a text report of the registered clocks in the given style."""
        if style is None:
            style = self._default_report_style
        if self._list_of_clocks != []:
            if style == "statistics":
                # Group measured times by clock name, preserving the order in
                # which names are first seen.
                times_by_name = collections.OrderedDict()
                for clock in self._list_of_clocks:
                    times_by_name.setdefault(clock.name(), []).append(clock.time())
                # One line per clock name with its mean measured time.
                string = "clock type".ljust(39) + "mean time (s)"
                for name, times in list(times_by_name.items()):
                    string += "\n" + str(name).ljust(39) + str(sum(times) / len(times))
                string += "\n"
            elif style == "full":
                # One line per registered clock.
                string = "clock".ljust(39) + "time (s)"
                for clock in self._list_of_clocks:
                    string += "\n" + str(clock.name()).ljust(39) + str(clock.time())
                string += "\n"
        else:
            string = "no clocks"
        return string
    def printout(
        self,
        style = None
    ):
        """Print the report."""
        if style is None:
            style = self._default_report_style
        print(self.report(style = style))
class Progress(object):
    """
    Track the progress of a task as (fraction-complete, UNIX-timestamp) data
    and estimate its completion time by linear extrapolation.

    In quick-calculation mode, data are recorded at most once per update-rate
    interval and the linear fit uses a spread of the data, trading accuracy
    for speed.
    """
    def __init__(
        self
    ):
        self.data = [] # (fraction complete, UNIX timestamp) pairs
        self.quick_calculation = False
        self.update_rate = 1 # s
        self.clock = Clock(name = "progress update clock")
    def engage_quick_calculation_mode(
        self
    ):
        """Enable rate-limited data recording and quick extrapolation."""
        self.quick_calculation = True
    def disengage_quick_calculation_mode(
        self
    ):
        """Disable quick-calculation mode."""
        self.quick_calculation = False
    def add_datum(
        self,
        fraction = None,
        style = None
    ):
        """
        Record a fraction-complete datum (rate-limited when in
        quick-calculation mode) and return a status message.
        """
        if len(self.data) == 0:
            self.data.append((fraction, time_UNIX()))
        elif self.quick_calculation is True:
            time_duration_since_last_update = self.clock.time()
            if time_duration_since_last_update >= self.update_rate:
                self.data.append((fraction, time_UNIX()))
                self.clock.reset()
                self.clock.start()
        else:
            self.data.append((fraction, time_UNIX()))
        return self.status(style = style)
    def estimated_time_of_completion(
        self
    ):
        """
        Return the estimated completion time as a datetime object, by fitting
        fraction-versus-timestamp linearly and evaluating at fraction = 1.
        Return 0 with fewer than two data; fall back to timestamp 0 when the
        fit fails.
        """
        if len(self.data) <= 1:
            return 0
        else:
            try:
                model_values = model_linear(
                    self.data,
                    quick_calculation = self.quick_calculation
                )
                b0 = model_values[0]
                b1 = model_values[1]
                x = 1
                y = b0 + b1 * x
            # Catch only ordinary errors (e.g. a degenerate fit); a bare
            # except would also swallow KeyboardInterrupt/SystemExit.
            except Exception:
                y = 0
            datetime_object = datetime.datetime.fromtimestamp(int(y))
            return datetime_object
    # estimated time of arrival
    def ETA(
        self
    ):
        """Return the estimated completion time as a styled string."""
        if len(self.data) <= 1:
            return style_datetime_object(
                datetime_object = datetime.datetime.now()
            )
        else:
            return style_datetime_object(
                datetime_object = self.estimated_time_of_completion()
            )
    # estimated time remaining
    def ETR(
        self
    ):
        """Return the estimated remaining seconds (0 if unknown or in the past)."""
        if len(self.data) <= 1:
            return 0
        else:
            delta_time = \
                self.estimated_time_of_completion() - datetime.datetime.now()
            if delta_time.total_seconds() >= 0:
                return delta_time.total_seconds()
            else:
                return 0
    def fraction(
        self
    ):
        """Return the most recently recorded fraction complete."""
        return self.data[-1][0]
    def percentage(
        self
    ):
        """Return the most recently recorded percentage complete."""
        return 100 * self.fraction()
    def status(
        self,
        style = None
    ):
        """Return a one-line status message (only the default style exists)."""
        if style is None:
            message =\
                "{percentage:.2f}% complete; " +\
                "estimated completion time: {ETA} ({ETR:.2f} s)\r"
            return message.format(
                percentage = self.percentage(),
                ETA = self.ETA(),
                ETR = self.ETR()
            )
def UID():
    """Return a universally unique identifier as a string."""
    identifier = uuid.uuid4()
    return str(identifier)
def unique_number(
    style = None
):
    """
    Return a number not previously returned by this function. The default
    mode yields successive integers starting at 1; mode "integer 3
    significant figures" yields successive integers starting at 100 and
    raises an exception once the 3-digit range is exhausted. Counters are
    kept in module globals so uniqueness holds across calls.
    """
    # mode: integer 3 significant figures
    if style == "integer 3 significant figures":
        global unique_numbers_3_significant_figures
        if "unique_numbers_3_significant_figures" not in globals():
            unique_numbers_3_significant_figures = []
        if not unique_numbers_3_significant_figures:
            unique_numbers_3_significant_figures.append(100)
        else:
            unique_numbers_3_significant_figures.append(
                unique_numbers_3_significant_figures[-1] + 1
            )
        if unique_numbers_3_significant_figures[-1] > 999:
            raise Exception
        return unique_numbers_3_significant_figures[-1]
    # mode: integer
    else:
        global unique_numbers
        if "unique_numbers" not in globals():
            unique_numbers = []
        if not unique_numbers:
            unique_numbers.append(1)
        else:
            unique_numbers.append(unique_numbers[-1] + 1)
        return unique_numbers[-1]
def unique_3_digit_number():
    """Return a unique integer in the 3-digit range 100--999."""
    requested_style = "integer 3 significant figures"
    return unique_number(style = requested_style)
## @brief make text filename or URL safe
def slugify(
    text = None,
    filename = True,
    URL = False,
    return_str = True
):
    """
    Normalise text to a filename-safe or URL-safe "slug".

    Accents are stripped via NFKD normalisation, non-ASCII and punctuation
    characters are removed, and whitespace is collapsed to underscores
    (filename mode, the default) or to hyphens with lowercasing (URL mode).
    """
    if not sys.version_info >= (3, 0):
        text = unicode(text, "utf-8")
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore")
    text = text.decode("utf-8")
    # Raw strings avoid invalid-escape-sequence warnings on modern Python.
    text = re.sub(r"[^\w\s-]", "", text)
    text = text.strip()
    if filename and not URL:
        text = re.sub(r"[\s]+", "_", text)
    elif URL:
        text = text.lower()
        text = re.sub(r"[-\s]+", "-", text)
    if return_str:
        text = str(text)
    return text
## @brief propose a filename
# @detail Return a filename string, generated from the current UTC time when
# none is given, optionally slugified, and (unless overwriting is allowed)
# suffixed with an integer so that no existing file would be overwritten.
def propose_filename(
    filename = None,
    overwrite = False,
    slugify_filename = True,
    exclude_extension_from_slugify = True
):
    """
    Return a filename that is safe to use for output.

    When no filename is specified, one is generated from the current UTC
    time. The name is optionally slugified (by default the extension is
    excluded from slugification). Unless overwriting is requested, a
    numerical suffix is appended repeatedly until a name is found that does
    not collide with an existing file.
    """
    if not filename:
        filename = time_UTC()
    proposed = filename
    if slugify_filename:
        if exclude_extension_from_slugify:
            base, extension = os.path.splitext(os.path.basename(filename))
            proposed = slugify(text = base) + extension
        else:
            proposed = slugify(text = filename)
    if not overwrite:
        # Collision avoidance rebuilds candidates from the original filename
        # pieces, appending an increasing suffix.
        suffix = 0
        directory = os.path.dirname(filename)
        base, extension = os.path.splitext(os.path.basename(filename))
        while os.path.exists(proposed):
            suffix += 1
            candidate = base + "_" + str(suffix) + extension
            proposed = directory + "/" + candidate if directory else candidate
    return proposed
def tmp_filepath():
    """
    Return an extensionless filepath at the directory /tmp without creating a
    file at the filepath.

    A UUID supplies the name instead of the private helper
    tempfile._get_candidate_names, which is not part of the public API and
    may change between Python versions.
    """
    return "/tmp/" + uuid.uuid4().hex
def tail(
    filepath = "log.txt",
    lines = 50
):
    """
    Return a specified number of last lines of a specified file (as bytes,
    via the system `tail` utility). If there is an error, the file does not
    exist or the file is empty, return False.
    """
    try:
        path = os.path.expanduser(os.path.expandvars(filepath))
        if not os.path.isfile(path):
            return False
        text = subprocess.check_output(["tail", "-" + str(lines), path])
        return text if text else False
    # Catch only ordinary errors (e.g. a failing tail invocation); a bare
    # except would also swallow KeyboardInterrupt/SystemExit.
    except Exception:
        return False
def ensure_platform_release(
    keyphrase = "el7",
    require = True,
    warn = False
):
    """
    Ensure the platform release string contains the keyphrase. When it does
    not, optionally log a warning (warn) and/or log fatally and raise
    EnvironmentError (require).
    """
    import platform
    release = platform.release()
    if keyphrase in release:
        return
    message =\
        "inappropriate environment: " +\
        "\"{keyphrase}\" required; \"{release}\" available".format(
            keyphrase = keyphrase,
            release = release
        )
    if warn is True:
        log.warning(message)
    if require is True:
        log.fatal(message)
        raise(EnvironmentError)
def ensure_program_available(
    program
):
    """
    Ensure the named program can be found on PATH; log an error and raise
    EnvironmentError when it cannot.
    """
    log.debug("ensure program {program} available".format(
        program = program
    ))
    if which(program) is not None:
        log.debug("program {program} available".format(
            program = program
        ))
    else:
        log.error("program {program} not available".format(
            program = program
        ))
        raise(EnvironmentError)
def which(
    program
):
    """
    Return the full path of the named program if it is an executable file or
    can be found on PATH, otherwise None (a reimplementation of Unix which).
    """
    def is_executable(filepath):
        return os.path.isfile(filepath) and os.access(filepath, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        # An explicit path is accepted only if it is itself executable.
        if is_executable(program):
            return(program)
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(path.strip('"'), program)
            if is_executable(candidate):
                return candidate
    return None
def running(
    program
):
    """
    Return True if the output of `ps -A` contains a non-defunct line
    mentioning the program name, otherwise False.
    """
    target = str.encode(program)
    listing = subprocess.Popen(
        ["ps", "-A"],
        stdout = subprocess.PIPE
    ).communicate()[0].split(b"\n")
    return any(
        target in line and b"defunct" not in line for line in listing
    )
def ensure_file_existence(
    filename
):
    """
    Ensure the specified file exists (environment variables in the path are
    expanded for the check); log an error and raise IOError when it does not.

    The log templates previously lacked the {filename} placeholder while
    still calling .format(filename = ...); the placeholder is restored.
    """
    log.debug("ensure existence of file {filename}".format(
        filename = filename
    ))
    if not os.path.isfile(os.path.expandvars(filename)):
        log.error("file {filename} does not exist".format(
            filename = filename
        ))
        raise(IOError)
    else:
        log.debug("file {filename} found".format(
            filename = filename
        ))
def rm_file(filename):
    """Delete the specified file."""
    os.unlink(filename)
## @brief return a naturally-sorted list of filenames that are in a sequence or
## a dictionary of lists of filenames that are in a sequence
def find_file_sequences(
    extension = "png",
    directory = ".",
    return_first_sequence_only = True,
):
    """
    Find filenames at the directory that contain digits and the extension
    (and so look like members of a numbered sequence). By default return the
    naturally-sorted first sequence found — now an empty list, rather than a
    StopIteration crash, when no sequence exists. Otherwise return a
    dictionary mapping each sequence pattern (digits replaced by "XXX") to
    its list of filenames.
    """
    filenames_of_directory = os.listdir(directory)
    filenames_found = [
        filename for filename in filenames_of_directory if re.match(
            r".*\d+.*\." + extension,
            filename
        )
    ]
    filename_sequences = collections.defaultdict(list)
    for filename in filenames_found:
        # Raw string avoids an invalid-escape-sequence warning.
        pattern = re.sub(r"\d+", "XXX", filename)
        filename_sequences[pattern].append(filename)
    if return_first_sequence_only is True:
        # Guard against StopIteration when no sequences were found.
        if not filename_sequences:
            return []
        first_key_identified = next(iter(filename_sequences.keys()))
        filename_sequence = \
            natural_sort(filename_sequences[first_key_identified])
        return filename_sequence
    else:
        return filename_sequences
## @brief return a list of files at a specified directory
def ls_files(
    directory = "."
):
    """Return the names of the regular files directly within the directory."""
    return [
        entry for entry in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, entry))
    ]
## @brief return a list of files, directories and subdirectories at a specified
## directory
def directory_listing(
    directory = ".",
):
    """Return the paths of all files below the directory, walked recursively."""
    paths = []
    for root, _, filenames in os.walk(directory):
        paths.extend(os.path.join(root, filename) for filename in filenames)
    return paths
## @brief return a list of filepaths at a directory, optionally filtered to
## contain a specified extension
def filepaths_at_directory(
    directory = None,
    extension_required = None
):
    """
    Return the absolute paths of the files at the directory, optionally
    keeping only those whose extension contains the given text. Log an error
    and raise IOError when the directory does not exist.
    """
    if not os.path.isdir(directory):
        log.error("error -- directory {directory} not found".format(directory = directory))
        raise(IOError)
    filepaths = []
    for entry in os.listdir(directory):
        candidate = os.path.join(directory, entry)
        if os.path.isfile(candidate):
            filepaths.append(os.path.abspath(candidate))
    if extension_required:
        filepaths = [
            filepath for filepath in filepaths
            if extension_required in os.path.splitext(filepath)[1]
        ]
    return filepaths
def engage_command(
    command = None,
    background = True,
    timeout = None # seconds
):
    """
    Run a shell (Bash) command. In background mode (default), launch the
    command and return None immediately (any timeout is ignored, with a
    warning). In foreground mode, wait for completion — up to the optional
    timeout — and return the command's standard output as bytes, or False if
    the command times out or otherwise fails.
    """
    log.debug(command)
    if background:
        if timeout:
            log.warning("warning -- command set to run in background; ignoring timeout")
        subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash"
        )
        return None
    elif not background:
        process = subprocess.Popen(
            [command],
            shell = True,
            executable = "/bin/bash",
            stdout = subprocess.PIPE
        )
        try:
            process.wait(timeout = timeout)
            output, errors = process.communicate(timeout = timeout)
            return output
        # Catch only ordinary errors (e.g. subprocess.TimeoutExpired); a bare
        # except would also swallow KeyboardInterrupt/SystemExit.
        except Exception:
            process.kill()
            return False
    else:
        return None
def percentage_power():
    """
    Return the battery charge percentage reported by upower as a string
    (e.g. "85%"), "100%" when only a line-power device is present, or None
    when no power device is found or any error occurs.
    """
    try:
        # Run in the foreground so that output is actually captured; the
        # original ran the command in the background, for which
        # engage_command always returns None.
        output = engage_command(command = "upower -e", background = False)
        if isinstance(output, bytes):
            output = output.decode()
        filenames_power = [line for line in output.split("\n") if line]
        filenames_power_battery = [filename for filename in filenames_power if "battery" in filename]
        filename_power_battery = filenames_power_battery[0] if filenames_power_battery else None
        filenames_power_line = [filename for filename in filenames_power if "line" in filename]
        filename_power_line = filenames_power_line[0] if filenames_power_line else None
        if filename_power_battery:
            power_data = engage_command(
                command = "upower -i {filename}".format(filename = filename_power_battery),
                background = False
            )
            if isinstance(power_data, bytes):
                power_data = power_data.decode()
            percentage_power = [line for line in power_data.split("\n") if "percentage" in line][0].split()[1]
        elif filename_power_line:
            percentage_power = "100%"
        else:
            percentage_power = None
        return percentage_power
    # Catch only ordinary errors; a bare except would also swallow
    # KeyboardInterrupt/SystemExit.
    except Exception:
        return None
def convert_type_list_elements(
    list_object = None,
    element_type = str
):
    """
    Recursively convert all elements and all elements of all sublists of a
    list to a specified type and return the new list.

    Generalized beyond the original str-only support: any callable type
    (e.g. int, float, str) is applied to every non-list element, with str
    remaining the default.
    """
    return [
        element_type(element) if not isinstance(element, list)
        else convert_type_list_elements(
            list_object = element,
            element_type = element_type
        )
        for element in list_object
    ]
class List_Consensus(list):
    """
    This class is designed to instantiate a list of elements. It features
    functionality that limits approximately the memory usage of the list. On
    estimating the size of the list as greater than the specified or default
    size limit, the list reduces the number of elements it contains. The list
    provides functionality to return its most frequent element, which can be
    used to determine its "consensus" element.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        # approximate size limit in bytes (sys.getsizeof is shallow, so this
        # bounds the list object itself, not its elements)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate size limit in bytes."""
        if size is not None:
            self.size_constraint = size
    def ensure_size(
        self,
        size = None
    ):
        """
        This function removes the least frequent elements until the size
        constraint is met.
        """
        if size is None:
            size = self.size_constraint
        while sys.getsizeof(self) > size:
            element_frequencies = collections.Counter(self)
            infrequent_element = element_frequencies.most_common()[-1:][0][0]
            self.remove(infrequent_element)
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append an element, then optionally enforce the size constraint."""
        if size is None:
            size = self.size_constraint
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """
        Return the most frequent element of the list, or None when the list
        is empty or its elements are unhashable.
        """
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        # Catch only the ordinary errors (IndexError for an empty list,
        # TypeError for unhashable elements); a bare except would also
        # swallow KeyboardInterrupt/SystemExit.
        except Exception:
            return None
## @brief return a naturally-sorted list
# @detail This function returns a naturally-sorted list from an input list.
def natural_sort(
    list_object
):
    """Sort so that embedded numbers compare numerically ("a2" before "a10")."""
    def alphanumeric_key(key):
        return [
            int(text) if text.isdigit() else text.lower()
            for text in re.split("([0-9]+)", key)
        ]
    return sorted(list_object, key = alphanumeric_key)
def indices_of_list_element_duplicates(
    x
):
    """
    Yield the index of every element that has appeared earlier in the
    sequence; lists and dictionaries are converted to tuples so that they
    can be compared.
    """
    observed = set()
    for index, element in enumerate(x):
        if isinstance(element, list):
            element = tuple(element)
        if isinstance(element, dict):
            element = tuple(element.items())
        if element in observed:
            yield index
        else:
            observed.add(element)
def indices_of_greatest_values(
    x,
    number = 5
):
    """
    Return the indices of (at most `number` of) the greatest values of x, in
    ascending order of value.
    """
    number = min(number, len(x))
    ranked = sorted(enumerate(x), key = lambda pair: pair[1])
    return [index for index, _ in ranked[-number:]]
def unique_list_elements(x):
    """Return the elements of x with duplicates removed, keeping first-seen order."""
    deduplicated = []
    for element in x:
        if element in deduplicated:
            continue
        deduplicated.append(element)
    return deduplicated
def select_spread(
    list_of_elements = None,
    number_of_elements = None
):
    """
    This function returns the specified number of elements of a list spread
    approximately evenly.
    """
    # If at least as many elements are requested as exist, return the list
    # unchanged.
    if len(list_of_elements) <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    # A single element is taken from (approximately) the middle of the list.
    if number_of_elements == 1:
        return [list_of_elements[int(round((len(list_of_elements) - 1) / 2))]]
    # Otherwise take one element near the start of the first of
    # number_of_elements equal spans, then recurse on the remainder of the
    # list for the remaining elements.
    return \
        [list_of_elements[int(round((len(list_of_elements) - 1) /\
        (2 * number_of_elements)))]] +\
        select_spread(list_of_elements[int(round((len(list_of_elements) - 1) /\
        (number_of_elements))):], number_of_elements - 1)
def split_list(
    list_object = None,
    granularity = None
):
    """
    Split a list into approximately `granularity` sublists and return them as
    a list of lists. A negative granularity raises an exception; a
    granularity greater than the list length yields one single-element list
    per element.
    """
    if granularity < 0:
        raise Exception("negative granularity")
    mean_length = len(list_object) / float(granularity)
    if len(list_object) <= granularity:
        return [[element] for element in list_object]
    sublists = []
    position = float(0)
    # Advance through the list in (possibly fractional) steps of mean_length,
    # truncating to integer slice boundaries.
    while position < len(list_object):
        sublists.append(list_object[int(position):int(position + mean_length)])
        position += mean_length
    return sublists
def ranges_edge_pairs(
    extent = None,
    range_length = None
):
    """
    Return (start, stop) index pairs partitioning an extent into spans of at
    most range_length. For example, splitting 76 variables into groups of at
    most 20 gives ranges 0--20, 21--41, 42--62 and 63--76:

    >>> ranges_edge_pairs(
    ...     extent       = 76, # number of variables
    ...     range_length = 20  # maximum number of variables per plot
    ... )
    [(0, 20), (21, 41), (42, 62), (63, 76)]
    """
    number_of_ranges = int(math.ceil(extent / range_length))
    edge_pairs = []
    for index in range(number_of_ranges):
        start = index * range_length + index
        stop = min((index + 1) * range_length + index, extent)
        edge_pairs.append((start, stop))
    return edge_pairs
def Markdown_list_to_dictionary(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list of "- name: value" lines into a
    nested dictionary; a bullet without a value opens a new nested branch.
    """
    # pattern capturing (indentation, name, optional value) per bullet line
    line = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [{}]
    for indent, name, value in line.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            # A deeper indent is valid only directly after a valueless bullet
            # opened a fresh (still-empty) branch.
            assert not stack[-1], "unexpected indent"
        elif indent < depth:
            # NOTE(review): only one level is popped per dedent, so a dedent
            # spanning multiple levels may mis-nest — confirm inputs dedent
            # one step at a time.
            stack.pop()
        stack[-1][name] = value or {}
        if not value:
            # new branch
            stack.append(stack[-1][name])
        depth = indent
    return stack[0]
def Markdown_list_to_OrderedDict(
    Markdown_list = None
):
    """
    Parse an indented Markdown bullet list of "- name: value" lines into a
    nested OrderedDict (preserving bullet order); a bullet without a value
    opens a new nested branch.
    """
    # pattern capturing (indentation, name, optional value) per bullet line
    line = re.compile(r"( *)- ([^:\n]+)(?:: ([^\n]*))?\n?")
    depth = 0
    stack = [collections.OrderedDict()]
    for indent, name, value in line.findall(Markdown_list):
        indent = len(indent)
        if indent > depth:
            # A deeper indent is valid only directly after a valueless bullet
            # opened a fresh (still-empty) branch.
            assert not stack[-1], "unexpected indent"
        elif indent < depth:
            # NOTE(review): only one level is popped per dedent, so a dedent
            # spanning multiple levels may mis-nest — confirm inputs dedent
            # one step at a time.
            stack.pop()
        stack[-1][name] = value or collections.OrderedDict()
        if not value:
            # new branch
            stack.append(stack[-1][name])
        depth = indent
    return stack[0]
def open_configuration(
    filename = None
):
    """
    Read a Markdown bullet-list configuration file and return it parsed as a
    nested OrderedDict.

    The file is opened with a context manager so the handle is closed
    deterministically (the original left an open file handle behind).
    """
    with open(filename, "r") as file_configuration:
        content = file_configuration.read()
    return Markdown_list_to_OrderedDict(content)
def change_list_resolution(
    values = None,
    length = None,
    interpolation_type = "linear",
    dimensions = 1
):
    """
    Resample a list of values to a new length by interpolation over implicit
    integer x positions. Return the resampled y values (dimensions = 1) or a
    tuple (x, y) (dimensions = 2).
    """
    x_original = list(range(0, len(values)))
    interpolation = scipy.interpolate.interp1d(
        x_original,
        values,
        kind = interpolation_type
    )
    x_new = list(numpy.linspace(min(x_original), max(x_original), length))
    y_new = [float(interpolation(x)) for x in x_new]
    if dimensions == 2:
        return (x_new, y_new)
    if dimensions == 1:
        return y_new
def change_waveform_to_rectangle_waveform(
    values = None,
    fraction_amplitude = 0.01
):
    """
    Convert a waveform (NumPy array) in place to a rectangle waveform:
    non-negative samples become the array maximum and negative samples the
    array minimum, via an intermediate scaling by fraction_amplitude.
    """
    peak = fraction_amplitude * max(values)
    trough = fraction_amplitude * min(values)
    values[values >= 0] = peak
    values[values < 0] = trough
    values[:] = [sample * (1 / fraction_amplitude) for sample in values]
    return values
def change_sound_file_waveform_to_sound_file_rectangle_waveform(
    filename_waveform = None,
    filename_rectangle_waveform = None,
    overwrite = False,
    fraction_amplitude = 0.01
):
    """
    Read a WAV file, convert its waveform to a rectangle waveform and write
    the result to a WAV file (defaulting to a non-overwriting variant of the
    input filename via propose_filename).
    """
    if filename_rectangle_waveform is None:
        filename_rectangle_waveform = filename_waveform
    filename_rectangle_waveform = propose_filename(
        filename = filename_rectangle_waveform,
        overwrite = overwrite
    )
    rate, values = scipy.io.wavfile.read(filename_waveform)
    values = change_waveform_to_rectangle_waveform(
        values = values,
        fraction_amplitude = fraction_amplitude
    )
    # The original re-applied the clip-and-rescale steps here a second time;
    # that duplication is removed because
    # change_waveform_to_rectangle_waveform already performs the full
    # transformation.
    scipy.io.wavfile.write(filename_rectangle_waveform, rate, values)
def normalize(
    x,
    summation = None
):
    """
    Scale the elements of x by 1/summation; by default the summation is the
    sum of x, i.e. the result is normalised to unity.
    """
    if summation is None:
        summation = sum(x) # normalize to unity
    return [value / summation for value in x]
def rescale(
    x,
    minimum = 0,
    maximum = 1
):
    """Linearly map the elements of x onto the range [minimum, maximum]."""
    lowest = min(x)
    scale = (maximum - minimum) / (max(x) - lowest)
    return [minimum + (value - lowest) * scale for value in x]
def composite_variable(
    x
):
    """
    Combine the elements of x into a single number, weighting element i by
    k**(i - 1) where k = len(x) + 1 (so the first element carries weight
    k**-1).
    """
    base = len(x) + 1
    return sum(
        base ** (index - 1) * element for index, element in enumerate(x)
    )
def model_linear(
    data = None,
    quick_calculation = False
):
    """
    Fit a least-squares straight line to (x, y) pairs and return the
    coefficients (b0, b1) of y = b0 + b1 * x. In quick-calculation mode a
    spread of at most 10 points is used instead of the full data.
    """
    if quick_calculation is True:
        data = select_spread(data, 10)
    n = len(data)
    sum_x = sum(x for x, _ in data)
    sum_y = sum(y for _, y in data)
    sum_x_squared = sum(x ** 2 for x, _ in data)
    sum_xy = sum(x * y for x, y in data)
    b1 = (sum_xy - (sum_x * sum_y) / n) / \
         (sum_x_squared - (sum_x ** 2) / n)
    b0 = (sum_y - b1 * sum_x) / n
    return (b0, b1)
def import_object(
    filename = None
):
    """
    Load and return a pickled object from the specified file.

    The file is opened with a context manager so the handle is closed
    deterministically (the original left an open file handle behind).
    """
    with open(filename, "rb") as object_file:
        return pickle.load(object_file)
def export_object(
    x,
    filename = None,
    overwrite = False
):
    """
    Pickle an object to a file, by default avoiding overwriting existing
    files via propose_filename.

    The file is opened with a context manager so the handle is flushed and
    closed deterministically (the original left an open file handle behind).
    """
    filename = propose_filename(
        filename = filename,
        overwrite = overwrite
    )
    with open(filename, "wb") as object_file:
        pickle.dump(x, object_file)
def string_to_bool(x):
    """Interpret a string as a boolean: "yes", "true", "t" and "1" (any case) are True."""
    truthy_values = ("yes", "true", "t", "1")
    return x.lower() in truthy_values
def ustr(text):
    """
    Convert a value to a Python 2 unicode or Python 3 string as appropriate
    to the version of Python in use; None is passed through unchanged.
    """
    if text is None:
        return text
    return str(text) if sys.version_info >= (3, 0) else unicode(text)
def number_to_English_text(
    number = None
):
    """
    Return the English-language representation of a non-negative integer
    (given as an int or a digit string), e.g. 123 -> "one hundred twenty
    three". Zero yields an empty string.
    """
    # words for the ones position (index = digit)
    ones = [
        "",
        "one ",
        "two ",
        "three ",
        "four ",
        "five ",
        "six ",
        "seven ",
        "eight ",
        "nine "
    ]
    # words for 10--19
    teens = [
        "ten ",
        "eleven ",
        "twelve ",
        "thirteen ",
        "fourteen ",
        "fifteen ",
        "sixteen ",
        "seventeen ",
        "eighteen ",
        "nineteen "
    ]
    # words for multiples of ten (indices 0 and 1 are unused)
    tens = [
        "",
        "",
        "twenty ",
        "thirty ",
        "forty ",
        "fifty ",
        "sixty ",
        "seventy ",
        "eighty ",
        "ninety "
    ]
    # scale words for successive 3-digit groups
    thousands = [
        "",
        "thousand ",
        "million ",
        "billion ",
        "trillion ",
        "quadrillion ",
        "quintillion ",
        "sextillion ",
        "septillion ",
        "octillion ",
        "nonillion ",
        "decillion ",
        "undecillion ",
        "duodecillion ",
        "tredecillion ",
        "quattuordecillion ",
        "quindecillion",
        "sexdecillion ",
        "septendecillion ",
        "octodecillion ",
        "novemdecillion ",
        "vigintillion "
    ]
    # Split the number into 3-digit groups with each group representing
    # hundreds, thousands etc.
    number_in_groups_of_3 = []
    number_as_string = str(number)
    for position in range(3, 33, 3):
        progressive_number_string = number_as_string[-position:]
        progression = len(number_as_string) - position
        # Break if the end of the number string is encountered.
        if progression < -2:
            break
        else:
            # A negative progression means the leftmost group has fewer than
            # three digits; slice accordingly.
            if progression >= 0:
                number_in_groups_of_3.append(int(progressive_number_string[:3]))
            elif progression >= -1:
                number_in_groups_of_3.append(int(progressive_number_string[:2]))
            elif progression >= -2:
                number_in_groups_of_3.append(int(progressive_number_string[:1]))
    # Split the number 3-digit groups into groups of ones, tens etc. and build
    # an English text representation of the number.
    number_words = ""
    for index, group in enumerate(number_in_groups_of_3):
        number_1 = group % 10
        number_2 = (group % 100) // 10
        number_3 = (group % 1000) // 100
        if group == 0:
            # All-zero groups contribute no words (and no scale word).
            continue
        else:
            thousand = thousands[index]
            if number_2 == 0:
                number_words = ones[number_1] + thousand + number_words
            elif number_2 == 1:
                number_words = teens[number_1] + thousand + number_words
            elif number_2 > 1:
                number_words = tens[number_2] + ones[number_1] + thousand + number_words
            if number_3 > 0:
                number_words = ones[number_3] + "hundred " + number_words
    return number_words.strip(" ")
def replace_numbers_in_text_with_English_text(
    text = None
):
    """
    Return the text with each run of digits replaced by its English-language
    representation (e.g. "2 dogs" becomes "two dogs").
    """
    # Split the text into text and numbers. (Raw string avoids an
    # invalid-escape-sequence warning on modern Python.)
    text = re.split(r"(\d+)", text)
    if text[-1] == "":
        text = text[:-1]
    text_translated = []
    # Replace numbers with English text.
    for text_segment in text:
        if all(character.isdigit() for character in text_segment):
            text_translated.append(number_to_English_text(number = text_segment))
        else:
            text_translated.append(text_segment)
    return "".join(text_translated)
def replace_contractions_with_full_words_and_replace_numbers_with_digits(
    text = None,
    remove_articles = True
):
    """
    This function replaces contractions with full words and replaces numbers
    with digits in specified text. There is the option to remove articles.
    """
    # The lookup tables are constant, so they are built once here instead of
    # being rebuilt on every loop iteration as in the original.
    contractions_expansions = {
        "ain't": "is not",
        "aren't": "are not",
        "can't": "can not",
        "could've": "could have",
        "couldn't": "could not",
        "didn't": "did not",
        "doesn't": "does not",
        "don't": "do not",
        "gonna": "going to",
        "gotta": "got to",
        "hadn't": "had not",
        "hasn't": "has not",
        "haven't": "have not",
        "he'd": "he would",
        "he'll": "he will",
        "he's": "he is",
        "how'd": "how did",
        "how'll": "how will",
        "how's": "how is",
        "I'd": "I would",
        "I'll": "I will",
        "I'm": "I am",
        "I've": "I have",
        "isn't": "is not",
        "it'd": "it would",
        "it'll": "it will",
        "it's": "it is",
        "mightn't": "might not",
        "might've": "might have",
        "mustn't": "must not",
        "must've": "must have",
        "needn't": "need not",
        "oughtn't": "ought not",
        "shan't": "shall not",
        "she'd": "she would",
        "she'll": "she will",
        "she's": "she is",
        "shouldn't": "should not",
        "should've": "should have",
        "somebody's": "somebody is",
        "someone'd": "someone would",
        "someone'll": "someone will",
        "someone's": "someone is",
        "that'll": "that will",
        "that's": "that is",
        "that'd": "that would",
        "there'd": "there would",
        "there're": "there are",
        "there's": "there is",
        "they'd": "they would",
        "they'll": "they will",
        "they're": "they are",
        "they've": "they have",
        "wasn't": "was not",
        "we'd": "we would",
        "we'll": "we will",
        "we're": "we are",
        "we've": "we have",
        "weren't": "were not",
        "what'd": "what did",
        "what'll": "what will",
        "what're": "what are",
        "what's": "what is",
        "whats": "what is",
        "what've": "what have",
        "when's": "when is",
        "when'd": "when did",
        "where'd": "where did",
        "where's": "where is",
        "where've": "where have",
        "who'd": "who would",
        "who'd've": "who would have",
        "who'll": "who will",
        "who're": "who are",
        "who's": "who is",
        "who've": "who have",
        "why'd": "why did",
        "why're": "why are",
        "why's": "why is",
        "won't": "will not",
        "won't've": "will not have",
        "would've": "would have",
        "wouldn't": "would not",
        "wouldn't've": "would not have",
        "y'all": "you all",
        "ya'll": "you all",
        "you'd": "you would",
        "you'd've": "you would have",
        "you'll": "you will",
        "y'aint": "you are not",
        "y'ain't": "you are not",
        "you're": "you are",
        "you've": "you have"
    }
    numbers_digits = {
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
        "eleven": "11",
        "twelve": "12",
        "thirteen": "13",
        "fourteen": "14",
        "fifteen": "15",
        "sixteen": "16",
        "seventeen": "17",
        "eighteen": "18",
        "nineteen": "19",
        "twenty": "20"
    }
    words = text.split()
    text_translated = ""
    for word in words:
        # Optionally drop articles entirely.
        if remove_articles and word in ["a", "an", "the"]:
            continue
        if word in contractions_expansions:
            word = contractions_expansions[word]
        if word in numbers_digits:
            word = numbers_digits[word]
        text_translated += " " + word
    text_translated = text_translated.strip()
    return text_translated
def split_into_sentences(
    text = None
):
    """
    Split text into a list of sentences, protecting common abbreviations
    (titles, company suffixes, acronyms, website TLDs, "Ph.D.") from being
    treated as sentence boundaries. A trailing fragment without terminal
    punctuation is discarded.
    """
    # Patterns are raw strings so regex escapes such as \s do not trigger
    # invalid-escape-sequence warnings on modern Python.
    capitals = r"([A-Z])"
    prefixes = r"(Dr|dr|Hon|hon|Mr|mr|Mrs|mrs|Ms|ms|St|st)[.]"
    suffixes = r"(Co|co|Inc|inc|Jr|jr|Ltd|ltd|Sr|sr)"
    starters = r"(But\s|Dr|He\s|However\s|It\s|Mr|Mrs|Ms|Our\s|She\s|That\s|Their\s|They\s|This\s|We\s|Wherever)"
    acronyms = r"([A-Z][.][A-Z][.](?:[A-Z][.])?)"
    websites = r"[.](com|gov|io|net|org|pro)"
    text = " " + text + " "
    text = text.replace("\n", " ")
    # Temporarily mark protected periods as <prd> and true boundaries as
    # <stop>, then restore the periods before splitting.
    text = re.sub(prefixes, r"\1<prd>", text)
    text = re.sub(websites, r"<prd>\1", text)
    if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    text = re.sub(r"\s" + capitals + "[.] ", r" \1<prd> ", text)
    text = re.sub(acronyms + " " + starters, r"\1<stop> \2", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]" + capitals + "[.]", r"\1<prd>\2<prd>\3<prd>", text)
    text = re.sub(capitals + "[.]" + capitals + "[.]", r"\1<prd>\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, r" \1<stop> \2", text)
    text = re.sub(" " + suffixes + "[.]", r" \1<prd>", text)
    text = re.sub(" " + capitals + "[.]", r" \1<prd>", text)
    if "”" in text: text = text.replace(".”", "”.")
    if "\"" in text: text = text.replace(".\"", "\".")
    if "!" in text: text = text.replace("!\"", "\"!")
    if "?" in text: text = text.replace("?\"", "\"?")
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")
    sentences = text.split("<stop>")
    sentences = sentences[:-1]
    sentences = [sentence.strip() for sentence in sentences]
    return sentences
def trim_incomplete_sentences(
    text = None
):
    """Drop the first detected sentence of the text and rejoin the remainder."""
    sentences = split_into_sentences(text)
    return " ".join(sentences[1:])
def pseudorandom_MAC_address():
    """Return a pseudorandom MAC address such as "ab:cd:ef:01:23:45"."""
    octets = [random.randint(0, 255) for _ in range(6)]
    return ":".join("{octet:02x}".format(octet = octet) for octet in octets)
def get_attribute(
    object_instance = None,
    name = None,
    imputation_default_value = None
):
    """
    Return the named attribute of an object; the name may carry an indexing
    suffix (e.g. "values[2]") to select an element of the attribute. Return
    the imputation default on any lookup failure.
    """
    try:
        if "[" in name and "]" in name:
            index = int(name.split("[")[1].split("]")[0])
            attribute = name.split("[")[0]
            value = getattr(object_instance, attribute)[index]
        else:
            value = getattr(object_instance, name)
    # Catch only ordinary lookup errors (AttributeError, IndexError,
    # ValueError); a bare except would also swallow
    # KeyboardInterrupt/SystemExit.
    except Exception:
        value = imputation_default_value
    return value
def generate_Python_variable_names(
    number = 10
):
    """
    Return a list of pseudorandom UUID-derived names that are valid Python
    identifiers (i.e. whose first character is alphabetic).
    """
    names = []
    while len(names) < number:
        candidate = uuid.uuid4().hex
        if candidate[0].isalpha():
            names.append(candidate)
    return names
def add_time_variables(df, reindex = True):
    """
    Return a DataFrame with variables for weekday index, weekday name, timedelta
    through day, fraction through day, hours through day and days through week
    added, with the index set to datetime. It is assumed that the variable
    `datetime` exists; returns False (and logs an error) otherwise.

    NOTE(review): the `reindex` flag is currently ignored -- the index is
    always set to `datetime`; confirm intended behaviour before honouring it.
    """
    if not "datetime" in df.columns:
        log.error("field datetime not found in DataFrame")
        return False
    df["datetime"] = pd.to_datetime(df["datetime"])
    df["month"] = df["datetime"].dt.month
    df["month_name"] = df["datetime"].dt.strftime("%B")
    df["weekday"] = df["datetime"].dt.weekday
    # `.dt.weekday_name` was removed in pandas 1.0; `.dt.day_name()` is the
    # supported equivalent and returns the same capitalised English names.
    df["weekday_name"] = df["datetime"].dt.day_name()
    # Timedelta elapsed since each timestamp's own midnight.
    df["time_through_day"] = df["datetime"].map(
        lambda x: x - datetime.datetime.combine(
            x.date(),
            datetime.time()
        )
    )
    df["fraction_through_day"] = df["time_through_day"].map(
        lambda x: x / datetime.timedelta(hours = 24)
    )
    df["hour"] = df["datetime"].dt.hour
    df["hours_through_day"] = df["fraction_through_day"] * 24
    # Continuous position within the week: 0.0 (Monday 00:00) to <7.0.
    df["days_through_week"] = df.apply(
        lambda row: row["weekday"] + row["fraction_through_day"],
        axis = 1
    )
    df["days_through_year"] = df["datetime"].dt.dayofyear
    df.index = df["datetime"]
    #del df["datetime"]
    return df
def daily_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create daily plots of a variable in a DataFrame, optionally renormalized
    to [0, 1] per day with MinMaxScaler. It is assumed that the DataFrame
    index is datetime and that the variable `hours_through_day` exists.
    Returns False (and logs an error) if the index is not datetime; otherwise
    draws onto the current matplotlib axes and returns None.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    days = []
    # NOTE(review): grouping by df.index.day groups by day-of-month, so data
    # from the same day number in different months is merged -- confirm that
    # the input spans a single month or that this overlay is intended.
    for group in df.groupby(df.index.day):
        days.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("hours")
    plt.ylabel(variable);
    for day in days:
        if renormalize:
            # Per-day rescaling: each day's trace spans the full [0, 1] range.
            values = scaler.fit_transform(day[[variable]])
        else:
            values = day[variable]
        if plot:
            plt.plot(day["hours_through_day"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(day["hours_through_day"], values, s = s)
def weekly_plots(
    df,
    variable,
    renormalize = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create weekly plots of a variable in a DataFrame, optionally renormalized
    to [0, 1] per week with MinMaxScaler. It is assumed that the DataFrame
    index is datetime and that the variable `days_through_week` exists.
    Returns False (and logs an error) if `days_through_week` is missing;
    otherwise draws onto the current matplotlib axes and returns None.
    """
    if not "days_through_week" in df.columns:
        log.error("field days_through_week not found in DataFrame")
        return False
    weeks = []
    # NOTE(review): `DatetimeIndex.week` is deprecated/removed in recent
    # pandas (use `df.index.isocalendar().week`) -- verify pandas version.
    for group in df.groupby(df.index.week):
        weeks.append(group[1])
    scaler = MinMaxScaler()
    plt.ylabel(variable);
    for week in weeks:
        if renormalize:
            # Per-week rescaling: each week's trace spans the full [0, 1] range.
            values = scaler.fit_transform(week[[variable]])
        else:
            values = week[variable]
        if plot:
            plt.plot(week["days_through_week"], values, linestyle = linestyle, linewidth = linewidth)
        if scatter:
            plt.scatter(week["days_through_week"], values, s = s)
    # Tick at the midpoint of each weekday slot (0.5 == midday Monday).
    plt.xticks(
        [ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5],
        ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    )
def yearly_plots(
    df,
    variable,
    renormalize = True,
    horizontal_axis_labels_days = False,
    horizontal_axis_labels_months = True,
    plot = True,
    scatter = False,
    linestyle = "-",
    linewidth = 1,
    s = 1
):
    """
    Create yearly plots of a variable in a DataFrame, one labelled line per
    calendar year, optionally renormalized to [0, 1] per year with
    MinMaxScaler. It is assumed that the DataFrame index is datetime and that
    the variable `days_through_year` exists. Returns False (and logs an
    error) if the index is not datetime; otherwise draws onto the current
    matplotlib axes and returns None.

    NOTE(review): the parameter `horizontal_axis_labels_days` is accepted but
    never used in the body -- confirm whether day labelling was intended.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    years = []
    for group in df.groupby(df.index.year):
        years.append(group[1])
    scaler = MinMaxScaler()
    plt.xlabel("days")
    plt.ylabel(variable);
    for year in years:
        if renormalize:
            # Per-year rescaling: each year's trace spans the full [0, 1] range.
            values = scaler.fit_transform(year[[variable]])
        else:
            values = year[variable]
        if plot:
            plt.plot(year["days_through_year"], values, linestyle = linestyle, linewidth = linewidth, label = year.index.year.values[0])
        if scatter:
            plt.scatter(year["days_through_year"], values, s = s)
    if horizontal_axis_labels_months:
        # Ticks placed at the approximate midpoint (day-of-year) of each month.
        plt.xticks(
            [ 15.5, 45, 74.5, 105, 135.5, 166, 196.5, 227.5, 258, 288.5, 319, 349.5],
            ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
        )
    plt.legend()
def add_rolling_statistics_variables(
    df = None,
    variable = None,
    window = 20,
    upper_factor = 2,
    lower_factor = 2
):
    """
    Add rolling statistics variables derived from a specified variable in a
    DataFrame: rolling mean, rolling (sample) standard deviation and
    mean +/- factor * standard-deviation bands. The first `window - 1` rows
    of the derived variables are NaN. Returns the mutated DataFrame.
    """
    # `pd.stats.moments.rolling_mean`/`rolling_std` were removed from pandas;
    # `Series.rolling(...)` is the supported equivalent with identical results.
    rolling = df[variable].rolling(window)
    df[variable + "_rolling_mean"] = rolling.mean()
    df[variable + "_rolling_standard_deviation"] = rolling.std()
    df[variable + "_rolling_upper_bound"] = df[variable + "_rolling_mean"] + upper_factor * df[variable + "_rolling_standard_deviation"]
    df[variable + "_rolling_lower_bound"] = df[variable + "_rolling_mean"] - lower_factor * df[variable + "_rolling_standard_deviation"]
    return df
def rescale_variables(
    df,
    variables_include = None,
    variables_exclude = None
):
    """
    Rescale variables in a DataFrame to [0, 1] with MinMaxScaler, excluding
    variables with NaNs and non-numeric (object/datetime/timedelta) dtypes,
    excluding specified variables, and force-including specified variables.
    Returns the mutated DataFrame.
    """
    # Previously both list parameters defaulted to `[]` (a shared mutable
    # default) and `variables_exclude` was extended in place through aliasing,
    # mutating the caller's list; copy into a fresh list instead.
    variables_not_rescale = list(variables_exclude) if variables_exclude else []
    variables_not_rescale.extend(df.columns[df.isna().any()].tolist()) # variables with NaNs
    variables_not_rescale.extend(df.select_dtypes(include = ["object", "datetime", "timedelta"]).columns) # variables with strings
    variables_rescale = list(set(df.columns) - set(variables_not_rescale))
    if variables_include:
        variables_rescale.extend(variables_include)
    scaler = MinMaxScaler()
    df[variables_rescale] = scaler.fit_transform(df[variables_rescale])
    return df
def histogram_hour_counts(
    df,
    variable
):
    """
    Plot a day-long bar histogram of counts of `variable` for each hour of
    the day, grouped on the hour of the datetime index. Returns False (and
    logs an error) if the index is not datetime; otherwise draws a bar plot
    on the current matplotlib axes and returns None.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # Count non-null occurrences of `variable` per hour (0-23) of the index.
    counts = df.groupby(df.index.hour)[variable].count()
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_day_counts(
    df,
    variable
):
    """
    Plot a week-long bar histogram of counts of `variable` for each weekday,
    ordered Monday through Sunday. Returns False (and logs an error) if the
    DataFrame index is not datetime; otherwise draws a bar plot on the
    current matplotlib axes and returns None.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # `DatetimeIndex.weekday_name` was removed in pandas 1.0; `day_name()`
    # returns the same capitalised English day names. Reindexing against
    # calendar.day_name restores Monday-first ordering.
    counts = df.groupby(df.index.day_name())[variable].count().reindex(calendar.day_name[0:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def histogram_month_counts(
    df,
    variable
):
    """
    Plot a year-long bar histogram of counts of `variable` for each month,
    ordered January through December. Returns False (and logs an error) if
    the DataFrame index is not datetime; otherwise draws a bar plot on the
    current matplotlib axes and returns None.
    """
    if not df.index.dtype in ["datetime64[ns]", "<M8[ns]", ">M8[ns]"]:
        log.error("index is not datetime")
        return False
    # Group by full English month name; reindex against calendar.month_name
    # (skipping its empty first entry) to restore January-first ordering.
    counts = df.groupby(df.index.strftime("%B"))[variable].count().reindex(calendar.month_name[1:])
    counts.plot(kind = "bar", width = 1, rot = 0, alpha = 0.7)
def log_progress(
    sequence,
    every = None,
    size = None,
    name = "items"
):
    """
    Display a progress bar widget in a Jupyter notebook while yielding the
    elements of `sequence`. Its dependencies must be enabled on launching
    Jupyter, such as in the following way:
    jupyter nbextension enable --py widgetsnbextension
    The progress bar can be used in a way like the following:
    for item in shijian.log_progress([1, 2, 3, 4, 5]):
        time.sleep(5)

    sequence -- iterable to wrap; if it has no len(), `every` must be given
    every    -- update the widget every `every` items (default: ~0.5 % of size)
    size     -- explicit length override for sized display
    name     -- label shown next to the counter
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            # No len(): treat as an open-ended iterator (unknown total).
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200) # every 0.5 %
    else:
        assert every is not None, "sequence is iterator, set every"
    if is_iterator:
        # Unknown total: show an indeterminate-style bar pinned at max.
        progress = IntProgress(min = 0, max = 1, value = 1)
        progress.bar_style = "info"
    else:
        progress = IntProgress(min = 0, max = size, value = 0)
    label = HTML()
    box = VBox(children = [label, progress])
    display(box)
    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = "{name}: {index} / ?".format(
                        name = name,
                        index = index
                    )
                else:
                    progress.value = index
                    label.value = u"{name}: {index} / {size}".format(
                        name = name,
                        index = index,
                        size = size
                    )
            yield record
    # NOTE(review): bare except is tolerable here only because the exception
    # is unconditionally re-raised after flagging the bar red.
    except:
        progress.bar_style = "danger"
        raise
    else:
        # Completed cleanly: flag the bar green and show the final count.
        progress.bar_style = "success"
        progress.value = index
        label.value = "{name}: {index}".format(
            name = name,
            index = str(index or "?")
        )
_main()
|
wdbm/shijian
|
shijian.py
|
List_Consensus.ensure_size
|
python
|
def ensure_size(
    self,
    size = None
):
    """Shrink the list until ``sys.getsizeof(self)`` is within ``size`` bytes
    (default: ``self.size_constraint``) by repeatedly removing one occurrence
    of the least frequent element."""
    limit = self.size_constraint if size is None else size
    while sys.getsizeof(self) > limit:
        # Drop one instance of the rarest element first.
        rarest = collections.Counter(self).most_common()[-1][0]
        self.remove(rarest)
|
This function removes the least frequent elements until the size
constraint is met.
|
train
|
https://github.com/wdbm/shijian/blob/ad6aea877e1eb99fe148127ea185f39f1413ed4f/shijian.py#L919-L932
| null |
class List_Consensus(list):
    """
    This class is designed to instantiate a list of elements. It features
    functionality that limits approximately the memory usage of the list. On
    estimating the size of the list as greater than the specified or default
    size limit, the list reduces the number of elements it contains. The list
    provides functionality to return its most frequent element, which can be
    used to determine its "consensus" element.

    NOTE(review): `append` calls `self.ensure_size`, which is defined outside
    this listing -- confirm it is present on the full class before use.
    """
    def __init__(
        self,
        *args
    ):
        # list initialisation
        # Branch keeps Python 2 compatibility (zero-argument super() is 3.x only).
        if sys.version_info >= (3, 0):
            super().__init__(*args)
        else:
            super(List_Consensus, self).__init__(*args)
        self.size_constraint = 150 # bytes
    def set_size_constraint(
        self,
        size = None
    ):
        """Set the approximate size limit in bytes; a None argument is a no-op."""
        if size is not None:
            self.size_constraint = size
    def append(
        self,
        element,
        ensure_size = True,
        size = None
    ):
        """Append `element`, then (by default) trim the list back under the
        size limit via `ensure_size`."""
        if size is None:
            size = self.size_constraint
        # Use the base-class append so trimming logic is not re-triggered.
        list.append(self, element)
        if ensure_size:
            self.ensure_size(
                size = size
            )
    def consensus(
        self
    ):
        """Return the most frequent element, or None if the list is empty.

        NOTE(review): the bare except silently maps the empty-list IndexError
        (and any other error) to None.
        """
        try:
            element_frequencies = collections.Counter(self)
            return element_frequencies.most_common(1)[0][0]
        except:
            return None
|
heikomuller/sco-engine
|
scoengine/reqbuf_worker.py
|
handle_request
|
python
|
def handle_request(request):
    """Convert a model run request from the buffer into a persistent message
    in a RabbitMQ queue.

    `request` is a dict with a 'connector' field (host, port, virtualHost,
    queue, user, password) and a 'request' field (the run payload, which
    includes 'experiment_id' and 'run_id').

    Retries the connect-publish cycle up to 100 times on ConnectionClosed;
    returns None either way (a permanently failing publish is dropped after
    the retries are exhausted).
    """
    connector = request['connector']
    hostname = connector['host']
    port = connector['port']
    virtual_host = connector['virtualHost']
    queue = connector['queue']
    user = connector['user']
    password = connector['password']
    # Establish connection with RabbitMQ server
    logging.info('Connect : [HOST=' + hostname + ', QUEUE=' + queue + ']')
    done = False
    attempts = 0
    while not done and attempts < 100:
        try:
            credentials = pika.PlainCredentials(user, password)
            con = pika.BlockingConnection(pika.ConnectionParameters(
                host=hostname,
                port=port,
                virtual_host=virtual_host,
                credentials=credentials
            ))
            channel = con.channel()
            # durable=True so the queue survives a broker restart.
            channel.queue_declare(queue=queue, durable=True)
            req = request['request']
            logging.info('Run : [EXPERIMENT=' + req['experiment_id'] + ', RUN=' + req['run_id'] + ']')
            channel.basic_publish(
                exchange='',
                routing_key=queue,
                body=json.dumps(req),
                properties=pika.BasicProperties(
                    delivery_mode = 2, # make message persistent
                )
            )
            con.close()
            done = True
        except pika.exceptions.ConnectionClosed as ex:
            # pika occasionally drops the connection; log and retry.
            attempts += 1
            logging.exception(ex)
|
Convert a model run request from the buffer into a message in a RabbitMQ
queue.
Parameters
----------
request : dict
Buffer entry containing 'connector' and 'request' field
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/reqbuf_worker.py#L19-L64
| null |
#!venv/bin/python
"""Request buffer worker - Polls the request buffer and transforms documents
into RabbitMQ messages.

NOTE(review): this script uses Python 2 syntax (the bare `print` statement)
and will not run under Python 3 without porting.
"""
import logging
import json
import pika
import sys
import time
import yaml
from scodata.mongo import MongoDBFactory
"""MongoDB collection that is used as request buffer."""
COLL_REQBUFFER = 'requestbuffer'
if __name__ == '__main__':
    # Expects the config.yaml file as input
    if len(sys.argv) != 2:
        print 'Usage: <config.yaml>'
        sys.exit()
    # Read configuration file (YAML)
    # NOTE(review): yaml.load without an explicit Loader executes arbitrary
    # tags; prefer yaml.safe_load if the config file is not fully trusted.
    with open(sys.argv[1], 'r') as f:
        obj = yaml.load(f)
    # Flatten the [{key, value}, ...] property list into a plain dict.
    config = {item['key']:item['value'] for item in obj['properties']}
    # Get Mongo client factory
    mongo = MongoDBFactory(db_name=config['mongo.db'])
    # Init logging
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)s:%(message)s'
    )
    # Start an endless loop to handle requests. Necessary because pika throws
    # ConnectionClosed exception occasionally when sending acknowledgement. This
    # way we can keep a remote worker alive by re-connecting.
    while True:
        coll = mongo.get_database()[COLL_REQBUFFER]
        requests = []
        # Snapshot the buffer first so deletions do not disturb the cursor.
        for doc in coll.find():
            requests.append(doc)
        for req in requests:
            # handle_request is defined elsewhere in this module.
            handle_request(req)
            coll.delete_one({'_id': req['_id']})
        time.sleep(1)
|
heikomuller/sco-engine
|
scoengine/model.py
|
ModelOutputFile.from_dict
|
python
|
def from_dict(doc):
    """Create a model output file object from its dictionary serialization.

    The 'path' entry is optional; when absent, ModelOutputFile falls back to
    using the filename as the path.
    """
    return ModelOutputFile(
        doc['filename'],
        doc['mimeType'],
        path=doc.get('path')
    )
|
Create a model output file object from a dictionary.
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/model.py#L61-L73
| null |
class ModelOutputFile(object):
    """Description of an output file generated by a model run. Each file has a
    unique name (i.e., the relative path to the file in the model output
    directory) and a Mime type.
    Attributes
    ----------
    filename : string
        Relative path to the file in the model output directory
    mime_type : string
        File mime type
    path : string
        Path to the attachment in the model output directory
    """
    def __init__(self, filename, mime_type, path=None):
        """Initialize the filename and mime type.
        Parameters
        ----------
        filename : string
            Relative path to the file in the model output directory
        mime_type : string
            File mime type
        path : string, optional
            Path to the attachment in the model output directory. If missing the
            filename will be used.
        """
        self.filename = filename
        self.mime_type = mime_type
        self.path = path if not path is None else filename
    # Bug fix: `to_dict` was decorated with @staticmethod despite taking
    # `self`, so `instance.to_dict()` raised TypeError (self never bound).
    # It is an ordinary instance method.
    def to_dict(self):
        """Get a dictionary serialization of the object to convert into a Json
        object.
        Returns
        -------
        dict
        """
        doc = {
            'filename' : self.filename,
            'mimeType' : self.mime_type
        }
        # Add path if present
        if self.filename != self.path:
            doc['path'] = self.path
        return doc
|
heikomuller/sco-engine
|
scoengine/model.py
|
ModelOutputFile.to_dict
|
python
|
def to_dict(self):
    """Serialise this model output file into a Json-ready dictionary.

    The 'path' entry is emitted only when it differs from the filename.
    """
    doc = {
        'filename': self.filename,
        'mimeType': self.mime_type,
    }
    if self.path != self.filename:
        # Persist the path only when it carries extra information.
        doc['path'] = self.path
    return doc
|
Get a dictionary serialization of the object to convert into a Json
object.
Returns
-------
dict
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/model.py#L75-L90
| null |
class ModelOutputFile(object):
    """Handle describing a single output file produced by a model run.

    Attributes
    ----------
    filename : string
        Relative path of the file inside the model output directory
    mime_type : string
        Mime type of the file
    path : string
        Location of the file in the model output directory; defaults to the
        filename when not given explicitly
    """
    def __init__(self, filename, mime_type, path=None):
        """Set filename, mime type and (optional) path.
        Parameters
        ----------
        filename : string
            Relative path of the file inside the model output directory
        mime_type : string
            Mime type of the file
        path : string, optional
            Location of the file; falls back to `filename` when omitted.
        """
        self.filename = filename
        self.mime_type = mime_type
        if path is None:
            self.path = filename
        else:
            self.path = path
    @staticmethod
    def from_dict(doc):
        """Instantiate a ModelOutputFile from its dictionary serialization.

        The 'path' entry is optional; a missing entry yields the
        filename-as-path default.
        """
        return ModelOutputFile(
            doc['filename'],
            doc['mimeType'],
            path=doc.get('path')
        )
|
heikomuller/sco-engine
|
scoengine/model.py
|
ModelOutputs.from_dict
|
python
|
def from_dict(doc):
    """Create a model output descriptor from its dictionary serialization."""
    prediction = ModelOutputFile.from_dict(doc['prediction'])
    attachments = [
        ModelOutputFile.from_dict(attachment)
        for attachment in doc['attachments']
    ]
    return ModelOutputs(prediction, attachments)
|
Create a model output object from a dictionary.
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/model.py#L131-L138
|
[
"def from_dict(doc):\n \"\"\"Create a model output file object from a dictionary.\n\n \"\"\"\n if 'path' in doc:\n path = doc['path']\n else:\n path = None\n return ModelOutputFile(\n doc['filename'],\n doc['mimeType'],\n path=path\n )\n"
] |
class ModelOutputs(object):
    """Description of output files that are generated by a particular model. The
    output has to include a prediction file and an optional list of attachments.
    Each attachment has a unique filename and a Mime type.
    Raises ValueError if the list of attachments contains multiple entries for
    the same file name.
    Attributes
    ----------
    prediction_file : ModelOutputFile
        Name of the output file that contains the prediction result
    attachments : list(ModelOutputFile), optional
        List of additional output files that are to be uploaded as attachments
        for successful model runs
    """
    def __init__(self, prediction_file, attachments=None):
        """Initialize the prediction filename and attachments.
        Parameters
        ----------
        prediction_file : ModelOutputFile
            Name of the output file that contains the prediction result
        attachments : list(ModelOutputFile), optional
            List of additional output files that are to be uploaded as attachments
            for successful model runs
        """
        self.prediction_file = prediction_file
        self.attachments = attachments if not attachments is None else []
        # Ensure that the filenames of all attachments are unique.
        names = set()
        for attachment in self.attachments:
            if attachment.filename in names:
                raise ValueError('duplicate attachment: ' + attachment.filename)
            names.add(attachment.filename)
    # Bug fix: `to_dict` was decorated with @staticmethod despite taking
    # `self`, so `instance.to_dict()` raised TypeError (self never bound).
    # It is an ordinary instance method.
    def to_dict(self):
        """Get a dictionary serialization of the object to convert into a Json
        object.
        Returns
        -------
        dict
        """
        return {
            'prediction' : self.prediction_file.to_dict(),
            'attachments' : [a.to_dict() for a in self.attachments]
        }
|
heikomuller/sco-engine
|
scoengine/model.py
|
ModelOutputs.to_dict
|
python
|
def to_dict(self):
    """Serialise the model outputs (prediction file plus attachments) into a
    Json-ready dictionary."""
    serialized_attachments = [attachment.to_dict() for attachment in self.attachments]
    return {
        'prediction': self.prediction_file.to_dict(),
        'attachments': serialized_attachments
    }
|
Get a dictionary serialization of the object to convert into a Json
object.
Returns
-------
dict
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/model.py#L140-L151
| null |
class ModelOutputs(object):
    """Container for the files a model run produces: a mandatory prediction
    file plus optional attachments.

    Attachment filenames must be unique; a ValueError is raised for
    duplicates.

    Attributes
    ----------
    prediction_file : ModelOutputFile
        Output file containing the prediction result
    attachments : list(ModelOutputFile)
        Additional files uploaded as attachments for successful runs
    """
    def __init__(self, prediction_file, attachments=None):
        """Store the prediction file and attachment list.
        Parameters
        ----------
        prediction_file : ModelOutputFile
            Output file containing the prediction result
        attachments : list(ModelOutputFile), optional
            Additional files uploaded as attachments for successful runs
        """
        self.prediction_file = prediction_file
        self.attachments = [] if attachments is None else attachments
        # Reject duplicate attachment filenames up front.
        seen = set()
        for item in self.attachments:
            if item.filename in seen:
                raise ValueError('duplicate attachment: ' + item.filename)
            seen.add(item.filename)
    @staticmethod
    def from_dict(doc):
        """Create a model output object from its dictionary serialization."""
        return ModelOutputs(
            ModelOutputFile.from_dict(doc['prediction']),
            [ModelOutputFile.from_dict(item) for item in doc['attachments']]
        )
|
heikomuller/sco-engine
|
scoengine/model.py
|
ModelRegistry.delete_model
|
python
|
def delete_model(self, model_id, erase=False):
    """Delete the model with the given identifier, returning the deleted
    handle or None for an unknown identifier. With erase=True the record is
    removed outright instead of being flagged inactive."""
    deleted = self.delete_object(model_id, erase=erase)
    return deleted
|
Delete the model with given identifier in the database. Returns the
handle for the deleted model or None if object identifier is unknown.
Parameters
----------
model_id : string
Unique model identifier
erase : Boolean, optinal
If true, the record will be deleted from the database. Otherwise,
the active flag will be set to False to support provenance tracking.
Returns
-------
ModelHandle
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/model.py#L239-L255
| null |
class ModelRegistry(MongoDBStore):
    """Default implementation for model registry. Uses MongoDB as storage
    backend and makes use of the SCO datastore implementation. Provides
    wrappers for delete, exists, get, and list model operations.
    """
    def __init__(self, mongo):
        """Initialize the MongoDB collection where models are being stored.
        Parameters
        ----------
        mongo : scodata.MongoDBFactory
            MongoDB connector
        """
        # The base store operates on the `models` collection.
        super(ModelRegistry, self).__init__(mongo.get_database().models)
    def exists_model(self, model_id):
        """Returns true if a model with the given identifier exists in the
        registry.
        Parameters
        ----------
        model_id : string
            Unique model identifier
        Returns
        -------
        Boolean
            True, if model with given identifier exists.
        """
        # Return True if query for object identifier with active flag on returns
        # a result.
        return self.exists_object(model_id)
    def from_dict(self, document):
        """Create a model database object from a given dictionary serialization.
        Parameters
        ----------
        document : dict
            Dictionary serialization of the object
        Returns
        -------
        ModelHandle
        """
        # The timestamp is optional (e.g., in cases where model definitions are
        # loaded from file).
        if 'timestamp' in document:
            timestamp = datetime.datetime.strptime(
                document['timestamp'],
                '%Y-%m-%dT%H:%M:%S.%f'
            )
        else:
            timestamp = None
        # Create handle for database object
        return ModelHandle(
            document['_id'],
            document['properties'],
            [AttributeDefinition.from_dict(el) for el in document['parameters']],
            ModelOutputs.from_dict(document['outputs']),
            document['connector'],
            timestamp=timestamp
        )
    def get_model(self, model_id):
        """Retrieve model with given identifier from the database.
        Parameters
        ----------
        identifier : string
            Unique model identifier
        Returns
        -------
        ModelHandle
            Handle for model with given identifier or None if no model
            with identifier exists.
        """
        # Inactive (soft-deleted) models are deliberately excluded.
        return self.get_object(model_id, include_inactive=False)
    def list_models(self, limit=-1, offset=-1):
        """List models in the database. Takes optional parameters limit and
        offset for pagination; -1 disables the respective constraint.
        Parameters
        ----------
        limit : int
            Limit number of models in the result set
        offset : int
            Set offset in list (order as defined by object store)
        Returns
        -------
        ObjectListing
        """
        return self.list_objects(limit=limit, offset=offset)
    def register_model(self, model_id, properties, parameters, outputs, connector):
        """Create an experiment object for the subject and image group. Objects
        are referenced by their identifier. The reference to a functional data
        object is optional.
        Raises ValueError if no valid experiment name is given in property list.
        Parameters
        ----------
        model_id : string
            Unique model identifier
        properties : Dictionary
            Dictionary of model specific properties.
        parameters : list(scodata.attribute.AttributeDefinition)
            List of attribute definitions for model run parameters
        outputs : ModelOutputs
            Description of model outputs
        connector : dict
            Connection information to communicate with model workers
        Returns
        -------
        ModelHandle
            Handle for created model object in database
        """
        # Create object handle and store it in database before returning it
        obj = ModelHandle(model_id, properties, parameters, outputs, connector)
        self.insert_object(obj)
        return obj
    def to_dict(self, model):
        """Create a dictionary serialization for a model.
        Parameters
        ----------
        model : ModelHandle
        Returns
        -------
        dict
            Dictionary serialization for a model
        """
        # Get the basic Json object from the super class
        obj = super(ModelRegistry, self).to_dict(model)
        # Add model parameter
        obj['parameters'] = [
            para.to_dict() for para in model.parameters
        ]
        obj['outputs'] = model.outputs.to_dict()
        obj['connector'] = model.connector
        return obj
    def update_connector(self, model_id, connector):
        """Update the connector information for a given model.
        Returns None if the specified model does not exist.
        Parameters
        ----------
        model_id : string
            Unique model identifier
        connector : dict
            New connection information
        Returns
        -------
        ModelHandle
        """
        model = self.get_model(model_id)
        if model is None:
            return None
        # Mutate the handle, then persist the replacement document.
        model.connector = connector
        self.replace_object(model)
        return model
|
heikomuller/sco-engine
|
scoengine/model.py
|
ModelRegistry.register_model
|
python
|
def register_model(self, model_id, properties, parameters, outputs, connector):
    """Build a ModelHandle for the given model definition, persist it in the
    registry's object store, and return the stored handle."""
    handle = ModelHandle(model_id, properties, parameters, outputs, connector)
    self.insert_object(handle)
    return handle
|
Create an experiment object for the subject and image group. Objects
are referenced by their identifier. The reference to a functional data
object is optional.
Raises ValueError if no valid experiment name is given in property list.
Parameters
----------
model_id : string
Unique model identifier
properties : Dictionary
Dictionary of model specific properties.
parameters : list(scodata.attribute.AttributeDefinition)
List of attribute definitions for model run parameters
outputs : ModelOutputs
Description of model outputs
connector : dict
Connection information to communicate with model workers
Returns
-------
ModelHandle
Handle for created model object in database
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/model.py#L339-L367
| null |
class ModelRegistry(MongoDBStore):
    """Default implementation for model registry. Uses MongoDB as storage
    backend and makes use of the SCO datastore implementation. Provides
    wrappers for delete, exists, get, and list model operations.
    """
    def __init__(self, mongo):
        """Initialize the MongoDB collection where models are being stored.
        Parameters
        ----------
        mongo : scodata.MongoDBFactory
            MongoDB connector
        """
        # The base store operates on the `models` collection.
        super(ModelRegistry, self).__init__(mongo.get_database().models)
    def delete_model(self, model_id, erase=False):
        """Delete the model with given identifier in the database. Returns the
        handle for the deleted model or None if object identifier is unknown.
        Parameters
        ----------
        model_id : string
            Unique model identifier
        erase : Boolean, optional
            If true, the record will be deleted from the database. Otherwise,
            the active flag will be set to False to support provenance tracking.
        Returns
        -------
        ModelHandle
        """
        return self.delete_object(model_id, erase=erase)
    def exists_model(self, model_id):
        """Returns true if a model with the given identifier exists in the
        registry.
        Parameters
        ----------
        model_id : string
            Unique model identifier
        Returns
        -------
        Boolean
            True, if model with given identifier exists.
        """
        # Return True if query for object identifier with active flag on returns
        # a result.
        return self.exists_object(model_id)
    def from_dict(self, document):
        """Create a model database object from a given dictionary serialization.
        Parameters
        ----------
        document : dict
            Dictionary serialization of the object
        Returns
        -------
        ModelHandle
        """
        # The timestamp is optional (e.g., in cases where model definitions are
        # loaded from file).
        if 'timestamp' in document:
            timestamp = datetime.datetime.strptime(
                document['timestamp'],
                '%Y-%m-%dT%H:%M:%S.%f'
            )
        else:
            timestamp = None
        # Create handle for database object
        return ModelHandle(
            document['_id'],
            document['properties'],
            [AttributeDefinition.from_dict(el) for el in document['parameters']],
            ModelOutputs.from_dict(document['outputs']),
            document['connector'],
            timestamp=timestamp
        )
    def get_model(self, model_id):
        """Retrieve model with given identifier from the database.
        Parameters
        ----------
        identifier : string
            Unique model identifier
        Returns
        -------
        ModelHandle
            Handle for model with given identifier or None if no model
            with identifier exists.
        """
        # Inactive (soft-deleted) models are deliberately excluded.
        return self.get_object(model_id, include_inactive=False)
    def list_models(self, limit=-1, offset=-1):
        """List models in the database. Takes optional parameters limit and
        offset for pagination; -1 disables the respective constraint.
        Parameters
        ----------
        limit : int
            Limit number of models in the result set
        offset : int
            Set offset in list (order as defined by object store)
        Returns
        -------
        ObjectListing
        """
        return self.list_objects(limit=limit, offset=offset)
    def to_dict(self, model):
        """Create a dictionary serialization for a model.
        Parameters
        ----------
        model : ModelHandle
        Returns
        -------
        dict
            Dictionary serialization for a model
        """
        # Get the basic Json object from the super class
        obj = super(ModelRegistry, self).to_dict(model)
        # Add model parameter
        obj['parameters'] = [
            para.to_dict() for para in model.parameters
        ]
        obj['outputs'] = model.outputs.to_dict()
        obj['connector'] = model.connector
        return obj
    def update_connector(self, model_id, connector):
        """Update the connector information for a given model.
        Returns None if the specified model does not exist.
        Parameters
        ----------
        model_id : string
            Unique model identifier
        connector : dict
            New connection information
        Returns
        -------
        ModelHandle
        """
        model = self.get_model(model_id)
        if model is None:
            return None
        # Mutate the handle, then persist the replacement document.
        model.connector = connector
        self.replace_object(model)
        return model
heikomuller/sco-engine
|
scoengine/model.py
|
ModelRegistry.to_dict
|
python
|
def to_dict(self, model):
    """Serialise a ModelHandle: start from the base-class serialization, then
    add the model-specific parameters, outputs and connector entries."""
    document = super(ModelRegistry, self).to_dict(model)
    document.update({
        'parameters': [parameter.to_dict() for parameter in model.parameters],
        'outputs': model.outputs.to_dict(),
        'connector': model.connector,
    })
    return document
|
Create a dictionary serialization for a model.
Parameters
----------
model : ModelHandle
Returns
-------
dict
Dictionary serialization for a model
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/model.py#L369-L389
| null |
class ModelRegistry(MongoDBStore):
"""Default implementation for model registry. Uses MongoDB as storage
backend and makes use of the SCO datastore implementation. Provides
wrappers for delete, exists, get, and list model operations.
"""
def __init__(self, mongo):
"""Initialize the MongoDB collection where models are being stored.
Parameters
----------
mongo : scodata.MongoDBFactory
MongoDB connector
"""
super(ModelRegistry, self).__init__(mongo.get_database().models)
def delete_model(self, model_id, erase=False):
"""Delete the model with given identifier in the database. Returns the
handle for the deleted model or None if object identifier is unknown.
Parameters
----------
model_id : string
Unique model identifier
erase : Boolean, optinal
If true, the record will be deleted from the database. Otherwise,
the active flag will be set to False to support provenance tracking.
Returns
-------
ModelHandle
"""
return self.delete_object(model_id, erase=erase)
def exists_model(self, model_id):
"""Returns true if a model with the given identifier exists in the
registry.
Parameters
----------
model_id : string
Unique model identifier
Returns
-------
Boolean
True, if model with given identifier exists.
"""
# Return True if query for object identifier with active flag on returns
# a result.
return self.exists_object(model_id)
def from_dict(self, document):
"""Create a model database object from a given dictionary serialization.
Parameters
----------
document : dict
Dictionary serialization of the object
Returns
-------
ModelHandle
"""
# The timestamp is optional (e.g., in cases where model definitions are
# loaded from file).
if 'timestamp' in document:
timestamp = datetime.datetime.strptime(
document['timestamp'],
'%Y-%m-%dT%H:%M:%S.%f'
)
else:
timestamp = None
# Create handle for database object
return ModelHandle(
document['_id'],
document['properties'],
[AttributeDefinition.from_dict(el) for el in document['parameters']],
ModelOutputs.from_dict(document['outputs']),
document['connector'],
timestamp=timestamp
)
def get_model(self, model_id):
"""Retrieve model with given identifier from the database.
Parameters
----------
identifier : string
Unique model identifier
Returns
-------
ModelHandle
Handle for model with given identifier or None if no model
with identifier exists.
"""
return self.get_object(model_id, include_inactive=False)
def list_models(self, limit=-1, offset=-1):
"""List models in the database. Takes optional parameters limit and
offset for pagination.
Parameters
----------
limit : int
Limit number of models in the result set
offset : int
Set offset in list (order as defined by object store)
Returns
-------
ObjectListing
"""
return self.list_objects(limit=limit, offset=offset)
def register_model(self, model_id, properties, parameters, outputs, connector):
"""Create an experiment object for the subject and image group. Objects
are referenced by their identifier. The reference to a functional data
object is optional.
Raises ValueError if no valid experiment name is given in property list.
Parameters
----------
model_id : string
Unique model identifier
properties : Dictionary
Dictionary of model specific properties.
parameters : list(scodata.attribute.AttributeDefinition)
List of attribute definitions for model run parameters
outputs : ModelOutputs
Description of model outputs
connector : dict
Connection information to communicate with model workers
Returns
-------
ModelHandle
Handle for created model object in database
"""
# Create object handle and store it in database before returning it
obj = ModelHandle(model_id, properties, parameters, outputs, connector)
self.insert_object(obj)
return obj
def update_connector(self, model_id, connector):
"""Update the connector information for a given model.
Returns None if the specified model not exist.
Parameters
----------
model_id : string
Unique model identifier
connector : dict
New connection information
Returns
-------
ModelHandle
"""
model = self.get_model(model_id)
if model is None:
return None
model.connector = connector
self.replace_object(model)
return model
|
heikomuller/sco-engine
|
scoengine/model.py
|
ModelRegistry.update_connector
|
python
|
def update_connector(self, model_id, connector):
model = self.get_model(model_id)
if model is None:
return None
model.connector = connector
self.replace_object(model)
return model
|
Update the connector information for a given model.
Returns None if the specified model not exist.
Parameters
----------
model_id : string
Unique model identifier
connector : dict
New connection information
Returns
-------
ModelHandle
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/model.py#L391-L412
| null |
class ModelRegistry(MongoDBStore):
    """Default implementation for model registry. Uses MongoDB as storage
    backend and makes use of the SCO datastore implementation. Provides
    wrappers for delete, exists, get, list, and register model operations.
    """
    def __init__(self, mongo):
        """Initialize the MongoDB collection where models are being stored.

        Parameters
        ----------
        mongo : scodata.MongoDBFactory
            MongoDB connector
        """
        # Model records live in the 'models' collection of the SCO database.
        super(ModelRegistry, self).__init__(mongo.get_database().models)
    def delete_model(self, model_id, erase=False):
        """Delete the model with given identifier in the database. Returns the
        handle for the deleted model or None if object identifier is unknown.

        Parameters
        ----------
        model_id : string
            Unique model identifier
        erase : Boolean, optional
            If True, the record will be deleted from the database. Otherwise,
            the active flag will be set to False to support provenance
            tracking.

        Returns
        -------
        ModelHandle
        """
        return self.delete_object(model_id, erase=erase)
    def exists_model(self, model_id):
        """Returns True if an active model with the given identifier exists in
        the registry.

        Parameters
        ----------
        model_id : string
            Unique model identifier

        Returns
        -------
        Boolean
            True, if model with given identifier exists.
        """
        # Return True if query for object identifier with active flag on
        # returns a result.
        return self.exists_object(model_id)
    def from_dict(self, document):
        """Create a model database object from a given dictionary
        serialization.

        Parameters
        ----------
        document : dict
            Dictionary serialization of the object

        Returns
        -------
        ModelHandle
        """
        # The timestamp is optional (e.g., in cases where model definitions
        # are loaded from file).
        if 'timestamp' in document:
            timestamp = datetime.datetime.strptime(
                document['timestamp'],
                '%Y-%m-%dT%H:%M:%S.%f'
            )
        else:
            timestamp = None
        # Create handle for database object
        return ModelHandle(
            document['_id'],
            document['properties'],
            [AttributeDefinition.from_dict(el) for el in document['parameters']],
            ModelOutputs.from_dict(document['outputs']),
            document['connector'],
            timestamp=timestamp
        )
    def get_model(self, model_id):
        """Retrieve model with given identifier from the database.

        Parameters
        ----------
        model_id : string
            Unique model identifier

        Returns
        -------
        ModelHandle
            Handle for model with given identifier or None if no model
            with identifier exists.
        """
        # Inactive (soft-deleted) models are treated as non-existent.
        return self.get_object(model_id, include_inactive=False)
    def list_models(self, limit=-1, offset=-1):
        """List models in the database. Takes optional parameters limit and
        offset for pagination (-1 means no limit / no offset).

        Parameters
        ----------
        limit : int
            Limit number of models in the result set
        offset : int
            Set offset in list (order as defined by object store)

        Returns
        -------
        ObjectListing
        """
        return self.list_objects(limit=limit, offset=offset)
    def register_model(self, model_id, properties, parameters, outputs, connector):
        """Create and persist a new model record in the registry.

        The underlying store raises a duplicate-key error if a model with the
        given identifier already exists.

        Parameters
        ----------
        model_id : string
            Unique model identifier
        properties : Dictionary
            Dictionary of model specific properties.
        parameters : list(scodata.attribute.AttributeDefinition)
            List of attribute definitions for model run parameters
        outputs : ModelOutputs
            Description of model outputs
        connector : dict
            Connection information to communicate with model workers

        Returns
        -------
        ModelHandle
            Handle for created model object in database
        """
        # Create object handle and store it in database before returning it
        obj = ModelHandle(model_id, properties, parameters, outputs, connector)
        self.insert_object(obj)
        return obj
    def to_dict(self, model):
        """Create a dictionary serialization for a model.

        Parameters
        ----------
        model : ModelHandle

        Returns
        -------
        dict
            Dictionary serialization for a model
        """
        # Get the basic Json object from the super class
        obj = super(ModelRegistry, self).to_dict(model)
        # Add model parameters, outputs, and connector information on top of
        # the generic serialization.
        obj['parameters'] = [
            para.to_dict() for para in model.parameters
        ]
        obj['outputs'] = model.outputs.to_dict()
        obj['connector'] = model.connector
        return obj
|
heikomuller/sco-engine
|
scoengine/__init__.py
|
init_registry
|
python
|
def init_registry(mongo, model_defs, clear_collection=False):
# Create model registry
registry = SCOEngine(mongo).registry
# Drop collection if clear flag is set to True
if clear_collection:
registry.clear_collection()
for i in range(len(model_defs)):
model = registry.from_dict(model_defs[i])
registry.register_model(
model.identifier,
model.properties,
model.parameters,
model.outputs,
model.connector
)
|
Initialize a model registry with a list of model definitions in Json
format.
Parameters
----------
mongo : scodata.MongoDBFactory
Connector for MongoDB
model_defs : list()
List of model definitions in Json-like format
clear_collection : boolean
If true, collection will be dropped before models are created
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/__init__.py#L506-L532
| null |
"""Standard Cortical Observer - Workflow Engine API.
The workflow engine is used to register and run predictive models. The engine
maintains a registry for existing models and it is used to run models for
experiments that are defined in the SCO Data Store.
Workers are used to actually execute a predictive model run. The engine
interacts with these workers over defined communication channels. In the
current implementation the only supported communication forms are via RabbitMQ
or sockets. Thus, each model is registered with the necessary parameters for the
engine communicate run requests to a worker that can execute the model. The
workers may run locally on the same machine as the engine (and the web server)
or on remote machines.
The SCO Engine package is intended to bridge the decoupling of the web server
code from the predictive model code.
"""
from abc import abstractmethod
import json
import pika
from pymongo.errors import DuplicateKeyError
from model import ModelRegistry
# ------------------------------------------------------------------------------
#
# Constants
#
# ------------------------------------------------------------------------------
"""Identifier for known connectors that are used to communicate with model
workers.
"""
CONNECTOR_RABBITMQ = 'rabbitmq'
# ------------------------------------------------------------------------------
#
# Classes
#
# ------------------------------------------------------------------------------
class SCOEngine(object):
    """SCO workflow engine. Maintains a registry of models and communicates with
    backend workers to run predictive models.
    """
    def __init__(self, mongo):
        """Initialize the MongoDB collection where models and connector
        information is stored.

        Parameters
        ----------
        mongo : scodata.MongoDBFactory
            MongoDB connector
        """
        # Data is being stored in a collection named 'models'
        self.registry = ModelRegistry(mongo)
    def delete_model(self, model_id):
        """Delete the model with the given identifier from the model registry.

        Parameters
        ----------
        model_id : string
            Unique model identifier

        Returns
        -------
        ModelHandle
            Handle of the deleted model or None if it did not exist.
        """
        # Ensure that the existing model is erased so we can re-register a
        # model with the same identifier later on.
        return self.registry.delete_model(model_id, erase=True)
    def get_model(self, model_id):
        """Get the registered model with the given identifier.

        Parameters
        ----------
        model_id : string
            Unique model identifier

        Returns
        -------
        ModelHandle
            Handle for requested model or None if no model with given
            identifier exists.
        """
        return self.registry.get_model(model_id)
    def list_models(self, limit=-1, offset=-1):
        """Get a list of models in the registry.

        Parameters
        ----------
        limit : int
            Limit number of items in the result set (-1 for no limit)
        offset : int
            Set offset in list (order as defined by object store)

        Returns
        -------
        list(ModelHandle)
        """
        return self.registry.list_models(limit=limit, offset=offset)
    def register_model(self, model_id, properties, parameters, outputs, connector):
        """Register a new model with the engine. Expects connection information
        for RabbitMQ to submit model run requests to workers.

        Raises ValueError if the given model identifier is not unique or the
        connector information is invalid.

        Parameters
        ----------
        model_id : string
            Unique model identifier
        properties : Dictionary
            Dictionary of model specific properties.
        parameters : list(scodata.attribute.AttributeDefinition)
            List of attribute definitions for model run parameters
        outputs : ModelOutputs
            Description of model outputs
        connector : dict
            Connection information to communicate with model workers. Expected
            to contain at least the connector name 'connector'.

        Returns
        -------
        ModelHandle
        """
        # Validate the given connector information
        self.validate_connector(connector)
        # Connector information is valid. Ok to register the model. Catch the
        # duplicate key error raised for an existing identifier and transform
        # it into a ValueError.
        try:
            return self.registry.register_model(
                model_id,
                properties,
                parameters,
                outputs,
                connector
            )
        except DuplicateKeyError as ex:
            raise ValueError(str(ex))
    def run_model(self, model_run, run_url):
        """Execute the given model run.

        Throws a ValueError if the given run specifies an unknown model or if
        the model connector is invalid. An EngineException is thrown if running
        the model (i.e., communication with the backend) fails.

        Parameters
        ----------
        model_run : ModelRunHandle
            Handle to model run
        run_url : string
            URL for model run information
        """
        # Get model to verify that it exists and to get connector information
        model = self.get_model(model_run.model_id)
        if model is None:
            raise ValueError('unknown model: ' + model_run.model_id)
        # RabbitMQ is currently the only supported connector type; a new
        # connection is opened per run request.
        RabbitMQConnector(model.connector).run_model(model_run, run_url)
    def update_model_connector(self, model_id, connector):
        """Update the connector information for a given model.

        Returns None if the specified model does not exist.

        Parameters
        ----------
        model_id : string
            Unique model identifier
        connector : dict
            New connection information

        Returns
        -------
        ModelHandle
        """
        # Validate the given connector information
        self.validate_connector(connector)
        # Connector information is valid. Ok to update the model.
        return self.registry.update_connector(model_id, connector)
    def upsert_model_properties(self, model_id, properties):
        """Upsert properties of given model.

        Raises ValueError if given property dictionary results in an illegal
        operation. Returns None if the specified model does not exist.

        Parameters
        ----------
        model_id : string
            Unique model identifier
        properties : Dictionary()
            Dictionary of property names and their new values.

        Returns
        -------
        ModelHandle
        """
        return self.registry.upsert_object_property(model_id, properties)
    def validate_connector(self, connector):
        """Validate a given connector. Raises ValueError if the connector is
        not valid.

        Parameters
        ----------
        connector : dict
            Connection information
        """
        if not 'connector' in connector:
            raise ValueError('missing connector name')
        elif connector['connector'] != CONNECTOR_RABBITMQ:
            raise ValueError('unknown connector: ' + str(connector['connector']))
        # Call the connector specific validator. Will raise a ValueError if
        # given connector information is invalid
        RabbitMQConnector.validate(connector)
# ------------------------------------------------------------------------------
# Connectors
# ------------------------------------------------------------------------------
class SCOEngineConnector(object):
    """Abstract base class for channels that deliver model run requests from
    the engine to backend workers.
    """
    @abstractmethod
    def run_model(self, model_run, run_url):
        """Submit the given model run to a worker over this channel.

        Implementations raise an EngineException when communication with the
        worker fails.

        Parameters
        ----------
        model_run : ModelRunHandle
            Handle to model run
        run_url : string
            URL for model run information
        """
        pass
class RabbitMQConnector(SCOEngineConnector):
    """SCO Workflow Engine client using RabbitMQ. Sends Json messages containing
    run identifier (and experiment identifier) to run model.
    """
    def __init__(self, connector):
        """Initialize the client from a connector dictionary providing host,
        port, virtual host, queue name, and credentials for the message queue.

        Parameters
        ----------
        connector : dict
            Connection information for RabbitMQ
        """
        # Validate the connector information. Raises ValueError in case of an
        # invalid connector.
        RabbitMQConnector.validate(connector)
        self.host = connector['host']
        self.port = connector['port']
        self.virtual_host = connector['virtualHost']
        self.queue = connector['queue']
        self.user = connector['user']
        self.password = connector['password']
    def run_model(self, model_run, run_url):
        """Run model by sending message to RabbitMQ queue containing the
        run and experiment identifier. Messages are persistent to ensure that
        a worker will process the run request at some point.

        Throws an EngineException if communication with the server fails.

        Parameters
        ----------
        model_run : ModelRunHandle
            Handle to model run
        run_url : string
            URL for model run information
        """
        # Open connection to RabbitMQ server. Will raise an exception if the
        # server is not running. In this case we raise an EngineException to
        # allow caller to delete model run.
        try:
            credentials = pika.PlainCredentials(self.user, self.password)
            con = pika.BlockingConnection(pika.ConnectionParameters(
                host=self.host,
                port=self.port,
                virtual_host=self.virtual_host,
                credentials=credentials
            ))
            channel = con.channel()
            # Durable queue so pending run requests survive a broker restart.
            channel.queue_declare(queue=self.queue, durable=True)
        except pika.exceptions.AMQPError as ex:
            err_msg = str(ex)
            if err_msg == '':
                # pika may raise with an empty message; build a descriptive
                # fallback from the connection settings instead.
                # NOTE(review): grouping of the message assembly inside this
                # branch is assumed from context -- confirm against upstream.
                err_msg = 'unable to connect to RabbitMQ: ' + self.user + '@'
                err_msg += self.host + ':' + str(self.port)
                err_msg += self.virtual_host + ' ' + self.queue
            raise EngineException(err_msg, 500)
        # Create model run request
        request = RequestFactory().get_request(model_run, run_url)
        # Send request
        channel.basic_publish(
            exchange='',
            routing_key=self.queue,
            body=json.dumps(request.to_dict()),
            properties=pika.BasicProperties(
                delivery_mode = 2, # make message persistent
            )
        )
        con.close()
    @staticmethod
    def validate(connector):
        """Validate the given connector information. Expects the following
        elements: host, port (int), virtualHost, queue, user, and password.
        Raises ValueError if any of the mandatory elements is missing or not of
        expected type.
        """
        for key in ['host', 'port', 'virtualHost', 'queue', 'user', 'password']:
            if not key in connector:
                raise ValueError('missing connector information: ' + key)
        # Try to convert the value for 'port' to int; raises ValueError if the
        # value is not a valid integer.
        int(connector['port'])
class BufferedConnector(SCOEngineConnector):
    """Connector that buffers run requests in a MongoDB collection instead of
    delivering them to a worker immediately.
    """
    def __init__(self, collection, connector):
        """Set the buffer collection and the connector information that a
        consumer of the buffer needs to deliver the request later.

        Parameters
        ----------
        collection : MongoDB Collection
            Collection that acts as the run request buffer
        connector : dict
            Connection information
        """
        self.collection = collection
        self.connector = connector
    def run_model(self, model_run, run_url):
        """Append an entry for the given run to the request buffer.

        Parameters
        ----------
        model_run : ModelRunHandle
            Handle to model run
        run_url : string
            URL for model run information
        """
        # Serialize the run request and persist it together with the
        # connector information so the eventual consumer knows where to
        # deliver it.
        payload = RequestFactory().get_request(model_run, run_url).to_dict()
        self.collection.insert_one({
            'connector' : self.connector,
            'request' : payload
        })
# ------------------------------------------------------------------------------
# Request Factory
# ------------------------------------------------------------------------------
class RequestFactory(object):
    """Factory for model run request objects. The resulting requests are
    interpreted by the various worker implementations to execute a predictive
    model.
    """
    def get_request(self, model_run, run_url):
        """Build a ModelRunRequest for the given run.

        Parameters
        ----------
        model_run : ModelRunHandle
            Handle to model run
        run_url : string
            URL for model run information

        Returns
        -------
        ModelRunRequest
            Object representing the model run request
        """
        # Workers need the run and experiment identifiers plus the resource
        # Url of the run instance.
        return ModelRunRequest(
            model_run.identifier,
            model_run.experiment_id,
            run_url
        )
class ModelRunRequest(object):
    """Request to execute a predictive model. Carries the run and experiment
    identifiers (used primarily by local workers) and the resource Url (for
    remote workers that use the SCO Client).

    Attributes
    ----------
    run_id : string
        Unique model run identifier
    experiment_id : string
        Unique experiment identifier
    resource_url : string
        Url for model run instance
    """
    def __init__(self, run_id, experiment_id, resource_url):
        """Store the request attributes.

        Parameters
        ----------
        run_id : string
            Unique model run identifier
        experiment_id : string
            Unique experiment identifier
        resource_url : string
            Url for model run instance
        """
        self.run_id = run_id
        self.experiment_id = experiment_id
        self.resource_url = resource_url
    @staticmethod
    def from_dict(json_obj):
        """Reconstruct a request from its dictionary serialization.

        Parameters
        ----------
        json_obj : dict
            Json dump for object representing the model run request.

        Returns
        -------
        ModelRunRequest
        """
        # Note the key asymmetry: the resource Url is serialized under 'href'.
        run = json_obj['run_id']
        experiment = json_obj['experiment_id']
        url = json_obj['href']
        return ModelRunRequest(run, experiment, url)
    def to_dict(self):
        """Serialize the request as a dictionary.

        Returns
        -------
        dict
            Dictionary representing the model run request.
        """
        return dict(
            run_id=self.run_id,
            experiment_id=self.experiment_id,
            href=self.resource_url
        )
# ------------------------------------------------------------------------------
# Exception
# ------------------------------------------------------------------------------
class EngineException(Exception):
    """Base class for SCO engine exceptions.

    Attributes
    ----------
    message : string
        Error message.
    status_code : int
        Http status code.
    """
    def __init__(self, message, status_code):
        """Initialize error message and status code.

        Parameters
        ----------
        message : string
            Error message.
        status_code : int
            Http status code.
        """
        # Pass the message to the Exception base class so that str(exc) and
        # exc.args carry it; the previous Exception.__init__(self) call left
        # str(exc) empty, which made logged tracebacks uninformative.
        super(EngineException, self).__init__(message)
        self.message = message
        self.status_code = status_code
    def to_dict(self):
        """Dictionary representation of the exception.

        Returns
        -------
        Dictionary
        """
        return {'message' : self.message}
# ------------------------------------------------------------------------------
#
# Helper Methods
#
# ------------------------------------------------------------------------------
def init_registry_from_json(mongo, filename, clear_collection=False):
    """Initialize a model registry from a file holding model definitions in
    Json format.

    Parameters
    ----------
    mongo : scodata.MongoDBFactory
        Connector for MongoDB
    filename : string
        Path to file containing model definitions
    clear_collection : boolean
        If True, the collection is dropped before models are created
    """
    # Parse the Json definition file, then delegate registration.
    with open(filename, 'r') as fh:
        model_defs = json.load(fh)
    init_registry(mongo, model_defs, clear_collection)
|
heikomuller/sco-engine
|
scoengine/__init__.py
|
SCOEngine.list_models
|
python
|
def list_models(self, limit=-1, offset=-1):
return self.registry.list_models(limit=limit, offset=offset)
|
Get a list of models in the registry.
Parameters
----------
limit : int
Limit number of items in the result set
offset : int
Set offset in list (order as defined by object store)
Returns
-------
list(ModelHandle)
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/__init__.py#L94-L108
| null |
class SCOEngine(object):
    """SCO workflow engine. Maintains a registry of models and communicates with
    backend workers to run predictive models.
    """
    def __init__(self, mongo):
        """Initialize the MongoDB collection where models and connector
        information is stored.

        Parameters
        ----------
        mongo : scodata.MongoDBFactory
            MongoDB connector
        """
        # Data is being stored in a collection named 'models'
        self.registry = ModelRegistry(mongo)
    def delete_model(self, model_id):
        """Delete the model with the given identifier from the model registry.

        Parameters
        ----------
        model_id : string
            Unique model identifier

        Returns
        -------
        ModelHandle
            Handle of the deleted model or None if it did not exist.
        """
        # Ensure that the existing model is erased so we can re-register a
        # model with the same identifier later on.
        return self.registry.delete_model(model_id, erase=True)
    def get_model(self, model_id):
        """Get the registered model with the given identifier.

        Parameters
        ----------
        model_id : string
            Unique model identifier

        Returns
        -------
        ModelHandle
            Handle for requested model or None if no model with given
            identifier exists.
        """
        return self.registry.get_model(model_id)
    def register_model(self, model_id, properties, parameters, outputs, connector):
        """Register a new model with the engine. Expects connection information
        for RabbitMQ to submit model run requests to workers.

        Raises ValueError if the given model identifier is not unique or the
        connector information is invalid.

        Parameters
        ----------
        model_id : string
            Unique model identifier
        properties : Dictionary
            Dictionary of model specific properties.
        parameters : list(scodata.attribute.AttributeDefinition)
            List of attribute definitions for model run parameters
        outputs : ModelOutputs
            Description of model outputs
        connector : dict
            Connection information to communicate with model workers. Expected
            to contain at least the connector name 'connector'.

        Returns
        -------
        ModelHandle
        """
        # Validate the given connector information
        self.validate_connector(connector)
        # Connector information is valid. Ok to register the model. Catch the
        # duplicate key error raised for an existing identifier and transform
        # it into a ValueError.
        try:
            return self.registry.register_model(
                model_id,
                properties,
                parameters,
                outputs,
                connector
            )
        except DuplicateKeyError as ex:
            raise ValueError(str(ex))
    def run_model(self, model_run, run_url):
        """Execute the given model run.

        Throws a ValueError if the given run specifies an unknown model or if
        the model connector is invalid. An EngineException is thrown if running
        the model (i.e., communication with the backend) fails.

        Parameters
        ----------
        model_run : ModelRunHandle
            Handle to model run
        run_url : string
            URL for model run information
        """
        # Get model to verify that it exists and to get connector information
        model = self.get_model(model_run.model_id)
        if model is None:
            raise ValueError('unknown model: ' + model_run.model_id)
        # RabbitMQ is currently the only supported connector type; a new
        # connection is opened per run request.
        RabbitMQConnector(model.connector).run_model(model_run, run_url)
    def update_model_connector(self, model_id, connector):
        """Update the connector information for a given model.

        Returns None if the specified model does not exist.

        Parameters
        ----------
        model_id : string
            Unique model identifier
        connector : dict
            New connection information

        Returns
        -------
        ModelHandle
        """
        # Validate the given connector information
        self.validate_connector(connector)
        # Connector information is valid. Ok to update the model.
        return self.registry.update_connector(model_id, connector)
    def upsert_model_properties(self, model_id, properties):
        """Upsert properties of given model.

        Raises ValueError if given property dictionary results in an illegal
        operation. Returns None if the specified model does not exist.

        Parameters
        ----------
        model_id : string
            Unique model identifier
        properties : Dictionary()
            Dictionary of property names and their new values.

        Returns
        -------
        ModelHandle
        """
        return self.registry.upsert_object_property(model_id, properties)
    def validate_connector(self, connector):
        """Validate a given connector. Raises ValueError if the connector is
        not valid.

        Parameters
        ----------
        connector : dict
            Connection information
        """
        if not 'connector' in connector:
            raise ValueError('missing connector name')
        elif connector['connector'] != CONNECTOR_RABBITMQ:
            raise ValueError('unknown connector: ' + str(connector['connector']))
        # Call the connector specific validator. Will raise a ValueError if
        # given connector information is invalid
        RabbitMQConnector.validate(connector)
|
heikomuller/sco-engine
|
scoengine/__init__.py
|
SCOEngine.register_model
|
python
|
def register_model(self, model_id, properties, parameters, outputs, connector):
# Validate the given connector information
self.validate_connector(connector)
# Connector information is valid. Ok to register the model. Will raise
# ValueError if model with given identifier exists. Catch duplicate
# key error to transform it into a ValueError
try:
return self.registry.register_model(
model_id,
properties,
parameters,
outputs,
connector
)
except DuplicateKeyError as ex:
raise ValueError(str(ex))
|
Register a new model with the engine. Expects connection information
for RabbitMQ to submit model run requests to workers.
Raises ValueError if the given model identifier is not unique.
Parameters
----------
model_id : string
Unique model identifier
properties : Dictionary
Dictionary of model specific properties.
parameters : list(scodata.attribute.AttributeDefinition)
List of attribute definitions for model run parameters
outputs : ModelOutputs
Description of model outputs
connector : dict
Connection information to communicate with model workers. Expected
to contain at least the connector name 'connector'.
Returns
-------
ModelHandle
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/__init__.py#L110-L148
|
[
"def validate_connector(self, connector):\n \"\"\"Validate a given connector. Raises ValueError if the connector is not\n valid.\n\n Parameters\n ----------\n connector : dict\n Connection information\n \"\"\"\n if not 'connector' in connector:\n raise ValueError('missing connector name')\n elif connector['connector'] != CONNECTOR_RABBITMQ:\n raise ValueError('unknown connector: ' + str(connector['connector']))\n # Call the connector specific validator. Will raise a ValueError if\n # given connector information is invalid\n RabbitMQConnector.validate(connector)\n"
] |
class SCOEngine(object):
    """SCO workflow engine. Maintains a registry of models and communicates with
    backend workers to run predictive models.
    """
    def __init__(self, mongo):
        """Initialize the MongoDB collection where models and connector
        information is stored.

        Parameters
        ----------
        mongo : scodata.MongoDBFactory
            MongoDB connector
        """
        # Data is being stored in a collection named 'models'
        self.registry = ModelRegistry(mongo)
    def delete_model(self, model_id):
        """Delete the model with the given identifier from the model registry.

        Parameters
        ----------
        model_id : string
            Unique model identifier

        Returns
        -------
        ModelHandle
            Handle of the deleted model or None if it did not exist.
        """
        # Ensure that the existing model is erased so we can re-register a
        # model with the same identifier later on.
        return self.registry.delete_model(model_id, erase=True)
    def get_model(self, model_id):
        """Get the registered model with the given identifier.

        Parameters
        ----------
        model_id : string
            Unique model identifier

        Returns
        -------
        ModelHandle
            Handle for requested model or None if no model with given
            identifier exists.
        """
        return self.registry.get_model(model_id)
    def list_models(self, limit=-1, offset=-1):
        """Get a list of models in the registry.

        Parameters
        ----------
        limit : int
            Limit number of items in the result set (-1 for no limit)
        offset : int
            Set offset in list (order as defined by object store)

        Returns
        -------
        list(ModelHandle)
        """
        return self.registry.list_models(limit=limit, offset=offset)
    def run_model(self, model_run, run_url):
        """Execute the given model run.

        Throws a ValueError if the given run specifies an unknown model or if
        the model connector is invalid. An EngineException is thrown if running
        the model (i.e., communication with the backend) fails.

        Parameters
        ----------
        model_run : ModelRunHandle
            Handle to model run
        run_url : string
            URL for model run information
        """
        # Get model to verify that it exists and to get connector information
        model = self.get_model(model_run.model_id)
        if model is None:
            raise ValueError('unknown model: ' + model_run.model_id)
        # RabbitMQ is currently the only supported connector type; a new
        # connection is opened per run request.
        RabbitMQConnector(model.connector).run_model(model_run, run_url)
    def update_model_connector(self, model_id, connector):
        """Update the connector information for a given model.

        Returns None if the specified model does not exist.

        Parameters
        ----------
        model_id : string
            Unique model identifier
        connector : dict
            New connection information

        Returns
        -------
        ModelHandle
        """
        # Validate the given connector information
        self.validate_connector(connector)
        # Connector information is valid. Ok to update the model.
        return self.registry.update_connector(model_id, connector)
    def upsert_model_properties(self, model_id, properties):
        """Upsert properties of given model.

        Raises ValueError if given property dictionary results in an illegal
        operation. Returns None if the specified model does not exist.

        Parameters
        ----------
        model_id : string
            Unique model identifier
        properties : Dictionary()
            Dictionary of property names and their new values.

        Returns
        -------
        ModelHandle
        """
        return self.registry.upsert_object_property(model_id, properties)
    def validate_connector(self, connector):
        """Validate a given connector. Raises ValueError if the connector is
        not valid.

        Parameters
        ----------
        connector : dict
            Connection information
        """
        if not 'connector' in connector:
            raise ValueError('missing connector name')
        elif connector['connector'] != CONNECTOR_RABBITMQ:
            raise ValueError('unknown connector: ' + str(connector['connector']))
        # Call the connector specific validator. Will raise a ValueError if
        # given connector information is invalid
        RabbitMQConnector.validate(connector)
|
heikomuller/sco-engine
|
scoengine/__init__.py
|
SCOEngine.run_model
|
python
|
def run_model(self, model_run, run_url):
    """Execute the given model run via the (only) registered connector.

    Raises ValueError when the run references a model that is not in the
    registry; an EngineException propagates when communication with the
    backend fails.
    """
    # Look the model up first so an unknown identifier fails fast.
    target = self.get_model(model_run.model_id)
    if target is None:
        raise ValueError('unknown model: ' + model_run.model_id)
    # Only the RabbitMQ connector exists at the moment; delegate the
    # actual message dispatch to it.
    connector = RabbitMQConnector(target.connector)
    connector.run_model(model_run, run_url)
|
Execute the given model run.
Throws a ValueError if the given run specifies an unknown model or if
the model connector is invalid. An EngineException is thrown if running
the model (i.e., communication with the backend) fails.
Parameters
----------
model_run : ModelRunHandle
Handle to model run
run_url : string
URL for model run information
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/__init__.py#L150-L170
|
[
"def get_model(self, model_id):\n \"\"\"Get the registered model with the given identifier.\n\n Parameters\n ----------\n model_id : string\n Unique model identifier\n\n Returns\n -------\n ModelHandle\n Handle for requested model or None if no model with given identifier\n exists.\n \"\"\"\n return self.registry.get_model(model_id)\n",
"def run_model(self, model_run, run_url):\n \"\"\"Run model by sending message to RabbitMQ queue containing the\n run end experiment identifier. Messages are persistent to ensure that\n a worker will process process the run request at some point.\n\n Throws a EngineException if communication with the server fails.\n\n Parameters\n ----------\n model_run : ModelRunHandle\n Handle to model run\n run_url : string\n URL for model run information\n \"\"\"\n # Open connection to RabbitMQ server. Will raise an exception if the\n # server is not running. In this case we raise an EngineException to\n # allow caller to delete model run.\n try:\n credentials = pika.PlainCredentials(self.user, self.password)\n con = pika.BlockingConnection(pika.ConnectionParameters(\n host=self.host,\n port=self.port,\n virtual_host=self.virtual_host,\n credentials=credentials\n ))\n channel = con.channel()\n channel.queue_declare(queue=self.queue, durable=True)\n except pika.exceptions.AMQPError as ex:\n err_msg = str(ex)\n if err_msg == '':\n err_msg = 'unable to connect to RabbitMQ: ' + self.user + '@'\n err_msg += self.host + ':' + str(self.port)\n err_msg += self.virtual_host + ' ' + self.queue\n raise EngineException(err_msg, 500)\n # Create model run request\n request = RequestFactory().get_request(model_run, run_url)\n # Send request\n channel.basic_publish(\n exchange='',\n routing_key=self.queue,\n body=json.dumps(request.to_dict()),\n properties=pika.BasicProperties(\n delivery_mode = 2, # make message persistent\n )\n )\n con.close()\n"
] |
class SCOEngine(object):
"""SCO workflow engine. Maintains a registry of models and communicates with
backend workers to run predictive models.
"""
def __init__(self, mongo):
"""Initialize the MongoDB collection where models and connector
information is stored.
Parameters
----------
mongo : scodata.MongoDBFactory
MongoDB connector
"""
        # Data is being stored in a collection named 'models'
self.registry = ModelRegistry(mongo)
def delete_model(self, model_id):
"""Delete the model with the given identifier from the model registry.
Parameters
----------
model_id : string
Unique model identifier
Returns
-------
ModelHandle
handle of deleted model or None if it did not exist.
"""
# Ensure that the existing model is erased so we can re-register a model
# with the same identifier later on.
return self.registry.delete_model(model_id, erase=True)
def get_model(self, model_id):
"""Get the registered model with the given identifier.
Parameters
----------
model_id : string
Unique model identifier
Returns
-------
ModelHandle
Handle for requested model or None if no model with given identifier
exists.
"""
return self.registry.get_model(model_id)
def list_models(self, limit=-1, offset=-1):
"""Get a list of models in the registry.
Parameters
----------
limit : int
Limit number of items in the result set
offset : int
Set offset in list (order as defined by object store)
Returns
-------
list(ModelHandle)
"""
return self.registry.list_models(limit=limit, offset=offset)
def register_model(self, model_id, properties, parameters, outputs, connector):
"""Register a new model with the engine. Expects connection information
for RabbitMQ to submit model run requests to workers.
Raises ValueError if the given model identifier is not unique.
Parameters
----------
model_id : string
Unique model identifier
properties : Dictionary
Dictionary of model specific properties.
parameters : list(scodata.attribute.AttributeDefinition)
List of attribute definitions for model run parameters
outputs : ModelOutputs
Description of model outputs
connector : dict
Connection information to communicate with model workers. Expected
to contain at least the connector name 'connector'.
Returns
-------
ModelHandle
"""
# Validate the given connector information
self.validate_connector(connector)
# Connector information is valid. Ok to register the model. Will raise
# ValueError if model with given identifier exists. Catch duplicate
# key error to transform it into a ValueError
try:
return self.registry.register_model(
model_id,
properties,
parameters,
outputs,
connector
)
except DuplicateKeyError as ex:
raise ValueError(str(ex))
def update_model_connector(self, model_id, connector):
"""Update the connector information for a given model.
Returns None if the specified model not exist.
Parameters
----------
model_id : string
Unique model identifier
connector : dict
New connection information
Returns
-------
ModelHandle
"""
# Validate the given connector information
self.validate_connector(connector)
# Connector information is valid. Ok to update the model.
return self.registry.update_connector(model_id, connector)
def upsert_model_properties(self, model_id, properties):
"""Upsert properties of given model.
Raises ValueError if given property dictionary results in an illegal
operation.
Returns None if the specified model does not exist.
Parameters
----------
model_id : string
Unique model identifier
properties : Dictionary()
Dictionary of property names and their new values.
Returns
-------
ModelHandle
"""
return self.registry.upsert_object_property(model_id, properties)
def validate_connector(self, connector):
"""Validate a given connector. Raises ValueError if the connector is not
valid.
Parameters
----------
connector : dict
Connection information
"""
if not 'connector' in connector:
raise ValueError('missing connector name')
elif connector['connector'] != CONNECTOR_RABBITMQ:
raise ValueError('unknown connector: ' + str(connector['connector']))
# Call the connector specific validator. Will raise a ValueError if
# given connector information is invalid
RabbitMQConnector.validate(connector)
|
heikomuller/sco-engine
|
scoengine/__init__.py
|
SCOEngine.update_model_connector
|
python
|
def update_model_connector(self, model_id, connector):
    """Replace the stored connector information for *model_id*.

    Raises ValueError when *connector* is malformed. Returns None when
    the model does not exist, otherwise the updated ModelHandle.
    """
    # Reject malformed connector dictionaries before touching the registry.
    self.validate_connector(connector)
    return self.registry.update_connector(model_id, connector)
|
Update the connector information for a given model.
Returns None if the specified model does not exist.
Parameters
----------
model_id : string
Unique model identifier
connector : dict
New connection information
Returns
-------
ModelHandle
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/__init__.py#L172-L191
|
[
"def validate_connector(self, connector):\n \"\"\"Validate a given connector. Raises ValueError if the connector is not\n valid.\n\n Parameters\n ----------\n connector : dict\n Connection information\n \"\"\"\n if not 'connector' in connector:\n raise ValueError('missing connector name')\n elif connector['connector'] != CONNECTOR_RABBITMQ:\n raise ValueError('unknown connector: ' + str(connector['connector']))\n # Call the connector specific validator. Will raise a ValueError if\n # given connector information is invalid\n RabbitMQConnector.validate(connector)\n"
] |
class SCOEngine(object):
"""SCO workflow engine. Maintains a registry of models and communicates with
backend workers to run predictive models.
"""
def __init__(self, mongo):
"""Initialize the MongoDB collection where models and connector
information is stored.
Parameters
----------
mongo : scodata.MongoDBFactory
MongoDB connector
"""
# Data is bein stored in a collection named 'models'
self.registry = ModelRegistry(mongo)
def delete_model(self, model_id):
"""Delete the model with the given identifier from the model registry.
Parameters
----------
model_id : string
Unique model identifier
Returns
-------
ModelHandle
handle of deleted model or None if it did not exist.
"""
# Ensure that the existing model is erased so we can re-register a model
# with the same identifier later on.
return self.registry.delete_model(model_id, erase=True)
def get_model(self, model_id):
"""Get the registered model with the given identifier.
Parameters
----------
model_id : string
Unique model identifier
Returns
-------
ModelHandle
Handle for requested model or None if no model with given identifier
exists.
"""
return self.registry.get_model(model_id)
def list_models(self, limit=-1, offset=-1):
"""Get a list of models in the registry.
Parameters
----------
limit : int
Limit number of items in the result set
offset : int
Set offset in list (order as defined by object store)
Returns
-------
list(ModelHandle)
"""
return self.registry.list_models(limit=limit, offset=offset)
def register_model(self, model_id, properties, parameters, outputs, connector):
"""Register a new model with the engine. Expects connection information
for RabbitMQ to submit model run requests to workers.
Raises ValueError if the given model identifier is not unique.
Parameters
----------
model_id : string
Unique model identifier
properties : Dictionary
Dictionary of model specific properties.
parameters : list(scodata.attribute.AttributeDefinition)
List of attribute definitions for model run parameters
outputs : ModelOutputs
Description of model outputs
connector : dict
Connection information to communicate with model workers. Expected
to contain at least the connector name 'connector'.
Returns
-------
ModelHandle
"""
# Validate the given connector information
self.validate_connector(connector)
# Connector information is valid. Ok to register the model. Will raise
# ValueError if model with given identifier exists. Catch duplicate
# key error to transform it into a ValueError
try:
return self.registry.register_model(
model_id,
properties,
parameters,
outputs,
connector
)
except DuplicateKeyError as ex:
raise ValueError(str(ex))
def run_model(self, model_run, run_url):
"""Execute the given model run.
Throws a ValueError if the given run specifies an unknown model or if
the model connector is invalid. An EngineException is thrown if running
the model (i.e., communication with the backend) fails.
Parameters
----------
model_run : ModelRunHandle
Handle to model run
run_url : string
URL for model run information
"""
# Get model to verify that it exists and to get connector information
model = self.get_model(model_run.model_id)
if model is None:
raise ValueError('unknown model: ' + model_run.model_id)
# By now there is only one connector. Use the buffered connector to
# avoid closed connection exceptions
RabbitMQConnector(model.connector).run_model(model_run, run_url)
def upsert_model_properties(self, model_id, properties):
"""Upsert properties of given model.
Raises ValueError if given property dictionary results in an illegal
operation.
Returns None if the specified model does not exist.
Parameters
----------
model_id : string
Unique model identifier
properties : Dictionary()
Dictionary of property names and their new values.
Returns
-------
ModelHandle
"""
return self.registry.upsert_object_property(model_id, properties)
def validate_connector(self, connector):
"""Validate a given connector. Raises ValueError if the connector is not
valid.
Parameters
----------
connector : dict
Connection information
"""
if not 'connector' in connector:
raise ValueError('missing connector name')
elif connector['connector'] != CONNECTOR_RABBITMQ:
raise ValueError('unknown connector: ' + str(connector['connector']))
# Call the connector specific validator. Will raise a ValueError if
# given connector information is invalid
RabbitMQConnector.validate(connector)
|
heikomuller/sco-engine
|
scoengine/__init__.py
|
SCOEngine.validate_connector
|
python
|
def validate_connector(self, connector):
    """Ensure *connector* names a supported backend and is well formed.

    Raises ValueError for a missing connector name, an unsupported
    connector type, or invalid RabbitMQ connection details.
    """
    if 'connector' not in connector:
        raise ValueError('missing connector name')
    if connector['connector'] != CONNECTOR_RABBITMQ:
        raise ValueError('unknown connector: ' + str(connector['connector']))
    # Delegate the field-level checks to the RabbitMQ-specific validator;
    # it raises ValueError on any missing or ill-typed entry.
    RabbitMQConnector.validate(connector)
|
Validate a given connector. Raises ValueError if the connector is not
valid.
Parameters
----------
connector : dict
Connection information
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/__init__.py#L214-L229
|
[
"def validate(connector):\n \"\"\"Validate the given connector information. Expects the following\n elements: host, port (int), virtualHost, queue, user, and password.\n\n Raises ValueError if any of the mandatory elements is missing or not of\n expected type.\n \"\"\"\n for key in ['host', 'port', 'virtualHost', 'queue', 'user', 'password']:\n if not key in connector:\n raise ValueError('missing connector information: ' + key)\n # Try to convert the value for'port' to int.\n int(connector['port'])\n"
] |
class SCOEngine(object):
"""SCO workflow engine. Maintains a registry of models and communicates with
backend workers to run predictive models.
"""
def __init__(self, mongo):
"""Initialize the MongoDB collection where models and connector
information is stored.
Parameters
----------
mongo : scodata.MongoDBFactory
MongoDB connector
"""
# Data is bein stored in a collection named 'models'
self.registry = ModelRegistry(mongo)
def delete_model(self, model_id):
"""Delete the model with the given identifier from the model registry.
Parameters
----------
model_id : string
Unique model identifier
Returns
-------
ModelHandle
handle of deleted model or None if it did not exist.
"""
# Ensure that the existing model is erased so we can re-register a model
# with the same identifier later on.
return self.registry.delete_model(model_id, erase=True)
def get_model(self, model_id):
"""Get the registered model with the given identifier.
Parameters
----------
model_id : string
Unique model identifier
Returns
-------
ModelHandle
Handle for requested model or None if no model with given identifier
exists.
"""
return self.registry.get_model(model_id)
def list_models(self, limit=-1, offset=-1):
"""Get a list of models in the registry.
Parameters
----------
limit : int
Limit number of items in the result set
offset : int
Set offset in list (order as defined by object store)
Returns
-------
list(ModelHandle)
"""
return self.registry.list_models(limit=limit, offset=offset)
def register_model(self, model_id, properties, parameters, outputs, connector):
"""Register a new model with the engine. Expects connection information
for RabbitMQ to submit model run requests to workers.
Raises ValueError if the given model identifier is not unique.
Parameters
----------
model_id : string
Unique model identifier
properties : Dictionary
Dictionary of model specific properties.
parameters : list(scodata.attribute.AttributeDefinition)
List of attribute definitions for model run parameters
outputs : ModelOutputs
Description of model outputs
connector : dict
Connection information to communicate with model workers. Expected
to contain at least the connector name 'connector'.
Returns
-------
ModelHandle
"""
# Validate the given connector information
self.validate_connector(connector)
# Connector information is valid. Ok to register the model. Will raise
# ValueError if model with given identifier exists. Catch duplicate
# key error to transform it into a ValueError
try:
return self.registry.register_model(
model_id,
properties,
parameters,
outputs,
connector
)
except DuplicateKeyError as ex:
raise ValueError(str(ex))
def run_model(self, model_run, run_url):
"""Execute the given model run.
Throws a ValueError if the given run specifies an unknown model or if
the model connector is invalid. An EngineException is thrown if running
the model (i.e., communication with the backend) fails.
Parameters
----------
model_run : ModelRunHandle
Handle to model run
run_url : string
URL for model run information
"""
# Get model to verify that it exists and to get connector information
model = self.get_model(model_run.model_id)
if model is None:
raise ValueError('unknown model: ' + model_run.model_id)
# By now there is only one connector. Use the buffered connector to
# avoid closed connection exceptions
RabbitMQConnector(model.connector).run_model(model_run, run_url)
def update_model_connector(self, model_id, connector):
"""Update the connector information for a given model.
Returns None if the specified model not exist.
Parameters
----------
model_id : string
Unique model identifier
connector : dict
New connection information
Returns
-------
ModelHandle
"""
# Validate the given connector information
self.validate_connector(connector)
# Connector information is valid. Ok to update the model.
return self.registry.update_connector(model_id, connector)
def upsert_model_properties(self, model_id, properties):
"""Upsert properties of given model.
Raises ValueError if given property dictionary results in an illegal
operation.
Returns None if the specified model does not exist.
Parameters
----------
model_id : string
Unique model identifier
properties : Dictionary()
Dictionary of property names and their new values.
Returns
-------
ModelHandle
"""
return self.registry.upsert_object_property(model_id, properties)
|
heikomuller/sco-engine
|
scoengine/__init__.py
|
RabbitMQConnector.run_model
|
python
|
def run_model(self, model_run, run_url):
    """Send a persistent model-run message to the RabbitMQ queue.

    Connection failures are converted into an EngineException (HTTP 500)
    so the caller can clean up the model run.

    Parameters
    ----------
    model_run : ModelRunHandle
        Handle to model run
    run_url : string
        URL for model run information
    """
    # Open connection to RabbitMQ server. Will raise an exception if the
    # server is not running. In this case we raise an EngineException to
    # allow caller to delete model run.
    try:
        credentials = pika.PlainCredentials(self.user, self.password)
        con = pika.BlockingConnection(pika.ConnectionParameters(
            host=self.host,
            port=self.port,
            virtual_host=self.virtual_host,
            credentials=credentials
        ))
        channel = con.channel()
        # Durable queue so queued run requests survive a broker restart.
        channel.queue_declare(queue=self.queue, durable=True)
    except pika.exceptions.AMQPError as ex:
        err_msg = str(ex)
        if err_msg == '':
            # pika sometimes raises with an empty message; build a
            # descriptive one from the connection parameters instead.
            err_msg = 'unable to connect to RabbitMQ: ' + self.user + '@'
            err_msg += self.host + ':' + str(self.port)
            err_msg += self.virtual_host + ' ' + self.queue
        raise EngineException(err_msg, 500)
    # Create model run request
    request = RequestFactory().get_request(model_run, run_url)
    # Send request
    channel.basic_publish(
        exchange='',
        routing_key=self.queue,
        body=json.dumps(request.to_dict()),
        properties=pika.BasicProperties(
            delivery_mode = 2, # make message persistent
        )
    )
    con.close()
|
Run model by sending message to RabbitMQ queue containing the
run end experiment identifier. Messages are persistent to ensure that
a worker will process the run request at some point.
Throws a EngineException if communication with the server fails.
Parameters
----------
model_run : ModelRunHandle
Handle to model run
run_url : string
URL for model run information
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/__init__.py#L278-L323
|
[
"def get_request(self, model_run, run_url):\n \"\"\"Create request object to run model. Requests are handled by SCO\n worker implementations.\n\n Parameters\n ----------\n model_run : ModelRunHandle\n Handle to model run\n run_url : string\n URL for model run information\n\n Returns\n -------\n ModelRunRequest\n Object representing model run request\n \"\"\"\n return ModelRunRequest(\n model_run.identifier,\n model_run.experiment_id,\n run_url\n )\n",
"def to_dict(self):\n \"\"\"Return dictionary serialization of the run request.\n\n Returns\n -------\n dict\n Dictionary representing the model run request.\n \"\"\"\n return {\n 'run_id' : self.run_id,\n 'experiment_id' : self.experiment_id,\n 'href' : self.resource_url\n }\n"
] |
class RabbitMQConnector(SCOEngineConnector):
"""SCO Workflow Engine client using RabbitMQ. Sends Json messages containing
run identifier (and experiment identifier) to run model.
"""
def __init__(self, connector):
"""Initialize the client by providing host name and queue identifier
for message queue. In addition, requires a HATEOAS reference factory
to generate resource URLs.
Parameters
----------
connector : dict
Connection information for RabbitMQ
"""
# Validate the connector information. Raises ValueError in case of an
# invalid connector.
RabbitMQConnector.validate(connector)
self.host = connector['host']
self.port = connector['port']
self.virtual_host = connector['virtualHost']
self.queue = connector['queue']
self.user = connector['user']
self.password = connector['password']
@staticmethod
def validate(connector):
"""Validate the given connector information. Expects the following
elements: host, port (int), virtualHost, queue, user, and password.
Raises ValueError if any of the mandatory elements is missing or not of
expected type.
"""
for key in ['host', 'port', 'virtualHost', 'queue', 'user', 'password']:
if not key in connector:
raise ValueError('missing connector information: ' + key)
# Try to convert the value for'port' to int.
int(connector['port'])
|
heikomuller/sco-engine
|
scoengine/__init__.py
|
BufferedConnector.run_model
|
python
|
def run_model(self, model_run, run_url):
    """Buffer a model run request in the MongoDB collection.

    Instead of talking to the broker directly, the request (together
    with the connector details needed to deliver it later) is persisted
    as a single document.
    """
    run_request = RequestFactory().get_request(model_run, run_url)
    document = {
        'connector' : self.connector,
        'request' : run_request.to_dict()
    }
    self.collection.insert_one(document)
|
Create entry in run request buffer.
Parameters
----------
model_run : ModelRunHandle
Handle to model run
run_url : string
URL for model run information
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/__init__.py#L357-L373
|
[
"def get_request(self, model_run, run_url):\n \"\"\"Create request object to run model. Requests are handled by SCO\n worker implementations.\n\n Parameters\n ----------\n model_run : ModelRunHandle\n Handle to model run\n run_url : string\n URL for model run information\n\n Returns\n -------\n ModelRunRequest\n Object representing model run request\n \"\"\"\n return ModelRunRequest(\n model_run.identifier,\n model_run.experiment_id,\n run_url\n )\n",
"def to_dict(self):\n \"\"\"Return dictionary serialization of the run request.\n\n Returns\n -------\n dict\n Dictionary representing the model run request.\n \"\"\"\n return {\n 'run_id' : self.run_id,\n 'experiment_id' : self.experiment_id,\n 'href' : self.resource_url\n }\n"
] |
class BufferedConnector(SCOEngineConnector):
"""Connector that writes run request into a MongoDB collection."""
def __init__(self, collection, connector):
"""Initialize the MongoDB collection and the connector.
Parameters
----------
collection : MongoDB Collection
Collection that acts as the run request buffer
connector : dict
Connection information
"""
# Validate the connector information. Raises ValueError in case of an
# invalid connector.
self.collection = collection
self.connector = connector
|
heikomuller/sco-engine
|
scoengine/__init__.py
|
RequestFactory.get_request
|
python
|
def get_request(self, model_run, run_url):
    """Build the ModelRunRequest that SCO workers will interpret.

    Parameters
    ----------
    model_run : ModelRunHandle
        Handle to model run
    run_url : string
        URL for model run information

    Returns
    -------
    ModelRunRequest
    """
    run_id = model_run.identifier
    experiment_id = model_run.experiment_id
    return ModelRunRequest(run_id, experiment_id, run_url)
|
Create request object to run model. Requests are handled by SCO
worker implementations.
Parameters
----------
model_run : ModelRunHandle
Handle to model run
run_url : string
URL for model run information
Returns
-------
ModelRunRequest
Object representing model run request
|
train
|
https://github.com/heikomuller/sco-engine/blob/3e7782d059ec808d930f0992794b6f5a8fd73c2c/scoengine/__init__.py#L383-L403
| null |
class RequestFactory(object):
"""Helper class to generate request object for model runs. The requests are
interpreted by different worker implementations to run the predictive model.
"""
|
davisd50/sparc.cache
|
sparc/cache/item.py
|
schema_map
|
python
|
def schema_map(schema):
    """Return a valid ICachedItemMapper.map for *schema*.

    Every field name defined in the zope schema is mapped to itself
    (an identity mapping).

    Parameters
    ----------
    schema : zope interface schema
        Schema whose field names are enumerated via ``getFieldNames``.

    Returns
    -------
    dict
        Mapping of each field name to itself.
    """
    # Dict comprehension replaces the original manual loop-and-assign.
    return {name: name for name in getFieldNames(schema)}
|
Return a valid ICachedItemMapper.map for schema
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/item.py#L14-L19
| null |
import datetime
import inspect
from zope.interface import alsoProvides
from zope.interface import implements
from zope.component import subscribers
from zope.component.interfaces import IFactory
from zope.component.factory import Factory
from zope.schema import getFieldNames
from sparc.cache import ICachableItem, ICachedItem, IAgeableCachedItem, ICachedItemMapper
from sparc.logging import logging
logger = logging.getLogger(__name__)
class CachedItemMapperFactory(object):
    """IFactory that locates a subscribed ICachedItemMapper capable of
    handling items produced by a given cachable source / factory pair.

    The first item from the source is used as a probe: each subscribed
    mapper is asked to ``check()`` it, and the first mapper that accepts
    the item is returned.
    """
    implements(IFactory)
    title = u"Create object with ICachedItemMapper from ICachableSource, CachedItemFactory"
    description = u"Allows for easy ICachedItemMapper generation"
    def __call__(self, CachableSource, CachedItemFactory):
        item = CachableSource.first()
        if not item:
            raise ValueError("expected CachableSource to be able to generate at least 1 item.")
        for mapper in subscribers((CachableSource,CachedItemFactory,), ICachedItemMapper):
            logger.debug("testing mapper/cachedItem combination: %s, %s", str(mapper), str(CachedItemFactory))
            if mapper.check(item):
                logger.debug("found valid ICachedItemMapper %s", str(mapper))
                return mapper
            logger.debug("skipping CachedItemMapper %s because item failed mapper validation check", str(mapper))
        # BUG FIX: the original wrote ``str(CachableSource). str(...)`` (a
        # period instead of a comma), which raised AttributeError instead of
        # the intended LookupError, and the format string was never
        # interpolated. Format the message explicitly.
        raise LookupError(
            "unable to find subscribed ICachedItemMapper for given source "
            "and factory: %s, %s" % (str(CachableSource), str(CachedItemFactory)))
    def getInterfaces(self):
        return [ICachedItemMapper]
class cachableItemMixin(object):
"""Base class for ICachableItem implementations
"""
implements(ICachableItem)
def __init__(self, key, attributes):
"""Object initialization
Args:
key: String name of an attributes key that represents the unique identify of the request
attributes: Dictionary whose keys match the string values of the request attribute's names and values correspond the the request attribute values
"""
self.key = key
self.attributes = attributes
def getId(self):
return self.attributes[self.key]
def validate(self):
if not self.attributes.has_key(self.key):
raise KeyError("expected item's attributes to have entry for key field: %s in keys: %s", self.key, str(self.attributes.keys()))
if not self.attributes[self.key]:
raise ValueError("expected item's key attribute to have a non-empty value")
logger.debug("item passed validation: %s", str(self.getId()))
simpleCachableItemFactory = Factory(cachableItemMixin)
class CachableItemFromSchema(cachableItemMixin):
"""Create a ICachableItem from a zope schema interface
"""
implements(ICachableItem)
def __init__(self, key, schema, object=None):
super(CachableItemFromSchema, self).__init__(key,
{k:None for k in getFieldNames(schema)})
if object:
for name in getFieldNames(schema):
self.attributes[name] = getattr(object, name)
cachableItemFromSchemaFactory = Factory(CachableItemFromSchema)
class cachedItemMixin(object):
"""Base class for ICachedItem implementations
"""
implements(ICachedItem)
_key = 'Must be defined by implementers'
# implementers can place a list of Interfaces here that will used when checking
# equivalence. Otherwise all attributes are checked minus getId() and those
# starting with '_'
_eq_checked_interfaces = []
def getId(self):
return getattr(self, self._key)
def __eq__(self, instance):
attributes = []
if self._eq_checked_interfaces:
for iface in self._eq_checked_interfaces:
for name in iface:
attributes.append(name)
else:
for name, value in inspect.getmembers(self):
if name.startswith("_") or name in ['getId']:
continue
attributes.append(name)
for name in attributes:
if not hasattr(instance, name):
return False
if getattr(self, name) != getattr(instance, name):
return False
return True
def __ne__(self, instance):
return not self.__eq__(instance)
class ageableCacheItemMixin(cachedItemMixin):
"""Base class for IAgeableCachedItem implementations
Implementers can set:
self._birth: [optional]. Python datetime of cache item creation
self._expiration: [do not set if self._expiration_age is set]. Python
datetime of when cache item is no longer valid
self._expiration_age: [do not set if self._expiration is set]. Python
timedelta of maximum item age before it is considered
invalid.
If the parameters above are not set, these defaults will be assigned:
_birth: defaults to now
_expiration|_expiration_age: defaults to datetime.MAXYEAR
"""
implements(IAgeableCachedItem)
def __init__(self):
if not hasattr(self, '_birth'):
self._birth = datetime.datetime.now()
if hasattr(self, '_expiration'):
self._expiration_age = self._expiration - self._birth
elif hasattr(self, '_expiration_age'):
self._expiration = self._birth + self._expiration_age
else:
self._expiration = datetime.datetime(datetime.MAXYEAR, 12, 31)
self._expiration_age = self._expiration - self._birth
def birth(self):
return self._birth
def age(self):
return datetime.datetime.now() - self._birth
def expiration(self):
return self._expiration
def expiration_age(self):
return self._expiration_age
def expired(self):
return datetime.datetime.now > self._expiration
class SimpleItemMapper(object):
"""A simple attribute item mapper
A very simple implementation that will generate on-the-fly ICachedItem
objects with one-to-one mappings to ICachableItem.attributes key/value.
"""
implements(ICachedItemMapper)
def __init__(self, key, CacheableItem, filter=None):
"""Init
Args:
key: String name of CacheableItem.attributes key that should be
considered the unique primary key identifier for the object.
CacheableItem: instance of sparc.cache.ICachableItem whose
attributes have the required keys populated
filter: Callable taking two arguments. The first argument is the
attribute name, the second is the ICachableItem value of
that attribute. The return value should be what will be
assigned to the related attribute on the ICacheItem
"""
self._key = key
self.mapper = {k:k for k in CacheableItem.attributes}
self.filter = filter if filter else lambda x,y:y
#ICachedItemMapper
def key(self):
return self._key
def factory(self):
#base = cachedItemMixin()
#base._key = self.key()
ci = type('SimpleItemMapperCachedItem', (cachedItemMixin,), {key:None for key in self.mapper.keys()})()
ci._key = self.key()
alsoProvides(ci, ICachedItem)
return ci
def get(self, CachableItem):
ci = self.factory()
for name in self.mapper:
setattr(ci, name, self.filter(name, CachableItem.attributes[name]))
return ci
def check(self, CachableItem):
for name in self.mapper:
if name not in CachableItem.attributes:
return False
return True
simpleItemMapperFactory = Factory(SimpleItemMapper)
|
davisd50/sparc.cache
|
sparc/cache/sources/csvdata.py
|
CSVSource.items
|
python
|
def items(self):
for dictreader in self._csv_dictreader_list:
for entry in dictreader:
item = self.factory()
item.key = self.key()
item.attributes = entry
try:
item.validate()
except Exception as e:
logger.debug("skipping entry due to item validation exception: %s", str(e))
continue
logger.debug("found validated item in CSV source, key: %s", str(item.attributes[self.key()]))
yield item
|
Returns a generator of available ICachableItem in the ICachableSource
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/sources/csvdata.py#L75-L89
|
[
"def key(self):\n \"\"\"Returns string identifier key that marks unique item entries (e.g. primary key field name)\"\"\"\n return self._key if self._key else self.factory().key\n"
] |
class CSVSource(object):
implements(ICachableSource)
def __init__(self, source, factory, key = None):
"""Initialize the CSV data source
The CSV data source, where the first data row in the represented file
contains the field headers (names).
Args:
source: This can be a String, a csv.DictReader, or a generic object
that supports the iterator protocol (see csv.reader). If
this is a String, it should point to either a CSV file,
or a directory containing CSV files. If it is a
object supporting the iterator protocol, each call to next()
should return a valid csv line.
factory: A callable that implements zope.component.factory.IFactory
that generates instances of ICachableItem.
key: String name of CSV header field that acts as the unique key for each
CSV item entry (i.e. the primary key field)
Raises:
ValueError: if string source parameter does not point a referencable file or directory
"""
# TODO: This class current has more methods than ICachableSource. We either
# need to update the interface, or create a new one for the extra methods
self._key = key
self.source = source
self.factory = factory
self._files = list()
self._csv_dictreader_list = list()
if isinstance(source, str):
if os.path.isfile(source):
_file = open(source,'rb')
self._files.append(_file)
self._csv_dictreader_list.append(DictReader(_file))
elif os.path.isdir(source):
for _entry in os.listdir(source):
_file = open(_entry,'rb')
self._files.append(_file)
self._csv_dictreader_list.append(DictReader(_file))
else:
raise ValueError("expected string source parameter to reference a valid file or directory: " + str(source))
elif isinstance(source, DictReader):
self._csv_dictreader_list.append(source)
else:
self._csv_dictreader_list.append(DictReader(source))
def __del__(self):
"""Object cleanup
This will close all open file handles held by the object.
"""
for f in self._files:
f.close()
def key(self):
"""Returns string identifier key that marks unique item entries (e.g. primary key field name)"""
return self._key if self._key else self.factory().key
def getById(self, Id):
"""Returns ICachableItem that matches id
Args:
id: String that identifies the item to return whose key matches
"""
# we need to create a new object to insure we don't corrupt the generator count
csvsource = CSVSource(self.source, self.factory, self.key())
try:
for item in csvsource.items():
if Id == item.getId():
return item
except StopIteration:
return None
def first(self):
"""Returns the first ICachableItem in the ICachableSource"""
# we need to create a new object to insure we don't corrupt the generator count
csvsource = CSVSource(self.source, self.factory, self.key())
try:
item = csvsource.items().next()
return item
except StopIteration:
return None
|
davisd50/sparc.cache
|
sparc/cache/sources/csvdata.py
|
CSVSource.getById
|
python
|
def getById(self, Id):
# we need to create a new object to insure we don't corrupt the generator count
csvsource = CSVSource(self.source, self.factory, self.key())
try:
for item in csvsource.items():
if Id == item.getId():
return item
except StopIteration:
return None
|
Returns ICachableItem that matches id
Args:
id: String that identifies the item to return whose key matches
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/sources/csvdata.py#L91-L104
|
[
"def key(self):\n \"\"\"Returns string identifier key that marks unique item entries (e.g. primary key field name)\"\"\"\n return self._key if self._key else self.factory().key\n",
"def items(self):\n \"\"\"Returns a generator of available ICachableItem in the ICachableSource\n \"\"\"\n for dictreader in self._csv_dictreader_list:\n for entry in dictreader:\n item = self.factory()\n item.key = self.key()\n item.attributes = entry\n try:\n item.validate()\n except Exception as e:\n logger.debug(\"skipping entry due to item validation exception: %s\", str(e))\n continue\n logger.debug(\"found validated item in CSV source, key: %s\", str(item.attributes[self.key()]))\n yield item\n"
] |
class CSVSource(object):
implements(ICachableSource)
def __init__(self, source, factory, key = None):
"""Initialize the CSV data source
The CSV data source, where the first data row in the represented file
contains the field headers (names).
Args:
source: This can be a String, a csv.DictReader, or a generic object
that supports the iterator protocol (see csv.reader). If
this is a String, it should point to either a CSV file,
or a directory containing CSV files. If it is a
object supporting the iterator protocol, each call to next()
should return a valid csv line.
factory: A callable that implements zope.component.factory.IFactory
that generates instances of ICachableItem.
key: String name of CSV header field that acts as the unique key for each
CSV item entry (i.e. the primary key field)
Raises:
ValueError: if string source parameter does not point a referencable file or directory
"""
# TODO: This class current has more methods than ICachableSource. We either
# need to update the interface, or create a new one for the extra methods
self._key = key
self.source = source
self.factory = factory
self._files = list()
self._csv_dictreader_list = list()
if isinstance(source, str):
if os.path.isfile(source):
_file = open(source,'rb')
self._files.append(_file)
self._csv_dictreader_list.append(DictReader(_file))
elif os.path.isdir(source):
for _entry in os.listdir(source):
_file = open(_entry,'rb')
self._files.append(_file)
self._csv_dictreader_list.append(DictReader(_file))
else:
raise ValueError("expected string source parameter to reference a valid file or directory: " + str(source))
elif isinstance(source, DictReader):
self._csv_dictreader_list.append(source)
else:
self._csv_dictreader_list.append(DictReader(source))
def __del__(self):
"""Object cleanup
This will close all open file handles held by the object.
"""
for f in self._files:
f.close()
def key(self):
"""Returns string identifier key that marks unique item entries (e.g. primary key field name)"""
return self._key if self._key else self.factory().key
def items(self):
"""Returns a generator of available ICachableItem in the ICachableSource
"""
for dictreader in self._csv_dictreader_list:
for entry in dictreader:
item = self.factory()
item.key = self.key()
item.attributes = entry
try:
item.validate()
except Exception as e:
logger.debug("skipping entry due to item validation exception: %s", str(e))
continue
logger.debug("found validated item in CSV source, key: %s", str(item.attributes[self.key()]))
yield item
def getById(self, Id):
"""Returns ICachableItem that matches id
Args:
id: String that identifies the item to return whose key matches
"""
# we need to create a new object to insure we don't corrupt the generator count
csvsource = CSVSource(self.source, self.factory, self.key())
try:
for item in csvsource.items():
if Id == item.getId():
return item
except StopIteration:
return None
def first(self):
"""Returns the first ICachableItem in the ICachableSource"""
# we need to create a new object to insure we don't corrupt the generator count
csvsource = CSVSource(self.source, self.factory, self.key())
try:
item = csvsource.items().next()
return item
except StopIteration:
return None
|
davisd50/sparc.cache
|
sparc/cache/sources/csvdata.py
|
CSVSource.first
|
python
|
def first(self):
# we need to create a new object to insure we don't corrupt the generator count
csvsource = CSVSource(self.source, self.factory, self.key())
try:
item = csvsource.items().next()
return item
except StopIteration:
return None
|
Returns the first ICachableItem in the ICachableSource
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/sources/csvdata.py#L106-L114
|
[
"def key(self):\n \"\"\"Returns string identifier key that marks unique item entries (e.g. primary key field name)\"\"\"\n return self._key if self._key else self.factory().key\n",
"def items(self):\n \"\"\"Returns a generator of available ICachableItem in the ICachableSource\n \"\"\"\n for dictreader in self._csv_dictreader_list:\n for entry in dictreader:\n item = self.factory()\n item.key = self.key()\n item.attributes = entry\n try:\n item.validate()\n except Exception as e:\n logger.debug(\"skipping entry due to item validation exception: %s\", str(e))\n continue\n logger.debug(\"found validated item in CSV source, key: %s\", str(item.attributes[self.key()]))\n yield item\n"
] |
class CSVSource(object):
implements(ICachableSource)
def __init__(self, source, factory, key = None):
"""Initialize the CSV data source
The CSV data source, where the first data row in the represented file
contains the field headers (names).
Args:
source: This can be a String, a csv.DictReader, or a generic object
that supports the iterator protocol (see csv.reader). If
this is a String, it should point to either a CSV file,
or a directory containing CSV files. If it is a
object supporting the iterator protocol, each call to next()
should return a valid csv line.
factory: A callable that implements zope.component.factory.IFactory
that generates instances of ICachableItem.
key: String name of CSV header field that acts as the unique key for each
CSV item entry (i.e. the primary key field)
Raises:
ValueError: if string source parameter does not point a referencable file or directory
"""
# TODO: This class current has more methods than ICachableSource. We either
# need to update the interface, or create a new one for the extra methods
self._key = key
self.source = source
self.factory = factory
self._files = list()
self._csv_dictreader_list = list()
if isinstance(source, str):
if os.path.isfile(source):
_file = open(source,'rb')
self._files.append(_file)
self._csv_dictreader_list.append(DictReader(_file))
elif os.path.isdir(source):
for _entry in os.listdir(source):
_file = open(_entry,'rb')
self._files.append(_file)
self._csv_dictreader_list.append(DictReader(_file))
else:
raise ValueError("expected string source parameter to reference a valid file or directory: " + str(source))
elif isinstance(source, DictReader):
self._csv_dictreader_list.append(source)
else:
self._csv_dictreader_list.append(DictReader(source))
def __del__(self):
"""Object cleanup
This will close all open file handles held by the object.
"""
for f in self._files:
f.close()
def key(self):
"""Returns string identifier key that marks unique item entries (e.g. primary key field name)"""
return self._key if self._key else self.factory().key
def items(self):
"""Returns a generator of available ICachableItem in the ICachableSource
"""
for dictreader in self._csv_dictreader_list:
for entry in dictreader:
item = self.factory()
item.key = self.key()
item.attributes = entry
try:
item.validate()
except Exception as e:
logger.debug("skipping entry due to item validation exception: %s", str(e))
continue
logger.debug("found validated item in CSV source, key: %s", str(item.attributes[self.key()]))
yield item
def getById(self, Id):
"""Returns ICachableItem that matches id
Args:
id: String that identifies the item to return whose key matches
"""
# we need to create a new object to insure we don't corrupt the generator count
csvsource = CSVSource(self.source, self.factory, self.key())
try:
for item in csvsource.items():
if Id == item.getId():
return item
except StopIteration:
return None
|
davisd50/sparc.cache
|
sparc/cache/sql/sql.py
|
SqlObjectCacheArea.get
|
python
|
def get(self, CachableItem):
return self.session.\
query(self.mapper.factory().__class__).\
filter(self.mapper.factory().__class__.__dict__[self.mapper.key()]==CachableItem.getId()).\
first()
|
Returns current ICachedItem for ICachableItem
Args:
CachableItem: ICachableItem, used as a reference to find a cached version
Returns: ICachedItem or None, if CachableItem has not been cached
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/sql/sql.py#L124-L135
| null |
class SqlObjectCacheArea(object):
"""Adapter implementation for cachable storage into a SQLAlchemy DB backend
You MUST indicate that your SQL Alchemy Session objects provide the
related marker interfaces prior to calling this adapter (see usage example
in sql.txt)
Interface implementation requirements:
To use this class, several class dependencies must be met. The following
break-down should help you better understand the Interface dependencies
- ICacheArea (this class)
- ISqlAlchemySession (marker interface, applied to SQLAlchemy session object)
- ICachedItemMapper
- ICachableSource
- ICachedItem (indirect...required for __init__)
- ICachableItem (needed via method calls)
"""
implements(ITransactionalCacheArea)
adapts(ISqlAlchemyDeclarativeBase, ISqlAlchemySession, ICachedItemMapper)
def __init__(self, SqlAlchemyDeclarativeBase, SqlAlchemySession, CachedItemMapper):
"""Object initialization
"""
self.Base = SqlAlchemyDeclarativeBase
self.session = SqlAlchemySession
self.mapper = CachedItemMapper
if not isinstance(SqlAlchemySession, Session):
raise TypeError("expected SQLAlchmey_session to be an instance of:"
+ " sqlalchemy.orm.Session")
assert SqlAlchemySession.bind, "expected SQLAlchmey_session to be "\
+ "bound to Engine"
def get(self, CachableItem):
"""Returns current ICachedItem for ICachableItem
Args:
CachableItem: ICachableItem, used as a reference to find a cached version
Returns: ICachedItem or None, if CachableItem has not been cached
"""
return self.session.\
query(self.mapper.factory().__class__).\
filter(self.mapper.factory().__class__.__dict__[self.mapper.key()]==CachableItem.getId()).\
first()
def isDirty(self, CachableItem):
"""True if cached information requires update for ICachableItem
Args:
CachableItem: ICachableItem, used as a reference to find a cached version
Returns: True if CachableItem requires a cache update
"""
# we'll create a new ICachedItem from the current data and compare it to
# ICachedItem we get from the DB
_cachedItem = self.get(CachableItem)
if not _cachedItem:
return True
_newCacheItem = self.mapper.get(CachableItem)
return False if _cachedItem == _newCacheItem else True
def cache(self, CachableItem):
"""Updates cache area with latest information
"""
_cachedItem = self.get(CachableItem)
if not _cachedItem:
_dirtyCachedItem = self.mapper.get(CachableItem)
logger.debug("new cachable item added to sql cache area {id: %s, type: %s}", str(_dirtyCachedItem.getId()), str(_dirtyCachedItem.__class__))
cached_item = self.session.merge(_dirtyCachedItem)
notify(CacheObjectCreatedEvent(cached_item, self))
return cached_item
else:
_newCacheItem = self.mapper.get(CachableItem)
if _cachedItem != _newCacheItem:
logger.debug("Cachable item modified in sql cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__))
cached_item = self.session.merge(_newCacheItem)
notify(CacheObjectModifiedEvent(cached_item, self))
return cached_item
return False
def import_source(self, CachableSource):
"""Updates cache area and returns number of items updated with all available entries in ICachableSource"""
_count = 0
for item in CachableSource.items():
if self.cache(item):
_count += 1
return _count
def commit(self):
self.session.commit()
def rollback(self):
self.session.rollback()
def reset(self):
"""Deletes all entries in the cache area"""
self.Base.metadata.drop_all(self.session.bind)
self.initialize()
def initialize(self):
"""Instantiates the cache area to be ready for updates"""
self.Base.metadata.create_all(self.session.bind)
logger.debug("initialized sqlalchemy orm tables")
|
davisd50/sparc.cache
|
sparc/cache/sql/sql.py
|
SqlObjectCacheArea.cache
|
python
|
def cache(self, CachableItem):
_cachedItem = self.get(CachableItem)
if not _cachedItem:
_dirtyCachedItem = self.mapper.get(CachableItem)
logger.debug("new cachable item added to sql cache area {id: %s, type: %s}", str(_dirtyCachedItem.getId()), str(_dirtyCachedItem.__class__))
cached_item = self.session.merge(_dirtyCachedItem)
notify(CacheObjectCreatedEvent(cached_item, self))
return cached_item
else:
_newCacheItem = self.mapper.get(CachableItem)
if _cachedItem != _newCacheItem:
logger.debug("Cachable item modified in sql cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__))
cached_item = self.session.merge(_newCacheItem)
notify(CacheObjectModifiedEvent(cached_item, self))
return cached_item
return False
|
Updates cache area with latest information
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/sql/sql.py#L153-L170
|
[
"def get(self, CachableItem):\n \"\"\"Returns current ICachedItem for ICachableItem\n\n Args:\n CachableItem: ICachableItem, used as a reference to find a cached version\n\n Returns: ICachedItem or None, if CachableItem has not been cached\n \"\"\"\n return self.session.\\\n query(self.mapper.factory().__class__).\\\n filter(self.mapper.factory().__class__.__dict__[self.mapper.key()]==CachableItem.getId()).\\\n first()\n"
] |
class SqlObjectCacheArea(object):
"""Adapter implementation for cachable storage into a SQLAlchemy DB backend
You MUST indicate that your SQL Alchemy Session objects provide the
related marker interfaces prior to calling this adapter (see usage example
in sql.txt)
Interface implementation requirements:
To use this class, several class dependencies must be met. The following
break-down should help you better understand the Interface dependencies
- ICacheArea (this class)
- ISqlAlchemySession (marker interface, applied to SQLAlchemy session object)
- ICachedItemMapper
- ICachableSource
- ICachedItem (indirect...required for __init__)
- ICachableItem (needed via method calls)
"""
implements(ITransactionalCacheArea)
adapts(ISqlAlchemyDeclarativeBase, ISqlAlchemySession, ICachedItemMapper)
def __init__(self, SqlAlchemyDeclarativeBase, SqlAlchemySession, CachedItemMapper):
"""Object initialization
"""
self.Base = SqlAlchemyDeclarativeBase
self.session = SqlAlchemySession
self.mapper = CachedItemMapper
if not isinstance(SqlAlchemySession, Session):
raise TypeError("expected SQLAlchmey_session to be an instance of:"
+ " sqlalchemy.orm.Session")
assert SqlAlchemySession.bind, "expected SQLAlchmey_session to be "\
+ "bound to Engine"
def get(self, CachableItem):
"""Returns current ICachedItem for ICachableItem
Args:
CachableItem: ICachableItem, used as a reference to find a cached version
Returns: ICachedItem or None, if CachableItem has not been cached
"""
return self.session.\
query(self.mapper.factory().__class__).\
filter(self.mapper.factory().__class__.__dict__[self.mapper.key()]==CachableItem.getId()).\
first()
def isDirty(self, CachableItem):
"""True if cached information requires update for ICachableItem
Args:
CachableItem: ICachableItem, used as a reference to find a cached version
Returns: True if CachableItem requires a cache update
"""
# we'll create a new ICachedItem from the current data and compare it to
# ICachedItem we get from the DB
_cachedItem = self.get(CachableItem)
if not _cachedItem:
return True
_newCacheItem = self.mapper.get(CachableItem)
return False if _cachedItem == _newCacheItem else True
def import_source(self, CachableSource):
"""Updates cache area and returns number of items updated with all available entries in ICachableSource"""
_count = 0
for item in CachableSource.items():
if self.cache(item):
_count += 1
return _count
def commit(self):
self.session.commit()
def rollback(self):
self.session.rollback()
def reset(self):
"""Deletes all entries in the cache area"""
self.Base.metadata.drop_all(self.session.bind)
self.initialize()
def initialize(self):
"""Instantiates the cache area to be ready for updates"""
self.Base.metadata.create_all(self.session.bind)
logger.debug("initialized sqlalchemy orm tables")
|
davisd50/sparc.cache
|
sparc/cache/sql/sql.py
|
SqlObjectCacheArea.import_source
|
python
|
def import_source(self, CachableSource):
_count = 0
for item in CachableSource.items():
if self.cache(item):
_count += 1
return _count
|
Updates cache area and returns number of items updated with all available entries in ICachableSource
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/sql/sql.py#L172-L178
|
[
"def cache(self, CachableItem):\n \"\"\"Updates cache area with latest information\n \"\"\"\n _cachedItem = self.get(CachableItem)\n if not _cachedItem:\n _dirtyCachedItem = self.mapper.get(CachableItem)\n logger.debug(\"new cachable item added to sql cache area {id: %s, type: %s}\", str(_dirtyCachedItem.getId()), str(_dirtyCachedItem.__class__))\n cached_item = self.session.merge(_dirtyCachedItem)\n notify(CacheObjectCreatedEvent(cached_item, self))\n return cached_item\n else:\n _newCacheItem = self.mapper.get(CachableItem)\n if _cachedItem != _newCacheItem:\n logger.debug(\"Cachable item modified in sql cache area {id: %s, type: %s}\", str(_newCacheItem.getId()), str(_newCacheItem.__class__))\n cached_item = self.session.merge(_newCacheItem)\n notify(CacheObjectModifiedEvent(cached_item, self))\n return cached_item\n return False\n"
] |
class SqlObjectCacheArea(object):
"""Adapter implementation for cachable storage into a SQLAlchemy DB backend
You MUST indicate that your SQL Alchemy Session objects provide the
related marker interfaces prior to calling this adapter (see usage example
in sql.txt)
Interface implementation requirements:
To use this class, several class dependencies must be met. The following
break-down should help you better understand the Interface dependencies
- ICacheArea (this class)
- ISqlAlchemySession (marker interface, applied to SQLAlchemy session object)
- ICachedItemMapper
- ICachableSource
- ICachedItem (indirect...required for __init__)
- ICachableItem (needed via method calls)
"""
implements(ITransactionalCacheArea)
adapts(ISqlAlchemyDeclarativeBase, ISqlAlchemySession, ICachedItemMapper)
def __init__(self, SqlAlchemyDeclarativeBase, SqlAlchemySession, CachedItemMapper):
"""Object initialization
"""
self.Base = SqlAlchemyDeclarativeBase
self.session = SqlAlchemySession
self.mapper = CachedItemMapper
if not isinstance(SqlAlchemySession, Session):
raise TypeError("expected SQLAlchmey_session to be an instance of:"
+ " sqlalchemy.orm.Session")
assert SqlAlchemySession.bind, "expected SQLAlchmey_session to be "\
+ "bound to Engine"
def get(self, CachableItem):
"""Returns current ICachedItem for ICachableItem
Args:
CachableItem: ICachableItem, used as a reference to find a cached version
Returns: ICachedItem or None, if CachableItem has not been cached
"""
return self.session.\
query(self.mapper.factory().__class__).\
filter(self.mapper.factory().__class__.__dict__[self.mapper.key()]==CachableItem.getId()).\
first()
def isDirty(self, CachableItem):
"""True if cached information requires update for ICachableItem
Args:
CachableItem: ICachableItem, used as a reference to find a cached version
Returns: True if CachableItem requires a cache update
"""
# we'll create a new ICachedItem from the current data and compare it to
# ICachedItem we get from the DB
_cachedItem = self.get(CachableItem)
if not _cachedItem:
return True
_newCacheItem = self.mapper.get(CachableItem)
return False if _cachedItem == _newCacheItem else True
def cache(self, CachableItem):
"""Updates cache area with latest information
"""
_cachedItem = self.get(CachableItem)
if not _cachedItem:
_dirtyCachedItem = self.mapper.get(CachableItem)
logger.debug("new cachable item added to sql cache area {id: %s, type: %s}", str(_dirtyCachedItem.getId()), str(_dirtyCachedItem.__class__))
cached_item = self.session.merge(_dirtyCachedItem)
notify(CacheObjectCreatedEvent(cached_item, self))
return cached_item
else:
_newCacheItem = self.mapper.get(CachableItem)
if _cachedItem != _newCacheItem:
logger.debug("Cachable item modified in sql cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__))
cached_item = self.session.merge(_newCacheItem)
notify(CacheObjectModifiedEvent(cached_item, self))
return cached_item
return False
def commit(self):
self.session.commit()
def rollback(self):
self.session.rollback()
def reset(self):
"""Deletes all entries in the cache area"""
self.Base.metadata.drop_all(self.session.bind)
self.initialize()
def initialize(self):
"""Instantiates the cache area to be ready for updates"""
self.Base.metadata.create_all(self.session.bind)
logger.debug("initialized sqlalchemy orm tables")
|
davisd50/sparc.cache
|
sparc/cache/sql/sql.py
|
SqlObjectCacheArea.reset
|
python
|
def reset(self):
self.Base.metadata.drop_all(self.session.bind)
self.initialize()
|
Deletes all entries in the cache area
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/sql/sql.py#L186-L189
|
[
"def initialize(self):\n \"\"\"Instantiates the cache area to be ready for updates\"\"\"\n self.Base.metadata.create_all(self.session.bind)\n logger.debug(\"initialized sqlalchemy orm tables\")\n"
] |
class SqlObjectCacheArea(object):
"""Adapter implementation for cachable storage into a SQLAlchemy DB backend
You MUST indicate that your SQL Alchemy Session objects provide the
related marker interfaces prior to calling this adapter (see usage example
in sql.txt)
Interface implementation requirements:
To use this class, several class dependencies must be met. The following
break-down should help you better understand the Interface dependencies
- ICacheArea (this class)
- ISqlAlchemySession (marker interface, applied to SQLAlchemy session object)
- ICachedItemMapper
- ICachableSource
- ICachedItem (indirect...required for __init__)
- ICachableItem (needed via method calls)
"""
implements(ITransactionalCacheArea)
adapts(ISqlAlchemyDeclarativeBase, ISqlAlchemySession, ICachedItemMapper)
def __init__(self, SqlAlchemyDeclarativeBase, SqlAlchemySession, CachedItemMapper):
"""Object initialization
"""
self.Base = SqlAlchemyDeclarativeBase
self.session = SqlAlchemySession
self.mapper = CachedItemMapper
if not isinstance(SqlAlchemySession, Session):
raise TypeError("expected SQLAlchmey_session to be an instance of:"
+ " sqlalchemy.orm.Session")
assert SqlAlchemySession.bind, "expected SQLAlchmey_session to be "\
+ "bound to Engine"
def get(self, CachableItem):
"""Returns current ICachedItem for ICachableItem
Args:
CachableItem: ICachableItem, used as a reference to find a cached version
Returns: ICachedItem or None, if CachableItem has not been cached
"""
return self.session.\
query(self.mapper.factory().__class__).\
filter(self.mapper.factory().__class__.__dict__[self.mapper.key()]==CachableItem.getId()).\
first()
def isDirty(self, CachableItem):
"""True if cached information requires update for ICachableItem
Args:
CachableItem: ICachableItem, used as a reference to find a cached version
Returns: True if CachableItem requires a cache update
"""
# we'll create a new ICachedItem from the current data and compare it to
# ICachedItem we get from the DB
_cachedItem = self.get(CachableItem)
if not _cachedItem:
return True
_newCacheItem = self.mapper.get(CachableItem)
return False if _cachedItem == _newCacheItem else True
def cache(self, CachableItem):
"""Updates cache area with latest information
"""
_cachedItem = self.get(CachableItem)
if not _cachedItem:
_dirtyCachedItem = self.mapper.get(CachableItem)
logger.debug("new cachable item added to sql cache area {id: %s, type: %s}", str(_dirtyCachedItem.getId()), str(_dirtyCachedItem.__class__))
cached_item = self.session.merge(_dirtyCachedItem)
notify(CacheObjectCreatedEvent(cached_item, self))
return cached_item
else:
_newCacheItem = self.mapper.get(CachableItem)
if _cachedItem != _newCacheItem:
logger.debug("Cachable item modified in sql cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__))
cached_item = self.session.merge(_newCacheItem)
notify(CacheObjectModifiedEvent(cached_item, self))
return cached_item
return False
def import_source(self, CachableSource):
"""Updates cache area and returns number of items updated with all available entries in ICachableSource"""
_count = 0
for item in CachableSource.items():
if self.cache(item):
_count += 1
return _count
def commit(self):
self.session.commit()
def rollback(self):
self.session.rollback()
def initialize(self):
"""Instantiates the cache area to be ready for updates"""
self.Base.metadata.create_all(self.session.bind)
logger.debug("initialized sqlalchemy orm tables")
|
davisd50/sparc.cache
|
sparc/cache/sql/sql.py
|
SqlObjectCacheArea.initialize
|
python
|
def initialize(self):
self.Base.metadata.create_all(self.session.bind)
logger.debug("initialized sqlalchemy orm tables")
|
Instantiates the cache area to be ready for updates
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/sql/sql.py#L191-L194
| null |
class SqlObjectCacheArea(object):
"""Adapter implementation for cachable storage into a SQLAlchemy DB backend
You MUST indicate that your SQL Alchemy Session objects provide the
related marker interfaces prior to calling this adapter (see usage example
in sql.txt)
Interface implementation requirements:
To use this class, several class dependencies must be met. The following
break-down should help you better understand the Interface dependencies
- ICacheArea (this class)
- ISqlAlchemySession (marker interface, applied to SQLAlchemy session object)
- ICachedItemMapper
- ICachableSource
- ICachedItem (indirect...required for __init__)
- ICachableItem (needed via method calls)
"""
implements(ITransactionalCacheArea)
adapts(ISqlAlchemyDeclarativeBase, ISqlAlchemySession, ICachedItemMapper)
def __init__(self, SqlAlchemyDeclarativeBase, SqlAlchemySession, CachedItemMapper):
"""Object initialization
"""
self.Base = SqlAlchemyDeclarativeBase
self.session = SqlAlchemySession
self.mapper = CachedItemMapper
if not isinstance(SqlAlchemySession, Session):
raise TypeError("expected SQLAlchmey_session to be an instance of:"
+ " sqlalchemy.orm.Session")
assert SqlAlchemySession.bind, "expected SQLAlchmey_session to be "\
+ "bound to Engine"
def get(self, CachableItem):
"""Returns current ICachedItem for ICachableItem
Args:
CachableItem: ICachableItem, used as a reference to find a cached version
Returns: ICachedItem or None, if CachableItem has not been cached
"""
return self.session.\
query(self.mapper.factory().__class__).\
filter(self.mapper.factory().__class__.__dict__[self.mapper.key()]==CachableItem.getId()).\
first()
def isDirty(self, CachableItem):
"""True if cached information requires update for ICachableItem
Args:
CachableItem: ICachableItem, used as a reference to find a cached version
Returns: True if CachableItem requires a cache update
"""
# we'll create a new ICachedItem from the current data and compare it to
# ICachedItem we get from the DB
_cachedItem = self.get(CachableItem)
if not _cachedItem:
return True
_newCacheItem = self.mapper.get(CachableItem)
return False if _cachedItem == _newCacheItem else True
    def cache(self, CachableItem):
        """Updates cache area with latest information

        Merges a new or changed ICachedItem into the SQLAlchemy session and
        emits a created/modified event.  Returns the merged ICachedItem when
        a change was made, otherwise False.  Changes are pending until
        commit() is called on this area.
        """
        _cachedItem = self.get(CachableItem)
        if not _cachedItem:
            # Not cached yet: map and insert a brand-new entry.
            _dirtyCachedItem = self.mapper.get(CachableItem)
            logger.debug("new cachable item added to sql cache area {id: %s, type: %s}", str(_dirtyCachedItem.getId()), str(_dirtyCachedItem.__class__))
            cached_item = self.session.merge(_dirtyCachedItem)
            notify(CacheObjectCreatedEvent(cached_item, self))
            return cached_item
        else:
            # Already cached: only merge + notify when the content differs.
            _newCacheItem = self.mapper.get(CachableItem)
            if _cachedItem != _newCacheItem:
                logger.debug("Cachable item modified in sql cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__))
                cached_item = self.session.merge(_newCacheItem)
                notify(CacheObjectModifiedEvent(cached_item, self))
                return cached_item
        return False
def import_source(self, CachableSource):
"""Updates cache area and returns number of items updated with all available entries in ICachableSource"""
_count = 0
for item in CachableSource.items():
if self.cache(item):
_count += 1
return _count
    def commit(self):
        """Persist all pending cache changes to the database."""
        self.session.commit()
    def rollback(self):
        """Discard all pending (uncommitted) cache changes."""
        self.session.rollback()
    def reset(self):
        """Deletes all entries in the cache area

        Drops all tables known to the declarative Base from the bound engine,
        then re-creates them via self.initialize() (not shown in this chunk).
        """
        self.Base.metadata.drop_all(self.session.bind)
        self.initialize()
|
davisd50/sparc.cache
|
sparc/cache/sources/normalize.py
|
normalizedFieldNameCachableItemMixin.normalize
|
python
|
def normalize(cls, name):
name = name.lower() # lower-case
for _replace in [' ','-','(',')','?']:
name = name.replace(_replace,'')
return name
|
Return string in all lower case with spaces and question marks removed
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/sources/normalize.py#L67-L72
| null |
class normalizedFieldNameCachableItemMixin(cachableItemMixin):
    """Base class for ICachableItem implementations for data requiring normalized field names.

    This class provides extra functionality to deal with minor differences
    in attribute names when different variations of the string should be
    considered equal.  The following strings should be considered equal:
     - Entry #
     - ENTRY #
     - Entry#
    """
    def __init__(self, key, attributes):
        """Object initialization

        Args:
            key: String name of an attributes key that represents the unique identify of the request
            attributes: Dictionary whose keys match the string values of the request attribute's names and values correspond the the request attribute values
        """
        self._attributes_normalized = {}
        self._set_attributes(attributes if attributes else {})
        self._key_normalized = ''
        self._set_key(key)

    def _set_attributes(self, attributes):
        # Keep the raw mapping, and additionally index every value under the
        # normalized form of its name so lookups ignore case/punctuation.
        self._attributes_raw = attributes
        for key, value in attributes.items():  # .items() works on both Py2 and Py3
            self._attributes_normalized[self.normalize(key)] = value

    def _get_attributes(self):
        return self._attributes_normalized

    def _set_key(self, key):
        self._key_raw = key
        self._key_normalized = self.normalize(key)

    def _get_key(self):
        return self._key_normalized

    # Restored: the @classmethod decorator had no following function, which
    # is a SyntaxError (a decorator must precede a def/class statement).
    @classmethod
    def normalize(cls, name):
        """Return string in all lower case with spaces and question marks removed"""
        name = name.lower() # lower-case
        for _replace in [' ','-','(',')','?']:
            name = name.replace(_replace,'')
        return name

    attributes = property(_get_attributes, _set_attributes)
    key = property(_get_key, _set_key)
|
davisd50/sparc.cache
|
sparc/cache/sources/normalize.py
|
normalizedDateTimeResolver.manage
|
python
|
def manage(self, dateTimeString):
dateTime = None
dateTimeString = dateTimeString.replace('-', '/')
_date_time_split = dateTimeString.split(' ') # [0] = date, [1] = time (if exists)
_date = _date_time_split[0]
_time = '00:00:00' # default
if len(_date_time_split) > 1:
_time = _date_time_split[1]
if dateTimeString.find('/') == 4: # YYYY/MM/DD...
dateList = _date.split('/') + _time.split(':')
dateTime = datetime(*map(lambda x: int(x), dateList))
elif 1 <= dateTimeString.find('/') <= 2: # MM/DD/YYYY or M/D?/YYYY
_date_split = _date.split('/')
dateList = [_date_split[2], _date_split[0], _date_split[1]] + _time.split(':')
dateTime = datetime(*map(lambda x: int(x), dateList))
if not dateTime:
raise ValueError("unable to manage unsupported string format: %s"%(dateTimeString))
return dateTime
|
Return a Python datetime object based on the dateTimeString
This will handle date times in the following formats:
YYYY/MM/DD HH:MM:SS
2014/11/05 21:47:28
2014/11/5 21:47:28
11/05/2014
11/5/2014
11/05/2014 16:28:00
11/05/2014 16:28
11/5/2014 16:28:00
11/5/2014 16:28
It can also handle these formats when using a - instead of a / for a
date separator.
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/sources/normalize.py#L94-L129
| null |
class normalizedDateTimeResolver(object):
    implements(IManagedCachedItemMapperAttribute)
    adapts(INormalizedDateTime)

    def __init__(self, context):
        self.context = context

    def manage(self, dateTimeString):
        """Return a Python datetime object based on the dateTimeString

        This will handle date times in the following formats:
        YYYY/MM/DD HH:MM:SS
           2014/11/05 21:47:28
           2014/11/5 21:47:28
        11/05/2014
           11/5/2014
           11/05/2014 16:28:00
           11/05/2014 16:28
           11/5/2014 16:28:00
           11/5/2014 16:28
        It can also handle these formats when using a - instead of a / for a
        date separator.
        """
        # Normalize the date separator first so both '-' and '/' inputs
        # take the same code path below.
        dateTimeString = dateTimeString.replace('-', '/')
        pieces = dateTimeString.split(' ')  # [0] = date, [1] = optional time
        date_part = pieces[0]
        time_part = pieces[1] if len(pieces) > 1 else '00:00:00'
        slash_pos = dateTimeString.find('/')
        result = None
        if slash_pos == 4:
            # YYYY/MM/DD... -- fields are already in datetime() order
            fields = date_part.split('/') + time_part.split(':')
            result = datetime(*[int(f) for f in fields])
        elif 1 <= slash_pos <= 2:
            # MM/DD/YYYY or M/D?/YYYY -- reorder to year, month, day
            ds = date_part.split('/')
            fields = [ds[2], ds[0], ds[1]] + time_part.split(':')
            result = datetime(*[int(f) for f in fields])
        if not result:
            raise ValueError("unable to manage unsupported string format: %s"%(dateTimeString))
        return result
|
davisd50/sparc.cache
|
sparc/cache/splunk/area.py
|
CacheAreaForSplunkKV.current_kv_names
|
python
|
def current_kv_names(self):
return current_kv_names(self.sci, self.username, self.appname, request=self._request)
|
Return set of string names of current available Splunk KV collections
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/splunk/area.py#L51-L53
| null |
class CacheAreaForSplunkKV(object):
    """An area where cached information can be stored persistently.

    Persists ICachedItem entries into a Splunk App Key-Value store
    collection via the Splunk REST storage/collections endpoints.
    """
    implements(ITrimmableCacheArea)
    adapts(sparc.cache.ICachedItemMapper,
           sparc.db.splunk.ISplunkKVCollectionSchema,
           sparc.db.splunk.ISplunkConnectionInfo,
           sparc.db.splunk.ISPlunkKVCollectionIdentifier,
           sparc.utils.requests.IRequest)

    def __init__(self, mapper, schema, sci, kv_id, request):
        """Object initializer

        Args:
            mapper: Object providing sparc.cache.ICachedItemMapper that will
                    convert ICachableItem instances into ICachedItem instances.
            schema: Object providing sparc.db.splunk.ISplunkKVCollectionSchema.
            sci: sparc.db.splunk.ISplunkConnectionInfo instance to provide
                 connection information for Splunk indexing server
            kv_id: Object providing sparc.db.splunk.ISPlunkKVCollectionIdentifier
            request: Object providing sparc.utils.requests.IRequest
        """
        self.gooble_request_warnings = False
        self.mapper = mapper
        self.schema = schema
        self.sci = sci
        self.kv_id = kv_id
        self._request = request
        self._request.req_kwargs['auth'] = (self.sci['username'], self.sci['password'],)
        self.collname = kv_id.collection
        self.appname = kv_id.application
        self.username = kv_id.username
        # Base URL for all user/app-scoped Splunk REST calls.
        self.url = "".join(['https://',sci['host'],':',sci['port'],
                            '/servicesNS/',self.username,'/',
                            self.appname,'/'])

    # Restored: reset() and initialize() call this method, but it was
    # missing from the class body.
    def current_kv_names(self):
        """Return set of string names of current available Splunk KV collections"""
        return current_kv_names(self.sci, self.username, self.appname, request=self._request)

    def request(self, *args, **kwargs):
        """Delegate HTTP calls to the wrapped IRequest utility."""
        return self._request.request(*args, **kwargs)

    def _data(self, CachedItem):
        """Return a JSON-serializable dict of the item's mapped fields."""
        data = {k:getattr(CachedItem, k) for k in self.mapper.mapper}
        data['_key'] = CachedItem.getId()  # Splunk KV primary-key field
        return data

    def _add(self, CachedItem):
        """Insert a new KV store record for CachedItem."""
        r = self.request('post',
                         self.url+"storage/collections/data/"+self.collname,
                         headers={'Content-Type': 'application/json'},
                         data=json.dumps(self._data(CachedItem)))
        r.raise_for_status()

    def _update(self, CachedItem):
        """Overwrite the existing KV store record for CachedItem."""
        r = self.request('post',
                         self.url+"storage/collections/data/"+self.collname+'/'+CachedItem.getId(),
                         headers={'Content-Type': 'application/json'},
                         data=json.dumps(self._data(CachedItem)))
        r.raise_for_status()

    def _delete(self, id_):
        """Delete the KV store record with the given key."""
        if not id_:
            raise ValueError("Expected valid id for deletion")
        r = self.request('delete',
                         self.url+"storage/collections/data/"+self.collname+'/'+str(id_))
        r.raise_for_status()

    def _all_ids(self):
        """Return the set of all record ids currently in the collection."""
        # NOTE(review): Splunk REST usually expects 'output_mode'; confirm
        # 'output_type' is honored by this endpoint.
        r = self.request('get',
                         self.url+"storage/collections/data/"+self.collname,
                         headers={'Content-Type': 'application/json'},
                         params={'output_type': 'json', 'fields':'id'})
        r.raise_for_status()
        data = set(map(lambda d: str(d['id']), r.json()))
        return data

    #ICacheArea
    def get(self, CachableItem):
        """Returns current ICachedItem for ICachableItem or None if not cached"""
        cached_item = self.mapper.get(CachableItem)
        r = self.request('get',
                         self.url+"storage/collections/data/"+self.collname+'/'+cached_item.getId(),
                         data={'output_mode': 'json'})
        if r.ok:
            # we need to update the object with the values found in the cache area
            data = r.json()
            for name in self.mapper.mapper:
                setattr(cached_item, name, data[name])
            return cached_item
        return None

    def isDirty(self, CachableItem):
        """True if cached information requires update for ICachableItem"""
        _cachedItem = self.get(CachableItem)
        if not _cachedItem:
            return True
        _newCacheItem = self.mapper.get(CachableItem)
        return False if _cachedItem == _newCacheItem else True

    def cache(self, CachableItem):
        """Updates caches area with latest item information returning
        ICachedItem if cache updates were required.
        Issues ICacheObjectCreatedEvent, and ICacheObjectModifiedEvent for
        ICacheArea/ICachableItem combo.
        """
        _cachedItem = self.get(CachableItem)
        if not _cachedItem:
            _cachedItem = self.mapper.get(CachableItem)
            self._add(_cachedItem)
            logger.debug("new cachable item added to Splunk KV cache area {id: %s, type: %s}", str(_cachedItem.getId()), str(_cachedItem.__class__))
            notify(CacheObjectCreatedEvent(_cachedItem, self))
            return _cachedItem
        else:
            _newCacheItem = self.mapper.get(CachableItem)
            if _cachedItem != _newCacheItem:
                logger.debug("Cachable item modified in Splunk KV cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__))
                self._update(_newCacheItem)
                notify(CacheObjectModifiedEvent(_newCacheItem, self))
                return _newCacheItem
        return None

    def import_source(self, CachableSource):
        """Updates cache area and returns number of items updated with all
        available entries in ICachableSource
        """
        _count = 0
        self._import_source_items_id_list = set() # used to help speed up trim()
        for item in CachableSource.items():
            self._import_source_items_id_list.add(item.getId())
            if self.cache(item):
                _count += 1
        return _count

    def reset(self):
        """Deletes all entries in the cache area"""
        if self.collname not in self.current_kv_names():
            return # nothing to do
        # we'll simply delete the entire collection and then re-create it.
        r = self.request('delete',
                         self.url+"storage/collections/data/"+self.collname)
        r.raise_for_status()
        self.initialize()

    def initialize(self):
        """Instantiates the cache area to be ready for updates"""
        if self.collname not in self.current_kv_names():
            r = self.request('post',
                             self.url+"storage/collections/config",
                             headers={'content-type': 'application/json'},
                             data={'name': self.collname})
            r.raise_for_status()
        # initialize schema (local renamed from 're' to avoid shadowing the re module)
        resp = self.request('post',
                            self.url+"storage/collections/config/"+self.collname,
                            headers={'content-type': 'application/json'},
                            data=self.schema)
        resp.raise_for_status()
        logger.info("initialized Splunk Key Value Collection %s with schema %s"\
                                    % (self.collname, str(self.schema)))
        if self.collname not in self.current_kv_names():
            raise EnvironmentError('expected %s in list of kv collections %s' % (self.collname, str(self.current_kv_names())))

    #ITrimmableCacheArea
    def trim(self, source):
        """Sync the collection with *source*: import everything it yields,
        then delete records no longer present.
        Returns: (updated_count, deleted_count) tuple
        """
        if not ICachableSource.providedBy(source):
            #we'll fake a partial ICachableSource for use with import_source()
            source_type = type('FakeCachableSource', (object,), {})
            _source = source #re-assign due to closure issue with source re-assignment below
            source_type.items = lambda self: _source
            source = source_type()
        updated = self.import_source(source)
        diff = self._all_ids() - self._import_source_items_id_list
        # Iterate explicitly: map() is lazy under Python 3, so the previous
        # map(self._delete, diff) would never execute the deletions there.
        for stale_id in diff:
            self._delete(stale_id)
        return (updated, len(diff), )
|
davisd50/sparc.cache
|
sparc/cache/splunk/area.py
|
CacheAreaForSplunkKV.get
|
python
|
def get(self, CachableItem):
cached_item = self.mapper.get(CachableItem)
r = self.request('get',
self.url+"storage/collections/data/"+self.collname+'/'+cached_item.getId(),
data={'output_mode': 'json'})
if r.ok:
# we need to update the object with the values found in the cache area
data = r.json()
for name in self.mapper.mapper:
setattr(cached_item, name, data[name])
return cached_item
return None
|
Returns current ICachedItem for ICachableItem or None if not cached
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/splunk/area.py#L94-L106
|
[
"def request(self, *args, **kwargs):\n return self._request.request(*args, **kwargs)\n"
] |
class CacheAreaForSplunkKV(object):
    """An area where cached information can be stored persistently.

    Persists ICachedItem entries into a Splunk App Key-Value store
    collection via the Splunk REST storage/collections endpoints.
    """
    implements(ITrimmableCacheArea)
    adapts(sparc.cache.ICachedItemMapper,
           sparc.db.splunk.ISplunkKVCollectionSchema,
           sparc.db.splunk.ISplunkConnectionInfo,
           sparc.db.splunk.ISPlunkKVCollectionIdentifier,
           sparc.utils.requests.IRequest)

    def __init__(self, mapper, schema, sci, kv_id, request):
        """Object initializer

        Args:
            mapper: Object providing sparc.cache.ICachedItemMapper that will
                    convert ICachableItem instances into ICachedItem instances.
            schema: Object providing sparc.db.splunk.ISplunkKVCollectionSchema.
            sci: sparc.db.splunk.ISplunkConnectionInfo instance to provide
                 connection information for Splunk indexing server
            kv_id: Object providing sparc.db.splunk.ISPlunkKVCollectionIdentifier
            request: Object providing sparc.utils.requests.IRequest
        """
        self.gooble_request_warnings = False
        self.mapper = mapper
        self.schema = schema
        self.sci = sci
        self.kv_id = kv_id
        self._request = request
        self._request.req_kwargs['auth'] = (self.sci['username'], self.sci['password'],)
        self.collname = kv_id.collection
        self.appname = kv_id.application
        self.username = kv_id.username
        # Base URL for all user/app-scoped Splunk REST calls.
        self.url = "".join(['https://',sci['host'],':',sci['port'],
                            '/servicesNS/',self.username,'/',
                            self.appname,'/'])

    def current_kv_names(self):
        """Return set of string names of current available Splunk KV collections"""
        return current_kv_names(self.sci, self.username, self.appname, request=self._request)

    def request(self, *args, **kwargs):
        """Delegate HTTP calls to the wrapped IRequest utility."""
        return self._request.request(*args, **kwargs)

    def _data(self, CachedItem):
        """Return a JSON-serializable dict of the item's mapped fields."""
        data = {k:getattr(CachedItem, k) for k in self.mapper.mapper}
        data['_key'] = CachedItem.getId()  # Splunk KV primary-key field
        return data

    def _add(self, CachedItem):
        """Insert a new KV store record for CachedItem."""
        r = self.request('post',
                         self.url+"storage/collections/data/"+self.collname,
                         headers={'Content-Type': 'application/json'},
                         data=json.dumps(self._data(CachedItem)))
        r.raise_for_status()

    def _update(self, CachedItem):
        """Overwrite the existing KV store record for CachedItem."""
        r = self.request('post',
                         self.url+"storage/collections/data/"+self.collname+'/'+CachedItem.getId(),
                         headers={'Content-Type': 'application/json'},
                         data=json.dumps(self._data(CachedItem)))
        r.raise_for_status()

    def _delete(self, id_):
        """Delete the KV store record with the given key."""
        if not id_:
            raise ValueError("Expected valid id for deletion")
        r = self.request('delete',
                         self.url+"storage/collections/data/"+self.collname+'/'+str(id_))
        r.raise_for_status()

    def _all_ids(self):
        """Return the set of all record ids currently in the collection."""
        # NOTE(review): Splunk REST usually expects 'output_mode'; confirm
        # 'output_type' is honored by this endpoint.
        r = self.request('get',
                         self.url+"storage/collections/data/"+self.collname,
                         headers={'Content-Type': 'application/json'},
                         params={'output_type': 'json', 'fields':'id'})
        r.raise_for_status()
        data = set(map(lambda d: str(d['id']), r.json()))
        return data

    #ICacheArea
    # Restored: isDirty() and cache() call self.get(), but the method was
    # missing from the class body.
    def get(self, CachableItem):
        """Returns current ICachedItem for ICachableItem or None if not cached"""
        cached_item = self.mapper.get(CachableItem)
        r = self.request('get',
                         self.url+"storage/collections/data/"+self.collname+'/'+cached_item.getId(),
                         data={'output_mode': 'json'})
        if r.ok:
            # we need to update the object with the values found in the cache area
            data = r.json()
            for name in self.mapper.mapper:
                setattr(cached_item, name, data[name])
            return cached_item
        return None

    def isDirty(self, CachableItem):
        """True if cached information requires update for ICachableItem"""
        _cachedItem = self.get(CachableItem)
        if not _cachedItem:
            return True
        _newCacheItem = self.mapper.get(CachableItem)
        return False if _cachedItem == _newCacheItem else True

    def cache(self, CachableItem):
        """Updates caches area with latest item information returning
        ICachedItem if cache updates were required.
        Issues ICacheObjectCreatedEvent, and ICacheObjectModifiedEvent for
        ICacheArea/ICachableItem combo.
        """
        _cachedItem = self.get(CachableItem)
        if not _cachedItem:
            _cachedItem = self.mapper.get(CachableItem)
            self._add(_cachedItem)
            logger.debug("new cachable item added to Splunk KV cache area {id: %s, type: %s}", str(_cachedItem.getId()), str(_cachedItem.__class__))
            notify(CacheObjectCreatedEvent(_cachedItem, self))
            return _cachedItem
        else:
            _newCacheItem = self.mapper.get(CachableItem)
            if _cachedItem != _newCacheItem:
                logger.debug("Cachable item modified in Splunk KV cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__))
                self._update(_newCacheItem)
                notify(CacheObjectModifiedEvent(_newCacheItem, self))
                return _newCacheItem
        return None

    def import_source(self, CachableSource):
        """Updates cache area and returns number of items updated with all
        available entries in ICachableSource
        """
        _count = 0
        self._import_source_items_id_list = set() # used to help speed up trim()
        for item in CachableSource.items():
            self._import_source_items_id_list.add(item.getId())
            if self.cache(item):
                _count += 1
        return _count

    def reset(self):
        """Deletes all entries in the cache area"""
        if self.collname not in self.current_kv_names():
            return # nothing to do
        # we'll simply delete the entire collection and then re-create it.
        r = self.request('delete',
                         self.url+"storage/collections/data/"+self.collname)
        r.raise_for_status()
        self.initialize()

    def initialize(self):
        """Instantiates the cache area to be ready for updates"""
        if self.collname not in self.current_kv_names():
            r = self.request('post',
                             self.url+"storage/collections/config",
                             headers={'content-type': 'application/json'},
                             data={'name': self.collname})
            r.raise_for_status()
        # initialize schema (local renamed from 're' to avoid shadowing the re module)
        resp = self.request('post',
                            self.url+"storage/collections/config/"+self.collname,
                            headers={'content-type': 'application/json'},
                            data=self.schema)
        resp.raise_for_status()
        logger.info("initialized Splunk Key Value Collection %s with schema %s"\
                                    % (self.collname, str(self.schema)))
        if self.collname not in self.current_kv_names():
            raise EnvironmentError('expected %s in list of kv collections %s' % (self.collname, str(self.current_kv_names())))

    #ITrimmableCacheArea
    def trim(self, source):
        """Sync the collection with *source*: import everything it yields,
        then delete records no longer present.
        Returns: (updated_count, deleted_count) tuple
        """
        if not ICachableSource.providedBy(source):
            #we'll fake a partial ICachableSource for use with import_source()
            source_type = type('FakeCachableSource', (object,), {})
            _source = source #re-assign due to closure issue with source re-assignment below
            source_type.items = lambda self: _source
            source = source_type()
        updated = self.import_source(source)
        diff = self._all_ids() - self._import_source_items_id_list
        # Iterate explicitly: map() is lazy under Python 3, so the previous
        # map(self._delete, diff) would never execute the deletions there.
        for stale_id in diff:
            self._delete(stale_id)
        return (updated, len(diff), )
|
davisd50/sparc.cache
|
sparc/cache/splunk/area.py
|
CacheAreaForSplunkKV.isDirty
|
python
|
def isDirty(self, CachableItem):
_cachedItem = self.get(CachableItem)
if not _cachedItem:
return True
_newCacheItem = self.mapper.get(CachableItem)
return False if _cachedItem == _newCacheItem else True
|
True if cached information requires update for ICachableItem
|
train
|
https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/splunk/area.py#L109-L115
|
[
"def get(self, CachableItem):\n \"\"\"Returns current ICachedItem for ICachableItem or None if not cached\"\"\"\n cached_item = self.mapper.get(CachableItem)\n r = self.request('get',\n self.url+\"storage/collections/data/\"+self.collname+'/'+cached_item.getId(),\n data={'output_mode': 'json'})\n if r.ok:\n # we need to update the object with the values found in the cache area\n data = r.json()\n for name in self.mapper.mapper:\n setattr(cached_item, name, data[name])\n return cached_item\n return None\n"
] |
class CacheAreaForSplunkKV(object):
    """An area where cached information can be stored persistently.

    Persists ICachedItem entries into a Splunk App Key-Value store
    collection via the Splunk REST storage/collections endpoints.
    """
    implements(ITrimmableCacheArea)
    adapts(sparc.cache.ICachedItemMapper,
           sparc.db.splunk.ISplunkKVCollectionSchema,
           sparc.db.splunk.ISplunkConnectionInfo,
           sparc.db.splunk.ISPlunkKVCollectionIdentifier,
           sparc.utils.requests.IRequest)

    def __init__(self, mapper, schema, sci, kv_id, request):
        """Object initializer

        Args:
            mapper: Object providing sparc.cache.ICachedItemMapper that will
                    convert ICachableItem instances into ICachedItem instances.
            schema: Object providing sparc.db.splunk.ISplunkKVCollectionSchema.
            sci: sparc.db.splunk.ISplunkConnectionInfo instance to provide
                 connection information for Splunk indexing server
            kv_id: Object providing sparc.db.splunk.ISPlunkKVCollectionIdentifier
            request: Object providing sparc.utils.requests.IRequest
        """
        self.gooble_request_warnings = False
        self.mapper = mapper
        self.schema = schema
        self.sci = sci
        self.kv_id = kv_id
        self._request = request
        self._request.req_kwargs['auth'] = (self.sci['username'], self.sci['password'],)
        self.collname = kv_id.collection
        self.appname = kv_id.application
        self.username = kv_id.username
        # Base URL for all user/app-scoped Splunk REST calls.
        self.url = "".join(['https://',sci['host'],':',sci['port'],
                            '/servicesNS/',self.username,'/',
                            self.appname,'/'])

    def current_kv_names(self):
        """Return set of string names of current available Splunk KV collections"""
        return current_kv_names(self.sci, self.username, self.appname, request=self._request)

    def request(self, *args, **kwargs):
        """Delegate HTTP calls to the wrapped IRequest utility."""
        return self._request.request(*args, **kwargs)

    def _data(self, CachedItem):
        """Return a JSON-serializable dict of the item's mapped fields."""
        data = {k:getattr(CachedItem, k) for k in self.mapper.mapper}
        data['_key'] = CachedItem.getId()  # Splunk KV primary-key field
        return data

    def _add(self, CachedItem):
        """Insert a new KV store record for CachedItem."""
        r = self.request('post',
                         self.url+"storage/collections/data/"+self.collname,
                         headers={'Content-Type': 'application/json'},
                         data=json.dumps(self._data(CachedItem)))
        r.raise_for_status()

    def _update(self, CachedItem):
        """Overwrite the existing KV store record for CachedItem."""
        r = self.request('post',
                         self.url+"storage/collections/data/"+self.collname+'/'+CachedItem.getId(),
                         headers={'Content-Type': 'application/json'},
                         data=json.dumps(self._data(CachedItem)))
        r.raise_for_status()

    def _delete(self, id_):
        """Delete the KV store record with the given key."""
        if not id_:
            raise ValueError("Expected valid id for deletion")
        r = self.request('delete',
                         self.url+"storage/collections/data/"+self.collname+'/'+str(id_))
        r.raise_for_status()

    def _all_ids(self):
        """Return the set of all record ids currently in the collection."""
        # NOTE(review): Splunk REST usually expects 'output_mode'; confirm
        # 'output_type' is honored by this endpoint.
        r = self.request('get',
                         self.url+"storage/collections/data/"+self.collname,
                         headers={'Content-Type': 'application/json'},
                         params={'output_type': 'json', 'fields':'id'})
        r.raise_for_status()
        data = set(map(lambda d: str(d['id']), r.json()))
        return data

    #ICacheArea
    def get(self, CachableItem):
        """Returns current ICachedItem for ICachableItem or None if not cached"""
        cached_item = self.mapper.get(CachableItem)
        r = self.request('get',
                         self.url+"storage/collections/data/"+self.collname+'/'+cached_item.getId(),
                         data={'output_mode': 'json'})
        if r.ok:
            # we need to update the object with the values found in the cache area
            data = r.json()
            for name in self.mapper.mapper:
                setattr(cached_item, name, data[name])
            return cached_item
        return None

    # Restored: this ICacheArea interface method was missing from the class
    # body even though the class declares implements(ITrimmableCacheArea).
    def isDirty(self, CachableItem):
        """True if cached information requires update for ICachableItem"""
        _cachedItem = self.get(CachableItem)
        if not _cachedItem:
            return True
        _newCacheItem = self.mapper.get(CachableItem)
        return False if _cachedItem == _newCacheItem else True

    def cache(self, CachableItem):
        """Updates caches area with latest item information returning
        ICachedItem if cache updates were required.
        Issues ICacheObjectCreatedEvent, and ICacheObjectModifiedEvent for
        ICacheArea/ICachableItem combo.
        """
        _cachedItem = self.get(CachableItem)
        if not _cachedItem:
            _cachedItem = self.mapper.get(CachableItem)
            self._add(_cachedItem)
            logger.debug("new cachable item added to Splunk KV cache area {id: %s, type: %s}", str(_cachedItem.getId()), str(_cachedItem.__class__))
            notify(CacheObjectCreatedEvent(_cachedItem, self))
            return _cachedItem
        else:
            _newCacheItem = self.mapper.get(CachableItem)
            if _cachedItem != _newCacheItem:
                logger.debug("Cachable item modified in Splunk KV cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__))
                self._update(_newCacheItem)
                notify(CacheObjectModifiedEvent(_newCacheItem, self))
                return _newCacheItem
        return None

    def import_source(self, CachableSource):
        """Updates cache area and returns number of items updated with all
        available entries in ICachableSource
        """
        _count = 0
        self._import_source_items_id_list = set() # used to help speed up trim()
        for item in CachableSource.items():
            self._import_source_items_id_list.add(item.getId())
            if self.cache(item):
                _count += 1
        return _count

    def reset(self):
        """Deletes all entries in the cache area"""
        if self.collname not in self.current_kv_names():
            return # nothing to do
        # we'll simply delete the entire collection and then re-create it.
        r = self.request('delete',
                         self.url+"storage/collections/data/"+self.collname)
        r.raise_for_status()
        self.initialize()

    def initialize(self):
        """Instantiates the cache area to be ready for updates"""
        if self.collname not in self.current_kv_names():
            r = self.request('post',
                             self.url+"storage/collections/config",
                             headers={'content-type': 'application/json'},
                             data={'name': self.collname})
            r.raise_for_status()
        # initialize schema (local renamed from 're' to avoid shadowing the re module)
        resp = self.request('post',
                            self.url+"storage/collections/config/"+self.collname,
                            headers={'content-type': 'application/json'},
                            data=self.schema)
        resp.raise_for_status()
        logger.info("initialized Splunk Key Value Collection %s with schema %s"\
                                    % (self.collname, str(self.schema)))
        if self.collname not in self.current_kv_names():
            raise EnvironmentError('expected %s in list of kv collections %s' % (self.collname, str(self.current_kv_names())))

    #ITrimmableCacheArea
    def trim(self, source):
        """Sync the collection with *source*: import everything it yields,
        then delete records no longer present.
        Returns: (updated_count, deleted_count) tuple
        """
        if not ICachableSource.providedBy(source):
            #we'll fake a partial ICachableSource for use with import_source()
            source_type = type('FakeCachableSource', (object,), {})
            _source = source #re-assign due to closure issue with source re-assignment below
            source_type.items = lambda self: _source
            source = source_type()
        updated = self.import_source(source)
        diff = self._all_ids() - self._import_source_items_id_list
        # Iterate explicitly: map() is lazy under Python 3, so the previous
        # map(self._delete, diff) would never execute the deletions there.
        for stale_id in diff:
            self._delete(stale_id)
        return (updated, len(diff), )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.